hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acf36a5e2061d7d973a164c2c452773bde550303 | 2,140 | py | Python | pytorch_gleam/callbacks/checkpoint.py | Supermaxman/pytorch-gleam | 8b0d8dddc812e8ae120c9760fd44fe93da3f902d | [
"Apache-2.0"
] | null | null | null | pytorch_gleam/callbacks/checkpoint.py | Supermaxman/pytorch-gleam | 8b0d8dddc812e8ae120c9760fd44fe93da3f902d | [
"Apache-2.0"
] | null | null | null | pytorch_gleam/callbacks/checkpoint.py | Supermaxman/pytorch-gleam | 8b0d8dddc812e8ae120c9760fd44fe93da3f902d | [
"Apache-2.0"
] | null | null | null |
import os
import torch
from pytorch_lightning.callbacks import Callback
import pytorch_lightning as pl
from pytorch_lightning.trainer.states import TrainerFn
from pytorch_lightning.utilities import rank_zero_only
class FitCheckpointCallback(Callback):
    """Saves the model's ``state_dict`` at the end of ``fit()`` and restores
    it before standalone test/validation/prediction runs.

    The checkpoint is written as a plain PyTorch ``state_dict`` to
    ``<trainer.default_root_dir>/pytorch_model.bin``.
    """

    def __init__(self):
        super().__init__()

    def _get_checkpoint_path(self, trainer: pl.Trainer) -> str:
        # The checkpoint lives alongside the trainer's other outputs.
        return os.path.join(trainer.default_root_dir, 'pytorch_model.bin')

    @rank_zero_only
    def on_fit_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        """Persist the fitted module's weights (rank-0 only)."""
        checkpoint_path = self._get_checkpoint_path(trainer)
        # Fixed: these were f-strings with no placeholders (flake8 F541).
        print('Saving checkpoint...')
        # NOTE: moving to CPU is a deliberate side effect so the saved
        # tensors are device-independent.
        pl_module.to('cpu')
        torch.save(pl_module.state_dict(), checkpoint_path)

    def _load_fit_checkpoint(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        """Load the previously saved weights into ``pl_module``.

        ``strict=False`` tolerates missing/unexpected keys so partially
        matching architectures can still be loaded.
        """
        print('Loading checkpoint...')
        checkpoint_path = self._get_checkpoint_path(trainer)
        pl_module.load_state_dict(torch.load(checkpoint_path), strict=False)

    def on_test_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        # Only reload from disk when running test standalone; during fit the
        # in-memory weights are already current.
        if trainer.state.fn != TrainerFn.FITTING:
            self._load_fit_checkpoint(trainer, pl_module)

    def on_validation_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        if trainer.state.fn != TrainerFn.FITTING:
            self._load_fit_checkpoint(trainer, pl_module)

    def on_predict_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        if trainer.state.fn != TrainerFn.FITTING:
            self._load_fit_checkpoint(trainer, pl_module)
class PreTrainedCheckpointCallback(Callback):
    """Loads pre-trained weights from ``pre_model_path`` at the start of fit.

    Useful for fine-tuning: the module is initialized from an existing
    ``state_dict`` file before training begins.
    """

    def __init__(self, pre_model_path: str, pre_checkpoint_name: str = 'pytorch_model.bin'):
        super().__init__()
        self.pre_model_path = pre_model_path
        self.pre_checkpoint_name = pre_checkpoint_name
        self.checkpoint_path = os.path.join(self.pre_model_path, self.pre_checkpoint_name)

    def _load_pre_checkpoint(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        # Fixed: this was an f-string with no placeholders (flake8 F541).
        print('Loading checkpoint...')
        # strict=False: the pre-trained checkpoint may not cover every
        # parameter of the current module (e.g. new task heads).
        pl_module.load_state_dict(torch.load(self.checkpoint_path), strict=False)

    def on_fit_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        """Initialize the module from the pre-trained weights."""
        self._load_pre_checkpoint(trainer, pl_module)
| 37.54386 | 89 | 0.798598 |
acf36d9fb50efb33bba98517f4946b6f13c37643 | 2,757 | py | Python | test_cases/general/primitive_type/date/py/test_generate/fromjsonable.py | Parquery/mapry | 93515307f9eba8447fe64b0ac7cc68b2d07205a7 | [
"MIT"
] | 11 | 2019-06-26T05:56:41.000Z | 2021-03-28T16:44:16.000Z | test_cases/general/primitive_type/date/py/test_generate/fromjsonable.py | Parquery/mapry | 93515307f9eba8447fe64b0ac7cc68b2d07205a7 | [
"MIT"
] | 4 | 2019-10-18T14:43:59.000Z | 2020-04-02T19:12:07.000Z | test_cases/general/primitive_type/date/py/test_generate/fromjsonable.py | Parquery/mapry | 93515307f9eba8447fe64b0ac7cc68b2d07205a7 | [
"MIT"
] | 3 | 2019-06-17T07:39:03.000Z | 2020-04-01T14:01:23.000Z | # File automatically generated by mapry. DO NOT EDIT OR APPEND!
"""parses JSONable objects."""
import datetime
import typing
import some.graph
import some.graph.parse
def some_graph_from(
        value: typing.Any,
        ref: str,
        errors: some.graph.parse.Errors
) -> typing.Optional[some.graph.SomeGraph]:
    """
    parses SomeGraph from a JSONable value.

    Mapry-generated parser: failures are accumulated in ``errors`` (up to
    its capacity) instead of raising, and ``None`` is returned whenever any
    error was recorded.

    :param value: JSONable value
    :param ref: reference to the value (e.g., a reference path)
    :param errors: errors encountered during parsing
    :return: parsed SomeGraph, or None if ``errors``
    """
    # Bail out early if the error container has already hit its capacity.
    if errors.full():
        return None
    if not isinstance(value, dict):
        errors.add(
            ref,
            "Expected a dictionary, but got: {}".format(type(value)))
        return None
    # Start from a placeholder instance and fill properties in as they parse.
    graph = some.graph.parse.placeholder_some_graph()
    ##
    # Parse some_date
    ##
    # value_0 is the raw JSONable for the mandatory 'some_date' property;
    # it must be a string in '%Y/%m/%d' format.
    value_0 = value.get(
        'some_date',
        None)
    if value_0 is None:
        errors.add(
            ref,
            'Property is missing: some_date')
    else:
        if not isinstance(value_0, str):
            errors.add(
                '/'.join((
                    ref, 'some_date')),
                "Expected a string, but got: {}".format(
                    type(value_0)))
        else:
            try:
                graph.some_date = datetime.datetime.strptime(
                    value_0,
                    '%Y/%m/%d'
                ).date()
            except ValueError:
                errors.add(
                    '/'.join((
                        ref, 'some_date')),
                    'Expected to strptime %Y/%m/%d, but got: {}'.format(
                        value_0))
    if errors.full():
        return None
    ##
    # Parse formatless_date
    ##
    # value_2 is the raw JSONable for the mandatory 'formatless_date'
    # property; with no explicit format it defaults to ISO '%Y-%m-%d'.
    value_2 = value.get(
        'formatless_date',
        None)
    if value_2 is None:
        errors.add(
            ref,
            'Property is missing: formatless_date')
    else:
        if not isinstance(value_2, str):
            errors.add(
                '/'.join((
                    ref, 'formatless_date')),
                "Expected a string, but got: {}".format(
                    type(value_2)))
        else:
            try:
                graph.formatless_date = datetime.datetime.strptime(
                    value_2,
                    '%Y-%m-%d'
                ).date()
            except ValueError:
                errors.add(
                    '/'.join((
                        ref, 'formatless_date')),
                    'Expected to strptime %Y-%m-%d, but got: {}'.format(
                        value_2))
    if errors.full():
        return None
    # Any accumulated error invalidates the whole parse.
    if not errors.empty():
        return None
    return graph
| 24.616071 | 72 | 0.479869 |
acf36ec3a01ef1fc80543f6891821b8c47d043db | 429 | py | Python | clients/models.py | sherzo/pv | b574b5c31a54514fe86ee3792e9e39a417d0816f | [
"MIT"
] | 1 | 2020-04-11T20:46:17.000Z | 2020-04-11T20:46:17.000Z | clients/models.py | sherzo/pv | b574b5c31a54514fe86ee3792e9e39a417d0816f | [
"MIT"
] | null | null | null | clients/models.py | sherzo/pv | b574b5c31a54514fe86ee3792e9e39a417d0816f | [
"MIT"
] | null | null | null | import uuid
class Client:
    """A client record: a contact person at a company.

    Attributes mirror the constructor arguments; ``uid`` is generated with
    ``uuid.uuid4()`` when the caller does not supply one.
    """

    def __init__(self, name, company, email, position, uid=None):
        self.name = name
        self.company = company
        self.email = email
        self.position = position
        # Any falsy uid (None, '') triggers generation of a fresh UUID.
        self.uid = uid or uuid.uuid4()

    def to_dict(self):
        """Return the client's attributes as a new dict.

        Returns a shallow copy so callers cannot mutate the instance's
        state through the result (previously the live ``__dict__`` was
        returned).
        """
        return dict(vars(self))

    @staticmethod
    def schema():
        """Return the ordered list of field names."""
        return ['name', 'company', 'email', 'position', 'uid']
acf36f19223fd3d11d254996c38cfd212d35544f | 215 | py | Python | frog_jmp.py | hvnsweeting/codility | 1904ad1928b7cb02e426023922578d1a4fa01d6b | [
"MIT"
] | null | null | null | frog_jmp.py | hvnsweeting/codility | 1904ad1928b7cb02e426023922578d1a4fa01d6b | [
"MIT"
] | null | null | null | frog_jmp.py | hvnsweeting/codility | 1904ad1928b7cb02e426023922578d1a4fa01d6b | [
"MIT"
] | null | null | null | # https://codility.com/programmers/lessons/3-time_complexity/frog_jmp/
import math
def solution(X, Y, D):
    """Return the minimum number of D-length jumps to move from X to >= Y.

    Uses exact integer ceiling division instead of ``math.ceil`` on a
    float: ``(Y - X) / float(D)`` loses precision once the distance
    exceeds 2**53, giving wrong answers for very large inputs.
    """
    return (Y - X + D - 1) // D
if __name__ == "__main__":
    # Smoke test from the Codility example: (85 - 10) / 30 = 2.5 -> 3 jumps.
    assert 3 == solution(10, 85, 30)
| 19.545455 | 70 | 0.665116 |
acf36f2861dfb82bde221290dc40773dabea0b7e | 6,051 | py | Python | lib/rucio/web/rest/flaskapi/v1/accountlimits.py | justincc/rucio | 95d81403c835d9f43fc30d328a8e2e388617a369 | [
"Apache-2.0"
] | 2 | 2021-05-19T08:55:40.000Z | 2021-05-19T08:55:43.000Z | lib/rucio/web/rest/flaskapi/v1/accountlimits.py | justincc/rucio | 95d81403c835d9f43fc30d328a8e2e388617a369 | [
"Apache-2.0"
] | null | null | null | lib/rucio/web/rest/flaskapi/v1/accountlimits.py | justincc/rucio | 95d81403c835d9f43fc30d328a8e2e388617a369 | [
"Apache-2.0"
] | 1 | 2018-06-25T19:12:53.000Z | 2018-06-25T19:12:53.000Z | # -*- coding: utf-8 -*-
# Copyright 2014-2021 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Martin Barisits <martin.barisits@cern.ch>, 2014
# - Vincent Garonne <vincent.garonne@cern.ch>, 2017
# - Thomas Beermann <thomas.beermann@cern.ch>, 2018-2021
# - Mario Lassnig <mario.lassnig@cern.ch>, 2018-2021
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019
# - Muhammad Aditya Hilmy <didithilmy@gmail.com>, 2020
# - Eli Chadwick <eli.chadwick@stfc.ac.uk>, 2020
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020-2021
from flask import Flask, Blueprint, request
from rucio.api.account_limit import set_local_account_limit, delete_local_account_limit, set_global_account_limit, \
delete_global_account_limit
from rucio.common.exception import RSENotFound, AccessDenied, AccountNotFound
from rucio.web.rest.flaskapi.v1.common import request_auth_env, response_headers, ErrorHandlingMethodView, \
generate_http_error_flask, json_parameters, param_get
class LocalAccountLimit(ErrorHandlingMethodView):

    def post(self, account, rse):
        """ Create or update a local account limit.

        .. :quickref: LocalAccountLimit; Create/update local account limits.

        :param account: Account name.
        :param rse: RSE name.
        :status 201: Successfully created or updated.
        :status 401: Invalid auth token.
        :status 404: RSE not found.
        :status 404: Account not found
        """
        payload = json_parameters()
        limit_bytes = param_get(payload, 'bytes')
        issuer = request.environ.get('issuer')
        vo = request.environ.get('vo')
        try:
            set_local_account_limit(account=account, rse=rse, bytes=limit_bytes, issuer=issuer, vo=vo)
        except AccessDenied as error:
            return generate_http_error_flask(401, error)
        except (RSENotFound, AccountNotFound) as error:
            return generate_http_error_flask(404, error)
        return 'Created', 201

    def delete(self, account, rse):
        """ Delete a local account limit.

        .. :quickref: LocalAccountLimit; Delete local account limits.

        :param account: Account name.
        :param rse: RSE name.
        :status 200: Successfully deleted.
        :status 401: Invalid auth token.
        :status 404: RSE not found.
        :status 404: Account not found
        """
        issuer = request.environ.get('issuer')
        vo = request.environ.get('vo')
        try:
            delete_local_account_limit(account=account, rse=rse, issuer=issuer, vo=vo)
        except AccessDenied as error:
            return generate_http_error_flask(401, error)
        except (AccountNotFound, RSENotFound) as error:
            return generate_http_error_flask(404, error)
        return '', 200
class GlobalAccountLimit(ErrorHandlingMethodView):

    def post(self, account, rse_expression):
        """ Create or update a global account limit.

        .. :quickref: GlobalAccountLimit; Create/update global account limits.

        :param account: Account name.
        :param rse_expression: RSE name.
        :status 201: Successfully created or updated.
        :status 401: Invalid auth token.
        :status 404: RSE not found.
        :status 404: Account not found
        """
        payload = json_parameters()
        limit_bytes = param_get(payload, 'bytes')
        try:
            set_global_account_limit(
                account=account,
                rse_expression=rse_expression,
                bytes=limit_bytes,
                issuer=request.environ.get('issuer'),
                vo=request.environ.get('vo'),
            )
        except AccessDenied as error:
            return generate_http_error_flask(401, error)
        except (RSENotFound, AccountNotFound) as error:
            return generate_http_error_flask(404, error)
        return 'Created', 201

    def delete(self, account, rse_expression):
        """ Delete a global account limit.

        .. :quickref: GlobalAccountLimit; Delete global account limits.

        :param account: Account name.
        :param rse_expression: RSE name.
        :status 200: Successfully deleted.
        :status 401: Invalid auth token.
        :status 404: RSE not found.
        :status 404: Account not found
        """
        issuer = request.environ.get('issuer')
        vo = request.environ.get('vo')
        try:
            delete_global_account_limit(account=account, rse_expression=rse_expression, issuer=issuer, vo=vo)
        except AccessDenied as error:
            return generate_http_error_flask(401, error)
        except (AccountNotFound, RSENotFound) as error:
            return generate_http_error_flask(404, error)
        return '', 200
def blueprint(no_doc=True):
    """Build the /accountlimits blueprint with its view routes."""
    bp = Blueprint('accountlimits', __name__, url_prefix='/accountlimits')

    local_view = LocalAccountLimit.as_view('local_account_limit')
    bp.add_url_rule('/local/<account>/<rse>', view_func=local_view, methods=['post', 'delete'])
    if no_doc:
        # Extra un-prefixed route; only registered when docs are not built.
        bp.add_url_rule('/<account>/<rse>', view_func=local_view, methods=['post', 'delete'])

    global_view = GlobalAccountLimit.as_view('global_account_limit')
    bp.add_url_rule('/global/<account>/<rse_expression>', view_func=global_view, methods=['post', 'delete'])

    bp.before_request(request_auth_env)
    bp.after_request(response_headers)
    return bp
def make_doc():
    """ Only used for sphinx documentation """
    sphinx_app = Flask(__name__)
    sphinx_app.register_blueprint(blueprint(no_doc=False))
    return sphinx_app
| 39.292208 | 155 | 0.683689 |
acf36f7cb3f153d60e49a6c175eaf96497092984 | 1,326 | py | Python | Temitope_Rekeying/vanity_generator.py | MoonX-Hub/Voting | e7d3672405a8752a7d8ea15bfde8201c943d7685 | [
"Apache-2.0"
] | 764 | 2021-08-30T23:25:24.000Z | 2022-03-12T06:58:23.000Z | Temitope_Rekeying/vanity_generator.py | MoonX-Hub/Voting | e7d3672405a8752a7d8ea15bfde8201c943d7685 | [
"Apache-2.0"
] | 331 | 2021-09-01T20:45:42.000Z | 2022-03-31T23:48:51.000Z | Temitope_Rekeying/vanity_generator.py | MoonX-Hub/Voting | e7d3672405a8752a7d8ea15bfde8201c943d7685 | [
"Apache-2.0"
] | 1,610 | 2021-08-31T20:27:07.000Z | 2022-02-19T14:15:09.000Z | from algosdk import account
from algosdk import mnemonic
from algosdk import account, mnemonic
from algosdk import transaction
from algosdk import algod
vanity_private_key, vanity_address = account.generate_account()
print("Private key:", vanity_private_key)
print("Address:", vanity_address)
prefix = "GREEN_REX"
while (not vanity_address.startswith(prefix)):
vanity_private_key, vanity_address = account.generate_account()
print("Mnemonic:", mnemonic.from_private_key(vanity_private_key))
existing_private_key, existing_address = account.generate_account()
algod_token = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
algod_address = 'http://localhost:4001'
algod_client = algod.AlgodClient(algod_token, algod_address)
params = algod_client.suggested_params()
txn_rekey = transaction.PaymentTxn(vanity_address, params['minFee'], params['lastRound'], params['lastRound']+1000, params['genesishashb64'], vanity_address, 0, rekey_to=existing_address)
stxn_rekey = txn_rekey.sign(vanity_private_key)
algod_client.send_transaction(stxn_rekey)
txn_test = transaction.Payment(vanity_address, params['minFee'], params['lastRound'], params['lastRound']+1000, params['genesishashb64'], existing_address, 1000000)
stxn_test = txn_test.sign(existing_private_key)
algod_client.send_transaction(stxn_test) | 41.4375 | 187 | 0.824284 |
acf3714dbe1a735d8e43fedc04a912d87d165bd5 | 9,549 | py | Python | lib/streamlit/ConfigOption.py | AdrienWehrle/streamlit | c6efb870088e5940add642189384ed12af8d1c79 | [
"Apache-2.0"
] | 1 | 2020-02-23T21:45:28.000Z | 2020-02-23T21:45:28.000Z | lib/streamlit/ConfigOption.py | AdrienWehrle/streamlit | c6efb870088e5940add642189384ed12af8d1c79 | [
"Apache-2.0"
] | null | null | null | lib/streamlit/ConfigOption.py | AdrienWehrle/streamlit | c6efb870088e5940add642189384ed12af8d1c79 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2018-2020 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This class stores a key-value pair for the config system."""
# Python 2/3 compatibility
from __future__ import print_function, division, unicode_literals, absolute_import
from streamlit.compatibility import setup_2_3_shims
setup_2_3_shims(globals())
import datetime
import re
import textwrap
from typing import Any, Callable, Optional
from streamlit.logger import get_logger
LOGGER = get_logger(__name__)
class ConfigOption(object):
    '''Stores a Streamlit configuration option.

    A configuration option, like 'browser.serverPort', which indicates which port
    to use when connecting to the proxy. There are two ways to create a
    ConfigOption:

    Simple ConfigOptions are created as follows:

        ConfigOption('browser.serverPort',
            description = 'Connect to the proxy at this port.',
            default_val = 8501)

    More complex config options resolve their values at runtime as follows:

        @ConfigOption('browser.serverPort')
        def _proxy_port():
            """Connect to the proxy at this port.

            Defaults to 8501.
            """
            return 8501

    NOTE: For complex config options, the function is called each time the
    option.value is evaluated!

    Attributes
    ----------
    key : str
        The fully qualified section.name
    value
        The value for this option. If this is a a complex config option then
        the callback is called EACH TIME value is evaluated.
    section : str
        The section of this option. Example: 'global'.
    name : str
        See __init__.
    description : str
        See __init__.
    where_defined : str
        Indicates which file set this config option.
        ConfigOption.DEFAULT_DEFINITION means this file.
    visibility : {"visible", "hidden"}
        See __init__.
    scriptable : bool
        See __init__.
    deprecated: bool
        See __init__.
    deprecation_text : str or None
        See __init__.
    expiration_date : str or None
        See __init__.
    replaced_by : str or None
        See __init__.
    '''

    # This is a special value for ConfigOption.where_defined which indicates
    # that the option default was not overridden.
    DEFAULT_DEFINITION = "<default>"

    # This is a special value for ConfigOption.where_defined which indicates
    # that the options was defined by Streamlit's own code.
    STREAMLIT_DEFINITION = "<streamlit>"

    def __init__(
        self,
        key,
        description=None,
        default_val=None,
        visibility="visible",
        scriptable=False,
        deprecated=False,
        deprecation_text=None,
        expiration_date=None,
        replaced_by=None,
        config_getter=None,
        type_=str,
    ):
        """Create a ConfigOption with the given name.

        Parameters
        ----------
        key : str
            Should be of the form "section.optionName"
        description : str
            Like a comment for the config option.
        default_val : anything
            The value for this config option.
        visibility : {"visible", "hidden"}
            Whether this option should be shown to users.
        scriptable : bool
            Whether this config option can be set within a user script.
        deprecated: bool
            Whether this config option is deprecated.
        deprecation_text : str or None
            Required if deprecated == True. Set this to a string explaining
            what to use instead.
        expiration_date : str or None
            Required if deprecated == True. set this to the date at which it
            will no longer be accepted. Format: 'YYYY-MM-DD'.
        replaced_by : str or None
            If this is option has been deprecated in favor or another option,
            set this to the path to the new option. Example:
            'server.runOnSave'. If this is set, the 'deprecated' option
            will automatically be set to True, and deprecation_text will have a
            meaningful default (unless you override it).
        config_getter : callable or None
            Required if replaced_by != None. Should be set to
            config.get_option.
        type_ : one of str, int, float or bool
            Useful to cast the config params sent by cmd option parameter.
        """
        # Parse out the section and name.
        self.key = key
        # Section may start with an underscore (private sections); names are
        # camelCase after a leading lowercase letter.
        key_format = r"(?P<section>\_?[a-z][a-z0-9]*)\.(?P<name>[a-z][a-zA-Z0-9]*)$"
        match = re.match(key_format, self.key)
        assert match, 'Key "%s" has invalid format.' % self.key
        self.section, self.name = match.group("section"), match.group("name")

        self.description = description

        self.visibility = visibility
        self.scriptable = scriptable
        self.default_val = default_val
        self.deprecated = deprecated
        self.replaced_by = replaced_by
        self._get_val_func = None  # type: Optional[Callable[[], Any]]
        self.where_defined = ConfigOption.DEFAULT_DEFINITION
        self.type = type_

        # An option with replaced_by is implicitly deprecated, with a
        # default deprecation message pointing at the replacement.
        if self.replaced_by:
            self.deprecated = True
            if deprecation_text is None:
                deprecation_text = "Replaced by %s." % self.replaced_by

        if self.deprecated:
            assert expiration_date, "expiration_date is required for deprecated items"
            assert deprecation_text, "deprecation_text is required for deprecated items"
            self.expiration_date = expiration_date
            self.deprecation_text = textwrap.dedent(deprecation_text)

        # Initialize the stored value with the default.
        self.set_value(default_val)

    def __call__(self, get_val_func):
        """Assign a function to compute the value for this option.

        This method is called when ConfigOption is used as a decorator.

        Parameters
        ----------
        get_val_func : function
            A function which will be called to get the value of this parameter.
            We will use its docString as the description.

        Returns
        -------
        ConfigOption
            Returns self, which makes testing easier. See config_test.py.

        """
        assert (
            get_val_func.__doc__
        ), "Complex config options require doc strings for their description."
        self.description = get_val_func.__doc__
        self._get_val_func = get_val_func
        return self

    @property
    def value(self) -> Any:
        """Get the value of this config option.

        For complex options this invokes the registered callback on every
        access.
        """
        if self._get_val_func is None:
            return None
        return self._get_val_func()

    def set_value(self, value, where_defined=None) -> None:
        """Set the value of this option.

        Parameters
        ----------
        value
            The new value for this parameter.
        where_defined : str
            New value to remember where this parameter was set.

        """
        # Store the value behind a closure so simple and complex options
        # share the same access path (self._get_val_func).
        self._get_val_func = lambda: value

        if where_defined is None:
            self.where_defined = ConfigOption.DEFAULT_DEFINITION
        else:
            self.where_defined = where_defined

        # Only warn/raise about deprecation when the user actually set the
        # option somewhere (i.e. it is not just the built-in default).
        if self.deprecated and self.where_defined != ConfigOption.DEFAULT_DEFINITION:

            details = {
                "key": self.key,
                "file": self.where_defined,
                "explanation": self.deprecation_text,
                "date": self.expiration_date,
            }

            if self.is_expired():
                # Expired deprecated options are a hard error.
                raise DeprecationError(
                    textwrap.dedent(
                        """
                        ════════════════════════════════════════════════
                        %(key)s IS NO LONGER SUPPORTED.

                        %(explanation)s

                        Please update %(file)s.
                        ════════════════════════════════════════════════
                        """
                    )
                    % details
                )
            else:
                LOGGER.warning(
                    textwrap.dedent(
                        """
                        ════════════════════════════════════════════════
                        %(key)s IS DEPRECATED.
                        %(explanation)s

                        This option will be removed on or after %(date)s.

                        Please update %(file)s.
                        ════════════════════════════════════════════════
                        """
                    )
                    % details
                )

    def is_expired(self) -> bool:
        """Returns true if expiration_date is in the past."""
        if not self.deprecated:
            return False

        expiration_date = _parse_yyyymmdd_str(self.expiration_date)
        now = datetime.datetime.now()
        return now > expiration_date
def _parse_yyyymmdd_str(date_str: str) -> datetime.datetime:
year, month, day = [int(token) for token in date_str.split("-", 2)]
return datetime.datetime(year, month, day)
class Error(Exception):
    """Base class for exceptions raised by this module."""

    pass
class DeprecationError(Error):
    """Raised when an expired deprecated config option is set."""

    pass
| 33.15625 | 88 | 0.591999 |
acf3717b89809dd0aa8556aa728e5ef55d220d75 | 4,708 | py | Python | tests/components/ambiclimate/test_config_flow.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 23 | 2017-11-15T21:03:53.000Z | 2021-03-29T21:33:48.000Z | tests/components/ambiclimate/test_config_flow.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 9 | 2022-01-27T06:32:10.000Z | 2022-03-31T07:07:51.000Z | tests/components/ambiclimate/test_config_flow.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 10 | 2018-01-01T00:12:51.000Z | 2021-12-21T23:08:05.000Z | """Tests for the Ambiclimate config flow."""
from unittest.mock import Mock, patch
import ambiclimate
from homeassistant import data_entry_flow
from homeassistant.components.ambiclimate import config_flow
from homeassistant.setup import async_setup_component
from homeassistant.util import aiohttp
from tests.common import mock_coro
async def init_config_flow(hass):
    """Set up the http component and return a ready-to-use flow handler."""
    conf = {"http": {"base_url": "https://hass.com"}}
    await async_setup_component(hass, "http", conf)
    config_flow.register_flow_implementation(hass, "id", "secret")

    handler = config_flow.AmbiclimateFlowHandler()
    handler.hass = hass
    return handler
async def test_abort_if_no_implementation_registered(hass):
    """Test we abort if no implementation is registered."""
    handler = config_flow.AmbiclimateFlowHandler()
    handler.hass = hass

    outcome = await handler.async_step_user()

    assert outcome["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert outcome["reason"] == "no_config"
async def test_abort_if_already_setup(hass):
    """Test we abort if Ambiclimate is already setup."""
    flow = await init_config_flow(hass)

    # A non-empty async_entries result means a config entry already exists,
    # so both the user step and the code step must abort.
    with patch.object(hass.config_entries, "async_entries", return_value=[{}]):
        result = await flow.async_step_user()
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "already_setup"

    with patch.object(hass.config_entries, "async_entries", return_value=[{}]):
        result = await flow.async_step_code()
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "already_setup"
async def test_full_flow_implementation(hass):
    """Test registering an implementation and finishing flow works."""
    config_flow.register_flow_implementation(hass, None, None)
    flow = await init_config_flow(hass)

    # The user step should show the auth form with the OAuth URL built from
    # the registered client id and the hass base_url callback.
    result = await flow.async_step_user()
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "auth"
    assert (
        result["description_placeholders"]["cb_url"]
        == "https://hass.com/api/ambiclimate"
    )
    url = result["description_placeholders"]["authorization_url"]
    assert "https://api.ambiclimate.com/oauth2/authorize" in url
    assert "client_id=id" in url
    assert "response_type=code" in url
    assert "redirect_uri=https%3A%2F%2Fhass.com%2Fapi%2Fambiclimate" in url

    # Successful token exchange -> entry is created with the stored creds.
    # NOTE: each mock_coro(...) is awaited once by the flow, so a fresh
    # patch is set up per step.
    with patch(
        "ambiclimate.AmbiclimateOAuth.get_access_token", return_value=mock_coro("test")
    ):
        result = await flow.async_step_code("123ABC")
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == "Ambiclimate"
    assert result["data"]["callback_url"] == "https://hass.com/api/ambiclimate"
    assert result["data"]["client_secret"] == "secret"
    assert result["data"]["client_id"] == "id"

    # A None token aborts the flow.
    with patch(
        "ambiclimate.AmbiclimateOAuth.get_access_token", return_value=mock_coro(None)
    ):
        result = await flow.async_step_code("123ABC")
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT

    # An OAuth error from the library also aborts the flow.
    with patch(
        "ambiclimate.AmbiclimateOAuth.get_access_token",
        side_effect=ambiclimate.AmbiclimateOauthError(),
    ):
        result = await flow.async_step_code("123ABC")
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
async def test_abort_invalid_code(hass):
    """Test if no code is given to step_code."""
    config_flow.register_flow_implementation(hass, None, None)
    flow = await init_config_flow(hass)

    # Token exchange returning None means the code was invalid; the flow
    # must abort with the 'access_token' reason.
    with patch(
        "ambiclimate.AmbiclimateOAuth.get_access_token", return_value=mock_coro(None)
    ):
        result = await flow.async_step_code("invalid")
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "access_token"
async def test_already_setup(hass):
    """Test when already setup."""
    config_flow.register_flow_implementation(hass, None, None)
    handler = await init_config_flow(hass)

    with patch.object(hass.config_entries, "async_entries", return_value=True):
        outcome = await handler.async_step_user()

    assert outcome["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert outcome["reason"] == "already_setup"
async def test_view(hass):
    """Test view."""
    # Stub out flow creation; the view only needs it to exist.
    hass.config_entries.flow.async_init = Mock()

    # A request carrying ?code=... is accepted.
    request = aiohttp.MockRequest(b"", query_string="code=test_code")
    request.app = {"hass": hass}
    view = config_flow.AmbiclimateAuthCallbackView()
    assert await view.get(request) == "OK!"

    # A request without a code is rejected with a plain-text message.
    request = aiohttp.MockRequest(b"", query_string="")
    request.app = {"hass": hass}
    view = config_flow.AmbiclimateAuthCallbackView()
    assert await view.get(request) == "No code"
| 35.398496 | 87 | 0.714316 |
acf371c01792a1aa118f631d271d0a7d289b2a83 | 12,532 | py | Python | othello.py | 2Bear/othello-zero | 910d9de816f33a8088b2a962f3816f6b679ecc0f | [
"MIT"
] | 10 | 2019-01-28T02:12:32.000Z | 2021-09-21T02:53:07.000Z | othello.py | 2Bear/othello-zero | 910d9de816f33a8088b2a962f3816f6b679ecc0f | [
"MIT"
] | 3 | 2020-05-10T01:58:40.000Z | 2021-05-31T11:06:35.000Z | othello.py | 2Bear/othello-zero | 910d9de816f33a8088b2a962f3816f6b679ecc0f | [
"MIT"
] | 7 | 2020-05-12T23:21:04.000Z | 2021-09-29T08:10:59.000Z | import argparse
import gc
import os
import random
import traceback
from multiprocessing import Pool, Process
import numpy as np
import tensorflow.compat.v1 as tf
import api
import board
import config
import gui
import net
import tree
from util import log, plane_2_line
class SelfPlayGame:
    """Runs batched self-play games and dumps (state, pi, winner) training
    data to compressed .npz files, one file per worker echo.
    """

    def __init__(self, worker_id, batch_size=config.self_play_batch_size, echo_max=config.self_play_echo_max):
        # version: network checkpoint version this worker plays with
        # (recovered from the last checkpoint name in restore()).
        self.version = 0
        # echo: index of the current self-play round for this worker.
        self.echo = 0
        self.echo_max = echo_max
        self.worker_id = worker_id
        self.batch_size = batch_size
        # One game tree root pair per game in the batch.
        self.fake_nodes = [None] * batch_size
        self.current_nodes = [None] * batch_size

    def start(self):
        """Open a TF session, restore state, and loop play/save echoes."""
        # NOTE: 'woker' is the attribute name as spelled in config.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=config.self_play_woker_gpu_memory_fraction)
        with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as session:
            saver = tf.train.Saver()
            self.restore(session, saver)
            nn = net.NN(session)
            mcts_batch = tree.MCTS_Batch(nn)
            while self.echo < self.echo_max:
                log("selfplay worker", self.worker_id, "version:", self.version, "echo:", self.echo, "session start.")
                self.play(mcts_batch)
                self.save()
                self.echo += 1
            log("selfplay worker", self.worker_id, "session end.")

    def play(self, mcts_batch):
        """Play one batch of games to completion using batched MCTS."""
        terminals_num = 0
        moves_num = 0
        # Fresh game roots: black moves first from an empty board.
        for i in range(self.batch_size):
            self.fake_nodes[i] = tree.FakeNode()
            self.current_nodes[i] = tree.Node(self.fake_nodes[i], 0, config.black, board.Board())
            self.current_nodes[i].is_game_root = True
            self.current_nodes[i].is_search_root = True
        # Step all games in lockstep until every game reached a terminal node.
        while terminals_num != self.batch_size:
            terminals_num = 0
            moves_num += 1
            gc.collect()
            # get_temperature / pick_move_probabilistically / make_move are
            # module-level helpers defined elsewhere in this file.
            pi_batch = mcts_batch.alpha(self.current_nodes, get_temperature(moves_num))
            for i in range(self.batch_size):
                if self.current_nodes[i].is_terminal is True:
                    terminals_num += 1
                else:
                    move = pick_move_probabilistically(pi_batch[i])
                    self.current_nodes[i] = make_move(self.current_nodes[i], move)

    def save(self):
        """Write all finished games as flat [features, pi, winner] triples.

        The filename encodes batch size, network version, worker id and
        echo: {batch:03d}_{version:03d}_{worker:02d}{echo:02d}.npz
        """
        data = []
        for node in self.current_nodes:
            # Winner from final stone counts: 1 = black, -1 = white, 0 = draw.
            winner = 0
            black_stones_num = np.sum(node.board.black_array2d)
            white_stones_num = np.sum(node.board.white_array2d)
            if black_stones_num > white_stones_num:
                winner = 1
            elif black_stones_num < white_stones_num:
                winner = -1
            # Walk the game backwards from the terminal node to the root,
            # appending every position with the (game-level) winner label.
            current = node
            while True:
                data.append(current.to_features())
                data.append(current.pi)
                data.append(winner)
                if current.is_game_root:
                    break
                current = current.parent
        np.savez_compressed(config.data_path + "{0:03d}_{1:03d}_{2:02d}{3:02d}".format(self.batch_size, self.version, self.worker_id, self.echo), data=data)

    def restore(self, session, saver):
        """Restore network weights and resume this worker's echo counter.

        Data files from older versions are moved to the archives folder;
        the echo counter continues after the newest file this worker wrote
        for the current version.
        """
        checkpoint_name = restore_from_last_checkpoint(session, saver)
        if checkpoint_name:
            # Checkpoint names look like v{version:03d}-{step}; strip 'v'.
            self.version = int(checkpoint_name[1:].split('-')[0])
        last_echo = -1
        npz_file_names = get_npz_file_names()
        for file_name in npz_file_names:
            file_name_splited = file_name.split('_')
            # Last component is {worker:02d}{echo:02d}[.npz] per save().
            if int(file_name_splited[-1][:2]) == self.worker_id:
                if int(file_name_splited[1]) < self.version:
                    os.rename(config.data_path + file_name, config.archives_path + file_name)
                else:
                    this_echo = int(file_name_splited[-1][2:4])
                    if this_echo > last_echo:
                        last_echo = this_echo
        self.echo = last_echo + 1
class Train:
def __init__(self, batch_size=config.train_batch_size, echo_max=config.train_echo_max):
self.version = 0
self.state_data = np.zeros((0, config.N, config.N, config.history_num * 2 + 1), dtype=np.float)
self.pi_data = np.zeros((0, config.all_moves_num), dtype=np.float)
self.z_data = np.zeros((0, 1), dtype=np.float)
self.batch_size = batch_size
self.echo_max = echo_max
self.data_len = self.load_data()
self.batch_num = (self.data_len // self.batch_size) + 1
self.global_step = 0
def start(self):
if self.data_len == 0:
log("no data for training.")
return
with tf.Session() as session:
saver = tf.train.Saver(max_to_keep=config.train_checkpoint_max_to_keep)
self.restore(session, saver)
nn = net.NN(session)
log("training version:", self.version, "global step:", self.global_step, "session start.")
with open(config.log_path + "loss_log.csv", "a+") as loss_log_file:
for echo in range(self.echo_max):
for batch_index in range(self.batch_num):
self.global_step += 1
state_batch, pi_batch, z_batch = self.get_next_batch(batch_index, self.batch_size)
p_loss, v_loss = nn.train(state_batch, pi_batch, z_batch)
loss_log_file.write("{0},{1},{2}\n".format(self.global_step, p_loss, v_loss))
log("training echo:", echo, "global step:", self.global_step)
saver.save(session, config.checkpoint_path + "v{0:03d}".format(self.version), global_step=self.global_step)
self.clear()
log("training session end.")
def load_data(self):
npz_file_names = get_npz_file_names()
if len(npz_file_names) == 0:
self.data_len = 0
return self.data_len
self.version = int(npz_file_names[0].split('_')[1]) + 1
for npz_file_name in npz_file_names:
data = np.load(config.data_path + npz_file_name)['data']
data_len = int(len(data) / 3)
_state_data = np.zeros((data_len, config.N, config.N, config.history_num * 2 + 1), dtype=np.float)
_pi_data = np.zeros((data_len, config.all_moves_num), dtype=np.float)
_z_data = np.zeros((data_len, 1), dtype=np.float)
for i in range(data_len):
_state_data[i] = data[3 * i]
_pi_data[i] = data[3 * i + 1]
_z_data[i] = data[3 * i + 2]
self.state_data = np.concatenate((self.state_data, _state_data))
self.pi_data = np.concatenate((self.pi_data, _pi_data))
self.z_data = np.concatenate((self.z_data, _z_data))
self.data_len = len(self.state_data)
return self.data_len
def get_next_batch(self, index, size):
start = index * size
end = (index + 1) * size
if start >= self.data_len:
start = self.data_len - size
if end > self.data_len:
end = self.data_len
return self.state_data[start:end], self.pi_data[start:end], self.z_data[start:end]
def clear(self):
npz_file_names = get_npz_file_names()
for file_name in npz_file_names:
os.rename(config.data_path + file_name, config.archives_path + file_name)
log("all npz files archived.")
def restore(self, session, saver):
checkpoint_name = restore_from_last_checkpoint(session, saver)
if checkpoint_name:
self.global_step = int(checkpoint_name.split('-')[-1])
def pick_move_probabilistically(pi):
r = random.random()
s = 0
for move in range(len(pi)):
s += pi[move]
if s >= r:
return move
return np.argmax(pi)
def pick_move_greedily(pi):
return np.argmax(pi)
def get_temperature(moves_num):
if moves_num <= 6:
return 1
else:
return 0.95 ** (moves_num - 6)
def validate(move):
if not (isinstance(move, int) or isinstance(move, np.int64)) or not (0 <= move < config.N ** 2 or move == config.pass_move):
raise ValueError("move must be integer from [0, 63] or {}, got {}".format(config.pass_move, move))
def make_move(node, move):
validate(move)
if move not in node.child_nodes:
node = tree.Node(node, move, -node.player)
else:
node = node.child_nodes[move]
node.is_search_root = True
node.parent.child_nodes.clear()
node.parent.is_search_root = False
return node
def print_winner(node):
black_stones_num = np.sum(node.board.black_array2d)
white_stones_num = np.sum(node.board.white_array2d)
if black_stones_num > white_stones_num:
print("black wins.")
elif black_stones_num < white_stones_num:
print("white wins.")
else:
print("draw.")
def restore_from_last_checkpoint(session, saver):
checkpoint = tf.train.latest_checkpoint(config.checkpoint_path)
if checkpoint:
saver.restore(session, checkpoint)
log("restored from last checkpoint.", checkpoint)
return checkpoint.split('/')[-1]
else:
session.run(tf.global_variables_initializer())
log("checkpoint not found.")
return None
def get_npz_file_names():
npz_file_names = []
walk = os.walk(config.data_path)
for dpath, _, fnames in walk:
if dpath == config.data_path:
for fname in fnames:
if fname.split('.')[-1] == "npz":
npz_file_names.append(fname)
return npz_file_names
def self_play_woker(worker_id):
try:
game = SelfPlayGame(worker_id)
game.start()
except Exception as ex:
traceback.print_exc()
def train_woker():
try:
train = Train()
train.start()
except Exception as ex:
traceback.print_exc()
def learning_loop(self_play_wokers_num=config.self_play_wokers_num, echo_max=config.learning_loop_echo_max):
for i in range(echo_max):
pool = Pool(self_play_wokers_num)
for i in range(self_play_wokers_num):
pool.apply_async(self_play_woker, (i,))
pool.close()
pool.join()
process = Process(target=train_woker)
process.start()
process.join()
def play_game(player):
moves_num = 0
mcts_batch = None
current_node = tree.Node(tree.FakeNode(), 0, config.black, board.Board())
current_node.is_game_root = True
current_node.is_search_root = True
def make_move_with_gui(current_node, move):
current_node = make_move(current_node, move)
gui.print_node(current_node)
return current_node
with tf.Session() as session:
saver = tf.train.Saver()
restore_from_last_checkpoint(session, saver)
nn = net.NN(session)
mcts_batch = tree.MCTS_Batch(nn)
moves_num = 0
while True:
gc.collect()
moves_num += 1
# zero is thinking
pi = mcts_batch.alpha([current_node], get_temperature(moves_num))[0]
zero_move = pick_move_greedily(pi)
current_node = make_move_with_gui(current_node, zero_move)
if current_node.is_terminal:
break
# player is thinking
mcts_batch.alpha([current_node], get_temperature(moves_num))[0]
player_move = player.make_move(current_node)
print("player move: {}".format(player_move))
current_node = make_move_with_gui(current_node, player_move)
if current_node.is_terminal:
break
# who is the winner
print_winner(current_node)
def play_with_edax(edax_level=config.edax_level):
play_game(api.EdaxPlayer(edax_level))
def play_with_human():
play_game(api.HumanPlayer())
parser = argparse.ArgumentParser()
parser.add_argument("-l", "--learning-loop", help='start a learning loop from the latest model, or a new random model if there is no any model', action="store_true")
parser.add_argument("-e", "--play-with-edax", help='play with edax, and print every move. but you need compile edax and copy it to right path first', action="store_true")
parser.add_argument("-m", "--play-with-human", help='play with you on the command line', action="store_true")
args = parser.parse_args()
if args.learning_loop:
learning_loop()
elif args.play_with_edax:
play_with_edax()
elif args.play_with_human:
play_with_human()
else:
learning_loop()
| 36.115274 | 170 | 0.615943 |
acf371ff1185af42b2598cdc99d91b7c0eed1726 | 4,985 | py | Python | bin/pyrad-to-pymsbayes.py | phyletica/gekgo | e0a08516a9e5bec9786327eec40c95d88d73819a | [
"CC-BY-4.0"
] | 1 | 2022-01-25T12:35:14.000Z | 2022-01-25T12:35:14.000Z | bin/pyrad-to-pymsbayes.py | phyletica/gekgo | e0a08516a9e5bec9786327eec40c95d88d73819a | [
"CC-BY-4.0"
] | null | null | null | bin/pyrad-to-pymsbayes.py | phyletica/gekgo | e0a08516a9e5bec9786327eec40c95d88d73819a | [
"CC-BY-4.0"
] | null | null | null | #! /usr/bin/env python
import os
import sys
from seqsift.utils.pymsbayes_utils import PyMsBayesComparisons
import gekgo_util
_LOG = gekgo_util.RunLogger(name=__file__,
log_to_stderr=True,
log_to_file=False)
comparisons = [
{'caluya': [
'CDS_590_Gekko_mindorensis_Caluya.trimmed',
'CDS_591_Gekko_mindorensis_Caluya.trimmed',
'CDS_607_Gekko_mindorensis_Caluya.trimmed',
'CDS_609_Gekko_mindorensis_Caluya.trimmed',
'CDS_610_Gekko_mindorensis_Caluya.trimmed',
],
'camiguin_sur': [
'CDS_99_Gekko_mindorensis_CamiguinSur.trimmed',
'CDS_100_Gekko_mindorensis_CamiguinSur.trimmed',
'CDS_101_Gekko_mindorensis_CamiguinSur.trimmed',
'CDS_102_Gekko_mindorensis_CamiguinSur.trimmed',
],
},
{'lubang': [
'CDS_3866_Gekko_mindorensis_Lubang.trimmed',
'CDS_3867_Gekko_mindorensis_Lubang.trimmed',
'CDS_3868_Gekko_mindorensis_Lubang.trimmed',
'CDS_3894_Gekko_mindorensis_Lubang.trimmed',
'CDS_3895_Gekko_mindorensis_Lubang.trimmed',
'CDS_3935_Gekko_mindorensis_Lubang.trimmed',
'CDS_3936_Gekko_mindorensis_Lubang.trimmed',
],
'mindoro_n': [
'ELR_911_Gekko_mindorensis_Mindoro.trimmed',
'ELR_1013_Gekko_mindorensis_Mindoro.trimmed',
'ELR_1006_Gekko_mindorensis_Mindoro.trimmed',
'ELR_1007_Gekko_mindorensis_Mindoro.trimmed',
'ELR_1008_Gekko_mindorensis_Mindoro.trimmed',
'ELR_1009_Gekko_mindorensis_Mindoro.trimmed',
'ELR_1010_Gekko_mindorensis_Mindoro.trimmed',
],
},
{'maestre_de_campo': [
'CDS_1459_Gekko_mindorensis_MaestreDeCampo.trimmed',
'CDS_1461_Gekko_mindorensis_MaestreDeCampo.trimmed',
'CDS_1462_Gekko_mindorensis_MaestreDeCampo.trimmed',
'CDS_1463_Gekko_mindorensis_MaestreDeCampo.trimmed',
],
'mindoro_s': [
'CDS_1229_Gekko_mindorensis_Mindoro.trimmed',
'CDS_1230_Gekko_mindorensis_Mindoro.trimmed',
'CDS_1231_Gekko_mindorensis_Mindoro.trimmed',
'RMB_4981_Gekko_mindorensis_Mindoro.trimmed',
'RMB_4982_Gekko_mindorensis_Mindoro.trimmed',
'RMB_5005_Gekko_mindorensis_Mindoro.trimmed',
'RMB_5006_Gekko_mindorensis_Mindoro.trimmed',
],
},
{'masbate': [
'CDS_754_Gekko_mindorensis_Masbate.trimmed',
'CDS_755_Gekko_mindorensis_Masbate.trimmed',
'CDS_757_Gekko_mindorensis_Masbate.trimmed',
'CDS_758_Gekko_mindorensis_Masbate.trimmed',
],
'panay_ne': [
'CDS_63_Gekko_mindorensis_Panay.trimmed',
'CDS_64_Gekko_mindorensis_Panay.trimmed',
'CDS_65_Gekko_mindorensis_Panay.trimmed',
'CDS_66_Gekko_mindorensis_Panay.trimmed',
],
},
{'negros': [
'CDS_277_Gekko_mindorensis_Negros.trimmed',
'CDS_278_Gekko_mindorensis_Negros.trimmed',
'CDS_279_Gekko_mindorensis_Negros.trimmed',
'CDSGS_39_Gekko_mindorensis_Negros.trimmed',
'CDSGS_40_Gekko_mindorensis_Negros.trimmed',
'CDSGS_41_Gekko_mindorensis_Negros.trimmed',
],
'panay_sw': [
'RMB_6425_Gekko_mindorensis_Panay.trimmed',
'RMB_6429_Gekko_mindorensis_Panay.trimmed',
'RMB_6430_Gekko_mindorensis_Panay.trimmed',
'RMB_6431_Gekko_mindorensis_Panay.trimmed',
'RMB_6448_Gekko_mindorensis_Panay.trimmed',
'RMB_6449_Gekko_mindorensis_Panay.trimmed',
'RMB_6479_Gekko_mindorensis_Panay.trimmed',
],
},
]
def main():
for d in [gekgo_util.PYMSBAYES_DIR, gekgo_util.PYMSBAYES_GM_DIR,
gekgo_util.PYMSBAYES_GM_FASTA_DIR,
gekgo_util.PYMSBAYES_GM_CONFIG_DIR]:
if not os.path.isdir(d):
os.mkdir(d)
loci_file_path = os.path.join(gekgo_util.MSG_ASSEMBLY_DIR, 'outfiles',
'd6q4c088s5h01.loci')
nloci = PyMsBayesComparisons.process_loci_file(
loci_file_obj = loci_file_path,
pop_id_maps = comparisons,
fasta_out_dir = gekgo_util.PYMSBAYES_GM_FASTA_DIR,
config_out_dir = gekgo_util.PYMSBAYES_GM_CONFIG_DIR,
minimum_sample_size = 3,
minimum_alignment_length = 80,
max_ambiguities_per_seq = 0.2,
estimate_hky_parameters = False,
require_shared_loci = True)
if __name__ == '__main__':
main()
| 41.541667 | 74 | 0.61344 |
acf372228b4850cf9e324c7c4ff89bcb024f2c67 | 243 | py | Python | threedi_ws_client/console.py | larsclaussen/threedi-ws-client | e16eadf92d944d4fd576915bd42d44553b2fc5ee | [
"MIT"
] | null | null | null | threedi_ws_client/console.py | larsclaussen/threedi-ws-client | e16eadf92d944d4fd576915bd42d44553b2fc5ee | [
"MIT"
] | null | null | null | threedi_ws_client/console.py | larsclaussen/threedi-ws-client | e16eadf92d944d4fd576915bd42d44553b2fc5ee | [
"MIT"
] | null | null | null | from rich.console import Console
from rich.theme import Theme
custom_theme = Theme({
"info" : "black",
"success" : "bold spring_green3",
"warning": "bold gold3",
"error": "bold red3"
})
console = Console(theme=custom_theme)
| 18.692308 | 37 | 0.666667 |
acf373df774282c8c3c1e0c4c8bc9b5f3c5d429b | 1,126 | py | Python | setup.py | chris-jones/django-genericadmin | 6e2923ef43d7c6122a6026280448c2664f96c432 | [
"MIT"
] | null | null | null | setup.py | chris-jones/django-genericadmin | 6e2923ef43d7c6122a6026280448c2664f96c432 | [
"MIT"
] | null | null | null | setup.py | chris-jones/django-genericadmin | 6e2923ef43d7c6122a6026280448c2664f96c432 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from setuptools import setup
from subprocess import call
def convert_readme():
try:
call(["pandoc", "-f", "markdown_github", "-t", "rst", "-o", "README.txt", "README.markdown"])
except OSError:
pass
return open('README.txt').read()
setup(
name='django-genericadmin',
version='0.6.4',
description="Adds support for generic relations within Django's admin interface.",
author='Weston Nielson, Jan Schrewe',
author_email='wnielson@gmail.com, jschrewe@googlemail.com',
url='https://github.com/jschrewe/django-genericadmin',
packages=['genericadmin'],
# package_data={'genericadmin': ['static/genericadmin/js/genericadmin.js']},
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
],
long_description=convert_readme(),
include_package_data=True,
zip_safe=False,
)
| 31.277778 | 103 | 0.648313 |
acf3766911ad2b6033a94a5da0885f4b28906ebc | 774 | py | Python | text/detector/utils/setup_cpu_win.py | kingemma/invoice | b381ffcd4b798434ea74cb4463eb5cff276ded3a | [
"MIT"
] | 1,017 | 2019-08-02T04:18:35.000Z | 2022-03-29T08:18:03.000Z | text/detector/utils/setup_cpu_win.py | kingemma/invoice | b381ffcd4b798434ea74cb4463eb5cff276ded3a | [
"MIT"
] | 47 | 2019-08-08T08:36:48.000Z | 2022-03-08T07:00:29.000Z | text/detector/utils/setup_cpu_win.py | kingemma/invoice | b381ffcd4b798434ea74cb4463eb5cff276ded3a | [
"MIT"
] | 300 | 2019-08-03T03:06:30.000Z | 2022-03-31T02:20:11.000Z | from Cython.Build import cythonize
import os
from os.path import join as pjoin
import numpy as np
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
try:
numpy_include = np.get_include()
except AttributeError:
numpy_include = np.get_numpy_include()
ext_modules = cythonize([
Extension(
"utils.cython_nms",
sources=["cython_nms.pyx"],
language="c",
include_dirs = [numpy_include],
library_dirs=[],
libraries=[],
extra_compile_args=[],
extra_link_args=[]
# extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
),
])
setup(
ext_modules = ext_modules
# ,
# cmdclass = {'build_ext': build_ext},
)
| 22.114286 | 76 | 0.657623 |
acf3766fe8fc5b75639dd0a51df2e991a415b789 | 2,956 | py | Python | audit_inspection/hooks.py | sharpec/checklist-erpnext | fcdab8b32519007b101f0f4e48002904667efa9d | [
"MIT"
] | null | null | null | audit_inspection/hooks.py | sharpec/checklist-erpnext | fcdab8b32519007b101f0f4e48002904667efa9d | [
"MIT"
] | null | null | null | audit_inspection/hooks.py | sharpec/checklist-erpnext | fcdab8b32519007b101f0f4e48002904667efa9d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
app_name = "audit_inspection"
app_title = "Audit Inspection"
app_publisher = "Texol"
app_description = "Ap for audit checklist inspection"
app_icon = "octicon octicon-checklist"
app_color = "blue"
app_email = "enzo@texol.it"
app_license = "MIT"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/audit_inspection/css/audit_inspection.css"
# app_include_js = "/assets/audit_inspection/js/audit_inspection.js"
# include js, css files in header of web template
# web_include_css = "/assets/audit_inspection/css/audit_inspection.css"
# web_include_js = "/assets/audit_inspection/js/audit_inspection.js"
# include js in page
# page_js = {"page" : "public/js/file.js"}
# include js in doctype views
# doctype_js = {"doctype" : "public/js/doctype.js"}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Website user home page (by function)
# get_website_user_home_page = "audit_inspection.utils.get_home_page"
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "audit_inspection.install.before_install"
# after_install = "audit_inspection.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "audit_inspection.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {
# "*": {
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }
# }
# Scheduled Tasks
# ---------------
# scheduler_events = {
# "all": [
# "audit_inspection.tasks.all"
# ],
# "daily": [
# "audit_inspection.tasks.daily"
# ],
# "hourly": [
# "audit_inspection.tasks.hourly"
# ],
# "weekly": [
# "audit_inspection.tasks.weekly"
# ]
# "monthly": [
# "audit_inspection.tasks.monthly"
# ]
# }
# Testing
# -------
# before_tests = "audit_inspection.install.before_tests"
# Overriding Whitelisted Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "frappe.desk.doctype.event.event.get_events": "audit_inspection.event.get_events"
# }
| 24.03252 | 84 | 0.685047 |
acf377cdb6e139e5b52bc4e6786ae0aa928f01bc | 1,075 | py | Python | app/windows/user_input.py | Ahmed-Khaled-dev/modern-meerkats | e54304303c77928a2566f4e4c3eefb98135acfc7 | [
"MIT"
] | 1 | 2022-01-07T02:38:08.000Z | 2022-01-07T02:38:08.000Z | app/windows/user_input.py | Ahmed-Khaled-dev/modern-meerkats | e54304303c77928a2566f4e4c3eefb98135acfc7 | [
"MIT"
] | null | null | null | app/windows/user_input.py | Ahmed-Khaled-dev/modern-meerkats | e54304303c77928a2566f4e4c3eefb98135acfc7 | [
"MIT"
] | null | null | null | import math
from blessed import Terminal
from pydantic import BaseModel
from app import constants as const
class UserInputWindow(BaseModel):
"""Window for dealing with user input"""
width: int = const.INPUT_WIDTH
height: int = const.INPUT_HEIGHT
pos_x: int = const.INPUT_X
pos_y: int = const.INPUT_Y
current_input: str = ""
@property
def _cursor_loc(self) -> tuple[int, int]:
row = self.pos_y + math.ceil(self.height / 2)
return self.pos_x, row
def prompt(self, term: Terminal) -> str:
"""Draw the prompt and move cursor to right location for input"""
x, y = self._cursor_loc
return term.move_xy(x + 2, y) + const.PROMPT_LINE + self.current_input
def content_lines(self, term: Terminal) -> list[str]:
"""Return content lines to be drawn"""
base = [
term.center(
term.bold(term.on_yellow("What's your next command?")),
width=self.width,
fillchar=" ",
),
"",
]
return base
| 27.564103 | 78 | 0.596279 |
acf37814d54f361297e9866d8628f90f4832911e | 10,942 | py | Python | planning_gui/src/hip_viewer/hip_viewer_plugin.py | jonbinney/python-planning | 0f5698bc806fa0dd223891c787bebb812d550088 | [
"MIT"
] | 8 | 2015-04-30T20:46:07.000Z | 2018-10-14T18:17:54.000Z | planning_gui/src/hip_viewer/hip_viewer_plugin.py | jonbinney/python-planning | 0f5698bc806fa0dd223891c787bebb812d550088 | [
"MIT"
] | null | null | null | planning_gui/src/hip_viewer/hip_viewer_plugin.py | jonbinney/python-planning | 0f5698bc806fa0dd223891c787bebb812d550088 | [
"MIT"
] | null | null | null | from __future__ import division
import os
from rosgui.QtBindingHelper import loadUi
from QtCore import QEvent, QFile, QIODevice, QObject, QPointF, QRectF, Qt, QTextStream, Signal, SIGNAL
from QtGui import QColor, QFileDialog, QGraphicsScene, QIcon, QImage, QPainter, QWidget
from QtSvg import QSvgGenerator
import roslib
roslib.load_manifest('hip_viewer')
from rosgui_dotgraph.edge_item import EdgeItem
from rosgui_dotgraph.node_item import NodeItem
from InteractiveGraphicsView import InteractiveGraphicsView
import hip
from hip_viewer.hip_viewer_node import HipViewerNode
from hip_viewer.colors import x11_colors
class HipViewerPlugin(QObject):
_deferred_fit_in_view = Signal()
def __init__(self, context):
super(HipViewerPlugin, self).__init__(context)
self.setObjectName('HipViewer')
self._current_dotcode = None
self._widget = QWidget()
ui_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'hip_viewer.ui')
loadUi(ui_file, self._widget, {'InteractiveGraphicsView': InteractiveGraphicsView})
self._widget.setObjectName('HipViewerUi')
if context.serial_number() > 1:
self._widget.setWindowTitle(self._widget.windowTitle() + (' (%d)' % context.serial_number()))
self._scene = QGraphicsScene()
self._widget.graphics_view.setScene(self._scene)
self._widget.graph_type_combo_box.insertItem(0, self.tr('infinite'), -1)
self._widget.graph_type_combo_box.insertItem(1, self.tr('1'), 2)
self._widget.graph_type_combo_box.insertItem(2, self.tr('2'), 3)
self._widget.graph_type_combo_box.insertItem(3, self.tr('3'), 4)
self._widget.graph_type_combo_box.insertItem(4, self.tr('4'), 5)
self._widget.graph_type_combo_box.setCurrentIndex(0)
self._widget.refresh_graph_push_button.setIcon(QIcon.fromTheme('view-refresh'))
self._widget.highlight_connections_check_box.toggled.connect(self._redraw_graph_view)
self._widget.auto_fit_graph_check_box.toggled.connect(self._redraw_graph_view)
self._widget.fit_in_view_push_button.setIcon(QIcon.fromTheme('zoom-original'))
self._widget.fit_in_view_push_button.pressed.connect(self._fit_in_view)
self._widget.load_dot_push_button.setIcon(QIcon.fromTheme('document-open'))
self._widget.load_dot_push_button.pressed.connect(self._load_dot)
self._widget.save_dot_push_button.setIcon(QIcon.fromTheme('document-save-as'))
self._widget.save_dot_push_button.pressed.connect(self._save_dot)
self._widget.save_as_svg_push_button.setIcon(QIcon.fromTheme('document-save-as'))
self._widget.save_as_svg_push_button.pressed.connect(self._save_svg)
self._widget.save_as_image_push_button.setIcon(QIcon.fromTheme('image'))
self._widget.save_as_image_push_button.pressed.connect(self._save_image)
self._deferred_fit_in_view.connect(self._fit_in_view, Qt.QueuedConnection)
self._deferred_fit_in_view.emit()
context.add_widget(self._widget)
self.connect(self, SIGNAL('update_planning_graph'), self.update_planning_graph_synchronous)
# start the planner in a separate thread
planning_node = HipViewerNode(ex_callback = self.update_planning_graph)
planning_node.start()
def save_settings(self, global_settings, perspective_settings):
perspective_settings.set_value('graph_type_combo_box_index', self._widget.graph_type_combo_box.currentIndex())
perspective_settings.set_value('filter_line_edit_text', self._widget.filter_line_edit.text())
perspective_settings.set_value('quiet_check_box_state', self._widget.quiet_check_box.isChecked())
perspective_settings.set_value('auto_fit_graph_check_box_state', self._widget.auto_fit_graph_check_box.isChecked())
perspective_settings.set_value('highlight_connections_check_box_state', self._widget.highlight_connections_check_box.isChecked())
def restore_settings(self, global_settings, perspective_settings):
self._widget.graph_type_combo_box.setCurrentIndex(int(perspective_settings.value('graph_type_combo_box_index', 0)))
self._widget.filter_line_edit.setText(perspective_settings.value('filter_line_edit_text', ''))
self._widget.quiet_check_box.setChecked(perspective_settings.value('quiet_check_box_state', True) in [True, 'true'])
self._widget.auto_fit_graph_check_box.setChecked(perspective_settings.value('auto_fit_graph_check_box_state', True) in [True, 'true'])
self._widget.highlight_connections_check_box.setChecked(perspective_settings.value('highlight_connections_check_box_state', True) in [True, 'true'])
def update_planning_graph(self, op, htree):
self.emit(SIGNAL('update_planning_graph'), op, htree)
def update_planning_graph_synchronous(self, op, htree):
graph = hip.dot_from_plan_tree(htree)
graph.layout('dot')
self._update_graph_view(graph)
def _update_graph_view(self, graph):
self._graph = graph
self._redraw_graph_view()
def _generate_tool_tip(self, url):
if url is not None and ':' in url:
item_type, item_path = url.split(':', 1)
if item_type == 'node':
tool_tip = 'Node:\n %s' % (item_path)
service_names = rosservice.get_service_list(node=item_path)
if service_names:
tool_tip += '\nServices:'
for service_name in service_names:
try:
service_type = rosservice.get_service_type(service_name)
tool_tip += '\n %s [%s]' % (service_name, service_type)
except rosservice.ROSServiceIOException, e:
tool_tip += '\n %s' % (e)
return tool_tip
elif item_type == 'topic':
topic_type, topic_name, _ = rostopic.get_topic_type(item_path)
return 'Topic:\n %s\nType:\n %s' % (topic_name, topic_type)
return url
def _redraw_graph_view(self):
self._scene.clear()
if self._widget.highlight_connections_check_box.isChecked():
highlight_level = 3
else:
highlight_level = 1
POINTS_PER_INCH = 72
nodes = {}
for node in self._graph.nodes_iter():
# decrease rect by one so that edges do not reach inside
bounding_box = QRectF(0, 0, POINTS_PER_INCH * float(node.attr['width']) - 1.0, POINTS_PER_INCH * float(node.attr['height']) - 1.0)
pos = node.attr['pos'].split(',')
bounding_box.moveCenter(QPointF(float(pos[0]), -float(pos[1])))
color_name = node.attr.get('color', None)
if color_name is None:
color = None
else:
color_name = color_name.lower()
if color_name in x11_colors:
color = QColor(*x11_colors[color_name])
else:
print 'Unrecognized color: %s' % color_name
color = None
name = str(node)
label = node.attr['label']
node_item = NodeItem(highlight_level, bounding_box, label, node.attr.get('shape', 'ellipse'), color)
#node_item.setToolTip(self._generate_tool_tip(node.attr.get('URL', None)))
nodes[name] = node_item
edges = {}
for edge in self._graph.edges_iter():
label = None
label_center = None
source_node = edge[0]
destination_node = edge[1]
# create edge with from-node and to-node
edge_item = EdgeItem(highlight_level, edge.attr['pos'], label_center, label, nodes[source_node], nodes[destination_node])
# symmetrically add all sibling edges with same label
if label is not None:
if label not in edges:
edges[label] = []
for sibling in edges[label]:
edge_item.add_sibling_edge(sibling)
sibling.add_sibling_edge(edge_item)
edges[label].append(edge_item)
edge_item.setToolTip(self._generate_tool_tip(edge.attr.get('URL', None)))
edge_item.add_to_scene(self._scene)
for node_item in nodes.itervalues():
self._scene.addItem(node_item)
self._scene.setSceneRect(self._scene.itemsBoundingRect())
if self._widget.auto_fit_graph_check_box.isChecked():
self._fit_in_view()
def _load_dot(self, file_name=None):
if file_name is None:
file_name, _ = QFileDialog.getOpenFileName(self._widget, self.tr('Open graph from file'), None, self.tr('DOT graph (*.dot)'))
if file_name is None or file_name == '':
return
try:
fh = open(file_name, 'rb')
dotcode = fh.read()
fh.close()
except IOError:
return
# disable controls customizing fetched ROS graph
self._widget.graph_type_combo_box.setEnabled(False)
self._widget.filter_line_edit.setEnabled(False)
self._widget.quiet_check_box.setEnabled(False)
self._update_graph_view(dotcode)
def _fit_in_view(self):
self._widget.graphics_view.fitInView(self._scene.itemsBoundingRect(), Qt.KeepAspectRatio)
def _save_dot(self):
file_name, _ = QFileDialog.getSaveFileName(self._widget, self.tr('Save as DOT'), 'rospackgraph.dot', self.tr('DOT graph (*.dot)'))
if file_name is None or file_name == '':
return
file = QFile(file_name)
if not file.open(QIODevice.WriteOnly | QIODevice.Text):
return
file.write(self._current_dotcode)
file.close()
def _save_svg(self):
file_name, _ = QFileDialog.getSaveFileName(self._widget, self.tr('Save as SVG'), 'rospackgraph.svg', self.tr('Scalable Vector Graphic (*.svg)'))
if file_name is None or file_name == '':
return
generator = QSvgGenerator()
generator.setFileName(file_name)
generator.setSize((self._scene.sceneRect().size() * 2.0).toSize())
painter = QPainter(generator)
painter.setRenderHint(QPainter.Antialiasing)
self._scene.render(painter)
painter.end()
def _save_image(self):
file_name, _ = QFileDialog.getSaveFileName(self._widget, self.tr('Save as image'), 'rospackgraph.png', self.tr('Image (*.bmp *.jpg *.png *.tiff)'))
if file_name is None or file_name == '':
return
img = QImage((self._scene.sceneRect().size() * 2.0).toSize(), QImage.Format_ARGB32_Premultiplied)
painter = QPainter(img)
painter.setRenderHint(QPainter.Antialiasing)
self._scene.render(painter)
painter.end()
img.save(file_name)
| 45.028807 | 156 | 0.664869 |
acf378b7d26e2710b0562d021160bd6ce0c7d8c4 | 12,334 | py | Python | tests/cocotb_tests/t13_components/test_13_predefined_component_classes.py | ktbarrett/pyuvm | 725e6e4b8088aa085a5ce16861b46db49ce46672 | [
"Apache-2.0"
] | 84 | 2021-02-21T23:12:59.000Z | 2022-03-25T21:22:27.000Z | tests/cocotb_tests/t13_components/test_13_predefined_component_classes.py | ktbarrett/pyuvm | 725e6e4b8088aa085a5ce16861b46db49ce46672 | [
"Apache-2.0"
] | 37 | 2021-05-20T05:35:13.000Z | 2022-03-13T09:12:16.000Z | tests/cocotb_tests/t13_components/test_13_predefined_component_classes.py | ktbarrett/pyuvm | 725e6e4b8088aa085a5ce16861b46db49ce46672 | [
"Apache-2.0"
] | 22 | 2021-03-31T02:57:09.000Z | 2022-03-09T17:30:22.000Z | from pyuvm.utility_classes import Singleton
import pyuvm_unittest
import unittest
from pyuvm.s13_predefined_component_classes import *
class my_test(uvm_test):
...
class s13_predefined_component_TestCase(pyuvm_unittest.pyuvm_TestCase):
"""Basic test cases."""
def setUp(self):
super().setUp()
ConfigDB().clear()
uvm_root().clear_children()
def test_uvm_component_no_parent(self):
"""
13.1.2.2 Basic component creation.
13.1.2.1 Constructor
13.1 class defined
"""
comp = uvm_component('test', None)
self.assertTrue('test' in uvm_component.component_dict)
self.assertTrue(comp.parent == uvm_root())
self.assertTrue(comp.print_enabled) # 13.1.2.2
def test_do_execute_op_13_1_2_3(self):
"""
13.1.2.3
We have not implemented policies.
"""
comp = uvm_component('test', None)
with self.assertRaises(error_classes.UVMNotImplemented):
comp.do_execute_op("op")
def test_component_with_parent(self):
parent = uvm_component('parent', None)
child = uvm_component('child', parent)
self.assertTrue('parent' in uvm_component.component_dict)
self.assertTrue('parent.child' in uvm_component.component_dict)
self.assertTrue(parent.parent == uvm_root())
self.assertTrue(child.parent == parent)
self.assertEqual(list(parent.hierarchy), [parent, child])
def test_hierarchy(self):
parent = uvm_component('parent', None)
child1 = uvm_component('child1', parent)
child2 = uvm_component('child2', parent)
child3 = uvm_component('child3', child1)
golden_list = [parent, child1, child3, child2]
self.assertEqual(list(parent.hierarchy), golden_list)
hier = list(parent.hierarchy)
hier.reverse()
golden_list.reverse()
self.assertEqual(hier, golden_list)
def test_get_parent_13_1_3_1(self):
"""
13.1.3.1 get_parent test
:return:
"""
parent = uvm_component('parent', None)
child = uvm_component('child', parent)
grandchild = uvm_component('grandchild', child)
par = grandchild.get_parent()
self.assertEqual(child, par)
par = child.get_parent()
self.assertEqual(par, parent)
par = grandchild.get_parent().get_parent()
self.assertEqual(parent, par)
def test_get_full_name_13_1_3_2(self):
"""
13.1.3.1 get_parent test
:return:
"""
parent = uvm_component('parent', None)
child1 = uvm_component('child1', parent)
child2 = uvm_component('child2', parent)
child21 = uvm_component('child21', child2)
parent_name = parent.get_full_name()
self.assertEqual("parent", parent_name)
self.assertEqual("parent.child1", child1.get_full_name())
self.assertEqual("parent.child2", child2.get_full_name())
self.assertEqual("parent.child2.child21", child21.get_full_name())
def test_get_children_13_1_3_3(self):
"""
13.1.3.3
"""
parent = uvm_component('parent', None)
child1 = uvm_component('child1', parent)
child2 = uvm_component('child2', parent)
_ = uvm_component('child3', parent)
child11 = uvm_component('child11', child1)
_ = uvm_component('child111', child11)
children = parent.get_children()
self.assertTrue(len(children) == 3)
children = child1.get_children()
self.assertTrue(len(children) == 1)
children = child2.get_children()
self.assertTrue(len(children) == 0)
children = list(parent.children)
self.assertEqual(children, parent.get_children())
def test_child_iterator_13_1_3_4(self):
"""
13.1.3.4
children is an iterator that we get from a
UVM component. We can loop over it without getting a new
copy of the children list.
"""
parent = uvm_component('parent', None)
_ = uvm_component('child1', parent)
_ = uvm_component('child2', parent)
_ = uvm_component('child3', parent)
cl = parent.get_children()
for cc in parent.children:
_ = cc
self.assertIn(cc, cl)
def test_get_child_13_1_3_4(self):
"""
oddly 13.1.3.4 defines several functions. We shall eschew
get_next_child() and get_first_child(). But get_child(str name)
is a righteous idea and so we'll implement that.
As per the spec we return None if there is no child of that name rather
than throwing a Lookup exception.
:return:
"""
parent = uvm_component('parent', None)
child1 = uvm_component('child1', parent)
_ = uvm_component('child2', parent)
_ = uvm_component('child3', parent)
self.assertEqual(parent.get_child("child1"), child1)
self.assertIsNone(parent.get_child("orphan"))
def test_get_num_children_13_1_3_5(self):
"""
13.1.3.5
get_num_children() returns the number of children.
"""
parent = uvm_component('parent', None)
child1 = uvm_component('child1', parent)
_ = uvm_component('child2', parent)
_ = uvm_component('child3', parent)
cl = parent.get_children()
self.assertEqual(parent.get_num_children(), len(cl))
self.assertEqual(child1.get_num_children(), len(child1.get_children()))
def test_has_child_13_1_3_6(self):
"""
13.1.3.6
Returns the child of the name
:return:
"""
parent = uvm_component('parent', None)
child1 = uvm_component('child1', parent)
_ = uvm_component('child2', parent)
_ = uvm_component('child3', child1)
self.assertTrue(child1.has_child('child3'))
self.assertEqual(len(parent.get_children()), 2)
self.assertEqual(parent.get_child('child1').get_name(), 'child1')
self.assertEqual(2, parent.get_num_children())
self.assertFalse(parent.has_child("orphan"))
def test_lookup_13_1_3_7(self):
"""
13.1.3.7
lookup finds components based on their full names.
a.b.c is relative to the parent of a
.a.b.c means a is the top level and we find our way down.
:return:
"""
parent = uvm_component('parent', None)
child1 = uvm_component('child1', parent)
_ = uvm_component('child2', parent)
child3 = uvm_component('child3', child1)
child4 = uvm_component('child4', child3)
self.assertEqual(child1, parent.lookup('child1'))
self.assertEqual(child3, parent.lookup('child1.child3'))
self.assertNotEqual(child1, parent.lookup('child2'))
self.assertEqual(child3, parent.lookup('.parent.child1.child3'))
self.assertEqual(child3, child1.lookup('child3'))
self.assertEqual(child4, child1.lookup('child3.child4'))
def test_get_depth_13_1_3_8(self):
"""
13.1.3.8
get_depth measures dept from uvm_root where uvm_root is 0
:return:
"""
parent = uvm_component('parent', None)
child1 = uvm_component('child1', parent)
_ = uvm_component('child2', parent)
child3 = uvm_component('child3', child1)
_ = uvm_component('child4', child3)
self.assertEqual(0, uvm_root().get_depth())
self.assertEqual(1, parent.get_depth())
self.assertEqual(2, child1.get_depth())
self.assertEqual(3, child3.get_depth())
    class my_component(uvm_component):
        """Minimal concrete component; its empty run_phase lets it be
        instantiated and created through the factory in tests."""
        async def run_phase(self):
            ...
def test_component_factory(self):
mc = self.my_component('mc', None)
mc2 = self.my_component.create('my_component', None)
self.assertEqual(type(mc), type(mc2))
    def test_config_db(self):
        """Exercise cdb_set/cdb_get visibility rules through a hierarchy:
        aa -> (bb -> ee, cc -> D)."""
        aa = uvm_component('aa', None)
        bb = uvm_component('bb', aa)
        cc = uvm_component('cc', aa)
        _ = uvm_component('D', cc)
        ee = uvm_component('ee', bb)
        # A value set on aa with an empty inst path is visible to aa itself...
        aa.cdb_set("FIVE", 5, "")
        datum = aa.cdb_get("FIVE", "")
        self.assertEqual(5, datum)
        # ...but not to its child bb.
        with self.assertRaises(error_classes.UVMConfigItemNotFound):
            bb.cdb_get("FIVE", "")
        # A set whose inst path ("aa.bb.cc.*") does not match cc's actual
        # path is not visible to cc.
        cc.cdb_set("TT", 33, "aa.bb.cc.*")
        with self.assertRaises(error_classes.UVMConfigItemNotFound):
            cc.cdb_get("TT", "")
        # A contextless global set with a wildcard reaches grandchild ee.
        ConfigDB().set(None, "aa.*", "TEN", 10)
        datum = ee.cdb_get("TEN", "")
        self.assertEqual(10, datum)
        # An exact-path global set reaches exactly that component.
        ConfigDB().set(None, "aa.cc", "FF", 44)
        datum = cc.cdb_get("FF", "")
        self.assertEqual(44, datum)
    def test_wildcard_precedence(self):
        """A more specific inst path ("bb") wins over a wildcard ("*") for the
        same key, while components not named by the specific path still see
        the wildcard value."""
        aa = uvm_component('aa', None)
        bb = uvm_component('bb', aa)
        cc = uvm_component('cc', aa)
        aa.cdb_set("TEST", 11, "*")
        aa.cdb_set("TEST", 22, "bb")
        ConfigDB().set(aa, "aa", "OTHER", 55)
        # Read through a non-matching path; return value intentionally unused.
        _ = aa.cdb_get("TEST", "X")
        bb_int = bb.cdb_get("TEST", "")
        self.assertEqual(22, bb_int)  # specific "bb" entry shadows "*"
        cc_int = cc.cdb_get("TEST", "")
        self.assertEqual(11, cc_int)  # cc only matches the wildcard entry
        aao = aa.cdb_get("OTHER", "aa")
        self.assertEqual(55, aao)
def test_contextless_behavior_in_hierarchy(self):
aa = uvm_component('aa', None)
_ = uvm_component('B', aa)
_ = uvm_component('C', aa)
ConfigDB().set(aa, "*", "OTHER", 55)
aa = ConfigDB().get(aa, "B", "OTHER")
self.assertEqual(55, aa)
    async def test_agent_config(self):
        """An agent's is_active setting comes from the config DB: the test
        sets ACTIVE for "agent", while comp overrides its child "bot" to 0
        (passive) before constructing it."""
        class bottom(uvm_agent):
            def build_phase(self):
                super().build_phase()

        class comp(uvm_agent):
            def build_phase(self):
                super().build_phase()
                # Force the child agent passive (0) before it is built.
                ConfigDB().set(self, "bot", "is_active", 0)
                self.bot = bottom("bot", self)

        class test(uvm_test):
            def build_phase(self):
                self.cdb_set("is_active", uvm_active_passive_enum.UVM_ACTIVE)
                self.agent = comp("agent", self)

            async def run_phase(self):
                self.raise_objection()
                self.drop_objection()

        # keep_singletons preserves ConfigDB state across the run.
        await uvm_root().run_test("test", keep_singletons=True)
        utt = uvm_root().get_child("uvm_test_top")
        self.assertEqual(uvm_active_passive_enum.UVM_ACTIVE,
                         utt.agent.get_is_active())
        self.assertEqual(uvm_active_passive_enum.UVM_PASSIVE,
                         utt.agent.bot.get_is_active())
        self.assertTrue(utt.agent.active())
        self.assertFalse(utt.agent.bot.active())
    async def test_class_as_run_test_argument(self):
        """run_test accepts either the registered test name (a string) or the
        test class itself; a kept singleton counts both invocations."""
        class DataHolder(metaclass=Singleton):
            def __init__(self):
                self.call_count = 0

            def __str__(self):
                return f"DataHolder.call_count: {self.call_count}"

        class MyTest(uvm_test):
            async def run_phase(self):
                self.raise_objection()
                DataHolder().call_count += 1
                self.drop_objection()

        # keep_set preserves the DataHolder singleton between the two runs.
        await uvm_root().run_test("MyTest", keep_set={DataHolder})
        await uvm_root().run_test(MyTest, keep_set={DataHolder})
        self.assertTrue(DataHolder().call_count == 2)
    async def test_default_agent_config(self):
        """With no is_active entry in the config DB, agents default to
        ACTIVE at every level of the hierarchy."""
        class bottom(uvm_agent):
            def build_phase(self):
                super().build_phase()

        class comp(uvm_agent):
            def build_phase(self):
                super().build_phase()
                self.bot = bottom("bot", self)

        class test(uvm_test):
            def build_phase(self):
                self.agent = comp("agent", self)

            async def run_phase(self):
                self.raise_objection()
                self.drop_objection()

        await uvm_root().run_test("test", keep_singletons=True)
        utt = uvm_root().get_child("uvm_test_top")
        self.assertEqual(uvm_active_passive_enum.UVM_ACTIVE,
                         utt.agent.get_is_active())
        self.assertEqual(uvm_active_passive_enum.UVM_ACTIVE,
                         utt.agent.bot.get_is_active())
        self.assertTrue(utt.agent.active())
        self.assertTrue(utt.agent.bot.active())
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 35.854651 | 79 | 0.601589 |
acf3798ae268b64e3d0093ecb1108ae7707d1958 | 2,714 | py | Python | cloudcafe/identity/v2_0/tokens_api/models/responses/role.py | ProjectMeniscus/cloudcafe | fa8fd796b303f0c5f0d6e98b2b5d01f6ea8fefe9 | [
"Apache-2.0"
] | null | null | null | cloudcafe/identity/v2_0/tokens_api/models/responses/role.py | ProjectMeniscus/cloudcafe | fa8fd796b303f0c5f0d6e98b2b5d01f6ea8fefe9 | [
"Apache-2.0"
] | null | null | null | cloudcafe/identity/v2_0/tokens_api/models/responses/role.py | ProjectMeniscus/cloudcafe | fa8fd796b303f0c5f0d6e98b2b5d01f6ea8fefe9 | [
"Apache-2.0"
] | 1 | 2020-11-17T19:05:08.000Z | 2020-11-17T19:05:08.000Z | """
Copyright 2013 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
from xml.etree import ElementTree
from cloudcafe.identity.v2_0.tokens_api.models.base import \
BaseIdentityModel, BaseIdentityListModel
class Roles(BaseIdentityListModel):
    """List-like collection of Role models, deserializable from JSON or XML."""

    def __init__(self, roles=None):
        """Build a Roles list from an optional iterable of Role objects."""
        super(Roles, self).__init__()
        self.extend(roles or [])

    @classmethod
    def _json_to_obj(cls, serialized_str):
        """Deserialize a JSON document with a top-level 'roles' list."""
        return cls._list_to_obj(json.loads(serialized_str).get('roles'))

    @classmethod
    def _list_to_obj(cls, list_):
        """Convert a list of role dicts into a Roles instance."""
        role_models = [Role(**role_dict) for role_dict in list_]
        return Roles(roles=role_models)

    @classmethod
    def _xml_to_obj(cls, serialized_str):
        """Deserialize a <roles> XML document; None if the root tag differs."""
        element = ElementTree.fromstring(serialized_str)
        cls._remove_identity_xml_namespaces(element)
        if element.tag == 'roles':
            return cls._xml_list_to_obj(element.findall('role'))
        return None

    @classmethod
    def _xml_list_to_obj(cls, xml_list):
        """Convert a list of <role> elements into a Roles instance."""
        role_models = [Role._xml_ele_to_obj(ele) for ele in xml_list]
        return Roles(roles=role_models)
class Role(BaseIdentityModel):
    """Single identity Role model (id_, name, description)."""

    def __init__(self, id_=None, name=None, description=None):
        super(Role, self).__init__()
        # The attribute is named `id_` to avoid shadowing the `id` builtin.
        self.id_ = id_
        self.name = name
        self.description = description

    @classmethod
    def _json_to_obj(cls, serialized_str):
        """Deserialize a JSON document with a top-level 'role' object."""
        json_dict = json.loads(serialized_str)
        role_dict = json_dict.get('role')
        # The wire format presumably uses 'id' (as the XML path does), while
        # the constructor keyword is 'id_'; remap so Role(**...) cannot fail
        # with an unexpected-keyword TypeError.
        if role_dict is not None and 'id' in role_dict:
            role_dict['id_'] = role_dict.pop('id')
        return Role(**role_dict)

    @classmethod
    def _xml_to_obj(cls, serialized_str):
        """Deserialize a <role> XML document; None if the root tag differs."""
        element = ElementTree.fromstring(serialized_str)
        cls._remove_identity_xml_namespaces(element)
        if element.tag != 'role':
            return None
        return cls._xml_ele_to_obj(element)

    @classmethod
    def _xml_ele_to_obj(cls, xml_ele):
        """Build a Role from a <role> element, coercing id to int if possible."""
        kwargs = {'name': xml_ele.get('name'),
                  'description': xml_ele.get('description')}
        # BUG FIX: the keyword must be 'id_' to match __init__; the previous
        # 'id' key made Role(**kwargs) raise TypeError.
        try:
            kwargs['id_'] = int(xml_ele.get('id'))
        except (ValueError, TypeError):
            kwargs['id_'] = xml_ele.get('id')
        return Role(**kwargs)
| 31.929412 | 77 | 0.66986 |
acf37999cc15da04368e6bfac9492638e5b22cf1 | 415 | py | Python | nnvm/python/nnvm/testing/__init__.py | zhiics/tvm | 4782b1fc153d6614808f542155d58188f2dc8255 | [
"Apache-2.0"
] | 6 | 2019-08-29T19:00:57.000Z | 2020-06-15T14:55:16.000Z | nnvm/python/nnvm/testing/__init__.py | zhiics/tvm | 4782b1fc153d6614808f542155d58188f2dc8255 | [
"Apache-2.0"
] | 1 | 2020-10-23T18:56:21.000Z | 2020-10-23T18:56:33.000Z | nnvm/python/nnvm/testing/__init__.py | zhiics/tvm | 4782b1fc153d6614808f542155d58188f2dc8255 | [
"Apache-2.0"
] | 3 | 2018-06-29T17:19:21.000Z | 2020-12-11T07:50:33.000Z | """Utilities for testing and benchmarks"""
from __future__ import absolute_import as _abs
from .config import ctx_list
from .utils import create_workload
from . import mobilenet
from . import mobilenet_v2
from . import mlp
from . import resnet
from . import vgg
from . import squeezenet
from . import inception_v3
from . import dcgan
from . import dqn
from . import yolo2_detection
from . import check_computation
| 24.411765 | 46 | 0.8 |
acf37afb0e3fcf72d31b971e65d57bfba2a34af5 | 1,826 | py | Python | python-strings.py | dimitardanailov/google-python-class | 470bfb55e7a8240a793035e65e44695206090cde | [
"MIT"
] | null | null | null | python-strings.py | dimitardanailov/google-python-class | 470bfb55e7a8240a793035e65e44695206090cde | [
"MIT"
] | null | null | null | python-strings.py | dimitardanailov/google-python-class | 470bfb55e7a8240a793035e65e44695206090cde | [
"MIT"
] | null | null | null | # https://developers.google.com/edu/python/strings#devsite_header_1
s = 'h1'
print s[1] ## i
print len(s) ## 2
print s + ' there' ## hi there
print ''
simple = "Hello"
print simple[1:4] #ell - chars starting at index 1 and extending up to but not including index 4
print simple[1:] #ello - omitting either index defaults to the start or end of the string
print s[:] #Hello - omitting both always gives us a copy of the whole thing (this is the pythonic way to copy a sequence like a string or list)
print s[1:100] #ello - an index that is too big is truncated down to the string length
# The standard zero-based index numbers give easy access to chars
# near the start of the string. As an alternative, Python uses negative numbers to give easy access to the chars at the
# end of the string: s[-1] is the last char 'o', s[-2] is 'l' the next-to-last char, and so on.
# Negative index numbers count back from the end of the string:
print simple[-1] # is 'o' -- last char (1st from the end)
print simple[-4] # is 'e' -- 4th from the end
print simple[:-3] # is 'He' -- going up to but not including the last 3 chars.
print simple[-3:] # is 'llo' -- starting with the 3rd char from the end and extending to the end of the string.
print ''
#### String % ####
# Python has a printf()-like facility to put together a string.
# The % operator takes a printf-type format string on the left (%d int, %s string, %f/%g floating point),
# and the matching values in a tuple on the right (a tuple is made of values separated by commas,
# typically grouped inside parenthesis):
# % operator
text = "%d little pigs come out or I'll %s and %s and %s" % (3, 'huff', 'puff', 'blow down')
print text
| 50.722222 | 161 | 0.652245 |
acf37bb150f473dd8ee442c1618147bcccaa2bb4 | 717 | py | Python | print_lines.py | DoodlingDev/notational-fzf-vim | bfbf00697bc1d7d2b2fb559317d6d3cbc154be30 | [
"Apache-2.0"
] | null | null | null | print_lines.py | DoodlingDev/notational-fzf-vim | bfbf00697bc1d7d2b2fb559317d6d3cbc154be30 | [
"Apache-2.0"
] | null | null | null | print_lines.py | DoodlingDev/notational-fzf-vim | bfbf00697bc1d7d2b2fb559317d6d3cbc154be30 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Colorize the current line in the preview window in bold red."""
import os.path as path
import sys
line = int(sys.argv[1])    # 1-based number of the line to highlight
file = sys.argv[2]         # path of the file to preview
height = int(sys.argv[3])  # height of the preview window, in lines

# ANSI escape sequences for coloring matched line
RED = "\033[1;31m"
RESET = "\033[0;0m"
BOLD = "\033[;1m"

if __name__ == "__main__":
    is_sel = False
    with open(path.normpath(file)) as f:
        for linenum, line_content in enumerate(f, start=1):
            if linenum == line:
                # The selected line itself, in bold red.
                print(BOLD + RED + line_content.rstrip() + RESET)
                is_sel = True
            elif is_sel or (line - linenum <= (height / 2 - 1)):
                # Context: every line after the match, plus the lines starting
                # roughly height/2 before it, so the match sits mid-window.
                # NOTE(review): `height / 2` is float division on Python 3 —
                # presumably intended; confirm against the Python 2 behavior.
                print(line_content.rstrip())
| 27.576923 | 66 | 0.592748 |
acf37cd77094e87d0fdcbb4613859b59e2f46b7d | 11,058 | py | Python | slixmpp/plugins/xep_0198/stream_management.py | isabella232/slixmpp | e15e6735f1dbfc66a5d43efe9fa9e7f5c9d1610a | [
"BSD-3-Clause"
] | null | null | null | slixmpp/plugins/xep_0198/stream_management.py | isabella232/slixmpp | e15e6735f1dbfc66a5d43efe9fa9e7f5c9d1610a | [
"BSD-3-Clause"
] | 1 | 2021-02-24T07:58:40.000Z | 2021-02-24T07:58:40.000Z | slixmpp/plugins/xep_0198/stream_management.py | isabella232/slixmpp | e15e6735f1dbfc66a5d43efe9fa9e7f5c9d1610a | [
"BSD-3-Clause"
] | null | null | null | """
Slixmpp: The Slick XMPP Library
Copyright (C) 2012 Nathanael C. Fritz, Lance J.T. Stout
This file is part of Slixmpp.
See the file LICENSE for copying permission.
"""
import logging
import threading
import collections
from slixmpp.stanza import Message, Presence, Iq, StreamFeatures
from slixmpp.xmlstream import register_stanza_plugin
from slixmpp.xmlstream.handler import Callback, Waiter
from slixmpp.xmlstream.matcher import MatchXPath, MatchMany
from slixmpp.plugins.base import BasePlugin
from slixmpp.plugins.xep_0198 import stanza
log = logging.getLogger(__name__)
MAX_SEQ = 2 ** 32
class XEP_0198(BasePlugin):

    """
    XEP-0198: Stream Management

    Tracks outgoing stanzas in an unacked queue, answers and issues ack
    requests, and (optionally) resumes a previous stream after reconnect.
    """

    name = 'xep_0198'
    description = 'XEP-0198: Stream Management'
    dependencies = set()
    stanza = stanza
    default_config = {
        #: The last ack number received from the server.
        'last_ack': 0,

        #: The number of stanzas to wait between sending ack requests to
        #: the server. Setting this to ``1`` will send an ack request after
        #: every sent stanza. Defaults to ``5``.
        'window': 5,

        #: The stream management ID for the stream. Knowing this value is
        #: required in order to do stream resumption.
        'sm_id': None,

        #: A counter of handled incoming stanzas, mod 2^32.
        'handled': 0,

        #: A counter of unacked outgoing stanzas, mod 2^32.
        'seq': 0,

        #: Control whether or not the ability to resume the stream will be
        #: requested when enabling stream management. Defaults to ``True``.
        'allow_resume': True,

        'order': 10100,
        'resume_order': 9000
    }

    def plugin_init(self):
        """Start the XEP-0198 plugin."""

        # Only enable stream management for non-components,
        # since components do not yet perform feature negotiation.
        if self.xmpp.is_component:
            return

        self.window_counter = self.window
        self.window_counter_lock = threading.Lock()

        self.enabled = threading.Event()
        self.unacked_queue = collections.deque()

        self.seq_lock = threading.Lock()
        self.handled_lock = threading.Lock()
        self.ack_lock = threading.Lock()

        register_stanza_plugin(StreamFeatures, stanza.StreamManagement)
        self.xmpp.register_stanza(stanza.Enable)
        self.xmpp.register_stanza(stanza.Enabled)
        self.xmpp.register_stanza(stanza.Resume)
        self.xmpp.register_stanza(stanza.Resumed)
        self.xmpp.register_stanza(stanza.Ack)
        self.xmpp.register_stanza(stanza.RequestAck)

        # Only end the session when a </stream> element is sent,
        # not just because the connection has died.
        self.xmpp.end_session_on_disconnect = False

        # Register the feature twice because it may be ordered two
        # different ways: enabling after binding and resumption
        # before binding.
        self.xmpp.register_feature('sm',
                self._handle_sm_feature,
                restart=True,
                order=self.order)
        self.xmpp.register_feature('sm',
                self._handle_sm_feature,
                restart=True,
                order=self.resume_order)

        self.xmpp.register_handler(
                Callback('Stream Management Enabled',
                    MatchXPath(stanza.Enabled.tag_name()),
                    self._handle_enabled,
                    instream=True))

        self.xmpp.register_handler(
                Callback('Stream Management Resumed',
                    MatchXPath(stanza.Resumed.tag_name()),
                    self._handle_resumed,
                    instream=True))

        self.xmpp.register_handler(
                Callback('Stream Management Failed',
                    MatchXPath(stanza.Failed.tag_name()),
                    self._handle_failed,
                    instream=True))

        self.xmpp.register_handler(
                Callback('Stream Management Ack',
                    MatchXPath(stanza.Ack.tag_name()),
                    self._handle_ack,
                    instream=True))

        self.xmpp.register_handler(
                Callback('Stream Management Request Ack',
                    MatchXPath(stanza.RequestAck.tag_name()),
                    self._handle_request_ack,
                    instream=True))

        self.xmpp.add_filter('in', self._handle_incoming)
        self.xmpp.add_filter('out_sync', self._handle_outgoing)

        self.xmpp.add_event_handler('session_end', self.session_end)

    def plugin_end(self):
        """Unregister all features, handlers, stanzas and filters."""
        if self.xmpp.is_component:
            return

        self.xmpp.unregister_feature('sm', self.order)
        self.xmpp.unregister_feature('sm', self.resume_order)
        self.xmpp.del_event_handler('session_end', self.session_end)
        self.xmpp.del_filter('in', self._handle_incoming)
        self.xmpp.del_filter('out_sync', self._handle_outgoing)
        self.xmpp.remove_handler('Stream Management Enabled')
        self.xmpp.remove_handler('Stream Management Resumed')
        self.xmpp.remove_handler('Stream Management Failed')
        self.xmpp.remove_handler('Stream Management Ack')
        self.xmpp.remove_handler('Stream Management Request Ack')
        self.xmpp.remove_stanza(stanza.Enable)
        self.xmpp.remove_stanza(stanza.Enabled)
        self.xmpp.remove_stanza(stanza.Resume)
        self.xmpp.remove_stanza(stanza.Resumed)
        self.xmpp.remove_stanza(stanza.Ack)
        self.xmpp.remove_stanza(stanza.RequestAck)

    def session_end(self, event):
        """Reset stream management state."""
        self.enabled.clear()
        self.unacked_queue.clear()
        self.sm_id = None
        self.handled = 0
        self.seq = 0
        self.last_ack = 0

    def send_ack(self):
        """Send the current ack count to the server."""
        ack = stanza.Ack(self.xmpp)
        with self.handled_lock:
            ack['h'] = self.handled
        self.xmpp.send_raw(str(ack))

    def request_ack(self, e=None):
        """Request an ack from the server."""
        req = stanza.RequestAck(self.xmpp)
        self.xmpp.send_queue.put(str(req))

    def _handle_sm_feature(self, features):
        """
        Enable or resume stream management.

        If no SM-ID is stored, and resource binding has taken place,
        stream management will be enabled.

        If an SM-ID is known, and the server allows resumption, the
        previous stream will be resumed.
        """
        if 'stream_management' in self.xmpp.features:
            # We've already negotiated stream management,
            # so no need to do it again.
            return False
        if not self.sm_id:
            if 'bind' in self.xmpp.features:
                self.enabled.set()
                enable = stanza.Enable(self.xmpp)
                enable['resume'] = self.allow_resume
                enable.send()
                self.handled = 0
        elif self.sm_id and self.allow_resume:
            self.enabled.set()
            resume = stanza.Resume(self.xmpp)
            resume['h'] = self.handled
            resume['previd'] = self.sm_id
            resume.send()

            # Wait for a response before allowing stream feature processing
            # to continue. The actual result processing will be done in the
            # _handle_resumed() or _handle_failed() methods.
            waiter = Waiter('resumed_or_failed',
                    MatchMany([
                        MatchXPath(stanza.Resumed.tag_name()),
                        MatchXPath(stanza.Failed.tag_name())]))
            self.xmpp.register_handler(waiter)
            result = waiter.wait()
            if result is not None and result.name == 'resumed':
                return True
        return False

    def _handle_enabled(self, stanza):
        """Save the SM-ID, if provided.

        Raises an :term:`sm_enabled` event.
        """
        self.xmpp.features.add('stream_management')
        if stanza['id']:
            self.sm_id = stanza['id']
        self.xmpp.event('sm_enabled', stanza)

    def _handle_resumed(self, stanza):
        """Finish resuming a stream by resending unacked stanzas.

        Raises a :term:`session_resumed` event.
        """
        self.xmpp.features.add('stream_management')
        self._handle_ack(stanza)
        # BUG FIX: the loop variables previously shadowed the `stanza`
        # parameter (and the `id` builtin), so the session_resumed event was
        # raised with the last re-sent stanza instead of the <resumed/>
        # stanza itself.
        for seq, unacked in self.unacked_queue:
            self.xmpp.send(unacked, use_filters=False)
        self.xmpp.event('session_resumed', stanza)

    def _handle_failed(self, stanza):
        """
        Disable and reset any features used since stream management was
        requested (tracked stanzas may have been sent during the interval
        between the enable request and the enabled response).

        Raises an :term:`sm_failed` event.
        """
        self.enabled.clear()
        self.unacked_queue.clear()
        self.xmpp.event('sm_failed', stanza)

    def _handle_ack(self, ack):
        """Process a server ack by freeing acked stanzas from the queue.

        Raises a :term:`stanza_acked` event for each acked stanza.
        """
        if ack['h'] == self.last_ack:
            return

        with self.ack_lock:
            # Counters wrap, so the number acked is a modular difference.
            num_acked = (ack['h'] - self.last_ack) % MAX_SEQ
            num_unacked = len(self.unacked_queue)
            log.debug("Ack: %s, Last Ack: %s, " + \
                      "Unacked: %s, Num Acked: %s, " + \
                      "Remaining: %s",
                ack['h'],
                self.last_ack,
                num_unacked,
                num_acked,
                num_unacked - num_acked)
            for x in range(num_acked):
                seq, stanza = self.unacked_queue.popleft()
                self.xmpp.event('stanza_acked', stanza)
            self.last_ack = ack['h']

    def _handle_request_ack(self, req):
        """Handle an ack request by sending an ack."""
        self.send_ack()

    def _handle_incoming(self, stanza):
        """Increment the handled counter for each inbound stanza."""
        if not self.enabled.is_set():
            return stanza

        if isinstance(stanza, (Message, Presence, Iq)):
            with self.handled_lock:
                # Sequence numbers are mod 2^32
                self.handled = (self.handled + 1) % MAX_SEQ
        return stanza

    def _handle_outgoing(self, stanza):
        """Store outgoing stanzas in a queue to be acked."""
        if not self.enabled.is_set():
            return stanza

        if isinstance(stanza, (Message, Presence, Iq)):
            seq = None
            with self.seq_lock:
                # Sequence numbers are mod 2^32
                self.seq = (self.seq + 1) % MAX_SEQ
                seq = self.seq
            self.unacked_queue.append((seq, stanza))

            with self.window_counter_lock:
                self.window_counter -= 1
                if self.window_counter == 0:
                    self.window_counter = self.window
                    self.request_ack()
        return stanza
| 35.216561 | 75 | 0.59622 |
acf37da0a03f2cc8cf20884ab766b7771c845d45 | 247 | py | Python | src/graph_transpiler/webdnn/util/flags/test.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | 1 | 2021-04-09T15:55:35.000Z | 2021-04-09T15:55:35.000Z | src/graph_transpiler/webdnn/util/flags/test.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | null | null | null | src/graph_transpiler/webdnn/util/flags/test.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | null | null | null | import os
TEST_WEBGPU = os.environ.get("TEST_WEBGPU", "1") == "1"
TEST_WEBGL = os.environ.get("TEST_WEBGL", "1") == "1"
TEST_WEBASSEMBLY = os.environ.get("TEST_WEBASSEMBLY", "1") == "1"
TEST_FALLBACK = os.environ.get("TEST_FALLBACK", "1") == "1"
| 35.285714 | 65 | 0.663968 |
acf37eead33b2efc4b17a4655b997308522f46c0 | 942 | py | Python | endorsement/views/support/endorser_search.py | uw-it-aca/service-endorsement | a1ba3e4221bb3fe6c81c9f6947ad5e93f10a4a45 | [
"Apache-2.0"
] | 3 | 2017-10-16T17:19:32.000Z | 2019-07-31T22:31:48.000Z | endorsement/views/support/endorser_search.py | uw-it-aca/service-endorsement | a1ba3e4221bb3fe6c81c9f6947ad5e93f10a4a45 | [
"Apache-2.0"
] | 284 | 2016-06-17T18:21:31.000Z | 2022-03-21T16:55:03.000Z | endorsement/views/support/endorser_search.py | uw-it-aca/service-endorsement | a1ba3e4221bb3fe6c81c9f6947ad5e93f10a4a45 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.conf import settings
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.views.generic.base import TemplateView
from uw_saml.decorators import group_required
from endorsement.views.support import set_admin_wrapper_template
from string import ascii_uppercase
@method_decorator(login_required, name='dispatch')
@method_decorator(group_required(settings.PROVISION_SUPPORT_GROUP),
                  name='dispatch')
class EndorserSearch(TemplateView):
    """Support page for searching endorsers; restricted to logged-in members
    of the provisioning support group."""

    template_name = 'support/endorser.html'

    def get_context_data(self, **kwargs):
        """Add the A-Z letter bar and any requested netid to the template
        context, then apply the admin wrapper template."""
        context = super().get_context_data(**kwargs)
        context['alphabet_string'] = ascii_uppercase
        # Optional ?netid= query parameter pre-populates the search field.
        context['netid'] = self.request.GET.get('netid', '')
        set_admin_wrapper_template(context)
        return context
| 37.68 | 67 | 0.771762 |
acf37f5d00a981722128aab9ab5e4cc6dcd01845 | 9,265 | py | Python | mi/dataset/parser/dosta_abcdjm_cspp.py | rmanoni/mi-dataset | c1012a0cd8f2ea075e008cdd1ab291ed54f44d43 | [
"BSD-2-Clause"
] | null | null | null | mi/dataset/parser/dosta_abcdjm_cspp.py | rmanoni/mi-dataset | c1012a0cd8f2ea075e008cdd1ab291ed54f44d43 | [
"BSD-2-Clause"
] | null | null | null | mi/dataset/parser/dosta_abcdjm_cspp.py | rmanoni/mi-dataset | c1012a0cd8f2ea075e008cdd1ab291ed54f44d43 | [
"BSD-2-Clause"
] | null | null | null |
"""
@package mi.dataset.parser
@file marine-integrations/mi/dataset/parser/dosta_abcdjm_cspp.py
@author Mark Worden
@brief Parser for the dosta_abcdjm_cspp dataset driver
Release notes:
initial release
"""
__author__ = 'Mark Worden'
__license__ = 'Apache 2.0'
import numpy
from mi.core.log import get_logger
log = get_logger()
from mi.core.common import BaseEnum
from mi.core.instrument.data_particle import DataParticle
from mi.dataset.parser.common_regexes import INT_REGEX, FLOAT_REGEX, MULTIPLE_TAB_REGEX, END_OF_LINE_REGEX
from mi.dataset.parser.cspp_base import CsppParser, Y_OR_N_REGEX, CsppMetadataDataParticle, MetadataRawDataKey, \
encode_y_or_n
# A regular expression for special characters that could exist in a data record preceding the model
SPECIAL_CHARS_REGEX = r'(?:[\?][%])?'

# A regular expression that should match a dosta_abcdjm data record.
# The 14 capture groups below correspond, in order, to the indices defined
# in DataMatchesGroupNumber.
DATA_REGEX = r'(' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX # Profiler Timestamp
DATA_REGEX += '(' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX # Depth
DATA_REGEX += '(' + Y_OR_N_REGEX + ')' + MULTIPLE_TAB_REGEX # Suspect Timestamp
DATA_REGEX += SPECIAL_CHARS_REGEX + '(' + INT_REGEX + ')' + MULTIPLE_TAB_REGEX  # Model Number
DATA_REGEX += '(' + INT_REGEX + ')' + MULTIPLE_TAB_REGEX  # Serial Number
DATA_REGEX += '(' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX  # oxygen content
DATA_REGEX += '(' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX  # ambient temperature
DATA_REGEX += '(' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX  # calibrated phase
DATA_REGEX += '(' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX  # temperature compensated phase
DATA_REGEX += '(' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX  # phase measurement with blue excitation
DATA_REGEX += '(' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX  # phase measurement with red excitation
DATA_REGEX += '(' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX  # amplitude measurement with blue excitation
DATA_REGEX += '(' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX  # amplitude measurement with red excitation
DATA_REGEX += '(' + FLOAT_REGEX + ')' + END_OF_LINE_REGEX  # raw temperature, voltage from thermistor
class DataMatchesGroupNumber(BaseEnum):
    """
    An enum for group match indices for a data record only chunk.
    The values are 1-based regex group numbers into DATA_REGEX matches.
    """
    PROFILER_TIMESTAMP = 1
    PRESSURE = 2
    SUSPECT_TIMESTAMP = 3
    MODEL = 4
    SERIAL_NUMBER = 5
    ESTIMATED_OXYGEN_CONCENTRATION = 6
    OPTODE_TEMPERATURE = 7
    CALIBRATED_PHASE = 8
    TEMP_COMPENSATED_PHASE = 9
    BLUE_PHASE = 10
    RED_PHASE = 11
    BLUE_AMPLITUDE = 12
    RED_AMPLITUDE = 13
    RAW_TEMPERATURE = 14
class DataParticleType(BaseEnum):
    """
    The data particle types that a dosta_abcdjm_cspp parser could generate.
    Recovered and telemetered streams use distinct stream names.
    """
    METADATA_RECOVERED = 'dosta_abcdjm_cspp_metadata_recovered'
    INSTRUMENT_RECOVERED = 'dosta_abcdjm_cspp_instrument_recovered'
    METADATA_TELEMETERED = 'dosta_abcdjm_cspp_metadata'
    INSTRUMENT_TELEMETERED = 'dosta_abcdjm_cspp_instrument'
class DostaAbcdjmCsppParserDataParticleKey(BaseEnum):
    """
    The data particle keys associated with dosta_abcdjm_cspp data particle
    parameters (the output parameter names used when encoding particles).
    """
    PRODUCT_NUMBER = 'product_number'
    SERIAL_NUMBER = 'serial_number'
    PROFILER_TIMESTAMP = 'profiler_timestamp'
    PRESSURE = 'pressure_depth'
    SUSPECT_TIMESTAMP = 'suspect_timestamp'
    ESTIMATED_OXYGEN_CONCENTRATION = 'estimated_oxygen_concentration'
    OPTODE_TEMPERATURE = 'optode_temperature'
    CALIBRATED_PHASE = 'calibrated_phase'
    TEMP_COMPENSATED_PHASE = 'temp_compensated_phase'
    BLUE_PHASE = 'blue_phase'
    RED_PHASE = 'red_phase'
    BLUE_AMPLITUDE = 'blue_amplitude'
    RED_AMPLITUDE = 'red_amplitude'
    RAW_TEMPERATURE = 'raw_temperature'
# A group of non common metadata particle encoding rules
# (particle key, regex group index, encoding callable) used to simplify
# encoding using a loop.
NON_COMMON_METADATA_PARTICLE_ENCODING_RULES = [
    (DostaAbcdjmCsppParserDataParticleKey.PRODUCT_NUMBER, DataMatchesGroupNumber.MODEL, int),
    (DostaAbcdjmCsppParserDataParticleKey.SERIAL_NUMBER, DataMatchesGroupNumber.SERIAL_NUMBER, int)
]

# A group of instrument data particle encoding rules used to simplify
# encoding using a loop.
# FIX: the profiler timestamp previously used `numpy.float`, an alias of the
# builtin `float` that was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin is used instead (identical behavior).
INSTRUMENT_PARTICLE_ENCODING_RULES = [
    (DostaAbcdjmCsppParserDataParticleKey.PROFILER_TIMESTAMP, DataMatchesGroupNumber.PROFILER_TIMESTAMP, float),
    (DostaAbcdjmCsppParserDataParticleKey.PRESSURE, DataMatchesGroupNumber.PRESSURE, float),
    (DostaAbcdjmCsppParserDataParticleKey.SUSPECT_TIMESTAMP, DataMatchesGroupNumber.SUSPECT_TIMESTAMP, encode_y_or_n),
    (DostaAbcdjmCsppParserDataParticleKey.ESTIMATED_OXYGEN_CONCENTRATION,
     DataMatchesGroupNumber.ESTIMATED_OXYGEN_CONCENTRATION, float),
    (DostaAbcdjmCsppParserDataParticleKey.OPTODE_TEMPERATURE, DataMatchesGroupNumber.OPTODE_TEMPERATURE, float),
    (DostaAbcdjmCsppParserDataParticleKey.CALIBRATED_PHASE, DataMatchesGroupNumber.CALIBRATED_PHASE, float),
    (DostaAbcdjmCsppParserDataParticleKey.TEMP_COMPENSATED_PHASE, DataMatchesGroupNumber.TEMP_COMPENSATED_PHASE, float),
    (DostaAbcdjmCsppParserDataParticleKey.BLUE_PHASE, DataMatchesGroupNumber.BLUE_PHASE, float),
    (DostaAbcdjmCsppParserDataParticleKey.RED_PHASE, DataMatchesGroupNumber.RED_PHASE, float),
    (DostaAbcdjmCsppParserDataParticleKey.BLUE_AMPLITUDE, DataMatchesGroupNumber.BLUE_AMPLITUDE, float),
    (DostaAbcdjmCsppParserDataParticleKey.RED_AMPLITUDE, DataMatchesGroupNumber.RED_AMPLITUDE, float),
    (DostaAbcdjmCsppParserDataParticleKey.RAW_TEMPERATURE, DataMatchesGroupNumber.RAW_TEMPERATURE, float),
]
class DostaAbcdjmCsppMetadataDataParticle(CsppMetadataDataParticle):
    """
    Class for building a dosta_abcdjm_cspp metadata particle
    """

    def _build_parsed_values(self):
        """
        Take something in the data format and turn it into
        an array of dictionaries defining the data in the particle
        with the appropriate tag.
        """

        # Set the base metadata parsed values to the results to return
        results = self._build_metadata_parsed_values()

        data_match = self.raw_data[MetadataRawDataKey.DATA_MATCH]

        # Process each of the non common metadata particle parameters
        for (name, index, encoding) in NON_COMMON_METADATA_PARTICLE_ENCODING_RULES:
            results.append(self._encode_value(name, data_match.group(index), encoding))

        # Set the internal timestamp from the profiler timestamp (unix time).
        # FIX: `numpy.float` (an alias of builtin `float`, removed in NumPy
        # 1.24) is replaced with the builtin — identical behavior.
        internal_timestamp_unix = float(data_match.group(
            DataMatchesGroupNumber.PROFILER_TIMESTAMP))
        self.set_internal_timestamp(unix_time=internal_timestamp_unix)

        return results
class DostaAbcdjmCsppMetadataRecoveredDataParticle(DostaAbcdjmCsppMetadataDataParticle):
    """Metadata particle for the *recovered* dosta_abcdjm_cspp data stream."""

    _data_particle_type = DataParticleType.METADATA_RECOVERED
class DostaAbcdjmCsppMetadataTelemeteredDataParticle(DostaAbcdjmCsppMetadataDataParticle):
    """Metadata particle for the *telemetered* dosta_abcdjm_cspp data stream."""

    _data_particle_type = DataParticleType.METADATA_TELEMETERED
class DostaAbcdjmCsppInstrumentDataParticle(DataParticle):
    """
    Class for building a dosta_abcdjm_cspp instrument data particle
    """

    def _build_parsed_values(self):
        """
        Take something in the data format and turn it into
        an array of dictionaries defining the data in the particle
        with the appropriate tag.

        :return: list of encoded particle value dictionaries
        """
        results = []

        # Process each of the instrument particle parameters
        for (name, index, encoding) in INSTRUMENT_PARTICLE_ENCODING_RULES:
            results.append(self._encode_value(name, self.raw_data.group(index), encoding))

        # Set the internal timestamp.
        # FIX: numpy.float was a deprecated alias of the builtin float and was
        # removed in numpy 1.24; use the builtin directly (identical behavior).
        internal_timestamp_unix = float(self.raw_data.group(
            DataMatchesGroupNumber.PROFILER_TIMESTAMP))
        self.set_internal_timestamp(unix_time=internal_timestamp_unix)

        return results
class DostaAbcdjmCsppInstrumentRecoveredDataParticle(DostaAbcdjmCsppInstrumentDataParticle):
    """Instrument particle for the *recovered* dosta_abcdjm_cspp data stream."""

    _data_particle_type = DataParticleType.INSTRUMENT_RECOVERED
class DostaAbcdjmCsppInstrumentTelemeteredDataParticle(DostaAbcdjmCsppInstrumentDataParticle):
    """Instrument particle for the *telemetered* dosta_abcdjm_cspp data stream."""

    _data_particle_type = DataParticleType.INSTRUMENT_TELEMETERED
class DostaAbcdjmCsppParser(CsppParser):
    # Parser for dosta_abcdjm_cspp files.  All record matching is driven by
    # DATA_REGEX, which is handed to the common CsppParser base class.

    def __init__(self,
                 config,
                 stream_handle,
                 exception_callback):
        """
        This method is a constructor that will instantiate an DostaAbcdjmCsppParser object.
        @param config The configuration for this DostaAbcdjmCsppParser parser
        @param stream_handle The handle to the data stream containing the dosta_abcdjm_cspp data
        @param exception_callback The function to call to report exceptions
        """

        # Call the superclass constructor (Py2-compatible explicit super form)
        super(DostaAbcdjmCsppParser, self).__init__(config,
                                                    stream_handle,
                                                    exception_callback,
                                                    DATA_REGEX)
| 41.177778 | 120 | 0.748516 |
acf37fbc6fc897fba0acb669eac78aa7d4193722 | 15,140 | py | Python | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2020_08_01_preview/aio/operations/_blob_inventory_policies_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-03-09T08:59:13.000Z | 2022-03-09T08:59:13.000Z | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2020_08_01_preview/aio/operations/_blob_inventory_policies_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2020_08_01_preview/aio/operations/_blob_inventory_policies_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-03-04T06:21:56.000Z | 2022-03-04T06:21:56.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._blob_inventory_policies_operations import build_create_or_update_request, build_delete_request, build_get_request, build_list_request
T = TypeVar('T')  # generic payload type produced by deserialization
# Signature of the optional `cls` response-transformer callback accepted by every operation.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
# NOTE(review): this operations class is generated by AutoRest from the service
# swagger; prefer regenerating over hand-editing.
class BlobInventoryPoliciesOperations:
    """BlobInventoryPoliciesOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.storage.v2020_08_01_preview.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace_async
    async def get(
        self,
        resource_group_name: str,
        account_name: str,
        blob_inventory_policy_name: Union[str, "_models.BlobInventoryPolicyName"],
        **kwargs: Any
    ) -> "_models.BlobInventoryPolicy":
        """Gets the blob inventory policy associated with the specified storage account.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :param blob_inventory_policy_name: The name of the storage account blob inventory policy. It
         should always be 'default'.
        :type blob_inventory_policy_name: str or
         ~azure.mgmt.storage.v2020_08_01_preview.models.BlobInventoryPolicyName
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BlobInventoryPolicy, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2020_08_01_preview.models.BlobInventoryPolicy
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BlobInventoryPolicy"]
        # Map auth/404/409 HTTP statuses onto the standard azure-core exceptions.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # Build the HTTP request from the operation parameters.
        request = build_get_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            subscription_id=self._config.subscription_id,
            blob_inventory_policy_name=blob_inventory_policy_name,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('BlobInventoryPolicy', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/inventoryPolicies/{blobInventoryPolicyName}'}  # type: ignore

    @distributed_trace_async
    async def create_or_update(
        self,
        resource_group_name: str,
        account_name: str,
        blob_inventory_policy_name: Union[str, "_models.BlobInventoryPolicyName"],
        properties: "_models.BlobInventoryPolicy",
        **kwargs: Any
    ) -> "_models.BlobInventoryPolicy":
        """Sets the blob inventory policy to the specified storage account.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :param blob_inventory_policy_name: The name of the storage account blob inventory policy. It
         should always be 'default'.
        :type blob_inventory_policy_name: str or
         ~azure.mgmt.storage.v2020_08_01_preview.models.BlobInventoryPolicyName
        :param properties: The blob inventory policy set to a storage account.
        :type properties: ~azure.mgmt.storage.v2020_08_01_preview.models.BlobInventoryPolicy
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BlobInventoryPolicy, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2020_08_01_preview.models.BlobInventoryPolicy
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BlobInventoryPolicy"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        # Serialize the policy model into the JSON request body.
        _json = self._serialize.body(properties, 'BlobInventoryPolicy')

        request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            subscription_id=self._config.subscription_id,
            blob_inventory_policy_name=blob_inventory_policy_name,
            content_type=content_type,
            json=_json,
            template_url=self.create_or_update.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('BlobInventoryPolicy', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/inventoryPolicies/{blobInventoryPolicyName}'}  # type: ignore

    @distributed_trace_async
    async def delete(
        self,
        resource_group_name: str,
        account_name: str,
        blob_inventory_policy_name: Union[str, "_models.BlobInventoryPolicyName"],
        **kwargs: Any
    ) -> None:
        """Deletes the blob inventory policy associated with the specified storage account.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :param blob_inventory_policy_name: The name of the storage account blob inventory policy. It
         should always be 'default'.
        :type blob_inventory_policy_name: str or
         ~azure.mgmt.storage.v2020_08_01_preview.models.BlobInventoryPolicyName
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_delete_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            subscription_id=self._config.subscription_id,
            blob_inventory_policy_name=blob_inventory_policy_name,
            template_url=self.delete.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Both 200 and 204 indicate a successful delete.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/inventoryPolicies/{blobInventoryPolicyName}'}  # type: ignore

    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        account_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.ListBlobInventoryPolicy"]:
        """Gets the blob inventory policy associated with the specified storage account.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ListBlobInventoryPolicy or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.storage.v2020_08_01_preview.models.ListBlobInventoryPolicy]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListBlobInventoryPolicy"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page uses the operation's URL template; subsequent pages
            # follow the server-provided next_link verbatim.
            if not next_link:
                request = build_list_request(
                    resource_group_name=resource_group_name,
                    account_name=account_name,
                    subscription_id=self._config.subscription_id,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                request = build_list_request(
                    resource_group_name=resource_group_name,
                    account_name=account_name,
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page; the first tuple element is the next-link
            # (this API is single-page, hence None).
            deserialized = self._deserialize("ListBlobInventoryPolicy", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/inventoryPolicies'}  # type: ignore
| 48.216561 | 227 | 0.69214 |
acf380f75702afdfa67962f47d5a550c52834828 | 44,357 | py | Python | 19_back_track/backtracking_xrh.py | Xinrihui/Data-Structure-and-Algrithms | fa3a455f64878e42d033c1fd8d612f108c71fb72 | [
"Apache-2.0"
] | 1 | 2021-08-13T10:55:33.000Z | 2021-08-13T10:55:33.000Z | 19_back_track/backtracking_xrh.py | Xinrihui/Data-Structure-and-Algrithms | fa3a455f64878e42d033c1fd8d612f108c71fb72 | [
"Apache-2.0"
] | null | null | null | 19_back_track/backtracking_xrh.py | Xinrihui/Data-Structure-and-Algrithms | fa3a455f64878e42d033c1fd8d612f108c71fb72 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import itertools
from numpy import *
def test_CombinationAndPermutation():
    """
    Demonstrate combinations, permutations and Cartesian products from
    :mod:`itertools` by printing a few small examples.

    ref https://www.cnblogs.com/xiao-apple36/p/10861830.html
    """
    # 1. combinations of 'ABC' taken 1, 2 and 3 at a time
    for size in (1, 2, 3):
        for combo in itertools.combinations('ABC', size):
            print(combo)

    # 2. permutations of length 2, printed comma separated
    for perm in itertools.permutations('ABC', 2):
        print(''.join(perm), end=",")
    print('\r')

    # 3. Cartesian products
    for pair in itertools.product((1, 2), ('A', 'B', 'C')):
        print(pair)

    # 'ABC' crossed with itself (repeat=2)
    for pair in itertools.product('ABC', repeat=2):
        print(''.join(pair), end=",")
    print('\n')
class CombinationAndPermutation:
    """
    Hand-rolled implementations of Cartesian product, permutations and
    combinations, mirroring the pure-python equivalents documented for
    :mod:`itertools`.

    ref: https://docs.python.org/zh-cn/3.7/library/itertools.html
    """

    # 1. Cartesian product
    @staticmethod
    def product(*args, repeat=1):
        """
        Cartesian product of the input iterables.

        e.g.
        product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
        product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111

        Note: unlike itertools.product, tuples are generated with the *last*
        position varying slowest (the inner/outer loops are swapped).
        """
        pools = [tuple(pool) for pool in args] * repeat  # e.g. [('A','B','C'), ('A','B','C')]
        result = [[]]
        for pool in pools:
            res = []
            for y in pool:
                for x in result:
                    res.append(x + [y])
            result = res
        for prod in result:
            yield tuple(prod)

    # 2. permutations
    @staticmethod
    def permutations(iterable, r=None):
        """
        r-length permutations of elements from the iterable.

        Implemented as a filtered Cartesian product of indices: tuples that
        reuse an input position (duplicate indices) are discarded.
        """
        pool = tuple(iterable)
        n = len(pool)
        r = n if r is None else r
        # FIX: the original called the bare name ``product`` here, which does
        # not resolve to the static method above -- at runtime it picks up
        # whatever ``product`` means at module scope (with this file's
        # ``from numpy import *`` that is numpy's product, so the call fails).
        # Qualify the call with the class name.
        for indices in CombinationAndPermutation.product(range(n), repeat=r):
            if len(set(indices)) == r:  # len(set(('A','A'))) == 1, i.e. drop repeats
                yield tuple(pool[i] for i in indices)

    # 3. combinations
    @staticmethod
    def combinations(iterable, r):
        """
        r-length combinations: permutations filtered down to those whose
        index tuples are already sorted (i.e. order-insensitive selections).
        """
        pool = tuple(iterable)
        n = len(pool)
        for indices in CombinationAndPermutation.permutations(range(n), r):
            if sorted(indices) == list(indices):
                yield tuple(pool[i] for i in indices)

    @staticmethod
    def combinations_v2(iterable, r):
        """
        Combinations via direct index bookkeeping (the pure-python
        equivalent documented for itertools.combinations).

        e.g.
        combinations('ABCD', 2) --> AB AC AD BC BD CD
        combinations(range(4), 3) --> 012 013 023 123
        """
        pool = tuple(iterable)
        n = len(pool)
        if r > n:
            return
        indices = list(range(r))
        yield tuple(pool[i] for i in indices)
        while True:
            # Find the rightmost index that can still be advanced.
            for i in reversed(range(r)):
                if indices[i] != i + n - r:
                    break
            else:
                return
            indices[i] += 1
            # Reset every index to the right of i to its smallest legal value.
            for j in range(i + 1, r):
                indices[j] = indices[j - 1] + 1
            yield tuple(pool[i] for i in indices)
from functools import reduce
def test_reduce():
    """
    Exercise functools.reduce.

    reduce folds a two-argument function over a sequence [x1, x2, x3, ...]:
    reduce(f, [x1, x2, x3, x4]) = f(f(f(x1, x2), x3), x4)

    ref: https://www.liaoxuefeng.com/wiki/1016959663602400/1017329367486080
    """
    # Summation: 1 + 3 + 5 + 7 + 9 -> 25
    print(reduce(lambda left, right: left + right, [1, 3, 5, 7, 9]))

    # Fold the digit sequence [1, 3, 5, 7, 9] into the integer 13579.
    print(reduce(lambda acc, digit: acc * 10 + digit, [1, 3, 5, 7, 9]))

    digit_table = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9}

    def to_int(text):
        """Convert a decimal string to an int with reduce + map."""
        return reduce(lambda acc, d: acc * 10 + d, map(lambda ch: digit_table[ch], text))

    print(to_int('123'))
class solution_N_quene:
    """
    by XRH
    date: 2020-08-23

    The N-queens problem: place N queens on an N x N chessboard so that no
    two queens attack each other.  Each solution is a list where element i
    is the column of the queen in row i.
    """

    def N_quene(self, N):
        """
        Solve the N-queens problem by backtracking, one row at a time.

        :param N: board size / number of queens
        :return: list of all solutions (each a list of column indices)
        """
        self.N = N
        # Plain list-of-lists board; 1 marks a queen.
        # FIX: the original special-cased row 0 with a loop that duplicated
        # the body of _process (and used a numpy array for the board).  Row 0
        # is handled uniformly by _process because _check_legality is
        # trivially true there; the search order and results are unchanged.
        self.states = [[0] * N for _ in range(N)]
        self.res = []

        self._process(0, [])  # place queens row by row, starting at row 0

        return self.res

    def _check_legality(self, level, current):
        """
        Check whether a queen at (level, current) conflicts with any queen
        already placed in rows 0 .. level-1.
        """
        # 1. same column, scanning upwards
        i = level - 1
        while i >= 0:
            if self.states[i][current] == 1:  # conflict found
                return False
            i -= 1

        # 2. upper-left diagonal
        i = level - 1
        j = current - 1
        while i >= 0 and j >= 0:
            if self.states[i][j] == 1:  # conflict found
                return False
            i -= 1
            j -= 1

        # 3. upper-right diagonal
        i = level - 1
        j = current + 1
        while i >= 0 and j < self.N:
            if self.states[i][j] == 1:  # conflict found
                return False
            i -= 1
            j += 1

        return True

    def _process(self, level, prev_list):
        """
        Try every legal column for the queen in row `level` and recurse to
        the next row; a full board (level == N) is recorded as a solution.
        """
        if level == self.N:
            # Reached a leaf of the search tree: all N rows are filled.
            self.res.append(prev_list)
        elif level < self.N:
            for j in range(self.N):
                if self._check_legality(level, j):
                    self.states[level][j] = 1              # place the queen at [level][j]
                    self._process(level + 1, prev_list + [j])
                    self.states[level][j] = 0              # take it back (backtrack)
class solution_zero_one_bag_weight:
"""
01 背包问题 (只有 背包重量 , 没有背包价值)
"""
def zero_one_bag_weight_iteration(self,weights, capacity):
"""
迭代的方法解 01 背包问题
:param weights:
:param capacity:
:return:
"""
L = len(weights)
max_bag_weight = 0
res_bag = ()
for l in range(1, L + 1):
# 遍历 背包的 中的物品件数 l=1, l=2, l=3
for bag in itertools.combinations(weights, l): # 背包中的物品件数 为l 时,列举所有可能的物品的组合
bag_weight = sum(map(lambda x: x[1], bag)) # 背包中 所有物品的重量求和
# print('bag:', bag, ' weight: ', bag_weight)
if bag_weight > max_bag_weight and bag_weight <= capacity:
max_bag_weight = bag_weight
res_bag = bag
return max_bag_weight, res_bag
def zero_one_bag_weight(self,weights,capacity):
self.weights=weights
self.capacity=capacity
self.max_bag_weight=0
self.res_bag=()
current_bag=[]
self.__process(0,current_bag) #current_bag 表示当前已经装进去的物品;i表示考察到哪个物品了;
return self.max_bag_weight,self.res_bag
def __process(self,i,current_bag):
if i <= len(self.weights):
bag_weight = sum(map(lambda x: x[1], current_bag))
if bag_weight > self.capacity: #搜索剪枝: 当发现已经选择的物品的重量超过 Wkg 之后,我们就停止继续探测剩下的物品
return
print('bag:', current_bag, ' weight: ', bag_weight)
if bag_weight >self.max_bag_weight:
self.max_bag_weight=bag_weight
self.res_bag = current_bag
self.__process(i+1,current_bag) # 第i 个物品不放入背包
if i < len(self.weights):
self.__process(i + 1, current_bag+[self.weights[i]]) # 第i 个物品 放入背包
class solution_zero_one_bag_value:
    """
    by XRH
    date: 2020-08-23

    0/1 knapsack with item values: choose items so that the total value is
    maximised while the total weight stays within the capacity.
    """

    def zero_one_bag_value(self, weights, values, capacity):
        """
        Solve the 0/1 knapsack by plain backtracking.

        Each search-tree node decides whether item i goes into the bag, so
        every leaf is a feasible packing.  With N items the time complexity
        is O(2^N).

        :param weights: weight of each item
        :param values: value of each item
        :param capacity: weight limit of the bag
        :return: list of item indices forming the most valuable packing
        """
        self.N = len(weights)  # N items in total
        self.weights = weights
        self.values = values
        self.capacity = capacity

        item_NO = 0
        bag_weight = 0
        bag_value = 0
        prev_items = []

        self.max_value = float('-inf')
        self.max_value_bag_item = []

        self._process(item_NO, bag_weight, bag_value, prev_items)  # decide whether item 0 goes into the bag

        return self.max_value_bag_item

    def _process(self, item_NO, bag_weight, bag_value, prev_items):

        if item_NO == self.N:  # base case: every item has been considered
            if bag_value > self.max_value:  # found a more valuable packing
                self.max_value = bag_value
                self.max_value_bag_item = prev_items

        if item_NO < self.N:
            # case 1: put item item_NO into the bag
            if bag_weight + self.weights[item_NO] <= self.capacity:  # prune: never exceed the capacity
                self._process(item_NO + 1, bag_weight + self.weights[item_NO],
                              bag_value + self.values[item_NO], prev_items + [item_NO])

            # case 2: leave item item_NO out of the bag
            self._process(item_NO + 1, bag_weight,
                          bag_value, prev_items)

    def zero_one_bag_value_cache(self, weights, values, capacity):
        """
        Backtracking with a memo table ("states").

        1. Each search-tree node carries the current bag weight and value,
           and those states are used to prune the search space.
        2. states[i][w] records the best bag value seen so far when
           considering item i with bag weight w; a branch that cannot beat
           it is cut without recursing.
        3. Lifting this state table out of the search tree (plus its
           recurrence) yields the dynamic-programming solution.

        See "The Beauty of Data Structures and Algorithms" -> basic
        algorithms -> dynamic programming.
        """
        self.N = len(weights)  # N items in total
        self.weights = weights
        self.values = values
        self.capacity = capacity

        self.states = zeros((self.N, capacity + 1), dtype=int)

        item_NO = 0
        bag_weight = 0
        bag_value = 0
        prev_items = []

        self.max_value = float('-inf')
        self.max_value_bag_item = []

        self._process_cache(item_NO, bag_weight, bag_value, prev_items)  # decide whether item 0 goes into the bag

        return self.max_value_bag_item

    def _process_cache(self, item_NO, bag_weight, bag_value, prev_items):
        # print('item:{}, bag_weight:{}, bag_value:{}'.format(item_NO, bag_weight, bag_value))

        if item_NO == self.N:  # base case: every item has been considered
            if bag_value > self.max_value:  # found a more valuable packing
                self.max_value = bag_value
                self.max_value_bag_item = prev_items

        if item_NO < self.N:
            # case 1: put item item_NO into the bag
            after_bag_weight = bag_weight + self.weights[item_NO]
            after_bag_value = bag_value + self.values[item_NO]

            if after_bag_weight <= self.capacity:  # prune: never exceed the capacity
                # prune: only descend if this beats the memoised value for the
                # same (item, bag weight) state recorded on an earlier path
                if after_bag_value > self.states[item_NO][after_bag_weight]:
                    self.states[item_NO][after_bag_weight] = after_bag_value

                    self._process_cache(item_NO + 1,  # consider item item_NO + 1 next
                                        after_bag_weight,
                                        after_bag_value, prev_items + [item_NO])

            # case 2: leave item item_NO out of the bag
            self._process_cache(item_NO + 1, bag_weight,
                                bag_value, prev_items)
class soulution_bag_problem:
    """
    by XRH
    date: 2020-08-23

    Unbounded knapsack ("bag problem"): unlike the 0/1 variant, each item
    may be taken any number of times.  Solved with branch and bound.
    """

    def bag_problem_BranchBound(self, weights, values, capacity):
        """
        Branch-and-bound search for the most valuable packing.

        Items are first sorted by value density (value / weight), which
        makes `current value + remaining capacity * density` a valid
        optimistic bound for any subtree.

        :param weights: weight of each item
        :param values: value of each item
        :param capacity: weight limit of the bag
        :return: list of (index into the density-sorted items, count) pairs
        """
        self.N = len(weights)
        self.capacity = capacity

        pairs = [(values[i], weights[i]) for i in range(self.N)]
        # sort by value per unit of weight, densest first
        self.items_sorted = sorted(pairs, key=lambda item: item[0] / item[1], reverse=True)
        self.item_pre_value = [value / weight for (value, weight) in self.items_sorted]

        self.max_bound = float('-inf')  # value of the best packing found so far (the incumbent)
        self.max_value_bag_item = []

        # start at item 0 with an empty bag; the root's bound is unbounded
        self._process(0, 0, 0, [], float('inf'))

        return self.max_value_bag_item

    def _process(self, item_NO, bag_weight, bag_value, prev_items, root_bound):
        """
        Choose how many copies of items_sorted[item_NO] to pack.

        `root_bound` is the optimistic bound computed for this node by its
        parent; subtrees that cannot beat the incumbent are skipped.
        """
        if item_NO == self.N:
            # leaf of the search tree: a complete feasible packing
            if bag_value > self.max_bound:
                # a better feasible solution: raise the incumbent
                self.max_bound = bag_value
                self.max_value_bag_item = prev_items
            return

        unit_value, unit_weight = self.items_sorted[item_NO]
        # try every feasible count of this item, 0 .. capacity // weight
        for count in range(self.capacity // unit_weight + 1):
            next_weight = bag_weight + count * unit_weight
            if next_weight > self.capacity:  # too heavy: skip this count
                continue
            next_value = bag_value + count * unit_value
            # optimistic bound: fill the leftover capacity at this item's density
            next_bound = next_value + (self.capacity - next_weight) * self.item_pre_value[item_NO]
            if root_bound > self.max_bound:
                # branch only while this node might still beat the incumbent
                self._process(item_NO + 1, next_weight, next_value,
                              prev_items + [(item_NO, count)], next_bound)
class solution_pattern_match:
    """
    Wildcard pattern matching by backtracking.

    Supported pattern characters:
      '*' matches any run of characters (including the empty run)
      '?' matches zero or one character
      anything else matches itself
    """

    def match(self, pattern, text):
        """
        Return True if `pattern` matches the whole of `text`.

        :param pattern: pattern string ('*' / '?' wildcards allowed)
        :param text: string to match against
        :return: bool
        """
        self.pattern = pattern      # the regular-expression-like pattern
        self.text = text            # the string to be matched
        self.pattern_len = len(pattern)
        self.text_len = len(text)

        self.match_flag = False
        self.__process(0, 0)
        return self.match_flag

    def __process(self, text_i, pattern_j):
        if self.match_flag:  # a match was already found: stop recursing
            return

        if text_i == self.text_len:
            # FIX: the original only accepted pattern_j == pattern_len here, so
            # trailing wildcards (which can match the empty string) wrongly
            # failed, e.g. match('ab*', 'ab') and match('*', '') returned
            # False.  Any leftover pattern made up entirely of '*' / '?'
            # matches the exhausted text; an empty remainder trivially does.
            if all(c in ('*', '?') for c in self.pattern[pattern_j:]):
                self.match_flag = True
            return

        if text_i < self.text_len and pattern_j < self.pattern_len:  # keep indexing in bounds
            if self.pattern[pattern_j] == '*':
                # '*' may consume any number of the remaining characters;
                # text_len + 1 lets the pointer land on the end of the text
                for index in range(text_i, self.text_len + 1):
                    self.__process(index, pattern_j + 1)
            elif self.pattern[pattern_j] == '?':
                self.__process(text_i, pattern_j + 1)       # match zero characters
                self.__process(text_i + 1, pattern_j + 1)   # match one character
            else:  # ordinary character: must match exactly
                if self.text[text_i] == self.pattern[pattern_j]:
                    self.__process(text_i + 1, pattern_j + 1)
class solution_Traveling_Salesman_Problem:
    def tsp_recursive(self, city_distance, start_point):
        """
        Solve the travelling salesman problem by recursion + pruning.

        ref: https://www.cnblogs.com/dddyyy/p/10084673.html

        :param city_distance: distance matrix between the cities
        :param start_point: index of the starting city
        :return: (length of the shortest tour, the tour as a list of cities)
        """
        self.city_distance = city_distance
        self.city_names = list(range(len(city_distance)))
        self.start_point = start_point

        self.min_cost = float('inf')  # a sufficiently large initial best-tour cost
        self.min_cost_path = []

        # number of nodes on a closed tour, including the return to the start
        self.path_length = len(city_distance) + 1

        stage = 0  # stage 0: put the starting city on the current path
        current_path = [start_point]
        current_cost = 0

        self.__process(stage + 1, current_path, current_cost)

        return self.min_cost, self.min_cost_path
    def __process(self, stage, current_path, current_cost):
        # stages 1 .. N-1: extend the tour by one more unvisited city
        if stage < self.path_length - 1:

            if current_cost >= self.min_cost:
                # prune: this partial tour is already no better than the best complete tour
                return
            # print(current_path)
            # each city is visited exactly once: pick one not yet on the path
            for next_city in set(self.city_names) - set(current_path):
                current_city = current_path[-1]
                # new cost = cost so far + distance from the current city to the next one
                cost = current_cost + self.city_distance[current_city][next_city]
                self.__process(stage + 1, current_path + [next_city], cost)

        elif stage == self.path_length - 1:  # final stage: return to the starting city
            cost = current_cost + self.city_distance[current_path[-1]][self.start_point]
            current_path = current_path + [self.start_point]  # close the tour with the start city

            if cost < self.min_cost:
                self.min_cost = cost
                self.min_cost_path = current_path
    def tsp_dp(self, city_distance, start_city=0):
        """
        Solve the travelling salesman problem with dynamic programming,
        keyed on the state (current city, set of cities still to traverse)
        -- i.e. a tuple-keyed Held-Karp formulation.

        ref: https://blog.csdn.net/qq_39559641/article/details/101209534

        :param city_distance: distance matrix between the cities
        :param start_city: index of the starting city
        :return: (length of the shortest tour, the tour as a list of cities)
        """
        N = len(city_distance)  # number of cities, e.g. 5

        # M= pow(2,N-1) # N-1=4
        # states = zeros((N, M), dtype=int)

        states_dist = {}       # shortest distance back to the start for each state
        states_prev_node = {}  # best successor state chosen for each state

        city_set = set(list(range(N)))
        mid_city_set = city_set - set([start_city])  # intermediate cities, e.g. {1,2,3,4}

        # print(mid_city_set)

        # Build states bottom-up by the size of the "still to traverse" set V.
        for V_length in range(0, N - 1):  # 0,1,2,3
            for mid_start in mid_city_set:
                # all size-V_length subsets of the intermediate cities excluding mid_start
                for V in itertools.combinations(mid_city_set - set([mid_start]), V_length):

                    print(mid_start, V)  # e.g. mid_start=4, V=()
                    # mid_start=3, V=(4,)
                    # mid_start=2, V=(3, 4)
                    if len(V) == 0:
                        # base case: go straight back to the start city
                        states_dist[(mid_start, V)] = city_distance[mid_start][start_city]
                        states_prev_node[(mid_start, V)] = None
                    else:
                        min_dist = float('inf')
                        min_node = None
                        for city in V:
                            # v=set(V)-set([city]) # TypeError: unhashable type: 'set'
                            # sort so the tuple key is canonical for lookups
                            v = tuple(sorted(set(V) - set([city])))
                            dist = city_distance[mid_start][city] + states_dist[(city, v)]
                            if dist < min_dist:
                                min_dist = dist
                                min_node = (city, v)
                        states_dist[(mid_start, V)] = min_dist
                        states_prev_node[(mid_start, V)] = min_node

        # solve the original problem: d(start_city=0, mid_city_set={1, 2, 3, 4})
        print(start_city, tuple(mid_city_set))
        min_dist = float('inf')
        min_node = None
        for city in mid_city_set:
            v = tuple(sorted(set(mid_city_set) - set([city])))
            dist = city_distance[start_city][city] + states_dist[(city, v)]
            if dist < min_dist:
                min_dist = dist
                min_node = (city, v)
        states_dist[(start_city, tuple(mid_city_set))] = min_dist
        states_prev_node[(start_city, tuple(mid_city_set))] = min_node

        print(states_dist)
        print(states_prev_node)

        # walk the successor table forward to recover the cities on the tour
        path = [start_city]
        prev_node = (start_city, tuple(mid_city_set))
        for i in range(N - 1):
            node = states_prev_node[prev_node]
            path.append(node[0])
            prev_node = node

        path.append(start_city)

        return min_dist, path
def tsp_BranchBound(self, city_distance, start_point):
    """Branch-and-bound solver for the travelling-salesman problem.

    Stores the instance data on ``self`` and delegates the tree search to
    ``self.__process_BranchBound``; the best tour found is kept in
    ``self.min_cost`` / ``self.min_cost_path``.

    :param city_distance: square distance matrix
    :param start_point: index of the start (and end) city
    :return: (min_cost, min_cost_path)
    """
    self.city_distance = city_distance
    self.citys = list(range(len(city_distance)))
    self.start_point = start_point
    # Incumbent: a sufficiently large value so the first feasible tour replaces it.
    self.min_cost = float('inf')
    self.min_cost_path = []
    # A complete tour visits every city once and then returns to the start.
    self.path_length = len(city_distance) + 1
    # Stage 0 places the start city on the path; the search begins at stage 1.
    self.__process_BranchBound(1, [start_point], 0)
    return self.min_cost, self.min_cost_path
def __cost_func(self, current_path, current_cost):
    """Lower bound for the branch-and-bound TSP search.

    In the search tree, consider the subtree rooted at the last node of
    ``current_path``; the bound on any complete tour in that subtree is:
    current cost + the cheapest edge leaving the last node + the sum of the
    cheapest edges leaving each not-yet-visited node.

    :param current_path: cities visited so far
    :param current_cost: length of the current partial path
    :return: lower bound on the best tour reachable from this node
    """
    end_node=current_path[-1]
    end_node_min_edge=min( [ l for l in self.city_distance[end_node] if l!=0] ) # cheapest outgoing edge of the last node (0 = self-distance, skipped)
    lower_bound=0
    for node in set(self.citys) - set(current_path):
        node_min_edge=min( [ l for l in self.city_distance[node] if l!=0] ) # cheapest outgoing edge of an unvisited node
        lower_bound+=node_min_edge
    return current_cost+ end_node_min_edge+lower_bound
def __process_BranchBound(self, stage, current_path, current_cost):
    """Recursive branch-and-bound search over partial tours.

    :param stage: number of cities already on the path
    :param current_path: cities visited so far (starts with the start city)
    :param current_cost: length of the current partial path
    """
    if stage < self.path_length - 1: # intermediate stages: extend the partial tour
        if self.__cost_func( current_path, current_cost) >= self.min_cost: # bound: for a minimisation problem, backtrack when the lower bound already reaches the incumbent
            return
        for next_city in set(self.citys) - set(current_path): # each city is visited once: branch on the cities not yet on the path
            current_city = current_path[-1]
            cost = current_cost + self.city_distance[current_city][next_city] # new cost = current cost + distance from the current city to the next one
            self.__process_BranchBound(stage + 1, current_path + [next_city], cost)
    elif stage == self.path_length - 1: # last stage: close the tour by returning to the start
        cost = current_cost + self.city_distance[current_path[-1]][self.start_point]
        current_path = current_path + [self.start_point] # append the start city at the end of the path
        if cost < self.min_cost:
            # Feasible tour better than the incumbent: update the bound.
            self.min_cost = cost
            self.min_cost_path = current_path
class solution_loading_problem:
    """Container-loading problem.

    n containers with weights ``weights`` must be distributed over two ships
    whose capacities are c1 and c2.  Reduction used here: load the first ship
    as full as possible without exceeding its capacity; if the remaining
    containers all fit on the second ship, a feasible plan exists.
    """
    def loading_problem_recursive(self, weights , capacity , capacity2 ):
        """Recursive (backtracking) solution of the loading problem.

        :param weights: container weights
        :param capacity: capacity of the first ship
        :param capacity2: capacity of the second ship
        :return: True when every container can be shipped, else False
        """
        self.N = len(weights)  # number of containers
        self.weights = weights
        self.capacity = capacity
        self.max_weight = float('-inf')
        self.max_weight_bag_item = []
        item_NO = 0
        bag_weight = 0
        prev_items = []
        # 1. Find the heaviest loading of ship 1 within its capacity.
        self._process(item_NO, bag_weight, prev_items)  # start by considering container 0
        # 2. Check whether ship 2 can take all remaining containers.
        item_set=set(self.max_weight_bag_item)
        print('第一艘船 装入的集装箱:',item_set)
        rest_item_set= set(list(range(self.N)))-item_set
        print('第二艘船 装入的集装箱:', rest_item_set)
        bag2_weight=0
        for item in rest_item_set:
            bag2_weight+=weights[item]
        flag=False
        if bag2_weight<=capacity2: flag=True
        return flag
    def _process(self, item_NO, bag_weight, prev_items):
        """Recursive include/exclude search for the heaviest loading of ship 1."""
        if item_NO == self.N: # recursion end: the last container has been considered
            if bag_weight > self.max_weight: # found a heavier feasible loading
                self.max_weight = bag_weight
                self.max_weight_bag_item = prev_items
        if item_NO < self.N:
            # case 1: load container item_NO
            if bag_weight + self.weights[item_NO] <= self.capacity: # prune: never exceed the ship's capacity
                self._process(item_NO + 1, bag_weight + self.weights[item_NO],
                              prev_items + [item_NO])
            # case 2: leave container item_NO out
            self._process(item_NO + 1, bag_weight,
                          prev_items)
    def loading_problem_iteration(self, weights, capacity, capacity2):
        """Iterative (explicit backtracking) solution of the loading problem.

        NOTE(review): the weights are re-sorted in descending order below, so
        the indices collected in ``max_weight_bag_item`` refer to the sorted
        list, not the caller's original order -- confirm before relying on
        the returned indices.

        :param weights: container weights
        :param capacity: capacity of the first ship
        :param capacity2: capacity of the second ship (not used in this variant)
        :return: indices (into the sorted weights) loaded onto the first ship
        """
        N = len(weights)  # number of containers
        weights= sorted(weights,reverse=True)
        max_weight = float('-inf')
        max_weight_bag_item = []
        item_NO = 0
        bag_weight = 0
        prev_items = []
        while True:
            while item_NO <= N-1:
                after_bag_weight= bag_weight + weights[item_NO]
                # case 1: load container item_NO when the capacity allows it
                if after_bag_weight <= capacity:
                    bag_weight= after_bag_weight
                    prev_items.append(item_NO)
                item_NO+=1
            if item_NO==N: # the last container has been considered
                if bag_weight>max_weight:
                    max_weight=bag_weight
                    max_weight_bag_item=array(prev_items) # copy prev_items
            # backtrack
            if len(prev_items)==0: # nothing loaded any more: every branch has been explored
                break
            root=prev_items[-1]
            bag_weight-=weights[root] # container `root` switches from the "loaded" branch to the "not loaded" branch
            item_NO = root + 1
            prev_items.pop(-1) # `root` is no longer loaded, remove it from prev_items
        return max_weight_bag_item
class solution_Graph_Coloring_Problem():
    """Graph m-coloring solved by backtracking."""
    def mGCP(self,mapGraph,m):
        """Enumerate every valid m-coloring of ``mapGraph`` recursively.

        See the backtracking chapter of "The Beauty of Data Structures and
        Algorithms".

        :param mapGraph: adjacency matrix (1 = edge between the two nodes)
        :param m: number of available colors
        :return: (number of valid colorings, list of colorings)
        """
        self.mapGraph=mapGraph
        self.colors= list(range(1,m+1))
        self.nodeNum= len(mapGraph) # number of nodes
        self.res_nodes_color=[] # every complete coloring found
        # node 0: the first node may take any of the m colors
        node=0
        current_nodes_color=[]
        for color in self.colors:
            self._process_find_all( node+1 ,current_nodes_color+[color])
        return len(self.res_nodes_color),self.res_nodes_color
    def _process_find_all(self,node,current_nodes_color):
        """Recursively enumerate all feasible colorings.

        :param node: index of the node to color next
        :param current_nodes_color: colors already assigned, in node order;
               e.g. [1,2,3] -> node 0 has color 1, node 1 color 2, node 2 color 3
        """
        if node < self.nodeNum: # inner nodes
            node=node # node to color now
            adjacent_nodes=self.mapGraph[node] # adjacency row of this node
            adjacent_nodes=adjacent_nodes[:len(current_nodes_color)] # keep only the neighbours that are already colored (current_nodes_color is in node order)
            adjacent_nodes_colors=[ current_nodes_color[index]
                        for index,node in enumerate(adjacent_nodes) if node==1] # colors used by the already-colored neighbours
            available_colors=set(self.colors)-set(adjacent_nodes_colors) # colors still usable for this node
            for color in available_colors:
                self._process_find_all(node + 1, current_nodes_color + [color])
        elif node== self.nodeNum: # every node has been colored successfully
            self.res_nodes_color.append(current_nodes_color)
    def mGCP_v1(self, mapGraph, m):
        """m-coloring variant: find one feasible coloring recursively, then
        derive the other colorings by permuting the colors (symmetry).

        NOTE(review): this generates exactly the recolorings of one seed
        solution, which matches ``mGCP`` only when that symmetry covers all
        solutions -- confirm for arbitrary graphs.

        :param mapGraph: adjacency matrix
        :param m: number of available colors
        :return: (number of colorings generated, list of colorings)
        """
        self.mapGraph = mapGraph
        self.colors = list(range(1, m + 1))
        self.nodeNum = len(mapGraph) # number of nodes
        self.flag = False
        self.nodes_color = [] # the seed coloring
        # 1. find one feasible coloring
        # node 0: the first node may take any of the m colors
        node = 0
        current_nodes_color = []
        for color in self.colors:
            self._process_find_one(node + 1, current_nodes_color + [color])
        # 2. generate the other feasible colorings from the seed,
        #    e.g. self.nodes_color = [1, 2, 3, 4, 1]
        self.res_nodes_color=[]
        for permutation in itertools.permutations(self.colors, m): # permutations of the colors
            dict={ i+1:ele for i,ele in enumerate(permutation)} # NOTE(review): shadows the builtin `dict`
            new_nodes_color= list(map( lambda ele: dict[ele] ,self.nodes_color))
            self.res_nodes_color.append(new_nodes_color)
        return len(self.res_nodes_color), self.res_nodes_color
    def _process_find_one(self, node, current_nodes_color):
        """Recursive search that stops at the first feasible coloring.

        :param node: index of the node to color next
        :param current_nodes_color: colors already assigned, in node order
        """
        if self.flag:
            return
        if node < self.nodeNum: # inner nodes
            node = node # node to color now
            adjacent_nodes = self.mapGraph[node] # adjacency row of this node
            adjacent_nodes = adjacent_nodes[
                             :len(current_nodes_color)] # keep only the neighbours that are already colored (current_nodes_color is in node order)
            adjacent_nodes_colors = [current_nodes_color[index]
                                     for index, node in enumerate(adjacent_nodes) if node == 1] # colors used by the already-colored neighbours
            available_colors = set(self.colors) - set(adjacent_nodes_colors) # colors still usable for this node
            for color in available_colors:
                self._process_find_one(node + 1, current_nodes_color + [color])
        elif node == self.nodeNum: # every node has been colored successfully
            self.flag=True
            self.nodes_color=current_nodes_color
class solution_biggest_group:
    """Maximum-clique problem."""
    def biggest_group_BranchBound(self,graph):
        """Branch-and-bound search for a maximum clique of ``graph``.

        :param graph: adjacency matrix (1 = edge)
        :return: (clique size, clique node indices)
        """
        self.graph = graph
        self.nodeNum = len(graph) # number of nodes
        self.max_bound = float('-inf')
        self.res_biggest_group = [] # best clique found so far
        # node 0: branch on the first node
        current_node = 0
        current_group = []
        self._process(current_node+1, current_group + [current_node]) # branch 1: include node 0
        self._process(current_node+1, current_group ) # branch 2: exclude node 0
        return len(self.res_biggest_group), self.res_biggest_group
    def __check_connected(self,current_node,current_group):
        """Return True when ``current_node`` is adjacent to every clique member.

        :param current_node: candidate node
        :param current_group: nodes already in the clique
        """
        flag=True
        for node in current_group:
            if self.graph[current_node][node]==0:
                flag=False
                break
        return flag
    def __cost_func(self,current_node,current_group):
        """Bounding function: an upper bound on the size of any clique that
        extends the current one (current size + nodes not yet considered).

        :param current_node: next node to branch on
        :param current_group: nodes already in the clique
        """
        C=len(current_group) # current clique size
        return C + self.nodeNum-current_node
    def _process(self, current_node, current_group):
        """Recursive branch-and-bound search for the optimum.

        :param current_node: node being branched on
        :param current_group: clique built so far
        """
        print(current_node, current_group)
        if current_node < self.nodeNum: # inner nodes
            if self.__cost_func(current_node, current_group) >= self.max_bound: # bound: only explore subtrees whose upper bound may beat the incumbent
                # the bounding function is the best clique size this subtree can still reach
                # case 1: put current_node into the clique
                if self.__check_connected(current_node,current_group): # only when it is adjacent to every clique member
                    self._process(current_node + 1, current_group + [current_node])
                # cases 1 and 2 are the two children of this search node, both are visited
                # case 2: leave current_node out of the clique
                self._process(current_node + 1, current_group )
        elif current_node == self.nodeNum: # leaf: feasible clique found
            if len(current_group) > self.max_bound: # better than the incumbent: update the bound
                self.max_bound=len(current_group)
                self.res_biggest_group= array(current_group)
class solution_Circle_permutation:
    """Circle-permutation problem: order circles of given radii along a
    baseline so that the total length of the arrangement is minimal."""
    def circle_permutation_BranchBound(self,R_list):
        """Backtracking + branch-and-bound solver for the circle permutation.

        :param R_list: list of circle radii
        :return: (minimal arrangement length, circle order achieving it)
        """
        self.R_list = R_list
        self.stageNum = len(R_list) # one stage per circle to place
        self.n= len(R_list)
        self.circles = list(range(len(R_list)))
        self.min_length = float('inf') # large initial incumbent for the minimal length
        self.min_length_permutation = []
        stage = 0 # stage 0
        current_x=0 # x coordinate of the first circle placed
        current_length = 0 # current arrangement length
        current_permutation=[] # circles already placed, in order
        self.__process_BranchBound(stage , current_x, current_permutation, current_length)
        return self.min_length, self.min_length_permutation
    def __cost_func(self, k,x_prev,circle_next,current_permutation,available_circles):
        """Bounding function for stage ``k``.

        For the subtree rooted at the partial arrangement extended with
        ``circle_next``, computes a lower bound on the best reachable length.

        :param k: stage index (how many circles are already placed)
        :param x_prev: x(k-1), coordinate of the previously placed circle
        :param circle_next: index of the circle to place next
        :param current_permutation: circles already placed, in order
        :param available_circles: indices of the circles not yet placed
        :return x: x(k), coordinate of the newly placed circle
        :return l: l(k), current arrangement length
        :return L: L(k), lower bound for the subtree
        """
        if k==0:
            r_first =self.R_list[circle_next] # r(0)
            r_prev=0 # first placement: r(k-1)=0
        else:
            circle_prev = current_permutation[-1] # index of the last circle in the arrangement
            circle_first = current_permutation[0] # index of the first circle in the arrangement
            r_prev = self.R_list[circle_prev] # r(k-1)
            r_first = self.R_list[circle_first] # r(0)
        r = self.R_list[circle_next] # r(k)
        d=2*sqrt(r*r_prev) # d(k), horizontal distance between the two circle centres
        x=x_prev+d # x(k), coordinate of circle k
        l= x+r+r_first # l(k)
        min_r= min([self.R_list[circle] for circle in available_circles]) # smallest radius among the unplaced circles
        L= x+(self.n-(k+1))*(2*min_r)+min_r+r_first # L(k)
        return float("%0.2f"%x), float("%0.2f"%l),float("%0.2f"%L) # precision control: keep 2 decimal places
    def __process_BranchBound(self, stage, current_x,current_permutation, current_length):
        """Recursive search over circle orders with bounding.

        :param stage: number of circles already placed
        :param current_x: coordinate of the last placed circle
        :param current_permutation: current circle order
        :param current_length: current arrangement length
        """
        if stage < self.stageNum : # intermediate stages: place one more circle
            available_circles=set(self.circles) - set(current_permutation)
            for circle_next in available_circles:
                x,l,L=self.__cost_func(stage,current_x, circle_next, current_permutation,
                                       available_circles)
                if L >= self.min_length: # bound: the lower bound already reaches the incumbent, prune this branch
                    continue
                self.__process_BranchBound(stage+1, x , current_permutation+[circle_next], l)
        elif stage == self.stageNum: # last stage: a complete feasible arrangement
            if current_length < self.min_length:
                self.min_length = current_length
                self.min_length_permutation = current_permutation
class solution_Continuous_postage:
    """Continuous-postage problem.

    Choose n stamp denominations (at most m stamps per letter, face values
    <= 100, the first denomination is always 1) so that the range of
    postages 1..r that can be formed is as large as possible.
    """
    def continuous_postage(self,n,m,max_value):
        """Backtracking over denominations + dynamic programming for the reach.

        :param n: number of stamp denominations
        :param m: total number of stamps usable per letter
        :param max_value: maximal face value of a stamp
        :return: (largest continuous postage r, chosen denominations)
        """
        self.n= n
        self.m = m
        self.column_num=max_value*self.m+1
        # states[i][j]: minimal number of stamps needed to reach postage j
        # using only the first i denominations
        self.states= zeros((self.n,self.column_num),dtype=int)
        self.max_continuous_postage = float('-inf') # best continuous postage found
        self.max_continuous_postage_stamps = [] # denominations achieving it
        # 1. initial state: only the first denomination (face value 1) exists
        stamp_list=[1]
        stage=0
        self.states[0,:]= list(range(self.column_num))
        r= self.m
        self.__process(stage+1,stamp_list,r)
        return self.max_continuous_postage,self.max_continuous_postage_stamps
    def __cal_continuous_postage(self,stage,next_stamp,states=None):
        """Dynamic-programming step: continuous postage reachable once
        ``next_stamp`` is added to the current denominations (same recurrence
        shape as the coin-change / knapsack problem).

        :param stage: index of the denomination being added
        :param next_stamp: face value of the denomination being added
        :param states: DP table to update (defaults to self.states)
        :return: (r, states) -- largest postage before the first gap, and the table
        """
        if states==None:
            states=self.states
        r=0
        for j in range(0,self.column_num): # total postage j
            min_num=float('inf') # fewest stamps needed to reach postage j
            min_num_t=0 # how many stamps of value next_stamp that solution uses
            for t in range(1,self.m+1):
                value= j-t*next_stamp # remaining postage after using t stamps of value next_stamp
                if value>=0:
                    current_num= t+ states[stage-1,value]
                    if current_num < min_num:
                        min_num=current_num
                        min_num_t=t
            # keep the better of "use next_stamp" and "do not use next_stamp"
            states[stage,j]= min(min_num,self.states[stage-1,j])
        # find the break point: the first postage needing more than m stamps
        for j in range(0, self.column_num):
            if states[stage,j] > self.m: # postage j needs more stamps than available: r is just before it
                r=j-1
                break
        return r,states
    def __process(self,stage,stamp_list,r):
        """Backtracking over the candidate next denominations.

        :param stage: index of the denomination to choose next
        :param stamp_list: denominations chosen so far
        :param r: continuous postage reachable with the current denominations
        """
        if stage < self.n : # intermediate stages: pick one more denomination
            print('stage:{} ,x:{} ,r:{}'.format(stage,stamp_list, r))
            next_stamp_lower_bound=stamp_list[-1]+1 # smallest admissible next denomination
            next_stamp_upper_bound=r+1 # largest admissible next denomination (keeps the range gap-free)
            print('lower_bound:{} upper_bound:{}'.format(next_stamp_lower_bound,next_stamp_upper_bound))
            for next_stamp in range(next_stamp_lower_bound,next_stamp_upper_bound+1):
                r,_=self.__cal_continuous_postage(stage,next_stamp)
                self.__process(stage+1, stamp_list+[next_stamp], r)
                self.states[stage:, :] = zeros((self.n-stage,self.column_num),dtype=int) # reset self.states before trying the next candidate
        elif stage == self.n: # leaf: a complete denomination set
            print("--------------------------")
            print('stage:{} ,x:{} ,r:{}'.format(stage, stamp_list, r))
            print("--------------------------")
            if r>self.max_continuous_postage:
                self.max_continuous_postage=r
                self.max_continuous_postage_stamps=array(stamp_list)
class Test:
    """Smoke-test driver that exercises the solution classes in this file.

    The original version carried many commented-out example invocations and
    their fixture data; the dead code has been removed and the example
    inputs summarised in comments so they can be re-enabled quickly.  Only
    the continuous-postage example actually runs.
    """
    def test_all(self):
        # Instantiate each solver in the original order (constructors may
        # initialise internal state).
        sol = solution_N_quene()                      # e.g. sol.N_quene(4)
        sol = solution_zero_one_bag_weight()          # e.g. items [('a',2),('b',2),('c',4),('d',6),('e',3)], capacity 9
        sol = solution_zero_one_bag_value()           # e.g. weights [8,6,4,3], values [12,11,9,8], capacity 13
        sol = soulution_bag_problem()                 # e.g. bag_problem_BranchBound([2,3,4,7], [1,3,5,9], 10)
        sol = solution_loading_problem()              # e.g. loading_problem_recursive([90,80,40,30,20,12,10], 152, 130)
        sol2 = solution_pattern_match()               # e.g. sol2.match('a*d', 'abcd')
        sol3 = solution_Traveling_Salesman_Problem()  # e.g. sol3.tsp_dp(city_distance, 0)
        sol4 = solution_Graph_Coloring_Problem()      # e.g. sol4.mGCP(mapGraph, 4)
        sol5 = solution_biggest_group()               # e.g. sol5.biggest_group_BranchBound(Graph)
        sol6 = solution_Circle_permutation()          # e.g. sol6.circle_permutation_BranchBound([1,1,2,2,3,5])
        sol7 = solution_Continuous_postage()
        # Continuous postage: n=4 denominations, m=3 stamps, max face value 10.
        print(sol7.continuous_postage(4, 3, 10))
if __name__ == '__main__':
    # Run the demo driver when executed as a script.
    Test().test_all()
| 26.608878 | 136 | 0.532746 |
acf38138ef277858054c43eb0a120778e84dad17 | 32,190 | py | Python | PythonLibrairies/ShapeQuantifierCore.py | jbvimort/LongitudinalQuantification | 6fdc51f4bb67133af5e5c52bd8fea976cb46d265 | [
"Apache-2.0"
] | null | null | null | PythonLibrairies/ShapeQuantifierCore.py | jbvimort/LongitudinalQuantification | 6fdc51f4bb67133af5e5c52bd8fea976cb46d265 | [
"Apache-2.0"
] | null | null | null | PythonLibrairies/ShapeQuantifierCore.py | jbvimort/LongitudinalQuantification | 6fdc51f4bb67133af5e5c52bd8fea976cb46d265 | [
"Apache-2.0"
] | null | null | null | import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import numpy
import json
import time
#
# CalculateDisplacement
#
class ShapeQuantifierCore():
def __init__(self, parent = None, interface = None):
    """Shared logic for the Shape Quantifier family of Slicer modules.

    :param parent: optional ScriptedLoadableModule parent; its title is blanked
    :param interface: the module widget owning this logic (used to reach its
                      child widgets and combo boxes)
    """
    if parent:
        parent.title = " "
    self.selectedFidList = None  # currently active vtkMRMLMarkupsFiducialNode
    self.selectedModel = None    # currently active model node
    self.interface = interface
def get(self, objectName):
    """Return the widget named *objectName* inside the module's UI, or None."""
    return self.findWidget(self.interface.widget, objectName)
def findWidget(self, widget, objectName):
    """Depth-first search of a widget tree.

    Returns the first widget (pre-order) whose ``objectName`` equals
    *objectName*, or None when no descendant matches.
    """
    if widget.objectName == objectName:
        return widget
    for child in widget.children():
        match = self.findWidget(child, objectName)
        if match:
            return match
    return None
def UpdateThreeDView(self, landmarkLabel):
    """Update the 3D view: keep only the landmark named *landmarkLabel*
    unlocked on every fiducial list, and show the selected model's scalar
    overlay when that landmark exists.

    :param landmarkLabel: label of the landmark that stays editable
    """
    if not self.selectedFidList:
        return
    if not self.selectedModel:
        return
    print "UpdateThreeDView"
    active = self.selectedFidList
    # deactivate (lock) all landmarks except the selected one
    list = slicer.mrmlScene.GetNodesByClass("vtkMRMLMarkupsFiducialNode")  # NOTE(review): shadows the builtin `list`
    end = list.GetNumberOfItems()
    selectedFidReflID = self.findIDFromLabel(active,landmarkLabel)
    for i in range(0,end):
        fidList = list.GetItemAsObject(i)
        landmarkDescription = self.decodeJSON(fidList.GetAttribute("landmarkDescription"))
        for key in landmarkDescription.iterkeys():
            markupsIndex = fidList.GetMarkupIndexByID(key)
            if key != selectedFidReflID:
                fidList.SetNthMarkupLocked(markupsIndex, True)
            else:
                fidList.SetNthMarkupLocked(markupsIndex, False)
    displayNode = self.selectedModel.GetModelDisplayNode()
    displayNode.SetScalarVisibility(False)
    if selectedFidReflID != False:
        # a landmark with that label exists: show the scalar (ROI) overlay
        displayNode.SetScalarVisibility(True)
def createIntermediateHardenModel(self, model):
    """Return a hidden working copy of *model* with its transform hardened.

    The copy is named "SurfaceRegistration_<model>_hardenCopy_<pid>" and is
    reused when already present in the scene; landmark projection is done
    against this hardened geometry.

    :param model: vtkMRMLModelNode to copy
    :return: the hardened vtkMRMLModelNode
    """
    hardenModel = slicer.mrmlScene.GetNodesByName("SurfaceRegistration_" + model.GetName() + "_hardenCopy_" + str(
        slicer.app.applicationPid())).GetItemAsObject(0)
    if hardenModel is None:
        # no working copy yet: build one from a deep copy of the polydata
        hardenModel = slicer.vtkMRMLModelNode()
        hardenPolyData = vtk.vtkPolyData()
        hardenPolyData.DeepCopy(model.GetPolyData())
        hardenModel.SetAndObservePolyData(hardenPolyData)
        hardenModel.SetName(
            "SurfaceRegistration_" + model.GetName() + "_hardenCopy_" + str(slicer.app.applicationPid()))
        if model.GetParentTransformNode():
            hardenModel.SetAndObserveTransformNodeID(model.GetParentTransformNode().GetID())
        hardenModel.HideFromEditorsOn()  # keep the working copy out of the node editors
        slicer.mrmlScene.AddNode(hardenModel)
    logic = slicer.vtkSlicerTransformLogic()
    logic.hardenTransform(hardenModel)
    return hardenModel
def onModelModified(self, obj, event):
    """Observer for TransformModifiedEvent on a model node.

    Rebuilds the hardened copy of *obj* and re-projects, onto the new
    geometry, every projected landmark of every fiducial list connected to
    that model.
    """
    # recompute the harden model
    hardenModel = self.createIntermediateHardenModel(obj)
    obj.SetAttribute("hardenModelID",hardenModel.GetID())
    # for each fiducial list
    list = slicer.mrmlScene.GetNodesByClass("vtkMRMLMarkupsFiducialNode")  # NOTE(review): shadows the builtin `list`
    end = list.GetNumberOfItems()
    for i in range(0,end):
        # If landmarks are projected on the modified model
        fidList = list.GetItemAsObject(i)
        if fidList.GetAttribute("connectedModelID"):
            if fidList.GetAttribute("connectedModelID") == obj.GetID():
                # replace the harden model with the new one
                fidList.SetAttribute("hardenModelID",hardenModel.GetID())
                # reproject the fiducials on the new model
                landmarkDescription = self.decodeJSON(fidList.GetAttribute("landmarkDescription"))
                for n in range(fidList.GetNumberOfMarkups()):
                    markupID = fidList.GetNthMarkupID(n)
                    if landmarkDescription[markupID]["projection"]["isProjected"] == True:
                        hardenModel = slicer.app.mrmlScene().GetNodeByID(fidList.GetAttribute("hardenModelID"))
                        markupsIndex = fidList.GetMarkupIndexByID(markupID)
                        self.replaceLandmark(hardenModel.GetPolyData(), fidList, markupsIndex,
                                             landmarkDescription[markupID]["projection"]["closestPointIndex"])
                fidList.SetAttribute("landmarkDescription",self.encodeJSON(landmarkDescription))
def ModelChanged(self, inputModelSelector, inputLandmarksSelector):
    """Handle a change of the model selector.

    When a model is selected: remember it, build its hardened copy, observe
    its transform changes and enable the landmark selector.  When cleared:
    clear and disable the landmark selector.
    """
    inputModel = inputModelSelector.currentNode()
    # if a Model Node is present
    if inputModel:
        self.selectedModel = inputModel
        hardenModel = self.createIntermediateHardenModel(inputModel)
        inputModel.SetAttribute("hardenModelID",hardenModel.GetID())
        modelModifieTagEvent = inputModel.AddObserver(inputModel.TransformModifiedEvent, self.onModelModified)
        # store the observer tag as a JSON attribute so it can be removed later
        inputModel.SetAttribute("modelModifieTagEvent",self.encodeJSON({'modelModifieTagEvent':modelModifieTagEvent}))
        inputLandmarksSelector.setEnabled(True)
    # if no model is selected
    else:
        # Update the fiducial list selector
        inputLandmarksSelector.setCurrentNode(None)
        inputLandmarksSelector.setEnabled(False)
def isUnderTransform(self, markups):
    """If *markups* has a parent transform, ask the user whether to harden it.

    :param markups: vtkMRMLMarkupsFiducialNode to check
    :return: False when the node has no transform or the transform was
             hardened on user confirmation; True when the user declined
             (node left untouched).
    """
    if markups.GetParentTransformNode():
        messageBox = ctk.ctkMessageBox()
        messageBox.setWindowTitle(" /!\ WARNING /!\ ")
        messageBox.setIcon(messageBox.Warning)
        messageBox.setText("Your Markup Fiducial Node is currently modified by a transform,"
                           "if you choose to continue the program will apply the transform"
                           "before doing anything else!")
        messageBox.setInformativeText("Do you want to continue?")
        messageBox.setStandardButtons(messageBox.No | messageBox.Yes)
        choice = messageBox.exec_()
        if choice == messageBox.Yes:
            logic = slicer.vtkSlicerTransformLogic()
            logic.hardenTransform(markups)
            return False
        else:
            messageBox.setText(" Node not modified")
            messageBox.setStandardButtons(messageBox.Ok)
            messageBox.setInformativeText("")
            messageBox.exec_()
            return True
    else:
        return False
def connectedModelChangement(self):
    """Warn the user that the fiducial list is already projected on another
    model and ask whether to re-project it.

    :return: True when the user accepts the re-projection, False otherwise.
    """
    messageBox = ctk.ctkMessageBox()
    messageBox.setWindowTitle(" /!\ WARNING /!\ ")
    messageBox.setIcon(messageBox.Warning)
    messageBox.setText("The Markup Fiducial Node selected is curently projected on an"
                       "other model, if you chose to continue the fiducials will be "
                       "reprojected, and this could impact the functioning of other modules")
    messageBox.setInformativeText("Do you want to continue?")
    messageBox.setStandardButtons(messageBox.No | messageBox.Yes)
    choice = messageBox.exec_()
    if choice == messageBox.Yes:
        return True
    else:
        messageBox.setText(" Node not modified")
        messageBox.setStandardButtons(messageBox.Ok)
        messageBox.setInformativeText("")
        messageBox.exec_()
        return False
def createNewDataStructure(self,landmarks, model, onSurface):
    """Initialise the JSON attribute structure on a fresh fiducial list.

    Each markup gets a description entry (label, ROI radius, projection
    state, mid-point links); the node also stores the connected/hardened
    model IDs, an empty plane description and a clean/dirty flag.

    :param landmarks: vtkMRMLMarkupsFiducialNode to initialise
    :param model: model node the landmarks belong to
    :param onSurface: when True, project each landmark on the model surface
    """
    landmarks.SetAttribute("connectedModelID",model.GetID())
    landmarks.SetAttribute("hardenModelID",model.GetAttribute("hardenModelID"))
    landmarkDescription = dict()
    for n in range(landmarks.GetNumberOfMarkups()):
        markupID = landmarks.GetNthMarkupID(n)
        landmarkDescription[markupID] = dict()
        landmarkLabel = landmarks.GetNthMarkupLabel(n)
        landmarkDescription[markupID]["landmarkLabel"] = landmarkLabel
        landmarkDescription[markupID]["ROIradius"] = 0
        landmarkDescription[markupID]["projection"] = dict()
        if onSurface:
            landmarkDescription[markupID]["projection"]["isProjected"] = True
            hardenModel = slicer.app.mrmlScene().GetNodeByID(landmarks.GetAttribute("hardenModelID"))
            landmarkDescription[markupID]["projection"]["closestPointIndex"] = \
                self.projectOnSurface(hardenModel, landmarks, markupID)
        else:
            landmarkDescription[markupID]["projection"]["isProjected"] = False
            landmarkDescription[markupID]["projection"]["closestPointIndex"] = None
        landmarkDescription[markupID]["midPoint"] = dict()
        landmarkDescription[markupID]["midPoint"]["definedByThisMarkup"] = list()
        landmarkDescription[markupID]["midPoint"]["isMidPoint"] = False
        landmarkDescription[markupID]["midPoint"]["Point1"] = None
        landmarkDescription[markupID]["midPoint"]["Point2"] = None
    landmarks.SetAttribute("landmarkDescription",self.encodeJSON(landmarkDescription))
    planeDescription = dict()
    landmarks.SetAttribute("planeDescription",self.encodeJSON(planeDescription))
    landmarks.SetAttribute("isClean",self.encodeJSON({"isClean":False}))
    landmarks.SetAttribute("lastTransformID",None)
    landmarks.SetAttribute("arrayName",model.GetName() + "_ROI")
def changementOfConnectedModel(self,landmarks, model, onSurface):
    """Re-attach *landmarks* to a different model.

    Updates the connected/hardened model IDs, re-projects every landmark
    that was flagged as projected (or clears the projection when
    ``onSurface`` is False), and marks the list as not clean.
    """
    landmarks.SetAttribute("connectedModelID",model.GetID())
    landmarks.SetAttribute("hardenModelID",model.GetAttribute("hardenModelID"))
    landmarkDescription = self.decodeJSON(landmarks.GetAttribute("landmarkDescription"))
    for n in range(landmarks.GetNumberOfMarkups()):
        markupID = landmarks.GetNthMarkupID(n)
        if onSurface:
            if landmarkDescription[markupID]["projection"]["isProjected"] == True:
                hardenModel = slicer.app.mrmlScene().GetNodeByID(landmarks.GetAttribute("hardenModelID"))
                landmarkDescription[markupID]["projection"]["closestPointIndex"] = \
                    self.projectOnSurface(hardenModel, landmarks, markupID)
        else:
            landmarkDescription[markupID]["projection"]["isProjected"] = False
            landmarkDescription[markupID]["projection"]["closestPointIndex"] = None
    landmarks.SetAttribute("landmarkDescription",self.encodeJSON(landmarkDescription))
    landmarks.SetAttribute("isClean",self.encodeJSON({"isClean":False}))
def connectLandmarks(self, modelSelector, landmarkSelector, onSurface):
    """Connect a fiducial list to a model and (re)install all observers.

    Ensures the landmark node carries the attribute structure expected by
    the module, removes any stale observers, then adds observers for markup
    addition/move/removal (plus plane/line refresh observers for the
    AnglePlanes and Q3DC modules).

    :param modelSelector: node selector holding the model
    :param landmarkSelector: node selector holding the fiducial list
    :param onSurface: when True, landmarks are projected on the model surface
    """
    model = modelSelector.currentNode()
    landmarks = landmarkSelector.currentNode()
    self.selectedFidList = landmarks
    self.selectedModel = model
    if not (model and landmarks):
        return
    if self.isUnderTransform(landmarks):
        # the user refused to harden the transform: deselect the list
        landmarkSelector.setCurrentNode(None)
        return
    connectedModelID = landmarks.GetAttribute("connectedModelID")
    # Remove stale observers; the attributes may be absent on a fresh node,
    # hence the broad try/except blocks.
    try:
        tag = self.decodeJSON(landmarks.GetAttribute("MarkupAddedEventTag"))
        landmarks.RemoveObserver(tag["MarkupAddedEventTag"])
        print "Markups adding observer removed!"
    except:
        pass
    try:
        tag = self.decodeJSON(landmarks.GetAttribute("PointModifiedEventTag"))
        landmarks.RemoveObserver(tag["PointModifiedEventTag"])
        print "Markups moving observer removed!"
    except:
        pass
    try:
        tag = self.decodeJSON(landmarks.GetAttribute("MarkupRemovedEventTag"))
        landmarks.RemoveObserver(tag["MarkupRemovedEventTag"])
        print "Markups removing observers removed!"
    except:
        pass
    # NOTE(review): `is` on string literals relies on CPython interning; == would be safer
    if self.interface.moduleName is 'AnglePlanes':
        try:
            tag = self.decodeJSON(landmarks.GetAttribute("UpdatesPlanesEventTag"))
            landmarks.RemoveObserver(tag["UpdatesPlanesEventTag"])
            print "Planes modification observers removed!"
        except:
            pass
    if self.interface.moduleName is 'Q3DC':
        try:
            tag = self.decodeJSON(landmarks.GetAttribute("UpdatesLinesEventTag"))
            landmarks.RemoveObserver(tag["UpdatesLinesEventTag"])
            print "Lines modification observers removed!"
        except:
            pass
    if connectedModelID:
        if connectedModelID != model.GetID():
            # the list was connected to a different model: ask before re-projecting
            if self.connectedModelChangement():
                self.changementOfConnectedModel(landmarks, model, onSurface)
            else:
                landmarkSelector.setCurrentNode(None)
                return
        else:
            landmarks.SetAttribute("hardenModelID",model.GetAttribute("hardenModelID"))
    # creation of the data structure
    else:
        self.createNewDataStructure(landmarks, model, onSurface)
    # update of the landmark Combo Box
    self.resetAllLandmarkComboboxes(landmarks)
    # adding of listeners
    MarkupAddedEventTag = landmarks.AddObserver(landmarks.MarkupAddedEvent, self.onMarkupAddedEvent)
    landmarks.SetAttribute("MarkupAddedEventTag",self.encodeJSON({"MarkupAddedEventTag":MarkupAddedEventTag}))
    PointModifiedEventTag = landmarks.AddObserver(landmarks.PointModifiedEvent, self.onPointModifiedEvent)
    landmarks.SetAttribute("PointModifiedEventTag",self.encodeJSON({"PointModifiedEventTag":PointModifiedEventTag}))
    MarkupRemovedEventTag = landmarks.AddObserver(landmarks.MarkupRemovedEvent, self.onMarkupRemovedEvent)
    landmarks.SetAttribute("MarkupRemovedEventTag",self.encodeJSON({"MarkupRemovedEventTag":MarkupRemovedEventTag}))
    if self.interface.moduleName is 'AnglePlanes':
        UpdatesPlanesEventTag = landmarks.AddObserver(landmarks.PointModifiedEvent, self.updatePlanesEvent)
        landmarks.SetAttribute("UpdatesPlanesEventTag",self.encodeJSON({"UpdatesPlanesEventTag":UpdatesPlanesEventTag}))
    if self.interface.moduleName is 'Q3DC':
        UpdatesLinesEventTag = landmarks.AddObserver(landmarks.PointModifiedEvent, self.updateLinesEvent)
        landmarks.SetAttribute("UpdatesLinesEventTag",self.encodeJSON({"UpdatesLinesEventTag":UpdatesLinesEventTag}))
# Called when a landmark is added on a model
def onMarkupAddedEvent(self, obj, event):
    """Observer for MarkupAddedEvent: register the new markup in the JSON
    landmark description, refresh the combo boxes and schedule its surface
    projection (performed by onPointModifiedEvent)."""
    print "------markup adding-------"
    landmarkDescription = self.decodeJSON(obj.GetAttribute("landmarkDescription"))
    numOfMarkups = obj.GetNumberOfMarkups()
    markupID = obj.GetNthMarkupID(numOfMarkups - 1)  # the markup just added is the last one
    landmarkDescription[markupID] = dict()
    landmarkLabel = obj.GetNthMarkupLabel(numOfMarkups - 1)
    landmarkDescription[markupID]["landmarkLabel"] = landmarkLabel
    landmarkDescription[markupID]["ROIradius"] = 0
    landmarkDescription[markupID]["projection"] = dict()
    landmarkDescription[markupID]["projection"]["isProjected"] = True
    # The landmark will be projected by onPointModifiedEvent
    landmarkDescription[markupID]["midPoint"] = dict()
    landmarkDescription[markupID]["midPoint"]["definedByThisMarkup"] = list()
    landmarkDescription[markupID]["midPoint"]["isMidPoint"] = False
    landmarkDescription[markupID]["midPoint"]["Point1"] = None
    landmarkDescription[markupID]["midPoint"]["Point2"] = None
    obj.SetAttribute("landmarkDescription",self.encodeJSON(landmarkDescription))
    self.updateAllLandmarkComboBox(obj, markupID)
    self.interface.UpdateInterface()
    # defer the projection until the markup position has been set
    qt.QTimer.singleShot(0, lambda : self.onPointModifiedEvent(obj,None))
def updateMidPoint(self, fidList, landmarkID):
    """Recursively reposition the mid-points defined from *landmarkID*.

    Every markup listed in ``definedByThisMarkup`` is moved to the midpoint
    of its two defining landmarks, re-projected on the hardened model when
    flagged, and its own dependent mid-points are updated in turn.
    """
    landmarkDescription = self.decodeJSON(fidList.GetAttribute("landmarkDescription"))
    for midPointID in landmarkDescription[landmarkID]["midPoint"]["definedByThisMarkup"]:
        if landmarkDescription[midPointID]["midPoint"]["isMidPoint"]:
            landmark1ID = landmarkDescription[midPointID]["midPoint"]["Point1"]
            landmark2ID = landmarkDescription[midPointID]["midPoint"]["Point2"]
            coord = self.calculateMidPointCoord(fidList, landmark1ID, landmark2ID)
            index = fidList.GetMarkupIndexByID(midPointID)
            fidList.SetNthFiducialPositionFromArray(index, coord)
            if landmarkDescription[midPointID]["projection"]["isProjected"]:
                hardenModel = slicer.app.mrmlScene().GetNodeByID(fidList.GetAttribute("hardenModelID"))
                landmarkDescription[midPointID]["projection"]["closestPointIndex"] = \
                    self.projectOnSurface(hardenModel, fidList, midPointID)
                fidList.SetAttribute("landmarkDescription",self.encodeJSON(landmarkDescription))
            self.updateMidPoint(fidList, midPointID)
# Called when a landmarks is moved
def onPointModifiedEvent(self, obj, event):
    """Observer for PointModifiedEvent: re-project the currently selected
    landmark, update its dependent mid-points and recompute the ROI.

    The observer is temporarily removed while the node is modified to avoid
    re-entrant callbacks, and re-armed at the end.
    """
    print "----onPointModifiedEvent-----"
    landmarkDescription = self.decodeJSON(obj.GetAttribute("landmarkDescription"))
    if not landmarkDescription:
        return
    selectedLandmarkID = self.findIDFromLabel(obj, self.interface.landmarkComboBox.currentText)
    # remove observer to make sure, the callback function won't work..
    tag = self.decodeJSON(obj.GetAttribute("PointModifiedEventTag"))
    obj.RemoveObserver(tag["PointModifiedEventTag"])
    if selectedLandmarkID:
        activeLandmarkState = landmarkDescription[selectedLandmarkID]
        if activeLandmarkState["projection"]["isProjected"]:
            hardenModel = slicer.app.mrmlScene().GetNodeByID(obj.GetAttribute("hardenModelID"))
            activeLandmarkState["projection"]["closestPointIndex"] = \
                self.projectOnSurface(hardenModel, obj, selectedLandmarkID)
            obj.SetAttribute("landmarkDescription",self.encodeJSON(landmarkDescription))
        self.updateMidPoint(obj,selectedLandmarkID)
        self.findROI(obj)
    # NOTE(review): short delay before re-arming the observer -- presumably to
    # let pending move events settle; confirm before removing
    time.sleep(0.08)
    # Add the observer again
    PointModifiedEventTag = obj.AddObserver(obj.PointModifiedEvent, self.onPointModifiedEvent)
    obj.SetAttribute("PointModifiedEventTag",self.encodeJSON({"PointModifiedEventTag":PointModifiedEventTag}))
    def onMarkupRemovedEvent(self, obj, event):
        """Handle deletion of a markup: collect described landmark IDs that no
        longer exist in the fiducial node and rebuild every landmark combobox.

        NOTE(review): the ``IDs`` list gathered below is currently unused --
        the actual cleanup of the description is commented out, so stale
        entries remain in ``landmarkDescription``.
        """
        print "------markup deleting-------"
        landmarkDescription = self.decodeJSON(obj.GetAttribute("landmarkDescription"))
        IDs = []
        for ID, value in landmarkDescription.iteritems():
            # A described ID with no matching markup index has been removed
            isFound = False
            for n in range(obj.GetNumberOfMarkups()):
                markupID = obj.GetNthMarkupID(n)
                if ID == markupID:
                    isFound = True
            if not isFound:
                IDs.append(ID)
        self.resetAllLandmarkComboboxes(obj)
        # for ID in IDs:
        #     self.deleteLandmark(obj, landmarkDescription[ID]["landmarkLabel"])
        #     landmarkDescription.pop(ID,None)
        # obj.SetAttribute("landmarkDescription",self.encodeJSON(landmarkDescription))
def updatePlanesEvent(self, obj, event):
for planeControls in self.interface.planeControlsDictionary.values():
if planeControls.fidlist is obj:
planeControls.update()
def updateLinesEvent(self, obj, event):
if self.interface.line1LAComboBox.currentText != '' and self.interface.line1LBComboBox.currentText != '' \
and self.interface.line1LAComboBox.currentText != self.interface.line1LBComboBox.currentText :
# Clear Lines, then define new ones
if self.interface.renderer1 :
self.interface.renderer1.RemoveActor(self.interface.actor1)
self.interface.renderer1, self.interface.actor1 = \
self.interface.logic.drawLineBetween2Landmark(self.interface.line1LAComboBox.currentText,
self.interface.line1LBComboBox.currentText,
self.interface.fidListComboBoxline1LA.currentNode(),
self.interface.fidListComboBoxline1LB.currentNode())
if self.interface.line2LAComboBox.currentText != '' and self.interface.line2LBComboBox.currentText != '' \
and self.interface.line2LAComboBox.currentText != self.interface.line2LBComboBox.currentText :
if self.interface.renderer2 :
self.interface.renderer2.RemoveActor(self.interface.actor2)
self.interface.renderer2, self.interface.actor2 = \
self.interface.logic.drawLineBetween2Landmark(self.interface.line2LAComboBox.currentText,
self.interface.line2LBComboBox.currentText,
self.interface.fidListComboBoxline2LA.currentNode(),
self.interface.fidListComboBoxline2LB.currentNode())
if self.interface.lineLAComboBox.currentText != '' and self.interface.lineLBComboBox.currentText != '' \
and self.interface.lineLAComboBox.currentText != self.interface.lineLBComboBox.currentText :
if self.interface.renderer3 :
self.interface.renderer3.RemoveActor(self.interface.actor3)
self.interface.renderer3, self.interface.actor3 = \
self.interface.logic.drawLineBetween2Landmark(self.interface.lineLAComboBox.currentText,
self.interface.lineLBComboBox.currentText,
self.interface.fidListComboBoxlineLA.currentNode(),
self.interface.fidListComboBoxlineLB.currentNode())
def findIDFromLabel(self, fidList, landmarkLabel):
# find the ID of the markupsNode from the label of a landmark!
landmarkDescription = self.decodeJSON(fidList.GetAttribute("landmarkDescription"))
for ID, value in landmarkDescription.iteritems():
if value["landmarkLabel"] == landmarkLabel:
return ID
return None
    def getClosestPointIndex(self, fidNode, inputPolyData, landmarkID):
        """Return the index of the polydata vertex nearest to a fiducial.

        NOTE(review): despite its name, ``landmarkID`` is used as the
        fiducial *index* in ``fidNode`` (see GetNthFiducialPosition).
        """
        landmarkCoord = numpy.zeros(3)  # filled in-place by the call below
        fidNode.GetNthFiducialPosition(landmarkID, landmarkCoord)
        # vtkPointLocator builds a spatial subdivision of the polydata points
        # for fast nearest-vertex queries.
        pointLocator = vtk.vtkPointLocator()
        pointLocator.SetDataSet(inputPolyData)
        pointLocator.AutomaticOn()
        pointLocator.BuildLocator()
        indexClosestPoint = pointLocator.FindClosestPoint(landmarkCoord)
        return indexClosestPoint
    def replaceLandmark(self, inputModelPolyData, fidNode, landmarkID, indexClosestPoint):
        """Move the fiducial at index *landmarkID* onto the model vertex
        *indexClosestPoint* (updates the fiducial position in place)."""
        landmarkCoord = [-1, -1, -1]  # placeholder; overwritten by GetPoint
        inputModelPolyData.GetPoints().GetPoint(indexClosestPoint, landmarkCoord)
        fidNode.SetNthFiducialPositionFromArray(landmarkID,landmarkCoord)
    def projectOnSurface(self, modelOnProject, fidNode, selectedFidReflID):
        """Project the fiducial *selectedFidReflID* onto the model surface.

        Moves the fiducial to the nearest model vertex and returns that
        vertex index.  Returns None implicitly when *selectedFidReflID*
        is falsy (nothing selected).
        """
        if selectedFidReflID:
            markupsIndex = fidNode.GetMarkupIndexByID(selectedFidReflID)
            indexClosestPoint = self.getClosestPointIndex(fidNode, modelOnProject.GetPolyData(), markupsIndex)
            self.replaceLandmark(modelOnProject.GetPolyData(), fidNode, markupsIndex, indexClosestPoint)
            return indexClosestPoint
def calculateMidPointCoord(self, fidList, landmark1ID, landmark2ID):
"""Set the midpoint when you know the the mrml nodes"""
landmark1Index = fidList.GetMarkupIndexByID(landmark1ID)
landmark2Index = fidList.GetMarkupIndexByID(landmark2ID)
coord1 = [-1, -1, -1]
coord2 = [-1, -1, -1]
fidList.GetNthFiducialPosition(landmark1Index, coord1)
fidList.GetNthFiducialPosition(landmark2Index, coord2)
midCoord = [-1, -1, -1]
midCoord[0] = (coord1[0] + coord2[0])/2
midCoord[1] = (coord1[1] + coord2[1])/2
midCoord[2] = (coord1[2] + coord2[2])/2
return midCoord
    def addLandmarkToCombox(self, fidList, combobox, markupID):
        """Append the stored label of landmark *markupID* to *combobox*.

        No-op when *fidList* is missing.
        """
        if not fidList:
            return
        landmarkDescription = self.decodeJSON(fidList.GetAttribute("landmarkDescription"))
        combobox.addItem(landmarkDescription[markupID]["landmarkLabel"])
    def updateLandmarkComboBox(self, fidList, combobox, displayMidPoint = True):
        """Rebuild *combobox* from the markups of *fidList*.

        When ``displayMidPoint`` is False, landmarks flagged as midpoints in
        the JSON description are skipped.  Signals are blocked during the
        rebuild so listeners do not react to intermediate states; the last
        item is selected at the end.

        NOTE(review): when *fidList* is falsy this returns early with
        blockSignals(True) still in effect -- confirm whether that is
        intentional.
        """
        combobox.blockSignals(True)
        combobox.clear()
        if not fidList:
            return
        landmarkDescription = self.decodeJSON(fidList.GetAttribute("landmarkDescription"))
        numOfFid = fidList.GetNumberOfMarkups()
        if numOfFid > 0:
            for i in range(0, numOfFid):
                if displayMidPoint is False:
                    # Only add landmarks that are not midpoints
                    ID = fidList.GetNthMarkupID(i)
                    if not landmarkDescription[ID]["midPoint"]["isMidPoint"]:
                        landmarkLabel = fidList.GetNthMarkupLabel(i)
                        combobox.addItem(landmarkLabel)
                else:
                    landmarkLabel = fidList.GetNthMarkupLabel(i)
                    combobox.addItem(landmarkLabel)
        combobox.setCurrentIndex(combobox.count - 1)
        combobox.blockSignals(False)
    def updateAllLandmarkComboBox(self, fidList, markupID):
        """Refresh the main landmark combobox (midpoints hidden) and append the
        new landmark *markupID* to every module-specific combobox, when the
        module logic exposes ``getComboboxesToUpdate``."""
        # update of the Combobox that are always updated
        self.updateLandmarkComboBox(fidList, self.interface.landmarkComboBox, False)
        if hasattr(self.interface.logic, 'getComboboxesToUpdate'):
            comboboxesToUpdate = self.interface.logic.getComboboxesToUpdate(fidList)
            for combobox in comboboxesToUpdate:
                self.addLandmarkToCombox(fidList, combobox, markupID)
    def deleteLandmark(self, fidList, label, markupID):
        """Remove the entry labelled *label* from the main landmark combobox
        and from every module-specific combobox.

        NOTE(review): the *markupID* parameter is unused here.
        """
        # update of the Combobox that are always updated
        self.interface.landmarkComboBox.removeItem(self.interface.landmarkComboBox.findText(label))
        if hasattr(self.interface.logic, 'getComboboxesToUpdate'):
            comboboxesToUpdate = self.interface.logic.getComboboxesToUpdate(fidList)
            for combobox in comboboxesToUpdate:
                combobox.removeItem(combobox.findText(label))
def resetAllLandmarkComboboxes(self, fidList):
# update of the Combobox that are always updated
self.updateLandmarkComboBox(fidList, self.interface.landmarkComboBox, False)
if hasattr(self.interface.logic, 'getComboboxesToUpdate'):
comboboxesToUpdate = self.interface.logic.getComboboxesToUpdate(fidList)
for combobox in comboboxesToUpdate:
self.updateLandmarkComboBox(fidList, combobox)
def GetConnectedVertices(self, connectedVerticesIDList, polyData, pointID):
# Return IDs of all the vertices that compose the first neighbor.
cellList = vtk.vtkIdList()
connectedVerticesIDList.InsertUniqueId(pointID)
# Get cells that vertex 'pointID' belongs to
polyData.GetPointCells(pointID, cellList)
numberOfIds = cellList.GetNumberOfIds()
for i in range(0, numberOfIds):
# Get points which compose all cells
pointIdList = vtk.vtkIdList()
polyData.GetCellPoints(cellList.GetId(i), pointIdList)
for j in range(0, pointIdList.GetNumberOfIds()):
connectedVerticesIDList.InsertUniqueId(pointIdList.GetId(j))
return connectedVerticesIDList
    def addArrayFromIdList(self, connectedIdList, inputModelNode, arrayName):
        """Attach (or overwrite) a point-data scalar array *arrayName* on the
        model: 1.0 for every point ID in *connectedIdList*, 0.0 elsewhere,
        with a two-entry lookup table (model colour -> red) for display.
        Returns True on success; returns None when *inputModelNode* is missing.
        """
        if not inputModelNode:
            return
        inputModelNodePolydata = inputModelNode.GetPolyData()
        pointData = inputModelNodePolydata.GetPointData()
        numberofIds = connectedIdList.GetNumberOfIds()
        hasArrayInt = pointData.HasArray(arrayName)
        if hasArrayInt == 1: # ROI Array found
            pointData.RemoveArray(arrayName)
        arrayToAdd = vtk.vtkDoubleArray()
        arrayToAdd.SetName(arrayName)
        # Initialise all points to 0.0, then flag the ROI points with 1.0
        for i in range(0, inputModelNodePolydata.GetNumberOfPoints()):
            arrayToAdd.InsertNextValue(0.0)
        for i in range(0, numberofIds):
            arrayToAdd.SetValue(connectedIdList.GetId(i), 1.0)
        # Two-colour LUT: entry 0 = current model colour, entry 1 = red
        lut = vtk.vtkLookupTable()
        tableSize = 2
        lut.SetNumberOfTableValues(tableSize)
        lut.Build()
        ID = inputModelNode.GetDisplayNodeID()
        # NOTE(review): the result of this lookup is discarded (duplicate of
        # the next line).
        slicer.app.mrmlScene().GetNodeByID(ID)
        displayNode = slicer.app.mrmlScene().GetNodeByID(ID)
        rgb = displayNode.GetColor()
        lut.SetTableValue(0, rgb[0], rgb[1], rgb[2], 1)
        lut.SetTableValue(1, 1.0, 0.0, 0.0, 1)
        arrayToAdd.SetLookupTable(lut)
        pointData.AddArray(arrayToAdd)
        inputModelNodePolydata.Modified()
        return True
def displayROI(self, inputModelNode, scalarName):
PolyData = inputModelNode.GetPolyData()
PolyData.Modified()
displayNode = inputModelNode.GetModelDisplayNode()
displayNode.SetScalarVisibility(False)
disabledModify = displayNode.StartModify()
displayNode.SetActiveScalarName(scalarName)
displayNode.SetScalarVisibility(True)
displayNode.EndModify(disabledModify)
    def defineNeighbor(self, connectedVerticesList, inputModelNodePolyData, indexClosestPoint, distance):
        """Grow *connectedVerticesList* to the vertices within *distance* edge
        rings of *indexClosestPoint*, and return the list.

        Each outer pass expands the frontier by one ring: the inner ``range``
        is evaluated once per pass, so vertices appended during a pass are
        only expanded in the following pass.  Do not "simplify" this into a
        single while-loop over a growing list -- that would change the ring
        semantics.
        """
        self.GetConnectedVertices(connectedVerticesList, inputModelNodePolyData, indexClosestPoint)
        if distance > 1:
            for dist in range(1, int(distance)):
                for i in range(0, connectedVerticesList.GetNumberOfIds()):
                    self.GetConnectedVertices(connectedVerticesList, inputModelNodePolyData,
                                              connectedVerticesList.GetId(i))
        return connectedVerticesList
    def findROI(self, fidList):
        """Build and display the region of interest around all landmarks.

        Unions the neighbourhood (``ROIradius`` edge rings) of every landmark
        with a non-zero radius into one point-ID list, bakes it into the
        connected model's scalar array and makes it visible.  Returns the
        combined vtkIdList.
        """
        hardenModel = slicer.app.mrmlScene().GetNodeByID(fidList.GetAttribute("hardenModelID"))
        connectedModel = slicer.app.mrmlScene().GetNodeByID(fidList.GetAttribute("connectedModelID"))
        landmarkDescription = self.decodeJSON(fidList.GetAttribute("landmarkDescription"))
        arrayName = fidList.GetAttribute("arrayName")
        ROIPointListID = vtk.vtkIdList()
        # Python-2-only iteritems(); consistent with the rest of this module
        for key,activeLandmarkState in landmarkDescription.iteritems():
            tempROIPointListID = vtk.vtkIdList()
            if activeLandmarkState["ROIradius"] != 0:
                self.defineNeighbor(tempROIPointListID,
                                    hardenModel.GetPolyData(),
                                    activeLandmarkState["projection"]["closestPointIndex"],
                                    activeLandmarkState["ROIradius"])
            # Merge this landmark's neighbourhood into the union
            for j in range(0, tempROIPointListID.GetNumberOfIds()):
                ROIPointListID.InsertUniqueId(tempROIPointListID.GetId(j))
        listID = ROIPointListID
        self.addArrayFromIdList(listID, connectedModel, arrayName)
        self.displayROI(connectedModel, arrayName)
        return ROIPointListID
    def warningMessage(self, message):
        """Show *message* in a modal warning dialog (blocks until dismissed)."""
        messageBox = ctk.ctkMessageBox()
        messageBox.setWindowTitle(" /!\ WARNING /!\ ")
        messageBox.setIcon(messageBox.Warning)
        messageBox.setText(message)
        messageBox.setStandardButtons(messageBox.Ok)
        messageBox.exec_()
def encodeJSON(self, input):
encodedString = json.dumps(input)
encodedString = encodedString.replace('\"', '\'')
return encodedString
    def decodeJSON(self, input):
        """Parse a string produced by encodeJSON back into Python data.

        Restores the double quotes that encodeJSON replaced, parses the JSON
        and converts text values via byteify.  Returns None for falsy input.
        """
        if input:
            input = input.replace('\'','\"')
            return self.byteify(json.loads(input))
        return None
def byteify(self, input):
if isinstance(input, dict):
return {self.byteify(key):self.byteify(value) for key,value in input.iteritems()}
elif isinstance(input, list):
return [self.byteify(element) for element in input]
elif isinstance(input, unicode):
return input.encode('utf-8')
else:
return input | 53.118812 | 124 | 0.658062 |
acf3829b5dd41b923154fb6234b50efe44d26cb7 | 8,956 | py | Python | thirdparty/MicroPython/user/components/module/addnewmodule/makeqstrdata.py | flyghost/OneOS-V2.1.0 | 6fedab0558c07fe679d63ba1eb8ee9992c044d86 | [
"Apache-2.0"
] | null | null | null | thirdparty/MicroPython/user/components/module/addnewmodule/makeqstrdata.py | flyghost/OneOS-V2.1.0 | 6fedab0558c07fe679d63ba1eb8ee9992c044d86 | [
"Apache-2.0"
] | null | null | null | thirdparty/MicroPython/user/components/module/addnewmodule/makeqstrdata.py | flyghost/OneOS-V2.1.0 | 6fedab0558c07fe679d63ba1eb8ee9992c044d86 | [
"Apache-2.0"
] | null | null | null | """
Process raw qstr file and output qstr data with length, hash and data bytes.
This script works with Python 2.6, 2.7, 3.3 and 3.4.
"""
from __future__ import print_function
import re
import sys
# Python 2/3 compatibility:
# - iterating through bytes is different
# - codepoint2name lives in a different module
import platform
if platform.python_version_tuple()[0] == '2':
    # Py2: wrap in bytearray so iterating yields ints, matching Py3 bytes
    bytes_cons = lambda val, enc=None: bytearray(val)
    from htmlentitydefs import codepoint2name
elif platform.python_version_tuple()[0] == '3':
    # Py3: bytes(str, encoding) already yields ints on iteration
    bytes_cons = bytes
    from html.entities import codepoint2name
# end compatibility code
# Extra readable names for characters that have no HTML entity (plus '-'),
# so qstr_escape can emit an identifier fragment for them.
_extra_codepoint_names = (
    ('-', 'hyphen'),
    (' ', 'space'),
    ("'", 'squot'),
    (',', 'comma'),
    ('.', 'dot'),
    (':', 'colon'),
    (';', 'semicolon'),
    ('/', 'slash'),
    ('%', 'percent'),
    ('#', 'hash'),
    ('(', 'paren_open'),
    (')', 'paren_close'),
    ('[', 'bracket_open'),
    (']', 'bracket_close'),
    ('{', 'brace_open'),
    ('}', 'brace_close'),
    ('*', 'star'),
    ('!', 'bang'),
    ('\\', 'backslash'),
    ('+', 'plus'),
    ('$', 'dollar'),
    ('=', 'equals'),
    ('?', 'question'),
    ('@', 'at_sign'),
    ('^', 'caret'),
    ('|', 'pipe'),
    ('~', 'tilde'),
)
for _char, _name in _extra_codepoint_names:
    codepoint2name[ord(_char)] = _name
# static qstrs, should be sorted
# These are pre-interned in a fixed order (see parse_input_headers, which
# assigns them strongly negative order numbers); do not reorder entries.
static_qstr_list = [
    "",
    "__dir__", # Put __dir__ after empty qstr for builtin dir() to work
    "\n",
    " ",
    "*",
    "/",
    "<module>",
    "_",
    "__call__",
    "__class__",
    "__delitem__",
    "__enter__",
    "__exit__",
    "__getattr__",
    "__getitem__",
    "__hash__",
    "__init__",
    "__int__",
    "__iter__",
    "__len__",
    "__main__",
    "__module__",
    "__name__",
    "__new__",
    "__next__",
    "__qualname__",
    "__repr__",
    "__setitem__",
    "__str__",
    "ArithmeticError",
    "AssertionError",
    "AttributeError",
    "BaseException",
    "EOFError",
    "Ellipsis",
    "Exception",
    "GeneratorExit",
    "ImportError",
    "IndentationError",
    "IndexError",
    "KeyError",
    "KeyboardInterrupt",
    "LookupError",
    "MemoryError",
    "NameError",
    "NoneType",
    "NotImplementedError",
    "OSError",
    "OverflowError",
    "RuntimeError",
    "StopIteration",
    "SyntaxError",
    "SystemExit",
    "TypeError",
    "ValueError",
    "ZeroDivisionError",
    "abs",
    "all",
    "any",
    "append",
    "args",
    "bool",
    "builtins",
    "bytearray",
    "bytecode",
    "bytes",
    "callable",
    "chr",
    "classmethod",
    "clear",
    "close",
    "const",
    "copy",
    "count",
    "dict",
    "dir",
    "divmod",
    "end",
    "endswith",
    "eval",
    "exec",
    "extend",
    "find",
    "format",
    "from_bytes",
    "get",
    "getattr",
    "globals",
    "hasattr",
    "hash",
    "id",
    "index",
    "insert",
    "int",
    "isalpha",
    "isdigit",
    "isinstance",
    "islower",
    "isspace",
    "issubclass",
    "isupper",
    "items",
    "iter",
    "join",
    "key",
    "keys",
    "len",
    "list",
    "little",
    "locals",
    "lower",
    "lstrip",
    "main",
    "map",
    "micropython",
    "next",
    "object",
    "open",
    "ord",
    "pop",
    "popitem",
    "pow",
    "print",
    "range",
    "read",
    "readinto",
    "readline",
    "remove",
    "replace",
    "repr",
    "reverse",
    "rfind",
    "rindex",
    "round",
    "rsplit",
    "rstrip",
    "self",
    "send",
    "sep",
    "set",
    "setattr",
    "setdefault",
    "sort",
    "sorted",
    "split",
    "start",
    "startswith",
    "staticmethod",
    "step",
    "stop",
    "str",
    "strip",
    "sum",
    "super",
    "throw",
    "to_bytes",
    "tuple",
    "type",
    "update",
    "upper",
    "utf-8",
    "value",
    "values",
    "write",
    "zip",
]
def compute_hash(qstr, bytes_hash):
    """djb2-xor hash of *qstr* (a bytes object), truncated to *bytes_hash*
    bytes.  This must match the equivalent function in qstr.c."""
    state = 5381
    for byte in qstr:
        state = (state * 33) ^ byte
    # Zero is reserved to mean "hash not computed", so map it to 1.
    mask = (1 << (8 * bytes_hash)) - 1
    return (state & mask) or 1
def qstr_escape(qst):
    """Turn a qstr value into a C-identifier-safe name: every character that
    is not [A-Za-z0-9_] becomes _<name>_ using codepoint2name, falling back
    to _0xNN_ for characters without a registered name."""
    def esc_char(m):
        code = ord(m.group(0))
        name = codepoint2name.get(code, '0x%02x' % code)
        return '_' + name + '_'
    return re.sub(r'[^A-Za-z0-9_]', esc_char, qst)
def parse_input_headers(infiles):
    """Read preprocessed qstr header files and return ``(qcfgs, qstrs)``.

    ``qcfgs`` maps QCFG config names to their values; ``qstrs`` maps escaped
    identifiers to ``(order, ident, qstr)`` tuples.  The order numbers keep
    static qstrs first (large negative offsets), then the empty qstr,
    ``__dir__``, other dunder names, and finally everything else in file
    order.  Exits the process when the preprocessor output was empty.
    """
    qcfgs = {}
    qstrs = {}
    # add static qstrs
    for qstr in static_qstr_list:
        # work out the corresponding qstr name
        ident = qstr_escape(qstr)
        # don't add duplicates
        assert ident not in qstrs
        # add the qstr to the list, with order number to retain original order in file
        order = len(qstrs) - 300000
        qstrs[ident] = (order, ident, qstr)
    # read the qstrs in from the input files
    for infile in infiles:
        with open(infile, 'rt') as f:
            for line in f:
                line = line.strip()
                # is this a config line?
                match = re.match(r'^QCFG\((.+), (.+)\)', line)
                if match:
                    value = match.group(2)
                    if value[0] == '(' and value[-1] == ')':
                        # strip parenthesis from config value
                        value = value[1:-1]
                    qcfgs[match.group(1)] = value
                    continue
                # is this a QSTR line?
                match = re.match(r'^Q\((.*)\)$', line)
                if not match:
                    continue
                # get the qstr value
                qstr = match.group(1)
                # special cases to specify control characters
                if qstr == '\\n':
                    qstr = '\n'
                elif qstr == '\\r\\n':
                    qstr = '\r\n'
                # work out the corresponding qstr name
                ident = qstr_escape(qstr)
                # don't add duplicates
                if ident in qstrs:
                    continue
                # add the qstr to the list, with order number to retain original order in file
                order = len(qstrs)
                # but put special method names like __add__ at the top of list, so
                # that their id's fit into a byte
                if ident == "":
                    # Sort empty qstr above all still
                    order = -200000
                elif ident == "__dir__":
                    # Put __dir__ after empty qstr for builtin dir() to work
                    order = -190000
                elif ident.startswith("__"):
                    order -= 100000
                qstrs[ident] = (order, ident, qstr)
    if not qcfgs:
        sys.stderr.write("ERROR: Empty preprocessor output - check for errors above\n")
        sys.exit(1)
    return qcfgs, qstrs
def make_bytes(cfg_bytes_len, cfg_bytes_hash, qstr):
    """Render one qstr as the C string-literal expression used in a QDEF:
    hash bytes, then length bytes, then the (possibly hex-escaped) data."""
    encoded = bytes_cons(qstr, 'utf8')
    length = len(encoded)
    hashed = compute_hash(encoded, cfg_bytes_hash)
    printable = all(32 <= ord(ch) <= 126 and ch != '\\' and ch != '"' for ch in qstr)
    if printable:
        # qstr is all printable ASCII so render it as-is (for easier debugging)
        data = qstr
    else:
        # qstr contains non-printable codes so render entire thing as hex pairs
        data = ''.join('\\x%02x' % byte for byte in encoded)
    if length >= (1 << (8 * cfg_bytes_len)):
        print('qstr is too long:', qstr)
        assert False
    len_prefix = ('\\x%02x' * cfg_bytes_len) % tuple(((length >> (8 * i)) & 0xff) for i in range(cfg_bytes_len))
    hash_prefix = ('\\x%02x' * cfg_bytes_hash) % tuple(((hashed >> (8 * i)) & 0xff) for i in range(cfg_bytes_hash))
    return '(const byte*)"%s%s" "%s"' % (hash_prefix, len_prefix, data)
def print_qstr_data(qcfgs, qstrs):
    """Emit the generated C header body (QDEF lines) to stdout.

    Prints the null qstr first, then every qstr sorted by its order number
    (static qstrs, empty, __dir__, dunders, then file order).
    """
    # get config variables
    cfg_bytes_len = int(qcfgs['BYTES_IN_LEN'])
    cfg_bytes_hash = int(qcfgs['BYTES_IN_HASH'])
    # print out the starter of the generated C header file
    print('// This file was automatically generated by makeqstrdata.py')
    print('')
    # add NULL qstr with no hash or data
    print('QDEF(MP_QSTRnull, (const byte*)"%s%s" "")' % ('\\x00' * cfg_bytes_hash, '\\x00' * cfg_bytes_len))
    # go through each qstr and print it out
    for order, ident, qstr in sorted(qstrs.values(), key=lambda x: x[0]):
        qbytes = make_bytes(cfg_bytes_len, cfg_bytes_hash, qstr)
        print('QDEF(MP_QSTR_%s, %s)' % (ident, qbytes))
def do_work(infiles):
    """Parse the preprocessed qstr headers and print the generated C data."""
    print_qstr_data(*parse_input_headers(infiles))
| 25.371105 | 112 | 0.545333 |
acf38510258db320b91b4b478dcd1143075c7356 | 2,119 | py | Python | plansys2_executor/launch/executor_launch.py | movefasta/ros2_planning_system | c058cb16960b49a1c0f7d80280408a27136a9cc2 | [
"Apache-2.0"
] | 1 | 2022-02-11T18:24:24.000Z | 2022-02-11T18:24:24.000Z | plansys2_executor/launch/executor_launch.py | movefasta/ros2_planning_system | c058cb16960b49a1c0f7d80280408a27136a9cc2 | [
"Apache-2.0"
] | null | null | null | plansys2_executor/launch/executor_launch.py | movefasta/ros2_planning_system | c058cb16960b49a1c0f7d80280408a27136a9cc2 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Intelligent Robotics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
def generate_launch_description():
    """Build the ROS 2 launch description for the plansys2 executor node.

    Declares the 'namespace' and 'default_action_bt_xml_filename' launch
    arguments and starts the executor node with those values plus a
    'params_file' configuration.

    NOTE(review): 'params_file' is read as a LaunchConfiguration but no
    DeclareLaunchArgument for it appears here -- presumably declared by an
    including launch file; confirm.
    """
    namespace = LaunchConfiguration('namespace')
    params_file = LaunchConfiguration('params_file')
    default_action_bt_xml_filename = LaunchConfiguration('default_action_bt_xml_filename')
    declare_namespace_cmd = DeclareLaunchArgument(
        'namespace',
        default_value='',
        description='Namespace')
    # Default BT: the plansys2_action_bt.xml shipped with plansys2_executor
    declare_default_bt_file_cmd = DeclareLaunchArgument(
        'default_action_bt_xml_filename',
        default_value=os.path.join(
            get_package_share_directory('plansys2_executor'),
            'behavior_trees', 'plansys2_action_bt.xml'),
        description='BT representing a PDDL action')
    # Specify the actions
    executor_cmd = Node(
        package='plansys2_executor',
        executable='executor_node',
        name='executor',
        namespace=namespace,
        output='screen',
        parameters=[
            {
                'default_action_bt_xml_filename': default_action_bt_xml_filename
            },
            params_file
        ])
    # Create the launch description and populate
    ld = LaunchDescription()
    ld.add_action(declare_namespace_cmd)
    ld.add_action(declare_default_bt_file_cmd)
    # Declare the launch options
    ld.add_action(executor_cmd)
    return ld
acf385b6aa6172606f05068c7b8cec7a664630ab | 1,848 | py | Python | agrspy/envspy-scrapy/envspy/spiders/nrtaqi.py | soonyenju/agrspy | 1c5d11d48933f7392d2246fda487256d5cd5b239 | [
"MIT"
] | 2 | 2019-01-10T07:00:25.000Z | 2019-01-10T07:15:00.000Z | agrspy/envspy-scrapy/envspy/spiders/nrtaqi.py | soonyenju/arspy | 1c5d11d48933f7392d2246fda487256d5cd5b239 | [
"MIT"
] | null | null | null | agrspy/envspy-scrapy/envspy/spiders/nrtaqi.py | soonyenju/arspy | 1c5d11d48933f7392d2246fda487256d5cd5b239 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import scrapy, re, time
import json
from datetime import datetime
from envspy.items import EnvspyItem
class HistaqiSpider(scrapy.Spider):
    """Scrapy spider for near-real-time AQI pages on tianqihoubao.com.

    Reads province/city URL fragments from a local ``baseurl.json`` file and
    crawls each city's AQI table, writing one JSON file per city per day.
    """
    name = 'nrtaqi'
    allowed_domains = ['www.tianqihoubao.com/aqi']
    start_urls = ['http://www.tianqihoubao.com/aqi/']
    def parse(self, response):
        """Schedule one crawl request per city listed in baseurl.json."""
        item = EnvspyItem()
        with open("baseurl.json", "r", encoding='gbk') as f:
            base_urls = json.load(f)
        item["baseurls"] = base_urls
        for prov_name, prov in base_urls.items():
            # print(prov_name)
            for city_name, city_url in prov.items():
                # print(city_name)
                abs_city_url = response.urljoin(city_url)
                # dont_filter: the same city URL may be revisited across runs
                yield scrapy.Request(abs_city_url, callback=self.crawl_nrtaqi, dont_filter=True)
                # exit(0)
        # print(item["baseurls"])
        # print(item)
        # exit(0)
    # https://www.jianshu.com/p/de61ed0f961d
    def crawl_nrtaqi(self, response):
        """Extract the AQI table for one city and dump it to <city>-<date>.json.

        Retries the same URL after a short sleep when the page has no data yet.
        """
        print(response.url)
        print('ok')
        # Time-window guard: a site that keeps failing to crawl should be
        # skipped; this still has a bug, so it is disabled for now.
        # minute = int(datetime.now().strftime(r"%M"))
        # if minute >= 48:
        #     print("stop")
        #     return 0
        table = response.xpath('//*[@id="content"]/div[4]/table')
        tds = table.xpath('.//td/text()').extract()
        vals = []
        for td in tds:
            vals.append(td.strip())
        timetag = response.xpath('//*[@id="content"]/div[1]/text()').extract_first().strip()
        h1 = response.xpath('//*[@id="content"]/h1/text()').extract_first().strip()
        timetag = re.findall(r'\d+-\d+-\d+', timetag)[0]
        # '2018-12-20'
        # Extract the city name (CJK characters before the word for "air")
        # from the page heading and build "<city>-<date>".
        out = ''.join(re.findall(r'[\u4E00-\u9FA5]', h1))
        idx = out.index(re.findall(r'[\u4E00-\u9FA5]' + '空气', h1)[0][0])
        name = out[0: idx+1] + '-' + timetag
        print(name,vals)
        # No data published yet -- wait briefly and re-crawl the same page
        if not ''.join(vals):
            time.sleep(5)
            yield scrapy.Request(response.url, callback=self.crawl_nrtaqi, dont_filter=True)
        else:
            with open(name + ".json", "w") as f:
                json.dump({name: vals}, f, ensure_ascii=False, indent=4)
| 31.322034 | 86 | 0.648268 |
acf386b15a8fb35a696236649850695e57e440e7 | 4,533 | py | Python | param/gausian_optimization.py | Kdy0115/simulation | 5a450c7af943431dec5ba2761056a5e818e4fdc9 | [
"MIT"
] | null | null | null | param/gausian_optimization.py | Kdy0115/simulation | 5a450c7af943431dec5ba2761056a5e818e4fdc9 | [
"MIT"
] | null | null | null | param/gausian_optimization.py | Kdy0115/simulation | 5a450c7af943431dec5ba2761056a5e818e4fdc9 | [
"MIT"
] | null | null | null | import GPy
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from matplotlib.cm import ScalarMappable
from sklearn.preprocessing import MinMaxScaler
import GPyOpt
#%precision
# Observed samples: 10 thermal-model parameters (β, α, and γ/outside pairs
# for window/wall/floor/ceiling) plus the objective value y.
arr = np.array([[0.61,0.52,0.004,0.00036,0.033,0.0003,0.033,0.0003,0.033,0.0003,1.751665]])
# arr_t = np.array([[17,6]])
df = pd.DataFrame(data=arr)
# df = pd.DataFrame(data=arr_t)
print(df)
df.columns = ['β','α','γ_window','out_window','γ_wall','out_wall','γ_floor','out_floor','γ_ceil','out_ceil','y']
# df.columns = ['x','y']
# Additional evaluated parameter sets; rows 7 and 8 are the bound extremes.
df.loc[1] = [0.4534,0.1512,0.0095,0.00091,0.0628,0.0004760,0.0538,0.000182608,0.0646,0.0006418,1.7849]
df.loc[2] = [0.026580,0.782135040039652,0.00957225021703172,0.000291306681649386,0.0360138882517681,0.000700471717015439,0.0225621825845767,0.00000866976356750566,0.0280810443519186,0.000819146530200011,2.8418]
df.loc[3] = [0.565716564332133,0.704914692529655,0.00208502340935936,0.000450735158713038,0.00444841904395332,0.000887054752878748,0.0417753575016765,0.000988170938707006,0.0205979234676524,0.000729992510395213,1.7665]
df.loc[4] = [0.0921071881791305,0.334329111237042,0.00831201687393285,0.000549438906665198,0.0670424679279283,0.00064123151254269,0.0969692118820464,0.0000619122257514758,0.0232270615748928,0.000665588763317336,1.9395]
df.loc[5] = [0.511842879132687,0.588204879029716,0.00332895961542675,0.0000963072020489354,0.00292915399399429,0.000706157011759899,0.0580068900705434,0.000406747334636892,0.0230228639407008,0.000312117894156826,1.7514]
df.loc[6] = [0.716432375742807,0.85242121004573,0.00853121379954252,0.000000419067650024888,0.0823477518623204,0.000528739084913966,0.037071866480651,0.000407032642383619,0.0000408894544489252,0.000896185353771878,1.69927]
df.loc[7] = [1,1,0.01,0.001,0.1,0.001,0.1,0.001,0.1,0.001,1.8090] # max
df.loc[8] = [0.1,0.1,0.001,0.0001,0.01,0.0001,0.01,0.0001,0.01,0.0001,1.65579] # min
n = len(df)   # number of observations (used in the acquisition weight)
dim = 10      # input dimensionality for the GP kernel
# (disabled) per-column normalisation to the bound ranges
# df['γ_window'] = df['γ_window'] / 0.01
# df['γ_wall'] = df['γ_wall'] / 0.1
# df['γ_floor'] = df['γ_floor']/0.1
# df['γ_ceil'] = df['γ_ceil'] / 0.1
# df['out_window'] = df['out_window'] / 0.001
# df['out_wall'] = df['out_wall'] / 0.001
# df['out_floor'] = df['out_floor'] / 0.001
# df['out_ceil'] = df['out_ceil'] / 0.001
print(df)
# (disabled) alternative 1-D toy data set
# df.loc[1] = [-9,11]
# df.loc[2] = [-5,9]
# df.loc[3] = [1,5]
# df.loc[4] = [9,11]
# df.loc[5] = [13,7]
# df.loc[6] = [15,5]
# df.loc[7] = [-11,4]
# df.loc[8] = [-2,3]
# df.loc[9] = [3,8]
# df.loc[10] = [11,12]
# df.loc[11] = [1.5,5.7]
# df.loc[12] = [-2.5,3.2]
# df.loc[13] = [9.2,11.3]
# df.loc[14] = [14.2,6.3]
# df.loc[15] = [3.4,7.6]
# n = len(df)
# dim = 1
# Training matrix: one column per parameter, in bounds order.
x_train = np.stack([df['β'],df['α'],df['γ_window'],df['out_window'],df['γ_wall'],df['out_wall'],df['γ_floor'],df['out_floor'],df['γ_ceil'],df['out_ceil']],axis=1)
# x_train = np.stack([df['x']],axis=1)
# x_train = df['x'].values[:,None]
y_train = df['y'].values**2  # squared objective as regression target
def f(x):
    """Acquisition function: GP predictive mean plus a sqrt(log(n)/n)-scaled
    predictive-variance term (UCB-style exploration bonus)."""
    mean, variance = gpy_model.predict(x)
    weight = (np.log(n) / n) ** 0.5
    return mean + weight * variance
# Kernel choice: White noise kernel selected; RBF/Exponential/sum variants
# were tried and left commented out for reference.
# kern = GPy.kern.RBF(input_dim=dim)
kern = GPy.kern.White(input_dim=dim)
# kern = GPy.kern.Exponential(input_dim=dim)
#kern = GPy.kern.RBF(input_dim=dim) + GPy.kern.White(input_dim=dim) + GPy.kern.Exponential(input_dim=dim)
# Fit a GP regression surrogate to the observed (x, y^2) samples.
gpy_model = GPy.models.GPRegression(
    X=x_train,
    Y=y_train[:,None],
    kernel = kern
)
gpy_model.optimize()
# Search box for each of the 10 thermal-model parameters.
bounds = [
    {'name':'β','type':'continuous','domain':(0.1000,0.9999)},
    {'name':'α','type':'continuous','domain':(0.10000,0.9999)},
    {'name':'γ_window','type':'continuous','domain':(0.001000,0.01000)},
    {'name':'out_window','type':'continuous','domain':(0.0001000,0.001000)},
    {'name':'γ_wall','type':'continuous','domain':(0.01000,0.1000)},
    {'name':'out_wall','type':'continuous','domain':(0.001000,0.01000)},
    {'name':'γ_floor','type':'continuous','domain':(0.01000,0.1000)},
    {'name':'out_floor','type':'continuous','domain':(0.0010000,0.01000)},
    {'name':'γ_ceil','type':'continuous','domain':(0.010000,0.10000)},
    {'name':'out_ceil','type':'continuous','domain':(0.0010000,0.010000)}
]
# bounds = [
#     {'name':'x','type':'continuous','domain':(-10,10)}
# ]
num = 50  # Bayesian-optimization iterations
# Minimize the acquisition f over the bounded box.
myBopt = GPyOpt.methods.BayesianOptimization(
    f=f,
    domain=bounds,
    #constraints = constraints, # 制約条件を設定したい場合
    maximize = False, # when True -f maximization of f is done by minimizing -f (default, False).
    model_type = 'GP' # default 'GP'
)
myBopt.run_optimization(max_iter=num)
opt_x = myBopt.x_opt  # best parameter vector found
# gpy_model.plot()
# plt.show()
print(opt_x)
acf386cce0b088120a11a82078e7bf4e8f0e7311 | 936 | py | Python | docs/dalapi/doxypy/__init__.py | cmsxbc/oneDAL | eeb8523285907dc359c84ca4894579d5d1d9f57e | [
"Apache-2.0"
] | 169 | 2020-03-30T09:13:05.000Z | 2022-03-15T11:12:36.000Z | docs/dalapi/doxypy/__init__.py | cmsxbc/oneDAL | eeb8523285907dc359c84ca4894579d5d1d9f57e | [
"Apache-2.0"
] | 1,198 | 2020-03-24T17:26:18.000Z | 2022-03-31T08:06:15.000Z | docs/dalapi/doxypy/__init__.py | cmsxbc/oneDAL | eeb8523285907dc359c84ca4894579d5d1d9f57e | [
"Apache-2.0"
] | 75 | 2020-03-30T11:39:58.000Z | 2022-03-26T05:16:20.000Z | # file: __init__.py
#===============================================================================
# Copyright 2019-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
from .index import Index, index, to_dict, to_json, to_yaml
from .loader import TransformerPass, NameTransformer
from .listing import ListingReader
from . import model
| 42.545455 | 80 | 0.641026 |
acf387e78982096d11a037f8e14a1b1da37d538b | 391 | py | Python | third-party/osqp/tests/generate_tests_data.py | daniu22/Cheetah-Software | ceadda416c72a075d7a4fe5bb5ea512334817065 | [
"MIT"
] | 1,452 | 2019-06-21T15:02:55.000Z | 2022-03-31T14:44:18.000Z | tests/generate_tests_data.py | xuenke/osqp | 657a3b117320c4f8ecb57e27011e75e63f61ce4d | [
"Apache-2.0"
] | 282 | 2017-04-25T12:41:54.000Z | 2021-06-02T15:23:25.000Z | tests/generate_tests_data.py | xuenke/osqp | 657a3b117320c4f8ecb57e27011e75e63f61ce4d | [
"Apache-2.0"
] | 671 | 2019-06-30T03:34:25.000Z | 2022-03-31T08:57:22.000Z | # Code to generate the unittests for OSQP C code
import basic_qp.generate_problem
import basic_qp2.generate_problem
import lin_alg.generate_problem
import non_cvx.generate_problem
import primal_dual_infeasibility.generate_problem
import primal_infeasibility.generate_problem
import solve_linsys.generate_problem
import unconstrained.generate_problem
import update_matrices.generate_problem
| 32.583333 | 49 | 0.900256 |
acf38843a01b5f9e791cc17a7434072014358457 | 2,591 | py | Python | pytest_automation_infra/unit_tests/test_provisioner_client.py | AnyVisionltd/automation-infra | e94c10224b0711160c9fc361045b8f2cfc9c4ca8 | [
"MIT"
] | 6 | 2021-03-10T14:02:42.000Z | 2021-12-08T20:17:21.000Z | pytest_automation_infra/unit_tests/test_provisioner_client.py | solganik/automation-infra | 66379f7366eaa52f412a9150a018ea17ddcdf59b | [
"MIT"
] | 5 | 2021-05-10T18:00:07.000Z | 2022-03-12T00:36:54.000Z | pytest_automation_infra/unit_tests/test_provisioner_client.py | solganik/automation-infra | 66379f7366eaa52f412a9150a018ea17ddcdf59b | [
"MIT"
] | 5 | 2021-03-10T14:02:11.000Z | 2021-07-16T20:58:13.000Z | """
Can be run like: python -m pytest pytest_automation_infra/unit_tests/test_provisioner_client.py -s --log-cli-level=info
"""
import logging
import os
import threading
import time
import pytest
from pytest_provisioner import heartbeat_client, provisioner_client
def test_init_hardware_happy_flow():
    """End-to-end happy-path test against live provisioner/heartbeat servers.

    Exercises the full allocation lifecycle: provision hardware, start and
    stop background heartbeats, explicitly release the allocation, then
    provision again and let the allocation expire by withholding heartbeats.
    Endpoints default to localhost and can be overridden via HABERTEST_*
    environment variables.  NOTE: total runtime is >80s due to the sleeps.
    """
    provisioner = provisioner_client.ProvisionerClient(
        ep=os.getenv('HABERTEST_PROVISIONER', "http://localhost:8080"),
        cert=os.getenv('HABERTEST_SSL_CERT', None),
        key=os.getenv('HABERTEST_SSL_KEY', None))
    # `stop` is shared with the heartbeat client; setting it halts the
    # background heartbeat thread.
    stop = threading.Event()
    hb = heartbeat_client.HeartbeatClient(stop,
                                          ep=os.getenv('HABERTEST_HEARTBEAT_SERVER', "http://localhost:7080"),
                                          cert=os.getenv('HABERTEST_SSL_CERT', None),
                                          key=os.getenv('HABERTEST_SSL_KEY', None))
    req = {"host": {}}
    hardware = provisioner.provision(req)
    assert hardware
    allocation_id = hardware['allocation_id']
    logging.info(f"provisioned hardware successfully: {hardware}")
    # Phase 1: heartbeat thread can be started, stopped, and restarted.
    logging.info("starting heartbeat")
    hb_thread = hb.send_heartbeats_on_thread(allocation_id)
    time.sleep(3)
    assert hb_thread.is_alive()
    logging.info("stopping heartbeat")
    stop.set()
    time.sleep(3)
    assert not hb_thread.is_alive()
    logging.info("starting heartbeat")
    stop.clear()
    hb_thread = hb.send_heartbeats_on_thread(allocation_id)
    time.sleep(3)
    assert hb_thread.is_alive()
    time.sleep(30)
    logging.info("stopping heartbeat")
    stop.set()
    # Phase 2: explicit release removes the allocation and makes further
    # heartbeats for it fail with KeyError.
    logging.info("releasing hardware")
    provisioner.release(allocation_id)
    allocations = provisioner.allocations()
    assert allocation_id not in [allocation['allocation_id'] for allocation in allocations]
    with pytest.raises(KeyError):
        hb.send_heartbeat(allocation_id)
    logging.info("finished first part")
    # test expires
    # Phase 3: a fresh allocation with no heartbeats should expire on its
    # own (server-side timeout appears to be <35s -- confirm against config).
    logging.info("provisioning again")
    hardware = provisioner.provision(req)
    assert hardware
    allocation_id = hardware['allocation_id']
    logging.info("sending heartbeats")
    hb.send_heartbeat(allocation_id)
    hb_thread = hb.send_heartbeats_on_thread(allocation_id)
    time.sleep(5)
    logging.info("stopping heartbeats")
    stop.set()
    time.sleep(3)
    assert not hb_thread.is_alive()
    logging.info("waiting for expiration")
    time.sleep(35)
    allocations = provisioner.allocations()
    assert allocation_id not in [allocation['allocation_id'] for allocation in allocations]
    logging.info("passed test successfully!")
| 35.986111 | 119 | 0.695098 |
acf388b483e0f4bfeee2dc844a1b76b7fd74b5ec | 787 | py | Python | LSCplus_ToolKit/SR_pair2single.py | huruifeng/LSCplus | 1e7c03cba476178c8233a18382dc9fd19bcc82e0 | [
"Apache-2.0"
] | 1 | 2020-06-23T21:01:47.000Z | 2020-06-23T21:01:47.000Z | LSCplus_ToolKit/SR_pair2single.py | huruifeng/LSCplus | 1e7c03cba476178c8233a18382dc9fd19bcc82e0 | [
"Apache-2.0"
] | null | null | null | LSCplus_ToolKit/SR_pair2single.py | huruifeng/LSCplus | 1e7c03cba476178c8233a18382dc9fd19bcc82e0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import sys
import os


def convert(in_filename, out_filename):
    """Split paired-read FASTA records into renamed single-read records.

    The input is expected to contain records consisting of one '>' header
    line followed by two sequence lines (the two mates of a read pair).
    Each mate is written out under its own generated header of the form
    '>SR_<pair_index>_<mate_index>'.
    """
    # `with` guarantees both files are closed, even on the error path.
    with open(in_filename, 'r') as f, open(out_filename, 'w') as o:
        l = f.readline()
        i = 0  # pair index
        while l:
            # Every record must begin with a FASTA header line.
            if l[0] != '>':
                print("Err: invalid SR fasta/fa format")
                sys.exit(1)
            # Emit both mates of the pair under generated headers.
            for t in (0, 1):
                o.write('>SR_' + str(i) + "_" + str(t) + '\n')
                l = f.readline()  # sequence line for this mate
                o.write(l)
            i = i + 1
            l = f.readline()  # next pair's header (empty string at EOF)


def main():
    """Parse the command line and run the conversion."""
    if len(sys.argv) >= 2:
        in_filename = sys.argv[1]
    else:
        print("usage: ./SR_pair2single.py input_filename")
        print("or python SR_pair2single.py input_filename")
        sys.exit(1)
    # The output file keeps the input name, prefixed with 'output_'.
    convert(in_filename, "output_" + in_filename)


if __name__ == '__main__':
    main()
| 17.108696 | 63 | 0.493011 |
acf38a3e4474a6bed9a4e6cacecc65ac9cd09b30 | 1,724 | py | Python | test/Docbook/basic/html/html.py | moroten/scons | 20927b42ed4f0cb87f51287fa3b4b6cf915afcf8 | [
"MIT"
] | 1 | 2017-01-28T15:39:07.000Z | 2017-01-28T15:39:07.000Z | test/Docbook/basic/html/html.py | moroten/scons | 20927b42ed4f0cb87f51287fa3b4b6cf915afcf8 | [
"MIT"
] | 4 | 2019-04-11T16:27:45.000Z | 2019-04-11T23:56:30.000Z | test/Docbook/basic/html/html.py | moroten/scons | 20927b42ed4f0cb87f51287fa3b4b6cf915afcf8 | [
"MIT"
] | 2 | 2018-01-16T11:29:16.000Z | 2020-05-13T16:48:26.000Z | #!/usr/bin/env python
#
# Copyright (c) 2001-2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
"""
Test the HTML builder.
"""
import TestSCons

test = TestSCons.TestSCons()

# The Docbook tool needs an XML/XSLT engine: either the libxml2/libxslt
# bindings or lxml.  Skip the test when neither can be imported.  Catch
# ImportError specifically so genuine failures in those modules surface.
try:
    import libxml2
    import libxslt
except ImportError:
    try:
        import lxml
    except ImportError:
        test.skip_test('Cannot find installed Python binding for libxml2 or lxml, skipping test.\n')

test.dir_fixture('image')

# Normal invocation
test.run()
test.must_exist(test.workpath('manual.html'))

# Cleanup
test.run(arguments='-c')
test.must_not_exist(test.workpath('manual.html'))

test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 29.220339 | 100 | 0.75 |
acf38a974b7e1c4ac4988615c533b6f8ad6075b1 | 973 | py | Python | NiaPy/algorithms/statistics.py | lukapecnik/NiaPy | a40ac08a4c06a13019ec5e39cc137461884928b0 | [
"MIT"
] | null | null | null | NiaPy/algorithms/statistics.py | lukapecnik/NiaPy | a40ac08a4c06a13019ec5e39cc137461884928b0 | [
"MIT"
] | null | null | null | NiaPy/algorithms/statistics.py | lukapecnik/NiaPy | a40ac08a4c06a13019ec5e39cc137461884928b0 | [
"MIT"
] | 1 | 2020-03-25T16:20:36.000Z | 2020-03-25T16:20:36.000Z | # encoding=utf8
# pylint: disable=mixed-indentation, multiple-statements, line-too-long, expression-not-assigned, len-as-condition, no-self-use, unused-argument, no-else-return
import numpy as np
__all__ = ['BasicStatistics']
class BasicStatistics:
    r"""Convenience wrapper computing summary statistics of a sequence.

    The input is converted to a :class:`numpy.ndarray` once at
    construction time; every statistic is then derived from that array.
    """

    Name = ['BasicStatistics']

    def __init__(self, array):
        # np.asarray is a no-op when the input is already an ndarray.
        self.array = np.asarray(array)

    def min_value(self):
        """Return the smallest element."""
        return np.min(self.array)

    def max_value(self):
        """Return the largest element."""
        return np.max(self.array)

    def mean(self):
        """Return the arithmetic mean."""
        return np.mean(self.array)

    def median(self):
        """Return the median."""
        return np.median(self.array)

    def standard_deviation(self):
        """Return the sample standard deviation (ddof=1)."""
        return np.std(self.array, ddof=1)

    def generate_standard_report(self):
        """Return a one-line textual summary of all statistics."""
        template = "Min: {0}, Max: {1}, Mean: {2}, Median: {3}, Std. {4}"
        return template.format(self.min_value(), self.max_value(),
                               self.mean(), self.median(),
                               self.standard_deviation())
| 25.605263 | 160 | 0.625899 |
acf38b48a4abf81e57a631f5efe71fb29ec862aa | 5,754 | py | Python | verification/10par_xsec/template_mac/misc/run_plot_results.py | hwreeves-USGS/pyemu | 6b443601fbb9bcb9e97a8c200a78480c11c51f22 | [
"BSD-3-Clause"
] | 94 | 2015-01-09T14:19:47.000Z | 2022-03-14T18:42:23.000Z | verification/10par_xsec/template_mac/misc/run_plot_results.py | hwreeves-USGS/pyemu | 6b443601fbb9bcb9e97a8c200a78480c11c51f22 | [
"BSD-3-Clause"
] | 184 | 2020-05-29T14:25:23.000Z | 2022-03-29T04:01:42.000Z | verification/10par_xsec/template_mac/misc/run_plot_results.py | hwreeves-USGS/pyemu | 6b443601fbb9bcb9e97a8c200a78480c11c51f22 | [
"BSD-3-Clause"
] | 51 | 2015-01-14T15:55:11.000Z | 2021-12-28T17:59:24.000Z | import os
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
from matplotlib.patches import Rectangle as rect
import matplotlib.cm as mplcm
import matplotlib.colors as colors
from matplotlib.font_manager import FontProperties
# Use Arial for every font family so all figure text renders consistently.
mpl.rcParams['font.sans-serif'] = 'Arial'
mpl.rcParams['font.serif'] = 'Arial'
mpl.rcParams['font.cursive'] = 'Arial'
mpl.rcParams['font.fantasy'] = 'Arial'
mpl.rcParams['font.monospace'] = 'Arial'
# Uncompressed PDF output with TrueType (Type 42) fonts keeps text editable.
mpl.rcParams['pdf.compression'] = 0
mpl.rcParams['pdf.fonttype'] = 42
ticksize = 6  # NOTE(review): defined but apparently unused below -- confirm
# Small (8 pt) publication-style labels and legend text.
mpl.rcParams['legend.fontsize'] = 8
mpl.rcParams['axes.labelsize'] = 8
mpl.rcParams['xtick.labelsize'] = 8
mpl.rcParams['ytick.labelsize'] = 8
mpl.rcParams['legend.handlelength'] = 3
import pylab
def plot_bar(syn_h,cal_k,cal_h,cell_nums,obs_idxs,delx,plt_name,kmin,kmax):
    """Plot the 10-parameter cross-section verification figure.

    Builds a three-panel figure: (a) calibrated hydraulic conductivity per
    model cell, (b) calibrated vs. true water levels, and (c) predictive
    water levels, then saves it to `plt_name` (and a .png twin).

    Args:
        syn_h: 2D array of "true" heads; row 0 = calibration stress period,
            row 1 = predictive stress period.
        cal_k: 1D array of calibrated hydraulic conductivities per cell.
        cal_h: 2D array of calibrated heads, same layout as `syn_h`.
        cell_nums: 1D array of cell-center x coordinates (meters).
        obs_idxs: cell indices holding calibration observations.
        delx: cell width (meters).
        plt_name: output figure filename.
        kmin, kmax: unused here (kept for the commented-out colormap code
            below and for call-site compatibility).
    """
    #fig = pylab.figure(figsize=(8,4))
    fig = pylab.figure(figsize=(4.72,4.72))
    # Three stacked axes: K distribution (axk), calibration heads (ax1),
    # predictive heads (ax2).
    axk = pylab.axes((0.1,0.77,0.7,0.21))
    ax1 = pylab.axes((0.1,0.418,0.7,0.25))
    ax2 = pylab.axes((0.1,0.1,0.7,0.25))
    axk.text(-5.0,5.0,'a) Calibrated hydraulic conductivity distribution',fontsize=8)
    ax1.text(-5.0,6.5,'b) Calibrated water level distribution',fontsize=8)
    ax2.text(-5.0,6.5,'c ) Predictive water level distribution',fontsize=8)
    # Annotate the pumping flux at the right-hand boundary of each panel.
    arrowprops=dict(connectionstyle="angle,angleA=0,angleB=90,rad=10",arrowstyle='->')
    bbox_args = dict(fc="1.0")
    ax1.annotate('Q=0.5 $m^3/d$',fontsize=8,xy=(95,0),xytext=(70.0,1.0),
        arrowprops=arrowprops,bbox=bbox_args)
    ax2.annotate('Q=1.0 $m^3/d$',fontsize=8,xy=(95,0),xytext=(70.0,1.0),
        arrowprops=arrowprops,bbox=bbox_args)
    # Boundary-condition labels beneath the K panel.
    axk.text(0.0,-1.2,'Specified\nhead',ha='left',va='top',fontsize=8)
    axk.text(100,-1.2,'Specified\nflux',ha='right',va='top',fontsize=8)
    axk.text(50,-1.6,'Active model cells',ha='center',va='top',fontsize=8)
    arrowprops=dict(arrowstyle='<->')
    axk.annotate('',fontsize=8,xycoords='axes fraction',xy=(0.85,-0.075),xytext=(0.15,-0.075),
        arrowprops=arrowprops)
    # Earlier colormap-based bar coloring, retained for reference.
    #cmap_name = 'gray'
    #cm = pylab.get_cmap(cmap_name)
    #cnorm = colors.Normalize(vmin=k_min,vmax=k_max)
    #smap = mplcm.ScalarMappable(norm=cnorm,cmap=cm)
    #color = []
    #for i,(k,col) in enumerate(zip(cal_k,cell_nums)):
    #    c = smap.to_rgba(k)
    #    color.append(c)
    # Panel (a): K bars plus a dashed line marking the true K value.
    k_rects = axk.bar(cell_nums-(delx/2.0),cal_k,width=delx,color='#58ACFA',edgecolor='k',linewidth=0.5,alpha=0.5)
    axk.plot([0,cell_nums.max()+(0.5*delx)],[2.5,2.5],'k--',lw=1.5)
    axk.text(80,2.8,'True value',ha='left',va='bottom',fontsize=8)
    # Panels (b)/(c): true heads in blue, calibrated heads in red.
    ax1.plot(cell_nums,syn_h[0,:],color='b',ls='-')
    ax1.scatter(cell_nums,syn_h[0,:],marker='.',s=25,edgecolor='b',facecolor='b',label='True')
    ax1.plot(cell_nums,cal_h[0,:],color='r',ls='-')
    ax1.scatter(cell_nums,cal_h[0,:],marker='.',s=25,edgecolor='r',facecolor='r',label='Calibrated')
    ax2.plot(cell_nums,syn_h[1,:],color='b',ls='--')
    ax2.scatter(cell_nums,syn_h[1,:],marker='.',s=25,edgecolor='b',facecolor='b',label='True')
    ax2.plot(cell_nums,cal_h[1,:],color='r',ls='--')
    ax2.scatter(cell_nums,cal_h[1,:],marker='.',s=25,edgecolor='r',facecolor='r',label='Calibrated')
    # Mark observation locations; only the first carries the legend label.
    for iobs,obs_idx in enumerate(obs_idxs):
        if iobs == 0:
            ax1.scatter([cell_nums[obs_idx]],[syn_h[0,obs_idx]],marker='^',facecolor='k',edgecolor='k',s=50,label='Observation')
        else:
            ax1.scatter([cell_nums[obs_idx]],[syn_h[0,obs_idx]],marker='^',facecolor='k',edgecolor='k',s=50)
    # Draw the numbered model-cell strip along the bottom of every panel:
    # magenta = specified-head cell, green = specified-flux cell.
    for i,(col) in enumerate(cell_nums):
        xmn,xmx = col-(delx*0.5),col+(delx*0.5)
        ymn,ymx = -1.0,0.0
        if i == 0:
            c = 'm'
        elif i == cell_nums.shape[0]-1:
            c = 'g'
        else:
            c = '#E5E4E2'
        a = 0.75
        r1 = rect((xmn,ymn),xmx-xmn,ymx-ymn,color=c,ec='k',alpha=a)
        ax1.add_patch(r1)
        r2 = rect((xmn,ymn),xmx-xmn,ymx-ymn,color=c,ec='k',alpha=a)
        ax2.add_patch(r2)
        r3 = rect((xmn,ymn),xmx-xmn,ymx-ymn,color=c,ec='k',alpha=a)
        axk.add_patch(r3)
        x,y = (xmn+xmx)/2.0,(ymn+ymx)/2.0
        ax1.text(x,y,i+1,ha='center',va='center',fontsize=8)
        ax2.text(x,y,i+1,ha='center',va='center',fontsize=8)
        axk.text(x,y,i+1,ha='center',va='center',fontsize=8)
    # Axis cosmetics: shared x limits, blanked tick labels where redundant.
    axk.set_ylabel('Hydraulic conductivity\n ($m/d$)',multialignment='center')
    axk.set_xticklabels([])
    axk.set_yticklabels(['','0','1','2','3','4'])
    axk.set_ylim(-1,4.5)
    axk.set_xlim(0,cell_nums.max()+(0.5*delx))
    ax1.set_ylabel('Water level ($m$)')
    ax1.set_xticklabels([])
    ax1.set_yticklabels(['','0','1','2','3','4','5','6'])
    ax1.set_ylim(-1.0,6)
    ax1.set_xlim(0,cell_nums.max()+(0.5*delx))
    #ax1.grid()
    ax2.set_ylabel('Water level ($m$)')
    ax2.set_xlabel('Distance ($m$)')
    ax2.set_ylim(-1.0,6)
    ax2.set_yticklabels(['','0','1','2','3','4','5','6'])
    ax2.set_xlim(0,cell_nums.max()+(0.5*delx))
    #ax2.grid()
    ax1.legend(scatterpoints=1,columnspacing=8,handletextpad=0.001,bbox_to_anchor=(.9,-1.35),ncol=2,frameon=False)
    ax2.xaxis.labelpad = 1.5
    # Save at print resolution; the second call only differs when
    # plt_name ends in 'pdf' (it then writes a .png twin).
    pylab.savefig(plt_name,dpi=600,bbox_inches='tight')
    pylab.savefig(plt_name.replace('pdf','png'),dpi=600,bbox_inches='tight')
# ---- driver: calibrate with PEST, then plot the verification figure ----
delx = 10.0  # cell width (meters)
obs_idxs,pred_idx = [3,5],8  # cell indices of observations / prediction
syn_h = np.loadtxt('10par_xsec_truth.hds')  # "true" heads
k_min,k_max = 0.85,2.65
# Run PEST to calibrate before plotting.
# NOTE(review): 'pest.exe' assumes a Windows host -- confirm for other OSes.
os.system('pest.exe pest.pst')
# os.path.join keeps the path portable (the original hard-coded a
# backslash separator, which only works on Windows).
cal_k = np.loadtxt(os.path.join('ref_cal', 'hk_layer_1.ref'))  # calibrated K
cal_h = np.loadtxt('10par_xsec.hds')  # calibrated heads
# Cell-center x coordinates for each calibrated-K entry.
cell_nums = np.arange(delx,(cal_k.shape[0]*delx)+delx,delx) - (0.5*delx)
plot_bar(syn_h,cal_k,cal_h,cell_nums,obs_idxs,delx,'results.png',k_min,k_max)
| 38.36 | 128 | 0.613312 |
acf38c06006b223a468e6024e9ee53024a47b0d6 | 1,581 | py | Python | openapi-python-client/test/test_process_instance_query_dto_sorting.py | yanavasileva/camunda-bpm-examples | 051f8f28c62845e68ce4059ab64264c5a0bdc009 | [
"Apache-2.0"
] | null | null | null | openapi-python-client/test/test_process_instance_query_dto_sorting.py | yanavasileva/camunda-bpm-examples | 051f8f28c62845e68ce4059ab64264c5a0bdc009 | [
"Apache-2.0"
] | null | null | null | openapi-python-client/test/test_process_instance_query_dto_sorting.py | yanavasileva/camunda-bpm-examples | 051f8f28c62845e68ce4059ab64264c5a0bdc009 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Camunda BPM REST API
OpenApi Spec for Camunda BPM REST API. # noqa: E501
The version of the OpenAPI document: 7.13.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import openapi_client
from openapi_client.models.process_instance_query_dto_sorting import ProcessInstanceQueryDtoSorting # noqa: E501
from openapi_client.rest import ApiException
# Generated by openapi-generator; the stubs below only verify that the
# model can be constructed with and without its optional fields.
class TestProcessInstanceQueryDtoSorting(unittest.TestCase):
    """ProcessInstanceQueryDtoSorting unit test stubs"""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Test ProcessInstanceQueryDtoSorting
            include_option is a boolean, when False only required
            params are included, when True both required and
            optional params are included """
        # model = openapi_client.models.process_instance_query_dto_sorting.ProcessInstanceQueryDtoSorting()  # noqa: E501
        if include_optional :
            return ProcessInstanceQueryDtoSorting(
                sort_by = 'instanceId',
                sort_order = 'asc'
            )
        else :
            # All fields are optional in this DTO, so an empty construction
            # exercises the required-only path.
            return ProcessInstanceQueryDtoSorting(
            )

    def testProcessInstanceQueryDtoSorting(self):
        """Test ProcessInstanceQueryDtoSorting"""
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)

if __name__ == '__main__':
    unittest.main()
| 29.277778 | 121 | 0.701455 |
acf38c554bfe141338d5589e4cfcb966a4124f00 | 4,349 | py | Python | parse_cta.py | tachijuan/python | b4b9e9ce75b5e8426af9df41427fff659ff0cc60 | [
"MIT"
] | null | null | null | parse_cta.py | tachijuan/python | b4b9e9ce75b5e8426af9df41427fff659ff0cc60 | [
"MIT"
] | null | null | null | parse_cta.py | tachijuan/python | b4b9e9ce75b5e8426af9df41427fff659ff0cc60 | [
"MIT"
] | null | null | null | #!/usr/bin/python
#
# (C) 2014 Datalink Corp - Juan Orlandini
#
# This script is provided as-is with no implied warranties
#
#
# A simple script to parse the output from EMC CTA appliances
# There's very little error checking. The assumption is that all lines from the output include a time stamp and that the
# first line of the output file has the start time code and the last time has the end time code.
#
# Usage:
# python parse_cta.py <list of files>
#
# All output goes to stdout
#
# Revision history:
# 1.0 12-Nov-2014 - Initial working version
# 1.1 18-Nov-2014 - Reworked to make code clearer and use a "fields" list. Hoping to make this more generic for other CTA logs.
# This code makes it easier to add fields, remove fields, and also re-order fields for output. Just change the "fields" list
# Reworked time parsing to be more consistent with the rest of the code.
# 1.2 18-Nov-2014 - Added proper time parsing to calculate the year **** NOTE **** this code will report inaccurate run time if the run starts
# before midnight December 31 and runs into the next year
#
import sys                                # needed for argv
import re                                 # strip unwanted chars / parenthesized text
from datetime import datetime             # run-date and runtime arithmetic

# Output columns, in the order they are emitted in the CSV.
fields = ("share","tag_id","total_dirs_processed","total_files_processed","total_files_archived","total_bytes_archived",
          "total_fileops_failed","disk_capacity","disk_usage_start","disk_usage_end","total_files_stubbed",
          "total_bytes_stubbed","total_files_delay_stubbed","total_bytes_delay_stubbed","run_date","runtime")


def parse_log(filename):
    """Parse one CTA log file into a dict keyed by the names in `fields`.

    Assumes every line carries a 'Mon DD HH:MM:SS' timestamp prefix and
    that the first/last lines bound the run (see the header comment).
    """
    # `with` guarantees the handle is closed (the original leaked it).
    with open(filename) as fh:
        sl = [line.strip() for line in fh]  # slurp the whole file
    d = {}
    for i in sl:
        if 'Commands:' in i:
            # The share is the value of the -Cs switch on the command line.
            left = i.find('-Cs')
            right = i.find(' ', left + 4)
            d['share'] = i[left + 4:right]
        elif 'START SUPPORT' in i:
            left = i.find('SUPPORT:')
            # Full date (with year) comes from the START SUPPORT banner...
            rstarttime = datetime.strptime(i[left + 9:], "%a %b %d %H:%M:%S %Y")
            # ...while elapsed time is computed from the (year-less)
            # timestamps on the first and last log lines.
            starttime = datetime.strptime(sl[0][0:15], "%b %d %H:%M:%S")
            endtime = datetime.strptime(sl[-1][0:15], "%b %d %H:%M:%S")
            d['runtime'] = str((endtime - starttime).total_seconds())
            d['run_date'] = rstarttime.strftime('%m-%d-%Y')
        elif i.count('=') == 1:           # a single '=' marks a key=value stat
            k, v = i.split('=')
            # Drop parenthesized units plus quotes/commas/spaces from the value.
            val = re.sub(r'\(.*\)|[," ]', '', v)
            # Strip the 42-character timestamp prefix from the key.
            d[k[42:].strip()] = val
    return d


def main(argv):
    """Emit a CSV header plus one row per log file named on the command line."""
    if len(argv) < 2:
        print("You need to enter more one or more files as the arguments to this script")
        sys.exit(42)
    print(','.join(fields))
    for myfile in argv[1:]:
        d = parse_log(myfile)
        print(','.join(d[x] for x in fields))


if __name__ == '__main__':
    main(sys.argv)
| 67.953125 | 144 | 0.531616 |
acf38c5576a8b3e6e453f1d5ebd2d6684d7332e8 | 1,394 | py | Python | cowrie/commands/ifconfig.py | SecPascal/cowrie | 9dc70d788a65f087e8cebbb2e272dacf90928c80 | [
"BSD-3-Clause"
] | 12 | 2018-01-21T18:29:05.000Z | 2021-11-09T08:03:28.000Z | cowrie/commands/ifconfig.py | r3p3r/micheloosterhof-cowrie | 58d36cf1563eeaf3e660bff83c89ec0dc993c3a2 | [
"BSD-3-Clause"
] | null | null | null | cowrie/commands/ifconfig.py | r3p3r/micheloosterhof-cowrie | 58d36cf1563eeaf3e660bff83c89ec0dc993c3a2 | [
"BSD-3-Clause"
] | 8 | 2015-12-17T05:41:51.000Z | 2019-09-27T05:06:37.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2014 Peter Reuterås <peter@reuteras.com>
# See the COPYRIGHT file for more information
from __future__ import division, absolute_import
from cowrie.shell.honeypot import HoneyPotCommand
# Registry mapping command paths to handler classes; populated below and
# consumed by the honeypot shell's command dispatcher.
commands = {}


class command_ifconfig(HoneyPotCommand):
    """Fake `ifconfig` output shown to attackers in the honeypot shell."""

    def call(self):
        # Canned interface listing.  The honeypot's advertised IP address
        # (and its /24 broadcast) are substituted into the eth0 stanza so
        # the output stays consistent with the rest of the fake system.
        l = """eth0      Link encap:Ethernet  HWaddr 04:01:16:df:2d:01
          inet addr:%s  Bcast:%s.255  Mask:255.255.255.0
          inet6 addr: fe80::601:16ff:fedf:2d01/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:139435762 errors:0 dropped:0 overruns:0 frame:0
          TX packets:116082382 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:102191499830 (102.1 GB) TX bytes:68687923025 (68.6 GB)
lo        Link encap:Local Loopback
          inet addr:127.0.0.1  Mask:255.0.0.0
          inet6 addr: ::1/128 Scope:Host
          UP LOOPBACK RUNNING  MTU:65536  Metric:1
          RX packets:110 errors:0 dropped:0 overruns:0 frame:0
          TX packets:110 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0
          RX bytes:19932 (19.9 KB) TX bytes:19932 (19.9 KB)""" % \
            (self.protocol.kippoIP,
             self.protocol.kippoIP.rsplit('.', 1)[0])
        self.write(l+'\n')


commands['/sbin/ifconfig'] = command_ifconfig
# vim: set sw=4 et:
| 35.74359 | 74 | 0.647776 |
acf38c7aaab2ebc5cb562615671b571356524653 | 1,606 | py | Python | grama/data/__init__.py | OscarDeGar/py_grama | 0a02c291326b394a8d0c127dad4c58121e568777 | [
"MIT"
] | 13 | 2020-02-24T16:51:51.000Z | 2022-03-30T18:56:55.000Z | grama/data/__init__.py | OscarDeGar/py_grama | 0a02c291326b394a8d0c127dad4c58121e568777 | [
"MIT"
] | 78 | 2019-12-30T19:13:21.000Z | 2022-02-23T18:17:54.000Z | grama/data/__init__.py | OscarDeGar/py_grama | 0a02c291326b394a8d0c127dad4c58121e568777 | [
"MIT"
] | 7 | 2020-10-19T17:49:25.000Z | 2021-08-15T20:46:52.000Z | """Datasets
Built-in datasets.
Datasets:
df_diamonds: Diamond characteristics and prices. Columns:
carat:
cut:
color:
clarity:
depth:
table:
price:
x:
y:
z:
df_stang: Aluminum alloy data from Stang et al. (1946). Columns:
thick (inches): Nominal thickness
alloy: Alloy designation
E (Kips/inch^2): Young's modulus
mu (-): Poisson's ratio
ang (degrees): Angle of test to alloy roll direction
df_ruff: Metal data from Ruff (1984). Columns:
part: Part identifier
TYS: Tensile Yield Stress (ksi)
TUS: Tensile Ultimate Stress (ksi)
thickness: Part thickness (in)
df_trajectory_full: Simulated trajectory data. Columns:
t: Time since projectile launch (seconds)
x: Projectile range (meters)
y: Projectile height (meters)
df_shewhart: Aluminum die cast data from Table 3 in Shewhart (1931)
specimen: Specimen identifier
tensile_strength: Specimen tensile strength (psi)
hardness: Specimen hardness (Rockwell's "E")
density: Specimen density (gm/cm^3)
References:
Stang, Greenspan, and Newman, "Poisson's ratio of some structural alloys for
large strains" (1946) U.S. Department of Commerce National Bureau of Standards
Ruff, Paul E. An Overview of the MIL-HDBK-5 Program. BATTELLE COLUMBUS DIV
OH, 1984.
Shewhart *Economic Control of Quality of Manufactured Product* (1931) D. Van Nostrand Company, Inc.
"""
from .datasets import *
| 21.131579 | 103 | 0.638854 |
acf38cbbb9f933e28330016452ac439553603eff | 273 | py | Python | nmtlab/dataset/__init__.py | raphael-deeplearning/nlp | fbd18409bef7997d4c78a1632f4fab7b48710bd6 | [
"MIT"
] | 27 | 2018-07-31T05:25:05.000Z | 2021-09-28T11:26:55.000Z | nmtlab/dataset/__init__.py | raphael-deeplearning/nlp | fbd18409bef7997d4c78a1632f4fab7b48710bd6 | [
"MIT"
] | null | null | null | nmtlab/dataset/__init__.py | raphael-deeplearning/nlp | fbd18409bef7997d4c78a1632f4fab7b48710bd6 | [
"MIT"
] | 9 | 2018-09-20T19:44:53.000Z | 2020-12-09T09:51:56.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .base import Dataset
from .mt_dataset import MTDataset
from .transformer_dataset import FastTransformerDataset
| 24.818182 | 55 | 0.81685 |
acf38d04796956e512fe7f8fe1ec4ac2b8a02b31 | 1,612 | py | Python | bravado_core/exception.py | Adarnof/bravado-core | 765527566933a8e5da46f8459e2d3cc48ad69072 | [
"BSD-3-Clause"
] | null | null | null | bravado_core/exception.py | Adarnof/bravado-core | 765527566933a8e5da46f8459e2d3cc48ad69072 | [
"BSD-3-Clause"
] | null | null | null | bravado_core/exception.py | Adarnof/bravado-core | 765527566933a8e5da46f8459e2d3cc48ad69072 | [
"BSD-3-Clause"
] | 1 | 2022-03-26T12:14:35.000Z | 2022-03-26T12:14:35.000Z | # -*- coding: utf-8 -*-
import sys
import six
# Exception hierarchy:
#   SwaggerError
#   +-- SwaggerMappingError
#   |   +-- MatchingResponseNotFound
#   |   +-- SwaggerValidationError
#   |       +-- SwaggerSecurityValidationError
#   +-- SwaggerSchemaError
class SwaggerError(Exception):
    """Base exception class which all bravado-core specific exceptions
    inherit from.
    """


class SwaggerMappingError(SwaggerError):
    """Raised when an error is encountered during processing of a request or
    a response.
    """


class MatchingResponseNotFound(SwaggerMappingError):
    """Raised when an incoming or outgoing response cannot be matched to a
    documented response in the swagger spec.
    """


class SwaggerValidationError(SwaggerMappingError):
    """Raised when an error is encountered during validating user defined
    format values in a request or a response.
    """


class SwaggerSchemaError(SwaggerError):
    """Raised when an error is encountered during processing of a SwaggerSchema.
    """


class SwaggerSecurityValidationError(SwaggerValidationError):
    """Raised when an error is encountered during processing of
    security related Swagger definitions.
    """
def wrap_exception(exception_class):
    """Helper decorator method to modify the raised exception class to
    `exception_class` but keeps the message and trace intact.

    :param exception_class: class to wrap raised exception with
    """
    # Local import keeps this fix self-contained in the decorator.
    import functools

    def generic_exception(method):
        # functools.wraps preserves the wrapped function's name/docstring,
        # which the original implementation lost.
        @functools.wraps(method)
        def wrapper(*args, **kwargs):
            try:
                return method(*args, **kwargs)
            except Exception as e:
                # Re-raise as `exception_class`, keeping the original
                # traceback (py2/py3 compatible via six).
                six.reraise(
                    exception_class,
                    exception_class(str(e)),
                    sys.exc_info()[2])
        return wrapper
    return generic_exception
| 27.322034 | 80 | 0.679901 |
acf38de7fa4a013952def6b72f87ef7bed174058 | 10,626 | py | Python | src/training/hugectr/task.py | jarokaz/nvidia-merlin-on-vertex | f40fe997108395a310b9a9ec4d97110967d725ac | [
"Apache-2.0"
] | 1 | 2021-11-16T13:11:38.000Z | 2021-11-16T13:11:38.000Z | src/training/hugectr/task.py | js-ts/nvidia-merlin-on-vertex | 07610f5c1979d105598fb3a1fe51e78d4d2d6f8f | [
"Apache-2.0"
] | null | null | null | src/training/hugectr/task.py | js-ts/nvidia-merlin-on-vertex | 07610f5c1979d105598fb3a1fe51e78d4d2d6f8f | [
"Apache-2.0"
] | 1 | 2021-11-16T13:11:41.000Z | 2021-11-16T13:11:41.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DeepFM Network trainer."""
import argparse
import json
import logging
import os
import time
import hugectr
from hugectr.inference import CreateInferenceSession
from hugectr.inference import InferenceParams
import hypertune
from model import create_model
import utils
SNAPSHOT_DIR = 'snapshots'
HYPERTUNE_METRIC_NAME = 'AUC'
LOCAL_MODEL_DIR = '/tmp/saved_model'
LOCAL_CHECKPOINT_DIR = '/tmp/checkpoints'


def _gcs_to_fuse_path(path):
  """Maps a gs://bucket/... URI to its /gcs/bucket/... local mount path.

  Vertex AI exposes Cloud Storage under /gcs, so gs:// URIs are usable as
  local paths after this translation.  Only the URI scheme prefix is
  rewritten (unlike str.replace, which would also rewrite any later
  occurrence of 'gs://' inside the path).  Non-GCS paths pass through
  unchanged.
  """
  if path.startswith('gs://'):
    return '/gcs/' + path[len('gs://'):]
  return path


def set_job_dirs():
  """Sets job directories based on env variables set by Vertex AI.

  Returns:
    Tuple (model_dir, checkpoint_dir) of local filesystem paths, falling
    back to the LOCAL_* defaults when the AIP_* variables are unset.
  """
  model_dir = _gcs_to_fuse_path(os.getenv('AIP_MODEL_DIR', LOCAL_MODEL_DIR))
  checkpoint_dir = _gcs_to_fuse_path(
      os.getenv('AIP_CHECKPOINT_DIR', LOCAL_CHECKPOINT_DIR))
  return model_dir, checkpoint_dir
def save_model(model, model_name, model_dir):
  """Persists a trained HugeCTR model.

  Writes the model parameters under `<model_dir>/<model_name>*` and the
  model graph to `<model_dir>/<model_name>.json`.
  """
  params_prefix = os.path.join(model_dir, model_name)
  graph_file = os.path.join(model_dir, f'{model_name}.json')

  logging.info('Saving model parameters to: %s', params_prefix)
  model.save_params_to_files(prefix=params_prefix)

  logging.info('Saving model graph to: %s', graph_file)
  model.graph_to_json(graph_config_file=graph_file)
def evaluate_model(
    model_name,
    model_dir,
    eval_data_source,
    num_batches,
    slot_size_array,
    max_batchsize=2048,
    hit_rate_threshold=0.6,
    device_id=0,
    use_gpu_embedding_cache=True,
    cache_size_percentage=0.6,
    i64_input_key=True):
  """Evaluates a model on a validation dataset.

  Loads the model artifacts written by `save_model` into a HugeCTR
  inference session and runs evaluation over `num_batches` batches of the
  Parquet dataset at `eval_data_source`.  Returns the evaluation result
  produced by InferenceSession.evaluate (the AUC metric, judging by how
  the caller reports it -- confirm against the HugeCTR docs).
  """
  # Artifact filenames follow HugeCTR's save convention:
  # '<name>_dense_0.model' and '<name>0_sparse_0.model'.
  dense_model_file = os.path.join(model_dir,
                                  f'{model_name}_dense_0.model')
  sparse_model_files = [os.path.join(model_dir,
                                     f'{model_name}0_sparse_0.model')]

  inference_params = InferenceParams(
      model_name=model_name,
      max_batchsize=max_batchsize,
      hit_rate_threshold=hit_rate_threshold,
      dense_model_file=dense_model_file,
      sparse_model_files=sparse_model_files,
      device_id=device_id,
      use_gpu_embedding_cache=use_gpu_embedding_cache,
      cache_size_percentage=cache_size_percentage,
      i64_input_key=i64_input_key)

  # The model graph JSON is also produced by `save_model`.
  model_config_path = os.path.join(model_dir, f'{model_name}.json')
  inference_session = CreateInferenceSession(
      model_config_path=model_config_path,
      inference_params=inference_params)

  eval_results = inference_session.evaluate(
      num_batches=num_batches,
      source=eval_data_source,
      data_reader_type=hugectr.DataReaderType_t.Parquet,
      check_type=hugectr.Check_t.Non,
      slot_size_array=slot_size_array)

  return eval_results
def main(args):
  """Runs a training loop.

  Trains the DeepFM model described by `args`, saves it to the Vertex AI
  job directories, runs a final evaluation, and reports the metric to the
  Vertex hypertuner.
  """
  # Iteration-based training (repeat the dataset) is used when no epoch
  # count is given; otherwise train for args.num_epochs epochs.
  repeat_dataset = False if args.num_epochs > 0 else True

  model_dir, snapshot_dir = set_job_dirs()
  # Global batch size scales with the total number of GPUs across all nodes.
  num_gpus = sum([len(gpus) for gpus in args.gpus])
  batch_size = num_gpus * args.per_gpu_batch_size

  model = create_model(train_data=[args.train_data],
                       valid_data=args.valid_data,
                       max_eval_batches=args.max_eval_batches,
                       dropout_rate=args.dropout_rate,
                       num_dense_features=args.num_dense_features,
                       num_sparse_features=args.num_sparse_features,
                       num_workers=args.num_workers,
                       slot_size_array=args.slot_size_array,
                       nnz_per_slot=args.nnz_per_slot,
                       batchsize=batch_size,
                       lr=args.lr,
                       gpus=args.gpus,
                       repeat_dataset=repeat_dataset)
  model.summary()

  logging.info('Starting model training')
  model.fit(num_epochs=args.num_epochs,
            max_iter=args.max_iter,
            display=args.display_interval,
            eval_interval=args.eval_interval,
            snapshot=args.snapshot_interval,
            snapshot_prefix=os.path.join(snapshot_dir, args.model_name))

  logging.info('Saving model')
  save_model(model, args.model_name, model_dir)

  # Final evaluation of the saved artifacts via a HugeCTR inference session.
  logging.info('Starting model evaluation using %s batches ...',
               args.eval_batches)
  metric_value = evaluate_model(model_name=args.model_name,
                                model_dir=model_dir,
                                eval_data_source=args.valid_data,
                                num_batches=args.eval_batches,
                                device_id=0,
                                max_batchsize=args.per_gpu_batch_size,
                                slot_size_array=args.slot_size_array)
  logging.info('%s on the evaluation dataset: %s',
               HYPERTUNE_METRIC_NAME, metric_value)

  # Report AUC to Vertex hypertuner
  logging.info('Reporting %s metric at %s to Vertex hypertuner',
               HYPERTUNE_METRIC_NAME, metric_value)
  hpt = hypertune.HyperTune()
  hpt.report_hyperparameter_tuning_metric(
      hyperparameter_metric_tag=HYPERTUNE_METRIC_NAME,
      metric_value=metric_value,
      global_step=args.max_iter if repeat_dataset else args.num_epochs)
def parse_args():
  """Parses command line arguments.

  Returns:
    argparse.Namespace with the training configuration.  Note that
    `--gpus` is a JSON string (e.g. '[[0,1]]') decoded by the caller, and
    `slot_size_array` is attached to the namespace later from the schema.
  """

  parser = argparse.ArgumentParser()

  # Model / data locations.
  parser.add_argument('--model_name',
                      type=str,
                      required=False,
                      default='deepfm',
                      help='Model Name.')
  parser.add_argument('-t',
                      '--train_data',
                      type=str,
                      required=True,
                      help='Path to training data _file_list.txt')
  parser.add_argument('-v',
                      '--valid_data',
                      type=str,
                      required=True,
                      help='Path to validation data _file_list.txt')
  parser.add_argument('--schema',
                      type=str,
                      required=True,
                      help='Path to the schema.pbtxt file')
  # Network hyperparameters.
  parser.add_argument('--dropout_rate',
                      type=float,
                      required=False,
                      default=0.5,
                      help='Dropout rate')
  parser.add_argument('--num_dense_features',
                      type=int,
                      required=False,
                      default=13,
                      help='Number of dense features')
  parser.add_argument('--num_sparse_features',
                      type=int,
                      required=False,
                      default=26,
                      help='Number of sparse features')
  parser.add_argument('--nnz_per_slot',
                      type=int,
                      required=False,
                      default=2,
                      help='NNZ per slot')
  parser.add_argument('--lr',
                      type=float,
                      required=False,
                      default=0.001,
                      help='Learning rate')
  # Training-loop control.
  parser.add_argument('-i',
                      '--max_iter',
                      type=int,
                      required=False,
                      default=0,
                      help='Number of training iterations')
  parser.add_argument(
      '--max_eval_batches',
      type=int,
      required=False,
      default=100,
      help='Max eval batches for evaluations during model.fit()')
  parser.add_argument(
      '--eval_batches',
      type=int,
      required=False,
      default=100,
      help='Number of evaluation batches for the final evaluation')
  parser.add_argument('--num_epochs',
                      type=int,
                      required=False,
                      default=1,
                      help='Number of training epochs')
  parser.add_argument('-b',
                      '--per_gpu_batch_size',
                      type=int,
                      required=False,
                      default=2048,
                      help='Per GPU Batch size')
  parser.add_argument(
      '-s',
      '--snapshot_interval',
      type=int,
      required=False,
      default=10000,
      help='Saves a model snapshot after given number of iterations')
  parser.add_argument('--gpus',
                      type=str,
                      required=False,
                      default='[[0]]',
                      help='GPU devices to use for Preprocessing')
  parser.add_argument('-r',
                      '--eval_interval',
                      type=int,
                      required=False,
                      default=1000,
                      help='Run evaluation after given number of iterations')
  parser.add_argument('--display_interval',
                      type=int,
                      required=False,
                      default=100,
                      help='Display progress after given number of iterations')
  parser.add_argument('--workspace_size_per_gpu',
                      type=int,
                      required=False,
                      default=61,
                      help='Workspace size per gpu in MB')
  parser.add_argument('--num_workers',
                      type=int,
                      required=False,
                      default=12,
                      help='Number of workers')

  return parser.parse_args()
if __name__ == '__main__':
  logging.basicConfig(format='%(asctime)s - %(message)s',
                      level=logging.INFO, datefmt='%d-%m-%y %H:%M:%S')

  parsed_args = parse_args()

  # The schema carries the categorical-feature cardinalities that size the
  # embedding tables (one slot per sparse feature).
  logging.info('Extracting cardinalities from schema...')
  cardinalities = utils.retrieve_cardinalities(parsed_args.schema)
  logging.info('Cardinalities are extracted.')
  parsed_args.slot_size_array = [int(cardinality)
                                 for cardinality in cardinalities.values()]
  # --gpus is passed on the command line as a JSON string, e.g. '[[0,1]]'.
  parsed_args.gpus = json.loads(parsed_args.gpus)
  logging.info('Args: %s', parsed_args)

  start_time = time.time()
  logging.info('Starting training')

  main(parsed_args)

  end_time = time.time()
  elapsed_time = end_time - start_time
  logging.info('Training completed. Elapsed time: %s', elapsed_time )
| 35.42 | 79 | 0.594956 |
acf38e18a1b614996556936d6b07e9f84370ca01 | 2,447 | py | Python | caffe2/python/optimizer_context.py | gautamkmr/caffe2 | cde7f21d1e34ec714bc08dbfab945a1ad30e92ff | [
"MIT"
] | 40 | 2017-09-03T13:23:42.000Z | 2021-02-03T23:59:28.000Z | caffe2/python/optimizer_context.py | gautamkmr/caffe2 | cde7f21d1e34ec714bc08dbfab945a1ad30e92ff | [
"MIT"
] | 1 | 2017-12-15T09:49:26.000Z | 2018-01-21T10:54:36.000Z | caffe2/python/optimizer_context.py | gautamkmr/caffe2 | cde7f21d1e34ec714bc08dbfab945a1ad30e92ff | [
"MIT"
] | 6 | 2017-09-01T22:12:44.000Z | 2018-10-12T13:14:25.000Z | ## @package optimizer_context
# Module caffe2.python.optimizer_context
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import context
DEFAULT_OPTIM = 'DEFAULT'
@context.define_context(allow_default=True)
class OptimizerContext(object):
"""
provide context to allow param_info to have different optimizers
"""
def __init__(self):
self._optimizers = {}
self._optimizers_list = []
def _rebuild_optimizers(self):
self._optimizers = {}
for m in self._optimizers_list:
self._optimizers.update(m)
def has_optimizer(self, name):
return name in self._optimizers
def get_optimizer(self, name):
assert self.has_optimizer(name), (
"{} optimizer is not provided!".format(name))
return self._optimizers.get(name)
def push_optimizers(self, optimizers):
# optimizer override is allowed
self._optimizers_list.append(optimizers)
self._optimizers.update(optimizers)
def pop_optimizers(self):
assert len(self._optimizers_list) > 0
self._optimizers_list.pop()
self._rebuild_optimizers()
class UseOptimizer(object):
'''
context class to allow setting the current context.
Example usage with brew:
- with UseOptimizer(optim):
brew.func
- with UseOptimizer({'WEIGHT': weight_optim}):
brew.func
- with UseOptimizer({'DEFAULT': optim, 'BIAS': bias_optim,
'WEIGHT': weight_optim}):
brew.func
- with UseOptimizer(optim1):
brew.func
with UseOptimizer(optim2):
brew.func
Example useage with layer:
optimizers = {'optim1': optim1, 'optim2': optim2}
with Optimizers(optimizers):
optim = OptimizerContext.current().get_optimizer('optim1')
layer(optim=optim)
'''
def __init__(self, optim_or_dict):
if isinstance(optim_or_dict, dict):
self._optimizers = optim_or_dict
else:
self._optimizers = {DEFAULT_OPTIM: optim_or_dict}
def __enter__(self):
OptimizerContext.current().push_optimizers(self._optimizers)
return self
def __exit__(self, type, value, traceback):
OptimizerContext.current().pop_optimizers()
| 29.841463 | 70 | 0.650184 |
acf38e49bf5ed2c199a6ffeddf9deade9a0a5ddf | 1,314 | py | Python | tests/mr_job_where_are_you.py | ukwa/mrjob | 091572e87bc24cc64be40278dd0f5c3617c98d4b | [
"Apache-2.0"
] | 1,538 | 2015-01-02T10:22:17.000Z | 2022-03-29T16:42:33.000Z | tests/mr_job_where_are_you.py | ukwa/mrjob | 091572e87bc24cc64be40278dd0f5c3617c98d4b | [
"Apache-2.0"
] | 1,027 | 2015-01-09T21:30:37.000Z | 2022-02-26T18:21:42.000Z | tests/mr_job_where_are_you.py | ukwa/mrjob | 091572e87bc24cc64be40278dd0f5c3617c98d4b | [
"Apache-2.0"
] | 403 | 2015-01-06T15:49:44.000Z | 2022-03-29T16:42:34.000Z | # Copyright 2011 Yelp
# Copyright 2017 Yelp
# Copyright 2018 Yelp
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import warnings
# PYTHONPATH takes precedence over any sys.path hacks
if os.environ.get('PYTHONPATH'):
sys.path = os.environ['PYTHONPATH'].split(os.pathsep) + sys.path
warnings.simplefilter('ignore')
# noqa: we need these top-level imports to come *after* sys.path hacks
import mrjob # noqa
from mrjob.job import MRJob # noqa
class MRJobWhereAreYou(MRJob):
"""Output what directory the mrjob library is in."""
def mapper(self):
pass
def mapper_final(self):
yield (None, None)
def reducer(self, key, values):
yield (None, os.path.dirname(os.path.realpath(mrjob.__file__)))
if __name__ == '__main__':
MRJobWhereAreYou.run()
| 29.2 | 74 | 0.727549 |
acf38ee003ffc5a9375cc7bff8a3c56459b4341e | 13,863 | py | Python | applied_python/applied_python/lib/python2.7/site-packages/ansible/modules/core/database/mysql/mysql_db.py | mith1979/ansible_automation | 013dfa67c6d91720b787fadb21de574b6e023a26 | [
"Apache-2.0"
] | 1 | 2020-10-14T00:06:54.000Z | 2020-10-14T00:06:54.000Z | applied_python/applied_python/lib/python2.7/site-packages/ansible/modules/core/database/mysql/mysql_db.py | mith1979/ansible_automation | 013dfa67c6d91720b787fadb21de574b6e023a26 | [
"Apache-2.0"
] | null | null | null | applied_python/applied_python/lib/python2.7/site-packages/ansible/modules/core/database/mysql/mysql_db.py | mith1979/ansible_automation | 013dfa67c6d91720b787fadb21de574b6e023a26 | [
"Apache-2.0"
] | 2 | 2015-08-06T07:45:48.000Z | 2017-01-04T17:47:16.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Mark Theunissen <mark.theunissen@gmail.com>
# Sponsored by Four Kitchens http://fourkitchens.com.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: mysql_db
short_description: Add or remove MySQL databases from a remote host.
description:
- Add or remove MySQL databases from a remote host.
version_added: "0.6"
options:
name:
description:
- name of the database to add or remove
required: true
default: null
aliases: [ db ]
login_user:
description:
- The username used to authenticate with
required: false
default: null
login_password:
description:
- The password used to authenticate with
required: false
default: null
login_host:
description:
- Host running the database
required: false
default: localhost
login_port:
description:
- Port of the MySQL server. Requires login_host be defined as other then localhost if login_port is used
required: false
default: 3306
login_unix_socket:
description:
- The path to a Unix domain socket for local connections
required: false
default: null
state:
description:
- The database state
required: false
default: present
choices: [ "present", "absent", "dump", "import" ]
collation:
description:
- Collation mode
required: false
default: null
encoding:
description:
- Encoding mode
required: false
default: null
target:
description:
- Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL
files (C(.sql)) as well as bzip2 (C(.bz2)) and gzip (C(.gz)) compressed files are supported.
required: false
notes:
- Requires the MySQLdb Python package on the remote host. For Ubuntu, this
is as easy as apt-get install python-mysqldb. (See M(apt).)
- Both I(login_password) and I(login_user) are required when you are
passing credentials. If none are present, the module will attempt to read
the credentials from C(~/.my.cnf), and finally fall back to using the MySQL
default login of C(root) with no password.
requirements: [ ConfigParser ]
author: Mark Theunissen
'''
EXAMPLES = '''
# Create a new database with name 'bobdata'
- mysql_db: name=bobdata state=present
# Copy database dump file to remote host and restore it to database 'my_db'
- copy: src=dump.sql.bz2 dest=/tmp
- mysql_db: name=my_db state=import target=/tmp/dump.sql.bz2
'''
import ConfigParser
import os
import pipes
import stat
try:
import MySQLdb
except ImportError:
mysqldb_found = False
else:
mysqldb_found = True
# ===========================================
# MySQL module specific support methods.
#
def db_exists(cursor, db):
res = cursor.execute("SHOW DATABASES LIKE %s", (db.replace("_","\_"),))
return bool(res)
def db_delete(cursor, db):
query = "DROP DATABASE %s" % mysql_quote_identifier(db, 'database')
cursor.execute(query)
return True
def db_dump(module, host, user, password, db_name, target, port, socket=None):
cmd = module.get_bin_path('mysqldump', True)
cmd += " --quick --user=%s --password=%s" % (pipes.quote(user), pipes.quote(password))
if socket is not None:
cmd += " --socket=%s" % pipes.quote(socket)
else:
cmd += " --host=%s --port=%i" % (pipes.quote(host), port)
cmd += " %s" % pipes.quote(db_name)
if os.path.splitext(target)[-1] == '.gz':
cmd = cmd + ' | gzip > ' + pipes.quote(target)
elif os.path.splitext(target)[-1] == '.bz2':
cmd = cmd + ' | bzip2 > ' + pipes.quote(target)
else:
cmd += " > %s" % pipes.quote(target)
rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
return rc, stdout, stderr
def db_import(module, host, user, password, db_name, target, port, socket=None):
if not os.path.exists(target):
return module.fail_json(msg="target %s does not exist on the host" % target)
cmd = module.get_bin_path('mysql', True)
cmd += " --user=%s --password=%s" % (pipes.quote(user), pipes.quote(password))
if socket is not None:
cmd += " --socket=%s" % pipes.quote(socket)
else:
cmd += " --host=%s --port=%i" % (pipes.quote(host), port)
cmd += " -D %s" % pipes.quote(db_name)
if os.path.splitext(target)[-1] == '.gz':
gzip_path = module.get_bin_path('gzip')
if not gzip_path:
module.fail_json(msg="gzip command not found")
#gzip -d file (uncompress)
rc, stdout, stderr = module.run_command('%s -d %s' % (gzip_path, target))
if rc != 0:
return rc, stdout, stderr
#Import sql
cmd += " < %s" % pipes.quote(os.path.splitext(target)[0])
try:
rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
if rc != 0:
return rc, stdout, stderr
finally:
#gzip file back up
module.run_command('%s %s' % (gzip_path, os.path.splitext(target)[0]))
elif os.path.splitext(target)[-1] == '.bz2':
bzip2_path = module.get_bin_path('bzip2')
if not bzip2_path:
module.fail_json(msg="bzip2 command not found")
#bzip2 -d file (uncompress)
rc, stdout, stderr = module.run_command('%s -d %s' % (bzip2_path, target))
if rc != 0:
return rc, stdout, stderr
#Import sql
cmd += " < %s" % pipes.quote(os.path.splitext(target)[0])
try:
rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
if rc != 0:
return rc, stdout, stderr
finally:
#bzip2 file back up
rc, stdout, stderr = module.run_command('%s %s' % (bzip2_path, os.path.splitext(target)[0]))
else:
cmd += " < %s" % pipes.quote(target)
rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
return rc, stdout, stderr
def db_create(cursor, db, encoding, collation):
query_params = dict(enc=encoding, collate=collation)
query = ['CREATE DATABASE %s' % mysql_quote_identifier(db, 'database')]
if encoding:
query.append("CHARACTER SET %(enc)s")
if collation:
query.append("COLLATE %(collate)s")
query = ' '.join(query)
res = cursor.execute(query, query_params)
return True
def strip_quotes(s):
""" Remove surrounding single or double quotes
>>> print strip_quotes('hello')
hello
>>> print strip_quotes('"hello"')
hello
>>> print strip_quotes("'hello'")
hello
>>> print strip_quotes("'hello")
'hello
"""
single_quote = "'"
double_quote = '"'
if s.startswith(single_quote) and s.endswith(single_quote):
s = s.strip(single_quote)
elif s.startswith(double_quote) and s.endswith(double_quote):
s = s.strip(double_quote)
return s
def config_get(config, section, option):
""" Calls ConfigParser.get and strips quotes
See: http://dev.mysql.com/doc/refman/5.0/en/option-files.html
"""
return strip_quotes(config.get(section, option))
def load_mycnf():
config = ConfigParser.RawConfigParser()
mycnf = os.path.expanduser('~/.my.cnf')
if not os.path.exists(mycnf):
return False
try:
config.readfp(open(mycnf))
except (IOError):
return False
# We support two forms of passwords in .my.cnf, both pass= and password=,
# as these are both supported by MySQL.
try:
passwd = config_get(config, 'client', 'password')
except (ConfigParser.NoOptionError):
try:
passwd = config_get(config, 'client', 'pass')
except (ConfigParser.NoOptionError):
return False
try:
creds = dict(user=config_get(config, 'client', 'user'),passwd=passwd)
except (ConfigParser.NoOptionError):
return False
return creds
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec = dict(
login_user=dict(default=None),
login_password=dict(default=None),
login_host=dict(default="localhost"),
login_port=dict(default=3306, type='int'),
login_unix_socket=dict(default=None),
name=dict(required=True, aliases=['db']),
encoding=dict(default=""),
collation=dict(default=""),
target=dict(default=None),
state=dict(default="present", choices=["absent", "present","dump", "import"]),
)
)
if not mysqldb_found:
module.fail_json(msg="the python mysqldb module is required")
db = module.params["name"]
encoding = module.params["encoding"]
collation = module.params["collation"]
state = module.params["state"]
target = module.params["target"]
socket = module.params["login_unix_socket"]
login_port = module.params["login_port"]
if login_port < 0 or login_port > 65535:
module.fail_json(msg="login_port must be a valid unix port number (0-65535)")
# make sure the target path is expanded for ~ and $HOME
if target is not None:
target = os.path.expandvars(os.path.expanduser(target))
# Either the caller passes both a username and password with which to connect to
# mysql, or they pass neither and allow this module to read the credentials from
# ~/.my.cnf.
login_password = module.params["login_password"]
login_user = module.params["login_user"]
if login_user is None and login_password is None:
mycnf_creds = load_mycnf()
if mycnf_creds is False:
login_user = "root"
login_password = ""
else:
login_user = mycnf_creds["user"]
login_password = mycnf_creds["passwd"]
elif login_password is None or login_user is None:
module.fail_json(msg="when supplying login arguments, both login_user and login_password must be provided")
login_host = module.params["login_host"]
if state in ['dump','import']:
if target is None:
module.fail_json(msg="with state=%s target is required" % (state))
connect_to_db = db
else:
connect_to_db = ''
try:
if socket:
try:
socketmode = os.stat(socket).st_mode
if not stat.S_ISSOCK(socketmode):
module.fail_json(msg="%s, is not a socket, unable to connect" % socket)
except OSError:
module.fail_json(msg="%s, does not exist, unable to connect" % socket)
db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=socket, user=login_user, passwd=login_password, db=connect_to_db)
elif login_port != 3306 and module.params["login_host"] == "localhost":
module.fail_json(msg="login_host is required when login_port is defined, login_host cannot be localhost when login_port is defined")
else:
db_connection = MySQLdb.connect(host=module.params["login_host"], port=login_port, user=login_user, passwd=login_password, db=connect_to_db)
cursor = db_connection.cursor()
except Exception, e:
if "Unknown database" in str(e):
errno, errstr = e.args
module.fail_json(msg="ERROR: %s %s" % (errno, errstr))
else:
module.fail_json(msg="unable to connect, check login credentials (login_user, and login_password, which can be defined in ~/.my.cnf), check that mysql socket exists and mysql server is running")
changed = False
if db_exists(cursor, db):
if state == "absent":
try:
changed = db_delete(cursor, db)
except Exception, e:
module.fail_json(msg="error deleting database: " + str(e))
elif state == "dump":
rc, stdout, stderr = db_dump(module, login_host, login_user,
login_password, db, target,
port=login_port,
socket=module.params['login_unix_socket'])
if rc != 0:
module.fail_json(msg="%s" % stderr)
else:
module.exit_json(changed=True, db=db, msg=stdout)
elif state == "import":
rc, stdout, stderr = db_import(module, login_host, login_user,
login_password, db, target,
port=login_port,
socket=module.params['login_unix_socket'])
if rc != 0:
module.fail_json(msg="%s" % stderr)
else:
module.exit_json(changed=True, db=db, msg=stdout)
else:
if state == "present":
try:
changed = db_create(cursor, db, encoding, collation)
except Exception, e:
module.fail_json(msg="error creating database: " + str(e))
module.exit_json(changed=changed, db=db)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.database import *
if __name__ == '__main__':
main()
| 36.577836 | 210 | 0.619635 |
acf38feb5db1ee580a2d13cae7e2e748abae7b2a | 358 | py | Python | capstone_project/my_api/urls.py | foreverals2002/capstone_project | 67e61b6dd349266a861d0e6ed552c890d87db2f2 | [
"MIT"
] | null | null | null | capstone_project/my_api/urls.py | foreverals2002/capstone_project | 67e61b6dd349266a861d0e6ed552c890d87db2f2 | [
"MIT"
] | null | null | null | capstone_project/my_api/urls.py | foreverals2002/capstone_project | 67e61b6dd349266a861d0e6ed552c890d87db2f2 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.urls import path, include
from my_api import views
urlpatterns = [
path('hello/', views.hello),
path(r'morning/<int:test>/', views.morning),
path('testjson/', views.TestJson.as_view()),
path('files/', views.FileUpload.as_view()),
path('account/', views.UserProfile.as_view()),
]
| 27.538462 | 51 | 0.664804 |
acf38ff1d32544a2df41737aaaa85840f238d18d | 3,885 | py | Python | project/settings.py | saberbill/hudoudata | 8e8ba7e1daff04a06cae48910adea2011f546455 | [
"CC0-1.0"
] | null | null | null | project/settings.py | saberbill/hudoudata | 8e8ba7e1daff04a06cae48910adea2011f546455 | [
"CC0-1.0"
] | null | null | null | project/settings.py | saberbill/hudoudata | 8e8ba7e1daff04a06cae48910adea2011f546455 | [
"CC0-1.0"
] | null | null | null | """
Django settings for this project.
Generated by 'django-admin startproject' using Django 1.11.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# The SECRET_KEY is provided via an environment variable in OpenShift
SECRET_KEY = os.getenv(
'DJANGO_SECRET_KEY',
# safe value used for development when DJANGO_SECRET_KEY might not be set
'9e4@&tw46$l31)zrqe3wi+-slqm(ruvz&se0^%9#6(_w3ui!c0'
)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'debug_toolbar',
'django_apscheduler',
'hudou',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
'hudou.interceptor.GeneralInvterceptor'
]
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
from . import database
DATABASES = {
'default': database.config()
}
'''
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'hudou',
'USER':'hudou_user',
'PASSWORD':'iforgot',
'HOST':'127.0.0.1',
'PORT':'5432',
'CONN_MAX_AGE': 30,
}
}
'''
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'zh-CN'
#TIME_ZONE = 'UTC'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
INTERNAL_IPS = ['127.0.0.1']
| 25.728477 | 91 | 0.694723 |
acf390132e4cd351be48200b8c7be461b22a99a4 | 313 | py | Python | recml/urls.py | 50mkw/mysite | 9767208b81e029f3896467570516f19f26cb0f3f | [
"Apache-2.0"
] | null | null | null | recml/urls.py | 50mkw/mysite | 9767208b81e029f3896467570516f19f26cb0f3f | [
"Apache-2.0"
] | 18 | 2020-01-28T22:51:22.000Z | 2022-02-10T13:45:30.000Z | recml/urls.py | 50mkw/mysite | 9767208b81e029f3896467570516f19f26cb0f3f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@Time : 2019/6/2 9:15 PM
@Author : Patrick Yang
@Email : 490286710@qq.com
@File : urls.py
@Software : PyCharm
"""
from django.urls import path
from . import views
app_name = 'recml'
urlpatterns = [
path('', views.IndexView.as_view(), name='index')
] | 17.388889 | 53 | 0.584665 |
acf39068990bc069aaf67d9d87c8682010473d54 | 1,956 | py | Python | tests/broker/test_del_campus.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 7 | 2015-07-31T05:57:30.000Z | 2021-09-07T15:18:56.000Z | tests/broker/test_del_campus.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 115 | 2015-03-03T13:11:46.000Z | 2021-09-20T12:42:24.000Z | tests/broker/test_del_campus.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 13 | 2015-03-03T11:17:59.000Z | 2021-09-09T09:16:41.000Z | #!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the del campus command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestDelCampus(TestBrokerCommand):
def testdelte(self):
self.dsdb_expect_del_campus("ta")
command = "del campus --campus ta"
self.noouttest(command.split(" "))
self.dsdb_verify()
def testverifydelte(self):
command = "show campus --campus ta"
self.notfoundtest(command.split(" "))
def testdelbunotindsdb(self):
# add campus
test_campus = "bz"
self.dsdb_expect_add_campus(test_campus)
command = ["add", "campus", "--campus", test_campus, "--country", "us"]
self.successtest(command)
self.dsdb_verify()
errstr = "campus %s doesn't exist" % test_campus
self.dsdb_expect_del_campus(test_campus, fail=True, errstr=errstr)
command = "del campus --campus %s" % test_campus
self.statustest(command.split(" "))
self.dsdb_verify()
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestDelCampus)
unittest.TextTestRunner(verbosity=2).run(suite)
| 33.152542 | 79 | 0.692229 |
acf39163d5f00ca8748be8b5e6fc9935c3c08fba | 502 | py | Python | riccipy/metrics/schwarzschild_4.py | cjayross/riccipy | 2cc0ca5e1aa4af91b203b3ff2bb1effd7d2f4846 | [
"MIT"
] | 4 | 2019-08-17T04:28:06.000Z | 2021-01-02T15:19:18.000Z | riccipy/metrics/schwarzschild_4.py | grdbii/riccipy | 2cc0ca5e1aa4af91b203b3ff2bb1effd7d2f4846 | [
"MIT"
] | 3 | 2019-08-02T04:07:43.000Z | 2020-06-18T07:49:38.000Z | riccipy/metrics/schwarzschild_4.py | grdbii/riccipy | 2cc0ca5e1aa4af91b203b3ff2bb1effd7d2f4846 | [
"MIT"
] | null | null | null | """
Name: Schwarzschild
Coordinates: Spherical
Symmetry:
- Spherical
- Static
Notes: Isotropic Coordinates
"""
from sympy import Rational, diag, sin, symbols
coords = symbols("t r theta phi", real=True)
variables = symbols("M", constant=True)
functions = ()
t, r, th, ph = coords
M = variables
expr = (1 + Rational(1, 2) * M / r) ** 4
metric = diag(
-((1 - Rational(1, 2) * M / r) ** 2) / (1 + Rational(1, 2) * M / r) ** 2,
expr,
expr * r ** 2,
expr * r ** 2 * sin(th) ** 2,
)
| 21.826087 | 77 | 0.579681 |
acf393a39fc6e8ff9e95929a81e5a1e31ba5d23c | 27,198 | py | Python | graphia/testing_scripts.py | by256/graphia | 280c7f251bcab9081bc56b5fe4dc358de5d9c1dd | [
"MIT"
] | 3 | 2020-03-02T14:39:15.000Z | 2020-03-11T21:49:30.000Z | graphia/testing_scripts.py | by256/graphia | 280c7f251bcab9081bc56b5fe4dc358de5d9c1dd | [
"MIT"
] | null | null | null | graphia/testing_scripts.py | by256/graphia | 280c7f251bcab9081bc56b5fe4dc358de5d9c1dd | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
from utils import degree_matrix
from datasets import Cora, UVvis
from layers import SpectralGraphConv, GAT, MultiHeadGAT, GIN, ARMAConv, GatedGraphConv, GraphSAGE
from pooling import GlobalMaxPooling, GlobalSumPooling, GlobalAvePooling, DiffPool, MinCutPooling, TopKPooling
def test_uvvis_dataset():
uvvis = UVvis(masked=True)
x, node_masks, A, y = uvvis[0]
print(x.shape, A.shape)
gc = SpectralGraphConv(in_features=121, out_features=64)
out = gc(A, x, node_masks)
class ToyModel(nn.Module):
def __init__(self):
super(ToyModel, self).__init__()
self.gc1 = SpectralGraphConv(in_features=121, out_features=64)
self.gc2 = SpectralGraphConv(in_features=64, out_features=64)
self.gc3 = SpectralGraphConv(in_features=64, out_features=64)
self.pooling = GlobalMaxPooling()
self.linear = nn.Linear(64, 1)
self.relu = nn.ReLU()
def forward(self, A, x, masks):
x = self.relu(self.gc1(A, x))
x = x * masks[:, :, :x.shape[-1]]
x = self.relu(self.gc2(A, x))
x = x * masks[:, :, :x.shape[-1]]
x = self.relu(self.gc3(A, x))
x = x * masks[:, :, :x.shape[-1]]
x = self.pooling(x)
x = self.linear(x)
return x
def test_kipf_conv():
train_uvvis = UVvis(masked=True)
train_uvvis.df = train_uvvis.df.iloc[:1024, :]
val_uvvis = UVvis(masked=True)
val_uvvis.df = val_uvvis.df.iloc[1024:1280, :]
train_loader = DataLoader(train_uvvis, batch_size=64)
val_loader = DataLoader(val_uvvis, batch_size=64)
model = ToyModel()
optimizer = optim.Adam(model.parameters())
MSE = nn.MSELoss()
print(model, '\n')
for epoch in range(256):
train_losses = []
train_metrics = []
model.train()
for idx, (A, x, masks, y_true) in enumerate(train_loader):
optimizer.zero_grad()
y_pred = model(A, x, masks)
loss = MSE(y_true, y_pred)
train_losses.append(loss.item())
train_metrics.append(nn.L1Loss()(y_true, y_pred).item())
loss.backward()
optimizer.step()
val_losses = []
val_metrics = []
model.eval()
for idx, (A, x, masks, y_true) in enumerate(val_loader):
y_pred = model(A, x, masks)
val_losses.append(MSE(y_true, y_pred).item())
val_metrics.append(nn.L1Loss()(y_true, y_pred).item())
print('Epoch: {} Loss: {:.4f} Train MAE: {:.4f} Val Loss: {:.4f} Val MAE: {:.4f}'.format(epoch+1, np.mean(train_losses), np.mean(train_metrics), np.mean(val_losses), np.mean(val_metrics)))
class ToyDiffPoolModel(nn.Module):
def __init__(self):
super(ToyDiffPoolModel, self).__init__()
self.gc1 = SpectralGraphConv(in_features=121, out_features=64)
self.diffpool_gc_embedding_1 = SpectralGraphConv(in_features=121, out_features=64, bias=False)
self.diffpool_gc_pooling_1 = SpectralGraphConv(in_features=121, out_features=32, bias=False)
self.diffpool_1 = DiffPool(self.diffpool_gc_embedding_1, self.diffpool_gc_pooling_1)
self.gc2 = SpectralGraphConv(in_features=64, out_features=64)
self.diffpool_gc_embedding_2 = SpectralGraphConv(in_features=64, out_features=64, bias=False)
self.diffpool_gc_pooling_2 = SpectralGraphConv(in_features=64, out_features=16, bias=False)
self.diffpool_2 = DiffPool(self.diffpool_gc_embedding_2, self.diffpool_gc_pooling_2)
self.gc3 = SpectralGraphConv(in_features=64, out_features=64)
self.diffpool_gc_embedding_3 = SpectralGraphConv(in_features=64, out_features=64, bias=False)
self.diffpool_gc_pooling_3 = SpectralGraphConv(in_features=64, out_features=1, bias=False)
self.diffpool_3 = DiffPool(self.diffpool_gc_embedding_3, self.diffpool_gc_pooling_3)
self.linear = nn.Linear(64, 1)
self.relu = nn.ReLU()
self.pooling = GlobalMaxPooling()
def forward(self, A, x, masks):
# x = self.relu(self.gc1(A, x))
# x = x * masks[:, :, :x.shape[-1]]
# print(masks[0], '\n\n')
# x = self.relu(self.gc2(A, x))
# x = x * masks[:, :, :x.shape[-1]]
# x = self.relu(self.gc3(A, x))
# x = x * masks[:, :, :x.shape[-1]]
# print('x', x.shape)
A, x = self.diffpool_1(A, [A, x], [A, x])
# x = x.squeeze()
# print('x', x.shape, 'A', A.shape)
# x = self.pooling(x)
# print('x', x.shape)
# x = self.relu(x)
# x = self.relu(self.gc2(A, x))
A, x = self.diffpool_2(A, [A, x], [A, x])
# x = self.relu(x)
# x = self.relu(self.gc3(A, x))
A, x = self.diffpool_3(A, [A, x], [A, x])
x = x.squeeze()
# x = self.relu(x)
x = self.linear(x)
# print('out', x.shape)
# print('\n\n')
return x
def test_diffpool():
train_uvvis = UVvis(masked=True)
train_uvvis.df = train_uvvis.df.iloc[:4096, :]
mu, sigma = np.mean(train_uvvis.df['computational']), np.std(train_uvvis.df['computational'])
train_uvvis.df['computational'] = (train_uvvis.df['computational'] - mu) / sigma
val_uvvis = UVvis(masked=True)
val_uvvis.df = val_uvvis.df.iloc[4096:4096+256, :]
val_uvvis.df['computational'] = (val_uvvis.df['computational'] - mu) / sigma
train_loader = DataLoader(train_uvvis, batch_size=64)
val_loader = DataLoader(val_uvvis, batch_size=64)
model = ToyDiffPoolModel()
optimizer = optim.Adam(model.parameters(), lr=1e-2)
MSE = nn.MSELoss()
print(model, '\n')
for epoch in range(256):
train_losses = []
train_metrics = []
model.train()
for idx, (A, x, masks, y_true) in enumerate(train_loader):
# print('y_true', y_true.shape)
optimizer.zero_grad()
y_pred = model(A, x, masks)
loss = MSE(y_true, y_pred)
y_pred = (y_pred * sigma) + mu
y_true = (y_true * sigma) + mu
train_losses.append(loss.item())
train_metrics.append(nn.L1Loss()(y_true, y_pred).item())
loss.backward()
optimizer.step()
# break
# break
val_losses = []
val_metrics = []
model.eval()
for idx, (A, x, masks, y_true) in enumerate(val_loader):
y_pred = model(A, x, masks)
y_pred = (y_pred * sigma) + mu
y_true = (y_true * sigma) + mu
val_losses.append(MSE(y_true, y_pred).item())
val_metrics.append(nn.L1Loss()(y_true, y_pred).item())
print('Epoch: {} Loss: {:.4f} Train MAE: {:.4f} Val Loss: {:.4f} Val MAE: {:.4f}'.format(epoch+1, np.mean(train_losses), np.mean(train_metrics), np.mean(val_losses), np.mean(val_metrics)))
def get_uvvis_dataloaders(batch_size=4):
train_uvvis = UVvis(masked=True)
train_uvvis.df = train_uvvis.df.iloc[:4096, :]
mu, sigma = np.mean(train_uvvis.df['computational']), np.std(train_uvvis.df['computational'])
train_uvvis.df['computational'] = (train_uvvis.df['computational'] - mu) / sigma
val_uvvis = UVvis(masked=True)
val_uvvis.df = val_uvvis.df.iloc[4096:4096+256, :]
val_uvvis.df['computational'] = (val_uvvis.df['computational'] - mu) / sigma
train_loader = DataLoader(train_uvvis, batch_size=batch_size)
val_loader = DataLoader(val_uvvis, batch_size=batch_size)
return train_uvvis, val_uvvis, train_loader, val_loader
class ToyGATModel(nn.Module):
    """Three stacked multi-head GAT layers, a global max-pool readout,
    and a linear head producing one regression value per graph.

    NOTE(review): layers 2-3 take 192 input features, which suggests the
    first two layers concatenate 3 heads of width 64 — confirm against
    MultiHeadGAT's default head count.
    """

    def __init__(self):
        super(ToyGATModel, self).__init__()
        self.gat1 = MultiHeadGAT(in_features=121, head_out_features=64)
        self.gat2 = MultiHeadGAT(in_features=192, head_out_features=64)
        # Last layer averages its heads back down to 64 features.
        self.gat3 = MultiHeadGAT(in_features=192, head_out_features=64,
                                 multihead_agg='average')
        self.pooling = GlobalMaxPooling()
        self.linear = nn.Linear(64, 1)
        self.relu = nn.ReLU()

    def forward(self, A, x, masks):
        # masks is accepted for API parity with sibling models but unused.
        h = x
        for layer in (self.gat1, self.gat2, self.gat3):
            h = self.relu(layer(A, h))
        return self.linear(self.pooling(h))
def test_GAT():
    """Train ToyGATModel on a UVvis split and log per-epoch metrics.

    Rows [0, 4096) are training data and rows [4096, 4352) validation data;
    targets are z-scored with training statistics and de-normalized before
    computing MAE, so MAE is reported in the original units.

    Fixes over the original: the validation loop now runs under
    torch.no_grad() (the forward passes previously built autograd graphs
    that were never used), and nn.L1Loss is instantiated once instead of
    once per batch.
    """
    n_train = 4096
    n_val = 256
    train_uvvis = UVvis(masked=True)
    train_uvvis.df = train_uvvis.df.iloc[:n_train, :]
    # z-score targets with *training* statistics only.
    mu, sigma = np.mean(train_uvvis.df['computational']), np.std(train_uvvis.df['computational'])
    train_uvvis.df['computational'] = (train_uvvis.df['computational'] - mu) / sigma
    val_uvvis = UVvis(masked=True)
    val_uvvis.df = val_uvvis.df.iloc[n_train:n_train+n_val, :]
    val_uvvis.df['computational'] = (val_uvvis.df['computational'] - mu) / sigma
    train_loader = DataLoader(train_uvvis, batch_size=64)
    val_loader = DataLoader(val_uvvis, batch_size=64)
    model = ToyGATModel()
    optimizer = optim.Adam(model.parameters())
    MSE = nn.MSELoss()
    MAE = nn.L1Loss()  # hoisted: no need to re-instantiate per batch
    print(model, '\n')
    for epoch in range(256):
        train_losses = []
        train_metrics = []
        model.train()
        for idx, (A, x, masks, y_true) in enumerate(train_loader):
            optimizer.zero_grad()
            y_pred = model(A, x, masks)
            # Loss is computed in normalized target space.
            loss = MSE(y_true, y_pred)
            # De-normalize so MAE is reported in physical units.
            y_pred = (y_pred * sigma) + mu
            y_true = (y_true * sigma) + mu
            train_losses.append(loss.item())
            train_metrics.append(MAE(y_true, y_pred).item())
            loss.backward()
            optimizer.step()
        val_losses = []
        val_metrics = []
        model.eval()
        # no_grad avoids building unused autograd graphs during validation.
        with torch.no_grad():
            for idx, (A, x, masks, y_true) in enumerate(val_loader):
                y_pred = model(A, x, masks)
                y_pred = (y_pred * sigma) + mu
                y_true = (y_true * sigma) + mu
                # NOTE(review): val MSE is computed on de-normalized values,
                # unlike the train loss — they are not directly comparable.
                val_losses.append(MSE(y_true, y_pred).item())
                val_metrics.append(MAE(y_true, y_pred).item())
        print('Epoch: {} Loss: {:.4f} Train MAE: {:.4f} Val Loss: {:.4f} Val MAE: {:.4f}'.format(epoch+1, np.mean(train_losses), np.mean(train_metrics), np.mean(val_losses), np.mean(val_metrics)))
class ToyGINModel(nn.Module):
    """Three GIN layers with per-layer node masking, a global sum-pool
    readout, and a linear head for single-value graph regression."""

    def __init__(self):
        super(ToyGINModel, self).__init__()
        self.gin1 = GIN(in_features=121, out_features=64)
        self.gin2 = GIN(in_features=64, out_features=64)
        self.gin3 = GIN(in_features=64, out_features=64)
        self.pooling = GlobalSumPooling()
        self.linear = nn.Linear(64, 1)
        self.relu = nn.ReLU()

    def forward(self, A, x, masks):
        h = x
        for layer in (self.gin1, self.gin2, self.gin3):
            h = self.relu(layer(A, h))
            # Zero padded nodes so they don't contribute to the sum pool.
            h = h * masks[:, :, :h.shape[-1]]
        return self.linear(self.pooling(h))
def test_GIN():
    """Train ToyGINModel on UVvis (4096 train / 256 val rows) for 256 epochs,
    printing normalized train MSE and de-normalized MAE each epoch.

    NOTE(review): the validation loop runs without torch.no_grad(), so it
    builds autograd graphs it never uses — confirm before reusing at scale.
    """
    n_train = 4096
    n_val = 256
    train_uvvis = UVvis(masked=True)
    train_uvvis.df = train_uvvis.df.iloc[:n_train, :]
    # z-score targets with *training* statistics only.
    mu, sigma = np.mean(train_uvvis.df['computational']), np.std(train_uvvis.df['computational'])
    train_uvvis.df['computational'] = (train_uvvis.df['computational'] - mu) / sigma
    val_uvvis = UVvis(masked=True)
    val_uvvis.df = val_uvvis.df.iloc[n_train:n_train+n_val, :]
    val_uvvis.df['computational'] = (val_uvvis.df['computational'] - mu) / sigma
    train_loader = DataLoader(train_uvvis, batch_size=4)
    val_loader = DataLoader(val_uvvis, batch_size=4)
    model = ToyGINModel()
    optimizer = optim.Adam(model.parameters())
    MSE = nn.MSELoss()
    print(model, '\n')
    for epoch in range(256):
        train_losses = []
        train_metrics = []
        model.train()
        for idx, (A, x, masks, y_true) in enumerate(train_loader):
            optimizer.zero_grad()
            y_pred = model(A, x, masks)
            loss = MSE(y_true, y_pred)
            # Undo normalization so MAE is in the original target units.
            y_pred = (y_pred * sigma) + mu
            y_true = (y_true * sigma) + mu
            train_losses.append(loss.item())
            train_metrics.append(nn.L1Loss()(y_true, y_pred).item())
            loss.backward()
            optimizer.step()
            # break
        # break
        val_losses = []
        val_metrics = []
        model.eval()
        for idx, (A, x, masks, y_true) in enumerate(val_loader):
            y_pred = model(A, x, masks)
            y_pred = (y_pred * sigma) + mu
            y_true = (y_true * sigma) + mu
            # NOTE(review): val MSE uses de-normalized values, unlike the
            # train loss above — they are not directly comparable.
            val_losses.append(MSE(y_true, y_pred).item())
            val_metrics.append(nn.L1Loss()(y_true, y_pred).item())
        print('Epoch: {} Loss: {:.4f} Train MAE: {:.4f} Val Loss: {:.4f} Val MAE: {:.4f}'.format(epoch+1, np.mean(train_losses), np.mean(train_metrics), np.mean(val_losses), np.mean(val_metrics)))
class ToyGINCoraModel(nn.Module):
    """Three-layer graph convolution stack for 7-class Cora node
    classification, returning log-probabilities per node.

    The commented-out layer definitions below are alternative convolution
    types (GIN, spectral, GAT) tried during experimentation; ARMAConv is
    the currently active choice.
    """
    def __init__(self):
        super(ToyGINCoraModel, self).__init__()
        # self.gc1 = GIN(in_features=1433, out_features=64)
        # self.gc2 = GIN(in_features=64, out_features=64)
        # self.gc3 = GIN(in_features=64, out_features=7)
        # self.gc1 = SpectralGraphConv(in_features=1433, out_features=64)
        # self.gc2 = SpectralGraphConv(in_features=64, out_features=64)
        # self.gc3 = SpectralGraphConv(in_features=64, out_features=7)
        # self.gc1 = GAT(in_features=1433, out_features=64)
        # self.gc2 = GAT(in_features=64, out_features=64)
        # self.gc3 = GAT(in_features=64, out_features=7)
        self.gc1 = ARMAConv(in_features=1433, out_features=64, timesteps=3)
        self.gc2 = ARMAConv(in_features=64, out_features=64, timesteps=3)
        self.gc3 = ARMAConv(in_features=64, out_features=7, timesteps=3)
        self.relu = nn.ReLU()
    def forward(self, A, x):
        x = self.relu(self.gc1(A, x))
        x = self.relu(self.gc2(A, x))
        # log_softmax over the class dimension (input is batch x nodes x 7).
        x = F.log_softmax(self.gc3(A, x), dim=2)
        return x
def accuracy(output, labels):
    """Fraction of rows in *output* whose argmax class matches *labels*.

    Returns a 0-dim double tensor in [0, 1].
    """
    predictions = output.argmax(dim=1).type_as(labels)
    hits = predictions.eq(labels).double().sum()
    return hits / len(labels)
def test_cora():
    """Full-batch node-classification training of ToyGINCoraModel on Cora
    for 400 epochs, printing NLL loss and accuracy for the train/val splits.
    """
    cora = Cora()
    # Add a leading batch dimension; unsqueeze_ mutates in place.
    A = cora.adj.to_dense().unsqueeze_(0)
    x = cora.features.unsqueeze_(0)
    labels = cora.labels#.unsqueeze_(0)
    idx_train = cora.idx_train
    idx_val = cora.idx_val
    print('A', A.shape)
    print('X', x.shape)
    print('labels', labels.shape)
    model = ToyGINCoraModel()
    optimizer = optim.Adam(model.parameters())
    nll = nn.NLLLoss()
    for epoch in range(400):
        train_losses = []
        train_metrics = []
        model.train()
        optimizer.zero_grad()
        # Model outputs log-probabilities; squeeze drops the batch dim.
        y_pred = model(A, x).squeeze()
        loss = nll(y_pred[idx_train, :], labels[idx_train])
        train_losses.append(loss.item())
        train_metrics.append(accuracy(y_pred[idx_train], labels[idx_train]).item())
        loss.backward()
        optimizer.step()
        val_losses = []
        val_metrics = []
        model.eval()
        # NOTE(review): validation forward runs without torch.no_grad().
        y_pred = model(A, x).squeeze()
        val_losses.append(nll(y_pred[idx_val, :], labels[idx_val]).item())
        val_metrics.append(accuracy(y_pred[idx_val], labels[idx_val]).item())
        print('Epoch: {} Loss: {:.4f} Train Accuracy: {:.4f} Val Loss: {:.4f} Val Accuracy: {:.4f}'.format(epoch+1, np.mean(train_losses), np.mean(train_metrics), np.mean(val_losses), np.mean(val_metrics)))
def test_mincut_cora():
    """Smoke-test MinCutPooling on the (unnormalized) Cora graph: build the
    adjacency/degree/feature tensors, pool once, and evaluate mincut loss."""
    cora = Cora(norm=False)
    A = cora.adj.to_dense().unsqueeze_(0)
    D = torch.diag(A.squeeze().sum(dim=-1))
    x = cora.features.unsqueeze_(0)
    for name, tensor in (('A', A), ('D', D), ('x', x)):
        print(name, tensor.shape)
    print(D)
    # NOTE(review): unsqueeze_ is in-place, so this second call adds another
    # leading dim on top of the first — possibly leftover; confirm intent.
    x = cora.features.unsqueeze_(0)
    mincut = MinCutPooling(1433, 64, 64)
    A, x, S = mincut(A, x)
    mincut.mincut_loss(A, S)
class ToyMinCutModel(nn.Module):
    """Two masked GIN layers followed by MinCut pooling down to a single
    cluster and a linear regression head."""

    def __init__(self):
        super(ToyMinCutModel, self).__init__()
        self.gin1 = GIN(in_features=121, out_features=64)
        self.gin2 = GIN(in_features=64, out_features=64)
        # self.gin3 = GIN(in_features=64, out_features=64)
        self.pooling = MinCutPooling(64, 64, 1)
        self.linear = nn.Linear(64, 1)
        self.relu = nn.ReLU()

    def forward(self, A, x, masks):
        h = self.relu(self.gin1(A, x))
        # Zero padded nodes after each GIN layer.
        h = h * masks[:, :, :h.shape[-1]]
        h = self.relu(self.gin2(A, h))
        h = h * masks[:, :, :h.shape[-1]]
        # MinCut pooling collapses the graph; the head sees pooled features.
        A, h = self.pooling(A, h)
        return self.linear(h)
def test_mincut_uvvis():
    """Train ToyMinCutModel on UVvis (4096 train / 256 val rows) for 256
    epochs, printing normalized train MSE and de-normalized MAE per epoch.

    NOTE(review): the validation loop runs without torch.no_grad().
    """
    n_train = 4096
    n_val = 256
    train_uvvis = UVvis(masked=True)
    train_uvvis.df = train_uvvis.df.iloc[:n_train, :]
    # z-score targets with *training* statistics only.
    mu, sigma = np.mean(train_uvvis.df['computational']), np.std(train_uvvis.df['computational'])
    train_uvvis.df['computational'] = (train_uvvis.df['computational'] - mu) / sigma
    val_uvvis = UVvis(masked=True)
    val_uvvis.df = val_uvvis.df.iloc[n_train:n_train+n_val, :]
    val_uvvis.df['computational'] = (val_uvvis.df['computational'] - mu) / sigma
    train_loader = DataLoader(train_uvvis, batch_size=4)
    val_loader = DataLoader(val_uvvis, batch_size=4)
    model = ToyMinCutModel()
    optimizer = optim.Adam(model.parameters())
    MSE = nn.MSELoss()
    print(model, '\n')
    for epoch in range(256):
        train_losses = []
        train_metrics = []
        model.train()
        for idx, (A, x, masks, y_true) in enumerate(train_loader):
            optimizer.zero_grad()
            # squeeze(-1) drops the trailing singleton output dimension.
            y_pred = model(A, x, masks).squeeze(-1)
            loss = MSE(y_true, y_pred)
            # Undo normalization so MAE is in the original target units.
            y_pred = (y_pred * sigma) + mu
            y_true = (y_true * sigma) + mu
            train_losses.append(loss.item())
            train_metrics.append(nn.L1Loss()(y_true, y_pred).item())
            loss.backward()
            optimizer.step()
            # break
        # break
        val_losses = []
        val_metrics = []
        model.eval()
        for idx, (A, x, masks, y_true) in enumerate(val_loader):
            y_pred = model(A, x, masks).squeeze(-1)
            y_pred = (y_pred * sigma) + mu
            y_true = (y_true * sigma) + mu
            # NOTE(review): val MSE uses de-normalized values, unlike train.
            val_losses.append(MSE(y_true, y_pred).item())
            val_metrics.append(nn.L1Loss()(y_true, y_pred).item())
        print('Epoch: {} Loss: {:.4f} Train MAE: {:.4f} Val Loss: {:.4f} Val MAE: {:.4f}'.format(epoch+1, np.mean(train_losses), np.mean(train_metrics), np.mean(val_losses), np.mean(val_metrics)))
class ToyGatedModel(nn.Module):
    """Three gated graph-conv layers with residual (skip) connections,
    a global sum-pool readout, and a linear regression head."""

    def __init__(self):
        super(ToyGatedModel, self).__init__()
        self.gc_1 = GatedGraphConv(in_features=121, out_features=64)
        self.gc_2 = GatedGraphConv(in_features=64, out_features=64)
        self.gc_3 = GatedGraphConv(in_features=64, out_features=64)
        self.pooling = GlobalSumPooling()
        self.linear = nn.Linear(64, 1)
        self.relu = nn.ReLU()

    def forward(self, A, x, masks):
        # masks is accepted for API parity but intentionally unused here
        # (the masking lines were commented out in the original experiment).
        h1 = self.relu(self.gc_1(A, x))
        h2 = self.relu(self.gc_2(A, h1)) + h1
        h3 = self.relu(self.gc_3(A, h2)) + h2
        return self.linear(self.pooling(h3))
def test_gated_graph_conv():
    """Train ToyGatedModel on UVvis (2048 train / 256 val rows) for 256
    epochs with lr=0.00075, printing train MSE and de-normalized MAE.

    NOTE(review): the validation loop runs without torch.no_grad().
    """
    n_train = 2048
    n_val = 256
    train_uvvis = UVvis(masked=True)
    train_uvvis.df = train_uvvis.df.iloc[:n_train, :]
    # z-score targets with *training* statistics only.
    mu, sigma = np.mean(train_uvvis.df['computational']), np.std(train_uvvis.df['computational'])
    train_uvvis.df['computational'] = (train_uvvis.df['computational'] - mu) / sigma
    val_uvvis = UVvis(masked=True)
    val_uvvis.df = val_uvvis.df.iloc[n_train:n_train+n_val, :]
    val_uvvis.df['computational'] = (val_uvvis.df['computational'] - mu) / sigma
    train_loader = DataLoader(train_uvvis, batch_size=64)
    val_loader = DataLoader(val_uvvis, batch_size=64)
    model = ToyGatedModel()
    optimizer = optim.Adam(model.parameters(), lr=0.00075)
    MSE = nn.MSELoss()
    print(model, '\n')
    # gated = GatedGraphConv(121, 16)
    # for idx, (A, x, masks, y_true) in enumerate(train_loader):
    # out = gated(A, x)
    # print('out', out.shape)
    # break
    for epoch in range(256):
        train_losses = []
        train_metrics = []
        model.train()
        for idx, (A, x, masks, y_true) in enumerate(train_loader):
            optimizer.zero_grad()
            y_pred = model(A, x, masks)
            loss = MSE(y_true, y_pred)
            train_losses.append(loss.item())
            # De-normalize so MAE is reported in original target units.
            y_pred = (y_pred * sigma) + mu
            y_true = (y_true * sigma) + mu
            train_metrics.append(nn.L1Loss()(y_true, y_pred).item())
            loss.backward()
            optimizer.step()
            # break
        # break
        val_losses = []
        val_metrics = []
        model.eval()
        for idx, (A, x, masks, y_true) in enumerate(val_loader):
            y_pred = model(A, x, masks)
            y_pred = (y_pred * sigma) + mu
            y_true = (y_true * sigma) + mu
            # NOTE(review): val MSE uses de-normalized values, unlike train.
            val_losses.append(MSE(y_true, y_pred).item())
            val_metrics.append(nn.L1Loss()(y_true, y_pred).item())
        print('Epoch: {} Loss: {:.4f} Train MAE: {:.4f} Val Loss: {:.4f} Val MAE: {:.4f}'.format(epoch+1, np.mean(train_losses), np.mean(train_metrics), np.mean(val_losses), np.mean(val_metrics)))
class ToyGraphSAGEModel(nn.Module):
    """Three GraphSAGE layers with per-layer node masking, a global
    max-pool readout, and a linear regression head."""

    def __init__(self):
        super(ToyGraphSAGEModel, self).__init__()
        self.gc1 = GraphSAGE(in_features=121, out_features=64)
        self.gc2 = GraphSAGE(in_features=64, out_features=64)
        self.gc3 = GraphSAGE(in_features=64, out_features=64)
        self.pooling = GlobalMaxPooling()
        self.linear = nn.Linear(64, 1)
        self.relu = nn.ReLU()

    def forward(self, A, x, masks):
        h = x
        for conv in (self.gc1, self.gc2, self.gc3):
            h = self.relu(conv(A, h))
            # Zero padded nodes so they don't dominate the max pool.
            h = h * masks[:, :, :h.shape[-1]]
        return self.linear(self.pooling(h))
def test_graphsage():
    """Train ToyGraphSAGEModel on UVvis (1024 train / 256 val rows) for 256
    epochs, printing train MSE and de-normalized MAE each epoch.

    NOTE(review): the validation loop runs without torch.no_grad().
    """
    n_train = 1024
    n_val = 256
    train_uvvis = UVvis(masked=True)
    train_uvvis.df = train_uvvis.df.iloc[:n_train, :]
    # z-score targets with *training* statistics only.
    mu, sigma = np.mean(train_uvvis.df['computational']), np.std(train_uvvis.df['computational'])
    train_uvvis.df['computational'] = (train_uvvis.df['computational'] - mu) / sigma
    val_uvvis = UVvis(masked=True)
    val_uvvis.df = val_uvvis.df.iloc[n_train:n_train+n_val, :]
    val_uvvis.df['computational'] = (val_uvvis.df['computational'] - mu) / sigma
    train_loader = DataLoader(train_uvvis, batch_size=64)
    val_loader = DataLoader(val_uvvis, batch_size=64)
    model = ToyGraphSAGEModel()
    optimizer = optim.Adam(model.parameters())
    MSE = nn.MSELoss()
    print(model, '\n')
    for epoch in range(256):
        train_losses = []
        train_metrics = []
        model.train()
        for idx, (A, x, masks, y_true) in enumerate(train_loader):
            optimizer.zero_grad()
            y_pred = model(A, x, masks)
            loss = MSE(y_true, y_pred)
            train_losses.append(loss.item())
            # De-normalize so MAE is reported in original target units.
            y_pred = (y_pred * sigma) + mu
            y_true = (y_true * sigma) + mu
            train_metrics.append(nn.L1Loss()(y_true, y_pred).item())
            loss.backward()
            optimizer.step()
            # break
        # break
        val_losses = []
        val_metrics = []
        model.eval()
        for idx, (A, x, masks, y_true) in enumerate(val_loader):
            y_pred = model(A, x, masks)
            y_pred = (y_pred * sigma) + mu
            y_true = (y_true * sigma) + mu
            # NOTE(review): val MSE uses de-normalized values, unlike train.
            val_losses.append(MSE(y_true, y_pred).item())
            val_metrics.append(nn.L1Loss()(y_true, y_pred).item())
        print('Epoch: {} Loss: {:.4f} Train MAE: {:.4f} Val Loss: {:.4f} Val MAE: {:.4f}'.format(epoch+1, np.mean(train_losses), np.mean(train_metrics), np.mean(val_losses), np.mean(val_metrics)))
class ToyTopKModel(nn.Module):
    """GIN layers interleaved with Top-K pooling (keep 32 -> 16 -> 8 nodes),
    a global max readout, and a linear regression head."""

    def __init__(self):
        super(ToyTopKModel, self).__init__()
        self.gc1 = GIN(in_features=121, out_features=64)
        self.gc2 = GIN(in_features=64, out_features=64)
        self.gc3 = GIN(in_features=64, out_features=64)
        self.pool1 = TopKPooling(64, k=32)
        self.pool2 = TopKPooling(64, k=16)
        self.pool3 = TopKPooling(64, k=8)
        self.readout = GlobalMaxPooling()
        self.linear = nn.Linear(64, 1)
        self.relu = nn.ReLU()

    def forward(self, A, x, masks):
        h = self.relu(self.gc1(A, x))
        # Only the first layer output is masked; subsequent layers operate
        # on the already-reduced (pooled) graph.
        h = h * masks[:, :, :h.shape[-1]]
        A, h = self.pool1(A, h)
        h = self.relu(self.gc2(A, h))
        A, h = self.pool2(A, h)
        h = self.relu(self.gc3(A, h))
        A, h = self.pool3(A, h)
        return self.linear(self.readout(h))
def test_topk_pooling():
    """Train ToyTopKModel on UVvis (1024 train / 256 val rows) for 256
    epochs with lr=0.005, printing train MSE and de-normalized MAE.

    NOTE(review): the validation loop runs without torch.no_grad().
    """
    n_train = 1024
    n_val = 256
    train_uvvis = UVvis(masked=True)
    train_uvvis.df = train_uvvis.df.iloc[:n_train, :]
    # z-score targets with *training* statistics only.
    mu, sigma = np.mean(train_uvvis.df['computational']), np.std(train_uvvis.df['computational'])
    train_uvvis.df['computational'] = (train_uvvis.df['computational'] - mu) / sigma
    val_uvvis = UVvis(masked=True)
    val_uvvis.df = val_uvvis.df.iloc[n_train:n_train+n_val, :]
    val_uvvis.df['computational'] = (val_uvvis.df['computational'] - mu) / sigma
    train_loader = DataLoader(train_uvvis, batch_size=64)
    val_loader = DataLoader(val_uvvis, batch_size=64)
    model = ToyTopKModel()
    optimizer = optim.Adam(model.parameters(), lr=0.005)
    MSE = nn.MSELoss()
    print(model, '\n')
    for epoch in range(256):
        train_losses = []
        train_metrics = []
        model.train()
        for idx, (A, x, masks, y_true) in enumerate(train_loader):
            optimizer.zero_grad()
            y_pred = model(A, x, masks)
            loss = MSE(y_true, y_pred)
            train_losses.append(loss.item())
            # De-normalize so MAE is reported in original target units.
            y_pred = (y_pred * sigma) + mu
            y_true = (y_true * sigma) + mu
            train_metrics.append(nn.L1Loss()(y_true, y_pred).item())
            loss.backward()
            optimizer.step()
            # break
        # break
        val_losses = []
        val_metrics = []
        model.eval()
        for idx, (A, x, masks, y_true) in enumerate(val_loader):
            y_pred = model(A, x, masks)
            y_pred = (y_pred * sigma) + mu
            y_true = (y_true * sigma) + mu
            # NOTE(review): val MSE uses de-normalized values, unlike train.
            val_losses.append(MSE(y_true, y_pred).item())
            val_metrics.append(nn.L1Loss()(y_true, y_pred).item())
        print('Epoch: {} Loss: {:.4f} Train MAE: {:.4f} Val Loss: {:.4f} Val MAE: {:.4f}'.format(epoch+1, np.mean(train_losses), np.mean(train_metrics), np.mean(val_losses), np.mean(val_metrics)))
test_topk_pooling() | 35.36801 | 218 | 0.591036 |
acf393f5741e054d89d70ffd0aca21d2c4b944da | 281 | py | Python | ExPyBR/ExESPy004.py | us19861229c/Meu-aprendizado-Python | 575c0714ac5377ff3122f4cb57952969e07ba89b | [
"Unlicense"
] | 1 | 2021-12-11T19:53:41.000Z | 2021-12-11T19:53:41.000Z | ExPyBR/ExESPy004.py | us19861229c/Meu-aprendizado-Python | 575c0714ac5377ff3122f4cb57952969e07ba89b | [
"Unlicense"
] | null | null | null | ExPyBR/ExESPy004.py | us19861229c/Meu-aprendizado-Python | 575c0714ac5377ff3122f4cb57952969e07ba89b | [
"Unlicense"
] | null | null | null | """
4.Faça um Programa que peça as 4 notas bimestrais e mostre a média.
"""
# Exercise 4: read the four bimester grades and print their average.
nota1 = float(input("Nota 1: "))
nota2 = float(input("Nota 2: "))
nota3 = float(input("Nota 3: "))
nota4 = float(input("Nota 4: "))
# Arithmetic mean of the four grades, shown with one decimal place.
print(f"A média das notas é {(nota1 + nota2 + nota3 + nota4)/4:.1f}.")
| 25.545455 | 70 | 0.629893 |
acf394069e0144c4de02877b6c44e14465a0318c | 5,049 | py | Python | tests/extension/thread_/memorymodel_readwrite/thread_memorymodel_readwrite.py | akmaru/veriloggen | 74f998139e8cf613f7703fa4cffd571bbf069bbc | [
"Apache-2.0"
] | 232 | 2015-09-01T16:07:48.000Z | 2022-03-28T14:53:28.000Z | tests/extension/thread_/memorymodel_readwrite/thread_memorymodel_readwrite.py | akmaru/veriloggen | 74f998139e8cf613f7703fa4cffd571bbf069bbc | [
"Apache-2.0"
] | 34 | 2015-08-21T09:13:03.000Z | 2022-03-21T23:52:44.000Z | tests/extension/thread_/memorymodel_readwrite/thread_memorymodel_readwrite.py | akmaru/veriloggen | 74f998139e8cf613f7703fa4cffd571bbf069bbc | [
"Apache-2.0"
] | 46 | 2015-09-24T14:39:57.000Z | 2022-02-23T21:59:56.000Z | from __future__ import absolute_import
from __future__ import print_function
import sys
import os
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))))
from veriloggen import *
import veriloggen.thread as vthread
import veriloggen.types.axi as axi
def mkLed():
m = Module('blinkled')
clk = m.Input('CLK')
rst = m.Input('RST')
datawidth = 32
addrwidth = 10
myaxi = vthread.AXIM(m, 'myaxi', clk, rst, datawidth)
myram = vthread.RAM(m, 'myram', clk, rst, datawidth, addrwidth)
saxi = vthread.AXISLiteRegister(m, 'saxi', clk, rst, datawidth)
all_ok = m.TmpReg(initval=0)
def blink(size):
# wait start
saxi.wait_flag(0, value=1, resetvalue=0)
# reset done
saxi.write(1, 0)
all_ok.value = True
for i in range(4):
print('# iter %d start' % i)
# Test for 4KB boundary check
offset = i * 1024 * 16 + (myaxi.boundary_size - 4)
body(size, offset)
print('# iter %d end' % i)
if all_ok:
print('# verify (local): PASSED')
else:
print('# verify (local): FAILED')
# result
saxi.write(2, all_ok)
# done
saxi.write_flag(1, 1, resetvalue=0)
def body(size, offset):
# write
for i in range(size):
wdata = i + 100
myram.write(i, wdata)
laddr = 0
gaddr = offset
myaxi.dma_write(myram, laddr, gaddr, size)
print('dma_write: [%d] -> [%d]' % (laddr, gaddr))
# write
for i in range(size):
wdata = i + 1000
myram.write(i, wdata)
laddr = 0
gaddr = (size + size) * 4 + offset
myaxi.dma_write(myram, laddr, gaddr, size)
print('dma_write: [%d] -> [%d]' % (laddr, gaddr))
# read
laddr = 0
gaddr = offset
myaxi.dma_read(myram, laddr, gaddr, size)
print('dma_read: [%d] <- [%d]' % (laddr, gaddr))
for i in range(size):
rdata = myram.read(i)
if vthread.verilog.NotEql(rdata, i + 100):
print('rdata[%d] = %d' % (i, rdata))
all_ok.value = False
# read
laddr = 0
gaddr = (size + size) * 4 + offset
myaxi.dma_read(myram, laddr, gaddr, size)
print('dma_read: [%d] <- [%d]' % (laddr, gaddr))
for i in range(size):
rdata = myram.read(i)
if vthread.verilog.NotEql(rdata, i + 1000):
print('rdata[%d] = %d' % (i, rdata))
all_ok.value = False
th = vthread.Thread(m, 'th_blink', clk, rst, blink)
fsm = th.start(16)
return m
def mkTest(memimg_name=None):
    """Build the simulation testbench around mkLed().

    A software-model control thread pre-loads the AXI memory model, starts
    the DUT over AXI-lite, polls until it signals done, and prints the
    pass/fail result. NOTE(review): AXI-lite addresses 0/4/8 appear to map
    to the DUT's start/done/result registers with a 4-byte stride — confirm
    against AXISLiteRegister's addressing.
    """
    m = Module('test')
    # target instance
    led = mkLed()
    # copy paras and ports
    params = m.copy_params(led)
    ports = m.copy_sim_ports(led)
    clk = ports['CLK']
    rst = ports['RST']
    memory = axi.AxiMemoryModel(m, 'memory', clk, rst, memimg_name=memimg_name)
    memory.connect(ports, 'myaxi')
    # AXI-Slave controller
    _saxi = vthread.AXIMLite(m, '_saxi', clk, rst, noio=True)
    _saxi.connect(ports, 'saxi')
    def ctrl():
        # short warm-up delay before touching memory
        for i in range(100):
            pass
        # seed the first 16 words so the DUT reads known values
        for i in range(16):
            # byte addressing
            v = memory.read(i * 4)
            print('read: mem[%d] -> %x' % (i, v))
            v = v + 1024
            # byte addressing
            memory.write(i * 4, v)
            print('write: mem[%d] <- %x' % (i, v))
        awaddr = 0
        _saxi.write(awaddr, 1)
        araddr = 4
        v = _saxi.read(araddr)
        # poll the done flag until the DUT finishes
        while v == 0:
            v = _saxi.read(araddr)
        araddr = 8
        v = _saxi.read(araddr)
        if v:
            print('# verify: PASSED')
        else:
            print('# verify: FAILED')
    th = vthread.Thread(m, 'th_ctrl', clk, rst, ctrl)
    fsm = th.start()
    uut = m.Instance(led, 'uut',
                     params=m.connect_params(led),
                     ports=m.connect_ports(led))
    #simulation.setup_waveform(m, uut)
    simulation.setup_clock(m, clk, hperiod=5)
    init = simulation.setup_reset(m, rst, m.make_reset(), period=100)
    init.add(
        Delay(1000000),
        Systask('finish'),
    )
    return m
def run(filename='tmp.v', simtype='iverilog', outputfile=None):
    """Generate the testbench, optionally emit Verilog, run the simulator,
    and return its output (minus verilator's trailing dash line).

    Args:
        filename: Verilog dump path, or None to skip dumping.
        simtype: simulator backend name passed to simulation.Simulator.
        outputfile: simulator log path; defaults to '<script-name>.out'.
    """
    if outputfile is None:
        outputfile = os.path.splitext(os.path.basename(__file__))[0] + '.out'
    test = mkTest(memimg_name='memimg_' + outputfile)
    if filename is not None:
        test.to_verilog(filename)
    sim = simulation.Simulator(test, sim=simtype)
    rslt = sim.run(outputfile=outputfile)
    lines = rslt.splitlines()
    # Verilator appends a '----' footer line; strip it from the result.
    if simtype == 'verilator' and lines[-1].startswith('-'):
        rslt = '\n'.join(lines[:-1])
    return rslt
if __name__ == '__main__':
    # Build and simulate with the default backend when run as a script.
    rslt = run(filename='tmp.v')
    print(rslt)
| 25.760204 | 79 | 0.543078 |
acf395ee6f6be97dadd441fc67426dfb45d0f77d | 22,324 | py | Python | examples/experimental/next-frame-wip.py | Darkar25/HyperGAN | 76ef7e0c20569ceece88dc76396d92c77050692b | [
"MIT"
] | 1 | 2020-01-02T06:29:56.000Z | 2020-01-02T06:29:56.000Z | examples/experimental/next-frame-wip.py | KonradLinkowski/HyperGAN | 3153daee838dbb8e8d8926b1e81419682a24f2fe | [
"MIT"
] | 218 | 2021-05-25T01:46:15.000Z | 2022-02-11T01:08:52.000Z | examples/experimental/next-frame-wip.py | KonradLinkowski/HyperGAN | 3153daee838dbb8e8d8926b1e81419682a24f2fe | [
"MIT"
] | null | null | null | import os
import uuid
import random
import tensorflow as tf
import hypergan as hg
import hyperchamber as hc
import numpy as np
import glob
import time
import re
from hypergan.viewer import GlobalViewer
from hypergan.samplers.base_sampler import BaseSampler
from hypergan.gan_component import ValidationException, GANComponent
from hypergan.samplers.random_walk_sampler import RandomWalkSampler
from hypergan.samplers.debug_sampler import DebugSampler
from hypergan.search.alphagan_random_search import AlphaGANRandomSearch
from hypergan.gans.base_gan import BaseGAN
from common import *
import copy
from hypergan.gans.alpha_gan import AlphaGAN
from hypergan.gan_component import ValidationException, GANComponent
from hypergan.gans.base_gan import BaseGAN
from hypergan.discriminators.fully_connected_discriminator import FullyConnectedDiscriminator
from hypergan.encoders.uniform_encoder import UniformEncoder
from hypergan.trainers.multi_step_trainer import MultiStepTrainer
from hypergan.trainers.multi_trainer_trainer import MultiTrainerTrainer
from hypergan.trainers.consensus_trainer import ConsensusTrainer
# CLI setup: base image arguments plus frame-embedding options.
arg_parser = ArgumentParser("render next frame")
parser = arg_parser.add_image_arguments()
parser.add_argument('--frames', type=int, default=4, help='Number of frames to embed.')
# NOTE(review): argparse `type=bool` treats any non-empty string as True
# ("--shuffle false" still enables shuffling); kept as-is since changing it
# would alter the CLI contract.
parser.add_argument('--shuffle', type=bool, default=False, help='Randomize inputs.')
args = arg_parser.parse_args()
width, height, channels = parse_size(args.size)
config = lookup_config(args)
# In search mode, sample a random configuration; if a config list is given,
# keep its base settings but randomize the network components.
if args.action == 'search':
    random_config = AlphaGANRandomSearch({}).random_config()
    if args.config_list is not None:
        config = random_config_from_list(args.config_list)
        config["generator"]=random_config["generator"]
        config["g_encoder"]=random_config["g_encoder"]
        config["discriminator"]=random_config["discriminator"]
        config["z_discriminator"]=random_config["z_discriminator"]
        # TODO Other search terms?
    else:
        config = random_config
def tryint(s):
    """Return int(s) when *s* parses as an integer, otherwise *s* itself."""
    try:
        value = int(s)
    except ValueError:
        return s
    return value
def alphanum_key(s):
    """Split *s* into alternating text/number chunks for natural sorting,
    e.g. 'z23a' -> ['z', 23, 'a'].  Digit runs become ints so that
    'frame2' sorts before 'frame10'."""
    def _coerce(chunk):
        try:
            return int(chunk)
        except ValueError:
            return chunk
    return [_coerce(chunk) for chunk in re.split('([0-9]+)', s)]
class VideoFrameLoader:
    """TF1 queue-based loader that yields batches of consecutive video
    frames (``self.frames``) plus two independently shuffled single frames
    (``self.x`` / ``self.y``) from a directory of numbered images.
    """
    def __init__(self, batch_size, frame_count, shuffle):
        self.batch_size = batch_size
        self.frame_count = frame_count
        self.shuffle = shuffle
    def inputs(self):
        # Valid only after create() has been called.
        return self.frames
    def create(self, directory, channels=3, format='jpg', width=64, height=64, crop=False, resize=False):
        # NOTE(review): the channels/width/height parameters here are not
        # forwarded to read_frame(), which reads the module-level globals of
        # the same names instead — confirm this is intentional.
        directories = glob.glob(directory+"/*")
        directories = [d for d in directories if os.path.isdir(d)]
        if(len(directories) == 0):
            directories = [directory]
        # Create a queue that produces the filenames to read.
        if(len(directories) == 1):
            # No subdirectories, use all the images in the passed in path
            filenames = glob.glob(directory+"/*."+format)
        else:
            filenames = glob.glob(directory+"/**/*."+format)
        # NOTE(review): too few frames only prints a warning; loading
        # continues regardless.
        if(len(filenames) < self.frame_count):
            print("Error: Not enough frames in data folder ", directory)
        self.file_count = len(filenames)
        # Natural sort so frame10 follows frame9, not frame1.
        filenames = sorted(filenames, key=alphanum_key)
        if self.file_count == 0:
            raise ValidationException("No images found in '" + directory + "'")
        # creates arrays of filenames[:end], filenames[1:end-1], etc for serialized random batching
        if self.shuffle:
            # Shuffled mode: each of the frame_count slots draws an
            # independent random file (frames are NOT consecutive).
            frames = [tf.train.slice_input_producer([filenames], shuffle=True)[0] for i in range(self.frame_count)]
        else:
            # Sequential mode: slot i is the list shifted by i, so one
            # queue element is frame_count consecutive files.
            input_t = [filenames[i:i-self.frame_count] for i in range(self.frame_count)]
            input_queue = tf.train.slice_input_producer(input_t, shuffle=True)
            frames = input_queue
        # Read examples from files in the filename queue.
        frames = [self.read_frame(frame, format, crop, resize) for frame in frames]
        frames = self._get_data(frames)
        self.frames = frames
        # Two extra independently shuffled single-frame streams.
        x = tf.train.slice_input_producer([filenames], shuffle=True)[0]
        y = tf.train.slice_input_producer([filenames], shuffle=True)[0]
        self.x = self.read_frame(x, format, crop, resize)
        self.y = self.read_frame(y, format, crop, resize)
        self.x = self._get_data([self.x])
        self.y = self._get_data([self.y])
    def read_frame(self, t, format, crop, resize):
        # Decode one image file into a float tensor scaled to [-1, 1].
        # NOTE(review): `channels`, `height` and `width` below are module
        # globals, not parameters of this method.
        value = tf.read_file(t)
        if format == 'jpg':
            img = tf.image.decode_jpeg(value, channels=channels)
        elif format == 'png':
            img = tf.image.decode_png(value, channels=channels)
        else:
            # NOTE(review): unknown formats leave `img` unbound -> NameError.
            print("[loader] Failed to load format", format)
        img = tf.cast(img, tf.float32)
        # Image processing for evaluation.
        # Crop the central [height, width] of the image.
        if crop:
            resized_image = hypergan.inputs.resize_image_patch.resize_image_with_crop_or_pad(img, height, width, dynamic_shape=True)
        elif resize:
            resized_image = tf.image.resize_images(img, [height, width], 1)
        else:
            resized_image = img
        tf.Tensor.set_shape(resized_image, [height,width,channels])
        # This moves the image to a range of -1 to 1.
        float_image = resized_image / 127.5 - 1.
        return float_image
    def _get_data(self, imgs):
        # Batch the decoded tensors with a multi-threaded shuffle queue.
        batch_size = self.batch_size
        num_preprocess_threads = 24
        return tf.train.shuffle_batch(
            imgs,
            batch_size=batch_size,
            num_threads=num_preprocess_threads,
            capacity= batch_size*2, min_after_dequeue=batch_size)
# Build the frame loader from the CLI arguments and point it at the dataset.
inputs = VideoFrameLoader(args.batch_size, args.frames, args.shuffle)
inputs.create(args.directory,
        channels=channels,
        format=args.format,
        crop=args.crop,
        width=width,
        height=height,
        resize=True)
# Checkpoint path used when saving/loading the model.
save_file = "save/model.ckpt"
class AliNextFrameGAN(BaseGAN):
"""
"""
def __init__(self, *args, **kwargs):
BaseGAN.__init__(self, *args, **kwargs)
def create(self):
config = self.config
ops = self.ops
self.g_vars = []
d_vars = []
with tf.device(self.device):
def random_t(shape):
shape[-1] //= len(config.z_distribution.projections)
return UniformEncoder(self, config.z_distribution, output_shape=shape).sample
def random_like(x):
shape = self.ops.shape(x)
return random_t(shape)
self.frame_count = len(self.inputs.frames)
self.frames = self.inputs.frames
dist = UniformEncoder(self, config.z_distribution)
dist2 = UniformEncoder(self, config.z_distribution)
dist3 = UniformEncoder(self, config.z_distribution)
dist4 = UniformEncoder(self, config.z_distribution)
dist5 = UniformEncoder(self, config.z_distribution)
uz = self.create_component(config.uz, name='u_to_z', input=dist.sample)
uc = self.create_component(config.uc, name='u_to_c', input=dist2.sample)
uz2 = self.create_component(config.uz, name='u_to_z', input=dist3.sample, reuse=True)
uc2 = self.create_component(config.uc, name='u_to_c', input=dist4.sample, reuse=True)
uc3 = self.create_component(config.uc, name='u_to_c', input=dist5.sample, reuse=True)
self.g_vars += uz.variables()
self.g_vars += uc.variables()
def ec(zt, cp,reuse=True):
if config.noise:
randt = random_like(cp)
if config.proxy:
dist3 = UniformEncoder(self, config.z_distribution)
proxy_c = self.create_component(config.proxy_c, name='rand_ct', input=dist3.sample, reuse=reuse)
randt = proxy_c.sample
print("CC", zt, randt)
c = self.create_component(config.ec, name='ec', input=zt, features={'ct-1':cp, 'n':randt}, reuse=reuse)
else:
c = self.create_component(config.ec, name='ec', input=zt, features=[cp], reuse=reuse)
if not reuse:
if config.proxy:
self.g_vars += proxy_c.variables()
self.g_vars += c.variables()
return c.sample
def ez(ft, zp,reuse=True):
z = self.create_component(config.ez, name='ez', input=ft, features=[zp], reuse=reuse)
if not reuse:
self.g_vars += z.variables()
return z.sample
def build_g(zt, ct, reuse=True):
print("Gb", reuse)
g = self.create_component(config.generator, name='generator', input=ct, features=[zt], reuse=reuse)
if not reuse:
self.g_vars += g.variables()
return g.sample
def encode_frames(fs, c0, z0, reuse=True):
cs = [c0]
zs = [z0]
x_hats = [build_g(zs[-1], cs[-1], reuse=reuse)]
for i in range(len(fs)):
print("encode frames", i)
_reuse = reuse or (i!=0)
z = ez(fs[i], zs[-1], reuse=_reuse)
c = ec(z, cs[-1], reuse=_reuse)
x_hat = build_g(z, c, reuse=True)
zs.append(z)
cs.append(c)
x_hats.append(x_hat)
return cs, zs, x_hats
def build_sim(z0, c0, steps, reuse=True):
zs = [z0]
cs = [c0]
gs = [build_g(zs[-1], cs[-1], reuse=reuse)]
for i in range(steps):
_reuse = reuse or (i!=0)
z = ez(gs[-1], zs[-1], reuse=_reuse)
c = ec(z, cs[-1], reuse=_reuse)
g = build_g(z, c, reuse=True)
zs.append(z)
cs.append(c)
gs.append(g)
return gs, cs, zs
#self.frames = [f+tf.random_uniform(self.ops.shape(f), minval=-0.1, maxval=0.1) for f in self.frames ]
cs, zs, x_hats = encode_frames(self.frames, uc2.sample, uz2.sample, reuse=False)
self.zs = zs
self.cs = cs
ugs, ucs, uzs = build_sim(uz.sample, uc.sample, len(self.frames))
ugs_next, ucs_next, uzs_next = build_sim(uzs[-1], ucs[-1], len(self.frames))
re_ucs_next, re_uzs_next, re_ugs_next = encode_frames(ugs_next[1:], ucs_next[0], uzs_next[0])
gs_next, cs_next, zs_next = build_sim(zs[-1], cs[-1], len(self.frames))
#gs_next_next, cs_next_next, zs_next_next = build_sim(zs[-1], cs[-1], 21)
re_ucs, re_uzs, ugs_hat = encode_frames(ugs[1:], ucs[0], uzs[0])
re_cs_next, re_zs_next, re_gs_next = encode_frames(gs_next[1:], cs_next[0], zs_next[0])
self.x_hats = x_hats
t0 = tf.concat(zs[1:], axis=3)
t1 = tf.concat(re_uzs[:-1], axis=3)
t2 = tf.concat(re_zs_next[:-1], axis=3)
t3 = tf.concat(re_uzs_next[:-1], axis=3)
t4 = tf.concat(re_uzs[:-1], axis=3)
f0 = tf.concat(cs[1:], axis=3)
f1 = tf.concat(re_ucs[:-1], axis=3)
f2 = tf.concat(re_cs_next[:-1], axis=3)
f3 = tf.concat(re_ucs_next[:-1], axis=3)
stack = [t0,t1, t2]#, t4, t5]
stacked = ops.concat(stack, axis=0)
features =ops.concat([f0,f1,f2], axis=0)
d = self.create_component(config.z_discriminator, name='d_img', input=stacked, features=[features])
d_vars += d.variables()
l = self.create_loss(config.loss, d, None, None, len(stack))
d_loss = l.d_loss
g_loss = l.g_loss
self.video_generator_last_z = uzs[0]
self.video_generator_last_c = ucs[0]
self.gs_next = gs_next
ztn = uzs[1]
ctn = ucs[1]
self.video_generator_last_zn = ztn
self.video_generator_last_cn = ctn
gen = hc.Config({"sample":ugs[0]})
if config.use_x:
def rotate(first, second, offset=None):
rotations = [tf.concat(first[:offset], axis=3)]
elem = first
for e in second:
elem = elem[1:]+[e]
rotations.append(tf.concat(elem[:offset], axis=3))
return rotations
t0 = tf.concat(self.frames[1:], axis=3)
f0 = tf.concat(cs[1:-1], axis=3)
stack = [t0]
features = [f0]
if config.encode_forward:
stack += rotate(self.frames[2:]+[gs_next[0]], gs_next[1:])
features += rotate(cs[2:], cs_next[1:])
#stack += [gs_next_next[-frames:]]
if config.encode_ug:
stack += rotate(ugs[:-2], ugs[-2:]+ugs_next[1:])
features += rotate(ucs[:-2], ucs[-2:]+ucs_next[1:])
stacked = ops.concat(stack, axis=0)
features = tf.concat(features, axis=0)
d = self.create_component(config.discriminator, name='d_manifold', input=stacked, features=[features])
d_vars += d.variables()
l = self.create_loss(config.loss, d, None, None, len(stack))
d_loss += l.d_loss
g_loss += l.g_loss
gx_sample = gen.sample
gy_sample = gen.sample
gx = hc.Config({"sample":gx_sample})
gy = hc.Config({"sample":gy_sample})
last_frame = tf.slice(gy_sample, [0,0,0,0], [-1, -1, -1, 3])
self.y = hc.Config({"sample":last_frame})
self.gy = self.y
self.gx = self.y
self.uniform_sample = gen.sample
self.preview = tf.concat(self.inputs.frames[:-1] + [gen.sample], axis=1)#tf.concat(tf.split(gen.sample, (self.ops.shape(gen.sample)[3]//3), 3), axis=1)
metrics = {
'g_loss': g_loss,
'd_loss': d_loss
}
trainers = []
lossa = hc.Config({'sample': [d_loss, g_loss], 'metrics': metrics, 'd_fake': l.d_fake, 'd_real': l.d_real, 'config': l.config})
self.loss = lossa
self._g_vars = self.g_vars
self._d_vars = d_vars
trainer = self.create_component(config.trainer, loss = lossa, g_vars = self.g_vars, d_vars = d_vars)
self.session.run(tf.global_variables_initializer())
self.trainer = trainer
self.generator = gx
self.z_hat = gy.sample
self.x_input = self.inputs.frames[0]
self.uga = self.y.sample
self.uniform_encoder = dist
    def g_vars(self):
        """Return the generator variables cached during graph construction."""
        return self._g_vars
    def d_vars(self):
        """Return the discriminator variables cached during graph construction."""
        return self._d_vars
    def fitness_inputs(self):
        """Return the input frame placeholders used for fitness evaluation."""
        return self.inputs.frames
    def create_loss(self, loss_config, discriminator, x, generator, split):
        """Instantiate the configured loss component for `discriminator`.

        `split` is the number of stacked inputs the discriminator received.
        """
        loss = self.create_component(loss_config, discriminator=discriminator, x=x, generator=generator, split=split)
        return loss
def create_encoder(self, x_input, name='input_encoder', reuse=False):
config = self.config
input_encoder = dict(config.input_encoder or config.g_encoder or config.generator)
encoder = self.create_component(input_encoder, name=name, input=x_input, reuse=reuse)
return encoder
def create_z_discriminator(self, z, z_hat):
config = self.config
z_discriminator = dict(config.z_discriminator or config.discriminator)
z_discriminator['layer_filter']=None
net = tf.concat(axis=0, values=[z, z_hat])
encoder_discriminator = self.create_component(z_discriminator, name='z_discriminator', input=net)
return encoder_discriminator
    def create_cycloss(self, x_input, x_hat):
        """Cycle-consistency loss between the input and its reconstruction."""
        config = self.config
        ops = self.ops
        distance = config.distance or ops.lookup('l1_distance')
        pe_layers = self.gan.skip_connections.get_array("progressive_enhancement")
        cycloss_lambda = config.cycloss_lambda
        if cycloss_lambda is None:
            cycloss_lambda = 10  # default weight when the config omits it
        if(len(pe_layers) > 0):
            # Progressive growing: compare only the currently-grown region.
            mask = self.progressive_growing_mask(len(pe_layers)//2+1)
            cycloss = tf.reduce_mean(distance(mask*x_input, mask*x_hat))
            # NOTE(review): multiplying the scalar mean by `mask` again makes the
            # loss tensor-valued — possibly `cycloss_lambda` was intended here.
            cycloss *= mask
        else:
            cycloss = tf.reduce_mean(distance(x_input, x_hat))
        cycloss *= cycloss_lambda
        return cycloss
    def create_z_cycloss(self, z, x_hat, encoder, generator):
        """Optional latent-space cycle losses, weighted by config lambdas.

        Returns None when neither `z_hat_lambda` nor `z_cycloss_lambda` is set.
        """
        config = self.config
        ops = self.ops
        total = None
        distance = config.distance or ops.lookup('l1_distance')
        if config.z_hat_lambda:
            # NOTE(review): `z_hat` is not defined in this scope (the parameter
            # is `z`), and `z_hat_cycloss_lambda` may be None — this branch
            # would raise if taken; confirm it is ever enabled.
            z_hat_cycloss_lambda = config.z_hat_cycloss_lambda
            recode_z_hat = encoder.reuse(x_hat)
            z_hat_cycloss = tf.reduce_mean(distance(z_hat, recode_z_hat))
            z_hat_cycloss *= z_hat_cycloss_lambda
        if config.z_cycloss_lambda:
            recode_z = encoder.reuse(generator.reuse(z))
            z_cycloss = tf.reduce_mean(distance(z, recode_z))
            z_cycloss_lambda = config.z_cycloss_lambda
            if z_cycloss_lambda is None:
                z_cycloss_lambda = 0
            z_cycloss *= z_cycloss_lambda
        # Combine whichever of the two losses were requested.
        if config.z_hat_lambda and config.z_cycloss_lambda:
            total = z_cycloss + z_hat_cycloss
        elif config.z_cycloss_lambda:
            total = z_cycloss
        elif config.z_hat_lambda:
            total = z_hat_cycloss
        return total
    def input_nodes(self):
        "used in hypergan build"
        # NOTE(review): the guard checks `self.generator.mask_generator` but the
        # body reads `self.mask_generator` — confirm both refer to the same object.
        if hasattr(self.generator, 'mask_generator'):
            extras = [self.mask_generator.sample]
        else:
            extras = []
        return extras + [
            self.x_input
        ]
    def output_nodes(self):
        "used in hypergan build"
        # NOTE(review): same guard/attribute mismatch as input_nodes; also
        # `self.encoder` and `self.generator_int` are not set in the visible
        # build code — confirm they exist before export.
        if hasattr(self.generator, 'mask_generator'):
            extras = [
                self.mask_generator.sample,
                self.generator.g1x,
                self.generator.g2x
            ]
        else:
            extras = []
        return extras + [
            self.encoder.sample,
            self.generator.sample,
            self.uniform_sample,
            self.generator_int
        ]
class VideoFrameSampler(BaseSampler):
    """Sampler that feeds previously generated frames back into the generator.

    Each `_sample` call shifts the frame window forward by one generated frame,
    producing a rolling video preview.
    """

    def __init__(self, gan, samples_per_row=8):
        # Seed the frame buffer with an initial preview render.
        # (Removed an unused local `sess` from the original.)
        self.x = gan.session.run(gan.preview)
        print("__________", np.shape(self.x), '---oo')
        # `height` is a module-level global — frames are stacked vertically
        # in the preview tensor, so axis 1 divides into `frames` slices.
        frames = np.shape(self.x)[1] // height
        self.frames = frames
        self.x = np.split(self.x, frames, axis=1)
        self.i = 0
        BaseSampler.__init__(self, gan, samples_per_row)

    def _sample(self):
        """Advance the rolling window by one frame and return the newest one."""
        # (Removed an unused local `z_t` from the original.)
        gan = self.gan
        sess = gan.session
        feed_dict = {}
        for i, frame_input in enumerate(gan.inputs.frames):
            if len(self.x) > i + 1:
                feed_dict[frame_input] = self.x[i + 1]
        self.x = sess.run(gan.preview, feed_dict)
        frames = np.shape(self.x)[1] // height
        self.x = np.split(self.x, frames, axis=1)
        x_ = self.x[-1]
        time.sleep(0.15)  # throttle so the viewer can keep up
        return {
            'generator': x_
        }
class TrainingVideoFrameSampler(BaseSampler):
    """Sampler that simply renders the current preview during training."""

    def __init__(self, gan, samples_per_row=8):
        self.z = None
        self.i = 0
        BaseSampler.__init__(self, gan, samples_per_row)

    def _sample(self):
        """Render and return the GAN's current preview tensor."""
        # (Removed unused locals `z_t` and `sess` from the original.)
        gan = self.gan
        return {
            'generator': gan.session.run(gan.preview)
        }
def setup_gan(config, inputs, args):
    """Build the next-frame GAN, restoring saved weights when available."""
    gan = AliNextFrameGAN(config, inputs=inputs)

    # Never load a stale checkpoint while hyper-parameter searching.
    if args.action != 'search' and os.path.isfile(save_file + ".meta"):
        gan.load(save_file)

    tf.train.start_queue_runners(sess=gan.session)

    GlobalViewer.title = "[hypergan] next-frame " + args.config
    GlobalViewer.enabled = args.viewer
    GlobalViewer.zoom = args.zoom
    return gan
def train(config, inputs, args):
    """Run the training loop, periodically saving checkpoints and samples."""
    gan = setup_gan(config, inputs, args)
    sampler = lookup_sampler(args.sampler or TrainingVideoFrameSampler)(gan)

    samples = 0
    for step in range(args.steps):
        gan.step()

        should_save = (args.action == 'train'
                       and step > 0
                       and step % args.save_every == 0)
        if should_save:
            print("saving " + save_file)
            gan.save(save_file)

        if step % args.sample_every == 0:
            sampler.sample("samples/%06d.png" % samples, args.save_samples)
            samples += 1

    tf.reset_default_graph()
    # No metrics are currently collected; callers expect a list.
    return []
def sample(config, inputs, args):
    """Generate image samples from a restored GAN without training it."""
    gan = setup_gan(config, inputs, args)
    sampler = lookup_sampler(args.sampler or VideoFrameSampler)(gan)
    for index in range(args.steps):
        sampler.sample("samples/%06d.png" % index, args.save_samples)
def search(config, inputs, args):
    """Train once under this config, then append the metrics to the search log."""
    metrics = train(config, inputs, args)

    config_filename = "colorizer-" + str(uuid.uuid4()) + '.json'
    hc.Selector().save(config_filename, config)

    with open(args.search_output, "a") as log:
        log.write(config_filename + "," + ",".join(str(x) for x in metrics) + "\n")
# Dispatch on the CLI action requested by the user.
action = args.action
if action == 'sample':
    sample(config, inputs, args)
elif action == 'search':
    search(config, inputs, args)
elif action == 'train':
    metrics = train(config, inputs, args)
    print("Resulting metrics:", metrics)
else:
    print("Unknown action: " + args.action)
| 36.960265 | 163 | 0.589545 |
acf396be8f7d9ed7507fc3d8a7735e44f1e06ee0 | 12,290 | py | Python | nearest_centroide_v2.py | TomVet/projet_info | 7ea6b93f5f1100412541c34a1be391b17248e781 | [
"MIT"
] | null | null | null | nearest_centroide_v2.py | TomVet/projet_info | 7ea6b93f5f1100412541c34a1be391b17248e781 | [
"MIT"
] | 1 | 2022-01-02T22:02:21.000Z | 2022-01-02T22:02:21.000Z | nearest_centroide_v2.py | TomVet/projet_info | 7ea6b93f5f1100412541c34a1be391b17248e781 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import csv
import time
import numpy as np
from sklearn.neighbors import NearestCentroid
# Each dataset is a (display name, training CSV path, test CSV path) triple.
HEART = ("Maladie cardiaque", "dataset_formater/heart.csv",
         "dataset_formater/heart_test.csv")
WATER_POTABILITY = ("Potabilité de l'eau",
                    "dataset_formater/water_potability.csv",
                    "dataset_formater/water_potability_test.csv")
DIABETES = ("Diabète", "dataset_formater/diabetes.csv",
            "dataset_formater/diabetes_test.csv")
IRIS = ("Iris", "dataset_formater/iris.csv", "dataset_formater/iris_test.csv")
# Définition des fonctions pour faire un algorithme de classification centroide
# le plus proche .
# _____________________________________________________________________________
def calcul_coordonnees_centroide(liste_coordonne):
    """Compute the centroid of the points in `liste_coordonne`.

    Parameters
    ----------
    liste_coordonne : list
        List of point coordinates, all with the same dimension.

    Returns
    -------
    coordonnees : numpy.ndarray
        Coordinates of the centroid (per-dimension mean), as in the original
        manual accumulation loop but vectorized with numpy.
    """
    return np.asarray(liste_coordonne, dtype=np.float64).mean(axis=0)
def calcul_distance_euclidienne(point_1, point_2):
    """Compute the squared Euclidean distance between two points.

    Parameters
    ----------
    point_1 : list
        Coordinates of the first point.
    point_2 : list
        Coordinates of the second point (same dimension as `point_1`).

    Returns
    -------
    distance : float
        Squared Euclidean distance between the two points.
    """
    # One comprehension replaces the manual index loop of the original.
    return sum((a - b) ** 2 for a, b in zip(point_1, point_2))
def find_nearest_centroid(point, centroides):
    """Return the class of the centroid closest to `point`.

    Parameters
    ----------
    point : list
        Coordinates of the point to classify.
    centroides : list
        List of (classe, coordinates) pairs, one per class.

    Returns
    -------
    classe_du_min : int
        Class of the nearest centroid (first one wins on ties, as in the
        original strict-inequality loop).
    """
    classe_du_min, _ = min(
        ((classe, calcul_distance_euclidienne(point, centroide))
         for classe, centroide in centroides),
        key=lambda pair: pair[1],
    )
    return classe_du_min
def recuperer_donnee_csv(fichier, separateur=","):
    """Load a CSV file of floats into a 2-D float64 array.

    Parameters
    ----------
    fichier : string
        Path of the CSV file to read; every field must parse as a float.
    separateur : string, optional
        Field separator used in the file. Defaults to ",".

    Returns
    -------
    numpy.ndarray
        2-D array of float64 values, one row per CSV line.
    """
    with open(fichier, newline="", encoding="utf-8") as flux:
        lignes = list(csv.reader(flux, delimiter=separateur))
    return np.array(lignes).astype(np.float64)
def calcul_centroides(fichier, separateur=","):
    """Compute one centroid per class found in a training CSV.

    Parameters
    ----------
    fichier : string
        Path of the training CSV (floats only; last column is the class).
    separateur : string, optional
        Field separator used in the file. Defaults to ",".

    Returns
    -------
    centroides : list
        List of (classe, centroid coordinates) pairs.
    nb_parametres : int
        Number of feature columns per point.
    classes : set
        Set of class labels present in the dataset.
    """
    dataset = recuperer_donnee_csv(fichier, separateur)
    nb_parametres = len(dataset[1]) - 1
    classes = {point[-1] for point in dataset}

    centroides = []
    for classe in classes:
        membres = [point[:nb_parametres]
                   for point in dataset if point[-1] == classe]
        centroides.append((classe, calcul_coordonnees_centroide(membres)))
    return centroides, nb_parametres, classes
def tester_data(fichier, centroides, nb_parametres, classes, separateur=","):
    """Evaluate the nearest-centroid classifier on a test CSV.

    Parameters
    ----------
    fichier : string
        Path of the test CSV (floats only; last column is the class).
    centroides : list
        List of (classe, coordinates) pairs.
    nb_parametres : int
        Number of feature columns per point.
    classes : set
        Set of class labels known from training.
    separateur : string, optional
        Field separator used in the file. Defaults to ",".

    Returns
    -------
    nb_test : int
        Number of test points evaluated.
    nb_bon : int
        Number of correct predictions.
    """
    test_data = recuperer_donnee_csv(fichier, separateur)
    nb_test = len(test_data)
    nb_bon = 0
    for test in test_data:
        # The original re-ran the prediction once per class; a single call
        # plus a membership test is equivalent and O(len(classes)) cheaper.
        prediction = find_nearest_centroid(test[:nb_parametres], centroides)
        if prediction == test[-1] and test[-1] in classes:
            nb_bon += 1
    return nb_test, nb_bon
def centroide_plus_proche(dataset, datatest, separateur=","):
    """Train and evaluate our nearest-centroid classifier.

    Parameters
    ----------
    dataset : string
        Path of the training CSV (floats only).
    datatest : string
        Path of the test CSV (floats only).
    separateur : string, optional
        Field separator used in the files. Defaults to ",".

    Returns
    -------
    fiabilite : float
        Accuracy on the test set, in percent.
    temps : float
        Wall-clock time per classified point, in milliseconds.
    classes : set
        Set of class labels found in the training data.
    """
    debut = time.time()

    centroides, nb_parametres, classes = calcul_centroides(dataset, separateur)
    nb_test, nb_bon = tester_data(datatest, centroides, nb_parametres,
                                  classes, separateur)

    fiabilite = nb_bon / nb_test * 100
    temps = (time.time() - debut) * 1000 / nb_test
    return fiabilite, temps, classes
# Définition des fonctions pour utilisée l'algorithme nearest centroide de
# sklear.
# _____________________________________________________________________________
def apprentissage(fichier, clf, separateur=","):
    """Fit `clf` on the training data contained in `fichier`.

    Parameters
    ----------
    fichier : string
        Path of the training CSV (floats only; last column is the class).
    clf : estimator
        A scikit-learn classifier exposing `fit`, e.g. NearestCentroid().
    separateur : string, optional
        Field separator used in the file. Defaults to ",".

    Returns
    -------
    None.
    """
    dataset = recuperer_donnee_csv(fichier, separateur)
    # `dataset` is already a 2-D float array, so plain slicing replaces the
    # original row loop + np.resize round-trip.
    echantillon = dataset[:, :-1]
    cibles = dataset[:, -1]
    clf.fit(echantillon, cibles)
def test_donnees(fichier, clf, separateur=","):
    """Measure the accuracy of a fitted classifier on a test CSV.

    Parameters
    ----------
    fichier : string
        Path of the test CSV (floats only; last column is the class).
    clf : estimator
        Fitted scikit-learn classifier exposing `predict`.
    separateur : string, optional
        Field separator used in the file. Defaults to ",".

    Returns
    -------
    fiabilite : float
        Accuracy on the test set, in percent.
    nb_test : int
        Number of test points evaluated.
    """
    datatest = recuperer_donnee_csv(fichier, separateur)
    nb_test = len(datatest)
    nb_bon = sum(1 for point in datatest
                 if clf.predict([point[:-1]]) == point[-1])
    fiabilite = nb_bon / nb_test * 100
    return fiabilite, nb_test
def centroide_plus_proche_sklearn(dataset, datatest, separateur=","):
    """Train and evaluate scikit-learn's NearestCentroid classifier.

    Parameters
    ----------
    dataset : string
        Path of the training CSV (floats only).
    datatest : string
        Path of the test CSV (floats only).
    separateur : string, optional
        Field separator used in the files. Defaults to ",".

    Returns
    -------
    fiabilite : float
        Accuracy on the test set, in percent.
    temps : float
        Wall-clock time per classified point, in milliseconds.
    """
    debut = time.time()

    clf = NearestCentroid()
    apprentissage(dataset, clf, separateur)
    fiabilite, nb_test = test_donnees(datatest, clf, separateur)

    temps = (time.time() - debut) * 1000 / nb_test
    return fiabilite, temps
def comparaison(donnee, separateur=","):
    """Compare our nearest-centroid implementation with scikit-learn's.

    Parameters
    ----------
    donnee : tuple
        (dataset display name, training CSV path, test CSV path).
    separateur : string, optional
        Field separator used in the CSV files. Defaults to ",".

    Prints, for each implementation, its accuracy (%) and the mean
    classification time per point (ms).
    """
    nom, dataset, datatest = donnee
    fiabilite_1, temps_1, nb_classe = centroide_plus_proche(dataset, datatest,
                                                            separateur)
    fiabilite_2, temps_2 = centroide_plus_proche_sklearn(dataset, datatest,
                                                         separateur)
    # `nb_classe` first holds the set of classes, then its cardinality.
    nb_classe = len(nb_classe)
    print(f"""Dataset : {nom}\nNombre de classe : {nb_classe :.0f}
Notre algorithme :\n\tPrécision : {fiabilite_1 :.2f} %
\tTemps d'execution : {temps_1 :.3f} ms\n\tAlgorithme du module :
\tPrécision : {fiabilite_2 :.2f} %
\tTemps d'execution : {temps_2 :.3f} ms\n""")
# print("Nearest centroide :\n_________________________________________________")
# comparaison(HEART)
# comparaison(WATER_POTABILITY)
# comparaison(DIABETES)
# comparaison(IRIS)
| 30.802005 | 81 | 0.646867 |
acf3974a1dac99727ad253baa0fba6fe5724bf41 | 1,561 | py | Python | instapp/migrations/0002_auto_20211018_1241.py | bensammwaniki/insta-clone | 603fe0bc2313d49cd4df9c313fa5102ab4739cd8 | [
"MIT"
] | null | null | null | instapp/migrations/0002_auto_20211018_1241.py | bensammwaniki/insta-clone | 603fe0bc2313d49cd4df9c313fa5102ab4739cd8 | [
"MIT"
] | null | null | null | instapp/migrations/0002_auto_20211018_1241.py | bensammwaniki/insta-clone | 603fe0bc2313d49cd4df9c313fa5102ab4739cd8 | [
"MIT"
] | 1 | 2021-12-07T08:38:36.000Z | 2021-12-07T08:38:36.000Z | # Generated by Django 3.2.8 on 2021-10-18 09:41
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: rename Image -> Post and add Likes/Comments models."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('instapp', '0001_initial'),
    ]

    operations = [
        # Image becomes Post; existing foreign keys follow the rename.
        migrations.RenameModel(
            old_name='Image',
            new_name='Post',
        ),
        migrations.CreateModel(
            name='Likes',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('likes', models.IntegerField(default=0)),
                ('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='instapp.post')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Comments',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comment', models.CharField(max_length=100)),
                ('comment_date', models.DateTimeField(auto_now_add=True)),
                ('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='instapp.post')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 39.025 | 118 | 0.610506 |
acf397ee0535d0e037a00384c981d8733a0e4ae3 | 810 | py | Python | nautobot_chatops/tests/workers/two_commands.py | tim-fiola/nautobot-plugin-chatops | 6edcf57851bb6068f128de8dfaef0cbd0fe3fd17 | [
"Apache-2.0"
] | 27 | 2021-02-24T13:27:28.000Z | 2022-03-09T12:01:04.000Z | nautobot_chatops/tests/workers/two_commands.py | progala/nautobot-plugin-chatops | 8cf03633ec9f8d1905795577aefc5341fc723700 | [
"Apache-2.0"
] | 83 | 2021-03-11T17:45:22.000Z | 2022-03-18T14:51:29.000Z | nautobot_chatops/tests/workers/two_commands.py | progala/nautobot-plugin-chatops | 8cf03633ec9f8d1905795577aefc5341fc723700 | [
"Apache-2.0"
] | 22 | 2021-03-09T18:22:29.000Z | 2022-03-15T13:54:04.000Z | """Test file defining two commands and their subcommands (issue #20)."""
from django_rq import job
from nautobot_chatops.workers import subcommand_of, handle_subcommands
@job("default")
def first_command(subcommand, **kwargs):
    """My first command."""
    # Delegate subcommand dispatch to the shared worker helper.
    return handle_subcommands("first_command", subcommand, **kwargs)
@job("default")
def second_command(subcommand, **kwargs):
    """My second command."""
    # Delegate subcommand dispatch to the shared worker helper.
    return handle_subcommands("second_command", subcommand, **kwargs)
# pylint: disable=unused-argument
@subcommand_of("first_command")
def first_subcommand(dispatcher, *args):
    """Do the first thing of the first command."""
    # Docstring-only stub: this test fixture only exercises registration.
# pylint: disable=unused-argument
@subcommand_of("second_command")
def second_subcommand(dispatcher, *args):
    """Do the second thing of the second command."""
    # Docstring-only stub: this test fixture only exercises registration.
| 27.931034 | 72 | 0.740741 |
acf3982ab5d74e8199eb59d5521aa8a728beef77 | 1,167 | py | Python | codegen/python/fixtures/class/Tree.py | mrpotes/go-raml | f151e1e143c47282b294fe70c5e56f113988ed10 | [
"BSD-2-Clause"
] | 142 | 2016-02-11T06:23:34.000Z | 2022-03-24T06:05:22.000Z | codegen/python/fixtures/class/Tree.py | mrpotes/go-raml | f151e1e143c47282b294fe70c5e56f113988ed10 | [
"BSD-2-Clause"
] | 297 | 2016-02-04T06:23:13.000Z | 2020-08-20T13:23:22.000Z | codegen/python/fixtures/class/Tree.py | mrpotes/go-raml | f151e1e143c47282b294fe70c5e56f113988ed10 | [
"BSD-2-Clause"
] | 49 | 2016-02-01T10:59:50.000Z | 2021-05-20T15:04:01.000Z | # DO NOT EDIT THIS FILE. This file will be overwritten when re-running go-raml.
"""
Auto-generated class for Tree
"""
from six import string_types
from . import client_support
class Tree(object):
    """
    auto-generated. don't touch.
    """

    @staticmethod
    def create(**kwargs):
        """
        Build a Tree from keyword arguments.

        :type name: string_types
        :type right: Tree
        :rtype: Tree
        """
        return Tree(**kwargs)

    def __init__(self, json=None, **kwargs):
        """Accept either a decoded JSON dict or plain keyword arguments."""
        if json is None and not kwargs:
            raise ValueError('No data or kwargs present')

        class_name = 'Tree'
        data = json or kwargs

        # set attributes
        # set_property(name, data, types, ..., required, class_name) validates
        # and extracts each field — see client_support for the exact contract.
        data_types = [string_types]
        self.name = client_support.set_property('name', data, data_types, False, [], False, True, class_name)
        data_types = [Tree]
        self.right = client_support.set_property('right', data, data_types, False, [], False, True, class_name)

    def __str__(self):
        return self.as_json(indent=4)

    def as_json(self, indent=0):
        """Serialize this object to a JSON string."""
        return client_support.to_json(self, indent=indent)

    def as_dict(self):
        """Serialize this object to a plain dict."""
        return client_support.to_dict(self)
acf39842590524dbdbb6da9ee504f9bf5164c4e6 | 793 | py | Python | playlist/views.py | ChocoYokan/PlayMix | f7324f7c9b992b070c12b21618eb8633b8d99f57 | [
"MIT"
] | null | null | null | playlist/views.py | ChocoYokan/PlayMix | f7324f7c9b992b070c12b21618eb8633b8d99f57 | [
"MIT"
] | null | null | null | playlist/views.py | ChocoYokan/PlayMix | f7324f7c9b992b070c12b21618eb8633b8d99f57 | [
"MIT"
] | null | null | null | from rest_framework import viewsets
from playlist.models import Content, PlayList
from playlist.serializers import (
ContentsDetailSerializer,
PlaylistSerializer,
PlayListWriteSerializer,
)
class PlayListViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for the requesting user's playlists."""

    queryset = PlayList.objects.all()

    def get_queryset(self):
        """Restrict results to playlists owned by the logged-in user."""
        return PlayList.objects.filter(user=self.request.user)

    def get_serializer_class(self):
        """Use the write serializer for create/update, the read one otherwise."""
        if self.action in ("create", "update"):
            return PlayListWriteSerializer
        return PlaylistSerializer
class ContentViewSet(viewsets.ModelViewSet):
    """Standard CRUD endpoints for playlist contents."""

    queryset = Content.objects.all()
    serializer_class = ContentsDetailSerializer
| 24.78125 | 54 | 0.703657 |
acf398579d502e52d9a75b54967bf0797078e827 | 1,166 | py | Python | dimreducers_crusher/reducers/Chalmer.py | iggisv9t/dimreducers-crusher | b3a327b08d42580d946054b2ade4936d67dc37c3 | [
"Apache-2.0"
] | 1 | 2021-05-24T15:05:13.000Z | 2021-05-24T15:05:13.000Z | dimreducers_crusher/reducers/Chalmer.py | V-Marco/dimreducers-crusher | 3632ed9f51d26e1732199c11750eefd54cfda45d | [
"Apache-2.0"
] | 12 | 2021-04-01T14:04:57.000Z | 2021-05-24T20:27:00.000Z | dimreducers_crusher/reducers/Chalmer.py | V-Marco/dimreducers-crusher | 3632ed9f51d26e1732199c11750eefd54cfda45d | [
"Apache-2.0"
] | 5 | 2021-04-06T11:31:59.000Z | 2021-05-05T08:23:30.000Z | from .AbstractReducer import AbstractReducer
import numpy as np
import forcelayout as fl
import warnings
class Chalmer(AbstractReducer):
    """Dimensionality reducer wrapping forcelayout's NeighbourSampling layout."""

    def __init__(self, d: int = 2, random_state: int = 0, **kwargs):
        super().__init__(d, random_state)
        # The underlying algorithm exposes no seed control and only produces
        # 2-D layouts, so both constructor arguments are effectively fixed.
        warnings.warn("Setting random seed does not affect Chalmer.", UserWarning)
        warnings.warn("Chalmer supports only d = 2.", UserWarning)
        # Holds the forcelayout object after fit_transform; None until then.
        self.fitted = None

    def fit_transform(self, x: np.ndarray, **kwargs) -> np.ndarray:
        """Compute and return the 2-D spring-layout embedding of `x`."""
        self.fitted = fl.spring_layout(dataset=x, algorithm=fl.NeighbourSampling)
        return self.fitted.spring_layout()

    def fit(self, x: np.ndarray, **kwargs):
        # Separate fit/transform phases are not supported by the algorithm.
        raise NotImplementedError

    def transform(self, x: np.ndarray, **kwargs) -> np.ndarray:
        raise NotImplementedError

    def set_random_state(self, random_state: int = 0):
        warnings.warn("Setting random seed does not affect Chalmer.", UserWarning)

    @property
    def is_deterministic(self) -> bool:
        # NOTE(review): the constructor warns that seeding has no effect on the
        # layout, yet this reports True — confirm whether the layout is
        # actually deterministic for a fixed input.
        return True

    @property
    def is_stateful(self) -> bool:
        return True

    @staticmethod
    def get_parameter_ranges() -> dict:
        # No tunable parameter ranges are declared for this reducer.
        return None
acf398d47f1aaf9c51a9f75e0ea70bdc56060a79 | 8,660 | py | Python | scripts/interwikidata.py | ModelTemplate/WarframeWikiBot | 7685057f65d95551ccdb7e958aac3bb58ee8b5b3 | [
"MIT"
] | null | null | null | scripts/interwikidata.py | ModelTemplate/WarframeWikiBot | 7685057f65d95551ccdb7e958aac3bb58ee8b5b3 | [
"MIT"
] | 6 | 2021-02-27T03:35:42.000Z | 2021-03-07T22:17:40.000Z | scripts/interwikidata.py | ModelTemplate/WarframeWikiBot | 7685057f65d95551ccdb7e958aac3bb58ee8b5b3 | [
"MIT"
] | null | null | null | #!/usr/bin/python
"""
Script to handle interwiki links based on Wikibase.
This script connects pages to Wikibase items using language links on the page.
If multiple language links are present, and they are connected to different
items, the bot skips. After connecting the page to an item, language links
can be removed from the page.
These command line parameters can be used to specify which pages to work on:
¶ms;
Furthermore, the following command line parameters are supported:
-clean Clean pages.
-create Create items.
-merge Merge items.
-summary: Use your own edit summary for cleaning the page.
"""
# (C) Pywikibot team, 2015-2021
#
# Distributed under the terms of the MIT license.
#
from typing import Union
import pywikibot
from pywikibot import pagegenerators, output, warning
from pywikibot.backports import Set
from pywikibot.bot import ExistingPageBot, SingleSiteBot, suggest_help
# This is required for the text that is shown when you run this script
# with the parameter -help.
docuReplacements = {'¶ms;': pagegenerators.parameterHelp} # noqa: N816
# Allowed namespaces. main, project, template, category
NAMESPACES = (0, 4, 10, 14)
# TODO: Some templates on pages, like csd, inuse and afd templates,
# should cause the bot to skip the page, see T134497
class IWBot(ExistingPageBot, SingleSiteBot):
"""The bot for interwiki."""
def __init__(self, **kwargs) -> None:
"""Initialize the bot."""
self.available_options.update({
'clean': False,
'create': False,
'merge': False,
'summary': None,
'ignore_ns': False, # used by interwikidata_tests only
})
super().__init__(**kwargs)
if not self.site.has_data_repository:
raise ValueError('{site} does not have a data repository, '
'use interwiki.py instead.'.format(
site=self.site))
self.repo = self.site.data_repository()
if not self.opt.summary:
self.opt.summary = pywikibot.i18n.twtranslate(
self.site, 'interwikidata-clean-summary')
def treat_page(self) -> None:
"""Check page."""
if (self.current_page.namespace() not in NAMESPACES
and not self.opt.ignore_ns):
output('{page} is not in allowed namespaces, skipping'
.format(page=self.current_page.title(
as_link=True)))
return
self.iwlangs = pywikibot.textlib.getLanguageLinks(
self.current_page.text, insite=self.current_page.site)
if not self.iwlangs:
output('No interlanguagelinks on {page}'.format(
page=self.current_page.title(as_link=True)))
return
try:
item = pywikibot.ItemPage.fromPage(self.current_page)
except pywikibot.NoPage:
item = None
if item is None:
item = self.try_to_add()
if self.opt.create and item is None:
item = self.create_item()
else:
if self.opt.merge:
item = self.try_to_merge(item)
if item and self.opt.clean:
self.current_item = item
self.clean_page()
def create_item(self) -> pywikibot.ItemPage:
"""Create item in repo for current_page."""
data = {'sitelinks':
{self.site.dbName():
{'site': self.site.dbName(),
'title': self.current_page.title()}
},
'labels':
{self.site.lang:
{'language': self.site.lang,
'value': self.current_page.title()}
}
}
for site, page in self.iwlangs.items():
if not page.exists():
continue
dbname = site.dbName()
title = page.title()
data['sitelinks'][dbname] = {'site': dbname, 'title': title}
data['labels'][site.lang] = {'language': site.lang, 'value': title}
summary = ('Bot: New item with sitelink(s) from '
+ self.current_page.title(as_link=True, insite=self.repo))
item = pywikibot.ItemPage(self.repo)
item.editEntity(data, new='item', summary=summary)
output('Created item {item}'.format(item=item.getID()))
return item
def handle_complicated(self) -> bool:
"""
Handle pages when they have interwiki conflict.
When this method returns True it means conflict has resolved
and it's okay to clean old interwiki links.
This method should change self.current_item and fix conflicts.
Change it in subclasses.
"""
return False
def clean_page(self) -> None:
"""Clean interwiki links from the page."""
if not self.iwlangs:
return
dbnames = [iw_site.dbName() for iw_site in self.iwlangs]
if set(dbnames) - set(self.current_item.sitelinks.keys()):
if not self.handle_complicated():
warning('Interwiki conflict in {}, skipping...'
.format(self.current_page.title(as_link=True)))
return
output('Cleaning up the page')
new_text = pywikibot.textlib.removeLanguageLinks(
self.current_page.text, site=self.current_page.site)
self.put_current(new_text, summary=self.opt.summary)
def get_items(self) -> Set[pywikibot.ItemPage]:
"""Return all items of pages linked through the interwiki."""
wd_data = set()
for iw_page in self.iwlangs.values():
if not iw_page.exists():
warning('Interwiki {} does not exist, skipping...'
.format(iw_page.title(as_link=True)))
continue
try:
wd_data.add(pywikibot.ItemPage.fromPage(iw_page))
except pywikibot.NoPage:
output('Interwiki {} does not have an item'
.format(iw_page.title(as_link=True)))
return wd_data
def try_to_add(self) -> Union[pywikibot.ItemPage, bool, None]:
"""Add current page in repo."""
wd_data = self.get_items()
if not wd_data:
# will create a new item with interwiki
return None
if len(wd_data) > 1:
warning('Interwiki conflict in {}, skipping...'
.format(self.current_page.title(as_link=True)))
return False
item = list(wd_data).pop()
if self.current_page.site.dbName() in item.sitelinks:
warning('Interwiki conflict in {}, skipping...'
.format(item.title(as_link=True)))
return False
output('Adding link to ' + item.title())
item.setSitelink(self.current_page, summary='Added ' + (
self.current_page.title(as_link=True, insite=item.site)))
return item
    def try_to_merge(self, item) -> Union[pywikibot.ItemPage, bool, None]:
        """Merge *item* into the item shared by the interwiki-linked pages.

        Returns:
            None  -- the interwiki pages have no item (nothing to merge into);
            False -- conflict (several candidate items) or the merge was
                     rejected by the API;
            the merged target ItemPage (refreshed) on success.
        """
        wd_data = self.get_items()
        if not wd_data:
            # todo: add links to item
            return None
        if len(wd_data) > 1:
            warning('Interwiki conflict in {}, skipping...'
                    .format(self.current_page.title(as_link=True)))
            return False
        target_item = list(wd_data).pop()
        try:
            item.mergeInto(target_item)
        except pywikibot.data.api.APIError:
            # warning already printed by the API
            return False
        else:
            # Re-fetch so the caller sees the post-merge state.
            target_item.get(force=True)
            return target_item
def main(*args) -> None:
    """Parse command line arguments and run the bot.

    If args is an empty list, sys.argv is used.

    @param args: command line arguments
    @type args: str
    """
    local_args = pywikibot.handle_args(args)
    gen_factory = pagegenerators.GeneratorFactory()
    options = {}
    for arg in gen_factory.handle_args(local_args):
        name, _, value = arg.partition(':')
        name = name[1:] if name.startswith('-') else None
        # Only -summary carries a value; every other flag is a boolean switch.
        options[name] = value if name == 'summary' else True
    site = pywikibot.Site()
    generator = gen_factory.getCombinedGenerator(preload=True)
    if not generator:
        suggest_help(missing_generator=True)
        return
    bot = IWBot(generator=generator, site=site, **options)
    bot.run()
# Entry point: run the bot only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| 35.060729 | 79 | 0.590416 |
acf3993b09a90467b46abf9045e4deb0bdae3d72 | 72 | py | Python | examples/example_aa.py | Ichunjo/vardefunc | c6ace014f8359909363919ba151b38486d6d73d4 | [
"MIT"
] | 18 | 2020-05-06T19:56:48.000Z | 2022-03-21T07:26:20.000Z | examples/example_aa.py | Ichunjo/vardefunc | c6ace014f8359909363919ba151b38486d6d73d4 | [
"MIT"
] | 4 | 2021-05-01T13:34:22.000Z | 2021-08-21T13:12:01.000Z | examples/example_aa.py | Ichunjo/vardefunc | c6ace014f8359909363919ba151b38486d6d73d4 | [
"MIT"
] | 8 | 2020-12-01T04:25:48.000Z | 2022-03-06T11:58:19.000Z | import vardefunc as vdf
import vapoursynth as vs
core = vs.core
# TODO | 12 | 24 | 0.763889 |
acf399708a022487891e0f1c9ae3a810353d5edf | 4,821 | py | Python | xetra/common/meta_process.py | Kenebehi/xetra-production-etl-pipeline | a76df84c7a5fdbee4559e9799dc605ac0beaa6b3 | [
"Apache-2.0"
] | null | null | null | xetra/common/meta_process.py | Kenebehi/xetra-production-etl-pipeline | a76df84c7a5fdbee4559e9799dc605ac0beaa6b3 | [
"Apache-2.0"
] | null | null | null | xetra/common/meta_process.py | Kenebehi/xetra-production-etl-pipeline | a76df84c7a5fdbee4559e9799dc605ac0beaa6b3 | [
"Apache-2.0"
] | null | null | null | """
Methods for processing the meta file
"""
import collections
from datetime import datetime, timedelta
import pandas as pd
from xetra.common.s3 import S3BucketConnector
from xetra.common.constants import MetaProcessFormat
from xetra.common.custom_exceptions import WrongMetaFileException
class MetaProcess():
    """
    Static helpers for maintaining the job's meta file on S3.

    The meta file is a CSV with one row per processed source date plus the
    date on which it was processed; it is used to decide which dates still
    need extraction.
    """

    @staticmethod
    def update_meta_file(extract_date_list: list, meta_key: str, s3_bucket_meta: S3BucketConnector) -> bool:
        """
        Updating the meta file with the processed Xetra dates and todays date as processed date

        :param: extract_date_list -> a list of dates that are extracted from the source
        :param: meta_key -> key of the meta file on the S3 bucket
        :param: s3_bucket_meta -> S3BucketConnector for the bucket with the meta file

        :raises WrongMetaFileException: if an existing meta file has a
            different column set than expected
        :returns: True on success
        """
        # Creating an empty DataFrame using the meta file column names
        df_new = pd.DataFrame(columns=[
            MetaProcessFormat.META_SOURCE_DATE_COL.value,
            MetaProcessFormat.META_PROCESS_COL.value])
        # Filling the date column with extract_date_list
        df_new[MetaProcessFormat.META_SOURCE_DATE_COL.value] = extract_date_list
        # Filling the processed column with today's date (same value for all rows)
        df_new[MetaProcessFormat.META_PROCESS_COL.value] = \
            datetime.today().strftime(MetaProcessFormat.META_PROCESS_DATE_FORMAT.value)
        try:
            # If meta file exists -> union DataFrame of old and new meta data is created
            df_old = s3_bucket_meta.read_csv_to_df(meta_key)
            # Counter comparison makes the column check order-independent.
            if collections.Counter(df_old.columns) != collections.Counter(df_new.columns):
                raise WrongMetaFileException
            df_all = pd.concat([df_old, df_new])
        except s3_bucket_meta.session.client('s3').exceptions.NoSuchKey:
            # No meta file exists -> only the new data is used
            df_all = df_new
        # Writing to S3
        s3_bucket_meta.write_df_to_s3(df_all, meta_key, MetaProcessFormat.META_FILE_FORMAT.value)
        return True

    @staticmethod
    def return_date_list(first_date: str, meta_key: str, s3_bucket_meta: S3BucketConnector):
        """
        Creating a list of dates based on the input first_date and the already
        processed dates in the meta file

        :param: first_date -> the earliest date Xetra data should be processed
        :param: meta_key -> key of the meta file on the S3 bucket
        :param: s3_bucket_meta -> S3BucketConnector for the bucket with the meta file

        returns:
          min_date: first date that should be processed
          return_date_list: list of all dates from min_date till today
        """
        # start is first_date minus one day: the day before is needed as
        # context for the first processed date.
        start = datetime.strptime(first_date,
                                  MetaProcessFormat.META_DATE_FORMAT.value)\
            .date() - timedelta(days=1)
        today = datetime.today().date()
        try:
            # If meta file exists create return_date_list using the content of the meta file
            # Reading meta file
            df_meta = s3_bucket_meta.read_csv_to_df(meta_key)
            # Creating a list of dates from first_date untill today
            dates = [start + timedelta(days=x) for x in range(0, (today - start).days + 1)]
            # Creating set of all dates in meta file
            src_dates = set(pd.to_datetime(
                df_meta[MetaProcessFormat.META_SOURCE_DATE_COL.value]
            ).dt.date)
            # dates[1:] excludes the artificial "day before first_date" entry.
            dates_missing = set(dates[1:]) - src_dates
            if dates_missing:
                # Determining the earliest date that should be extracted
                min_date = min(set(dates[1:]) - src_dates) - timedelta(days=1)
                # Creating a list of dates from min_date untill today
                return_min_date = (min_date + timedelta(days=1))\
                    .strftime(MetaProcessFormat.META_DATE_FORMAT.value)
                return_dates = [
                    date.strftime(MetaProcessFormat.META_DATE_FORMAT.value) \
                    for date in dates if date >= min_date
                ]
            else:
                # Setting values for the earliest date and the list of dates
                # (far-future sentinel date signals "nothing to process").
                return_dates = []
                return_min_date = datetime(2200, 1, 1).date()\
                    .strftime(MetaProcessFormat.META_DATE_FORMAT.value)
        except s3_bucket_meta.session.client('s3').exceptions.NoSuchKey:
            # No meta file found -> creating a date list from first_date - 1 day untill today
            return_min_date = first_date
            return_dates = [
                (start + timedelta(days=x)).strftime(MetaProcessFormat.META_DATE_FORMAT.value) \
                for x in range(0, (today - start).days + 1)
            ]
        return return_min_date, return_dates
| 47.732673 | 100 | 0.640946 |
acf399d0376cd3657294ac2e539ef75f655c88c7 | 37,516 | py | Python | styler.py | kymwatts/Houdini-Plugin-for-Tensorflow-Smoke-Stylization | c7f4e614187e4d36402c1e4401a82f513eb83e0c | [
"Apache-2.0"
] | 15 | 2019-10-11T12:02:31.000Z | 2020-06-14T21:56:12.000Z | styler.py | kymwatts/Houdini-Plugin-for-Tensorflow-Smoke-Stylization | c7f4e614187e4d36402c1e4401a82f513eb83e0c | [
"Apache-2.0"
] | 2 | 2021-03-04T19:00:32.000Z | 2022-03-31T20:58:41.000Z | styler.py | kymwatts/Houdini-Plugin-for-Tensorflow-Smoke-Stylization | c7f4e614187e4d36402c1e4401a82f513eb83e0c | [
"Apache-2.0"
] | 8 | 2020-07-28T19:39:54.000Z | 2022-03-22T16:01:35.000Z | import argparse
import os
import matplotlib.pyplot as plt
from PIL import Image
from tqdm import trange
import platform
import subprocess as sp
import numpy as np
import tensorflow as tf
from util import *
from transform import grad, curl, advect, rotate, rot_mat
import sys
import pickle
# Command line configuration for the stylizer.
parser = argparse.ArgumentParser()
# --- run mode / I-O locations ---
parser.add_argument("--houdini", type=str2bool, default=False)
parser.add_argument("--single_frame", type=str2bool, default=False)
parser.add_argument("--iter_seg", type=int, default=0)
parser.add_argument("--style_path", type=str, default='')
parser.add_argument("--data_dir", type=str, default='data/smoke_gun')
parser.add_argument("--log_dir", type=str, default='log/smoke_gun')
parser.add_argument("--npz2vdb_dir", type=str, default='data\\npz2vdb')
parser.add_argument("--tag", type=str, default='test')
parser.add_argument("--seed", type=int, default=777)
parser.add_argument("--model_path", type=str, default='data/model/tensorflow_inception_graph.pb')
parser.add_argument("--pool1", type=str2bool, default=True)
# --- volume rendering / multi-view sampling ---
parser.add_argument("--transmit", type=float, default=0.1)
parser.add_argument("--rotate", type=str2bool, default=True)
parser.add_argument('--phi0', type=int, default=-5) # latitude (elevation) start
parser.add_argument('--phi1', type=int, default=5) # latitude end
parser.add_argument('--phi_unit', type=int, default=5)
parser.add_argument('--theta0', type=int, default=-10) # longitude start
parser.add_argument('--theta1', type=int, default=10) # longitude end
parser.add_argument('--theta_unit', type=int, default=10)
parser.add_argument('--v_batch', type=int, default=1, help='# of rotation matrix for batch process')
parser.add_argument('--n_views', type=int, default=9, help='# of view points')
parser.add_argument('--sample_type', type=str, default='poisson',
                    choices=['uniform', 'poisson', 'both'])
# --- sequence / field parameterization ---
parser.add_argument("--target_frame", type=int, default=70)
parser.add_argument("--num_frames", type=int, default=1)
parser.add_argument("--window_size", type=int, default=1)
parser.add_argument("--scale", type=float, default=1.0)
parser.add_argument("--mask", type=str2bool, default=True)
parser.add_argument("--field_type", type=str, default='field',
                    choices=['field', 'velocity', 'density'])
parser.add_argument("--w_field", type=float, default=1, help='weight between pot. and str.')
parser.add_argument("--adv_order", type=int, default=2, choices=[1,2], help='SL or MACCORMACK')
parser.add_argument("--resize_scale", type=float, default=1.0)
# --- style-transfer losses ---
parser.add_argument("--content_layer", type=str, default='mixed4d_3x3_bottleneck_pre_relu')
parser.add_argument("--content_channel", type=int, default=139)
parser.add_argument("--style_layer", type=str, default='conv2d2,mixed3b,mixed4b') # Weight layer # Layer list
parser.add_argument("--w_content", type=float, default=1)
parser.add_argument("--w_content_amp", type=float, default=100)
parser.add_argument("--w_style", type=float, default=0)
parser.add_argument("--w_style_layer", type=str, default='1,1,1') # Weight Ratio
parser.add_argument("--content_target", type=str, default='')
parser.add_argument("--style_target", type=str, default='')
parser.add_argument("--top_k", type=int, default=5)
# --- optimization ---
parser.add_argument("--iter", type=int, default=20)
parser.add_argument("--lr", type=float, default=0.002)
parser.add_argument("--lap_n", type=int, default=3)
parser.add_argument("--octave_n", type=int, default=3)
parser.add_argument("--octave_scale", type=float, default=1.8)
parser.add_argument("--g_sigma", type=float, default=1.2)
class Styler(object):
    def __init__(self, self_dict):
        """Build the TF-1.x stylization graph.

        :param self_dict: dict of run options (typically ``vars(args)``);
            every key becomes an attribute of the instance.
        """
        # get arguments
        for arg in self_dict: setattr(self, arg, self_dict[arg])
        self.rng = np.random.RandomState(self.seed)
        tf.set_random_seed(self.seed)
        # network setting: load the pre-trained Inception graph definition
        self.graph = tf.Graph()
        self.sess = tf.InteractiveSession(graph=self.graph)
        self.model_path = self.style_path + "/" + self.model_path
        with tf.gfile.GFile(self.model_path, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
        # fix checkerboard artifacts: ksize should be divisible by the stride size
        # but it changes scale
        if self.pool1:
            for n in graph_def.node:
                if 'conv2d0_pre_relu/conv' in n.name:
                    n.attr['strides'].list.i[1:3] = [1,1]
        # density input
        # shape: [D,H,W]
        d_shp = [None,None,None]
        self.d = tf.placeholder(dtype=tf.float32, shape=d_shp, name='density')
        # add batch dim / channel dim
        # shape: [1,D,H,W,1]
        d = tf.expand_dims(tf.expand_dims(self.d, axis=0), axis=-1)
        ######
        # sequence stylization: d_opt is the optimized variable (field,
        # velocity, or additive density, depending on field_type)
        self.d_opt = tf.placeholder(dtype=tf.float32, name='opt')
        # self.c = channel count of the optimized quantity
        if 'field' in self.field_type:
            if self.w_field == 1:
                self.c = 1
            elif self.w_field == 0:
                self.c = 3
            else:
                self.c = 4 # scalar (1) + vector field (3)
        elif 'density' in self.field_type:
            self.c = 1 # scalar field
        else:
            self.c = 3 # vector field
        if 'field' in self.field_type:
            # potential/stream-function field -> velocity (grad/curl blend)
            d_opt = self.d_opt[:,:,::-1] * tf.to_float(tf.shape(self.d_opt)[2])
            if self.w_field == 1:
                self.v_ = grad(d_opt)
            elif self.w_field == 0:
                self.v_ = curl(d_opt)
            else:
                pot = d_opt[...,0,None]
                strf = d_opt[...,1:]
                self.v_p = grad(pot)
                self.v_s = curl(strf)
                self.v_ = self.v_p*self.w_field + self.v_s*(1-self.w_field)
            # normalize per-axis to grid units and advect the density
            v = self.v_[:,:,::-1]
            vx = v[...,0] / tf.to_float(tf.shape(v)[3])
            vy = -v[...,1] / tf.to_float(tf.shape(v)[2])
            vz = v[...,2] / tf.to_float(tf.shape(v)[1])
            v = tf.stack([vz,vy,vx], axis=-1)
            d = advect(d, v, order=self.adv_order, is_3d=True)
        elif 'velocity' in self.field_type:
            v = self.d_opt # [1,D,H,W,3]
            d = advect(d, v, order=self.adv_order, is_3d=True)
        else:
            # stylize by addition
            d += self.d_opt # [1,D,H,W,1]
        self.b_num = self.v_batch
        ######
        ######
        # velocity fields to advect gradients [B,D,H,W,3]
        if self.window_size > 1:
            self.v = tf.placeholder(dtype=tf.float32, name='velocity')
            self.g = tf.placeholder(dtype=tf.float32, name='gradient')
            self.adv = advect(self.g, self.v, order=self.adv_order, is_3d=True)
        ######
        # value clipping (d >= 0)
        d = tf.maximum(d, 0)
        # stylized 3d result
        self.d_out = d
        if self.rotate:
            d, self.rot_mat = rotate(d) # [b,D,H,W,1]
            # compute rotation matrices
            self.rot_mat_, self.views = rot_mat(self.phi0, self.phi1, self.phi_unit,
                                                self.theta0, self.theta1, self.theta_unit,
                                                sample_type=self.sample_type, rng=self.rng,
                                                nv=self.n_views)
            if self.n_views is None:
                self.n_views = len(self.views)
                print('# vps:', self.n_views)
            assert(self.n_views % self.v_batch == 0)
        # render 3d volume (front-to-back transmittance compositing)
        transmit = tf.exp(-tf.cumsum(d[:,::-1], axis=1)*self.transmit)
        d = tf.reduce_sum(d[:,::-1]*transmit, axis=1)
        d /= tf.reduce_max(d) # [0,1]
        # resize if needed
        if abs(self.resize_scale - 1) > 1e-7:
            h = tf.to_int32(tf.multiply(float(self.resize_scale), tf.to_float(tf.shape(d)[1])))
            w = tf.to_int32(tf.multiply(float(self.resize_scale), tf.to_float(tf.shape(d)[2])))
            d = tf.image.resize_images(d, size=[h, w])
        # change the range of image to [0-255]
        self.d_img = tf.concat([d*255]*3, axis=-1) # [B,H,W,3]
        # plug-in to the pre-trained network (Inception expects mean-centered input)
        imagenet_mean = 117.0
        d_preprocessed = self.d_img - imagenet_mean
        tf.import_graph_def(graph_def, {'input': d_preprocessed})
        self.layers = [op.name for op in self.graph.get_operations() if op.type=='Conv2D' and 'import/' in op.name]
        #print(self.layers)
def _layer(self, layer):
if 'input' in layer:
return self.d_img
if 'vgg' in self.model_path:
return self.layers[layer]
else:
return self.graph.get_tensor_by_name("import/%s:0" % layer)
def _gram_matrix(self, x):
g_ = []
for i in range(self.b_num):
F = tf.reshape(x[i], (-1, x.shape[-1]))
g = tf.matmul(tf.transpose(F), F)
g_.append(g)
return tf.stack(g_, axis=0)
    def _loss(self, params):
        """Assemble content/style losses into self.total_loss.

        Also creates the placeholders (self.content_feature,
        self.style_features, self.style_denoms) that run() feeds with
        precomputed target activations.
        """
        self.content_loss = 0
        self.style_loss = 0
        self.total_loss = 0
        if self.w_content:
            feature = self._layer(self.content_layer) # assert only one layer
            if 'content_target' in params:
                # Match activations of a target image (semantic transfer).
                self.content_feature = tf.placeholder(tf.float32)
                # self.content_loss -= tf.reduce_mean(feature*self.content_feature) # dot
                self.content_loss += tf.reduce_mean(tf.squared_difference(feature,
                                                    self.content_feature*self.w_content_amp))
            else:
                if self.content_channel:
                    # Maximize one channel, suppress all others (DeepDream style).
                    self.content_loss -= tf.reduce_mean(feature[...,self.content_channel])
                    self.content_loss += tf.reduce_mean(tf.abs(feature[...,:self.content_channel]))
                    self.content_loss += tf.reduce_mean(tf.abs(feature[...,self.content_channel+1:]))
                else:
                    self.content_loss -= tf.reduce_mean(feature)
            self.total_loss += self.content_loss*self.w_content
        if self.w_style and 'style_target' in params:
            self.style_features = []
            self.style_denoms = []
            style_layers = self.style_layer.split(',')
            for style_layer in style_layers:
                feature = self._layer(style_layer)
                gram = self._gram_matrix(feature)
                f_shp = feature.shape
                style_feature = tf.placeholder(tf.float32, shape=f_shp)
                style_gram = self._gram_matrix(style_feature)
                # per-layer normalizer (carries the per-layer weight too)
                style_denom = tf.placeholder(tf.float32, shape=1)
                self.style_loss += tf.reduce_sum(tf.squared_difference(gram, style_gram)) / style_denom
                self.style_features.append(style_feature)
                self.style_denoms.append(style_denom)
            self.total_loss += self.style_loss*self.w_style
    def _content_feature(self, content_target, content_shp):
        """Compute target activations of the content layer for a target image.

        :param content_target: target image, range [0, 255]
        :param content_shp: (H, W) to resize the target to
        :returns: numpy activations; when top_k > 0, all but the k strongest
            logits per batch entry are zeroed.
        """
        if abs(self.resize_scale - 1) > 1e-7:
            content_shp = [int(s*self.resize_scale) for s in content_shp]
        content_target_ = resize(content_target, content_shp)
        feature = self._layer(self.content_layer)
        feature_ = self.sess.run(feature, {self.d_img: [content_target_]*self.b_num})
        if self.top_k > 0:
            # NOTE(review): this assert requires content_layer to be the logits
            # layer whenever top_k > 0, which the defaults do not satisfy --
            # confirm intended configuration before relying on top_k.
            assert('softmax2_pre_activation' in self.content_layer)
            feature_k_ = self.sess.run(tf.nn.top_k(np.abs(feature_), k=self.top_k))
            for i in range(len(feature_)):
                exclude_idx = np.setdiff1d(np.arange(feature_.shape[1]), feature_k_.indices[i])
                feature_[i,exclude_idx] = 0
        return feature_
    def _style_feature(self, style_target, style_shp):
        """Compute per-layer style activations (and loss normalizers).

        :param style_target: style image, range [0, 255]; an optional 4th
            (alpha) channel is used as a mask on the feature maps.
        :param style_shp: (H, W) to resize the style image to
        :returns: (style_features, style_denoms) lists, ordered like
            self.style_layer / self.w_style_layer.
        """
        style_mask = None
        if style_target.shape[-1] == 4:
            # alpha channel -> [0,1] mask; drop it from the RGB target
            style_mask = style_target[...,-1] / 255
            style_target = style_target[...,:-1]
            # plt.figure()
            # plt.subplot(131)
            # plt.imshow(style_target.astype(np.uint8))
            # plt.subplot(132)
            # plt.imshow(style_mask)
            # plt.subplot(133)
            # plt.imshow(np.stack([style_mask]*3, axis=-1)*(style_target/255))
            # plt.show()
        # ISSUE HERE
        if abs(self.resize_scale - 1) > 1e-7:
            style_shp = [int(s*self.resize_scale) for s in style_shp]
        style_target_ = resize(style_target, style_shp)
        style_layers = self.style_layer.split(',')
        w_style_layers = self.w_style_layer.split(',')
        style_features = []
        style_denoms = []
        for style_layer, w_style_layer in zip(style_layers, w_style_layers):
            style_feature = self._layer(style_layer)
            style_feature_ = self.sess.run(style_feature, {self.d_img: [style_target_]*self.b_num})
            # style_gram = self._gram_matrix(style_feature)
            # style_gram_ = self.sess.run(style_gram, {self.d_img: [style_target_]})
            # plt.figure()
            # plt.imshow(style_gram_)
            # plt.show()
            f_shp = style_feature_.shape
            area = f_shp[1]*f_shp[2]
            nc = f_shp[3]
            # Gram-loss normalizer; the per-layer weight is folded in here.
            denom = [4.0 * area**2 * nc**2 * 1e6 / float(w_style_layer)]
            if style_mask is not None:
                # apply the (resized) alpha mask to every feature channel
                feature_mask = resize(style_mask, style_feature_.shape[1:-1])
                feature_mask = np.stack([feature_mask]*style_feature_.shape[-1], axis=-1)
                for i in range(self.b_num):
                    style_feature_[i] *= feature_mask
                    # plt.figure()
                    # # plt.subplot(121)
                    # plt.imshow(style_feature_[i,...,0])
                    # # plt.subplot(122)
                    # # plt.imshow(feature_mask)
                    # plt.show()
            style_features.append(style_feature_)
            style_denoms.append(denom)
        return style_features, style_denoms
    def _transport(self, g, v, a, b):
        """Advect gradient *g* from frame *a* to frame *b*.

        Steps through the per-frame velocity fields *v* one frame at a time:
        forward in time (a < b) with +v, backward (a > b) with -v; when
        a == b the gradient is returned unchanged.
        """
        if a < b:
            for i in range(a,b):
                g = self.sess.run(self.adv, {self.g: g, self.v: v[i,None]})
        elif a > b:
            for i in reversed(range(b,a)):
                g = self.sess.run(self.adv, {self.g: g, self.v: -v[i,None]})
        return g
    def run(self, params):
        """Run the multi-octave gradient-ascent stylization.

        :param params: dict with at least 'd' (densities, [T,D,H,W]); optional
            'mask', 'v' (per-frame velocities), 'lr' (per-frame learning
            rates), 'content_target', 'style_target'.
        :returns: dict with 'd' (stylized densities), 'l' (loss history) and
            'd_iter' (intermediate results saved every iter_seg steps).
        """
        # loss
        self._loss(params)
        # gradient
        g = tf.gradients(-self.total_loss, self.d_opt)[0]
        # laplacian gradient normalizer
        grad_norm = tffunc(np.float32)(partial(lap_normalize,
                                               scale_n=self.lap_n, c=self.c, is_3d=True))
        d = params['d']
        if 'mask' in params:
            mask = params['mask']
            mask = np.stack([mask]*self.c, axis=-1)
        if 'v' in params:
            v = params['v']
        # settings for octave process (coarse-to-fine resolutions)
        oct_size = []
        hw = np.int32(d.shape)[1:]
        for _ in range(self.octave_n):
            oct_size.append(hw.copy())
            hw = np.int32(np.float32(hw)/self.octave_scale)
        print('input size for each octave', oct_size)
        d_shp = [self.num_frames] + [s for s in oct_size[-1]] + [self.c]
        d_opt_ = np.zeros(shape=d_shp, dtype=np.float32)
        # optimize
        loss_history = []
        for octave in trange(self.octave_n):
            # octave process: scale-down for input
            if octave < self.octave_n-1:
                d_ = []
                for i in range(self.num_frames):
                    d_.append(resize(d[i], oct_size[-octave-1]))
                d_ = np.array(d_)
                if 'mask' in params:
                    mask_ = []
                    for i in range(self.num_frames):
                        m = resize(mask[i], oct_size[-octave-1])
                        mask_.append(m)
                if 'v' in params:
                    v_ = []
                    for i in range(self.num_frames-1):
                        v_.append(resize(v[i], oct_size[-octave-1]))
                    v_ = np.array(v_)
            else:
                # finest octave uses the original-resolution inputs
                d_ = d
                if 'mask' in params: mask_ = mask
                if 'v' in params: v_ = v
            if octave > 0:
                # carry the optimized variable over to the new resolution
                d_opt__ = []
                for i in range(self.num_frames):
                    d_opt__.append(resize(d_opt_[i], oct_size[-octave-1]))
                del d_opt_
                d_opt_ = np.array(d_opt__)
            # target activations are recomputed per octave resolution
            feed = {}
            if 'content_target' in params:
                feed[self.content_feature] = self._content_feature(
                    params['content_target'], oct_size[-octave-1][1:])
            if 'style_target' in params:
                style_features, style_denoms = self._style_feature(
                    params['style_target'], oct_size[-octave-1][1:]
                )
                for i in range(len(self.style_features)):
                    feed[self.style_features[i]] = style_features[i]
                    feed[self.style_denoms[i]] = style_denoms[i]
            if (octave == self.octave_n - 1):
                d_opt_iter = []
            for step in trange(self.iter):
                g__ = []
                for t in trange(self.num_frames):
                    feed[self.d] = d_[t]
                    feed[self.d_opt] = d_opt_[t,None]
                    if self.rotate:
                        # accumulate gradients over all view batches
                        g_ = None
                        l_ = 0
                        for i in range(0, self.n_views, self.v_batch):
                            feed[self.rot_mat] = self.rot_mat_[i:i+self.v_batch]
                            g_vp, l_vp = self.sess.run([g, self.total_loss], feed)
                            if g_ is None:
                                g_ = g_vp
                            else:
                                g_ += g_vp
                            l_ += l_vp
                        l_ /= np.ceil(self.n_views/self.v_batch)
                        # resample viewpoints unless uniformly sampled
                        if not 'uniform' in self.sample_type:
                            self.rot_mat_, self.views = rot_mat(
                                self.phi0, self.phi1, self.phi_unit,
                                self.theta0, self.theta1, self.theta_unit,
                                sample_type=self.sample_type, rng=self.rng,
                                nv=self.n_views)
                    else:
                        g_, l_ = self.sess.run([g, self.total_loss], feed)
                    loss_history.append(l_)
                    g_ = denoise(g_, sigma=self.g_sigma)
                    if 'lr' in params:
                        # per-frame learning rate (clamped to the last entry)
                        lr = params['lr'][min(t, len(params['lr'])-1)]
                        g_[0] = grad_norm(g_[0]) * lr
                    else:
                        g_[0] = grad_norm(g_[0]) * self.lr
                    if 'mask' in params: g_[0] *= mask_[t]
                    g__.append(g_)
                if self.window_size > 1:
                    # temporal smoothing: average gradients transported from a
                    # window of neighboring frames
                    n = (self.window_size-1) // 2
                    for t in range(self.num_frames):
                        t0 = np.maximum(t - n, 0)
                        t1 = np.minimum(t + n, self.num_frames-1)
                        # print(t, t0, t1)
                        w = [1/(t1-t0+1)]*self.num_frames
                        g_ = g__[t].copy() * w[t]
                        for s in range(t0,t1+1):
                            if s == t: continue
                            g_ += self._transport(g__[s].copy(), v_, s, t) * w[s] # move s to t
                        d_opt_[t] += g_[0]
                        g__[t] = g_
                else:
                    for t in range(self.num_frames):
                        d_opt_[t] += g__[t][0]
                # to avoid resizing numerical error
                if 'mask' in params:
                    for t in range(self.num_frames):
                        d_opt_[t] *= np.ceil(mask_[t])
                # snapshot intermediate results every iter_seg steps
                if self.iter_seg > 0 and octave == self.octave_n - 1:
                    if (((step / float(self.iter_seg)) - int(step / self.iter_seg)) < 0.00001) and (step != self.iter - 1) and (step != 0):
                        d_opt_iter.append(np.array(d_opt_, copy=True))
        # gather outputs
        result = {'l': loss_history}
        d_opt_iter = np.array(d_opt_iter)
        d_iter = []
        for i in range(d_opt_iter.shape[0]):
            d__ = []
            d_out_ = tf.identity(self.d_out)
            #feed_ = tf.identity(feed)
            for t in range(self.num_frames):
                feed[self.d_opt] = d_opt_iter[i, t, None]
                feed[self.d] = d[t]
                d__.append(self.sess.run(d_out_, feed)[0,...,0])
            d__ = np.array(d__)
            d_iter.append(d__)
        d_iter = np.array(d_iter)
        result['d_iter'] = d_iter
        # final stylized sequence from the last optimized variable
        d_ = []
        for t in range(self.num_frames):
            feed[self.d_opt] = d_opt_[t,None]
            feed[self.d] = d[t]
            d_.append(self.sess.run(self.d_out, feed)[0,...,0])
        d_ = np.array(d_)
        result['d'] = d_
        return result
# Helper functions for Houdini Plugin
def help_send(data):
    """Write *data* to stdout followed by a '|' separator byte.

    Uses the binary buffer on Python 3 and the plain stream on Python 2.
    """
    stream = sys.stdout.buffer if is_py3() else sys.stdout
    stream.write(data)
    stream.write(("|").encode())
def help_receive():
    """Read characters from stdin up to (and consuming) the next '|' separator.

    Returns the accumulated string. Characters are collected in a list and
    joined once (the original built the string with repeated concatenation,
    which is quadratic). Reading also stops at end of input, so a truncated
    stream returns the partial payload instead of looping forever on empty
    reads.
    """
    chunks = []
    while True:
        next_byte = sys.stdin.read(1)
        if next_byte == "|" or not next_byte:  # separator found, or EOF
            break
        chunks.append(next_byte)
    return "".join(chunks)
def is_py3():
    """Return True when running under Python 3 (or later)."""
    return sys.version_info.major >= 3
def stylize(args):
# create a styler
styler = Styler(vars(args))
prepare_dirs_and_logger(args)
# set file path format
d_path_format = os.path.join(args.data_dir, 'd', '%03d.npz')
v_path_format = os.path.join(args.data_dir, 'v', '%03d.npz')
# directories for some visual results
d_dir = os.path.join(args.log_dir, 'd') # original
v_dir = os.path.join(args.log_dir, 'v') # velocity-mid slice
r_dir = os.path.join(args.log_dir, 'r') # result
for img_dir in [d_dir, v_dir, r_dir]:
if not os.path.exists(img_dir):
os.makedirs(img_dir)
d_path = d_path_format % args.target_frame
print('load density fields')
# Houdini plugin
control_data=bytes(1)
control_stop=bytes(0)
if (args.houdini):
# First step (D and V Houdini): Load all data from stdin
first_frame = True
d_img_amount = []
d_max = 0
v_max = 0
while True:
data=sys.stdin.read(1)
if (data == control_data and not is_py3()) or (bytes(int(data)) == control_data):
info_dlen = help_receive()
info_vlen = help_receive()
info_frame = help_receive()
if (is_py3()):
frame_d = pickle.loads(bytes(sys.stdin.read(int(info_dlen)), 'latin1'), encoding='latin1')
else:
frame_d = pickle.loads(sys.stdin.read(int(info_dlen)))
sys.stdin.read(1) # Consume
if (is_py3()):
frame_v = pickle.loads(bytes(sys.stdin.read(int(info_vlen)), 'latin1'), encoding='latin1')
else:
frame_v = pickle.loads(sys.stdin.read(int(info_vlen)))
sys.stdin.read(1) # Consume
if frame_d.max() > d_max: d_max = frame_d.max()
if np.max(np.abs(frame_v)) > v_max: v_max = frame_v.max()
transmit = np.exp(-np.cumsum(frame_d[::-1], axis=0)*args.transmit)
d_img = np.sum(frame_d[::-1]*transmit, axis=0)
d_img /= d_img.max()
# Optional Step: Save original slices
d_xy, d_xz, d_yz = np.array(frame_d, copy=True), np.array(frame_d, copy=True), np.array(frame_d, copy=True)
mid_xy, mid_xz, mid_yz = int(d_xy.shape[0]/2), int(d_xz.shape[1]/2), int(d_yz.shape[2]/2)
d_xy, d_xz, d_yz = d_xy[mid_xy, ...], d_xz[:, mid_xz, ...], d_yz[..., mid_yz]
if d_xy.max() > 0: d_xy /= d_xy.max()
if d_xz.max() > 0: d_xz /= d_xz.max()
if d_yz.max() > 0: d_yz /= d_yz.max()
i_xy, i_xz, i_yz = d_xy*255, d_xz*255, d_yz*255
i_xy, i_xz, i_yz = Image.fromarray(i_xy.astype(np.uint8)), Image.fromarray(i_xz.astype(np.uint8)), Image.fromarray(i_yz.astype(np.uint8))
i_xy_path = os.path.join(d_dir, 'd_xy' + ('%03d.png' % int(info_frame)))
i_xz_path = os.path.join(d_dir, 'd_xz' + ('%03d.png' % int(info_frame)))
i_yz_path = os.path.join(d_dir, 'd_yz' + ('%03d.png' % int(info_frame)))
i_xy.save(i_xy_path)
i_xz.save(i_xz_path)
i_yz.save(i_yz_path)
v_xy, v_xz, v_yz = np.array(frame_v, copy=True), np.array(frame_v, copy=True), np.array(frame_v, copy=True)
mid_xy, mid_xz, mid_yz = int(v_xy.shape[0]/2), int(v_xz.shape[1]/2), int(v_yz.shape[2]/2)
v_xy = (v_xy[mid_xy,...,0]**2 + v_xy[mid_xy,...,1]**2 + v_xy[mid_xy,...,2]**2)**0.5
v_xz = (v_xz[:, mid_xz, ..., 0]**2 + v_xz[:, mid_xz, ..., 1]**2 + v_xz[:, mid_xz, ..., 2]**2)**0.5
v_yz = (v_yz[..., mid_yz, 0]**2 + v_yz[..., mid_yz, 1]**2 + v_yz[..., mid_yz, 2]**2)**0.5
if v_xy.max() > 0: v_xy /= (v_xy.max() + 1e-7)
if v_xz.max() > 0: v_xz /= (v_xz.max() + 1e-7)
if v_yz.max() > 0: v_yz /= (v_yz.max() + 1e-7)
i_xy, i_xz, i_yz = np.uint8(plt.cm.viridis(v_xy)*255), np.uint8(plt.cm.viridis(v_xz)*255), np.uint8(plt.cm.viridis(v_yz)*255)
i_xy, i_xz, i_yz = Image.fromarray(i_xy), Image.fromarray(i_xz), Image.fromarray(i_yz)
i_xy_path = os.path.join(v_dir, 'v_xy' + ('%03d.png' % int(info_frame)))
i_xz_path = os.path.join(v_dir, 'v_xz' + ('%03d.png' % int(info_frame)))
i_yz_path = os.path.join(v_dir, 'v_yz' + ('%03d.png' % int(info_frame)))
i_xy.save(i_xy_path)
i_xz.save(i_xz_path)
i_yz.save(i_yz_path)
t_xy, t_xz, t_yz = np.array(transmit, copy=True), np.array(transmit, copy=True), np.array(transmit, copy=True)
mid_xy, mid_xz, mid_yz = int(t_xy.shape[0]/2), int(t_xz.shape[1]/2), int(t_yz.shape[2]/2)
t_xy, t_xz, t_yz = t_xy[mid_xy, ...], t_xz[:, mid_xz, ...], t_yz[..., mid_yz]
if t_xy.max() > 0: t_xy /= t_xy.max()
if t_xz.max() > 0: t_xz /= t_xz.max()
if t_yz.max() > 0: t_yz /= t_yz.max()
t_xy, t_xz, t_yz = t_xy*255, t_xz*255, t_yz*255
t_xy, t_xz, t_yz = Image.fromarray(t_xy.astype(np.uint8)), Image.fromarray(t_xz.astype(np.uint8)), Image.fromarray(t_yz.astype(np.uint8))
t_xy_path = os.path.join(d_dir, 'txy' + ('%03d.png' % int(info_frame)))
t_xz_path = os.path.join(d_dir, 'txz' + ('%03d.png' % int(info_frame)))
t_yz_path = os.path.join(d_dir, 'tyz' + ('%03d.png' % int(info_frame)))
t_xy.save(t_xy_path)
t_xz.save(t_xz_path)
t_yz.save(t_yz_path)
d_img_amount.append(np.sum(d_img))
if (args.single_frame):
assert(first_frame)
d = np.array([frame_d])
v = np.array([frame_v])
first_frame = False
else:
if first_frame:
first_frame = False
d_dim = frame_d.shape
v_dim = frame_v.shape
d = np.empty([args.num_frames, d_dim[0], d_dim[1], d_dim[2]])
v = np.empty([args.num_frames, v_dim[0], v_dim[1], v_dim[2], v_dim[3]])
d[int(info_frame) - args.target_frame, ...] = frame_d
v[int(info_frame) - args.target_frame, ...] = frame_v
print("Added frame: " + info_frame)
sys.stdout.flush()
elif (data == control_stop and not is_py3()) or (bytes(int(data)) == control_stop):
# Second step: Normalize the densities and velocities
d /= d_max
v /= v_max
print("Begin Stylization")
sys.stdout.flush()
break
else:
print("ERROR: Invalid control byte sent")
print(data)
sys.stdout.flush()
sys.exit(0)
if (not args.houdini):
d = []
d_img_amount = []
# First Step (D non-Houdini): Load all data from path
for i in trange(args.num_frames):
d_path = d_path_format % (args.target_frame+i)
with np.load(d_path) as data:
d_ = data['x'][:,::-1]
if abs(args.scale - 1) > 1e-7:
hw = [int(s*args.scale) for s in hw]
d_ = resize(d_, hw)
# Optional Step: Save original slices
transmit = np.exp(-np.cumsum(d_[::-1], axis=0)*args.transmit)
d_img = np.sum(d_[::-1]*transmit, axis=0)
d_img /= d_img.max()
im = d_img*255
im = Image.fromarray(im.astype(np.uint8))
im_path = os.path.join(d_dir, '%03d.png' % (args.target_frame+i))
im.save(im_path)
d_img_amount.append(np.sum(d_img))
d.append(d_)
d = np.array(d)
params = {'d': d}
# Third Step (All): set learning rate depending on the amount of density
d_amount = np.sum(d, axis=(1,2,3))
d_img_amount = np.array(d_img_amount)
d_img_amount /= d_img_amount.max()
params['lr'] = d_img_amount*args.lr
d_shp = d.shape[1:] # zyx -> dhw
if (not args.houdini):
v_ = []
# First step (V non-Houdini): Load all data from path
for i in trange(args.num_frames-1):
v_path = v_path_format % (args.target_frame+i)
with np.load(v_path) as data:
v = data['x']
vx = np.dstack((v,np.zeros((v.shape[0],v.shape[1],1,v.shape[3]))))
vx = (vx[:,:,1:,0] + vx[:,:,:-1,0]) * 0.5
vy = np.hstack((v,np.zeros((v.shape[0],1,v.shape[2],v.shape[3]))))
vy = (vy[:,1:,:,1] + vy[:,:-1,:,1]) * 0.5
vz = np.vstack((v,np.zeros((1,v.shape[1],v.shape[2],v.shape[3]))))
vz = (vz[1:,:,:,2] + vz[:-1,:,:,2]) * 0.5
v = np.stack([vx,vy,vz], axis=-1)
v = v[:,::-1]
if v.shape[:-1] != d_shp: v = resize(v, d_shp)
# Optional Step: Save original slices
z_mid = int(v.shape[0]/2)
m = (v[z_mid,...,0]**2 + v[z_mid,...,1]**2 + v[z_mid,...,2]**2)**0.5
m /= (m.max() + 1e-7)
# Below originally comments
#v_max = np.abs(v).max()
#v_img = ((v / v_max + 1) / 2)[z_mid]
#im = np.uint8(plt.cm.viridis(m)*255)
#im = Image.fromarray(im)
#im_path = os.path.join(v_dir, '%03d.png' % (args.target_frame+i))
#im.save(im_path)
time_step = 0.5
vx = v[...,0] / time_step / v.shape[2] * (args.scale*0.5)
vy = -v[...,1] / time_step / v.shape[1] * (args.scale*0.5)
vz = v[...,2] / time_step / v.shape[0] * (args.scale*0.5)
v = np.stack([vz,vy,vx], axis=-1)
v_.append(v)
v = np.array(v_) #<-- Velocity here is the full list
params['v'] = v
# Fourth Step (All): mask
if args.mask:
params['mask'] = denoise(d, args.g_sigma)
# Fifth Step (All): load a content target image
if args.content_target:
content_target = np.float32(Image.open(args.content_target))
# remove alpha channel
if content_target.shape[-1] == 4:
content_target = content_target[...,:-1]
# crop
ratio = d_shp[2] / float(d_shp[1]) # x/y
content_target = crop_ratio(content_target, ratio)
# range is still [0-255]
params['content_target'] = content_target
# plt.figure()
# plt.imshow(content_target/255)
# plt.show()
# Sixth Step (All): load a style target
if args.style_target:
style_target = np.float32(Image.open(args.style_target))
# crop
ratio = d_shp[2] / float(d_shp[1])
style_target = crop_ratio(style_target, ratio)
# range is still [0-255]
params['style_target'] = style_target
# plt.figure()
# plt.imshow(style_target/255)
# plt.show()
#########
# Seventh Step (All): stylize
result = styler.run(params)
d_sty, loss, d_iter = result['d'], result['l'], result['d_iter']
print("complete")
sys.stdout.flush()
if (args.houdini):
# Eigth Step (Houdini): Denormalize results
if d_iter.size == 0:
d_iter = np.array([d_sty])
else:
d_iter = np.vstack([d_iter, [d_sty]])
d_iter *= d_max
inters = []
if args.iter_seg > 0:
for i in range(args.iter):
if ((i / float(args.iter_seg) - int(i / args.iter_seg)) < 0.00001) and i != 0:
inters.append(i)
inters.append(args.iter)
assert(len(inters) == d_iter.shape[0]), ("Prog ERROR: Incorrect definition of iterables")
# d_iter = (iter, frame, D, H, W)
for it, d_iter_ in enumerate(d_iter):
for i, d_sty_ in enumerate(d_iter_):
# Optional Step: Save returned results
transmit = np.exp(-np.cumsum(d_sty_[::-1], axis=0)*args.transmit)
d_sty_img = np.sum(d_sty_[::-1]*transmit, axis=0)
d_sty_img /= d_sty_img.max()
im = d_sty_img*255
im = Image.fromarray(im.astype(np.uint8))
im_path = os.path.join(r_dir, ('i%03d_' % (inters[it])) + ('%03d.png' % (args.target_frame+i)) )
im.save(im_path)
# Ninth Step (Houdini): Return results
frame_r = np.array(d_sty_, copy=True, dtype = np.float32)
frame_r = frame_r[:,::-1]
if (is_py3()):
data_r = pickle.dumps(frame_r, 2)
info_r = str(len(data_r)).encode()
sys.stdout.write(str(len(control_data)))
sys.stdout.flush()
help_send(str(inters[it]).encode())
help_send(str(int(args.target_frame+i)).encode())
help_send(info_r)
help_send(data_r)
else:
# NOTE: Python 2.7 modified to use tensorflow cannot use pickle dumps
# We will have to send data back via a string
frame_r = np.around(frame_r, decimals = 2)
util_dim = frame_r.shape
frame_r = frame_r.flatten()
r_string = " ".join(str(elem) for elem in frame_r)
sys.stdout.write(control_data)
help_send(str(inters[it]).encode())
help_send(str(int(args.target_frame+i)).encode())
help_send(str(int(util_dim[2])).encode())
help_send(str(int(util_dim[1])).encode())
help_send(str(int(util_dim[0])).encode())
print(r_string)
sys.stdout.flush()
if (is_py3()):
sys.stdout.write(str(len(control_stop)))
else:
sys.stdout.write(control_stop)
sys.stdout.flush()
else:
for i, d_sty_ in enumerate(d_sty):
# save stylized density
d_path = os.path.join(args.log_dir, '%03d.npz' % (args.target_frame+i)) # saves original density as npz
np.savez_compressed(d_path, x=d_sty_[:,::-1])
# export as vdb
if 'Windows' in platform.system():
manta_path = os.path.join(args.npz2vdb_dir, 'manta.exe')
py_path = os.path.join(args.npz2vdb_dir, 'npz2vdb.py')
sh = [manta_path, py_path, '--src_path='+d_path]
sp.call(sh, shell=True)
# save image
transmit = np.exp(-np.cumsum(d_sty_[::-1], axis=0)*args.transmit)
d_sty_img = np.sum(d_sty_[::-1]*transmit, axis=0)
d_sty_img /= d_sty_img.max()
im = d_sty_img*255
im = Image.fromarray(im.astype(np.uint8))
im_path = os.path.join(r_dir, '%03d.png' % (args.target_frame+i))
im.save(im_path)
if __name__ == '__main__':
    # CLI entry point: parse command-line arguments and run the full
    # stylization pipeline defined above.
    args = parser.parse_args()
    stylize(args)
acf39a62ae36938c5b7e2af518773f4a7a96d1e3 | 1,292 | py | Python | demo/flask_tornado.py | xtqxk/owl | 584f5b2eb07d9a0f38dc1b14c7f2a8a730a94dcf | [
"Apache-2.0"
] | 10 | 2019-09-04T06:13:31.000Z | 2021-08-17T08:06:05.000Z | demo/flask_tornado.py | xtqxk/owl | 584f5b2eb07d9a0f38dc1b14c7f2a8a730a94dcf | [
"Apache-2.0"
] | null | null | null | demo/flask_tornado.py | xtqxk/owl | 584f5b2eb07d9a0f38dc1b14c7f2a8a730a94dcf | [
"Apache-2.0"
] | null | null | null | # coding:utf-8
import logging
import sys, os
from flask import Flask
from tornado.wsgi import WSGIContainer
from tornado.options import define
from tornado.ioloop import IOLoop
from tornado.httpserver import HTTPServer
__ROOT__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(__ROOT__, ".."))
from owl.tornado import options
class DemoCfgOptions(object):
    # Root key in the consul K/V store under which these options live.
    __base_key__ = "myroot"
    """
    # the default value is just type template,
    # if key in consul k/v, the value will override by consul
    """
    # NOTE(review): the values below only declare each option's *type*; the
    # live values are expected to come from consul and override them at
    # runtime. `admin_emails` is a mutable class attribute shared by all
    # instances — acceptable here only because it is overwritten, not mutated.
    api_url = "h"
    api_port = 0
    admin_emails = []
    uname = ""
    switch = True


# Module-level singleton read by the Flask view and the change callback.
op = DemoCfgOptions()
def on_callback(key, v):
    """Change callback for DynamicPatch: echo the refreshed option value.

    Reads the freshly patched attribute back off the shared ``op`` config
    object rather than trusting the raw value ``v`` pushed by the watcher.
    """
    current = getattr(op, key)
    print(key, current)
app = Flask(__name__)


@app.route("/")
def index():
    # Serve the current `uname` option value; because `op` is patched
    # dynamically, the response reflects consul-driven config changes.
    return op.uname
def main():
    """Serve the Flask app through Tornado with dynamic config patching.

    Registers the option keys to watch, wraps the WSGI app for Tornado's
    HTTP server on port 8888, and hands control to the patcher's loop.
    """
    define("port", default=8888, help="run on the given port", type=int)
    ioloop = IOLoop.instance()
    patcher = options.DynamicPatch(ioloop, op)
    patcher.add_change_callback(
        "api_url", "uname", "api_port", "switch", callback_handler=on_callback
    )
    logging.info("start")
    server = HTTPServer(WSGIContainer(app))
    server.listen(8888)
    patcher.start()
if __name__ == "__main__":
    # Verbose logging so dynamic-config change callbacks are visible.
    logging.basicConfig(level=logging.DEBUG)
    main()
| 21.533333 | 77 | 0.683437 |
acf39b802b29267bcdfa25537cd8c0c17a4139d1 | 13,456 | py | Python | src/plugins/pipeline_plugins/hooks/ads_cm_hook.py | google/cc4d | 206543832368f96bac7f55c0de93c96e32127779 | [
"Apache-2.0"
] | 11 | 2021-03-23T22:03:00.000Z | 2022-03-30T17:12:38.000Z | src/plugins/pipeline_plugins/hooks/ads_cm_hook.py | google/cc4d | 206543832368f96bac7f55c0de93c96e32127779 | [
"Apache-2.0"
] | 3 | 2021-07-21T10:13:24.000Z | 2021-10-18T03:44:03.000Z | src/plugins/pipeline_plugins/hooks/ads_cm_hook.py | google/cc4d | 206543832368f96bac7f55c0de93c96e32127779 | [
"Apache-2.0"
] | 5 | 2021-05-07T03:30:29.000Z | 2021-11-03T21:05:00.000Z | # python3
# coding=utf-8
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom hook for Google Ads Customer Match.
For customer match details refer to
https://developers.google.com/google-ads/api/docs/remarketing/audience-types/customer-match
"""
import re
from typing import Any, Callable, Dict, Generator, List, Optional, Tuple
from plugins.pipeline_plugins.hooks import ads_hook
from plugins.pipeline_plugins.hooks import output_hook_interface
from plugins.pipeline_plugins.utils import blob
from plugins.pipeline_plugins.utils import errors
# Maximum number of members sent per mutate request to the AdWords API.
_DEFAULT_BATCH_SIZE = 1000
# Anchored pattern for a hex-encoded SHA-256 digest (exactly 64 hex chars).
_SHA256_DIGEST_PATTERN = r'^[A-Fa-f0-9]{64}$'
def _validate_sha256_pattern(field_data: str) -> None:
"""Validates if field_data matches sha256 digest string pattern.
The correct patterh is '^[A-Fa-f0-9]{64}$'
Note: None is an invalid sha256 value
Args:
field_data: A field data which is a part of member data entity of Google
Adwords API
Raises:
DataOutConnectorValueError: If the any field data is invalid or None.
"""
if field_data is None or not re.match(_SHA256_DIGEST_PATTERN, field_data):
raise errors.DataOutConnectorValueError(
'None or string is not in SHA256 format.',
errors.ErrorNameIDMap
.ADS_CM_HOOK_ERROR_PAYLOAD_FIELD_VIOLATES_SHA256_FORMAT)
def _is_address_info_available(event: Dict[Any, Any]) -> bool:
"""Check if address info needs to be added to formatted event.
Args:
event: the raw event from data source.
Returns:
bool to indicate that address info is available.
"""
keys_exist = all(k in event for k in (
'hashedFirstName', 'hashedLastName', 'countryCode', 'zipCode'))
if not keys_exist:
return False
values_exist = all((event['hashedFirstName'], event['hashedLastName'],
event['countryCode'], event['zipCode']))
if not values_exist:
return False
return True
def _format_contact_info_event(event: Dict[Any, Any]) -> Dict[Any, Any]:
  """Format a contact_info event.

  Args:
    event: A raw contact_info event.

  Returns:
    A formatted contact_info event (AdWords Member entity).

  Raises:
    DataOutConnectorValueError for the following scenarios:
      - If neither hashedEmail nor hashedPhoneNumber exists in the payload.
      - hashedEmail or hashedPhoneNumber fields do not meet SHA256 format.
  """
  member = {}

  # Read each identifier once into a local instead of re-fetching it from the
  # source dict for validation and assignment.
  hashed_email = event.get('hashedEmail')
  if hashed_email is not None:
    _validate_sha256_pattern(hashed_email)
    member['hashedEmail'] = hashed_email

  hashed_phone_number = event.get('hashedPhoneNumber')
  if hashed_phone_number is not None:
    _validate_sha256_pattern(hashed_phone_number)
    member['hashedPhoneNumber'] = hashed_phone_number

  # At least one hashed identifier is mandatory for a contact_info member;
  # member can only contain the two keys checked above, so emptiness is
  # equivalent to the former explicit two-key membership test.
  if not member:
    raise errors.DataOutConnectorValueError(
        'Data must contain either a valid hashed email or phone number.',
        errors.ErrorNameIDMap.ADS_CM_HOOK_ERROR_INVALID_EMAIL_AND_PHONE_NUMBER)

  # Address info is optional and only attached when all four address fields
  # are present and non-empty.
  if _is_address_info_available(event):
    hashed_first_name = event['hashedFirstName']
    _validate_sha256_pattern(hashed_first_name)
    hashed_last_name = event['hashedLastName']
    _validate_sha256_pattern(hashed_last_name)
    member['addressInfo'] = {
        'hashedFirstName': hashed_first_name,
        'hashedLastName': hashed_last_name,
        'countryCode': event['countryCode'],
        'zipCode': event['zipCode'],
    }

  return member
def _format_crm_id_event(event: Dict[Any, Any]) -> Dict[Any, Any]:
"""Format a crm_id event.
Args:
event: A raw crm_id event.
Returns:
A formatted crm_id event.
Raises:
DataOutConnectorValueError if userId is not exist in the event.
"""
if 'userId' not in event:
raise errors.DataOutConnectorValueError(
"""userId doesn't exist in crm_id event.""",
errors.ErrorNameIDMap.ADS_CM_HOOK_ERROR_MISSING_USERID_IN_CRMID_EVENT)
member = {'userId': event['userId']}
return member
def _format_mobile_advertising_event(event: Dict[Any, Any]) -> Dict[Any, Any]:
"""Format a mobile_advertising_event event.
Args:
event: A raw mobile_advertising_event event.
Returns:
A formatted mobile_advertising_event event.
Raises:
DataOutConnectorValueError if mobileId field doesn't exist in the event.
"""
if 'mobileId' not in event:
raise errors.DataOutConnectorValueError(
'mobileId field doesn\'t exist in the event.',
errors.ErrorNameIDMap.ADS_CM_HOOK_ERROR_MISSING_MOBILEID_IN_EVENT)
member = {'mobileId': event['mobileId']}
return member
class GoogleAdsCustomerMatchHook(
    ads_hook.GoogleAdsHook, output_hook_interface.OutputHookInterface):
  """Custom hook for Google Ads Customer Match API.

  Sample code for AdWords Customer Match API can be found here.
  https://github.com/google/customer-match-upload-script/blob/master/create_and_populate_list.py
  """

  def __init__(
      self, ads_cm_user_list_name: str,
      ads_upload_key_type: str,
      ads_credentials: str,
      ads_cm_membership_lifespan: int = ads_hook.MEMBERSHIP_LIFESPAN_DAYS,
      ads_cm_create_list: bool = True,
      ads_cm_app_id: Optional[str] = None,
      **kwargs) -> None:
    """Initialize with a specified user_list_name.

    Args:
      ads_cm_user_list_name: The name of the user list to add members to.
      ads_upload_key_type: The upload key type. Refer to ads_hook.UploadKeyType
        for more information.
      ads_credentials: A dict of Adwords client ids and tokens.
        Reference for desired format:
        https://developers.google.com/adwords/api/docs/guides/first-api-call
      ads_cm_membership_lifespan: Number of days a user's cookie stays. Refer to
        ads_hook.GoogleAdsHook for details.
      ads_cm_create_list: A flag to enable a new list creation if a list called
        user_list_name doesn't exist.
      ads_cm_app_id: An ID required for creating a new list if upload_key_type
        is MOBILE_ADVERTISING_ID.
      **kwargs: Other optional arguments.

    Raises:
      DataOutConnectorValueError if any of the following happens.
        - user_list_name is null.
        - membership_lifespan is negative or bigger than 10000.
        - upload_key_type is not supported by ads_hook.
        - app_id is not specificed when create_list = True and upload_key_type
          is MOBILE_ADVERTISING_ID.
    """
    super().__init__(ads_yaml_doc=ads_credentials)
    self._validate_init_params(ads_cm_user_list_name,
                               ads_cm_membership_lifespan)
    self.user_list_name = ads_cm_user_list_name
    self.membership_lifespan = ads_cm_membership_lifespan
    # Must be assigned before _validate_and_set_upload_key_type, which reads
    # self.create_list for the MOBILE_ADVERTISING_ID/app_id check.
    self.create_list = ads_cm_create_list
    self.upload_key_type = self._validate_and_set_upload_key_type(
        ads_upload_key_type, ads_cm_app_id)
    self.app_id = ads_cm_app_id
    # Bind the per-key-type formatter once so sends don't re-dispatch.
    self._format_event = self._select_format_event()

  def _validate_init_params(
      self, user_list_name: str, membership_lifespan: int) -> None:
    """Validate user_list_name and membership_lifespan parameters.

    Args:
      user_list_name: The name of the user list to add members to.
      membership_lifespan: Number of days a user's cookie stays.

    Raises:
      DataOutConnectorValueError if user_list_name is null or
        membership_lifespan is negative or bigger than 10000.
    """
    if not user_list_name:
      raise errors.DataOutConnectorValueError(
          'User list name is empty.',
          errors.ErrorNameIDMap.ADS_CM_HOOK_ERROR_EMPTY_USER_LIST_NAME)
    # 10,000 is the AdWords API upper bound for membership lifespan days.
    if membership_lifespan < 0 or membership_lifespan > 10000:
      raise errors.DataOutConnectorValueError(
          'Membership lifespan is not between 0 and 10,000.',
          errors.ErrorNameIDMap.ADS_CM_HOOK_ERROR_INVALID_MEMBERSHIP_LIFESPAN)

  def _validate_and_set_upload_key_type(
      self, upload_key_type: str, app_id: str) -> ads_hook.UploadKeyType:
    """Validate upload_key_type and the subsequent parameters for each key type.

    Args:
      upload_key_type: The upload key type. Refer to ads_hook.UploadKeyType for
        more information.
      app_id: An ID required for creating a new list if upload_key_type is
        MOBILE_ADVERTISING_ID.

    Returns:
      UploadKeyType: An UploadKeyType object defined in ads_hook.

    Raises:
      DataOutConnectorValueError in the following scenarios:
        - upload_key_type is not supported by ads_hook.
        - app_id is not specificed when create_list = True and upload_key_type
          is MOBILE_ADVERTISING_ID.
    """
    # Enum name lookup: raises KeyError for any unsupported key type string.
    try:
      validated_upload_key_type = ads_hook.UploadKeyType[upload_key_type]
    except KeyError:
      raise errors.DataOutConnectorValueError(
          'Invalid upload key type. See ads_hook.UploadKeyType for details',
          errors.ErrorNameIDMap.ADS_CM_HOOK_ERROR_INVALID_UPLOAD_KEY_TYPE)
    # Creating a brand-new mobile-ID list requires the owning app's ID.
    if (validated_upload_key_type ==
        ads_hook.UploadKeyType.MOBILE_ADVERTISING_ID and self.create_list and
        not app_id):
      raise errors.DataOutConnectorValueError(
          'app_id needs to be specified for '
          'MOBILE_ADVERTISING_ID when create_list is True.',
          errors.ErrorNameIDMap.ADS_CM_HOOK_ERROR_MISSING_APPID)
    return validated_upload_key_type

  def _select_format_event(self) -> Callable[[Dict[Any, Any]], Dict[Any, Any]]:
    """select how to format events based on upload_key_type.

    Returns:
      A formatting function that corresponds to the given upload_key_type.
    """
    format_event_dict = {
        ads_hook.UploadKeyType.CONTACT_INFO:
            _format_contact_info_event,
        ads_hook.UploadKeyType.CRM_ID:
            _format_crm_id_event,
        ads_hook.UploadKeyType.MOBILE_ADVERTISING_ID:
            _format_mobile_advertising_event
    }
    return format_event_dict[self.upload_key_type]

  def _validate_and_prepare_events_to_send(
      self, events: List[Dict[str, Any]]
  ) -> Tuple[List[Tuple[int, Dict[str, Any]]],
             List[Tuple[int, errors.ErrorNameIDMap]]]:
    """Converts events to correct format before sending.

    Reference for the correct format:
    https://developers.google.com/adwords/api/docs/reference/v201809/AdwordsUserListService.Member

    Args:
      events: All unformated events.

    Returns:
      members: Formated events paired with their original index, plus a list
        of (index, error enum) pairs for events that failed validation.
    """
    valid_events = []
    invalid_indices_and_errors = []
    # Original indices are preserved so failures can be reported against the
    # caller's blob positions later in send_events.
    for i, event in enumerate(events):
      try:
        payload = self._format_event(event)
      except errors.DataOutConnectorValueError as error:
        invalid_indices_and_errors.append((i, error.error_num))
      else:
        valid_events.append((i, payload))
    return valid_events, invalid_indices_and_errors

  def _batch_generator(
      self, events: List[Tuple[int, Dict[str, Any]]]
  ) -> Generator[List[Tuple[int, Dict[str, Any]]], None, None]:
    """Splits conversion events into batches of _CONVERSION_BATCH_MAX_SIZE.

    AdWords API batch constraints can be found at:
    https://developers.google.com/adwords/api/docs/reference/v201809/AdwordsUserListService.MutateMembersOperand

    Args:
      events: Indexed events to send.

    Yields:
      List of batches of events. Each batch is of _DEFAULT_BATCH_SIZE
      (the final batch may be smaller).
    """
    for i in range(0, len(events), _DEFAULT_BATCH_SIZE):
      yield events[i : i + _DEFAULT_BATCH_SIZE]

  def send_events(self, blb: blob.Blob) -> blob.Blob:
    """Sends Customer Match events to Google AdWords API.

    Args:
      blb: A blob containing Customer Match data to send.

    Returns:
      A blob containing updated data about any failing events or reports.

    Raises:
      DataOutConnectorValueError when user list with given name doesn't exist
        and create_list is false.
    """
    user_list_id = None
    valid_events, invalid_indices_and_errors = (
        self._validate_and_prepare_events_to_send(blb.events))
    batches = self._batch_generator(valid_events)
    for batch in batches:
      # Resolve (or lazily create) the target user list on the first batch
      # only; no API call is made at all when there are zero valid events.
      if not user_list_id:
        try:
          user_list_id = self.get_user_list_id(self.user_list_name)
        except errors.DataOutConnectorValueError:
          if self.create_list:
            user_list_id = self.create_user_list(
                self.user_list_name,
                self.upload_key_type,
                self.membership_lifespan,
                self.app_id)
          else:
            raise errors.DataOutConnectorValueError(
                'user_list_name does NOT exist (create_list = False).')
      try:
        user_list = [event[1] for event in batch]
        self.add_members_to_user_list(user_list_id, user_list)
      except errors.DataOutConnectorSendUnsuccessfulError as error:
        # A failed mutate marks every event in the batch as failed.
        for event in batch:
          invalid_indices_and_errors.append((event[0], error.error_num))
    for event in invalid_indices_and_errors:
      blb.append_failed_event(event[0] + blb.position, blb.events[event[0]],
                              event[1].value)
    return blb
| 35.597884 | 112 | 0.714997 |
acf39bac5b45766fc1497ee73404f52969a804c3 | 29,782 | py | Python | python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_retry_execution.py | silentsokolov/dagster | 510bf07bf6906294d5a239d60079c88211002ebf | [
"Apache-2.0"
] | null | null | null | python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_retry_execution.py | silentsokolov/dagster | 510bf07bf6906294d5a239d60079c88211002ebf | [
"Apache-2.0"
] | null | null | null | python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_retry_execution.py | silentsokolov/dagster | 510bf07bf6906294d5a239d60079c88211002ebf | [
"Apache-2.0"
] | null | null | null | import os
from time import sleep
from dagster_graphql.client.query import (
LAUNCH_PIPELINE_EXECUTION_MUTATION,
LAUNCH_PIPELINE_REEXECUTION_MUTATION,
PIPELINE_REEXECUTION_INFO_QUERY,
)
from dagster_graphql.schema.inputs import GrapheneReexecutionStrategy
from dagster_graphql.test.utils import (
execute_dagster_graphql,
execute_dagster_graphql_and_finish_runs,
infer_pipeline_selector,
)
from dagster.core.execution.plan.resume_retry import ReexecutionStrategy
from dagster.core.storage.pipeline_run import PipelineRunStatus
from dagster.core.storage.tags import RESUME_RETRY_TAG
from dagster.core.test_utils import poll_for_finished_run
from dagster.core.utils import make_new_run_id
from dagster.seven.temp_dir import get_system_temp_directory
from .graphql_context_test_suite import ExecutingGraphQLContextTestMatrix
from .setup import csv_hello_world_solids_config, get_retry_multi_execution_params, retry_config
from .utils import (
get_all_logs_for_finished_run_via_subscription,
step_did_fail,
step_did_fail_in_records,
step_did_not_run,
step_did_not_run_in_records,
step_did_skip,
step_did_succeed,
step_did_succeed_in_records,
step_started,
sync_execute_get_events,
)
def first_event_of_type(logs, message_type):
    """Return the first log entry whose __typename equals message_type, or None."""
    matches = (entry for entry in logs if entry["__typename"] == message_type)
    return next(matches, None)
def has_event_of_type(logs, message_type):
    """Return True when at least one log entry carries the given __typename."""
    found = first_event_of_type(logs, message_type)
    return found is not None
def get_step_output_event(logs, step_key, output_name="result"):
    """Return the first ExecutionStepOutputEvent matching step_key/output_name, or None."""
    for entry in logs:
        is_output_event = entry["__typename"] == "ExecutionStepOutputEvent"
        if (is_output_event
                and entry["stepKey"] == step_key
                and entry["outputName"] == output_name):
            return entry
    return None
class TestRetryExecution(ExecutingGraphQLContextTestMatrix):
    """Resume/retry re-execution tests run against each executing GraphQL context."""

    def test_retry_pipeline_execution(self, graphql_context):
        # Initial run: retry_config(0) makes "fail" fail, blocking downstream steps.
        selector = infer_pipeline_selector(graphql_context, "eventually_successful")
        result = execute_dagster_graphql_and_finish_runs(
            graphql_context,
            LAUNCH_PIPELINE_EXECUTION_MUTATION,
            variables={
                "executionParams": {
                    "mode": "default",
                    "selector": selector,
                    "runConfigData": retry_config(0),
                }
            },
        )
        run_id = result.data["launchPipelineExecution"]["run"]["runId"]
        logs = get_all_logs_for_finished_run_via_subscription(graphql_context, run_id)[
            "pipelineRunLogs"
        ]["messages"]
        assert step_did_succeed(logs, "spawn")
        assert step_did_fail(logs, "fail")
        assert step_did_not_run(logs, "fail_2")
        assert step_did_not_run(logs, "fail_3")
        assert step_did_not_run(logs, "reset")
        assert step_did_not_run(logs, "collect")
        # First retry: resumes from the failure; "fail" now succeeds, "fail_2" fails.
        retry_one = execute_dagster_graphql_and_finish_runs(
            graphql_context,
            LAUNCH_PIPELINE_REEXECUTION_MUTATION,
            variables={
                "executionParams": {
                    "mode": "default",
                    "selector": selector,
                    "runConfigData": retry_config(1),
                    "executionMetadata": {
                        "rootRunId": run_id,
                        "parentRunId": run_id,
                        "tags": [{"key": RESUME_RETRY_TAG, "value": "true"}],
                    },
                }
            },
        )
        run_id = retry_one.data["launchPipelineReexecution"]["run"]["runId"]
        logs = get_all_logs_for_finished_run_via_subscription(graphql_context, run_id)[
            "pipelineRunLogs"
        ]["messages"]
        assert step_did_not_run(logs, "spawn")
        assert step_did_succeed(logs, "fail")
        assert step_did_fail(logs, "fail_2")
        assert step_did_not_run(logs, "fail_3")
        assert step_did_not_run(logs, "reset")
        assert step_did_not_run(logs, "collect")
        # Second retry: progresses one more step, failing at "fail_3".
        retry_two = execute_dagster_graphql_and_finish_runs(
            graphql_context,
            LAUNCH_PIPELINE_REEXECUTION_MUTATION,
            variables={
                "executionParams": {
                    "mode": "default",
                    "selector": selector,
                    "runConfigData": retry_config(2),
                    "executionMetadata": {
                        "rootRunId": run_id,
                        "parentRunId": run_id,
                        "tags": [{"key": RESUME_RETRY_TAG, "value": "true"}],
                    },
                }
            },
        )
        run_id = retry_two.data["launchPipelineReexecution"]["run"]["runId"]
        logs = get_all_logs_for_finished_run_via_subscription(graphql_context, run_id)[
            "pipelineRunLogs"
        ]["messages"]
        assert step_did_not_run(logs, "spawn")
        assert step_did_not_run(logs, "fail")
        assert step_did_succeed(logs, "fail_2")
        assert step_did_fail(logs, "fail_3")
        assert step_did_not_run(logs, "reset")
        assert step_did_not_run(logs, "collect")
        # Third retry: everything remaining succeeds and the run completes.
        retry_three = execute_dagster_graphql_and_finish_runs(
            graphql_context,
            LAUNCH_PIPELINE_REEXECUTION_MUTATION,
            variables={
                "executionParams": {
                    "mode": "default",
                    "selector": selector,
                    "runConfigData": retry_config(3),
                    "executionMetadata": {
                        "rootRunId": run_id,
                        "parentRunId": run_id,
                        "tags": [{"key": RESUME_RETRY_TAG, "value": "true"}],
                    },
                }
            },
        )
        run_id = retry_three.data["launchPipelineReexecution"]["run"]["runId"]
        logs = get_all_logs_for_finished_run_via_subscription(graphql_context, run_id)[
            "pipelineRunLogs"
        ]["messages"]
        assert step_did_not_run(logs, "spawn")
        assert step_did_not_run(logs, "fail")
        assert step_did_not_run(logs, "fail_2")
        assert step_did_succeed(logs, "fail_3")
        assert step_did_succeed(logs, "reset")
        assert step_did_succeed(logs, "collect")

    def test_retry_resource_pipeline(self, graphql_context):
        # "will_fail" fails deterministically; the retry must skip the
        # successful "start" step and still fail at "will_fail".
        context = graphql_context
        selector = infer_pipeline_selector(graphql_context, "retry_resource_pipeline")
        result = execute_dagster_graphql_and_finish_runs(
            context,
            LAUNCH_PIPELINE_EXECUTION_MUTATION,
            variables={
                "executionParams": {
                    "mode": "default",
                    "selector": selector,
                }
            },
        )
        run_id = result.data["launchPipelineExecution"]["run"]["runId"]
        logs = get_all_logs_for_finished_run_via_subscription(context, run_id)["pipelineRunLogs"][
            "messages"
        ]
        assert step_did_succeed(logs, "start")
        assert step_did_fail(logs, "will_fail")
        retry_one = execute_dagster_graphql_and_finish_runs(
            context,
            LAUNCH_PIPELINE_REEXECUTION_MUTATION,
            variables={
                "executionParams": {
                    "mode": "default",
                    "selector": selector,
                    "executionMetadata": {
                        "rootRunId": run_id,
                        "parentRunId": run_id,
                        "tags": [{"key": RESUME_RETRY_TAG, "value": "true"}],
                    },
                }
            },
        )
        run_id = retry_one.data["launchPipelineReexecution"]["run"]["runId"]
        logs = get_all_logs_for_finished_run_via_subscription(context, run_id)["pipelineRunLogs"][
            "messages"
        ]
        assert step_did_not_run(logs, "start")
        assert step_did_fail(logs, "will_fail")

    def test_retry_multi_output(self, graphql_context):
        # Exercises retry across a multi-output solid with skipped branches.
        context = graphql_context
        result = execute_dagster_graphql_and_finish_runs(
            context,
            LAUNCH_PIPELINE_EXECUTION_MUTATION,
            variables={
                "executionParams": get_retry_multi_execution_params(context, should_fail=True)
            },
        )
        run_id = result.data["launchPipelineExecution"]["run"]["runId"]
        logs = get_all_logs_for_finished_run_via_subscription(context, run_id)["pipelineRunLogs"][
            "messages"
        ]
        assert step_did_succeed(logs, "multi")
        assert step_did_skip(logs, "child_multi_skip")
        assert step_did_fail(logs, "can_fail")
        assert step_did_not_run(logs, "child_fail")
        assert step_did_not_run(logs, "child_skip")
        assert step_did_not_run(logs, "grandchild_fail")
        # Retry while still configured to fail: only "can_fail" reruns (and fails again).
        retry_one = execute_dagster_graphql_and_finish_runs(
            context,
            LAUNCH_PIPELINE_REEXECUTION_MUTATION,
            variables={
                "executionParams": get_retry_multi_execution_params(
                    context, should_fail=True, retry_id=run_id
                )
            },
        )
        run_id = retry_one.data["launchPipelineReexecution"]["run"]["runId"]
        logs = get_all_logs_for_finished_run_via_subscription(context, run_id)["pipelineRunLogs"][
            "messages"
        ]
        assert step_did_not_run(logs, "multi")
        assert step_did_not_run(logs, "child_multi_skip")
        assert step_did_fail(logs, "can_fail")
        assert step_did_not_run(logs, "child_fail")
        assert step_did_not_run(logs, "child_skip")
        assert step_did_not_run(logs, "grandchild_fail")
        # Retry with failure disabled: remaining steps run/skip to completion.
        retry_two = execute_dagster_graphql_and_finish_runs(
            context,
            LAUNCH_PIPELINE_REEXECUTION_MUTATION,
            variables={
                "executionParams": get_retry_multi_execution_params(
                    context, should_fail=False, retry_id=run_id
                )
            },
        )
        run_id = retry_two.data["launchPipelineReexecution"]["run"]["runId"]
        logs = get_all_logs_for_finished_run_via_subscription(context, run_id)["pipelineRunLogs"][
            "messages"
        ]
        assert step_did_not_run(logs, "multi")
        assert step_did_not_run(logs, "child_multi_skip")
        assert step_did_succeed(logs, "can_fail")
        assert step_did_succeed(logs, "child_fail")
        assert step_did_skip(logs, "child_skip")
        assert step_did_succeed(logs, "grandchild_fail")

    def test_successful_pipeline_reexecution(self, graphql_context):
        # Re-executing a *successful* run with a step subset should only run
        # the requested step ("sum_sq_solid"), reusing the upstream output.
        selector = infer_pipeline_selector(graphql_context, "csv_hello_world")
        run_id = make_new_run_id()
        result_one = execute_dagster_graphql_and_finish_runs(
            graphql_context,
            LAUNCH_PIPELINE_EXECUTION_MUTATION,
            variables={
                "executionParams": {
                    "selector": selector,
                    "runConfigData": csv_hello_world_solids_config(),
                    "executionMetadata": {"runId": run_id},
                    "mode": "default",
                }
            },
        )
        assert result_one.data["launchPipelineExecution"]["__typename"] == "LaunchRunSuccess"
        result = get_all_logs_for_finished_run_via_subscription(graphql_context, run_id)
        logs = result["pipelineRunLogs"]["messages"]
        assert get_step_output_event(logs, "sum_solid")
        assert get_step_output_event(logs, "sum_sq_solid")
        # retry
        new_run_id = make_new_run_id()
        result_two = execute_dagster_graphql_and_finish_runs(
            graphql_context,
            LAUNCH_PIPELINE_REEXECUTION_MUTATION,
            variables={
                "executionParams": {
                    "selector": selector,
                    "runConfigData": csv_hello_world_solids_config(),
                    "stepKeys": ["sum_sq_solid"],
                    "executionMetadata": {
                        "runId": new_run_id,
                        "rootRunId": run_id,
                        "parentRunId": run_id,
                        "tags": [{"key": RESUME_RETRY_TAG, "value": "true"}],
                    },
                    "mode": "default",
                }
            },
        )
        query_result = result_two.data["launchPipelineReexecution"]
        assert query_result["__typename"] == "LaunchRunSuccess"
        result = get_all_logs_for_finished_run_via_subscription(graphql_context, new_run_id)
        logs = result["pipelineRunLogs"]["messages"]
        assert isinstance(logs, list)
        assert has_event_of_type(logs, "RunStartEvent")
        assert has_event_of_type(logs, "RunSuccessEvent")
        assert not has_event_of_type(logs, "RunFailureEvent")
        assert not get_step_output_event(logs, "sum_solid")
        assert get_step_output_event(logs, "sum_sq_solid")

    def test_pipeline_reexecution_info_query(self, graphql_context, snapshot):
        # stepKeysToExecute is None for a plain run and populated (snapshotted)
        # for a subset re-execution.
        context = graphql_context
        selector = infer_pipeline_selector(graphql_context, "csv_hello_world")
        run_id = make_new_run_id()
        execute_dagster_graphql_and_finish_runs(
            context,
            LAUNCH_PIPELINE_EXECUTION_MUTATION,
            variables={
                "executionParams": {
                    "selector": selector,
                    "runConfigData": csv_hello_world_solids_config(),
                    "executionMetadata": {"runId": run_id},
                    "mode": "default",
                }
            },
        )
        # retry
        new_run_id = make_new_run_id()
        execute_dagster_graphql_and_finish_runs(
            context,
            LAUNCH_PIPELINE_REEXECUTION_MUTATION,
            variables={
                "executionParams": {
                    "selector": selector,
                    "runConfigData": csv_hello_world_solids_config(),
                    "stepKeys": ["sum_sq_solid"],
                    "executionMetadata": {
                        "runId": new_run_id,
                        "rootRunId": run_id,
                        "parentRunId": run_id,
                        "tags": [{"key": RESUME_RETRY_TAG, "value": "true"}],
                    },
                    "mode": "default",
                }
            },
        )
        result_one = execute_dagster_graphql_and_finish_runs(
            context, PIPELINE_REEXECUTION_INFO_QUERY, variables={"runId": run_id}
        )
        query_result_one = result_one.data["pipelineRunOrError"]
        assert query_result_one["__typename"] == "Run"
        assert query_result_one["stepKeysToExecute"] is None
        result_two = execute_dagster_graphql_and_finish_runs(
            context, PIPELINE_REEXECUTION_INFO_QUERY, variables={"runId": new_run_id}
        )
        query_result_two = result_two.data["pipelineRunOrError"]
        assert query_result_two["__typename"] == "Run"
        stepKeysToExecute = query_result_two["stepKeysToExecute"]
        assert stepKeysToExecute is not None
        snapshot.assert_match(stepKeysToExecute)

    def test_pipeline_reexecution_invalid_step_in_subset(self, graphql_context):
        # Requesting an unknown step key must surface a
        # DagsterExecutionStepNotFoundError as a PythonError payload.
        run_id = make_new_run_id()
        selector = infer_pipeline_selector(graphql_context, "csv_hello_world")
        result_one = execute_dagster_graphql_and_finish_runs(
            graphql_context,
            LAUNCH_PIPELINE_EXECUTION_MUTATION,
            variables={
                "executionParams": {
                    "selector": selector,
                    "runConfigData": csv_hello_world_solids_config(),
                    "executionMetadata": {"runId": run_id},
                    "mode": "default",
                }
            },
        )
        assert result_one.data["launchPipelineExecution"]["__typename"] == "LaunchRunSuccess"
        # retry
        new_run_id = make_new_run_id()
        result_two = execute_dagster_graphql_and_finish_runs(
            graphql_context,
            LAUNCH_PIPELINE_REEXECUTION_MUTATION,
            variables={
                "executionParams": {
                    "selector": selector,
                    "runConfigData": csv_hello_world_solids_config(),
                    "stepKeys": ["nope"],
                    "executionMetadata": {
                        "runId": new_run_id,
                        "rootRunId": run_id,
                        "parentRunId": run_id,
                        "tags": [{"key": RESUME_RETRY_TAG, "value": "true"}],
                    },
                    "mode": "default",
                }
            },
        )
        query_result = result_two.data["launchPipelineReexecution"]
        assert query_result["__typename"] == "PythonError"
        assert query_result["className"] == "DagsterExecutionStepNotFoundError"
        assert "Can not build subset plan from unknown step: nope" in query_result["message"]
class TestHardFailures(ExecutingGraphQLContextTestMatrix):
def test_retry_hard_failure(self, graphql_context):
selector = infer_pipeline_selector(graphql_context, "hard_failer")
result = execute_dagster_graphql_and_finish_runs(
graphql_context,
LAUNCH_PIPELINE_EXECUTION_MUTATION,
variables={
"executionParams": {
"mode": "default",
"selector": selector,
"runConfigData": {"solids": {"hard_fail_or_0": {"config": {"fail": True}}}},
}
},
)
run_id = result.data["launchPipelineExecution"]["run"]["runId"]
logs = get_all_logs_for_finished_run_via_subscription(graphql_context, run_id)[
"pipelineRunLogs"
]["messages"]
assert step_started(logs, "hard_fail_or_0")
assert step_did_not_run(logs, "hard_fail_or_0")
assert step_did_not_run(logs, "increment")
retry = execute_dagster_graphql_and_finish_runs(
graphql_context,
LAUNCH_PIPELINE_REEXECUTION_MUTATION,
variables={
"executionParams": {
"mode": "default",
"selector": selector,
"runConfigData": {"solids": {"hard_fail_or_0": {"config": {"fail": False}}}},
"executionMetadata": {
"rootRunId": run_id,
"parentRunId": run_id,
"tags": [{"key": RESUME_RETRY_TAG, "value": "true"}],
},
}
},
)
run_id = retry.data["launchPipelineReexecution"]["run"]["runId"]
logs = get_all_logs_for_finished_run_via_subscription(graphql_context, run_id)[
"pipelineRunLogs"
]["messages"]
assert step_did_succeed(logs, "hard_fail_or_0")
assert step_did_succeed(logs, "increment")
def test_retry_failure_all_steps_with_reexecution_params(self, graphql_context):
"""
Test with providng reexecutionParams rather than executionParams
"""
selector = infer_pipeline_selector(graphql_context, "chained_failure_pipeline")
# trigger failure in the conditionally_fail solid
output_file = os.path.join(
get_system_temp_directory(), "chained_failure_pipeline_conditionally_fail"
)
try:
with open(output_file, "w", encoding="utf8"):
result = execute_dagster_graphql_and_finish_runs(
graphql_context,
LAUNCH_PIPELINE_EXECUTION_MUTATION,
variables={
"executionParams": {
"mode": "default",
"selector": selector,
}
},
)
finally:
os.remove(output_file)
run_id = result.data["launchPipelineExecution"]["run"]["runId"]
assert graphql_context.instance.get_run_by_id(run_id).status == PipelineRunStatus.FAILURE
retry = execute_dagster_graphql_and_finish_runs(
graphql_context,
LAUNCH_PIPELINE_REEXECUTION_MUTATION,
variables={"reexecutionParams": {"parentRunId": run_id, "strategy": "ALL_STEPS"}},
)
run_id = retry.data["launchPipelineReexecution"]["run"]["runId"]
assert graphql_context.instance.get_run_by_id(run_id).status == PipelineRunStatus.SUCCESS
logs = get_all_logs_for_finished_run_via_subscription(graphql_context, run_id)[
"pipelineRunLogs"
]["messages"]
assert step_did_succeed(logs, "always_succeed")
assert step_did_succeed(logs, "conditionally_fail")
assert step_did_succeed(logs, "after_failure")
def test_retry_hard_failure_with_reexecution_params_run_config_changed(self, graphql_context):
"""
Test that reexecution fails if the run config changes
"""
selector = infer_pipeline_selector(graphql_context, "chained_failure_pipeline")
# trigger failure in the conditionally_fail solid
output_file = os.path.join(
get_system_temp_directory(), "chained_failure_pipeline_conditionally_fail"
)
try:
with open(output_file, "w", encoding="utf8"):
result = execute_dagster_graphql_and_finish_runs(
graphql_context,
LAUNCH_PIPELINE_EXECUTION_MUTATION,
variables={
"executionParams": {
"mode": "default",
"selector": selector,
}
},
)
finally:
os.remove(output_file)
parent_run_id = result.data["launchPipelineExecution"]["run"]["runId"]
parent_run = graphql_context.instance.get_run_by_id(parent_run_id)
assert parent_run.status == PipelineRunStatus.FAILURE
# override run config to make it fail
graphql_context.instance.delete_run(parent_run_id)
graphql_context.instance.add_run(parent_run._replace(run_config={"bad": "config"}))
retry = execute_dagster_graphql_and_finish_runs(
graphql_context,
LAUNCH_PIPELINE_REEXECUTION_MUTATION,
variables={
"reexecutionParams": {"parentRunId": parent_run_id, "strategy": "FROM_FAILURE"}
},
)
assert "DagsterInvalidConfigError" in str(
retry.data["launchPipelineReexecution"]["message"]
)
def test_retry_failure_with_reexecution_params(self, graphql_context):
"""
Test with providng reexecutionParams rather than executionParams
"""
selector = infer_pipeline_selector(graphql_context, "chained_failure_pipeline")
# trigger failure in the conditionally_fail solid
output_file = os.path.join(
get_system_temp_directory(), "chained_failure_pipeline_conditionally_fail"
)
try:
with open(output_file, "w", encoding="utf8"):
result = execute_dagster_graphql_and_finish_runs(
graphql_context,
LAUNCH_PIPELINE_EXECUTION_MUTATION,
variables={
"executionParams": {
"mode": "default",
"selector": selector,
}
},
)
finally:
os.remove(output_file)
run_id = result.data["launchPipelineExecution"]["run"]["runId"]
assert graphql_context.instance.get_run_by_id(run_id).status == PipelineRunStatus.FAILURE
retry = execute_dagster_graphql_and_finish_runs(
graphql_context,
LAUNCH_PIPELINE_REEXECUTION_MUTATION,
variables={"reexecutionParams": {"parentRunId": run_id, "strategy": "FROM_FAILURE"}},
)
run_id = retry.data["launchPipelineReexecution"]["run"]["runId"]
assert graphql_context.instance.get_run_by_id(run_id).status == PipelineRunStatus.SUCCESS
logs = get_all_logs_for_finished_run_via_subscription(graphql_context, run_id)[
"pipelineRunLogs"
]["messages"]
assert step_did_not_run(logs, "always_succeed")
assert step_did_succeed(logs, "conditionally_fail")
assert step_did_succeed(logs, "after_failure")
def test_graphene_reexecution_strategy():
    """Every GrapheneReexecutionStrategy member must have a matching
    ReexecutionStrategy member (lookup raises KeyError otherwise)."""
    assert all(
        ReexecutionStrategy[member.value]
        for member in GrapheneReexecutionStrategy.__enum__
    )
def _do_retry_intermediates_test(graphql_context, run_id, reexecution_run_id):
    """Run 'eventually_successful' (which fails at the 'fail' step) and then
    launch a resume/retry re-execution of it.

    Returns the GraphQL result of the re-execution launch so callers can make
    launcher-specific assertions about the retried run.
    """
    selector = infer_pipeline_selector(graphql_context, "eventually_successful")
    logs = sync_execute_get_events(
        context=graphql_context,
        variables={
            "executionParams": {
                "mode": "default",
                "selector": selector,
                "executionMetadata": {"runId": run_id},
            }
        },
    )

    # First step succeeds, 'fail' fails, everything downstream is skipped.
    assert step_did_succeed(logs, "spawn")
    assert step_did_fail(logs, "fail")
    assert step_did_not_run(logs, "fail_2")
    assert step_did_not_run(logs, "fail_3")
    assert step_did_not_run(logs, "reset")

    # Launch a resume/retry of the failed run (marked via RESUME_RETRY_TAG).
    retry_one = execute_dagster_graphql_and_finish_runs(
        graphql_context,
        LAUNCH_PIPELINE_REEXECUTION_MUTATION,
        variables={
            "executionParams": {
                "mode": "default",
                "selector": selector,
                "executionMetadata": {
                    "runId": reexecution_run_id,
                    "rootRunId": run_id,
                    "parentRunId": run_id,
                    "tags": [{"key": RESUME_RETRY_TAG, "value": "true"}],
                },
            }
        },
    )
    return retry_one
class TestRetryExecutionAsyncOnlyBehavior(ExecutingGraphQLContextTestMatrix):
    """Retry behaviors that only apply to asynchronous run launchers."""

    def test_retry_requires_intermediates_async_only(self, graphql_context):
        """A resume/retry without stored intermediates ends up failed/canceled."""
        run_id = make_new_run_id()
        reexecution_run_id = make_new_run_id()
        _do_retry_intermediates_test(graphql_context, run_id, reexecution_run_id)
        reexecution_run = graphql_context.instance.get_run_by_id(reexecution_run_id)
        assert reexecution_run.is_failure_or_canceled

    def test_retry_early_terminate(self, graphql_context):
        """Terminate a run mid-flight, then resume/retry it and verify only
        the unfinished steps execute in the retry run."""
        instance = graphql_context.instance
        selector = infer_pipeline_selector(
            graphql_context, "retry_multi_input_early_terminate_pipeline"
        )
        run_id = make_new_run_id()
        # Configure the input steps to block so we can terminate mid-run.
        execute_dagster_graphql(
            graphql_context,
            LAUNCH_PIPELINE_EXECUTION_MUTATION,
            variables={
                "executionParams": {
                    "mode": "default",
                    "selector": selector,
                    "runConfigData": {
                        "solids": {
                            "get_input_one": {"config": {"wait_to_terminate": True}},
                            "get_input_two": {"config": {"wait_to_terminate": True}},
                        },
                    },
                    "executionMetadata": {"runId": run_id},
                }
            },
        )
        # Wait until the first step succeeded
        while instance.get_run_stats(run_id).steps_succeeded < 1:
            sleep(0.1)
        # Terminate the current pipeline run at the second step
        graphql_context.instance.run_launcher.terminate(run_id)

        records = instance.all_logs(run_id)

        # The first step should succeed, the second should fail or not start,
        # and the following steps should not appear in records
        assert step_did_succeed_in_records(records, "return_one")
        assert not step_did_fail_in_records(records, "return_one")
        assert any(
            [
                step_did_fail_in_records(records, "get_input_one"),
                step_did_not_run_in_records(records, "get_input_one"),
            ]
        )
        assert step_did_not_run_in_records(records, "get_input_two")
        assert step_did_not_run_in_records(records, "sum_inputs")

        # Wait for the original run to finish
        poll_for_finished_run(instance, run_id, timeout=30)
        assert instance.get_run_by_id(run_id).status == PipelineRunStatus.CANCELED

        # Start retry (with blocking disabled so the retry can complete)
        new_run_id = make_new_run_id()

        execute_dagster_graphql_and_finish_runs(
            graphql_context,
            LAUNCH_PIPELINE_REEXECUTION_MUTATION,
            variables={
                "executionParams": {
                    "mode": "default",
                    "selector": selector,
                    "runConfigData": {
                        "solids": {
                            "get_input_one": {"config": {"wait_to_terminate": False}},
                            "get_input_two": {"config": {"wait_to_terminate": False}},
                        },
                    },
                    "executionMetadata": {
                        "runId": new_run_id,
                        "rootRunId": run_id,
                        "parentRunId": run_id,
                        "tags": [{"key": RESUME_RETRY_TAG, "value": "true"}],
                    },
                }
            },
        )

        retry_records = instance.all_logs(new_run_id)
        # The first step should not run and the other three steps should succeed in retry
        assert step_did_not_run_in_records(retry_records, "return_one")
        assert step_did_succeed_in_records(retry_records, "get_input_one")
        assert step_did_succeed_in_records(retry_records, "get_input_two")
        assert step_did_succeed_in_records(retry_records, "sum_inputs")
| 39.498674 | 98 | 0.593278 |
acf39c2987cf4a77ba61beaf26d5b72d850dbdcc | 12,046 | py | Python | control/phaseplot.py | SpioradObrach/python-control | a4b4c43e51f0fc2cbf389336a90230a6a741c0dc | [
"BSD-3-Clause"
] | 1 | 2020-04-06T06:29:39.000Z | 2020-04-06T06:29:39.000Z | control/phaseplot.py | Sudo-Pluralize-Abenson/python-control | bad993e49155b11fb0aff1abc1cb12a92545d2c0 | [
"BSD-3-Clause"
] | null | null | null | control/phaseplot.py | Sudo-Pluralize-Abenson/python-control | bad993e49155b11fb0aff1abc1cb12a92545d2c0 | [
"BSD-3-Clause"
] | 3 | 2019-10-13T22:30:31.000Z | 2020-08-13T02:35:39.000Z | #! TODO: add module docstring
# phaseplot.py - generate 2D phase portraits
#
# Author: Richard M. Murray
# Date: 24 July 2011, converted from MATLAB version (2002); based on
# a version by Kristi Morgansen
#
# Copyright (c) 2011 by California Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Python 3 compatibility
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as mpl
from scipy.integrate import odeint
from .exception import ControlNotImplemented
__all__ = ['phase_plot', 'box_grid']
def _find(condition):
"""Returns indices where ravel(a) is true.
Private implementation of deprecated matplotlib.mlab.find
"""
return np.nonzero(np.ravel(condition))[0]
def phase_plot(odefun, X=None, Y=None, scale=1, X0=None, T=None,
               lingrid=None, lintime=None, logtime=None, timepts=None,
               parms=(), verbose=True):
    """
    Phase plot for 2D dynamical systems

    Produces a vector field or stream line plot for a planar system.

    Call signatures:
      phase_plot(func, X, Y, ...) - display vector field on meshgrid
      phase_plot(func, X, Y, scale, ...) - scale arrows
      phase_plot(func, X0=(...), T=Tmax, ...) - display stream lines
      phase_plot(func, X, Y, X0=[...], T=Tmax, ...) - plot both
      phase_plot(func, X0=[...], T=Tmax, lingrid=N, ...) - plot both
      phase_plot(func, X0=[...], lintime=N, ...) - stream lines with arrows

    Parameters
    ----------
    odefun : callable(x, t, ...)
        Computes the time derivative of the state (compatible with odeint).
        It should have the form dxdt = F(x, t), accept a state x of
        dimension 2 and return a derivative dx/dt of dimension 2.
    X, Y: 3-element sequences, optional, as [start, stop, npts]
        Two 3-element sequences specifying x and y coordinates of a
        grid.  These arguments are passed to linspace and meshgrid to
        generate the points at which the vector field is plotted.  If
        absent (or None), the vector field is not plotted.
    scale: float, optional
        Scale size of arrows; default = 1.  None draws "headless" arrows
        within the current axis limits; a negative value also marks the
        arrow base points with dots.
    X0: ndarray of initial conditions, optional
        List of initial conditions from which streamlines are plotted.
        Each initial condition should be a pair of numbers.
    T: array-like or number, optional
        Length of time to run simulations that generate streamlines.
        If a single number, the same simulation time is used for all
        initial conditions.  Otherwise, should be a list of length
        len(X0) that gives the simulation time for each initial
        condition.  Default value = 50.
    lingrid = N or (N, M): integer or 2-tuple of integers, optional
        If X0 is given and X, Y are missing, a grid of arrows is
        produced using the limits of the initial conditions, with N
        grid points in each dimension or N grid points in x and M grid
        points in y.
    lintime = N: integer, optional
        Draw N arrows using equally spaced time points
    logtime = (N, lambda): (integer, float), optional
        Draw N arrows using exponential time constant lambda
    timepts = [t1, t2, ...]: array-like, optional
        Draw arrows at the given list of times
    parms: tuple, optional
        List of parameters to pass to vector field: `odefun(x, t, *parms)`

    See also
    --------
    box_grid : construct box-shaped grid of initial conditions
    """
    #
    # Figure out ranges for phase plot (argument processing)
    #
    #! TODO: need to add error checking to arguments
    #! TODO: think through proper action if multiple options are given
    #
    autoFlag = False
    logtimeFlag = False
    timeptsFlag = False
    Narrows = 0

    if lingrid is not None:
        autoFlag = True
        Narrows = lingrid
        if verbose:
            print('Using auto arrows\n')

    elif logtime is not None:
        logtimeFlag = True
        Narrows = logtime[0]
        timefactor = logtime[1]
        if verbose:
            print('Using logtime arrows\n')

    elif timepts is not None:
        timeptsFlag = True
        Narrows = len(timepts)

    # Figure out the set of points for the quiver plot
    #! TODO: Add sanity checks
    elif X is not None and Y is not None:
        (x1, x2) = np.meshgrid(
            np.linspace(X[0], X[1], X[2]),
            np.linspace(Y[0], Y[1], Y[2]))
        Narrows = len(x1)

    else:
        # If we weren't given any grid points, don't plot arrows
        Narrows = 0

    if ((not autoFlag) and (not logtimeFlag) and (not timeptsFlag)
            and (Narrows > 0)):
        # Now calculate the vector field at those points
        (nr, nc) = x1.shape
        dx = np.empty((nr, nc, 2))
        for i in range(nr):
            for j in range(nc):
                dx[i, j, :] = np.squeeze(odefun((x1[i, j], x2[i, j]), 0, *parms))

        # Plot the quiver plot
        #! TODO: figure out arguments to make arrows show up correctly
        if scale is None:
            # BUG FIX: components are at indices 0 and 1 (the original
            # indexed [:,:,1] and [:,:,2], a leftover from 1-based MATLAB
            # indexing, which raises IndexError on the size-2 axis).
            mpl.quiver(x1, x2, dx[:, :, 0], dx[:, :, 1], angles='xy')
        elif scale != 0:
            #! TODO: optimize parameters for arrows
            xy = mpl.quiver(x1, x2, dx[:, :, 0] * np.abs(scale),
                            dx[:, :, 1] * np.abs(scale), angles='xy')
            # set(xy, 'LineWidth', PP_arrow_linewidth, 'Color', 'b');

        #! TODO: Tweak the shape of the plot
        # a=gca; set(a,'DataAspectRatio',[1,1,1]);
        # set(a,'XLim',X(1:2)); set(a,'YLim',Y(1:2));
        mpl.xlabel('x1')
        mpl.ylabel('x2')

    # See if we should also generate the streamlines
    if X0 is None or len(X0) == 0:
        return

    # Convert initial conditions to a numpy array
    X0 = np.array(X0)
    (nr, nc) = np.shape(X0)

    # Generate some empty matrices to keep arrow information
    x1 = np.empty((nr, Narrows))
    x2 = np.empty((nr, Narrows))
    dx = np.empty((nr, Narrows, 2))

    # See if we were passed a simulation time
    if T is None:
        T = 50

    # Parse the time we were passed
    TSPAN = T
    if isinstance(T, (int, float)):
        TSPAN = np.linspace(0, T, 100)

    # Figure out the limits for the plot
    if scale is None:
        # Assume that the current axis are set as we want them
        alim = mpl.axis()
        xmin = alim[0]; xmax = alim[1]
        ymin = alim[2]; ymax = alim[3]
    else:
        # Use the maximum extent of all trajectories
        xmin = np.min(X0[:, 0]); xmax = np.max(X0[:, 0])
        ymin = np.min(X0[:, 1]); ymax = np.max(X0[:, 1])

    # Generate the streamlines for each initial condition
    for i in range(nr):
        state = odeint(odefun, X0[i], TSPAN, args=parms)
        time = TSPAN
        mpl.plot(state[:, 0], state[:, 1])
        #! TODO: add back in colors for stream lines
        # PP_stream_color(np.mod(i-1, len(PP_stream_color))+1));
        # set(h[i], 'LineWidth', PP_stream_linewidth);

        # Plot arrows if quiver parameters were 'auto'
        if autoFlag or logtimeFlag or timeptsFlag:
            # Compute the locations of the arrows
            for j in range(Narrows):
                # Figure out starting index; headless arrows start at 0
                k = -1 if scale is None else 0

                # Figure out what time index to use for the next point
                if autoFlag:
                    # Use a linear scaling based on ODE time vector
                    # BUG FIX: cast to int -- np.floor returns a float,
                    # which is not a valid numpy array index.
                    tind = int(np.floor((len(time) / Narrows) * (j - k)) + k)
                elif logtimeFlag:
                    # Use an exponential time vector
                    # MATLAB: tind = find(time < (j-k) / lambda, 1, 'last');
                    tarr = _find(time < (j - k) / timefactor)
                    tind = tarr[-1] if len(tarr) else 0
                elif timeptsFlag:
                    # Use specified time points
                    # MATLAB: tind = find(time < Y[j], 1, 'last');
                    tarr = _find(time < timepts[j])
                    tind = tarr[-1] if len(tarr) else 0

                # For tailless arrows, skip the first point
                if tind == 0 and scale is None:
                    continue

                # Figure out the arrow at this point on the curve
                x1[i, j] = state[tind, 0]
                x2[i, j] = state[tind, 1]

                # Skip arrows outside of initial condition box
                if (scale is not None or
                        (x1[i, j] <= xmax and x1[i, j] >= xmin and
                         x2[i, j] <= ymax and x2[i, j] >= ymin)):
                    v = odefun((x1[i, j], x2[i, j]), 0, *parms)
                    dx[i, j, 0] = v[0]; dx[i, j, 1] = v[1]
                else:
                    dx[i, j, 0] = 0; dx[i, j, 1] = 0

    # Set the plot shape before plotting arrows to avoid warping
    # a=gca;
    # if (scale != None):
    #     set(a,'DataAspectRatio', [1,1,1]);
    # if (xmin != xmax and ymin != ymax):
    #     mpl.axis([xmin, xmax, ymin, ymax]);
    # set(a, 'Box', 'on');

    # Plot arrows on the streamlines
    if scale is None and Narrows > 0:
        # Use a tailless arrow
        mpl.quiver(x1, x2, dx[:, :, 0], dx[:, :, 1], angles='xy')
    elif scale != 0 and Narrows > 0:
        xy = mpl.quiver(x1, x2, dx[:, :, 0] * abs(scale),
                        dx[:, :, 1] * abs(scale), angles='xy')
        # set(xy, 'LineWidth', PP_arrow_linewidth);
        # set(xy, 'AutoScale', 'off');
        # set(xy, 'AutoScaleFactor', 0);

        if scale < 0:
            bp = mpl.plot(x1, x2, 'b.')  # add dots at base
            # set(bp, 'MarkerSize', PP_arrow_markersize);

    return
# Utility function for generating initial conditions around a box
def box_grid(xlimp, ylimp):
    """box_grid generate list of points on edge of box

    list = box_grid([xmin xmax xnum], [ymin ymax ynum]) generates a
    list of points that correspond to a uniform grid at the edge of the
    box defined by the corners [xmin ymin] and [xmax ymax].
    """
    xs = np.linspace(xlimp[0], xlimp[1], xlimp[2])
    ys = np.linspace(ylimp[0], ylimp[1], ylimp[2])

    # Walk the four edges -- bottom, left, top, right -- with a leading
    # (0, 0) point, matching the original hstack construction.
    edge_x = np.concatenate(
        ([0], xs, np.full_like(ys, xs[0]), xs, np.full_like(ys, xs[-1])))
    edge_y = np.concatenate(
        ([0], np.full_like(xs, ys[0]), ys, np.full_like(xs, ys[-1]), ys))

    return np.column_stack((edge_x, edge_y))
| 38.24127 | 79 | 0.599286 |
acf39d12d594a24c00ef2d7007721d143ebb1fe0 | 7,085 | py | Python | src/cattrs_extras/tortoise/converter.py | droserasprout/cattrs-extras | 32674bf6053c9a2a61180856a882920e81a7a603 | [
"MIT"
] | 2 | 2020-11-10T09:23:09.000Z | 2021-02-18T11:37:44.000Z | src/cattrs_extras/tortoise/converter.py | droserasprout/cattrs-extras | 32674bf6053c9a2a61180856a882920e81a7a603 | [
"MIT"
] | 6 | 2020-11-10T09:56:58.000Z | 2021-02-02T12:24:12.000Z | src/cattrs_extras/tortoise/converter.py | droserasprout/cattrs-extras | 32674bf6053c9a2a61180856a882920e81a7a603 | [
"MIT"
] | null | null | null | import collections
import importlib
from contextlib import suppress
from datetime import date
from datetime import datetime
from datetime import timedelta
from types import ModuleType
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Mapping
from typing import Optional
from typing import Type
from typing import Union
import tortoise
from tortoise import fields
from tortoise.queryset import QuerySet
from typing_inspect import get_args # type: ignore
from cattrs_extras.converter import Converter
from cattrs_extras.converter import StructureError
from cattrs_extras.tortoise.fields import ReversedCharEnumFieldInstance
JSONType = Union[Dict[str, Any], List[Dict[str, Any]]]
NoneType = type(None)
# TODO: Set datetime timezone awareness?
# TODO: Ability to format timestamps as strings?
class TortoiseConverter(Converter):
    """Converter that (un)structures Tortoise ORM models to/from dicts.

    Registers structure/unstructure hooks for ``tortoise.Model`` on top of
    the base :class:`Converter`, resolving string model references through
    the module named by *models*.
    """

    def __init__(self, models: str) -> None:
        super().__init__()
        # Module that holds the model classes; used to resolve string
        # references like "models.Foo" on relational fields.
        self._models: ModuleType = importlib.import_module(models)
        self.register_structure_hook(tortoise.Model, self._structure_tortoise_model)
        self.register_unstructure_hook(tortoise.Model, self._unstructure_tortoise_model)

    def _structure_tortoise_model(
        self, obj: Dict[str, Any], cls: Type[tortoise.Model]
    ) -> tortoise.Model:
        """Build a ``cls`` instance from the mapping *obj*.

        Marks the model as saved in DB when a generated primary key is
        present.  Raises StructureError for missing non-nullable fields.
        """
        result_dict: Dict[str, Any] = {}
        saved_in_db = False
        for field_name, field in cls._meta.fields_map.items():
            field_value = obj.get(field_name)

            # A populated auto-generated PK implies the row exists in the DB.
            if field.pk and field.generated:
                if field_value is not None:
                    saved_in_db = True
                else:
                    continue

            # Non-nullable field without a value is an error, except for
            # auto_now_add datetimes which Tortoise fills on save.
            if all(
                [
                    field.null is False,
                    field_value is None,
                    not (
                        isinstance(field, fields.DatetimeField) and field.auto_now_add
                    ),
                ]
            ):
                raise StructureError(
                    f'Cannot structure {cls.__qualname__}: "{field_name}" field is not nullable'
                )

            if field_name not in obj:
                continue

            # Map field classes to the Python types the base converter
            # knows how to structure.
            known_type = None
            if isinstance(field, fields.BooleanField):
                known_type = Optional[bool] if field.null else bool
            elif isinstance(field, fields.DatetimeField):
                known_type = Optional[datetime] if field.null else datetime
            elif isinstance(field, fields.DateField):
                known_type = Optional[date] if field.null else date
            elif isinstance(field, fields.TimeDeltaField):
                known_type = Optional[timedelta] if field.null else timedelta
            elif isinstance(
                field,
                (fields.data.CharEnumFieldInstance, ReversedCharEnumFieldInstance),
            ):
                known_type = (
                    Optional[field.enum_type] if field.null else field.enum_type
                )

            if known_type is not None:
                result_dict[field_name] = self.structure(
                    obj=field_value,
                    cl=known_type,  # type: ignore
                )
            # FIXME: tortoise.exceptions.ConfigurationError: You can't set backward relations through init, change related model instead
            # Should we try to hack it somehow or just ignore backward relations even if fetched?
            elif isinstance(field, fields.relational.BackwardFKRelation):
                continue
            elif isinstance(field, fields.relational.RelationalField) and field_value:
                # FIXME: Hinted as Type['Model'] -- may be a lazy string
                # reference that we resolve against the models module.
                if isinstance(field.model, str):
                    field_model = getattr(self._models, field.model.split('.')[-1])
                else:
                    field_model = field.model
                related_model = self.structure(
                    obj=field_value,
                    cl=field_model,
                )
                related_model._saved_in_db = True
                result_dict[field_name] = related_model
            else:
                result_dict[field_name] = field_value

        model = cls(**result_dict)
        model._saved_in_db = saved_in_db
        return model

    def _unstructure_tortoise_model(self, obj: tortoise.Model) -> JSONType:
        """Convert a Tortoise model to JSON-compatible data.

        Unfetched relations are silently skipped; QuerySet attributes
        (lazy relations) are ignored.
        """
        result_dict = {}
        for field_name, field in obj.__class__._meta.fields_map.items():
            field_value = getattr(obj, field_name, None)
            if isinstance(field_value, QuerySet):
                continue
            if isinstance(field, fields.relational.BackwardFKRelation):
                try:
                    result_dict[field_name] = self.unstructure(
                        field_value.related_objects  # type: ignore
                    )
                except tortoise.exceptions.NoValuesFetched:
                    # Relation was never fetched: best-effort, skip it.
                    pass
            elif isinstance(field, fields.relational.RelationalField):
                try:
                    result_dict[field_name] = self.unstructure(field_value)
                except tortoise.exceptions.NoValuesFetched:
                    pass
            else:
                result_dict[field_name] = self.unstructure(field_value)
        return result_dict

    @staticmethod
    def _get_dis_func(union: Type) -> Callable[..., Type]:
        """Return a discriminator function for *union*.

        Falls back to the base converter's discriminator first; if that
        raises StructureError, builds one that matches mappings against
        Tortoise model field sets (most specific model first).
        """
        # BUG FIX: zero-argument super() is invalid inside a staticmethod
        # (it would resolve to super(__class__, union) and raise TypeError
        # at call time); delegate to the base class explicitly instead.
        with suppress(StructureError):
            return Converter._get_dis_func(union)

        union_types = get_args(union)
        if NoneType in union_types:  # type: ignore
            union_types = tuple(e for e in union_types if e is not NoneType)  # type: ignore

        if not all(hasattr(e, '_meta') for e in union_types):
            raise StructureError(
                f'Cannot structure {union}: only unions of attr classes or tortoise models are supported currently. '
                f'Register a structure hook manually.'
            )
        if len(union_types) < 2:
            raise StructureError(
                f'Cannot structure {union}: at least two classes required.'
            )

        cls_and_attrs = [(cl, set(at for at in cl._meta.fields)) for cl in union_types]
        if len([attrs for _, attrs in cls_and_attrs if len(attrs) == 0]) > 1:
            raise StructureError(
                f'Cannot structure {union}: at least two classes have no attributes.'
            )
        # Sort by attribute-set size so the most specific match wins last.
        cls_and_attrs.sort(key=lambda c_a: len(c_a[1]))

        def _dis_func(data: Mapping) -> Type:
            # BUG FIX: collections.Mapping was removed in Python 3.10;
            # the ABC lives in collections.abc.
            if not isinstance(data, collections.abc.Mapping):
                raise StructureError(
                    f'Cannot structure {union}: only mappings are supported as input.'
                )
            for cls, cls_keys in cls_and_attrs:
                if all([data_key in cls_keys for data_key in data]):
                    return cls
            raise StructureError(
                f'Cannot structure {union}: {data} does not match any of generic arguments'
            )

        return _dis_func
| 36.333333 | 136 | 0.596471 |
acf39d9ebb1570ed26416c9457aaf0ead2bdef4a | 3,138 | py | Python | tools/my_infer.py | flying0712/reid-strong-baseline | b67fdddf31a1bbd3114e50727318af30301643ef | [
"MIT"
] | null | null | null | tools/my_infer.py | flying0712/reid-strong-baseline | b67fdddf31a1bbd3114e50727318af30301643ef | [
"MIT"
] | null | null | null | tools/my_infer.py | flying0712/reid-strong-baseline | b67fdddf31a1bbd3114e50727318af30301643ef | [
"MIT"
] | null | null | null | from data.datasets.dataset_loader import read_image # 图片读取方法,可以自己写,我是用的baseline里自带的
import os
import torch
import numpy as np
import json
from utils.re_ranking import re_ranking
os.environ['CUDA_VISIBLE_DEVICES'] = '0' # 指定gpu
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
data_root = '/root/code/my_data/'
def my_inference(model, transform, batch_size):
    """Run re-ID inference over the test set and write a submission file.

    Extracts features for all query and gallery images with ``model``,
    computes a k-reciprocal re-ranked distance matrix, and writes the
    top-200 gallery matches per query image to ``submission_B_4.json``.

    :param model: feature-extraction network (moved to ``device`` here)
    :param transform: preprocessing applied to each loaded image
    :param batch_size: number of images per forward pass
    """
    # Collect test-set image paths: query folder and gallery folder.
    query_list = [os.path.join(data_root + 'query_b', x)
                  for x in os.listdir(data_root + 'query_b')]
    gallery_list = [os.path.join(data_root + 'gallery_b', x)
                    for x in os.listdir(data_root + 'gallery_b')]
    query_num = len(query_list)

    # Load and preprocess every image; queries first, so the first
    # ``query_num`` feature rows belong to the query set.
    img_list = list()
    for q_img in query_list:
        q_img = read_image(q_img)
        q_img = transform(q_img)
        img_list.append(q_img)
    for g_img in gallery_list:
        g_img = read_image(g_img)
        g_img = transform(g_img)
        img_list.append(g_img)

    # Number of forward passes (round up for the final partial batch).
    iter_n = int(len(img_list) / batch_size)
    if len(img_list) % batch_size != 0:
        iter_n += 1
    print(iter_n)

    # BUG FIX: use .to(device) instead of .cuda() so the CPU fallback
    # chosen at module level actually works on machines without a GPU.
    img_data = torch.Tensor([t.numpy() for t in img_list]).to(device)
    model = model.to(device)
    model.eval()

    all_feature = list()
    for i in range(iter_n):
        print("batch ----%d----" % (i))
        batch_data = img_data[i * batch_size:(i + 1) * batch_size]
        with torch.no_grad():
            batch_feature = model(batch_data).detach().cpu()
        all_feature.append(batch_feature)
    print('done')
    all_feature = torch.cat(all_feature)

    gallery_feat = all_feature[query_num:]
    query_feat = all_feature[:query_num]

    # k-reciprocal re-ranking of the query/gallery distance matrix.
    distmat = re_ranking(query_feat, gallery_feat, k1=20, k2=6, lambda_value=0.3)

    num_q, num_g = distmat.shape
    print(num_q)
    indices = np.argsort(distmat, axis=1)
    max_200_indices = indices[:, :200]
    print(max_200_indices)

    # Map each query filename to its 200 closest gallery filenames.
    res_dict = dict()
    for q_idx in range(num_q):
        print(query_list[q_idx])
        filename = query_list[q_idx][query_list[q_idx].rindex("/") + 1:]
        max_200_files = [gallery_list[i][gallery_list[i].rindex("/") + 1:]
                         for i in max_200_indices[q_idx]]
        res_dict[filename] = max_200_files

    # Submission file.
    with open(r'submission_B_4.json', 'w', encoding='utf-8') as f:
        json.dump(res_dict, f)
json.dump(res_dict, f)
# if __name__ == '__main__':
# my_inference()
| 36.917647 | 105 | 0.634799 |
acf39dd79e5e8f85a3e7580df8e50bf0ca11e50a | 143 | py | Python | spexxy/tools/lsf/__init__.py | thusser/spexxy | 14a8d121076b9e043bdf2e27222a65088f771ff9 | [
"MIT"
] | 4 | 2019-05-13T21:36:31.000Z | 2021-09-06T01:56:36.000Z | spexxy/tools/lsf/__init__.py | thusser/spexxy | 14a8d121076b9e043bdf2e27222a65088f771ff9 | [
"MIT"
] | 2 | 2020-02-12T14:36:39.000Z | 2020-07-14T11:43:10.000Z | spexxy/tools/lsf/__init__.py | thusser/spexxy | 14a8d121076b9e043bdf2e27222a65088f771ff9 | [
"MIT"
] | 1 | 2019-11-08T09:26:23.000Z | 2019-11-08T09:26:23.000Z | import sys
from spexxy.tools import add_tree_node
def add_parser(parser):
    """Register the 'lsf' sub-command tree on the given argparse parser.

    Delegates to spexxy's add_tree_node helper, passing this package's
    module object so its tools are discovered automatically.
    """
    add_tree_node(parser, sys.modules[__name__], 'lsf operations')
| 20.428571 | 66 | 0.783217 |
acf39e3e83bdad47b0f7b68e461698601f1650d7 | 262 | bzl | Python | third_party/cargo_raze/deps.bzl | msfschaffner/opentitan-bak | de4cb1bb9e7b707a3ca2a6882d83af7ed2aa1ab8 | [
"Apache-2.0"
] | null | null | null | third_party/cargo_raze/deps.bzl | msfschaffner/opentitan-bak | de4cb1bb9e7b707a3ca2a6882d83af7ed2aa1ab8 | [
"Apache-2.0"
] | 2 | 2021-11-01T15:02:37.000Z | 2022-01-17T14:34:36.000Z | third_party/cargo_raze/deps.bzl | msfschaffner/opentitan-bak | de4cb1bb9e7b707a3ca2a6882d83af7ed2aa1ab8 | [
"Apache-2.0"
] | null | null | null | # Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
load("@cargo_raze//:repositories.bzl", "cargo_raze_repositories")
def raze_deps():
cargo_raze_repositories()
| 29.111111 | 74 | 0.770992 |
acf39e76216caf65dc63a75c0a2d17102992213a | 373 | py | Python | exercicios/lista4-ex06.py | jrgoncalves85/aulas-python | 9afa20f41e316569ae8cd484c33d80f23179ea56 | [
"MIT"
] | 3 | 2021-08-31T20:12:22.000Z | 2021-11-23T12:16:54.000Z | exercicios/lista4-ex06.py | jrgoncalves85/aulas-python | 9afa20f41e316569ae8cd484c33d80f23179ea56 | [
"MIT"
] | null | null | null | exercicios/lista4-ex06.py | jrgoncalves85/aulas-python | 9afa20f41e316569ae8cd484c33d80f23179ea56 | [
"MIT"
] | 1 | 2021-09-23T19:56:55.000Z | 2021-09-23T19:56:55.000Z | # 6. Escreva um algoritmo para ler uma temperatura em graus Celsius
# e apresentá-la convertida em graus Fahrenheit. A fórmula de conversão
# é F = (9 * C + 160) / 5, # sendo F a temperatura em Fahrenheit e
# C a temperatura em Celsius.
cel = int(input("Digite a temperatura em graus Celsius: "))
fah = (9 * cel + 160) / 5
print(cel,"graus Celsius em Fahrenheit é", fah)
| 37.3 | 71 | 0.705094 |
acf39ed2dbfb2fef999d606e87ae70de4b145dda | 4,435 | py | Python | pyscf/cc/ccsd_t_lambda_slow.py | robert-anderson/pyscf | cdc56e168cb15f47e8cdc791a92d689fa9b655af | [
"Apache-2.0"
] | 2 | 2019-05-28T05:25:56.000Z | 2019-11-09T02:16:43.000Z | pyscf/cc/ccsd_t_lambda_slow.py | robert-anderson/pyscf | cdc56e168cb15f47e8cdc791a92d689fa9b655af | [
"Apache-2.0"
] | 2 | 2019-09-16T17:58:31.000Z | 2019-09-22T17:26:01.000Z | pyscf/cc/ccsd_t_lambda_slow.py | robert-anderson/pyscf | cdc56e168cb15f47e8cdc791a92d689fa9b655af | [
"Apache-2.0"
] | 2 | 2021-09-16T23:37:42.000Z | 2021-10-14T23:00:39.000Z | #!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Spin-free lambda equation of RHF-CCSD(T)
Ref:
JCP, 147, 044104
'''
import time
import ctypes
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.cc import ccsd
from pyscf.cc import _ccsd
from pyscf.cc import ccsd_lambda
# Note: not support fov != 0
def kernel(mycc, eris=None, t1=None, t2=None, l1=None, l2=None,
max_cycle=50, tol=1e-8, verbose=logger.INFO):
return ccsd_lambda.kernel(mycc, eris, t1, t2, l1, l2, max_cycle, tol,
verbose, make_intermediates, update_lambda)
def make_intermediates(mycc, t1, t2, eris):
imds = ccsd_lambda.make_intermediates(mycc, t1, t2, eris)
nocc, nvir = t1.shape
eris_ovvv = numpy.asarray(eris.get_ovvv())
eris_ovoo = numpy.asarray(eris.ovoo)
eris_ovov = numpy.asarray(eris.ovov)
mo_e = eris.mo_energy
eia = lib.direct_sum('i-a->ia', mo_e[:nocc], mo_e[nocc:])
d3 = lib.direct_sum('ia,jb,kc->ijkabc', eia, eia, eia)
def p6(t):
t1 = t + t.transpose(0,2,1,3,5,4)
return t1 + t1.transpose(1,0,2,4,3,5) + t1.transpose(1,2,0,4,5,3)
def r6(w):
return (4 * w + w.transpose(0,1,2,4,5,3) + w.transpose(0,1,2,5,3,4)
- 2 * w.transpose(0,1,2,5,4,3) - 2 * w.transpose(0,1,2,3,5,4)
- 2 * w.transpose(0,1,2,4,3,5))
w =(numpy.einsum('iafb,kjcf->ijkabc', eris_ovvv.conj(), t2)
- numpy.einsum('iajm,mkbc->ijkabc', eris_ovoo.conj(), t2)) / d3
v =(numpy.einsum('iajb,kc->ijkabc', eris_ovov.conj(), t1)
+ numpy.einsum('ck,ijab->ijkabc', eris.fock[nocc:,:nocc], t2)) / d3
w = p6(w)
v = p6(v)
imds.l1_t = numpy.einsum('jbkc,ijkabc->ia', eris_ovov, r6(w)).conj() / eia * .5
def as_r6(m):
# When making derivative over t2, r6 should be called on the 6-index
# tensor. It gives the equation for lambda2, but not corresponding to
# the lambda equation used by RCCSD-lambda code. A transformation was
# applied in RCCSD-lambda equation F(lambda)_{ijab} = 0:
# 2/3 * # F(lambda)_{ijab} + 1/3 * F(lambda)_{jiab} = 0
# Combining this transformation with r6 operation, leads to the
# transformation code below
return m * 2 - m.transpose(0,1,2,5,4,3) - m.transpose(0,1,2,3,5,4)
m = as_r6(w * 2 + v * .5)
joovv = numpy.einsum('kfbe,ijkaef->ijab', eris_ovvv, m.conj())
joovv-= numpy.einsum('ncmj,imnabc->ijab', eris_ovoo, m.conj())
joovv = joovv + joovv.transpose(1,0,3,2)
rw = as_r6(w)
joovv+= numpy.einsum('kc,ijkabc->ijab', eris.fock[:nocc,nocc:], rw.conj())
imds.l2_t = joovv / lib.direct_sum('ia+jb->ijab', eia, eia)
return imds
def update_lambda(mycc, t1, t2, l1, l2, eris=None, imds=None):
if eris is None: eris = mycc.ao2mo()
if imds is None: imds = make_intermediates(mycc, t1, t2, eris)
l1, l2 = ccsd_lambda.update_lambda(mycc, t1, t2, l1, l2, eris, imds)
l1 += imds.l1_t
l2 += imds.l2_t
return l1, l2
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf
from pyscf.cc import ccsd
from pyscf import ao2mo
mol = gto.Mole()
mol.verbose = 0
mol.atom = [
[8 , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]]
mol.basis = 'cc-pvdz'
mol.build()
rhf = scf.RHF(mol)
rhf.conv_tol = 1e-16
rhf.scf()
mcc = ccsd.CCSD(rhf)
mcc.conv_tol = 1e-12
ecc, t1, t2 = mcc.kernel()
#l1, l2 = mcc.solve_lambda()
#print(numpy.linalg.norm(l1)-0.0132626841292)
#print(numpy.linalg.norm(l2)-0.212575609057)
conv, l1, l2 = kernel(mcc, mcc.ao2mo(), t1, t2, tol=1e-8)
print(numpy.linalg.norm(l1)-0.013575484203926739)
print(numpy.linalg.norm(l2)-0.22029981372536928)
| 33.854962 | 83 | 0.629763 |
acf39fe02a7315dd5b3e097443498f6b778a2e23 | 1,463 | py | Python | app/core/controller/ControllerOrdenArchivos.py | raquelnany/ACTIVO_DJANGO_DOCKER_PROGRES | 6dfcea960bac60ff862f204440cc52689bba0e2a | [
"MIT"
] | null | null | null | app/core/controller/ControllerOrdenArchivos.py | raquelnany/ACTIVO_DJANGO_DOCKER_PROGRES | 6dfcea960bac60ff862f204440cc52689bba0e2a | [
"MIT"
] | null | null | null | app/core/controller/ControllerOrdenArchivos.py | raquelnany/ACTIVO_DJANGO_DOCKER_PROGRES | 6dfcea960bac60ff862f204440cc52689bba0e2a | [
"MIT"
] | null | null | null | from ..serializers import OrdenArchivosSerializer
from ..models import Orden_Archivos, Usuario
class ControllerOrdenArchivos:
def crearordenarchivos(request):
datosOrdenArchivos = request.data
try:
usuario = Usuario.objects.get(id_usuario = datosOrdenArchivos['usuario'])
ordenArchivosNuevo = Orden_Archivos.objects.create(
nombre_archivo = datosOrdenArchivos['nombre_archivo'],
archivo = datosOrdenArchivos['archivo'],
fecha = datosOrdenArchivos['fecha'],
comentarios = datosOrdenArchivos['comentarios'],
id_orden = datosOrdenArchivos['id_orden'],
usuario = usuario
)
except Exception:
return {"estatus":"Error"}
return {"estatus":"Ok", 'orden_archivos': ordenArchivosNuevo.nombre_archivo}
def listarordenarchivos(id_orden_archivos=None):
if id_orden_archivos:
try:
queryset = Orden_Archivos.objects.get(id_orden_archivos=id_orden_archivos)
except Orden_Archivos.DoesNotExist:
return ({'result': 'No se encontró el orden de archivos deseado'})
serializer = OrdenArchivosSerializer(queryset)
return serializer.data
else:
queryset = Orden_Archivos.objects.all()
serializer = OrdenArchivosSerializer(queryset, many=True)
return serializer.data | 40.638889 | 90 | 0.641148 |
acf3a0d7ff9608f4e360608e0e2f79c281466a4f | 184 | py | Python | front/urls.py | Ambrou/asvn | 3da2b3b5128629f9c2ad0b4b7d0489e222507529 | [
"MIT"
] | null | null | null | front/urls.py | Ambrou/asvn | 3da2b3b5128629f9c2ad0b4b7d0489e222507529 | [
"MIT"
] | null | null | null | front/urls.py | Ambrou/asvn | 3da2b3b5128629f9c2ad0b4b7d0489e222507529 | [
"MIT"
] | null | null | null | #-*- coding: utf-8 -*-
from django.conf.urls import patterns, url
from front.views import CreerDepotView
urlpatterns = [
url(r'^creerdepot/$', CreerDepotView.as_view()),
] | 23 | 53 | 0.679348 |
acf3a0da4187adcfb5990f49435afaeba7581b64 | 2,554 | py | Python | venv/lib/python2.7/site-packages/ssh/resource.py | BlueMoon3000/election-2016 | 9e8f130605bf02ef85391f203381f518e673e183 | [
"FSFAP"
] | null | null | null | venv/lib/python2.7/site-packages/ssh/resource.py | BlueMoon3000/election-2016 | 9e8f130605bf02ef85391f203381f518e673e183 | [
"FSFAP"
] | null | null | null | venv/lib/python2.7/site-packages/ssh/resource.py | BlueMoon3000/election-2016 | 9e8f130605bf02ef85391f203381f518e673e183 | [
"FSFAP"
] | null | null | null | # Copyright (C) 2011 Jeff Forcier <jeff@bitprophet.org>
#
# This file is part of ssh.
#
# 'ssh' is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# 'ssh' is distrubuted in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with 'ssh'; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA.
"""
Resource manager.
"""
import weakref
class ResourceManager (object):
"""
A registry of objects and resources that should be closed when those
objects are deleted.
This is meant to be a safer alternative to python's C{__del__} method,
which can cause reference cycles to never be collected. Objects registered
with the ResourceManager can be collected but still free resources when
they die.
Resources are registered using L{register}, and when an object is garbage
collected, each registered resource is closed by having its C{close()}
method called. Multiple resources may be registered per object, but a
resource will only be closed once, even if multiple objects register it.
(The last object to register it wins.)
"""
def __init__(self):
self._table = {}
def register(self, obj, resource):
"""
Register a resource to be closed with an object is collected.
When the given C{obj} is garbage-collected by the python interpreter,
the C{resource} will be closed by having its C{close()} method called.
Any exceptions are ignored.
@param obj: the object to track
@type obj: object
@param resource: the resource to close when the object is collected
@type resource: object
"""
def callback(ref):
try:
resource.close()
except:
pass
del self._table[id(resource)]
# keep the weakref in a table so it sticks around long enough to get
# its callback called. :)
self._table[id(resource)] = weakref.ref(obj, callback)
# singleton
ResourceManager = ResourceManager()
| 34.986301 | 79 | 0.676586 |
acf3a356a1da86154a336f9e6b9cfd561dec6699 | 477 | py | Python | python/longest_substring_without_repeating_characters.py | birkhoffcheng/leetcode | 0fb38b4a23485c59744ee9ca1c9e832726155d9d | [
"MIT"
] | null | null | null | python/longest_substring_without_repeating_characters.py | birkhoffcheng/leetcode | 0fb38b4a23485c59744ee9ca1c9e832726155d9d | [
"MIT"
] | null | null | null | python/longest_substring_without_repeating_characters.py | birkhoffcheng/leetcode | 0fb38b4a23485c59744ee9ca1c9e832726155d9d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
longest = 0
substrset = set()
begin = 0
for i in range(len(s)):
while s[i] in substrset:
substrset.remove(s[begin])
begin += 1
substrset.add(s[i])
longest = max(longest, i - begin + 1)
return longest
s = Solution()
print(s.lengthOfLongestSubstring(sys.argv[1]))
| 23.85 | 54 | 0.542977 |
acf3a3841d4414c01f819d3510e0f7fb49fe426e | 64,364 | py | Python | tensorflow/python/ops/ragged/ragged_tensor.py | hyBlue/tensorflow | 62e8e1fa7ed38b76870ed851121d56df524c7287 | [
"Apache-2.0"
] | 2 | 2020-03-20T20:05:13.000Z | 2020-03-20T20:05:15.000Z | tensorflow/python/ops/ragged/ragged_tensor.py | wolves7/tensorflow | ae244e6dabeb6b879c5adb9ca4c2a85cb4722dc5 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/ops/ragged/ragged_tensor.py | wolves7/tensorflow | ae244e6dabeb6b879c5adb9ca4c2a85cb4722dc5 | [
"Apache-2.0"
] | 1 | 2020-05-15T06:20:58.000Z | 2020-05-15T06:20:58.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes for storing ragged tensors and their values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_ragged_conversion_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.ops.ragged import ragged_util
from tensorflow.python.ops.ragged import segment_id_ops
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=protected-access
_eval_using_default_session = ops._eval_using_default_session
# pylint: enable=protected-access
#===============================================================================
# RaggedTensor
#===============================================================================
@tf_export("RaggedTensor")
class RaggedTensor(object):
"""Represents a ragged tensor (go/ragged).
A `RaggedTensor` is a tensor with one or more *ragged dimensions*, which are
dimensions whose slices may have different lengths. For example, the inner
(column) dimension of `rt=[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is ragged,
since the column slices (`rt[0, :]`, ..., `rt[4, :]`) have different lengths.
Dimensions whose slices all have the same length are called *uniform
dimensions*. The outermost dimension of a `RaggedTensor` is always uniform,
since it consists of a single slice (and so there is no possibility for
differing slice lengths).
The total number of dimensions in a `RaggedTensor` is called its *rank*,
and the number of ragged dimensions in a `RaggedTensor` is called its
*ragged-rank*. A `RaggedTensor`'s ragged-rank is fixed at graph creation
time: it can't depend on the runtime values of `Tensor`s, and can't vary
dynamically for different session runs.
### Potentially Ragged Tensors
Many ops support both `Tensor`s and `RaggedTensor`s. The term "potentially
ragged tensor" may be used to refer to a tensor that might be either a
`Tensor` or a `RaggedTensor`. The ragged-rank of a `Tensor` is zero.
### Documenting RaggedTensor Shapes
When documenting the shape of a RaggedTensor, ragged dimensions can be
indicated by enclosing them in parentheses. For example, the shape of
a 3-D `RaggedTensor` that stores the fixed-size word embedding for each
word in a sentence, for each sentence in a batch, could be written as
`[num_sentences, (num_words), embedding_size]`. The parentheses around
`(num_words)` indicate that dimension is ragged, and that the length
of each element list in that dimension may vary for each item.
### Component Tensors
Internally, a `RaggedTensor` consists of a concatenated list of values that
are partitioned into variable-length rows. In particular, each `RaggedTensor`
consists of:
* A `values` tensor, which concatenates the variable-length rows into a
flattened list. For example, the `values` tensor for
`[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is `[3, 1, 4, 1, 5, 9, 2, 6]`.
* A `row_splits` vector, which indicates how those flattened values are
divided into rows. In particular, the values for row `rt[i]` are stored
in the slice `rt.values[rt.row_splits[i]:rt.row_splits[i+1]]`.
Example:
```python
>>> print(tf.RaggedTensor.from_row_splits(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_splits=[0, 4, 4, 7, 8, 8]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
```
### Alternative Row-Partitioning Schemes
In addition to `row_splits`, ragged tensors provide support for four other
row-partitioning schemes:
* `row_lengths`: a vector with shape `[nrows]`, which specifies the length
of each row.
* `value_rowids` and `nrows`: `value_rowids` is a vector with shape
`[nvals]`, corresponding one-to-one with `values`, which specifies
each value's row index. In particular, the row `rt[row]` consists of the
values `rt.values[j]` where `value_rowids[j]==row`. `nrows` is an
int64 scalar that specifies the number of rows in the `RaggedTensor`.
(`nrows` is used to indicate trailing empty rows.)
* `row_starts`: a vector with shape `[nrows]`, which specifies the start
offset of each row. Equivalent to `row_splits[:-1]`.
* `row_limits`: a vector with shape `[nrows]`, which specifies the stop
offset of each row. Equivalent to `row_splits[1:]`.
Example: The following ragged tensors are equivalent, and all represent the
nested list `[[3, 1, 4, 1], [], [5, 9, 2], [6], []]`.
```python
>>> values = [3, 1, 4, 1, 5, 9, 2, 6]
>>> rt1 = RaggedTensor.from_row_splits(values, row_splits=[0, 4, 4, 7, 8, 8])
>>> rt2 = RaggedTensor.from_row_lengths(values, row_lengths=[4, 0, 3, 1, 0])
>>> rt3 = RaggedTensor.from_value_rowids(
... values, value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], nrows=5)
>>> rt4 = RaggedTensor.from_row_starts(values, row_starts=[0, 4, 4, 7, 8])
>>> rt5 = RaggedTensor.from_row_limits(values, row_limits=[4, 4, 7, 8, 8])
```
### Multiple Ragged Dimensions
`RaggedTensor`s with multiple ragged dimensions can be defined by using
a nested `RaggedTensor` for the `values` tensor. Each nested `RaggedTensor`
adds a single ragged dimension.
```python
>>> inner_rt = RaggedTensor.from_row_splits( # =rt1 from above
... values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8])
>>> outer_rt = RaggedTensor.from_row_splits(
... values=inner_rt, row_splits=[0, 3, 3, 5])
>>> print outer_rt.to_list()
[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]
>>> print outer_rt.ragged_rank
2
```
The factory function `RaggedTensor.from_nested_row_splits` may be used to
construct a `RaggedTensor` with multiple ragged dimensions directly, by
providing a list of `row_splits` tensors:
```python
>>> RaggedTensor.from_nested_row_splits(
... flat_values=[3, 1, 4, 1, 5, 9, 2, 6],
... nested_row_splits=([0, 3, 3, 5], [0, 4, 4, 7, 8, 8])).to_list()
[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]
```
### Uniform Inner Dimensions
`RaggedTensor`s with uniform inner dimensions can be defined
by using a multidimensional `Tensor` for `values`.
```python
>>> rt = RaggedTensor.from_row_splits(values=tf.ones([5, 3]),
.. row_splits=[0, 2, 5])
>>> print rt.to_list()
[[[1, 1, 1], [1, 1, 1]],
[[1, 1, 1], [1, 1, 1], [1, 1, 1]]]
>>> print rt.shape
(2, ?, 3)
```
### RaggedTensor Shape Restrictions
The shape of a RaggedTensor is currently restricted to have the following
form:
* A single uniform dimension
* Followed by one or more ragged dimensions
* Followed by zero or more uniform dimensions.
This restriction follows from the fact that each nested `RaggedTensor`
replaces the uniform outermost dimension of its `values` with a uniform
dimension followed by a ragged dimension.
"""
#=============================================================================
# Constructor (private)
#=============================================================================
def __init__(self,
values,
row_splits,
cached_row_lengths=None,
cached_value_rowids=None,
cached_nrows=None,
internal=False):
"""Creates a `RaggedTensor` with a specified partitioning for `values`.
This constructor is private -- please use one of the following ops to
build `RaggedTensor`s:
* `tf.RaggedTensor.from_row_lengths`
* `tf.RaggedTensor.from_value_rowids`
* `tf.RaggedTensor.from_row_splits`
* `tf.RaggedTensor.from_row_starts`
* `tf.RaggedTensor.from_row_limits`
* `tf.RaggedTensor.from_nested_row_splits`
* `tf.RaggedTensor.from_nested_row_lengths`
* `tf.RaggedTensor.from_nested_value_rowids`
Args:
values: A potentially ragged tensor of any dtype and shape `[nvals, ...]`.
row_splits: A 1-D int64 tensor with shape `[nrows+1]`.
cached_row_lengths: A 1-D int64 tensor with shape `[nrows]`
cached_value_rowids: A 1-D int64 tensor with shape `[nvals]`.
cached_nrows: A 1-D int64 scalar tensor.
internal: True if the constructor is being called by one of the factory
methods. If false, an exception will be raised.
Raises:
TypeError: If a row partitioning tensor has an inappropriate dtype.
TypeError: If exactly one row partitioning argument was not specified.
ValueError: If a row partitioning tensor has an inappropriate shape.
ValueError: If multiple partitioning arguments are specified.
ValueError: If nrows is specified but value_rowids is not None.
"""
if not internal:
raise ValueError("RaggedTensor constructor is private; please use one "
"of the factory methods instead (e.g., "
"RaggedTensor.from_row_lengths())")
# Validate the arguments.
if not isinstance(values, (RaggedTensor, ops.Tensor)):
raise TypeError("values must be a Tensor or RaggedTensor.")
if not isinstance(row_splits, ops.Tensor):
raise TypeError("Row-partitioning argument must be a Tensor.")
values.shape.with_rank_at_least(1)
row_splits.shape.assert_has_rank(1)
row_splits.set_shape([None])
self._values = values
self._row_splits = row_splits
# Store any cached tensors. These are used to avoid unnecessary
# round-trip conversions when a RaggedTensor is constructed from
# lengths or rowids, and we later want those lengths/rowids back.
for tensor in [cached_row_lengths, cached_value_rowids, cached_nrows]:
if tensor is not None and not isinstance(tensor, ops.Tensor):
raise TypeError("Cached value must be a Tensor or None.")
self._cached_row_lengths = cached_row_lengths
self._cached_value_rowids = cached_value_rowids
self._cached_nrows = cached_nrows
#=============================================================================
# Factory Methods
#=============================================================================
@classmethod
def from_value_rowids(cls, values, value_rowids, nrows=None, name=None):
"""Creates a `RaggedTensor` with rows partitioned by `value_rowids`.
The returned `RaggedTensor` corresponds with the python list defined by:
```python
result = [[values[i] for i in range(len(values)) if value_rowids[i] == row]
for row in range(nrows)]
```
Warning: currently, this needs to cast value_rowids to int64 before
converting, since `tf.bincount` only supports `int32`.
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
value_rowids: A 1-D int64 tensor with shape `[nvals]`, which corresponds
one-to-one with `values`, and specifies each value's row index. Must be
nonnegative, and must be sorted in ascending order.
nrows: An int64 scalar specifying the number of rows. This should be
specified if the `RaggedTensor` may containing empty training rows. Must
be greater than `value_rowids[-1]` (or zero if `value_rowids` is empty).
Defaults to `value_rowids[-1]` (or zero if `value_rowids` is empty).
name: A name prefix for the RaggedTensor (optional).
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
Raises:
ValueError: If `nrows` is incompatible with `value_rowids`.
#### Example:
```python
>>> print(tf.RaggedTensor.from_value_rowids(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... value_rowids=[0, 0, 0, 0, 2, 2, 2, 3],
... nrows=5))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
```
"""
with ops.name_scope(name, "RaggedFromValueRowIds",
[values, value_rowids, nrows]):
values = convert_to_tensor_or_ragged_tensor(values, name="values")
value_rowids = ops.convert_to_tensor(
value_rowids, dtypes.int64, name="value_rowids")
if nrows is None:
const_rowids = tensor_util.constant_value(value_rowids)
if const_rowids is None:
nrows = array_ops.concat([value_rowids[-1:], [-1]], axis=0)[0] + 1
const_nrows = None
else:
const_nrows = const_rowids[-1] + 1 if const_rowids.size > 0 else 0
nrows = ops.convert_to_tensor(const_nrows, dtypes.int64, name="nrows")
else:
nrows = ops.convert_to_tensor(nrows, dtypes.int64, "nrows")
const_nrows = tensor_util.constant_value(nrows)
if const_nrows is not None:
if const_nrows < 0:
raise ValueError("Expected nrows >= 0; got %d" % const_nrows)
const_rowids = tensor_util.constant_value(value_rowids)
if const_rowids is not None and const_rowids.size > 0:
if not const_nrows >= const_rowids[-1] + 1:
raise ValueError(
"Expected nrows >= value_rowids[-1] + 1; got nrows=%d, "
"value_rowids[-1]=%d" % (const_nrows, const_rowids[-1]))
value_rowids.shape.assert_has_rank(1)
nrows.shape.assert_has_rank(0)
values.shape[:1].assert_is_compatible_with(value_rowids.shape)
# Convert value_rowids & nrows to row_splits.
# Note: we don't use segment_ids_to_row_splits() here because we want
# to save the intermediate value `row_lengths`, so we can cache it.
# TODO(b/116708836) Upgrade bincount to accept int64 so we can skip the
# cast (Remove the warning in the docstring when we do.)
value_rowids_int32 = math_ops.cast(value_rowids, dtypes.int32)
nrows_int32 = math_ops.cast(nrows, dtypes.int32)
row_lengths = math_ops.bincount(
value_rowids_int32,
minlength=nrows_int32,
maxlength=nrows_int32,
dtype=dtypes.int64)
row_splits = array_ops.concat([[0], math_ops.cumsum(row_lengths)], axis=0)
if const_nrows is not None:
row_lengths.set_shape([const_nrows])
row_splits.set_shape([const_nrows + 1])
return cls(
values,
row_splits,
cached_row_lengths=row_lengths,
cached_value_rowids=value_rowids,
cached_nrows=nrows,
internal=True)
@classmethod
def from_row_splits(cls, values, row_splits, name=None):
"""Creates a `RaggedTensor` with rows partitioned by `row_splits`.
The returned `RaggedTensor` corresponds with the python list defined by:
```python
result = [values[row_splits[i]:row_splits[i + 1]]
for i in range(len(row_splits) - 1)]
```
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
row_splits: A 1-D int64 tensor with shape `[nrows+1]`. Must not be empty,
and must be sorted in ascending order. `row_splits[0]` must be zero and
`row_splits[-1]` must be `nvals`.
name: A name prefix for the RaggedTensor (optional).
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
Raises:
ValueError: If `row_splits` is an empty list.
#### Example:
```python
>>> print(tf.RaggedTensor.from_row_splits(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_splits=[0, 4, 4, 7, 8, 8]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
```
"""
if isinstance(row_splits, (list, tuple)) and not row_splits:
raise ValueError("row_splits tensor may not be empty.")
with ops.name_scope(name, "RaggedFromRowSplits", [values, row_splits]):
values = convert_to_tensor_or_ragged_tensor(values, name="values")
row_splits = ops.convert_to_tensor(row_splits, dtypes.int64, "row_splits")
row_splits.shape.assert_has_rank(1)
return cls(values=values, row_splits=row_splits, internal=True)
@classmethod
def from_row_lengths(cls, values, row_lengths, name=None):
"""Creates a `RaggedTensor` with rows partitioned by `row_lengths`.
The returned `RaggedTensor` corresponds with the python list defined by:
```python
result = [[values.pop(0) for i in range(length)]
for length in row_lengths]
```
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
row_lengths: A 1-D int64 tensor with shape `[nrows]`. Must be
nonnegative. `sum(row_lengths)` must be `nvals`.
name: A name prefix for the RaggedTensor (optional).
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
#### Example:
```python
>>> print(tf.RaggedTensor.from_row_lengths(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_lengths=[4, 0, 3, 1, 0]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []])>
```
"""
with ops.name_scope(name, "RaggedFromRowLengths", [values, row_lengths]):
values = convert_to_tensor_or_ragged_tensor(values, name="values")
row_lengths = ops.convert_to_tensor(row_lengths, dtypes.int64,
"row_lengths")
row_lengths.shape.assert_has_rank(1)
row_limits = math_ops.cumsum(row_lengths)
row_splits = array_ops.concat([[0], row_limits], axis=0)
return cls(
values=values,
row_splits=row_splits,
cached_row_lengths=row_lengths,
internal=True)
@classmethod
def from_row_starts(cls, values, row_starts, name=None):
"""Creates a `RaggedTensor` with rows partitioned by `row_starts`.
Equivalent to: `from_row_splits(values, concat([row_starts, nvals]))`.
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
row_starts: A 1-D int64 tensor with shape `[nrows]`. Must be nonnegative
and sorted in ascending order. If `nrows>0`, then `row_starts[0]` must
be zero.
name: A name prefix for the RaggedTensor (optional).
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
#### Example:
```python
>>> print(tf.RaggedTensor.from_row_starts(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_starts=[0, 4, 4, 7, 8]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
```
"""
with ops.name_scope(name, "RaggedFromRowStarts", [values, row_starts]):
values = convert_to_tensor_or_ragged_tensor(values, name="values")
row_starts = ops.convert_to_tensor(row_starts, dtypes.int64, "row_starts")
row_starts.shape.assert_has_rank(1)
nvals = array_ops.shape(values, out_type=dtypes.int64)[:1]
row_splits = array_ops.concat([row_starts, nvals], axis=0)
return cls(values=values, row_splits=row_splits, internal=True)
@classmethod
def from_row_limits(cls, values, row_limits, name=None):
"""Creates a `RaggedTensor` with rows partitioned by `row_limits`.
Equivalent to: `from_row_splits(values, concat([0, row_limits]))`.
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
row_limits: A 1-D int64 tensor with shape `[nrows]`. Must be sorted in
ascending order. If `nrows>0`, then `row_limits[-1]` must be `nvals`.
name: A name prefix for the RaggedTensor (optional).
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
#### Example:
```python
>>> print(tf.RaggedTensor.from_row_limits(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_limits=[4, 4, 7, 8, 8]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
```
"""
with ops.name_scope(name, "RaggedFromRowLimits", [values, row_limits]):
values = convert_to_tensor_or_ragged_tensor(values, name="values")
row_limits = ops.convert_to_tensor(row_limits, dtypes.int64, "row_limits")
row_limits.shape.assert_has_rank(1)
zero = array_ops.zeros([1], dtypes.int64)
row_splits = array_ops.concat([zero, row_limits], axis=0)
return cls(values=values, row_splits=row_splits, internal=True)
  @classmethod
  def from_nested_value_rowids(cls,
                               flat_values,
                               nested_value_rowids,
                               nested_nrows=None,
                               name=None):
    """Creates a `RaggedTensor` from a nested list of `value_rowids` tensors.
    Equivalent to:
    ```python
    result = flat_values
    for (rowids, nrows) in reversed(zip(nested_value_rowids, nested_nrows)):
      result = from_value_rowids(result, rowids, nrows)
    ```
    Args:
      flat_values: A potentially ragged tensor.
      nested_value_rowids: A list of 1-D int64 tensors. The `i`th tensor is
        used as the `value_rowids` for the `i`th ragged dimension.
      nested_nrows: A list of int64 scalars. The `i`th scalar is used as the
        `nrows` for the `i`th ragged dimension.
      name: A name prefix for the RaggedTensor (optional).
    Returns:
      A `RaggedTensor` (or `flat_values` if `nested_value_rowids` is empty).
    Raises:
      ValueError: If `len(nested_value_rowids) != len(nested_nrows)`.
    """
    if isinstance(nested_value_rowids, ops.Tensor):
      raise TypeError("nested_value_rowids must be a list of Tensors")
    if nested_nrows is None:
      # Let from_value_rowids infer nrows for each ragged dimension.
      nested_nrows = [None] * len(nested_value_rowids)
    else:
      if isinstance(nested_nrows, ops.Tensor):
        raise TypeError("nested_nrows must be a list of Tensors")
      if len(nested_nrows) != len(nested_value_rowids):
        raise ValueError("nested_nrows must have the same length as "
                         "nested_value_rowids")
    with ops.name_scope(
        name, "RaggedFromNestedValueRowIds",
        [flat_values] + list(nested_value_rowids) + list(nested_nrows)):
      # Build the tensor from the innermost ragged dimension outward.
      result = flat_values
      for value_rowids, nrows in reversed(
          list(zip(nested_value_rowids, nested_nrows))):
        result = cls.from_value_rowids(result, value_rowids, nrows)
      return result
@classmethod
def from_nested_row_splits(cls, flat_values, nested_row_splits, name=None):
"""Creates a `RaggedTensor` from a nested list of `row_splits` tensors.
Equivalent to:
```python
result = flat_values
for row_splits in reversed(nested_row_splits):
result = from_row_splits(result, row_splits)
```
Args:
flat_values: A potentially ragged tensor.
nested_row_splits: A list of 1-D int64 tensors. The `i`th tensor is used
as the `row_splits` for the `i`th ragged dimension.
name: A name prefix for the RaggedTensor (optional).
Returns:
A `RaggedTensor` (or `flat_values` if `nested_row_splits` is empty).
"""
if isinstance(nested_row_splits, ops.Tensor):
raise TypeError("nested_row_splits must be a list of Tensors")
with ops.name_scope(name, "RaggedFromNestedRowSplits",
[flat_values] + list(nested_row_splits)):
result = flat_values
for splits in reversed(nested_row_splits):
result = cls.from_row_splits(result, splits)
return result
@classmethod
def from_nested_row_lengths(cls, flat_values, nested_row_lengths, name=None):
"""Creates a `RaggedTensor` from a nested list of `row_lengths` tensors.
Equivalent to:
```python
result = flat_values
for row_lengths in reversed(nested_row_lengths):
result = from_row_lengths(result, row_lengths)
```
Args:
flat_values: A potentially ragged tensor.
nested_row_lengths: A list of 1-D int64 tensors. The `i`th tensor is used
as the `row_lengths` for the `i`th ragged dimension.
name: A name prefix for the RaggedTensor (optional).
Returns:
A `RaggedTensor` (or `flat_values` if `nested_row_lengths` is empty).
"""
if isinstance(nested_row_lengths, ops.Tensor):
raise TypeError("nested_row_lengths must be a list of Tensors")
with ops.name_scope(name, "RaggedFromNestedRowlengths",
[flat_values] + list(nested_row_lengths)):
result = flat_values
for lengths in reversed(nested_row_lengths):
result = cls.from_row_lengths(result, lengths)
return result
#=============================================================================
# Accessors
#=============================================================================
@property
def dtype(self):
"""The `DType` of values in this tensor."""
return self._values.dtype
  @property
  def shape(self):
    """The statically known shape of this ragged tensor.
    Returns:
      A `TensorShape` containing the statically known shape of this ragged
      tensor. Ragged dimensions have a size of `None`.
    Examples:
    ```python
    >>> ragged.constant([[0], [1, 2]]).shape
    TensorShape([Dimension(2), Dimension(None)])
    >>> ragged.constant([[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1).shape
    TensorShape([Dimension(2), Dimension(None), Dimension(2)])
    ```
    """
    # The outer dimension has len(row_splits) - 1 rows (when statically known).
    nrows = tensor_shape.dimension_at_index(self._row_splits.shape, 0) - 1
    values_shape = self._values.shape
    value_shape = values_shape[1:]
    # The second dimension is ragged, so it is always reported as None.
    return tensor_shape.TensorShape([nrows, None]).concatenate(value_shape)
@property
def ragged_rank(self):
"""The number of ragged dimensions in this ragged tensor.
Returns:
A Python `int` indicating the number of ragged dimensions in this ragged
tensor. The outermost dimension is not considered ragged.
"""
values_is_ragged = isinstance(self._values, RaggedTensor)
return self._values.ragged_rank + 1 if values_is_ragged else 1
  @property
  def values(self):
    """The concatenated rows for this ragged tensor.
    `rt.values` is a potentially ragged tensor formed by flattening the two
    outermost dimensions of `rt` into a single dimension.
    `rt.values.shape = [nvals] + rt.shape[2:]` (where `nvals` is the
    number of items in the outer two dimensions of `rt`).
    `rt.values.ragged_rank = rt.ragged_rank - 1`
    Returns:
      A potentially ragged tensor.
    #### Example:
    ```python
    >>> rt = ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
    >>> print(rt.values)
    tf.Tensor([3, 1, 4, 1, 5, 9, 2, 6])
    ```
    """
    return self._values
  @property
  def row_splits(self):
    """The row-split indices for this ragged tensor's `values`.
    `rt.row_splits` specifies where the values for each row begin and end in
    `rt.values`.  In particular, the values for row `rt[i]` are stored in
    the slice `rt.values[rt.row_splits[i]:rt.row_splits[i+1]]`.
    Returns:
      A 1-D `int64` `Tensor` with shape `[self.nrows+1]`.
      The returned tensor is non-empty, and is sorted in ascending order.
      `self.row_splits[0]` is zero, and `self.row_splits[-1]` is equal to
      `self.values.shape[0]`.
    #### Example:
    ```python
    >>> rt = ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
    >>> print(rt.row_splits)  # indices of row splits in rt.values
    tf.Tensor([0, 4, 4, 7, 8, 8])
    ```
    """
    return self._row_splits
  @property
  def flat_values(self):
    """The innermost `values` tensor for this ragged tensor.
    Concretely, if `rt.values` is a `Tensor`, then `rt.flat_values` is
    `rt.values`; otherwise, `rt.flat_values` is `rt.values.flat_values`.
    Conceptually, `flat_values` is the tensor formed by flattening the
    outermost dimension and all of the ragged dimensions into a single
    dimension.
    `rt.flat_values.shape = [nvals] + rt.shape[rt.ragged_rank + 1:]`
    (where `nvals` is the number of items in the flattened dimensions).
    Returns:
      A `Tensor`.
    #### Example:
    ```python
    >>> rt = ragged.constant([[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]])
    >>> print(rt.flat_values)
    tf.Tensor([3, 1, 4, 1, 5, 9, 2, 6])
    ```
    """
    # Walk down through nested RaggedTensors to the innermost Tensor.
    rt_values = self.values
    while isinstance(rt_values, RaggedTensor):
      rt_values = rt_values.values
    return rt_values
  @property
  def nested_row_splits(self):
    """A tuple containing the row_splits for all ragged dimensions.
    `rt.nested_row_splits` is a tuple containing the `row_splits` tensors for
    all ragged dimensions in `rt`, ordered from outermost to innermost.  In
    particular, `rt.nested_row_splits = (rt.row_splits,) + value_splits` where:
    * `value_splits = ()` if `rt.values` is a `Tensor`.
    * `value_splits = rt.values.nested_row_splits` otherwise.
    Returns:
      A `tuple` of 1-D `int64` `Tensor`s.
    #### Example:
    ```python
    >>> rt = ragged.constant([[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]])
    >>> for i, splits in enumerate(rt.nested_row_splits):
    ...   print('Splits for dimension %d: %s' % (i+1, splits))
    Splits for dimension 1: [0, 1]
    Splits for dimension 2: [0, 3, 3, 5]
    Splits for dimension 3: [0, 4, 4, 7, 8, 8]
    ```
    """
    # Collect row_splits from each nested RaggedTensor, outermost first.
    rt_nested_splits = [self.row_splits]
    rt_values = self.values
    while isinstance(rt_values, RaggedTensor):
      rt_nested_splits.append(rt_values.row_splits)
      rt_values = rt_values.values
    return tuple(rt_nested_splits)
  def value_rowids(self, name=None):
    """Returns the row indices for the `values` in this ragged tensor.
    `rt.value_rowids()` corresponds one-to-one with the outermost dimension of
    `rt.values`, and specifies the row containing each value.  In particular,
    the row `rt[row]` consists of the values `rt.values[j]` where
    `rt.value_rowids()[j] == row`.
    Args:
      name: A name prefix for the returned tensor (optional).
    Returns:
      A 1-D `int64` `Tensor` with shape `self.values.shape[:1]`.
      The returned tensor is nonnegative, and is sorted in ascending order.
    #### Example:
    ```python
    >>> rt = ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
    >>> rt.values
    tf.Tensor([3, 1, 4, 1, 5, 9, 2, 6])
    >>> rt.value_rowids()
    tf.Tensor([0, 0, 0, 0, 2, 2, 2, 3])  # corresponds 1:1 with rt.values
    ```
    """
    # Prefer the row ids recorded at construction time, avoiding a
    # recomputation from row_splits.
    if self._cached_value_rowids is not None:
      return self._cached_value_rowids
    with ops.name_scope(name, "RaggedValueRowIds", [self]):
      return segment_id_ops.row_splits_to_segment_ids(self.row_splits)
def nrows(self, out_type=dtypes.int64, name=None):
"""Returns the number of rows in this ragged tensor.
I.e., the size of the outermost dimension of the tensor.
Args:
out_type: `dtype` for the returned tensor.
name: A name prefix for the returned tensor (optional).
Returns:
A scalar `Tensor` with dtype `out_type`.
#### Example:
```python
>>> rt = ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> rt.nrows() # rt has 5 rows.
5
```
"""
if self._cached_nrows is not None:
return self._cached_nrows
with ops.name_scope(name, "RaggedNRows", [self]):
return array_ops.shape(self.row_splits, out_type=out_type)[0] - 1
  def row_starts(self, name=None):
    """Returns the start indices for rows in this ragged tensor.
    These indices specify where the values for each row begin in
    `self.values`.  `rt.row_starts()` is equal to `rt.row_splits[:-1]`.
    Args:
      name: A name prefix for the returned tensor (optional).
    Returns:
      A 1-D Tensor of int64 with shape `[nrows]`.
      The returned tensor is nonnegative, and is sorted in ascending order.
    #### Example:
    ```python
    >>> rt = ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
    >>> rt.values
    tf.Tensor([3, 1, 4, 1, 5, 9, 2, 6])
    >>> rt.row_starts()  # indices of row starts in rt.values
    tf.Tensor([0, 4, 4, 7, 8])
    ```
    """
    with ops.name_scope(name, "RaggedRowStarts", [self]):
      # All but the final split index are row start offsets into values.
      return self.row_splits[:-1]
  def row_limits(self, name=None):
    """Returns the limit indices for rows in this ragged tensor.
    These indices specify where the values for each row end in
    `self.values`.  `rt.row_limits()` is equal to `rt.row_splits[1:]`.
    Args:
      name: A name prefix for the returned tensor (optional).
    Returns:
      A 1-D Tensor of int64 with shape `[nrows]`.
      The returned tensor is nonnegative, and is sorted in ascending order.
    #### Example:
    ```python
    >>> rt = ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
    >>> rt.values
    tf.Tensor([3, 1, 4, 1, 5, 9, 2, 6])
    >>> rt.row_limits()  # indices of row limits in rt.values
    tf.Tensor([4, 4, 7, 8, 8])
    ```
    """
    with ops.name_scope(name, "RaggedRowLimits", [self]):
      # All but the leading zero split index are row end offsets into values.
      return self.row_splits[1:]
def row_lengths(self, axis=1, name=None):
"""Returns the lengths of the rows in this ragged tensor.
`rt.row_lengths()[i]` indicates the number of values in the
`i`th row of `rt`.
Args:
axis: An integer constant indicating the axis whose row lengths should be
returned.
name: A name prefix for the returned tensor (optional).
Returns:
A potentially ragged Tensor of int64 with shape `self.shape[:axis]`.
Raises:
ValueError: If `axis` is out of bounds.
#### Example:
```python
>>> rt = ragged.constant([[[3, 1, 4], [1]], [], [[5, 9], [2]], [[6]], []])
>>> rt.row_lengths(rt) # lengths of rows in rt
tf.Tensor([2, 0, 2, 1, 0])
>>> rt.row_lengths(axis=2) # lengths of axis=2 rows.
<tf.RaggedTensor [[3, 1], [], [2, 1], [1], []]>
```
"""
if self._cached_row_lengths is not None:
return self._cached_row_lengths
with ops.name_scope(name, "RaggedRowLengths", [self]):
axis = ragged_util.get_positive_axis(axis, self.shape.ndims)
if axis == 0:
return self.nrows()
elif axis == 1:
splits = self.row_splits
return splits[1:] - splits[:-1]
elif isinstance(self.values, RaggedTensor):
return self.with_values(self.values.row_lengths(axis - 1))
else:
shape = array_ops.shape(self.values, out_type=dtypes.int64)
return self.with_values(
array_ops.ones(shape[:axis - 1], dtypes.int64) * shape[axis - 1])
  def nested_row_lengths(self, name=None):
    """Returns a tuple containing the row_lengths for all ragged dimensions.
    `rt.nested_row_lengths()` is a tuple containing the `row_lengths` tensors
    for all ragged dimensions in `rt`, ordered from outermost to innermost.
    Args:
      name: A name prefix for the returned tensors (optional).
    Returns:
      A `tuple` of 1-D `int64` `Tensors`.  The length of the tuple is equal to
      `self.ragged_rank`.
    """
    with ops.name_scope(name, "RaggedNestedRowLengths", [self]):
      # Collect row_lengths from each nested RaggedTensor, outermost first.
      rt_nested_row_lengths = []
      rt = self
      while isinstance(rt, RaggedTensor):
        rt_nested_row_lengths.append(rt.row_lengths())
        rt = rt.values
      return tuple(rt_nested_row_lengths)
  def bounding_shape(self, axis=None, name=None):
    """Returns the tight bounding box shape for this `RaggedTensor`.
    Args:
      axis: An integer scalar or vector indicating which axes to return the
        bounding box for.  If not specified, then the full bounding box is
        returned.
      name: A name prefix for the returned tensor (optional).
    Returns:
      An int64 `Tensor`.  If `axis` is not specified, then `output`
      is a vector with `output.shape=[self.shape.ndims]`.  If `axis` is a
      scalar, then the `output` is a scalar.  If `axis` is a vector, then
      `output` is a vector, where `output[i]` is the bounding size for
      dimension `axis[i]`.
    #### Example:
    ```python
    >>> rt = ragged.constant([[1, 2, 3, 4], [5], [], [6, 7, 8, 9], [10]])
    >>> rt.bounding_shape()
    [5, 4]
    ```
    """
    with ops.name_scope(name, "RaggedBoundingBox", [self, axis]):
      nested_splits = self.nested_row_splits
      rt_flat_values = self.flat_values
      # Optimized special cases for when axis=0 or axis=1:
      if isinstance(axis, int):
        if axis == 0:
          return array_ops.shape(nested_splits[0], out_type=dtypes.int64)[0] - 1
        elif axis == 1:
          return math_ops.maximum(math_ops.reduce_max(self.row_lengths()), 0)
      splits_shape = array_ops.shape(self.row_splits, out_type=dtypes.int64)
      flat_values_shape = array_ops.shape(rt_flat_values, out_type=dtypes.int64)
      # Dimension 0 is the number of rows; each ragged dimension's bound is
      # the longest row at that nesting level (clamped to be at least 0).
      ragged_dimensions = array_ops.stack([splits_shape[0] - 1] + [
          math_ops.maximum(math_ops.reduce_max(splits[1:] - splits[:-1]), 0)
          for splits in nested_splits
      ])
      # The uniform inner dimensions come directly from the flat values.
      inner_dimensions = flat_values_shape[1:]
      bbox = array_ops.concat([ragged_dimensions, inner_dimensions], axis=0)
      return bbox if axis is None else array_ops.gather(bbox, axis)
#=============================================================================
# Transformation
#=============================================================================
  def with_values(self, new_values):
    """Returns a copy of `self` with `values` replaced by `new_values`.
    Preserves cached row-partitioning tensors such as `self.cached_nrows` and
    `self.cached_value_rowids` if they have values.
    Args:
      new_values: Potentially ragged tensor to use as the `values` for the
        returned `RaggedTensor`.  Must have `rank > 0`, and must have the same
        number of rows as `self.values`.
    Returns:
      A `RaggedTensor`.  `result.rank = 1 + new_values.rank`.
      `result.ragged_rank = 1 + new_values.ragged_rank`
    """
    # The outer dimension of new_values must match the old values, so the
    # existing row_splits (and caches) remain valid for the result.
    new_values.shape.with_rank_at_least(1)
    self.values.shape[:1].assert_is_compatible_with(new_values.shape[:1])
    return RaggedTensor(
        new_values,
        self._row_splits,
        self._cached_row_lengths,
        self._cached_value_rowids,
        self._cached_nrows,
        internal=True)
def with_flat_values(self, new_values):
"""Returns a copy of `self` with `flat_values` replaced by `new_value`.
Preserves cached row-partitioning tensors such as `self.cached_nrows` and
`self.cached_value_rowids` if they have values.
Args:
new_values: Potentially ragged tensor that should replace
`self.flat_values`. Must have `rank > 0`, and must have the same
number of rows as `self.flat_values`.
Returns:
A `RaggedTensor`.
`result.rank = self.ragged_rank + new_values.rank`.
`result.ragged_rank = self.ragged_rank + new_values.ragged_rank`.
"""
if isinstance(self._values, ops.Tensor):
return self.with_values(new_values)
else:
return self.with_values(self.values.with_flat_values(new_values))
#=============================================================================
# Tensor Type Conversions
#=============================================================================
  @classmethod
  def from_tensor(cls,
                  tensor,
                  lengths=None,
                  padding=None,
                  ragged_rank=1,
                  name=None):
    """Converts a `tf.Tensor` into a `RaggedTensor`.
    The set of absent/default values may be specified using a vector of lengths
    or a padding value (but not both).  If `lengths` is specified, then the
    output tensor will satisfy `output[row] = tensor[row][:lengths[row]]`.
    If `padding` is specified, then any row *suffix* consisting entirely of
    `padding` will be excluded from the returned `RaggedTensor`.  If neither
    `lengths` nor `padding` is specified, then the returned `RaggedTensor` will
    have no absent/default values.
    Examples:
    ```python
    >>> dt = tf.constant([[5, 7, 0], [0, 3, 0], [6, 0, 0]])
    >>> tf.RaggedTensor.from_tensor(dt)
    <tf.RaggedTensor [[5, 7, 0], [0, 3, 0], [6, 0, 0]]>
    >>> tf.RaggedTensor.from_tensor(dt, lengths=[2, 0, 3])
    <tf.RaggedTensor [[5, 7], [], [6, 0, 0]]>
    >>> tf.RaggedTensor.from_tensor(dt, padding=0)
    <tf.RaggedTensor [[5, 7], [0, 3], [6]]>
    ```
    Args:
      tensor: The `Tensor` to convert.  Must have rank `ragged_rank + 1` or
        higher.
      lengths: An optional set of row lengths, specified using a 1-D integer
        `Tensor` whose length is equal to `tensor.shape[0]` (the number of rows
        in `tensor`).  If specified, then `output[row]` will contain
        `tensor[row][:lengths[row]]`.  Negative lengths are treated as zero.
      padding: An optional padding value.  If specified, then any row suffix
        consisting entirely of `padding` will be excluded from the returned
        RaggedTensor.  `padding` is a `Tensor` with the same dtype as `tensor`
        and with `shape=tensor.shape[ragged_rank + 1:]`.
      ragged_rank: Integer specifying the ragged rank for the returned
        `RaggedTensor`.  Must be greater than zero.
      name: A name prefix for the returned tensors (optional).
    Returns:
      A `RaggedTensor` with the specified `ragged_rank`.  The shape of the
      returned ragged tensor is compatible with the shape of `tensor`.
    Raises:
      ValueError: If both `lengths` and `padding` are specified.
    """
    if lengths is not None and padding is not None:
      raise ValueError("Specify lengths or padding, but not both")
    if not isinstance(ragged_rank, int):
      raise TypeError("ragged_rank expected int, got %r" % ragged_rank)
    if ragged_rank <= 0:
      raise ValueError(
          "ragged_rank must be greater than 0; got %s" % ragged_rank)
    with ops.name_scope(name, "RaggedFromTensor", [tensor, lengths, padding]):
      tensor = ops.convert_to_tensor(tensor, name="tensor")
      tensor.shape.with_rank_at_least(ragged_rank + 1)
      input_shape = array_ops.shape(tensor, out_type=dtypes.int64)
      ncols = input_shape[1]
      # Handle ragged_rank>1 via recursion:
      # If the output should have multiple ragged dimensions, then first
      # flatten the tensor to eliminate all but the last ragged dimension,
      # and recursively convert that flattened tensor.  Then add on the splits
      # for the dimensions that we flattened out.
      if ragged_rank > 1:
        # Flatten `tensor` to eliminate all but the last ragged dimension.
        new_shape = array_ops.concat([
            constant_op.constant([-1], dtypes.int64), input_shape[ragged_rank:]
        ],
                                     axis=0)
        flattened = array_ops.reshape(tensor, new_shape)
        # Recursively convert the flattened tensor.
        values = cls.from_tensor(flattened, lengths, padding)
        # The total number of elements in each dimension.  E.g., if
        # input_shape=[3, 4, 5, 6], then dim[2] has 3*4*5 elements in total.
        dim_size = math_ops.cumprod(input_shape)
        # Construct splits tensors for the dimensions that were flattened.
        # Each flattened dimension is uniform, so its splits are evenly
        # spaced multiples of the dimension's (original) size.
        new_splits = [
            math_ops.range(0, dim_size[dim - 1] + 1) * input_shape[dim]
            for dim in range(1, ragged_rank)
        ]
        return cls.from_nested_row_splits(values, new_splits)
      # If padding was specified, then use it to find row lengths.
      if padding is not None:
        padding = ops.convert_to_tensor(
            padding, name="padding", dtype=tensor.dtype)
        padding.shape.assert_is_compatible_with(tensor.shape[2:])
        # Find places where the padding is equal to the tensor.  (This will
        # broadcast `padding` across the outermost 2 dimensions of `tensor`,
        # so `has_default_value.shape = tensor.shape`.)
        has_default_value = math_ops.equal(padding, tensor)
        # If the padding isn't a scalar, then require that all values in the
        # padding match each item in the tensor.  After this block of code,
        # `has_default.shape = tensor.shape[:2]`.  (Unfortunately, we can't just
        # use reduce_all for both cases, because when you pass an empty `axis`
        # list to reduce_all, it reduces all axes; but we want it to reduce no
        # axes -- i.e., to be a no-op.)
        tensor_rank = array_ops.rank(tensor)
        reduce_axis = math_ops.range(2, tensor_rank)
        has_default = control_flow_ops.cond(
            tensor_rank > 2,
            lambda: math_ops.reduce_all(has_default_value, axis=reduce_axis),
            lambda: has_default_value)
        has_default.set_shape(tensor_shape.TensorShape([None, None]))
        has_default.set_shape(tensor.shape[:2])
        # Use has_default to find the length of each row: for each
        # non-default item in a row, calculate the length that the row needs to
        # have to include that item; and then take the max of those values
        # (across each row).
        has_nondefault = math_ops.logical_not(has_default)
        has_nondefault = math_ops.cast(has_nondefault, dtypes.int64)
        length_for_nondefault_value = (
            has_nondefault * array_ops.expand_dims(
                math_ops.range(1, ncols + 1), 0))
        lengths = math_ops.reduce_max(length_for_nondefault_value, axis=1)
      # If we have lengths (either directly supplied, or computed from
      # paddings), then use those to construct splits; and then use masking
      # to get the corresponding values.
      if lengths is not None:
        lengths = ragged_util.convert_to_int_tensor(lengths, "lengths",
                                                    dtypes.int64)
        lengths.shape.assert_has_rank(1)
        # Clamp lengths to [0, ncols] so the splits below stay well-formed.
        lengths = math_ops.minimum(lengths, ncols)
        lengths = math_ops.maximum(lengths, 0)
        limits = math_ops.cumsum(lengths)
        splits = array_ops.concat([array_ops.zeros([1], dtypes.int64), limits],
                                  axis=0)
        mask = array_ops.sequence_mask(lengths, maxlen=ncols)
        values = array_ops.boolean_mask(tensor, mask)
        return cls.from_row_splits(values, splits)
      # If neither padding nor lengths were specified, then create a splits
      # vector that contains no default values, and reshape the input tensor
      # to form the values for the RaggedTensor.
      nrows = input_shape[0]
      nvals = nrows * ncols
      splits = math_ops.range(nrows + 1) * ncols
      values_shape = array_ops.concat([[nvals], input_shape[2:]], axis=0)
      values = array_ops.reshape(tensor, values_shape)
      return cls.from_row_splits(values, splits)
  def to_tensor(self, default_value=None, name=None):
    """Converts this `RaggedTensor` into a `tf.Tensor`.
    Example:
    ```python
    >>> rt = ragged.constant([[9, 8, 7], [], [6, 5], [4]])
    >>> print(rt.to_tensor())
    [[9 8 7]
     [0 0 0]
     [6 5 0]
     [4 0 0]]
    ```
    Args:
      default_value: Value to set for indices not specified in `self`. Defaults
        to zero.  `default_value` must be broadcastable to
        `self.shape[self.ragged_rank + 1:]`.
      name: A name prefix for the returned tensors (optional).
    Returns:
      A `Tensor` with shape `ragged.bounding_shape(self)` and the
      values specified by the non-empty values in `self`.  Empty values are
      assigned `default_value`.
    """
    with ops.name_scope(name, "RaggedToTensor", [self, default_value]):
      if default_value is not None:
        default_value = ops.convert_to_tensor(
            default_value, name="default_value", dtype=self.dtype)
      # If ragged_rank > 1, then recursively convert the ragged values into a
      # `Tensor` before we proceed.
      values = self.values
      if is_ragged(values):
        values = values.to_tensor(default_value)
      # Tile the default value, if necessary.
      if default_value is not None:
        if values.shape.ndims is not None:
          default_value.shape.with_rank_at_most(values.shape.ndims - 1)
        if (values.shape.ndims is None or default_value.shape.ndims is None or
            values.shape.ndims != default_value.shape.ndims + 1):
          value_shape = array_ops.shape(values)[1:]
          default_value = array_ops.broadcast_to(default_value, value_shape)
        default_value.shape.assert_is_compatible_with(values.shape[1:])
      # Get the expected dense shape ([nrows, ncols] + value_shape).
      rt_row_lengths = [self.row_splits[1:] - self.row_splits[:-1]]
      nrows = array_ops.shape(self.row_splits, out_type=dtypes.int64)[0] - 1
      ncols = math_ops.maximum(math_ops.reduce_max(rt_row_lengths), 0)
      values_shape = array_ops.shape(values, out_type=dtypes.int64)
      value_shape = values_shape[1:]
      nvals = values_shape[0]
      # Build a default value if none was supplied.
      if default_value is None:
        default_value = array_ops.zeros(value_shape, dtype=values.dtype)
      default_value.shape.assert_is_compatible_with(values.shape[1:])
      default_value.set_shape(values.shape[1:])
      # Get the row start indices, and expand to shape=[nrows, 1].
      starts = array_ops.expand_dims(self.row_splits[:-1], 1)
      # Get the row limit indices, and expand to shape=[nrows, 1].
      limits = array_ops.expand_dims(self.row_splits[1:], 1)
      # Get the column indices, and expand to shape=[1, ncols].
      columns = array_ops.expand_dims(math_ops.range(0, ncols), 0)
      # Build a list containing the values plus the default value.  We will use
      # tf.gather to collect values from this list for the `Tensor` (using
      # nvals as the index for the default value).
      values_and_default = array_ops.concat(
          [values, array_ops.stack([default_value])], axis=0)
      # Construct a matrix "indices" pointing into values_and_default.  I.e.,
      # output[r, c] = values_and_default[indices[r, c]].
      nondefault_index = starts + columns
      has_value = nondefault_index < limits
      default_index = array_ops.fill(array_ops.stack([nrows, ncols]), nvals)
      indices = array_ops.where(has_value, nondefault_index, default_index)
      # Gather the results into a `Tensor`.
      return array_ops.gather(values_and_default, indices)
  @classmethod
  def from_sparse(cls, st_input, name=None):
    """Converts a 2D `tf.SparseTensor` to a `RaggedTensor`.
    Each row of the `output` `RaggedTensor` will contain the explicit values
    from the same row in `st_input`.  `st_input` must be ragged-right.  If
    it is not ragged-right, then an error will be generated.
    Example:
    ```python
    >>> st = SparseTensor(indices=[[0, 1], [0, 2], [0, 3], [1, 0], [3, 0]],
    ...                   values=[1, 2, 3, 4, 5],
    ...                   dense_shape=[4, 3])
    >>> rt.RaggedTensor.from_sparse(st).eval().tolist()
    [[1, 2, 3], [4], [], [5]]
    ```
    Currently, only two-dimensional `SparseTensors` are supported.
    Args:
      st_input: The sparse tensor to convert.  Must have rank 2.
      name: A name prefix for the returned tensors (optional).
    Returns:
      A `RaggedTensor` with the same values as `st_input`.
      `output.ragged_rank = rank(st_input) - 1`.
      `output.shape = [st_input.dense_shape[0], None]`.
    Raises:
      ValueError: If the number of dimensions in `st_input` is not known
        statically, or is not two.
    """
    if not sparse_tensor.is_sparse(st_input):
      raise TypeError("Expected SparseTensor, got %s" % type(st_input).__name__)
    with ops.name_scope(name, "RaggedFromSparse", [st_input]):
      st_input = sparse_tensor.convert_to_tensor_or_sparse_tensor(
          st_input, name="st_input")
      # The static rank may be known from either the dense_shape or the
      # indices; check both before rejecting the input.
      if st_input.dense_shape.shape.ndims is None:
        static_rank_from_dense_shape = None
      else:
        static_rank_from_dense_shape = st_input.dense_shape.shape.dims[0].value
      if st_input.indices.shape.ndims is None:
        static_rank_from_indices = None
      else:
        static_rank_from_indices = st_input.indices.shape.dims[1].value
      if static_rank_from_dense_shape != 2 and static_rank_from_indices != 2:
        raise ValueError("rank(st_input) must be 2")
      with ops.control_dependencies(
          _assert_sparse_indices_are_ragged_right(st_input.indices)):
        # Treat sparse row indices as segment ids to generate a splits tensor
        # that we can pair with the sparse tensor values.  (Ignore sparse
        # column indices.)
        segment_ids = st_input.indices[:, 0]
        num_segments = st_input.dense_shape[0]
        return cls.from_value_rowids(st_input.values, segment_ids, num_segments)
  def to_sparse(self, name=None):
    """Converts this `RaggedTensor` into a `tf.SparseTensor`.
    Example:
    ```python
    >>> rt = ragged.constant([[1, 2, 3], [4], [], [5, 6]])
    >>> rt.to_sparse().eval()
    SparseTensorValue(indices=[[0, 0], [0, 1], [0, 2], [1, 0], [3, 0], [3, 1]],
                      values=[1, 2, 3, 4, 5, 6],
                      dense_shape=[4, 3])
    ```
    Args:
      name: A name prefix for the returned tensors (optional).
    Returns:
      A SparseTensor with the same values as `self`.
    """
    with ops.name_scope(name, "RaggedToSparse", [self]):
      # A single kernel converts all nested splits + flat values at once.
      result = gen_ragged_conversion_ops.ragged_tensor_to_sparse(
          self.nested_row_splits, self.flat_values, name=name)
      return sparse_tensor.SparseTensor(result.sparse_indices,
                                        result.sparse_values,
                                        result.sparse_dense_shape)
#=============================================================================
# String Encoding
#=============================================================================
def __str__(self):
if self._is_eager():
return "<tf.RaggedTensor %s>" % self.to_list()
else:
return self.__repr__()
def __repr__(self):
return "tf.RaggedTensor(values=%s, row_splits=%s)" % (self._values,
self._row_splits)
#=============================================================================
# Eager Execution Mode
#=============================================================================
def to_list(self):
"""Returns a nested Python `list` with the values for this `RaggedTensor`.
Requires that `rt` was constructed in eager execution mode.
Returns:
A nested Python `list`.
"""
if self._is_eager():
return self._eager_value().to_list()
else:
raise ValueError("RaggedTensor.to_list() is only supported in eager "
"mode; in graph mode, evaluate the RaggedTensor first "
"and then use RaggedTensorValue.to_list().")
def _eager_value(self):
"""Returns a RaggedTensorValue for self. Requires self._is_eager()=true."""
value = self.flat_values.numpy()
for row_splits in reversed(self.nested_row_splits):
value = ragged_tensor_value.RaggedTensorValue(value, row_splits.numpy())
return value
def _is_eager(self):
"""Returns True if values & row_splits Tensors are all `EagerTensor`s."""
rt = self
while isinstance(rt, RaggedTensor):
if not isinstance(rt.row_splits, ops.EagerTensor):
return False
rt = rt.values
return isinstance(rt, ops.EagerTensor)
#=============================================================================
# Indexing & Slicing
#=============================================================================
  def __getitem__(self, key):
    """Returns the specified piece of this RaggedTensor."""
    # See ragged_getitem.py for the documentation and implementation of this
    # method.
    #
    # Note: the imports in ragged/__init__.py ensure that this method always
    # gets overridden before it is called.
    # (As written, this stub would simply return None if it were ever invoked.)
#=============================================================================
# Name Scope
#=============================================================================
# This private function is used by ops.name_scope to ensure that all of the
# input tensors for the scope belong to the same graph. Defining this means
# that you may include `RaggedTensor` objects in the name_scope `values`
# list.
def _as_graph_element(self):
"""Convert `self` to a graph element."""
values = self.values
while isinstance(values, RaggedTensor):
values = values.values
return values
def is_ragged(value):
  """Returns true if `value` is a ragged tensor or ragged tensor value."""
  ragged_types = (RaggedTensor, ragged_tensor_value.RaggedTensorValue)
  return isinstance(value, ragged_types)
#===============================================================================
# Convert value -> tensor
#===============================================================================
def convert_to_tensor_or_ragged_tensor(value,
                                       dtype=None,
                                       preferred_dtype=None,
                                       name=None):
  """Converts value to a `RaggedTensor` or `Tensor`.
  * If `value` is a `RaggedTensor`, then return it as-is.
  * If `value` is a `RaggedTensorValue`, return a corresponding constant
    `RaggedTensor`.
  * Otherwise, use `convert_to_tensor` to convert `value` to a `Tensor`.
  Args:
    value: A `RaggedTensor`, a `RaggedTensorValue`, or an object whose type has
      a registered `Tensor` conversion function.
    dtype: Optional element type for the returned tensor. If missing the type
      is inferred from the type of `value`.
    preferred_dtype: Optional element type for the returned tensor, used when
      dtype is None. This argument has no effect if `value` is already a
      tensor, or when conversion is not possible.
    name: Optional name to use if a new `Tensor` is created.
  Returns:
    A `Tensor` or `RaggedTensor`.
  Raises:
    ValueError: If `value` is a `RaggedTensor` whose dtype is incompatible
      with the requested `dtype`.
  """
  if isinstance(value, RaggedTensor):
    # Already a RaggedTensor: no conversion, but the requested dtype (if any)
    # must match -- RaggedTensors are not cast here.
    if dtype and not dtype.is_compatible_with(value.dtype):
      raise ValueError("Tensor conversion requested dtype %s for "
                       "RaggedTensor with dtype %s: %r" %
                       (dtype.name, value.dtype.name, value))
    return value
  elif isinstance(value, ragged_tensor_value.RaggedTensorValue):
    # Numpy-backed value: convert its flat values to a Tensor and rebuild the
    # ragged structure from the (already-numpy) nested row splits.
    with ops.name_scope(name, "ConvertToTensorOrRaggedTensor", []):
      flat_values = ops.convert_to_tensor(
          value=value.flat_values,
          dtype=dtype,
          preferred_dtype=preferred_dtype,
          name="flat_values")
      return RaggedTensor.from_nested_row_splits(flat_values,
                                                 value.nested_row_splits)
  else:
    # Anything else goes through the standard Tensor conversion machinery.
    return ops.convert_to_tensor(
        value=value, dtype=dtype, preferred_dtype=preferred_dtype, name=name)
#===============================================================================
# Register RaggedTensor for use with session.run.
#===============================================================================
def _ragged_tensor_value_from_components(components):
  """Rebuilds a RaggedTensorValue from fetched component arrays.

  `components` is expected to be the sequence produced by
  `_ragged_tensor_session_fetch`: nested row_splits (outermost first)
  followed by the flat values as the final element.
  """
  stack = list(components)
  value = stack.pop()
  # Re-wrap from the innermost splits outward.
  for row_splits in reversed(stack):
    value = ragged_tensor_value.RaggedTensorValue(value, row_splits)
  return value
def _ragged_tensor_session_fetch(rt):
  """Session fetch hook: decomposes `rt` into fetchable component tensors.

  The components are ordered outermost row_splits first, flat values last,
  matching the order `_ragged_tensor_value_from_components` consumes them in.
  """
  components = (*rt.nested_row_splits, rt.flat_values)
  return (components, _ragged_tensor_value_from_components)
def _ragged_tensor_session_feed(feed_key, feed_val):
key_components = feed_key.nested_row_splits + (feed_key.flat_values,)
val_components = feed_val.nested_row_splits + (feed_val.flat_values,)
return zip(key_components, val_components)
def _ragged_tensor_session_feed_for_partial_run(feed_key):
return feed_key.nested_row_splits + (feed_key.flat_values,)
# Teach Session.run how to fetch and feed RaggedTensors by (de)composing
# them into their row_splits + flat_values component tensors.
session.register_session_run_conversion_functions(
    RaggedTensor, _ragged_tensor_session_fetch, _ragged_tensor_session_feed,
    _ragged_tensor_session_feed_for_partial_run)
#===============================================================================
# RaggedTensorType
#===============================================================================
class RaggedTensorType(object):
  """Encoding of a static type for a `RaggedTensor`.
  Use this type to express/declare that an output must have the type of
  `RaggedTensor`.
  """
  def __init__(self, dtype, ragged_rank):
    """Initializes a RaggedTensorType object.
    Args:
      dtype: data type of the `RaggedTensor`'s inner values.
      ragged_rank: ragged_rank of the declared `RaggedTensor`.
    """
    self._dtype = dtype
    self._ragged_rank = ragged_rank

  @property
  def dtype(self):
    """Data type of the declared `RaggedTensor`'s inner values."""
    return self._dtype

  @property
  def ragged_rank(self):
    """Number of ragged dimensions of the declared `RaggedTensor`."""
    return self._ragged_rank
#===============================================================================
# Helper Functions
#===============================================================================
def _assert_sparse_indices_are_ragged_right(indices):
  """Checks that the given SparseTensor.indices tensor is ragged-right.
  Example: `indices = [[0, 0], [0, 1], [2, 0], [3, 1]]` is not ragged right
  because the entry `[3, 1]` skips a cell.
  Args:
    indices: The SparseTensor indices to check.
  Returns:
    A list of control dependency op tensors.
  """
  # `indices` is indexed as a rank-2 matrix here: one row per nonzero value,
  # split into all-but-last dims (prefix) and the innermost dim (suffix).
  index_prefix = indices[:, :-1]
  index_suffix = indices[:, -1]
  # Check whether each index is starting a new row in the innermost dimension
  # (prefix[i] != prefix[i-1]) or continuing a row (prefix[i] == prefix[i-1]).
  # (Note: this skips the first index; we will check that separately below.)
  index_prefix_changed = math_ops.reduce_any(
      math_ops.not_equal(index_prefix[1:], index_prefix[:-1]), axis=1)
  # Check two cases:
  #   * For indices that start a new row: index_suffix[i] must be zero.
  #   * For indices that continue a row: index_suffix[i] must be equal to
  #     index_suffix[i-1]+1.
  index_ok = array_ops.where(
      index_prefix_changed, math_ops.equal(index_suffix[1:], 0),
      math_ops.equal(index_suffix[1:], index_suffix[:-1] + 1))
  # Also check that the very first index didn't skip any cells.  The first
  # index starts a new row (by definition), so its suffix should be zero.
  sparse_indices_are_ragged_right = math_ops.logical_and(
      math_ops.reduce_all(math_ops.equal(index_suffix[:1], 0)),
      math_ops.reduce_all(index_ok))
  message = [
      "SparseTensor is not right-ragged", "SparseTensor.indices =", indices
  ]
  return [control_flow_ops.Assert(sparse_indices_are_ragged_right, message)]
@ops.RegisterGradient("RaggedTensorToSparse")
def _ragged_tensor_to_sparse_gradient(op, unused_sparse_indices_grad,
                                      sparse_values_grad,
                                      unused_sparse_shape_grad):
  """Gradient for RaggedTensorToSparse."""
  # op.inputs is the RaggedTensor's nested_row_splits followed by its
  # flat_values as the last input.
  splits_inputs = op.inputs[:-1]
  flat_values_input = op.inputs[-1]
  # Row splits are structural (integer) inputs: no gradient flows to them.
  splits_gradients = [None] * len(splits_inputs)
  # The SparseTensor's values are a reshape of flat_values, so the incoming
  # values gradient just needs the inverse reshape.
  flat_values_gradient = array_ops.reshape(
      sparse_values_grad, array_ops.shape(flat_values_input))
  return splits_gradients + [flat_values_gradient]
| 39.952824 | 80 | 0.631222 |
acf3a3ea7e2177c1ead4937975edb8957a1455df | 70 | py | Python | Study_NumPy/1.py | hougeaaa/HappyDay | 60e226d380862db30b3c6abc1d7d7fbbcddd04a0 | [
"MIT"
] | 1 | 2020-03-19T10:01:53.000Z | 2020-03-19T10:01:53.000Z | Study_NumPy/1.py | hougeaaa/HappyDay | 60e226d380862db30b3c6abc1d7d7fbbcddd04a0 | [
"MIT"
] | null | null | null | Study_NumPy/1.py | hougeaaa/HappyDay | 60e226d380862db30b3c6abc1d7d7fbbcddd04a0 | [
"MIT"
] | null | null | null | import numpy as np
#dt=np.dtype(np.int32)
dt=np.dtype('<i4')
print(dt) | 17.5 | 22 | 0.7 |
acf3a7494e57415780beaccd86bbe1ef813fe515 | 7,270 | py | Python | airflow/plugins_manager.py | InigoSJ/airflow | 8b97a387dc30d8c88390d500ec99333798c20f1c | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2 | 2020-10-12T05:21:27.000Z | 2021-07-07T09:23:47.000Z | airflow/plugins_manager.py | InigoSJ/airflow | 8b97a387dc30d8c88390d500ec99333798c20f1c | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 4 | 2020-07-07T20:16:19.000Z | 2021-09-29T17:27:38.000Z | airflow/plugins_manager.py | InigoSJ/airflow | 8b97a387dc30d8c88390d500ec99333798c20f1c | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2020-09-29T05:26:34.000Z | 2020-09-29T05:26:34.000Z | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import imp
import inspect
import os
import re
import pkg_resources
from typing import List, Any
from airflow import settings
from airflow.models.baseoperator import BaseOperatorLink
from airflow.utils.log.logging_mixin import LoggingMixin
log = LoggingMixin().log
import_errors = {}
class AirflowPluginException(Exception):
    """Raised when an Airflow plugin is invalid or cannot be loaded."""
class AirflowPlugin:
    """Base class for Airflow plugins.

    Subclasses declare the components they contribute (operators, hooks,
    views, menu items, ...) through the class attributes below; the plugin
    loader in this module integrates them into Airflow's namespaces.
    """
    name = None  # type: str
    operators = []  # type: List[Any]
    sensors = []  # type: List[Any]
    hooks = []  # type: List[Any]
    executors = []  # type: List[Any]
    macros = []  # type: List[Any]
    admin_views = []  # type: List[Any]
    flask_blueprints = []  # type: List[Any]
    menu_links = []  # type: List[Any]
    appbuilder_views = []  # type: List[Any]
    appbuilder_menu_items = []  # type: List[Any]
    # A function that validate the statsd stat name, apply changes
    # to the stat name if necessary and return the transformed stat name.
    #
    # The function should have the following signature:
    # def func_name(stat_name: str) -> str:
    stat_name_handler = None  # type: Any
    global_operator_extra_links = []  # type: List[BaseOperatorLink]

    @classmethod
    def validate(cls):
        """Ensure the plugin declares the mandatory ``name`` attribute."""
        if cls.name:
            return
        raise AirflowPluginException("Your plugin needs a name.")

    @classmethod
    def on_load(cls, *args, **kwargs):
        """
        Executed when the plugin is loaded.
        This method is only called once during runtime.
        :param args: If future arguments are passed in on call.
        :param kwargs: If future arguments are passed in on call.
        """
def load_entrypoint_plugins(entry_points, airflow_plugins):
    """
    Load AirflowPlugin subclasses from the entrypoints
    provided. The entry_point group should be 'airflow.plugins'.
    :param entry_points: A collection of entrypoints to search for plugins
    :type entry_points: Generator[setuptools.EntryPoint, None, None]
    :param airflow_plugins: A collection of existing airflow plugins to
        ensure we don't load duplicates
    :type airflow_plugins: list[type[airflow.plugins_manager.AirflowPlugin]]
    :rtype: list[airflow.plugins_manager.AirflowPlugin]
    """
    for entry_point in entry_points:
        log.debug('Importing entry_point plugin %s', entry_point.name)
        candidate = entry_point.load()
        if not is_valid_plugin(candidate, airflow_plugins):
            continue
        # Give the plugin a chance to run its one-time load hook before
        # it is registered.
        load_hook = getattr(candidate, 'on_load', None)
        if callable(load_hook):
            load_hook()
        airflow_plugins.append(candidate)
    return airflow_plugins
def is_valid_plugin(plugin_obj, existing_plugins):
    """
    Check whether a potential object is a subclass of
    the AirflowPlugin class.
    :param plugin_obj: potential subclass of AirflowPlugin
    :param existing_plugins: Existing list of AirflowPlugin subclasses
    :return: Whether or not the obj is a valid subclass of
        AirflowPlugin
    """
    # Guard clauses: only proper subclasses of AirflowPlugin qualify.
    if not inspect.isclass(plugin_obj):
        return False
    if not issubclass(plugin_obj, AirflowPlugin) or plugin_obj is AirflowPlugin:
        return False
    # validate() raises if the plugin is malformed (e.g. missing a name).
    plugin_obj.validate()
    return plugin_obj not in existing_plugins
# Discover plugins by scanning every .py file under the configured plugins
# folder and collecting AirflowPlugin subclasses defined in them.
plugins = []  # type: List[AirflowPlugin]
norm_pattern = re.compile(r'[/|.]')
if settings.PLUGINS_FOLDER is None:
    raise AirflowPluginException("Plugins folder is not set")
# Crawl through the plugins folder to find AirflowPlugin derivatives
for root, dirs, files in os.walk(settings.PLUGINS_FOLDER, followlinks=True):
    for f in files:
        try:
            filepath = os.path.join(root, f)
            if not os.path.isfile(filepath):
                continue
            mod_name, file_ext = os.path.splitext(
                os.path.split(filepath)[-1])
            if file_ext != '.py':
                continue
            log.debug('Importing plugin module %s', filepath)
            # normalize root path as namespace
            namespace = '_'.join([re.sub(norm_pattern, '__', root), mod_name])
            m = imp.load_source(namespace, filepath)
            for obj in list(m.__dict__.values()):
                if is_valid_plugin(obj, plugins):
                    plugins.append(obj)
        # Broad catch is deliberate: one broken plugin file must not stop
        # discovery of the others; failures are surfaced via import_errors.
        # NOTE(review): if os.path.join itself raised, `filepath` would be
        # unbound (or stale) in this handler -- confirm acceptable.
        except Exception as e:
            log.exception(e)
            log.error('Failed to import plugin %s', filepath)
            import_errors[filepath] = str(e)
# Entrypoint-declared plugins (group 'airflow.plugins') are appended after
# filesystem plugins; duplicates are filtered by is_valid_plugin.
plugins = load_entrypoint_plugins(
    pkg_resources.iter_entry_points('airflow.plugins'),
    plugins
)
def make_module(name, objects):
    """Create an ad-hoc module named ``name`` exposing ``objects``.

    Each object becomes a module attribute under its ``__name__``; the
    module also records ``_name`` (last dotted component) and ``_objects``
    for introspection by the integration code below.

    :param name: dotted module name (lower-cased before use)
    :param objects: iterable of classes/functions to expose
    :return: the newly created module object
    """
    # Local import: ``imp.new_module`` is deprecated since Python 3.4 and the
    # ``imp`` module was removed in 3.12; ``types.ModuleType`` is the
    # documented equivalent and behaves identically here.
    import types

    log.debug('Creating module %s', name)
    name = name.lower()
    module = types.ModuleType(name)
    module._name = name.split('.')[-1]
    module._objects = objects
    module.__dict__.update((o.__name__, o) for o in objects)
    return module
# Plugin components to integrate as modules
operators_modules = []
sensors_modules = []
hooks_modules = []
executors_modules = []
macros_modules = []
# Plugin components to integrate directly
admin_views = []  # type: List[Any]
flask_blueprints = []  # type: List[Any]
menu_links = []  # type: List[Any]
flask_appbuilder_views = []  # type: List[Any]
flask_appbuilder_menu_links = []  # type: List[Any]
stat_name_handler = None  # type: Any
global_operator_extra_links = []  # type: List[Any]
stat_name_handlers = []
for p in plugins:
    # The operators module also exposes the plugin's sensors --
    # presumably for backwards compatibility with imports from
    # airflow.operators; confirm before changing.
    operators_modules.append(
        make_module('airflow.operators.' + p.name, p.operators + p.sensors))
    sensors_modules.append(
        make_module('airflow.sensors.' + p.name, p.sensors)
    )
    hooks_modules.append(make_module('airflow.hooks.' + p.name, p.hooks))
    executors_modules.append(
        make_module('airflow.executors.' + p.name, p.executors))
    macros_modules.append(make_module('airflow.macros.' + p.name, p.macros))
    admin_views.extend(p.admin_views)
    menu_links.extend(p.menu_links)
    flask_appbuilder_views.extend(p.appbuilder_views)
    flask_appbuilder_menu_links.extend(p.appbuilder_menu_items)
    flask_blueprints.extend([{
        'name': p.name,
        'blueprint': bp
    } for bp in p.flask_blueprints])
    if p.stat_name_handler:
        stat_name_handlers.append(p.stat_name_handler)
    global_operator_extra_links.extend(p.global_operator_extra_links)
# Exactly zero or one stat_name_handler may be installed across all plugins.
if len(stat_name_handlers) > 1:
    raise AirflowPluginException(
        'Specified more than one stat_name_handler ({}) '
        'is not allowed.'.format(stat_name_handlers))
stat_name_handler = stat_name_handlers[0] if len(stat_name_handlers) == 1 else None
acf3a8921980ea5d33251b5e211af7be1038ecf1 | 4,646 | py | Python | Lectures/week_12/demo/demo/settings.py | diable201/WEB-development | 370bd731b9a65a1658033a60c63abece11d4e259 | [
"MIT"
] | 1 | 2022-02-18T15:44:46.000Z | 2022-02-18T15:44:46.000Z | Lectures/week_12/demo/demo/settings.py | diable201/WEB-development | 370bd731b9a65a1658033a60c63abece11d4e259 | [
"MIT"
] | null | null | null | Lectures/week_12/demo/demo/settings.py | diable201/WEB-development | 370bd731b9a65a1658033a60c63abece11d4e259 | [
"MIT"
] | 1 | 2021-03-26T13:55:52.000Z | 2021-03-26T13:55:52.000Z | """
Django settings for demo project.
Generated by 'django-admin startproject' using Django 2.0.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
import datetime
from django.conf import settings
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): key is committed in source -- acceptable only for a demo;
# move to an environment variable before any real deployment.
SECRET_KEY = 'p2)k044zvj@dph!o++!cmu#=oa(ru!zr66f)d^e&f^a+gp#6np'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    # Django applications
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third part applications
    'rest_framework',
    'rest_framework_jwt',
    'corsheaders',
    # Local applications
    'main',
    'core',
    'api',
]
# CorsMiddleware is placed above CommonMiddleware as required by
# django-cors-headers so CORS headers are added to all responses.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# NOTE(review): allows requests from any origin -- fine for local dev,
# tighten with CORS_ORIGIN_WHITELIST in production.
CORS_ORIGIN_ALLOW_ALL = True
ROOT_URLCONF = 'demo.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'demo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Almaty'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
# DRF tries JWT auth first, then falls back to session/basic auth.
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
        'rest_framework.authentication.SessionAuthentication',
        'rest_framework.authentication.BasicAuthentication',
    ),
}
# django-rest-framework-jwt configuration: HS256-signed tokens derived from
# SECRET_KEY, valid for 5 minutes, no refresh.
JWT_AUTH = {
    'JWT_ENCODE_HANDLER':
    'rest_framework_jwt.utils.jwt_encode_handler',
    'JWT_DECODE_HANDLER':
    'rest_framework_jwt.utils.jwt_decode_handler',
    'JWT_PAYLOAD_HANDLER':
    'rest_framework_jwt.utils.jwt_payload_handler',
    'JWT_PAYLOAD_GET_USER_ID_HANDLER':
    'rest_framework_jwt.utils.jwt_get_user_id_from_payload_handler',
    'JWT_RESPONSE_PAYLOAD_HANDLER':
    'rest_framework_jwt.utils.jwt_response_payload_handler',
    'JWT_SECRET_KEY': settings.SECRET_KEY,
    'JWT_GET_USER_SECRET_KEY': None,
    'JWT_PUBLIC_KEY': None,
    'JWT_PRIVATE_KEY': None,
    'JWT_ALGORITHM': 'HS256',
    'JWT_VERIFY': True,
    'JWT_VERIFY_EXPIRATION': True,
    'JWT_LEEWAY': 0,
    'JWT_EXPIRATION_DELTA': datetime.timedelta(seconds=300),
    'JWT_AUDIENCE': None,
    'JWT_ISSUER': None,
    'JWT_ALLOW_REFRESH': False,
    'JWT_REFRESH_EXPIRATION_DELTA': datetime.timedelta(days=7),
    'JWT_AUTH_HEADER_PREFIX': 'JWT',
    'JWT_AUTH_COOKIE': None,
}
acf3a99043dc959556dfc94e6e8f3f0905298959 | 3,861 | py | Python | test/functional/wallet_dump.py | indianpivxcoin/indiancoin | d57d8ae1bf258d4266120976f397cd776debdf3b | [
"MIT"
] | null | null | null | test/functional/wallet_dump.py | indianpivxcoin/indiancoin | d57d8ae1bf258d4266120976f397cd776debdf3b | [
"MIT"
] | null | null | null | test/functional/wallet_dump.py | indianpivxcoin/indiancoin | d57d8ae1bf258d4266120976f397cd776debdf3b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the dumpwallet RPC."""
import os
from test_framework.test_framework import IndianCoinTestFramework
from test_framework.util import (assert_equal, assert_raises_rpc_error)
def read_dump(file_name, addrs, hd_master_addr_old):
"""
Read the given dump, count the addrs that match, count change and reserve.
Also check that the old hd_master is inactive
"""
with open(file_name, encoding='utf8') as inputfile:
found_addr = 0
found_addr_chg = 0
found_addr_rsv = 0
hd_master_addr_ret = None
for line in inputfile:
# only read non comment lines
if line[0] != "#" and len(line) > 10:
# split out some data
key_label, comment = line.split("#")
# key = key_label.split(" ")[0]
keytype = key_label.split(" ")[2]
addr = comment.split(" addr=")[1].strip()
# count key types
if addr in addrs:
found_addr += 1
elif keytype == "change=1":
found_addr_chg += 1
elif keytype == "reserve=1":
found_addr_rsv += 1
return found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_ret
class WalletDumpTest(IndianCoinTestFramework):
    """Functional test for the dumpwallet RPC: dump an unencrypted wallet,
    then encrypt it and dump again, verifying address/keypool counts."""
    def set_test_params(self):
        # Single node; keypool of 90 so the dump's reserve count is predictable.
        self.num_nodes = 1
        self.extra_args = [["-keypool=90"]]
    def setup_network(self, split=False):
        # Use 1 minute timeout because the initial getnewaddress RPC can take
        # longer than the default 30 seconds due to an expensive
        # CWallet::TopUpKeyPool call, and the encryptwallet RPC made later in
        # the test often takes even longer.
        self.add_nodes(self.num_nodes, self.extra_args, timewait=60)
        self.start_nodes()
    def run_test (self):
        tmpdir = self.options.tmpdir
        # generate 20 addresses to compare against the dump
        test_addr_count = 20
        addrs = []
        for i in range(0,test_addr_count):
            addr = self.nodes[0].getnewaddress()
            #vaddr= self.nodes[0].validateaddress(addr) #required to get hd keypath
            addrs.append(addr)
        # Should be a no-op:
        self.nodes[0].keypoolrefill()
        # dump unencrypted wallet
        result = self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.unencrypted.dump")
        assert_equal(result['filename'], os.path.abspath(tmpdir + "/node0/wallet.unencrypted.dump"))
        found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \
            read_dump(tmpdir + "/node0/wallet.unencrypted.dump", addrs, None)
        assert_equal(found_addr, test_addr_count)  # all keys must be in the dump
        assert_equal(found_addr_chg, 0)  # 0 blocks where mined
        assert_equal(found_addr_rsv, 90 + 1)  # keypool size (TODO: fix off-by-one)
        #encrypt wallet, restart, unlock and dump
        self.nodes[0].node_encrypt_wallet('test')
        self.start_node(0)
        self.nodes[0].walletpassphrase('test', 10)
        # Should be a no-op:
        self.nodes[0].keypoolrefill()
        self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.encrypted.dump")
        found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_enc = \
            read_dump(tmpdir + "/node0/wallet.encrypted.dump", addrs, hd_master_addr_unenc)
        assert_equal(found_addr, test_addr_count)
        assert_equal(found_addr_chg, 90 + 1)  # old reserve keys are marked as change now
        assert_equal(found_addr_rsv, 90 + 1)  # keypool size (TODO: fix off-by-one)
if __name__ == '__main__':
    WalletDumpTest().main ()
| 41.074468 | 100 | 0.638695 |
acf3a9ccc278abc37a06e3fcfd9fbe49e0387cc6 | 8,632 | py | Python | tests/keras_tests/feature_networks_tests/feature_networks/mixed_percision_test.py | eladc-git/model_optimization | 46d1c893ca23e61d8ef7597184ad2ba6e2ae6e7a | [
"Apache-2.0"
] | null | null | null | tests/keras_tests/feature_networks_tests/feature_networks/mixed_percision_test.py | eladc-git/model_optimization | 46d1c893ca23e61d8ef7597184ad2ba6e2ae6e7a | [
"Apache-2.0"
] | null | null | null | tests/keras_tests/feature_networks_tests/feature_networks/mixed_percision_test.py | eladc-git/model_optimization | 46d1c893ca23e61d8ef7597184ad2ba6e2ae6e7a | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import tensorflow as tf
from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
import model_compression_toolkit as mct
from model_compression_toolkit.common.mixed_precision.kpi import KPI
from model_compression_toolkit.common.mixed_precision.mixed_precision_quantization_config import \
MixedPrecisionQuantizationConfig
from model_compression_toolkit.common.user_info import UserInformation
from tests.common_tests.base_feature_test import BaseFeatureNetworkTest
from tests.common_tests.helpers.tensors_compare import cosine_similarity
keras = tf.keras
layers = keras.layers
class MixedPercisionBaseTest(BaseKerasFeatureNetworkTest):
    """Shared fixture for the mixed-precision search tests: a two-conv Keras
    model quantized with candidate weight widths [2, 8, 4]. Subclasses set
    the KPI target and implement compare()."""
    def __init__(self, unit_test):
        super().__init__(unit_test)
    def get_quantization_config(self):
        qc = mct.QuantizationConfig(mct.ThresholdSelectionMethod.MSE,
                                    mct.ThresholdSelectionMethod.MSE,
                                    mct.QuantizationMethod.POWER_OF_TWO,
                                    mct.QuantizationMethod.POWER_OF_TWO,
                                    weights_bias_correction=True,
                                    weights_per_channel_threshold=True,
                                    activation_channel_equalization=True,
                                    relu_unbound_correction=True,
                                    input_scaling=True)
        return MixedPrecisionQuantizationConfig(qc, weights_n_bits=[2, 8, 4], num_of_images=1)
    def get_bit_widths_config(self):
        # None -> let the mixed-precision search choose the widths.
        return None
    def get_input_shapes(self):
        # NOTE(review): 244 (not 224) in the width position looks like a typo
        # for a 224x224 input -- confirm the asymmetric shape is intended.
        return [[self.val_batch_size, 224, 244, 3]]
    def create_networks(self):
        inputs = layers.Input(shape=self.get_input_shapes()[0][1:])
        x = layers.Conv2D(30, 40)(inputs)
        x = layers.BatchNormalization()(x)
        x = layers.Conv2D(50, 40)(x)
        outputs = layers.ReLU()(x)
        model = keras.Model(inputs=inputs, outputs=outputs)
        return model
    def compare(self, quantized_model, float_model, input_x=None, quantization_info: UserInformation = None):
        # This is a base test, so it does not check a thing. Only actual tests of mixed precision
        # compare things to test.
        raise NotImplementedError
class MixedPercisionManuallyConfiguredTest(MixedPercisionBaseTest):
    """Bypasses the search: fixes the per-layer bit-width configuration
    manually (indices into weights_n_bits=[8, 2, 3]) and checks the
    resulting unique-value counts."""
    def get_quantization_config(self):
        qc = mct.QuantizationConfig(mct.ThresholdSelectionMethod.MSE,
                                    mct.ThresholdSelectionMethod.MSE,
                                    mct.QuantizationMethod.POWER_OF_TWO,
                                    mct.QuantizationMethod.POWER_OF_TWO,
                                    weights_bias_correction=True,
                                    weights_per_channel_threshold=False,
                                    activation_channel_equalization=True,
                                    relu_unbound_correction=True,
                                    input_scaling=True)
        return MixedPrecisionQuantizationConfig(qc, weights_n_bits=[8, 2, 3])
    def get_bit_widths_config(self):
        # First layer should be quantized using 2 bits
        # Second layer should be quantized using 3 bits
        return [2, 1]
    def get_kpi(self):
        # Return some KPI (it does not really matter the value here as search_methods is not done,
        # and the configuration is
        # set manually)
        return KPI(1)
    def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
        # NOTE(review): bare `assert` comparing against a plain list -- the
        # sibling tests use (cfg == [...]).all(), which suggests cfg may be an
        # ndarray; confirm this comparison behaves as intended.
        assert quantization_info.mixed_precision_cfg == [2, 1]
        self.unit_test.assertTrue(np.unique(quantized_model.layers[2].weights[0]).flatten().shape[0] <= 4)
        self.unit_test.assertTrue(np.unique(quantized_model.layers[4].weights[0]).flatten().shape[0] <= 8)
class MixedPercisionSearchTest(MixedPercisionBaseTest):
    """Mixed-precision search with an unbounded KPI: the search should pick
    the maximal-precision (8-bit, candidate index 0) configuration for both
    conv layers."""
    def __init__(self, unit_test):
        super().__init__(unit_test)
    def get_kpi(self):
        # kpi is infinity -> should give best model - 8bits
        return KPI(np.inf)
    def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
        # Use the framework's assertion (consistent with the per-channel checks
        # below) instead of a bare `assert`, which is silently stripped when
        # Python runs with -O and would make this check a no-op.
        self.unit_test.assertTrue(
            (quantization_info.mixed_precision_cfg == [0, 0]).all(),
            msg='kpi is infinity -> should give best model - 8bits')
        for i in range(30):  # quantized per channel
            self.unit_test.assertTrue(
                np.unique(quantized_model.layers[2].weights[0][:, :, :, i]).flatten().shape[0] <= 256)
        for i in range(50):  # quantized per channel
            self.unit_test.assertTrue(
                np.unique(quantized_model.layers[4].weights[0][:, :, :, i]).flatten().shape[0] <= 256)
class MixedPercisionSearchKPI4BitsAvgTest(MixedPercisionBaseTest):
    """Search constrained to a 4-bit-average weights budget: both layers are
    expected to land on the 4-bit candidate (index 1)."""
    def __init__(self, unit_test):
        super().__init__(unit_test)
    def get_kpi(self):
        # kpi is for 4 bits on average
        # NOTE(review): 2544140 is presumably the model's total weight-parameter
        # count in bytes at 8 bits -- confirm against the fixture model.
        return KPI(2544140 * 4 / 8)
    def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
        assert (quantization_info.mixed_precision_cfg == [1, 1]).all()
        for i in range(30):  # quantized per channel
            self.unit_test.assertTrue(
                np.unique(quantized_model.layers[2].weights[0][:, :, :, i]).flatten().shape[0] <= 16)
        for i in range(50):  # quantized per channel
            self.unit_test.assertTrue(
                np.unique(quantized_model.layers[4].weights[0][:, :, :, i]).flatten().shape[0] <= 16)
class MixedPercisionSearchKPI2BitsAvgTest(MixedPercisionBaseTest):
    """Search constrained to a 2-bit-average weights budget: both layers are
    expected to land on the 2-bit candidate (index 2)."""
    def __init__(self, unit_test):
        super().__init__(unit_test)
    def get_kpi(self):
        # kpi is for 2 bits on average
        # NOTE(review): baseline constant differs slightly from the 4-bit test
        # (2544200 vs 2544140) -- confirm which is correct.
        return KPI(2544200 * 2 / 8)
    def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
        assert (quantization_info.mixed_precision_cfg == [2, 2]).all()
        for i in range(30):  # quantized per channel
            self.unit_test.assertTrue(
                np.unique(quantized_model.layers[2].weights[0][:, :, :, i]).flatten().shape[0] <= 4)
        for i in range(50):  # quantized per channel
            self.unit_test.assertTrue(
                np.unique(quantized_model.layers[4].weights[0][:, :, :, i]).flatten().shape[0] <= 4)
class MixedPercisionDepthwiseTest(MixedPercisionBaseTest):
    """Mixed-precision sanity check on a depthwise-conv model: with an
    unbounded KPI and 16-bit activations, the quantized model's output should
    stay numerically close to the float model's."""
    def __init__(self, unit_test):
        super().__init__(unit_test)
    def get_kpi(self):
        return KPI(np.inf)
    def create_networks(self):
        inputs = layers.Input(shape=self.get_input_shapes()[0][1:])
        x = layers.DepthwiseConv2D(30)(inputs)
        x = layers.BatchNormalization()(x)
        x = layers.ReLU()(x)
        model = keras.Model(inputs=inputs, outputs=x)
        return model
    def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
        # Compare float vs quantized predictions via cosine similarity.
        y = float_model.predict(input_x)
        y_hat = quantized_model.predict(input_x)
        cs = cosine_similarity(y, y_hat)
        self.unit_test.assertTrue(np.isclose(cs, 1), msg=f'fail cosine similarity check:{cs}')
    def get_quantization_config(self):
        # All corrections disabled so the comparison isolates pure quantization
        # error; 16-bit candidate included so the search can reach ~lossless.
        qc = mct.QuantizationConfig(mct.ThresholdSelectionMethod.MSE,
                                    mct.ThresholdSelectionMethod.MSE,
                                    mct.QuantizationMethod.POWER_OF_TWO,
                                    mct.QuantizationMethod.POWER_OF_TWO,
                                    weights_bias_correction=False,
                                    weights_per_channel_threshold=True,
                                    activation_channel_equalization=False,
                                    relu_unbound_correction=False,
                                    input_scaling=False,
                                    activation_n_bits=16)
        return MixedPrecisionQuantizationConfig(qc, weights_n_bits=[2, 8, 4, 16])
| 44.958333 | 120 | 0.630561 |
acf3aa0629748e7c6022633f4292c7b92912726f | 5,546 | py | Python | catalyst/rl/agent/head.py | andrey-avdeev/catalyst | fd17aaba7775c99b7e2b1ce86e60aa8f2379acc3 | [
"Apache-2.0"
] | 1 | 2019-11-26T06:41:33.000Z | 2019-11-26T06:41:33.000Z | catalyst/rl/agent/head.py | andrey-avdeev/catalyst | fd17aaba7775c99b7e2b1ce86e60aa8f2379acc3 | [
"Apache-2.0"
] | null | null | null | catalyst/rl/agent/head.py | andrey-avdeev/catalyst | fd17aaba7775c99b7e2b1ce86e60aa8f2379acc3 | [
"Apache-2.0"
] | 1 | 2021-12-20T07:32:25.000Z | 2021-12-20T07:32:25.000Z | from typing import List # isort:skip
import torch
import torch.nn as nn
from catalyst.contrib.models import SequentialNet
from catalyst.utils import outer_init
from .policy import (
BernoulliPolicy, CategoricalPolicy, DiagonalGaussPolicy, RealNVPPolicy,
SquashingGaussPolicy
)
class ValueHead(nn.Module):
    """Multi-head value head for RL critics.

    Each of ``num_heads`` heads maps the state embedding to
    ``out_features * num_atoms`` outputs; ``num_atoms > 1`` enables
    distributional outputs (categorical or quantile). When
    ``use_state_value_head`` is set, per-head state-value streams are combined
    with the mean-centered outputs (dueling-style aggregation).
    """
    @staticmethod
    def _build_head(in_features, out_features, num_atoms, bias):
        # One linear layer producing out_features * num_atoms logits/values.
        head = nn.Linear(
            in_features=in_features,
            out_features=out_features * num_atoms,
            bias=bias
        )
        return head
    def __init__(
        self,
        in_features: int,
        out_features: int,
        bias: bool = True,
        num_atoms: int = 1,
        use_state_value_head: bool = False,
        distribution: str = None,
        values_range: tuple = None,
        num_heads: int = 1,
        hyperbolic_constant: float = 1.0
    ):
        """
        Args:
            in_features: size of the state embedding fed to the head.
            out_features: number of value outputs (e.g. actions for a Q-head).
            bias: whether the linear layers use a bias term.
            num_atoms: atoms per output for distributional heads (1 = scalar).
            use_state_value_head: add dueling-style state-value streams.
            distribution: None (mean), "categorical" or "quantile".
            values_range: (min, max) support -- required iff categorical.
            num_heads: number of parallel value heads.
            hyperbolic_constant: forced to 1.0 when num_heads == 1.
        """
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.bias = bias
        self.num_atoms = num_atoms
        self.use_state_value_head = use_state_value_head
        self.distribution = distribution
        self.values_range = values_range
        self.num_heads = num_heads
        if self.num_heads == 1:
            hyperbolic_constant = 1.0
        self.hyperbolic_constant = hyperbolic_constant
        # Validate the (distribution, values_range, num_atoms) combination.
        if distribution is None:  # mean case
            assert values_range is None and num_atoms == 1
        elif distribution == "categorical":
            assert values_range is not None and num_atoms > 1
        elif distribution == "quantile":
            assert values_range is None and num_atoms > 1
        else:
            raise NotImplementedError()
        value_heads = [
            self._build_head(in_features, out_features, num_atoms, bias)
            for _ in range(num_heads)
        ]
        self.value_heads = nn.ModuleList(value_heads)
        if self.use_state_value_head:
            assert self.out_features > 1, "Not implemented behaviour"
            # One scalar (per atom) state-value stream per head.
            state_value_heads = [
                self._build_head(in_features, 1, num_atoms, bias)
                for _ in range(num_heads)
            ]
            self.state_value_heads = nn.ModuleList(state_value_heads)
        self.apply(outer_init)
    def forward(self, state: torch.Tensor):
        """Compute values for `state` (assumed shape: (batch, in_features) --
        TODO confirm against callers)."""
        x: List[torch.Tensor] = []
        for net in self.value_heads:
            x.append(net(state).view(-1, self.out_features, self.num_atoms))
        # batch_size(0) x num_heads(1) x num_outputs(2) x num_atoms(3)
        x = torch.stack(x, dim=1)
        if self.use_state_value_head:
            state_value: List[torch.Tensor] = []
            for net in self.state_value_heads:
                state_value.append(net(state).view(-1, 1, self.num_atoms))
            # batch_size(0) x num_heads(1) x num_outputs(2) x num_atoms(3)
            state_value = torch.stack(state_value, dim=1)
            # Dueling-style combine: advantage (mean-centered outputs) plus
            # the broadcast state value.
            x_mean = x.mean(2, keepdim=True)
            x = x - x_mean + state_value
        # batch_size(0) x num_heads(1) x num_outputs(2) x num_atoms(3)
        return x
class PolicyHead(nn.Module):
    """Policy head: maps state features to an action-distribution output.

    A single linear projection (``head_net``) widens the state features to
    the size required by the chosen policy parameterization, and an optional
    ``policy_net`` turns those raw outputs into a sampled/evaluated action.
    With ``policy_type`` of ``"logits"``/``None`` the raw projection is
    returned unchanged.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        policy_type: str = None,
        out_activation: nn.Module = None
    ):
        super().__init__()
        assert policy_type in [
            "categorical", "bernoulli", "diagonal-gauss",
            "squashing-gauss", "real-nvp",
            "logits", None
        ]
        # @TODO: refactor
        linear = nn.Linear
        relu = nn.ReLU
        squashing = out_activation  # captured before it may be cleared below
        use_bias = True

        # Each branch fixes the projection width and the distribution module.
        if policy_type == "categorical":
            assert out_activation is None
            width, dist = out_features, CategoricalPolicy()
        elif policy_type == "bernoulli":
            assert out_activation is None
            width, dist = out_features, BernoulliPolicy()
        elif policy_type == "diagonal-gauss":
            # Twice the width: mean and log-std per action dimension.
            width, dist = out_features * 2, DiagonalGaussPolicy()
        elif policy_type == "squashing-gauss":
            # The squashing module is applied inside the policy, so the
            # projection itself stays linear.
            out_activation = None
            width, dist = out_features * 2, SquashingGaussPolicy(squashing)
        elif policy_type == "real-nvp":
            out_activation = None
            width = out_features * 2
            dist = RealNVPPolicy(
                action_size=out_features,
                layer_fn=linear,
                activation_fn=relu,
                squashing_fn=squashing,
                bias=use_bias
            )
        else:
            # Raw logits passthrough.
            width, dist = out_features, None
            policy_type = "logits"

        self.policy_type = policy_type
        head_net = SequentialNet(
            hiddens=[in_features, width],
            layer_fn={"module": linear, "bias": True},
            activation_fn=out_activation,
            norm_fn=None,
        )
        head_net.apply(outer_init)
        self.head_net = head_net
        self.policy_net = dist
        # Bind the post-processing once so forward() stays branch-free.
        if dist is not None:
            self._policy_fn = dist.forward
        else:
            self._policy_fn = lambda *args: args[0]

    def forward(self, state: torch.Tensor, logprob=None, deterministic=False):
        features = self.head_net(state)
        return self._policy_fn(features, logprob, deterministic)
# Public API of this module.
__all__ = ["ValueHead", "PolicyHead"]
| 31.873563 | 78 | 0.593401 |
acf3ab5ceca13c06836a0298833fcd106ea1c54c | 827 | py | Python | ch16/22.py | hnccho/book | f659bc759dca6d4991183147db7ae04abb4265a4 | [
"MIT"
] | 84 | 2017-01-13T04:57:20.000Z | 2022-02-17T11:56:03.000Z | ch16/22.py | hnccho/book | f659bc759dca6d4991183147db7ae04abb4265a4 | [
"MIT"
] | 3 | 2019-10-12T12:02:54.000Z | 2020-04-13T12:09:57.000Z | ch16/22.py | hnccho/book | f659bc759dca6d4991183147db7ae04abb4265a4 | [
"MIT"
] | 111 | 2016-09-22T09:02:12.000Z | 2022-03-18T13:26:49.000Z | import sys
from PyQt5.QtWidgets import *
class MyWindow(QWidget):
    """Main window: a button that prompts for a buy quantity via an
    integer input dialog and a label that displays the entered value."""

    def __init__(self):
        super().__init__()
        self.setupUI()

    def setupUI(self):
        # Window placement and title.
        self.setGeometry(800, 200, 300, 300)
        self.setWindowTitle("PyStock v0.1")

        # Button opens the quantity dialog; label shows the result.
        self.pushButton = QPushButton("Input number")
        self.pushButton.clicked.connect(self.pushButtonClicked)
        self.label = QLabel()

        vbox = QVBoxLayout()
        vbox.addWidget(self.pushButton)
        vbox.addWidget(self.label)
        self.setLayout(vbox)

    def pushButtonClicked(self):
        # Modal integer input; `ok` is False when the user cancels.
        text, ok = QInputDialog.getInt(self, '매수 수량', '매수 수량을 입력하세요.')
        if ok:
            self.label.setText(str(text))
if __name__ == "__main__":
    # Standard Qt bootstrap: create the application, show the window,
    # and enter the event loop.
    app = QApplication(sys.argv)
    window = MyWindow()
    window.show()
    app.exec_()
acf3abb5f982d67762f2711e31e081b2181fc84e | 392 | py | Python | src/grace/configs/opaque/default_runtime.py | hallettmiket/candescence | 1f2b526a7ea8824860565699859bd0202c442c84 | [
"CC0-1.0"
] | null | null | null | src/grace/configs/opaque/default_runtime.py | hallettmiket/candescence | 1f2b526a7ea8824860565699859bd0202c442c84 | [
"CC0-1.0"
] | null | null | null | src/grace/configs/opaque/default_runtime.py | hallettmiket/candescence | 1f2b526a7ea8824860565699859bd0202c442c84 | [
"CC0-1.0"
] | null | null | null | checkpoint_config = dict(interval=300)
# yapf:disable
log_config = dict(
interval=15,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
#load_from = '/home/data/refined/deep-microscopy/output/mike_curriculum/exp40/latest.pth'
resume_from = None
workflow = [('train', 1)]
| 26.133333 | 89 | 0.69898 |
acf3abcc035d1b0bdc10dd07e14ac6cab90333b6 | 3,872 | py | Python | examples/pwr_run/mit_supercloud/characterization/archive/bert_inf.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/mit_supercloud/characterization/archive/bert_inf.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/mit_supercloud/characterization/archive/bert_inf.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | from datasets import load_dataset
from tensorflow import keras
import time
import json
import signal
import sys
gpu_type = sys.argv[1]
raw_datasets = load_dataset("imdb")
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#inputs = tokenizer(sentences, padding="max_length", truncation=True)
def tokenize_function(examples):
return tokenizer(examples["text"], padding="max_length", truncation=True)
tokenized_datasets = raw_datasets.map(tokenize_function, batched=True)
small_train_dataset = tokenized_datasets["train"].shuffle(seed=42).select(range(1000))
small_eval_dataset = tokenized_datasets["test"].shuffle(seed=42).select(range(1000))
full_train_dataset = tokenized_datasets["train"]
full_eval_dataset = tokenized_datasets["test"]
import tensorflow as tf
from transformers import TFAutoModelForSequenceClassification
from datetime import datetime
model = TFAutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=2)
tf_train_dataset = small_train_dataset.remove_columns(["text"]).with_format("tensorflow")
tf_eval_dataset = small_eval_dataset.remove_columns(["text"]).with_format("tensorflow")
train_features = {x: tf_train_dataset[x].to_tensor() for x in tokenizer.model_input_names}
train_tf_dataset = tf.data.Dataset.from_tensor_slices((train_features, tf_train_dataset["label"]))
train_tf_dataset = train_tf_dataset.shuffle(len(tf_train_dataset)).batch(8)
eval_features = {x: tf_eval_dataset[x].to_tensor() for x in tokenizer.model_input_names}
eval_tf_dataset = tf.data.Dataset.from_tensor_slices((eval_features, tf_eval_dataset["label"]))
eval_tf_dataset = eval_tf_dataset.batch(8)
model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=5e-5),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=tf.metrics.SparseCategoricalAccuracy(),
)
global curr_iter, batch_time, iter_time
batch_time = {} # mini-batch time
iter_time = {} # time stamp of current iteration
curr_iter = 0
class RecordBatch(keras.callbacks.Callback):
    """Keras callback that records per-batch inference latency and the
    wall-clock timestamp of each predict() pass.

    Results are written back into the module-level ``batch_time`` and
    ``iter_time`` dicts, keyed by the module-level ``curr_iter`` counter.
    """
    def __init__(self):
        super(RecordBatch, self).__init__()
        # NOTE(review): __init__ only *reads* these globals; the `global`
        # declaration has no effect here, and `self.curr_iter` is never
        # used after being stored.
        global curr_iter, iter_time, batch_time
        self.batch_time = []        # per-batch durations (seconds) for this pass
        self.batch_begin = 0        # wall-clock start of the batch in flight
        self.curr_iter = curr_iter
        self.iter_time = 0          # timestamp string set when predict starts
    def on_predict_begin(self, logs=None):
        # Stamp the start of this predict() pass (human-readable string).
        now = datetime.now()
        self.iter_time = str(now)
    def on_predict_batch_begin(self, batch, logs=None):
        self.batch_begin = time.time()
    def on_predict_batch_end(self, batch, logs=None):
        # Duration of the just-finished batch, rounded to 0.1 ms.
        self.batch_time.append(round(time.time() - self.batch_begin, 4))
    def on_predict_end(self, logs=None):
        # Flush this pass's measurements into the shared module-level dicts.
        global curr_iter, iter_time, batch_time
        iter_time[curr_iter] = self.iter_time
        batch_time[curr_iter] = self.batch_time
# Single shared callback instance, reused across every predict() pass so
# timing accumulates in the module-level dicts.
my_callback = RecordBatch()
callbacks = [my_callback]
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
    """SIGINT handler: flush the timing data collected so far to the log
    files, then exit the process."""
    # first record the wasted epoch time
    global iter_time, batch_time
    duration_file = f'logs/{gpu_type}_duration_bert_inf256.json'
    timestamp_file = f'logs/{gpu_type}_timestamp_bert_inf256.json'
    with open(duration_file, 'w') as out:
        json.dump(batch_time, out, indent=4)
    with open(timestamp_file, 'w') as out:
        json.dump(iter_time, out, indent=4)
    sys.exit()
# Install the Ctrl-C handler so partial results are saved on interrupt.
signal.signal(signal.SIGINT, terminateProcess)
#################################################################################
print('################## start inferencing ##################')
for i in range(300):
    curr_iter = i
    print(f'curr_iter: {i}')
    # NOTE(review): with a tf.data dataset input, Keras ignores batch_size
    # (the dataset is already batched at 8 above) -- confirm intent.
    model.predict(train_tf_dataset, batch_size=256, callbacks=callbacks)
# Final flush of all collected measurements after a full run.
with open(f'logs/{gpu_type}_duration_bert_inf256.json', 'w') as f:
    json.dump(batch_time, f, indent=4)
with open(f'logs/{gpu_type}_timestamp_bert_inf256.json', 'w') as f:
    json.dump(iter_time, f, indent=4)
acf3aca7bb074d50bd6091ef03aae7c7690c2185 | 5,465 | py | Python | tests/null_object_tests.py | SungevityCorp/holmium.core | b28160eba5b62ee45be30774c60d1b73ecfa7c29 | [
"MIT"
] | null | null | null | tests/null_object_tests.py | SungevityCorp/holmium.core | b28160eba5b62ee45be30774c60d1b73ecfa7c29 | [
"MIT"
] | 2 | 2016-07-05T16:54:19.000Z | 2016-07-19T21:40:49.000Z | tests/null_object_tests.py | SungevityCorp/holmium.core | b28160eba5b62ee45be30774c60d1b73ecfa7c29 | [
"MIT"
] | 1 | 2015-05-14T12:41:22.000Z | 2015-05-14T12:41:22.000Z | import unittest
import time
from StdSuites import anything
from holmium.core import Page, Element, Locators
from holmium.core.pageobject import NonexistentElement
from tests.utils import get_driver, make_temp_page
from selenium.common.exceptions import NoSuchElementException, StaleElementReferenceException, WebDriverException
from selenium.common.exceptions import TimeoutException, NoSuchFrameException
import hiro
class ElementTest(unittest.TestCase):
    """Tests for holmium's NonexistentElement null-object behaviour.

    Each test builds a throwaway Page subclass against a temp HTML page
    and checks equality, string/repr formatting, and attribute-access
    error reporting of NonexistentElement.

    NOTE(review): no tearDown -- the webdriver from get_driver() is never
    quit; presumably get_driver() returns a shared/cached driver. Verify.
    """
    # Static fixture page served from a temp file via make_temp_page().
    page_content = """
        <body>
            <div id="simple_id">simple_id</div>
            <div id="other_id">simple_id</div>
            <div id="another_id">another_id</div>
            <div class="simple_class">simple_class</div>
            <div class="simple_class">simple_other_class</div>
            <div class="simple_xpath"><h3>Simple XPATH</h3></div>
        </body>
    """
    def setUp(self):
        self.driver = get_driver()
    def test_non_existence_eq_element(self):
        """test object equivalence test that expected parameters are displayed"""
        class SimplePage(Page):
            # Locator that matches nothing on the fixture page.
            invalid_el = Element(Locators.ID, "foo")
        uri = make_temp_page(ElementTest.page_content)
        page = SimplePage(self.driver, uri)
        non_exist_el = NonexistentElement()
        assert page.invalid_el == non_exist_el, "Expected object equivalence to evaluate TRUE"
        assert page.invalid_el.locator_type != non_exist_el.locator_type, "Expected different locator types"
    def test_non_existence_nq_element(self):
        """test object nonequivalence test that expected parameters are displayed"""
        class SimplePage(Page):
            class_el = Element(Locators.CLASS_NAME, "simple_class")
            class_xpath_el = Element(Locators.CLASS_NAME, "simple_xpath")
        uri = make_temp_page(ElementTest.page_content)
        page = SimplePage(self.driver, uri)
        non_exist_el = NonexistentElement()
        # A Page and a NonexistentElement should never compare equal.
        assert page != non_exist_el, "Expected SimplePage() and NonexistentElement != to evaluate FALSE"
    def test_non_existence_str_element(self):
        """test when str() is called on the object test that expected parameters are displayed"""
        class SimplePage(Page):
            class_el = Element(Locators.CLASS_NAME, "simple_class")
            exception_msg = "Class Name Found"
            query_string = "key1=value1&key2=value2"
        uri = make_temp_page(ElementTest.page_content)
        page = SimplePage(self.driver, uri)
        result = NonexistentElement(page.exception_msg, page.class_el.text, page.query_string)
        test_str = str(result)
        # str() and format() should agree, and all ctor args should appear.
        assert test_str == format(result), " Expected object format and str value are same"
        assert page.exception_msg in test_str
        assert page.class_el.text in test_str
        assert page.query_string in test_str
    def test_non_existence_repr_element(self):
        """when repr() is called on the object test that expected parameters can be referenced by name.
        (e.g: object.prop1, object.prop2 etc...)"""
        class SimplePage(Page):
            class_el = Element(Locators.CLASS_NAME, "simple_class")
            exception_msg = "Class Name Found"
            query_string = "key1=value1&key2=value2"
        uri = make_temp_page(ElementTest.page_content)
        page = SimplePage(self.driver, uri)
        # NOTE(review): this default-constructed instance is unused.
        non_exist_el = NonexistentElement()
        result = NonexistentElement(page.exception_msg, page.class_el.text, page.query_string)
        test_repr = repr(result)
        assert page.exception_msg in test_repr
        assert page.class_el.text in test_repr
        assert page.query_string in test_repr
    def test_non_existence_getAttr_element(self):
        """test that if an undefined property is referenced, an Exception is thrown that includes the initialization data
        #properties, (exception_class_name, locator_type, query_string)"""
        class SimplePage(Page):
            class_el = Element(Locators.CLASS_NAME, "simple_class")
            exception_msg = "Class Name Found"
            query_string = "key1=value1&key2=value2"
            invalid_el = Element(Locators.ID, "blargh")
        uri = make_temp_page(ElementTest.page_content)
        page = SimplePage(self.driver, uri)
        non_exist_el = NonexistentElement(page.exception_msg, page.class_el.text, page.query_string)
        try:
            bar = non_exist_el.foo
        except Exception as e:
            # NOTE(review): e.message is Python 2 only; under Python 3 this
            # would raise AttributeError inside the except block. Confirm
            # the supported interpreter before porting.
            assert page.exception_msg in e.message
            assert page.class_el.text in e.message
            assert page.query_string in e.message
    def test_exception_from_webdriver(self):
        """test that if an undefined property is referenced, valid Webdriver exception
        information is returned"""
        class SimplePage(Page):
            class_el = Element(Locators.CLASS_NAME, "simple_class")
            exception_msg = "Class Name Found"
            query_string = "key1=value1&key2=value2"
            invalid_el = Element(Locators.ID, "blargh")
        uri = make_temp_page(ElementTest.page_content)
        page = SimplePage(self.driver, uri)
        locator_failure = page.invalid_el
        try:
            text = locator_failure.text
        except Exception as e:
            # NOTE(review): e.message is Python 2 only (see above).
            assert "NoSuchElementException" in e.message
            assert "id" in e.message
            assert "blargh" in e.message
if __name__ == "__main__":
    # Run the test suite when invoked directly.
    unittest.main()
| 40.481481 | 121 | 0.672644 |
acf3ad690e6d2175c68be085e36b0bfab4eae4bc | 871 | py | Python | python/example_code/ec2/allocate_address.py | dlo/aws-doc-sdk-examples | 305e5c4f6cf268cafad7e1603aa5d2909fcd9c0c | [
"Apache-2.0"
] | 9 | 2018-09-29T11:44:19.000Z | 2019-11-06T21:41:34.000Z | python/example_code/ec2/allocate_address.py | dlo/aws-doc-sdk-examples | 305e5c4f6cf268cafad7e1603aa5d2909fcd9c0c | [
"Apache-2.0"
] | 1 | 2018-10-30T06:11:07.000Z | 2018-10-30T06:11:07.000Z | python/example_code/ec2/allocate_address.py | dlo/aws-doc-sdk-examples | 305e5c4f6cf268cafad7e1603aa5d2909fcd9c0c | [
"Apache-2.0"
] | 2 | 2018-12-25T10:13:56.000Z | 2021-06-24T11:26:38.000Z | # Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
from botocore.exceptions import ClientError

# Allocate a VPC-scoped Elastic IP and associate it with an EC2 instance.
ec2 = boto3.client('ec2')

try:
    # Request a new Elastic IP address in the VPC domain.
    allocation = ec2.allocate_address(Domain='vpc')
    # Attach the new address to the instance.
    # NOTE: 'INSTANCE_ID' is a sample placeholder -- replace with a real id.
    response = ec2.associate_address(AllocationId=allocation['AllocationId'],
                                     InstanceId='INSTANCE_ID')
    print(response)
except ClientError as e:
    # Sample code: just surface the AWS error.
    print(e)
acf3aea1369283862af47eeec90b863fe48bf356 | 88 | py | Python | bookorbooks/school/models/__init__.py | talhakoylu/SummerInternshipBackend | 4ecedf5c97f73e3d32d5a534769e86aac3e4b6d3 | [
"MIT"
] | 1 | 2021-08-10T22:24:17.000Z | 2021-08-10T22:24:17.000Z | bookorbooks/school/models/__init__.py | talhakoylu/SummerInternshipBackend | 4ecedf5c97f73e3d32d5a534769e86aac3e4b6d3 | [
"MIT"
] | null | null | null | bookorbooks/school/models/__init__.py | talhakoylu/SummerInternshipBackend | 4ecedf5c97f73e3d32d5a534769e86aac3e4b6d3 | [
"MIT"
] | null | null | null | from .class_model import *
from .school_model import *
from .student_list_model import * | 29.333333 | 33 | 0.806818 |
acf3af27b16f32b97c052577f1b2bb178b8fe720 | 7,772 | py | Python | aliyun-python-sdk-domain/aliyunsdkdomain/request/v20180129/SaveBatchTaskForCreatingOrderActivateRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-domain/aliyunsdkdomain/request/v20180129/SaveBatchTaskForCreatingOrderActivateRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-domain/aliyunsdkdomain/request/v20180129/SaveBatchTaskForCreatingOrderActivateRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdomain.endpoint import endpoint_data
class SaveBatchTaskForCreatingOrderActivateRequest(RpcRequest):
    """RPC request for the Domain API ``SaveBatchTaskForCreatingOrderActivate``
    (version 2018-01-29): creates a batch task that activates domain
    registration orders.

    Each entry of ``OrderActivateParams`` is a dict of registrant/contact
    fields; non-None values are flattened into repeated query parameters of
    the form ``OrderActivateParam.<index>.<Key>`` (index is 1-based).
    """

    # Sub-parameter keys of one OrderActivateParam entry, in the order the
    # original hand-written setter emitted them. Keeping a single table
    # replaces 26 copy-pasted `if ... is not None` stanzas.
    _ORDER_ACTIVATE_KEYS = (
        'Country', 'SubscriptionDuration', 'PermitPremiumActivation', 'City',
        'Dns2', 'Dns1', 'RegistrantProfileId', 'AliyunDns', 'ZhCity',
        'TelExt', 'ZhRegistrantName', 'Province', 'PostalCode', 'Email',
        'ZhRegistrantOrganization', 'Address', 'TelArea', 'DomainName',
        'ZhAddress', 'RegistrantType', 'Telephone',
        'TrademarkDomainActivation', 'ZhProvince', 'RegistrantOrganization',
        'EnableDomainProxy', 'RegistrantName',
    )

    def __init__(self):
        RpcRequest.__init__(self, 'Domain', '2018-01-29', 'SaveBatchTaskForCreatingOrderActivate')
        self.set_method('POST')
        # Newer aliyunsdkcore versions expose endpoint maps; guard for older ones.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_OrderActivateParams(self):
        return self.get_query_params().get('OrderActivateParam')

    def set_OrderActivateParams(self, OrderActivateParams):
        """Flatten the list of order-activate dicts into indexed query params.

        Only keys present with a non-None value are emitted, matching the
        behaviour of the previous per-key implementation exactly.
        """
        for depth1 in range(len(OrderActivateParams)):
            param = OrderActivateParams[depth1]
            prefix = 'OrderActivateParam.' + str(depth1 + 1) + '.'
            for key in self._ORDER_ACTIVATE_KEYS:
                value = param.get(key)
                if value is not None:
                    self.add_query_param(prefix + key, value)

    def get_CouponNo(self):
        return self.get_query_params().get('CouponNo')

    def set_CouponNo(self, CouponNo):
        self.add_query_param('CouponNo', CouponNo)

    def get_UseCoupon(self):
        return self.get_query_params().get('UseCoupon')

    def set_UseCoupon(self, UseCoupon):
        self.add_query_param('UseCoupon', UseCoupon)

    def get_PromotionNo(self):
        return self.get_query_params().get('PromotionNo')

    def set_PromotionNo(self, PromotionNo):
        self.add_query_param('PromotionNo', PromotionNo)

    def get_UserClientIp(self):
        return self.get_query_params().get('UserClientIp')

    def set_UserClientIp(self, UserClientIp):
        self.add_query_param('UserClientIp', UserClientIp)

    def get_Lang(self):
        return self.get_query_params().get('Lang')

    def set_Lang(self, Lang):
        self.add_query_param('Lang', Lang)

    def get_UsePromotion(self):
        return self.get_query_params().get('UsePromotion')

    def set_UsePromotion(self, UsePromotion):
        self.add_query_param('UsePromotion', UsePromotion)
acf3af955926041a2fba509e6ae727e78e0d0be1 | 137 | py | Python | splitiorequests/models/splits/tag.py | mikoblog/splitio-requests | c09b069e49f5224cbe3f892a5731c62885fd6151 | [
"MIT"
] | null | null | null | splitiorequests/models/splits/tag.py | mikoblog/splitio-requests | c09b069e49f5224cbe3f892a5731c62885fd6151 | [
"MIT"
] | 4 | 2020-11-22T12:07:45.000Z | 2021-03-26T23:44:12.000Z | splitiorequests/models/splits/tag.py | mikoblog/splitio-requests | c09b069e49f5224cbe3f892a5731c62885fd6151 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Tag dataclass"""
from dataclasses import dataclass
@dataclass
class Tag:
    """Split.io tag model.

    Represents a single tag attached to a split; carries only the tag's
    name as returned/accepted by the Split.io Admin API.
    """
    # Human-readable tag name.
    name: str
| 12.454545 | 33 | 0.613139 |
acf3afd932574530ec91dec71639f4be3b70cd1b | 17,134 | py | Python | bundle_cache/app_store/tk-maya/v0.9.3.1/hooks/tk-multi-publish2/basic/publish_session.py | ColinKennedy/tk-config-default2-respawn | 855fb8033daa549b92615792442f19a7f9c4f55c | [
"Linux-OpenIB"
] | 4 | 2019-01-11T03:41:28.000Z | 2019-09-12T06:57:17.000Z | bundle_cache/app_store/tk-maya/v0.9.3.1/hooks/tk-multi-publish2/basic/publish_session.py | ColinKennedy/tk-config-default2-respawn | 855fb8033daa549b92615792442f19a7f9c4f55c | [
"Linux-OpenIB"
] | null | null | null | bundle_cache/app_store/tk-maya/v0.9.3.1/hooks/tk-multi-publish2/basic/publish_session.py | ColinKennedy/tk-config-default2-respawn | 855fb8033daa549b92615792442f19a7f9c4f55c | [
"Linux-OpenIB"
] | 2 | 2019-01-10T05:00:18.000Z | 2020-02-15T16:32:56.000Z | # Copyright (c) 2017 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import os
import maya.cmds as cmds
import maya.mel as mel
import sgtk
from sgtk.util.filesystem import ensure_folder_exists
HookBaseClass = sgtk.get_hook_baseclass()
class MayaSessionPublishPlugin(HookBaseClass):
"""
Plugin for publishing an open maya session.
This hook relies on functionality found in the base file publisher hook in
the publish2 app and should inherit from it in the configuration. The hook
setting for this plugin should look something like this::
hook: "{self}/publish_file.py:{engine}/tk-multi-publish2/basic/publish_session.py"
"""
# NOTE: The plugin icon and name are defined by the base file plugin.
    @property
    def description(self):
        """
        Verbose, multi-line description of what the plugin does. This can
        contain simple html for formatting.
        """

        # Shotgun Loader documentation URL, interpolated into the HTML below.
        loader_url = "https://support.shotgunsoftware.com/hc/en-us/articles/219033078"

        return """
        Publishes the file to Shotgun. A <b>Publish</b> entry will be
        created in Shotgun which will include a reference to the file's current
        path on disk. If a publish template is configured, a copy of the
        current session will be copied to the publish template path which
        will be the file that is published. Other users will be able to access
        the published file via the <b><a href='%s'>Loader</a></b> so long as
        they have access to the file's location on disk.

        If the session has not been saved, validation will fail and a button
        will be provided in the logging output to save the file.

        <h3>File versioning</h3>

        If the filename contains a version number, the process will bump the
        file to the next version after publishing.

        The <code>version</code> field of the resulting <b>Publish</b> in
        Shotgun will also reflect the version number identified in the filename.
        The basic worklfow recognizes the following version formats by default:

        <ul>
        <li><code>filename.v###.ext</code></li>
        <li><code>filename_v###.ext</code></li>
        <li><code>filename-v###.ext</code></li>
        </ul>

        After publishing, if a version number is detected in the work file, the
        work file will automatically be saved to the next incremental version
        number. For example, <code>filename.v001.ext</code> will be published
        and copied to <code>filename.v002.ext</code>

        If the next incremental version of the file already exists on disk, the
        validation step will produce a warning, and a button will be provided in
        the logging output which will allow saving the session to the next
        available version number prior to publishing.

        <br><br><i>NOTE: any amount of version number padding is supported. for
        non-template based workflows.</i>

        <h3>Overwriting an existing publish</h3>

        In non-template workflows, a file can be published multiple times,
        however only the most recent publish will be available to other users.
        Warnings will be provided during validation if there are previous
        publishes.
        """ % (loader_url,)
@property
def settings(self):
"""
Dictionary defining the settings that this plugin expects to receive
through the settings parameter in the accept, validate, publish and
finalize methods.
A dictionary on the following form::
{
"Settings Name": {
"type": "settings_type",
"default": "default_value",
"description": "One line description of the setting"
}
The type string should be one of the data types that toolkit accepts as
part of its environment configuration.
"""
# inherit the settings from the base publish plugin
base_settings = super(MayaSessionPublishPlugin, self).settings or {}
# settings specific to this class
maya_publish_settings = {
"Publish Template": {
"type": "template",
"default": None,
"description": "Template path for published work files. Should"
"correspond to a template defined in "
"templates.yml.",
}
}
# update the base settings
base_settings.update(maya_publish_settings)
return base_settings
@property
def item_filters(self):
"""
List of item types that this plugin is interested in.
Only items matching entries in this list will be presented to the
accept() method. Strings can contain glob patters such as *, for example
["maya.*", "file.maya"]
"""
return ["maya.session"]
def accept(self, settings, item):
"""
Method called by the publisher to determine if an item is of any
interest to this plugin. Only items matching the filters defined via the
item_filters property will be presented to this method.
A publish task will be generated for each item accepted here. Returns a
dictionary with the following booleans:
- accepted: Indicates if the plugin is interested in this value at
all. Required.
- enabled: If True, the plugin will be enabled in the UI, otherwise
it will be disabled. Optional, True by default.
- visible: If True, the plugin will be visible in the UI, otherwise
it will be hidden. Optional, True by default.
- checked: If True, the plugin will be checked in the UI, otherwise
it will be unchecked. Optional, True by default.
:param settings: Dictionary of Settings. The keys are strings, matching
the keys returned in the settings property. The values are `Setting`
instances.
:param item: Item to process
:returns: dictionary with boolean keys accepted, required and enabled
"""
# if a publish template is configured, disable context change. This
# is a temporary measure until the publisher handles context switching
# natively.
if settings.get("Publish Template").value:
item.context_change_allowed = False
path = _session_path()
if not path:
# the session has not been saved before (no path determined).
# provide a save button. the session will need to be saved before
# validation will succeed.
self.logger.warn(
"The Maya session has not been saved.",
extra=_get_save_as_action()
)
self.logger.info(
"Maya '%s' plugin accepted the current Maya session." %
(self.name,)
)
return {
"accepted": True,
"checked": True
}
    def validate(self, settings, item):
        """
        Validates the given item to check that it is ok to publish. Returns a
        boolean to indicate validity.

        :param settings: Dictionary of Settings. The keys are strings, matching
            the keys returned in the settings property. The values are `Setting`
            instances.
        :param item: Item to process
        :returns: True if item is valid, False otherwise.
        :raises Exception: if the session is unsaved, or if the next version of
            the work file already exists on disk.
        """

        publisher = self.parent
        path = _session_path()

        # ---- ensure the session has been saved

        if not path:
            # the session still requires saving. provide a save button.
            # validation fails.
            error_msg = "The Maya session has not been saved."
            self.logger.error(
                error_msg,
                extra=_get_save_as_action()
            )
            raise Exception(error_msg)

        # ensure we have an updated project root
        project_root = cmds.workspace(q=True, rootDirectory=True)
        item.properties["project_root"] = project_root

        # log if no project root could be determined.
        if not project_root:
            self.logger.info(
                "Your session is not part of a maya project.",
                extra={
                    "action_button": {
                        "label": "Set Project",
                        "tooltip": "Set the maya project",
                        "callback": lambda: mel.eval('setProject ""')
                    }
                }
            )

        # ---- check the session against any attached work template

        # get the path in a normalized state. no trailing separator,
        # separators are appropriate for current os, no double separators,
        # etc.
        path = sgtk.util.ShotgunPath.normalize(path)

        # if the session item has a known work template, see if the path
        # matches. if not, warn the user and provide a way to save the file to
        # a different path
        work_template = item.properties.get("work_template")
        if work_template:
            if not work_template.validate(path):
                # non-fatal: only a warning; validation continues
                self.logger.warning(
                    "The current session does not match the configured work "
                    "file template.",
                    extra={
                        "action_button": {
                            "label": "Save File",
                            "tooltip": "Save the current Maya session to a "
                                       "different file name",
                            # will launch wf2 if configured
                            "callback": _get_save_as_action()
                        }
                    }
                )
            else:
                self.logger.debug(
                    "Work template configured and matches session file.")
        else:
            self.logger.debug("No work template configured.")

        # ---- see if the version can be bumped post-publish

        # check to see if the next version of the work file already exists on
        # disk. if so, warn the user and provide the ability to jump to save
        # to that version now
        (next_version_path, version) = self._get_next_version_info(path, item)
        if next_version_path and os.path.exists(next_version_path):

            # determine the next available version_number. just keep asking for
            # the next one until we get one that doesn't exist.
            while os.path.exists(next_version_path):
                (next_version_path, version) = self._get_next_version_info(
                    next_version_path, item)

            # fatal: publishing now would collide with an existing version
            error_msg = "The next version of this file already exists on disk."
            self.logger.error(
                error_msg,
                extra={
                    "action_button": {
                        "label": "Save to v%s" % (version,),
                        "tooltip": "Save to the next available version number, "
                                   "v%s" % (version,),
                        "callback": lambda: _save_session(next_version_path)
                    }
                }
            )
            raise Exception(error_msg)

        # ---- populate the necessary properties and call base class validation

        # populate the publish template on the item if found
        publish_template_setting = settings.get("Publish Template")
        publish_template = publisher.engine.get_template_by_name(
            publish_template_setting.value)
        if publish_template:
            item.properties["publish_template"] = publish_template

        # set the session path on the item for use by the base plugin validation
        # step. NOTE: this path could change prior to the publish phase.
        item.properties["path"] = path

        # run the base class validation
        return super(MayaSessionPublishPlugin, self).validate(settings, item)
    def publish(self, settings, item):
        """
        Executes the publish logic for the given item and settings.

        Saves the current Maya session to its (possibly renamed) path,
        records the path and session dependencies on the item, then defers
        to the base class to register the publish.

        :param settings: Dictionary of Settings. The keys are strings, matching
            the keys returned in the settings property. The values are `Setting`
            instances.
        :param item: Item to process
        """

        # get the path in a normalized state. no trailing separator, separators
        # are appropriate for current os, no double separators, etc.
        path = sgtk.util.ShotgunPath.normalize(_session_path())

        # ensure the session is saved
        _save_session(path)

        # update the item with the saved session path
        item.properties["path"] = path

        # add dependencies for the base class to register when publishing
        item.properties["publish_dependencies"] = \
            _maya_find_additional_session_dependencies()

        # let the base class register the publish
        super(MayaSessionPublishPlugin, self).publish(settings, item)
    def finalize(self, settings, item):
        """
        Execute the finalization pass. This pass executes once all the publish
        tasks have completed, and can for example be used to version up files.

        :param settings: Dictionary of Settings. The keys are strings, matching
            the keys returned in the settings property. The values are `Setting`
            instances.
        :param item: Item to process
        """

        # do the base class finalization
        super(MayaSessionPublishPlugin, self).finalize(settings, item)

        # bump the session file to the next version
        self._save_to_next_version(item.properties["path"], item, _save_session)
def _maya_find_additional_session_dependencies():
    """
    Find additional dependencies from the current Maya session.

    Collects file paths from scene references and from non-referenced file
    texture nodes, with separators converted to the current platform's style.

    :returns: list of unique dependency paths
    """
    dependency_paths = set()

    # scene references first
    for reference_node in cmds.ls(references=True):
        # resolve the reference to its file path
        resolved = cmds.referenceQuery(reference_node, filename=True)
        # make it platform dependent (maya uses C:/style/paths)
        resolved = resolved.replace("/", os.path.sep)
        if resolved:
            dependency_paths.add(resolved)

    # then file texture nodes
    for texture_node in cmds.ls(l=True, type="file"):
        if cmds.referenceQuery(texture_node, isNodeReferenced=True):
            # embedded in another reference; don't include it in the breakdown
            continue

        # get path and make it platform dependent (maya uses C:/style/paths)
        texture_file = cmds.getAttr(
            "%s.fileTextureName" % texture_node).replace("/", os.path.sep)
        if texture_file:
            dependency_paths.add(texture_file)

    return list(dependency_paths)
def _session_path():
    """
    Return the path to the current session.

    On Python 2, Maya returns a ``unicode`` scene name which is encoded to
    utf-8 bytes; on Python 3 the ``str`` result is returned unchanged.

    :return: current scene path (may be empty if the session is unsaved)
    """
    path = cmds.file(query=True, sn=True)

    # The original check `isinstance(path, unicode)` raises NameError on
    # Python 3, where `unicode` no longer exists. On Python 2 a non-`str`
    # string is exactly a `unicode`, so this is behavior-identical there.
    if path is not None and not isinstance(path, str):
        path = path.encode("utf-8")

    return path
def _save_session(path):
    """
    Save the current session to the supplied path.
    """
    # Maya can choose the wrong file type, so set it explicitly from the
    # extension.
    lowered = path.lower()
    if lowered.endswith(".ma"):
        file_type = "mayaAscii"
    elif lowered.endswith(".mb"):
        file_type = "mayaBinary"
    else:
        file_type = None

    # Maya won't ensure that the folder is created when saving, so we must
    # make sure it exists before renaming/saving.
    ensure_folder_exists(os.path.dirname(path))

    cmds.file(rename=path)

    # save the scene, forcing the explicit type when we determined one
    if file_type:
        cmds.file(save=True, force=True, type=file_type)
    else:
        cmds.file(save=True, force=True)
# TODO: method duplicated in all the maya hooks
def _get_save_as_action():
    """
    Simple helper for returning a log action dict for saving the session.

    Prefers the tk-multi-workfiles2 save dialog when that app is configured;
    otherwise falls back to Maya's native save-scene command.
    """
    engine = sgtk.platform.current_engine()

    # default save callback
    save_callback = cmds.SaveScene

    # if workfiles2 is configured, use that for file save
    if "tk-multi-workfiles2" in engine.apps:
        workfiles_app = engine.apps["tk-multi-workfiles2"]
        if hasattr(workfiles_app, "show_file_save_dlg"):
            save_callback = workfiles_app.show_file_save_dlg

    return {
        "action_button": {
            "label": "Save As...",
            "tooltip": "Save the current session",
            "callback": save_callback
        }
    }
| 37.41048 | 94 | 0.616027 |
acf3b0a6eb08e4441f3a265ada298dec0c85f2f2 | 5,458 | py | Python | generation.py | Carterj3/Hyperion | cabbbbf20c12da02335c4879a54bf7562e03118b | [
"MIT"
] | null | null | null | generation.py | Carterj3/Hyperion | cabbbbf20c12da02335c4879a54bf7562e03118b | [
"MIT"
] | null | null | null | generation.py | Carterj3/Hyperion | cabbbbf20c12da02335c4879a54bf7562e03118b | [
"MIT"
] | null | null | null | import itertools
import math
import wave
from array import array
import random
PROGRESSION = ["Cmaj7", "Dm7", "G7"]
CHORDS = {
"Dm7": [38, 50, 53, 57, 60],
"G7": [31, 50, 53, 55, 59],
"Cmaj7": [36, 48, 52, 55, 59]
}
for a in range(1,100):
s = "a"+str(a)
CHORDS[s] = [random.randint(40,60) for x in range(5)]
PROGRESSION.append(s)
class Voice(object):
    """A single synthesizer voice: an oscillator shaped by an ADSR envelope.

    Iterating yields one sample at a time; after ``length`` samples the
    envelope release is triggered, and StopIteration propagates from the
    envelope once it has fully decayed.

    :param note: MIDI note number to play.
    :param length: note length in samples before release is triggered.
    """

    def __init__(self, note, length):
        self.note = note
        self.length = length
        self.released = False
        self.t = 0  # samples rendered so far
        self.oscillator = oscillator(self.note)
        self.adsr = ADSREnvelope(0.1, 1.0, 0.7, 3.0)

    def __iter__(self):
        return self

    def next(self):
        """Render and return the next sample."""
        if self.t >= self.length:
            # note has run its course: trigger the release (re-triggering on
            # every subsequent sample is harmless — it's idempotent)
            self.released = True
            self.adsr.trigger_release()
        self.t += 1

        sample = next(self.adsr) * next(self.oscillator)
        # Add filters and other neat effects here, e.g. by feeding the signal
        # to a coroutine.
        return sample

    # Python 3 iterator protocol: the builtin next() calls __next__ there,
    # while the original `next` name keeps Python 2 working unchanged.
    __next__ = next
class ADSREnvelope(object):
    """ ADSR envelope generator class.

    Iterating yields the envelope level one sample (at 44.1 kHz) at a time:
    an exponential attack toward 1.0, a decay toward ``sustain``, and — once
    :meth:`trigger_release` is called — an exponential release. StopIteration
    is raised when the released level falls below zero.
    """

    # fraction of a target reached after one time constant (1 - 1/e); the
    # attack/release aim past their endpoints by 1/RATIO so they finish in
    # roughly the requested number of seconds instead of only asymptotically
    RATIO = 1.0 - 1.0 / math.e

    def __init__(self, attack, decay, sustain, release):
        """
        :param attack: attack time in seconds
        :param decay: decay time in seconds
        :param sustain: sustain level (0.0 - 1.0)
        :param release: release time in seconds
        """
        self.attacking = True
        self.released = False
        self.level = 0.0

        # one-pole smoothing coefficient for a given time constant at 44.1 kHz
        compute_coefficient = lambda time: 1.0 - math.exp(-1.0 / (time * 44100.0))
        self.attack = compute_coefficient(attack)
        self.decay = compute_coefficient(decay)
        self.sustain = sustain
        self.release = compute_coefficient(release)

    def __iter__(self):
        return self

    def trigger_release(self):
        """Switch the envelope into its release phase."""
        self.released = True

    def next(self):
        """Advance one sample and return the new level."""
        if self.released:
            # decay toward a negative target so the level crosses zero in
            # finite time; crossing zero ends the envelope
            self.level += self.release * (1.0 - (1.0 / self.RATIO) - self.level)
            if self.level < 0.0:
                # envelope finished
                raise StopIteration
        else:
            if self.attacking:
                # overshoot target (1/RATIO) so 1.0 is reached in finite time
                self.level += self.attack * ((1.0 / self.RATIO) - self.level)
                if self.level > 1.0:
                    # attack phase finished
                    self.level = 1.0
                    self.attacking = False
            else:
                self.level += self.decay * (self.sustain - self.level)
        return self.level

    # Python 3 iterator protocol: builtin next() calls __next__ there; the
    # original `next` name keeps Python 2 working unchanged.
    __next__ = next
def oscillator(pitch):
    """Generate a waveform (fundamental plus first overtone) at a given pitch.

    :param pitch: MIDI note number; 69 maps to 440 Hz (equal temperament).
    """
    # equal-temperament conversion from MIDI note number to Hz
    freq = (2.0 ** ((pitch - 69.0) / 12.0)) * 440.0
    # phase increment per sample at 44.1 kHz
    step = 2.0 * math.pi * freq / 44100.0

    phase = 0.0
    while True:
        yield math.sin(phase) + math.sin(2.0 * phase)
        phase += step
def amplifier(gain, iterable):
    """ Attenuate the input signal by a given gain factor """
    for sample in iterable:
        yield gain * sample
def chord_generator(iterable):
    """ Converts chord symbols to a list of MIDI notes. """
    for chord_symbol in iterable:
        yield CHORDS[chord_symbol]
def comp_pattern_generator(iterable):
    """Converts a list of MIDI notes to (length, notes) tuples in a jazzy pattern.

    For each chord, alternates the full voicing with its bass note and ends
    the bar on the bass note raised a fifth (7 semitones).
    """
    for chord in iterable:
        events = [
            (600, chord),
            (300, chord[0:1]),
            (300, chord),
            (600, chord[0:1]),
            (300, chord),
            (300, [chord[0] + 7]),
        ]
        for event in events:
            yield event
def voice_generator(iterable):
    """ Converts a (length, notes) tuple into a (start time, list of voices) tuple """
    start_time = 0
    for length, pitches in iterable:
        yield (start_time, [Voice(pitch, length) for pitch in pitches])
        start_time += length
def voice_combiner(iterable):
    """Render samples from voices and maintain a voice pool.

    ``iterable`` is an iterator of ``(start time in ms, list of voices)``
    tuples ordered by start time. Each yielded value is the sum of the
    currently active voices' samples; a voice is dropped from the pool once
    it raises StopIteration. The generator ends when the event stream is
    exhausted and the pool is empty.

    Fix vs. the original: under PEP 479 (Python 3.7+) a StopIteration that
    escapes a generator body becomes a RuntimeError, so both the explicit
    ``raise StopIteration`` and the unguarded initial ``next(iterable)``
    are replaced with ``return`` / try-except. On Python 2 the behavior is
    identical.
    """
    t = 0.0
    stopping = False
    voice_pool = []

    try:
        voice_time, voice_list = next(iterable)
    except StopIteration:
        # no events at all: end immediately
        return

    while True:
        # add new voices to the pool once their start time has been reached
        while t >= voice_time:
            voice_pool.extend(voice_list)
            try:
                voice_time, voice_list = next(iterable)
            except StopIteration:
                voice_time = float("inf")
                stopping = True

        # pull samples from voices and mix them
        sample = 0.0
        pending_removal = []
        for voice in voice_pool:
            try:
                sample += next(voice)
            except StopIteration:
                # voice has stopped, remove it from the pool
                pending_removal.append(voice)

        # clean up pool
        for voice in pending_removal:
            voice_pool.remove(voice)

        # stop yielding if we're done (plain return, not raise StopIteration)
        if stopping and len(voice_pool) == 0:
            return

        yield sample
        t += 1000.0 / 44100.0
def quantizer(iterable):
    """ Converts floating point audio signals to 16 bit integers """
    for sample in iterable:
        yield int(32767.0 * sample)
# create pipeline: chord symbols -> notes -> comp pattern -> voices ->
# mixed samples -> attenuated -> 16-bit integers (all lazy generators)
chords = chord_generator(PROGRESSION)
comp_pattern = comp_pattern_generator(chords)
voices = voice_generator(comp_pattern)
samples = voice_combiner(voices)
attenuated_samples = amplifier(0.5, samples)
output = quantizer(attenuated_samples)

# prepare audio stream: mono, 16-bit samples, 44.1 kHz, random filename
audiofile = wave.open("output"+str(random.randint(1,10000))+".wav", "wb")
audiofile.setnchannels(1)
audiofile.setsampwidth(2)
audiofile.setframerate(44100)

# render samples (materializes the whole lazy pipeline into memory)
output = list(output)
# NOTE(review): the same buffer is written twice, doubling the audio —
# presumably intentional (plays the piece twice); confirm.
audiofile.writeframes(array('h', output))
audiofile.writeframes(array('h', output))
audiofile.close()
acf3b268092ccb3b4099f775b38404faa9b1875c | 4,858 | py | Python | bitcoinetl/mappers/transaction_mapper.py | jsvisa/bitcoin-etl | 5f59c9bb0dfbd16810e53e53451764ed6cfe8e66 | [
"MIT"
] | 1 | 2019-03-27T10:34:35.000Z | 2019-03-27T10:34:35.000Z | bitcoinetl/mappers/transaction_mapper.py | jsvisa/bitcoin-etl | 5f59c9bb0dfbd16810e53e53451764ed6cfe8e66 | [
"MIT"
] | null | null | null | bitcoinetl/mappers/transaction_mapper.py | jsvisa/bitcoin-etl | 5f59c9bb0dfbd16810e53e53451764ed6cfe8e66 | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2018 Omidiora Samuel, samparsky@gmail.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from bitcoinetl.btc_utils import bitcoin_to_satoshi
from bitcoinetl.domain.transaction import BtcTransaction
from bitcoinetl.mappers.join_split_mapper import BtcJoinSplitMapper
from bitcoinetl.mappers.transaction_input_mapper import BtcTransactionInputMapper
from bitcoinetl.mappers.transaction_output_mapper import BtcTransactionOutputMapper
# http://chainquery.com/bitcoin-api/getblock
# http://chainquery.com/bitcoin-api/getrawtransaction
class BtcTransactionMapper(object):
    """Maps Bitcoin transactions between node JSON-RPC dicts,
    ``BtcTransaction`` domain objects and flat export dicts.

    Input/output/joinsplit mapping is delegated to dedicated sub-mappers.
    """

    def __init__(self):
        self.transaction_input_mapper = BtcTransactionInputMapper()
        self.transaction_output_mapper = BtcTransactionOutputMapper()
        self.join_split_mapper = BtcJoinSplitMapper()

    def json_dict_to_transaction(self, json_dict, block=None):
        """Build a BtcTransaction from a verbose getrawtransaction result.

        :param json_dict: transaction dict as returned by the node RPC
        :param block: optional block domain object; when given, its
            number/hash/timestamp take precedence over the transaction-level
            ``blockhash``/``blocktime`` fields.
        """
        transaction = BtcTransaction()
        transaction.hash = json_dict.get('txid')
        transaction.size = json_dict.get('size')
        transaction.virtual_size = json_dict.get('vsize')
        transaction.version = json_dict.get('version')
        transaction.lock_time = json_dict.get('locktime')

        if block is not None:
            transaction.block_number = block.number

        # the RPC field is used first, then overwritten by the enclosing
        # block's value when one was passed in
        transaction.block_hash = json_dict.get('blockhash')
        if block is not None:
            transaction.block_hash = block.hash

        transaction.block_timestamp = json_dict.get('blocktime')
        if block is not None:
            transaction.block_timestamp = block.timestamp

        transaction.inputs = self.transaction_input_mapper.vin_to_inputs(json_dict.get('vin'))
        transaction.outputs = self.transaction_output_mapper.vout_to_outputs(json_dict.get('vout'))

        # Only Zcash
        transaction.join_splits = self.join_split_mapper.vjoinsplit_to_join_splits(json_dict.get('vjoinsplit'))
        transaction.value_balance = bitcoin_to_satoshi(json_dict.get('valueBalance'))

        return transaction

    def transaction_to_dict(self, transaction):
        """Serialize a BtcTransaction into a flat dict for export.

        Includes derived fields (input/output counts, input/output values,
        fee) computed by the domain object's helper methods.
        """
        result = {
            'type': 'transaction',
            'hash': transaction.hash,
            'size': transaction.size,
            'virtual_size': transaction.virtual_size,
            'version': transaction.version,
            'lock_time': transaction.lock_time,
            'block_number': transaction.block_number,
            'block_hash': transaction.block_hash,
            'block_timestamp': transaction.block_timestamp,
            'is_coinbase': transaction.is_coinbase,
            'inputs': self.transaction_input_mapper.inputs_to_dicts(transaction.inputs),
            'outputs': self.transaction_output_mapper.outputs_to_dicts(transaction.outputs),
            'input_count': len(transaction.inputs),
            'output_count': len(transaction.outputs),
            'input_value': transaction.calculate_input_value(),
            'output_value': transaction.calculate_output_value(),
            'fee': transaction.calculate_fee(),
        }
        return result

    def dict_to_transaction(self, dict):
        """Rebuild a BtcTransaction from a dict produced by transaction_to_dict.

        NOTE(review): the parameter shadows the ``dict`` builtin; renaming it
        would be cleaner but could break keyword-argument callers.
        Join splits and value_balance are not round-tripped here.
        """
        transaction = BtcTransaction()
        transaction.hash = dict.get('hash')
        transaction.size = dict.get('size')
        transaction.virtual_size = dict.get('virtual_size')
        transaction.version = dict.get('version')
        transaction.lock_time = dict.get('lock_time')
        transaction.block_number = dict.get('block_number')
        transaction.block_hash = dict.get('block_hash')
        transaction.block_timestamp = dict.get('block_timestamp')
        transaction.is_coinbase = dict.get('is_coinbase')
        transaction.inputs = self.transaction_input_mapper.dicts_to_inputs(dict.get('inputs'))
        transaction.outputs = self.transaction_output_mapper.dicts_to_outputs(dict.get('outputs'))
        return transaction
| 45.401869 | 111 | 0.720049 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.