"""Incantations only a selected group of High Wizards can utilize.
"""
from re import * # pylint: disable=W0614, W0401, W0622
|
#!/usr/bin/env python
from .fmap import FieldEnhance, FieldToRadS, FieldToHz, Phasediff2Fieldmap, Phases2Fieldmap
|
import os
import json
from flask import Flask, jsonify, request, send_file
from flask_cors import CORS, cross_origin
DEFAULT_LOGO = 'logos/default.jpg'
app = Flask(__name__)
CORS(app)
@app.route('/')
def hello():
return "Hello World!"
@app.route('/logos/<org>', methods=['GET'])
def get_image(org):
if os.path.exists(f'logos/{org}.png'):
return send_file(f'logos/{org}.png')
if os.path.exists(f'logos/{org}.jpg'):
return send_file(f'logos/{org}.jpg')
return send_file(DEFAULT_LOGO)
@app.route('/rankings/<file_name>', methods=['GET'])
def get_json(file_name):
with open(f'rankings/{file_name}.json', 'r') as f:
return jsonify(json.load(f))
if __name__ == "__main__":
app.run()
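# A quick smoke-test sketch using Flask's built-in test client (runs in-process,
# no server needed; 'acme' is a placeholder org name):
# with app.test_client() as client:
#     assert client.get('/').data == b"Hello World!"
#     logo = client.get('/logos/acme')  # falls back to DEFAULT_LOGO when no match exists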
|
import numpy as np
import pandas as pd
import hydrostats.data as hd
import hydrostats.visual as hv
import HydroErr as he
import matplotlib.pyplot as plt
import os
from netCDF4 import Dataset
# Put all the directories (different states and resolutions) and corresponding NetCDF files into lists.
list_of_files = []
list_of_dir = []
streamflow_dict = {}
list_streams = []
base_dir = '/home/chrisedwards/Documents/rapid_output/mult_res_output'
for i in os.listdir(base_dir):
    for j in os.listdir(os.path.join(base_dir, i)):
        list_of_files.append(os.path.join(base_dir, i, j,
                                          'Qout_erai_t511_24hr_19800101to20141231.nc'))
        list_of_dir.append(os.path.join(base_dir, i, j))
list_of_dir.sort()
list_of_files.sort()
list_of_states=['az', 'id', 'mo', 'ny', 'or', 'col',
'az', 'id', 'mo', 'ny', 'or', 'col',
'az', 'id', 'mo', 'ny', 'or', 'col']
list_of_states.sort()
# Loop through the lists to create the csv for each stream, in each resolution.
for file, direc, state in zip(list_of_files, list_of_dir, list_of_states):
    # Open the NetCDF file.
    nc = Dataset(file)
# Define variables from the NetCDF file.
riv = nc.variables['rivid'][:].tolist()
lat = nc.variables['lat'][:]
lon = nc.variables['lon'][:]
Q = nc.variables['Qout'][:]
sQ = nc.variables['sQout'][:]
time = nc.variables['time'][:].tolist()
# Convert time from 'seconds since 1970' to the actual date.
dates = pd.to_datetime(time, unit='s', origin='unix')
temp_dictionary = {}
counter = 0
    for n in riv:
        stream_name = '{}-{}'.format(state, n)
        temp_dictionary[stream_name] = pd.DataFrame(data=Q[:, counter], index=dates, columns=['Flow'])
        streamflow_dict.update(temp_dictionary)
        list_streams.append(stream_name)
        counter += 1
list_streams_condensed = list(set(list_streams))
list_streams_condensed.sort()
# Now there is a dictionary called 'streamflow_dict' that has the 35-yr time series stored in a pandas DataFrame.
# Each DataFrame holds the datetime index and the flow rate.
# Each data frame is named in the format '{state}-{streamID}' (eg: 'az-7' or 'col-9').
# There are a total of 180 streams, or 180 keys in the dictionary: streamflow_dict['az-7']
# list_streams_condensed = list of all the stream names, or names of the data frames.
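# A commented sketch of pulling one stream out of the dictionary (the key
# 'az-7' is one of the names described above; left commented so the script's
# behavior is unchanged):
# az_7 = streamflow_dict['az-7']            # DataFrame indexed by date, column 'Flow'
# print(az_7['Flow'].resample('M').mean())  # e.g. monthly mean flow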
# ***************************************************************************************************************
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import logging
from PIL import Image
import cv2
import numpy as np
import torch
from torch.utils.data import Dataset
import torchvision.transforms.functional as TF
from torchvision import transforms
import random
import os
class DisasterDataset(Dataset):
def __init__(self, data_dir, i_shard, set_name, data_mean_stddev, transform:bool, normalize:bool):
self.data_dir = data_dir
self.transform = transform
self.normalize = normalize
self.data_mean_stddev = data_mean_stddev
shard_path = os.path.join(data_dir, f'{set_name}_pre_image_chips_{i_shard}.npy')
self.pre_image_chip_shard = np.load(shard_path)
        logging.info(f'pre_image_chips loaded: {self.pre_image_chip_shard.shape}')
shard_path = os.path.join(data_dir, f'{set_name}_post_image_chips_{i_shard}.npy')
self.post_image_chip_shard = np.load(shard_path)
        logging.info(f'post_image_chips loaded: {self.post_image_chip_shard.shape}')
shard_path = os.path.join(data_dir, f'{set_name}_bld_mask_chips_{i_shard}.npy')
self.bld_mask_chip_shard = np.load(shard_path)
        logging.info(f'bld_mask_chips loaded: {self.bld_mask_chip_shard.shape}')
shard_path = os.path.join(data_dir, f'{set_name}_dmg_mask_chips_{i_shard}.npy')
self.dmg_mask_chip_shard = np.load(shard_path)
        logging.info(f'dmg_mask_chips loaded: {self.dmg_mask_chip_shard.shape}')
shard_path = os.path.join(data_dir, f'{set_name}_pre_img_tile_chips_{i_shard}.npy')
self.pre_img_tile_chip_shard = np.load(shard_path)
        logging.info(f'pre_img_tile_chips loaded: {self.pre_img_tile_chip_shard.shape}')
def __len__(self):
return len(self.pre_image_chip_shard)
    @staticmethod
    def apply_transform(mask, pre_img, post_img, damage_class):
        '''
        Apply transformation functions on cv2 arrays.
        '''
# Random horizontal flipping
if random.random() > 0.5:
mask = cv2.flip(mask, flipCode=1)
pre_img = cv2.flip(pre_img, flipCode=1)
post_img = cv2.flip(post_img, flipCode=1)
damage_class = cv2.flip(damage_class, flipCode=1)
# Random vertical flipping
if random.random() > 0.5:
mask = cv2.flip(mask, flipCode=0)
pre_img = cv2.flip(pre_img, flipCode=0)
post_img = cv2.flip(post_img, flipCode=0)
damage_class = cv2.flip(damage_class, flipCode=0)
return mask, pre_img, post_img, damage_class
def __getitem__(self, i):
pre_img = self.pre_image_chip_shard[i]
post_img = self.post_image_chip_shard[i]
mask = self.bld_mask_chip_shard[i]
damage_class= self.dmg_mask_chip_shard[i]
# copy original image for viz
pre_img_orig = pre_img
post_img_orig = post_img
if self.transform is True:
mask, pre_img, post_img, damage_class = self.apply_transform(mask, pre_img, post_img, damage_class)
if self.normalize is True:
pre_img_tile_name = self.pre_img_tile_chip_shard[i]
post_img_tile_name = pre_img_tile_name.replace('pre', 'post')
# normalize the images based on a tilewise mean & std dev --> pre_
mean_pre = self.data_mean_stddev[pre_img_tile_name][0]
stddev_pre = self.data_mean_stddev[pre_img_tile_name][1]
norm_pre = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=mean_pre, std=stddev_pre)
])
pre_img = norm_pre(np.array(pre_img).astype(dtype='float64')/255.0)
# normalize the images based on a tilewise mean & std dev --> post_
mean_post = self.data_mean_stddev[post_img_tile_name][0]
stddev_post = self.data_mean_stddev[post_img_tile_name][1]
norm_post = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=mean_post, std=stddev_post)
])
post_img = norm_post(np.array(post_img).astype(dtype='float64')/255.0)
else:
pre_img = np.array(transforms.ToTensor()(pre_img)).astype(dtype='float64')/255.0
post_img = np.array(transforms.ToTensor()(post_img)).astype(dtype='float64')/255.0
        # convert everything to arrays
pre_img = np.array(pre_img)
post_img = np.array(post_img)
mask = np.array(mask)
damage_class = np.array(damage_class)
# replace non-classified pixels with background
damage_class = np.where(damage_class==5, 0, damage_class)
return {'pre_image': torch.from_numpy(pre_img).type(torch.FloatTensor), 'post_image': torch.from_numpy(post_img).type(torch.FloatTensor), 'building_mask': torch.from_numpy(mask).type(torch.LongTensor), 'damage_mask': torch.from_numpy(damage_class).type(torch.LongTensor), 'pre_image_orig': transforms.ToTensor()(pre_img_orig), 'post_image_orig': transforms.ToTensor()(post_img_orig)}
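# Usage sketch (the directory, shard index, and mean/stddev mapping are
# placeholders): wrap one shard in a DataLoader for training.
# from torch.utils.data import DataLoader
# dataset = DisasterDataset('/data/chips', i_shard=0, set_name='train',
#                           data_mean_stddev=tile_mean_stddev, transform=True, normalize=True)
# loader = DataLoader(dataset, batch_size=8, shuffle=True)
# batch = next(iter(loader))  # dict with 'pre_image', 'post_image', masks, and originals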
|
# Select deconvolution method
from networks.G.FP import FP
from networks.G.Wiener import Wiener
def select_G(params, args):
if args.G_network == 'FP':
return FP(params, args)
elif args.G_network == 'Wiener':
return Wiener(params, args)
else:
        raise ValueError(f"Unsupported generator network: {args.G_network}")
|
#!/usr/bin/env python
"""
For more information on this API, please visit:
https://duo.com/docs/adminapi
-
Script Dependencies:
requests
Dependency Installation:
$ pip install -r requirements.txt
System Requirements:
- Duo MFA, Duo Access or Duo Beyond account with administrator privileges.
- Duo Admin API enabled
Copyright (c) 2020 Cisco and/or its affiliates.
This software is licensed to you under the terms of the Cisco Sample
Code License, Version 1.1 (the "License"). You may obtain a copy of the
License at
https://developer.cisco.com/docs/licenses
All use of the material herein must be in accordance with the terms of
the License. All rights not expressly granted by the License are
reserved. Unless required by applicable law or agreed to separately in
writing, software distributed under the License is distributed on an "AS
IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied.
"""
import json, base64, email.utils, hmac, hashlib, urllib3, urllib.parse
import requests
import pprint
import config
# Disable SSL warnings
urllib3.disable_warnings()
# Imported API configuration variables
API_HOSTNAME = config.DUO_API_HOSTNAME
S_KEY = config.DUO_API_SECRET_KEY
I_KEY = config.DUO_API_INTEGRATION_KEY
# Script specific variables
METHOD = 'DELETE'
# Get the integration id from the get_all_integrations.py output
INTEGRATION_KEY = ''
API_PATH = '/admin/v1/integrations/{}'.format(INTEGRATION_KEY)
PARAMS = {}
# Request signing helper function
def sign(method=METHOD,
host=API_HOSTNAME,
path=API_PATH,
params=PARAMS,
skey=S_KEY,
ikey=I_KEY):
"""
Return HTTP Basic Authentication ("Authorization" and "Date") headers.
method, host, path: strings from request
params: dict of request parameters
skey: secret key
ikey: integration key
"""
# create canonical string
now = email.utils.formatdate()
canon = [now, method.upper(), host.lower(), path]
args = []
for key in sorted(params.keys()):
val = params[key]
if isinstance(val, str):
val = val.encode("utf-8")
args.append(
'%s=%s' % (urllib.parse.quote(key, '~'), urllib.parse.quote(val, '~')))
canon.append('&'.join(args))
canon = '\n'.join(canon)
# sign canonical string
sig = hmac.new(skey.encode('utf-8'), canon.encode('utf-8'), hashlib.sha1)
auth = '%s:%s' % (ikey, sig.hexdigest())
encoded_auth = base64.b64encode(auth.encode('utf-8'))
# return headers
return {'Date': now, 'Authorization': 'Basic %s' % str(encoded_auth, 'UTF-8')}
if __name__ == "__main__":
url = "https://{}{}".format(API_HOSTNAME, API_PATH)
request_headers = sign()
integration = requests.request(METHOD, url, headers=request_headers, verify=False)
pprint.pprint(json.loads(integration.content))
|
# Databricks CLI
# Copyright 2017 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"), except
# that the use of services to which certain application programming
# interfaces (each, an "API") connect requires that the user first obtain
# a license for the use of the APIs from Databricks, Inc. ("Databricks"),
# by creating an account at www.databricks.com and agreeing to either (a)
# the Community Edition Terms of Service, (b) the Databricks Terms of
# Service, or (c) another written agreement between Licensee and Databricks
# for the use of the APIs.
#
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from databricks_cli.sdk import TokenService
class TokensApi(object):
def __init__(self, api_client):
self.client = TokenService(api_client)
def create(self, lifetime_seconds, comment):
return self.client.create_token(lifetime_seconds, comment)
def list(self):
return self.client.list_tokens()
def revoke(self, token_id):
return self.client.revoke_token(token_id)
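# Usage sketch (host and token are placeholders; ApiClient is this package's
# databricks_cli.sdk.api_client.ApiClient):
# from databricks_cli.sdk.api_client import ApiClient
# client = ApiClient(host='https://<workspace-url>', token='<personal-access-token>')
# tokens_api = TokensApi(client)
# new_token = tokens_api.create(lifetime_seconds=3600, comment='ci token')
# print(tokens_api.list())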
|
"""
sphinx.ext.autodoc.importer
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Importer utilities for autodoc
:copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import importlib
import traceback
import warnings
from typing import Any, Callable, Dict, List, Mapping, NamedTuple, Optional, Tuple
from sphinx.deprecation import RemovedInSphinx40Warning, deprecated_alias
from sphinx.pycode import ModuleAnalyzer
from sphinx.util import logging
from sphinx.util.inspect import isclass, isenumclass, safe_getattr
logger = logging.getLogger(__name__)
def mangle(subject: Any, name: str) -> str:
"""mangle the given name."""
try:
if isclass(subject) and name.startswith('__') and not name.endswith('__'):
return "_%s%s" % (subject.__name__, name)
except AttributeError:
pass
return name
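# e.g. mangle(Foo, '__secret') returns '_Foo__secret', mirroring Python's
# private-name mangling for attributes defined inside class Foo.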
def unmangle(subject: Any, name: str) -> Optional[str]:
"""unmangle the given name."""
try:
if isclass(subject) and not name.endswith('__'):
prefix = "_%s__" % subject.__name__
if name.startswith(prefix):
return name.replace(prefix, "__", 1)
else:
for cls in subject.__mro__:
prefix = "_%s__" % cls.__name__
if name.startswith(prefix):
# mangled attribute defined in parent class
return None
except AttributeError:
pass
return name
def import_module(modname: str, warningiserror: bool = False) -> Any:
"""
Call importlib.import_module(modname), convert exceptions to ImportError
"""
try:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=ImportWarning)
with logging.skip_warningiserror(not warningiserror):
return importlib.import_module(modname)
except BaseException as exc:
# Importing modules may cause any side effects, including
# SystemExit, so we need to catch all errors.
raise ImportError(exc, traceback.format_exc()) from exc
def import_object(modname: str, objpath: List[str], objtype: str = '',
attrgetter: Callable[[Any, str], Any] = safe_getattr,
warningiserror: bool = False) -> Any:
if objpath:
logger.debug('[autodoc] from %s import %s', modname, '.'.join(objpath))
else:
logger.debug('[autodoc] import %s', modname)
try:
module = None
exc_on_importing = None
objpath = list(objpath)
while module is None:
try:
module = import_module(modname, warningiserror=warningiserror)
logger.debug('[autodoc] import %s => %r', modname, module)
except ImportError as exc:
logger.debug('[autodoc] import %s => failed', modname)
exc_on_importing = exc
if '.' in modname:
# retry with parent module
modname, name = modname.rsplit('.', 1)
objpath.insert(0, name)
else:
raise
obj = module
parent = None
object_name = None
for attrname in objpath:
parent = obj
logger.debug('[autodoc] getattr(_, %r)', attrname)
mangled_name = mangle(obj, attrname)
obj = attrgetter(obj, mangled_name)
logger.debug('[autodoc] => %r', obj)
object_name = attrname
return [module, parent, object_name, obj]
except (AttributeError, ImportError) as exc:
if isinstance(exc, AttributeError) and exc_on_importing:
# restore ImportError
exc = exc_on_importing
if objpath:
errmsg = ('autodoc: failed to import %s %r from module %r' %
(objtype, '.'.join(objpath), modname))
else:
errmsg = 'autodoc: failed to import %s %r' % (objtype, modname)
if isinstance(exc, ImportError):
# import_module() raises ImportError having real exception obj and
# traceback
real_exc, traceback_msg = exc.args
if isinstance(real_exc, SystemExit):
errmsg += ('; the module executes module level statement '
'and it might call sys.exit().')
elif isinstance(real_exc, ImportError) and real_exc.args:
errmsg += '; the following exception was raised:\n%s' % real_exc.args[0]
else:
errmsg += '; the following exception was raised:\n%s' % traceback_msg
else:
errmsg += '; the following exception was raised:\n%s' % traceback.format_exc()
logger.debug(errmsg)
raise ImportError(errmsg) from exc
def get_module_members(module: Any) -> List[Tuple[str, Any]]:
"""Get members of target module."""
from sphinx.ext.autodoc import INSTANCEATTR
members = {} # type: Dict[str, Tuple[str, Any]]
for name in dir(module):
try:
value = safe_getattr(module, name, None)
members[name] = (name, value)
except AttributeError:
continue
# annotation only member (ex. attr: int)
if hasattr(module, '__annotations__'):
for name in module.__annotations__:
if name not in members:
members[name] = (name, INSTANCEATTR)
return sorted(list(members.values()))
Attribute = NamedTuple('Attribute', [('name', str),
('directly_defined', bool),
('value', Any)])
def get_object_members(subject: Any, objpath: List[str], attrgetter: Callable,
analyzer: ModuleAnalyzer = None) -> Dict[str, Attribute]:
"""Get members and attributes of target object."""
from sphinx.ext.autodoc import INSTANCEATTR
# the members directly defined in the class
obj_dict = attrgetter(subject, '__dict__', {})
members = {} # type: Dict[str, Attribute]
# enum members
if isenumclass(subject):
for name, value in subject.__members__.items():
if name not in members:
members[name] = Attribute(name, True, value)
superclass = subject.__mro__[1]
for name in obj_dict:
if name not in superclass.__dict__:
value = safe_getattr(subject, name)
members[name] = Attribute(name, True, value)
# members in __slots__
if isclass(subject) and getattr(subject, '__slots__', None) is not None:
from sphinx.ext.autodoc import SLOTSATTR
for name in subject.__slots__:
members[name] = Attribute(name, True, SLOTSATTR)
# other members
for name in dir(subject):
try:
value = attrgetter(subject, name)
directly_defined = name in obj_dict
name = unmangle(subject, name)
if name and name not in members:
members[name] = Attribute(name, directly_defined, value)
except AttributeError:
continue
# annotation only member (ex. attr: int)
if hasattr(subject, '__annotations__') and isinstance(subject.__annotations__, Mapping):
for name in subject.__annotations__:
name = unmangle(subject, name)
if name and name not in members:
members[name] = Attribute(name, True, INSTANCEATTR)
if analyzer:
# append instance attributes (cf. self.attr1) if analyzer knows
namespace = '.'.join(objpath)
for (ns, name) in analyzer.find_attr_docs():
if namespace == ns and name not in members:
members[name] = Attribute(name, True, INSTANCEATTR)
return members
from sphinx.ext.autodoc.mock import ( # NOQA
_MockModule, _MockObject, MockFinder, MockLoader, mock
)
deprecated_alias('sphinx.ext.autodoc.importer',
{
'_MockModule': _MockModule,
'_MockObject': _MockObject,
'MockFinder': MockFinder,
'MockLoader': MockLoader,
'mock': mock,
},
RemovedInSphinx40Warning,
{
'_MockModule': 'sphinx.ext.autodoc.mock._MockModule',
'_MockObject': 'sphinx.ext.autodoc.mock._MockObject',
'MockFinder': 'sphinx.ext.autodoc.mock.MockFinder',
'MockLoader': 'sphinx.ext.autodoc.mock.MockLoader',
'mock': 'sphinx.ext.autodoc.mock.mock',
})
|
from dataclasses import dataclass, field
from enum import Enum, auto, unique
from eth_typing import BLSPubkey
from eth2._utils.humanize import humanize_bytes
from eth2.beacon.signature_domain import SignatureDomain
from eth2.beacon.typing import CommitteeIndex
from eth2.validator_client.tick import Tick
@unique
class DutyType(Enum):
Attestation = auto()
BlockProposal = auto()
@dataclass(eq=True, frozen=True)
class Duty:
"""
A ``Duty`` represents some work that needs to be performed on behalf of some
validator, like signing an ``Attestation``.
"""
validator_public_key: BLSPubkey
# ``Tick`` when this ``Duty`` should be executed, resulting in a
# (slashable) signature to publish
tick_for_execution: Tick
# ``Tick`` when this ``Duty`` was discovered via a ``BeaconNode``
discovered_at_tick: Tick
duty_type: DutyType
signature_domain: SignatureDomain
@dataclass(eq=True, frozen=True)
class AttestationDuty(Duty):
duty_type: DutyType = field(init=False, default=DutyType.Attestation)
signature_domain: SignatureDomain = field(
init=False, default=SignatureDomain.DOMAIN_BEACON_ATTESTER
)
committee_index: CommitteeIndex
def __repr__(self) -> str:
return (
f"AttestationDuty(validator_public_key={humanize_bytes(self.validator_public_key)},"
f" tick_for_execution={self.tick_for_execution},"
f" discovered_at_tick={self.discovered_at_tick},"
f" committee_index={self.committee_index})"
)
@dataclass(eq=True, frozen=True)
class BlockProposalDuty(Duty):
duty_type: DutyType = field(init=False, default=DutyType.BlockProposal)
signature_domain: SignatureDomain = field(
init=False, default=SignatureDomain.DOMAIN_BEACON_PROPOSER
)
    def __repr__(self) -> str:
        return (
            f"BlockProposalDuty(validator_public_key={humanize_bytes(self.validator_public_key)},"
            f" tick_for_execution={self.tick_for_execution},"
            f" discovered_at_tick={self.discovered_at_tick})"
        )
|
from click.testing import CliRunner
from pyskel_bc.cli import cli
def test_cli_count():
runner = CliRunner()
result = runner.invoke(cli, ['3'])
assert result.exit_code == 0
assert result.output == "False\nFalse\nFalse\n"
|
from typing import Generic, Iterator, Optional, TypeVar
T = TypeVar('T')
class Peekable(Generic[T]):
__slots__ = "just", "next", "iterator"
def __init__(self, iterator: Iterator[T]):
self.iterator = iterator
        self.next: Optional[T] = next(self.iterator, None)
        self.just: Optional[T] = None
def __iter__(self):
return self
def __next__(self) -> T:
if self.next is None:
raise StopIteration
self.just = self.next
self.next = next(self.iterator, None)
return self.just
def __copy__(self) -> "Peekable[T]":
result = self.__class__(self.iterator)
result.just = self.just
return result
def peek(self):
return self.next
def current(self):
return self.just
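if __name__ == "__main__":
    # A quick demonstration: peek() looks ahead without consuming an element,
    # current() returns the element most recently yielded.
    it = Peekable(iter([1, 2, 3]))
    assert it.peek() == 1     # look ahead; nothing consumed yet
    assert next(it) == 1
    assert it.current() == 1  # the value just yielded
    assert it.peek() == 2     # the value the next call will return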
|
import unittest
from programy.config.file.yaml_file import YamlConfigurationFile
from programy.clients.restful.config import RestConfiguration
from programy.clients.events.console.config import ConsoleConfiguration
class RestConfigurationTests(unittest.TestCase):
def test_init(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
rest:
host: 127.0.0.1
port: 5000
debug: false
workers: 4
use_api_keys: false
api_key_file: apikeys.txt
""", ConsoleConfiguration(), ".")
rest_config = RestConfiguration("rest")
rest_config.load_configuration(yaml, ".")
self.assertEqual("127.0.0.1", rest_config.host)
self.assertEqual(5000, rest_config.port)
self.assertEqual(False, rest_config.debug)
self.assertEqual(False, rest_config.use_api_keys)
self.assertEqual("apikeys.txt", rest_config.api_key_file)
def test_init_no_values(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
rest:
""", ConsoleConfiguration(), ".")
rest_config = RestConfiguration("rest")
rest_config.load_configuration(yaml, ".")
self.assertEqual("0.0.0.0", rest_config.host)
self.assertEqual(80, rest_config.port)
self.assertEqual(False, rest_config.debug)
self.assertEqual(False, rest_config.use_api_keys)
def test_to_yaml_with_defaults(self):
config = RestConfiguration("rest")
data = {}
config.to_yaml(data, True)
self.assertEqual(data['host'], "0.0.0.0")
self.assertEqual(data['port'], 80)
self.assertEqual(data['debug'], False)
self.assertEqual(data['use_api_keys'], False)
self.assertEqual(data['api_key_file'], './api.keys')
self.assertEqual(data['ssl_cert_file'], './rsa.cert')
self.assertEqual(data['ssl_key_file'], './rsa.keys')
self.assertEqual(data['bot'], 'bot')
self.assertEqual(data['bot_selector'], "programy.clients.client.DefaultBotSelector")
self.assertEqual(data['renderer'], "programy.clients.render.text.TextRenderer")
|
"""RCS interface module for CVSGit."""
from __future__ import absolute_import
import rcsparse
# Some observations about RCS + CVS, although I don't really know
# that much about the RCS format or the CVS usage of it...
#
# 1. The 'branch' keyword is normally absent (or empty?), but has
# the value "1.1.1" for files that have been imported into the
# vendor branch and never modified. The 'head' keyword has the
# value "1.1" in that case.
#
# 2. When checking out files on trunk via date that were imported
# in a vendor branch, cvs expands the Id keyword to "1.1.1.1"
# if the date matches exactly, and to "1.1" if the date is one
# second or more after the exact date of the import.
import os.path
import sys
from cvsgit.changeset import Change, FILE_ADDED, FILE_MODIFIED, \
FILE_DELETED
from cvsgit.error import Error
from cvsgit.i18n import _
REV_TIMESTAMP = 1
REV_AUTHOR = 2
REV_STATE = 3
REV_BRANCHES = 4
REV_NEXT = 5
REV_MODE = 6
class RCSError(Error):
"""Base class for exceptions from the cvsgit.rcs module.
"""
class ParseError(RCSError):
"""Raised when an RCS file couldn't be parsed correctly.
"""
def __init__(self, message, rcsfile):
"""This exception provides additional information.
'rcsfile' is an RCSFile object.
"""
super(ParseError, self).__init__(message)
self.rcsfile = rcsfile
class CheckoutError(ParseError):
"""Extraction of fulltext from RCS file failed.
This exception indicates that the fulltext of a particular
revision was requested but couldn't be retrieved from the
RCS file because the revision does not exist, for example.
It likely indicates that an invalid argument was passed to
the RCSFile.blob method.
"""
def __init__(self, rcsfile, revision):
"""This exception provides additional information.
'rcsfile' is an RCSFile object.
'revision' is the revision that couldn't be retrieved.
"""
super(CheckoutError, self).__init__(
_("Couldn't retrieve file content for revision %s of %s") % \
(revision, rcsfile.filename), rcsfile)
self.revision = revision
class RCSFile(object):
"""Represents a single RCS file.
"""
def __init__(self, filename, encoding='iso8859-1'):
"""'encoding' sets the encoding assumed of log messages and
delta text in RCS files.
"""
self.filename = filename
self.encoding = encoding
self.rcsfile = rcsparse.rcsfile(filename)
head = property(lambda self: self.rcsfile.head)
branch = property(lambda self: self.rcsfile.branch)
expand = property(lambda self: self.rcsfile.expand)
mode = property(lambda self: self.rcsfile.mode)
revs = property(lambda self: self.rcsfile.revs)
def revisions(self):
"""Yield all revision numbers from current HEAD backwards.
"""
if self.branch:
branchprefix = self.branch + '.'
else:
branchprefix = None
revision = self.head
        while revision is not None:
if branchprefix:
for brevision in self.revs[revision][REV_BRANCHES]:
if brevision.startswith(branchprefix):
branchprefix = None
revision = brevision
break
yield(revision)
revision = self.revs[revision][REV_NEXT]
def changes(self):
"""Yield Change objects for all revisions on HEAD
The changes are generated by following the current head
revision back to its origin. The order of changes is thus
from most recent to oldest.
"""
for revision in self.revisions():
change = self.change(revision)
            if change is not None:
yield(change)
def change(self, revision):
"""Return a single Change object for <revision>.
If the revision is 1.1 and has state 'dead' then the file was
added on a branch and None is returned.
"""
rev = self.revs[revision]
if rev[REV_STATE] == 'dead':
if revision == '1.1':
# This file was initially added on a branch and so
# the initial trunk revision was marked 'dead'. We
# do not count this as a change since it wasn't
# added and hasn't existed before.
return None
else:
filestatus = FILE_DELETED
        elif rev[REV_NEXT] is None:
filestatus = FILE_ADDED
else:
# XXX: Resurrections of dead revisions aren't flagged
# as FILE_ADDED.
filestatus = FILE_MODIFIED
# The log message for an initial import is actually in
# the initial vendor branch revision.
if revision == '1.1' and '1.1.1.1' in rev[REV_BRANCHES]:
log = self.rcsfile.getlog('1.1.1.1')
else:
log = self.rcsfile.getlog(revision)
# XXX: is this right?
log = unicode(log, self.encoding)
        if rev[REV_MODE] is None:
mode = ''
else:
mode = rev[REV_MODE]
return Change(timestamp=rev[REV_TIMESTAMP],
author=rev[REV_AUTHOR],
log=log,
filestatus=filestatus,
filename=self.filename,
revision=revision,
state=rev[REV_STATE],
mode=mode)
def blob(self, revision):
"""Returns the revision's file content.
"""
try:
return self.rcsfile.checkout(revision)
except RuntimeError:
raise CheckoutError(self, revision)
# XXX only for debugging; remove later
def _print_revision(self, revision):
import time
rev = self.revs[revision]
print 'revision:', revision
print ' timestamp:', time.strftime("%Y-%m-%d %H:%M", time.gmtime(rev[REV_TIMESTAMP]))
print ' branches:', rev[REV_BRANCHES]
print ' next:', rev[REV_NEXT]
print ' state:', rev[REV_STATE]
print ' log:', self.rcsfile.getlog(revision).splitlines()[0]
|
'''
========================
efficient_vdf module
========================
Created on Feb.6, 2022
@author: Xu Ronghua
@Email: rxu22@binghamton.edu
@TaskDescription: This module provides an efficient verifiable delay function implementation.
@Reference: Efficient Verifiable Delay Functions (By Wesolowski)
C++ prototype: https://github.com/iotaledger/vdf/tree/master/src
'''
import os
import time
import hashlib
import gmpy2
from gmpy2 import mpz
'''
======================== Internal functions ========================
'''
## integer to hex
def int_to_hex(int_data):
return hex(int_data)
## hex to integer
def hex_to_int(hex_data):
return int(hex_data, 16)
## hash function with 2*k length:
def hash_message(hex_message, _k):
if(_k==128):
hash_hex = hashlib.sha256(hex_message.encode('utf-8')).hexdigest()
else:
hash_hex = hashlib.sha1(hex_message.encode('utf-8')).hexdigest()
return hash_hex
'''
======================= Efficient VDF class ========================
'''
class E_VDF(object):
def __init__(self, seed_type=0):
'''
Initialize parameters
@Input
        seed_type: seed generation method.
'''
if(seed_type ==1):
## 1) use random number as seed
r_seed = int(os.urandom(32).hex(), 16)
else:
## 2) use hash of random_state as seed
r_seed = hash(gmpy2.random_state())
## set random state
self.r_state = gmpy2.random_state(r_seed)
@staticmethod
def generate_prime(r_state, bitLen):
'''
generate a random prime number
@Input
r_state: generate by gmpy2.random_state(r_seed)
bitLen: bit length of the prime number
@Output
        mpz_prime: the next prime at or above a uniformly distributed random integer in [0, 2**bitLen - 1]
'''
## generate a random number
mpz_random = gmpy2.mpz_urandomb(r_state, bitLen)
        ## get the next prime above mpz_random
mpz_prime = gmpy2.next_prime(mpz_random)
## return prime number.
return mpz_prime
def hash_prime(self, mpz_prime):
'''
generate a next_prime given the output of H(current_prime)
@Input
mpz_prime: current mpz_prime used to calculate hash_prime
@Output
        next_mpz_prime (l): the next prime number above H(mpz_prime)
'''
## 1) convert mpz_prime to hex format
hex_mpz_prime = int_to_hex(mpz_prime)
## 2) get hex format hash value that is output of H(mpz_prime)
hash_data = hash_message(hex_mpz_prime, self._k)
## 3) convert hash_data to mpz format
hash_mpz_prime = mpz(hex_to_int(hash_data))
        ## 4) get the next prime above hash_mpz_prime
next_mpz_prime = gmpy2.next_prime(hash_mpz_prime)
return next_mpz_prime
def set_up(self, _lambda, _k,):
'''
        VDF setup procedure to configure parameters
        @Input
        _lambda: security parameter; modulus N is the product of two primes, each of bit length _lambda/2
_k: Security parameter _k defines length of hash string l.
'''
## set security parameters
self._lambda = _lambda
self._k = _k
## create big prime N
self.p = E_VDF.generate_prime(self.r_state, int(self._lambda/2))
self.q = E_VDF.generate_prime(self.r_state, int(self._lambda/2))
mpz_N = gmpy2.mul(self.p, self.q)
# self.phi_N = gmpy2.mul(self.p-1, self.q-1)
return mpz_N
    def evaluate_proof(self, _message, _tau, _N):
'''
VDF evaluation to solve challenge and calculate proof pairs for verification
@Input
        _message: data (x) that is used to feed the evaluation
_tau: challenge (task difficuly) that requires y=x^(2**tau) mod N
_N: modulus N
@Output
proof_pair: Return [pi, l]
'''
## ---------------------- evaluation -----------------------------
## 1) perform H(m)
hash_m = hash_message(_message, self._k)
x = hex_to_int(hash_m)
## 2) calculate 2^tau
exp_tau = pow(2, _tau)
## 3) calculate y=x^(2**t) mod N
y = gmpy2.powmod(x, exp_tau, _N)
## ---------------------------- proof -----------------------------
## 1) calculate l= H_prime(x+y)
l = self.hash_prime(gmpy2.add(x,y))
## 2) calculate floor(2^tau/l)
exp_tau_div = gmpy2.f_div(exp_tau,l)
## 3) calculate pi=x^exp_tau_div mod N
pi = gmpy2.powmod(x, exp_tau_div, _N)
## return proof pair
proof_pair=[pi,l]
return proof_pair
    def verify_proof(self, _message, _tau, _N, proof_pair):
'''
VDF verification to check if proof is correct
@Input
        _message: data (x) that is used to feed the evaluation
        _tau: challenge (task difficulty) that requires y=x^(2**tau) mod N
_N: modulus N
proof_pair: [pi, l]
@Output
verify_ret: Return true or false
'''
pi = proof_pair[0]
l = proof_pair[1]
## 1) perform H(m)
hash_m = hash_message(_message, self._k)
x = hex_to_int(hash_m)
## 2) calculate 2^tau mod l
r = gmpy2.powmod(2, _tau, l)
## 3) use proof pair to calculate y
## optimized method: y=((pi^l mod N) * (x^r mod N)) mod N
pi_l_mod = gmpy2.powmod(pi, l, _N)
pi_x_r = gmpy2.powmod(x, r, _N)
y = gmpy2.mul(pi_l_mod, pi_x_r) % _N
## 4) calculate l_verify = H_prime(x+y)
l_verify = self.hash_prime(gmpy2.add(x,y))
## 5) return verify result
return l==l_verify
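if __name__ == '__main__':
    ## A minimal self-check sketch (the parameter sizes below are illustrative,
    ## not security recommendations): set up a modulus, evaluate a small
    ## challenge, then verify the resulting proof pair.
    evdf = E_VDF()
    mpz_N = evdf.set_up(_lambda=256, _k=128)
    proof_pair = evdf.evaluate_proof('hello vdf', 1000, mpz_N)
    print('proof verified:', evdf.verify_proof('hello vdf', 1000, mpz_N, proof_pair))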
|
from __future__ import annotations
import pytest
from bot.emote import EmotePosition
from bot.emote import parse_emote_info
@pytest.mark.parametrize(
('s', 'expected'),
(
('', []),
('303330140:23-31', [EmotePosition(23, 31, '303330140')]),
('302498976_BW:0-15', [EmotePosition(0, 15, '302498976_BW')]),
(
'300753352:36-45/303265469:0-7,9-16,18-25',
[
EmotePosition(0, 7, '303265469'),
EmotePosition(9, 16, '303265469'),
EmotePosition(18, 25, '303265469'),
EmotePosition(36, 45, '300753352'),
],
),
),
)
def test_parse_emote_info(s, expected):
assert parse_emote_info(s) == expected
|
from django.contrib.auth import get_user_model, authenticate
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = get_user_model()
fields = ("email", "password", "name")
extra_kwargs = {"password": {"write_only": True, "min_length": 5}}
def create(self, validated_data):
"""Django Rest Framework method for creating a model through Serializer class"""
return get_user_model().objects.create_user(**validated_data)
class AuthTokenSerializer(serializers.Serializer):
email = serializers.CharField()
password = serializers.CharField(
style={"input_type": "password"}, trim_whitespace=False
)
def validate(self, attrs):
"""Validate and authenticate the user"""
email = attrs.get("email")
password = attrs.get("password")
user = authenticate(
request=self.context.get("request"), username=email, password=password
)
if not user:
msg = _("Invalid credentials")
raise serializers.ValidationError(msg, code="authentication")
attrs["user"] = user
return attrs
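# Usage sketch (the request data is illustrative): validating credentials
# outside of a DRF view.
# serializer = AuthTokenSerializer(data={"email": "user@example.com", "password": "secret"},
#                                  context={"request": request})
# serializer.is_valid(raise_exception=True)
# user = serializer.validated_data["user"]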
|
import psycopg2
import pandas as pd
import dash
from dash import dcc
from dash import html
import plotly.express as px
app = dash.Dash(__name__)
# Connect to Materialize as a regular database
conn = psycopg2.connect("dbname=materialize user=materialize port=6875 host=localhost")
# Read the materialized view with Pandas
sql = "select * from consolidated_funnel order by cnt desc;"
df = pd.read_sql_query(sql, conn)
# Plot a funnel chart
fig = px.funnel(df, x="step", y="cnt")
# Main UI scaffolding for the dashboard
app.layout = html.Div(children=[
html.H1(children='Conversion Funnel'),
html.Div(children='''
Dash: A web application framework for your data.
'''),
dcc.Graph(
id='funnel-chart',
figure=fig
)
])
if __name__ == '__main__':
app.run_server(debug=True)
    conn.close()
|
import re
from typing import Dict, List
import requests
from anime_cli.anime import Anime
from anime_cli.search import SearchApi
class GogoAnime(SearchApi):
def __init__(self, mirror: str):
super().__init__(mirror)
self.url = f"https://gogoanime.{mirror}"
@staticmethod
    def get_headers() -> Dict[str, str]:
return {"Referer": "https://gogoplay1.com/"}
def search_anime(self, keyword: str) -> List[Anime]:
# Get and parse the html from the site
soup = self.get_soup(f"search.html?keyword={keyword}")
# Find all the p tags which have the name class
animes = soup.findAll("p", {"class": "name"})
return [
Anime(anime.a["title"], anime.a["href"].split("/")[2]) for anime in animes
]
def get_episodes_count(self, anime: Anime) -> int:
soup = self.get_soup(f"category/{anime.id}")
# Find all the ul tag which have an id of episode_page
episode_page = soup.find("ul", {"id": "episode_page"})
# From the ul tag find all the elements having li tag and then get ep_end
# from the last li tag which is the total number of episodes
episode_count = int(episode_page.find_all("li")[-1].a["ep_end"])
return episode_count
def get_embed_video(self, anime: Anime, episode: int) -> str:
soup = self.get_soup(f"{anime.id}-episode-{episode}")
# In the html search for a `a` tag
# having the rel: 100 and href: # properties
link = soup.find("a", {"href": "#", "rel": "100"})
return f'https:{link["data-video"]}'
def get_video_url(self, embed_url: str) -> str:
"""
Get video url returns the direct link to video by parsing
the page where the video is embedded
"""
# Get the page where the video is embedded
r = requests.get(embed_url, headers=self.request_headers)
# Search for the link to the video and return it
link = re.search(r"\s*sources.*", r.text).group()
link = re.search(r"https:.*(m3u8)|(mp4)", link).group()
return link
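# Usage sketch (the mirror TLD is a placeholder and the site must be reachable):
# api = GogoAnime('tv')
# results = api.search_anime('one piece')
# if results:
#     embed_url = api.get_embed_video(results[0], episode=1)
#     print(api.get_video_url(embed_url))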
|
# Copyright (c) 2021, IAC Electricals and contributors
# For license information, please see license.txt
import frappe
from frappe import _
import gzip
from frappe.model.document import Document
from frappe.model.mapper import get_mapped_doc
def before_insert(self,method=None):
self.flags.name_set = 1
    current = frappe.db.sql("""select MAX(current) AS current from `tabSeries` where name = %s""", (self.custom_naming_series,), as_dict=1)
for row in current:
current = row.current
    last_doc = frappe.get_last_doc('File')
    with open(frappe.utils.get_site_path("private") + "/files/" + last_doc.file_name, "rt") as file:
        # gzip_file = gzip.GzipFile(fileobj=file)
        # csv = gzip_file.readlines()
        csv = file.readlines()
id_list = []
variable = None
if variable:
frappe.db.sql("""update tabSeries set current = {0} where name = '{1}'""".format(variable, self.custom_naming_series), debug = 1)
series = self.custom_naming_series + str(variable).zfill(3)
self.name = series
# for row in csv[1:]:
# li = list(row.split(","))
# if li:
# id_list.append(li[7])
# if li[7] == self.item_name:
# variable = int(li[0][6:])
# break
if current is None:
current = 1
series = str(self.real_item_code)
self.name = series
        first_series_to_store = self.custom_naming_series
        frappe.db.sql("insert into tabSeries (name, current) values (%s, 1)", (first_series_to_store,))
else:
        current = current + 1
series = str(self.real_item_code)
self.name = series
frappe.db.sql("""update tabSeries set current = {0} where name = '{1}'""".format(current, self.custom_naming_series))
pass
if current is None:
current = 1
if self.real_item_code is None:
series = self.custom_naming_series + str(current).zfill(3)
self.name = series
            first_series_to_store = self.custom_naming_series
            frappe.db.sql("insert into tabSeries (name, current) values (%s, 1)", (first_series_to_store,))
else:
        current = current + 1
if self.real_item_code is None:
series = self.custom_naming_series + str(current).zfill(3)
self.name = series
            first_series_to_store = self.custom_naming_series
            frappe.db.sql("insert into tabSeries (name, current) values (%s, 1)", (first_series_to_store,))
@frappe.whitelist()
def update_old_item_custom_naming_series_for_one_time():
all_item = frappe.get_all('Item')
cnt = 0
for item in all_item:
cnt = cnt + 1
sql = """ UPDATE `tabItem` SET custom_naming_series = "" where name IN ('{0}')""".format(item.name)
benificiary_purchase_count = frappe.db.sql(sql,debug=1)
error_log = frappe.log_error(frappe.get_traceback(), _("All item Updated item count: '{0}' ").format(cnt))
@frappe.whitelist()
def update_the_series_item_updation(prefix_level_for_item,count1):
item_updation = frappe.db.sql("""UPDATE `tabSeries` SET current = {0} WHERE name = '{1}' """.format(count1, prefix_level_for_item), debug = 1)
return "Success"
@frappe.whitelist()
def update_the_series_prefix2_updation(prefix_level_3, count2):
item_updation = frappe.db.sql("""UPDATE `tabSeries` SET current = {0} WHERE name = '{1}' """.format(count2, prefix_level_3), debug = 1)
return "Success"
@frappe.whitelist()
def update_the_series_prefix3_updation(prefix_level_2, count3):
    series_3_updation = frappe.db.sql("""UPDATE `tabSeries` SET current = %s WHERE name = %s""", (count3, prefix_level_2), debug=1)
return "Success"
@frappe.whitelist()
def all_reset_series(level, count4):
    """Reset the series counters at every level under `level` and delete the affected Items."""
    def reset_series(names):
        # Reset the `current` counter for one or more series names (parameterized).
        if not names:
            return
        placeholders = ', '.join(['%s'] * len(names))
        frappe.db.sql("""UPDATE `tabSeries` SET current = %s WHERE name IN ({0})""".format(placeholders),
                      tuple([count4] + list(names)), debug=1)
    # item series level 1 counter reset
    reset_series([level])
    # item series level 2 counter reset
    level2_name = frappe.db.sql(
        """SELECT l2.name FROM `tabItem Series lavel 2` l2
           JOIN `tabItem Series lavel 1` l1 ON l1.name = l2.lavel_2_item_code
           WHERE l1.name = %s""", (level,), debug=1, as_dict=1)
    level_2_name_list = [row.name for row in level2_name]
    reset_series(level_2_name_list)
    # item series level 3 counter reset
    level_3_name_list = []
    if level_2_name_list:
        placeholders = ', '.join(['%s'] * len(level_2_name_list))
        level3_name = frappe.db.sql(
            """SELECT l3.name FROM `tabItem Series lavel 3` l3
               JOIN `tabItem Series lavel 2` l2 ON l2.name = l3.level_3_item_code
               WHERE l2.name IN ({0})""".format(placeholders),
            tuple(level_2_name_list), debug=1, as_dict=1)
        level_3_name_list = [row.name for row in level3_name]
        reset_series(level_3_name_list)
    # item-level counter reset, then delete the affected Items
    if level_3_name_list:
        placeholders = ', '.join(['%s'] * len(level_3_name_list))
        item_name = frappe.db.sql(
            """SELECT item.name FROM `tabItem` item
               JOIN `tabItem Series lavel 3` l3 ON l3.name = item.item_name
               WHERE l3.name IN ({0})""".format(placeholders),
            tuple(level_3_name_list), debug=1, as_dict=1)
        item_name_list = [row.name for row in item_name]
        reset_series(item_name_list)
        if item_name_list:
            placeholders = ', '.join(['%s'] * len(item_name_list))
            frappe.db.sql("""DELETE FROM `tabItem` WHERE name IN ({0})""".format(placeholders),
                          tuple(item_name_list), debug=1)
    return "Success"
|
from typing import Iterable, Type
from vkbottle.api import ABCAPI
from vkbottle.http import AiohttpClient, SingleSessionManager
from vkbottle.modules import logger
from vkbottle.polling import ABCPolling, BotPolling
from .bot import Bot
def bot_run_multibot(bot: Bot, apis: Iterable[ABCAPI], polling_type: Type[ABCPolling] = BotPolling):
""" Add run_polling with polling constructed from derived apis
:param bot: Bot main instance (api is not required)
:param apis: Iterable of apis
    :param polling_type: polling type to be run
"""
for i, api_instance in enumerate(apis):
logger.debug(f"Connecting API (index: {i})")
polling = polling_type().construct(api_instance)
api_instance.http = SingleSessionManager(AiohttpClient)
bot.loop_wrapper.add_task(bot.run_polling(custom_polling=polling))
bot.loop_wrapper.run_forever(bot.loop)
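# Usage sketch (the tokens are placeholders): run a single Bot over several group APIs.
# from vkbottle import API
# bot_run_multibot(Bot(), apis=[API('group-token-1'), API('group-token-2')])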
|
# -*- coding: utf-8 -*-
#
# Copyright 2017 AVSystem <avsystem@avsystem.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import base64
from framework.lwm2m_test import *
from framework.test_utils import *
from . import plaintext_base64 as pb64
class JsonEncodingTest:
class Test(test_suite.Lwm2mSingleServerTest,
test_suite.Lwm2mDmOperations):
def setUp(self):
super().setUp()
self.create_instance(self.serv, oid=OID.Test, iid=1)
def as_json(pkt):
return json.loads(pkt.content.decode('utf-8'))
class JsonEncodingBnResource(JsonEncodingTest.Test):
def runTest(self):
res = as_json(self.read_resource(self.serv, oid=OID.Test, iid=1, rid=0,
accept=coap.ContentFormat.APPLICATION_LWM2M_JSON))
self.assertEqual('/%d/1/0' % OID.Test, res['bn'])
class JsonEncodingBnInstance(JsonEncodingTest.Test):
def runTest(self):
res = as_json(self.read_instance(self.serv, oid=OID.Test, iid=1,
accept=coap.ContentFormat.APPLICATION_LWM2M_JSON))
self.assertEqual('/%d/1' % OID.Test, res['bn'])
class JsonEncodingBnObject(JsonEncodingTest.Test):
def runTest(self):
res = as_json(self.read_object(self.serv, oid=OID.Test,
accept=coap.ContentFormat.APPLICATION_LWM2M_JSON))
self.assertEqual('/%d' % OID.Test, res['bn'])
class JsonEncodingAllNamesAreSlashPrefixed(JsonEncodingTest.Test):
def runTest(self):
responses = [
as_json(self.read_object(self.serv, oid=OID.Test,
accept=coap.ContentFormat.APPLICATION_LWM2M_JSON)),
as_json(self.read_instance(self.serv, oid=OID.Test, iid=1,
accept=coap.ContentFormat.APPLICATION_LWM2M_JSON)) ]
for response in responses:
self.assertTrue(len(response['e']) > 0)
for resource in response['e']:
self.assertEqual('/', resource['n'][0])
resource = as_json(self.read_resource(self.serv, oid=OID.Test, iid=1, rid=0,
accept=coap.ContentFormat.APPLICATION_LWM2M_JSON))
# Resource path is in 'bn', therefore no 'n' parameter is specified by the client
self.assertFalse('n' in resource['e'])
class JsonEncodingBytesInBase64(JsonEncodingTest.Test):
def runTest(self):
some_bytes = pb64.test_object_bytes_generator(51)
some_bytes_b64 = base64.encodebytes(some_bytes).replace(b'\n', b'')
self.write_resource(self.serv, oid=OID.Test, iid=1, rid=RID.Test.ResRawBytes,
content=some_bytes_b64, format=coap.ContentFormat.TEXT_PLAIN)
result = as_json(self.read_resource(self.serv, oid=OID.Test, iid=1,
rid=RID.Test.ResRawBytes,
accept=coap.ContentFormat.APPLICATION_LWM2M_JSON))
self.assertEqual(some_bytes_b64, bytes(result['e'][0]['sv'], encoding='utf-8'))
class JsonEncodingArrayOfOpaqueValues(JsonEncodingTest.Test):
def runTest(self):
values = {}
import random
for i in range(9):
values[i] = random.randint(0, 2**31)
execute_args = ','.join("%d='%d'" % (k, v) for k, v in values.items())
self.execute_resource(self.serv, oid=OID.Test, iid=1, rid=RID.Test.ResInitIntArray,
content=bytes(execute_args, encoding='utf-8'))
result = as_json(self.read_resource(self.serv, oid=OID.Test, iid=1,
rid=RID.Test.ResOpaqueArray,
accept=coap.ContentFormat.APPLICATION_LWM2M_JSON))
import struct
for instance in result['e']:
key = int(instance['n'][1:])
expected_bytes = struct.pack('!i', values[key])
expected_value = base64.encodebytes(expected_bytes).replace(b'\n', b'')
            self.assertEqual(expected_value, bytes(instance['sv'], encoding='utf-8'))
|
from pytorch_lightning.callbacks import Callback
import math
from typing import List
__all__ = ['LrSchedullerStep']
def _annealing_cos(start_value: float, end_value: float, pct: float) -> float:
''' Calculate value for Cosine anneal.
Return value at pct, as pct goes from 0.0 to 1.0, from `start_value` to `end_value`. '''
return end_value + (start_value - end_value) / 2 * (math.cos(math.pi * pct) + 1)
def annealing_cos(start_value: float, end_value: float, steps: int) -> List[float]:
" Return list of values, Cosine anneal from `start_value` to `end_value` for `steps` steps"
return [_annealing_cos(start_value, end_value, pct) for pct in [i / steps for i in range(steps)]]
def annealing_cos_revers(start_value: float, end_value: float, steps: int) -> List[float]:
" Return list of values, Cosine anneal from `start_value` to `end_value` for `steps` steps"
return [_annealing_cos(end_value, start_value, pct) for pct in [i / steps for i in range(steps)]]
def _annealing_linear(start_value: float, end_value: float, pct: float) -> float:
''' Calculate value for Linear anneal.
Return value at pct, as pct goes from 0.0 to 1.0, from `start_value` to `end_value`. '''
return start_value + (end_value - start_value) * pct
def annealing_linear(start_value: float, end_value: float, steps: int) -> List[float]:
" Return list of values, Linear anneal from `start_value` to `end_value` for `steps` steps"
return [_annealing_linear(start_value, end_value, pct) for pct in [i / steps for i in range(steps)]]
def annealing_linear_revers(start_value: float, end_value: float, steps: int) -> List[float]:
" Return list of values, Linear anneal from `start_value` to `end_value` for `steps` steps"
return [_annealing_linear(end_value, start_value, pct) for pct in [i / steps for i in range(steps)]]
annealing_fn_dict = {'cos': annealing_cos,
'lin': annealing_linear,
'cos_rev': annealing_cos_revers,
'lin_rev': annealing_linear_revers,
'step': annealing_linear}
class LrSchedullerStep(Callback):
    '''LR scheduler: change the lr on every step according to the "annealing_fn" function.'''
def __init__(self,
start_pct: float = 0.5,
annealing_pct: float = 0.25,
div_factor: float = 0.1,
annealing_fn: str = 'cos') -> None:
self.start_pct = start_pct
self.annealing_pct = annealing_pct
self.div_factor = div_factor
self.annealing_fn = annealing_fn_dict[annealing_fn]
def on_train_start(self, trainer, pl_module):
self.steps_at_epoch = len(trainer.train_dataloader.dataset) // trainer.train_dataloader.batch_size
self.start_epoch = trainer.current_epoch
total_steps = (trainer.max_epochs - trainer.current_epoch) * self.steps_at_epoch
start_steps = int(total_steps * self.start_pct)
annealing_steps = int(total_steps * self.annealing_pct)
optimizer = trainer.optimizers[0]
self.start_lr_groups = [param_group['lr'] for param_group in optimizer.param_groups]
self.start_lr = self.start_lr_groups[0]
self.lr_list = [self.start_lr] * start_steps
self.lr_list.extend(self.annealing_fn(self.start_lr, self.start_lr * self.div_factor, annealing_steps))
        self.lr_list.extend([self.start_lr * self.div_factor] * (total_steps - start_steps - annealing_steps))
    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
        step_idx = (trainer.current_epoch - self.start_epoch) * self.steps_at_epoch + batch_idx
        optimizer = trainer.optimizers[0]
        # Guard against step counts that overrun the precomputed schedule
        # (e.g. when len(dataset) is not an exact multiple of the batch size).
        lr = self.lr_list[min(step_idx, len(self.lr_list) - 1)]
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
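# Usage sketch (model and datamodule are placeholders): register the callback so
# the learning rate is held for the first half of training, then cosine-annealed.
# import pytorch_lightning as pl
# trainer = pl.Trainer(max_epochs=10,
#                      callbacks=[LrSchedullerStep(start_pct=0.5, annealing_pct=0.25,
#                                                  div_factor=0.1, annealing_fn='cos')])
# trainer.fit(model, datamodule=dm)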
|
def read_next(*args):
for el in args:
for ch in el:
yield ch
for item in read_next('string', (2,), {'d': 1, 'i': 2, 'c': 3, 't': 4}):
print(item, end='')
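# Prints "string2dict": the string yields its characters, the tuple yields 2,
# and iterating the dict yields its keys in insertion order.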
|
import numpy as np
import tensorflow as tf
import gym
import time
from spinup.algos.td3 import core
from spinup.algos.td3.td3_randtarg import ReplayBuffer
from spinup.algos.td3.core import get_vars
from spinup.utils.logx import EpochLogger
from spinup.utils.run_utils import ExperimentGrid
"""
Exercise 2.3: Details Matter
In this exercise, you will run TD3 with a tiny implementation difference,
pertaining to how target actions are calculated. Your goal is to determine
whether or not there is any change in performance, and if so, explain why.
You do NOT need to write code for this exercise.
"""
def td3(env_fn, actor_critic=core.mlp_actor_critic, ac_kwargs=dict(), seed=0,
steps_per_epoch=5000, epochs=100, replay_size=int(1e6), gamma=0.99,
polyak=0.995, pi_lr=1e-3, q_lr=1e-3, batch_size=100, start_steps=10000,
act_noise=0.1, target_noise=0.2, noise_clip=0.5, policy_delay=2,
max_ep_len=1000, logger_kwargs=dict(), save_freq=1,
remove_action_clip=False):
"""
Args:
env_fn : A function which creates a copy of the environment.
The environment must satisfy the OpenAI Gym API.
actor_critic: A function which takes in placeholder symbols
for state, ``x_ph``, and action, ``a_ph``, and returns the main
outputs from the agent's Tensorflow computation graph:
=========== ================ ======================================
Symbol Shape Description
=========== ================ ======================================
``pi`` (batch, act_dim) | Deterministically computes actions
| from policy given states.
``q1`` (batch,) | Gives one estimate of Q* for
| states in ``x_ph`` and actions in
| ``a_ph``.
``q2`` (batch,) | Gives another estimate of Q* for
| states in ``x_ph`` and actions in
| ``a_ph``.
``q1_pi`` (batch,) | Gives the composition of ``q1`` and
| ``pi`` for states in ``x_ph``:
| q1(x, pi(x)).
=========== ================ ======================================
ac_kwargs (dict): Any kwargs appropriate for the actor_critic
function you provided to TD3.
seed (int): Seed for random number generators.
steps_per_epoch (int): Number of steps of interaction (state-action pairs)
for the agent and the environment in each epoch.
epochs (int): Number of epochs to run and train agent.
replay_size (int): Maximum length of replay buffer.
gamma (float): Discount factor. (Always between 0 and 1.)
polyak (float): Interpolation factor in polyak averaging for target
networks. Target networks are updated towards main networks
according to:
.. math:: \\theta_{\\text{targ}} \\leftarrow
\\rho \\theta_{\\text{targ}} + (1-\\rho) \\theta
where :math:`\\rho` is polyak. (Always between 0 and 1, usually
close to 1.)
pi_lr (float): Learning rate for policy.
q_lr (float): Learning rate for Q-networks.
batch_size (int): Minibatch size for SGD.
start_steps (int): Number of steps for uniform-random action selection,
before running real policy. Helps exploration.
act_noise (float): Stddev for Gaussian exploration noise added to
policy at training time. (At test time, no noise is added.)
target_noise (float): Stddev for smoothing noise added to target
policy.
noise_clip (float): Limit for absolute value of target policy
smoothing noise.
policy_delay (int): Policy will only be updated once every
policy_delay times for each update of the Q-networks.
max_ep_len (int): Maximum length of trajectory / episode / rollout.
logger_kwargs (dict): Keyword args for EpochLogger.
save_freq (int): How often (in terms of gap between epochs) to save
the current policy and value function.
remove_action_clip (bool): Special arg for this exercise. Controls
whether or not to clip the target action after adding noise to it.
"""
logger = EpochLogger(**logger_kwargs)
logger.save_config(locals())
tf.set_random_seed(seed)
np.random.seed(seed)
env, test_env = env_fn(), env_fn()
obs_dim = env.observation_space.shape[0]
act_dim = env.action_space.shape[0]
# Action limit for clamping: critically, assumes all dimensions share the same bound!
act_limit = env.action_space.high[0]
# Share information about action space with policy architecture
ac_kwargs['action_space'] = env.action_space
# Inputs to computation graph
x_ph, a_ph, x2_ph, r_ph, d_ph = core.placeholders(obs_dim, act_dim, obs_dim, None, None)
# Main outputs from computation graph
with tf.variable_scope('main'):
pi, q1, q2, q1_pi = actor_critic(x_ph, a_ph, **ac_kwargs)
# Target policy network
with tf.variable_scope('target'):
pi_targ, _, _, _ = actor_critic(x2_ph, a_ph, **ac_kwargs)
# Target Q networks
with tf.variable_scope('target', reuse=True):
# Target policy smoothing, by adding clipped noise to target actions
epsilon = tf.random_normal(tf.shape(pi_targ), stddev=target_noise)
epsilon = tf.clip_by_value(epsilon, -noise_clip, noise_clip)
a2 = pi_targ + epsilon
        if not remove_action_clip:
a2 = tf.clip_by_value(a2, -act_limit, act_limit)
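        # Without this clip, a2 can fall outside [-act_limit, act_limit], so the
        # target Q-values get evaluated at actions the environment would never
        # accept; that is exactly the implementation detail this exercise probes.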
# Target Q-values, using action from target policy
_, q1_targ, q2_targ, _ = actor_critic(x2_ph, a2, **ac_kwargs)
# Experience buffer
replay_buffer = ReplayBuffer(obs_dim=obs_dim, act_dim=act_dim, size=replay_size)
# Count variables
var_counts = tuple(core.count_vars(scope) for scope in ['main/pi', 'main/q1', 'main/q2', 'main'])
print('\nNumber of parameters: \t pi: %d, \t q1: %d, \t q2: %d, \t total: %d\n'%var_counts)
# Bellman backup for Q functions, using Clipped Double-Q targets
min_q_targ = tf.minimum(q1_targ, q2_targ)
backup = tf.stop_gradient(r_ph + gamma*(1-d_ph)*min_q_targ)
# TD3 losses
pi_loss = -tf.reduce_mean(q1_pi)
q1_loss = tf.reduce_mean((q1-backup)**2)
q2_loss = tf.reduce_mean((q2-backup)**2)
q_loss = q1_loss + q2_loss
# Separate train ops for pi, q
pi_optimizer = tf.train.AdamOptimizer(learning_rate=pi_lr)
q_optimizer = tf.train.AdamOptimizer(learning_rate=q_lr)
train_pi_op = pi_optimizer.minimize(pi_loss, var_list=get_vars('main/pi'))
train_q_op = q_optimizer.minimize(q_loss, var_list=get_vars('main/q'))
# Polyak averaging for target variables
target_update = tf.group([tf.assign(v_targ, polyak*v_targ + (1-polyak)*v_main)
for v_main, v_targ in zip(get_vars('main'), get_vars('target'))])
# Initializing targets to match main variables
target_init = tf.group([tf.assign(v_targ, v_main)
for v_main, v_targ in zip(get_vars('main'), get_vars('target'))])
sess = tf.Session()
sess.run(tf.global_variables_initializer())
sess.run(target_init)
# Setup model saving
logger.setup_tf_saver(sess, inputs={'x': x_ph, 'a': a_ph}, outputs={'pi': pi, 'q1': q1, 'q2': q2})
def get_action(o, noise_scale):
a = sess.run(pi, feed_dict={x_ph: o.reshape(1,-1)})
a += noise_scale * np.random.randn(act_dim)
return np.clip(a, -act_limit, act_limit)
def test_agent(n=10):
for j in range(n):
o, r, d, ep_ret, ep_len = test_env.reset(), 0, False, 0, 0
            while not (d or (ep_len == max_ep_len)):
# Take deterministic actions at test time (noise_scale=0)
o, r, d, _ = test_env.step(get_action(o, 0))
ep_ret += r
ep_len += 1
logger.store(TestEpRet=ep_ret, TestEpLen=ep_len)
start_time = time.time()
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
total_steps = steps_per_epoch * epochs
# Main loop: collect experience in env and update/log each epoch
for t in range(total_steps):
"""
Until start_steps have elapsed, randomly sample actions
from a uniform distribution for better exploration. Afterwards,
use the learned policy (with some noise, via act_noise).
"""
if t > start_steps:
a = get_action(o, act_noise)
else:
a = env.action_space.sample()
# Step the env
o2, r, d, _ = env.step(a)
ep_ret += r
ep_len += 1
# Ignore the "done" signal if it comes from hitting the time
# horizon (that is, when it's an artificial terminal signal
# that isn't based on the agent's state)
d = False if ep_len==max_ep_len else d
# Store experience to replay buffer
replay_buffer.store(o, a, r, o2, d)
# Super critical, easy to overlook step: make sure to update
# most recent observation!
o = o2
if d or (ep_len == max_ep_len):
"""
Perform all TD3 updates at the end of the trajectory
(in accordance with source code of TD3 published by
original authors).
"""
for j in range(ep_len):
batch = replay_buffer.sample_batch(batch_size)
feed_dict = {x_ph: batch['obs1'],
x2_ph: batch['obs2'],
a_ph: batch['acts'],
r_ph: batch['rews'],
d_ph: batch['done']
}
q_step_ops = [q_loss, q1, q2, train_q_op]
outs = sess.run(q_step_ops, feed_dict)
logger.store(LossQ=outs[0], Q1Vals=outs[1], Q2Vals=outs[2])
if j % policy_delay == 0:
# Delayed policy update
outs = sess.run([pi_loss, train_pi_op, target_update], feed_dict)
logger.store(LossPi=outs[0])
logger.store(EpRet=ep_ret, EpLen=ep_len)
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
# End of epoch wrap-up
if t > 0 and t % steps_per_epoch == 0:
epoch = t // steps_per_epoch
# Save model
if (epoch % save_freq == 0) or (epoch == epochs-1):
logger.save_state({'env': env}, None)
# Test the performance of the deterministic version of the agent.
test_agent()
# Log info about epoch
logger.log_tabular('Epoch', epoch)
logger.log_tabular('EpRet', with_min_and_max=True)
logger.log_tabular('TestEpRet', with_min_and_max=True)
logger.log_tabular('EpLen', average_only=True)
logger.log_tabular('TestEpLen', average_only=True)
logger.log_tabular('TotalEnvInteracts', t)
logger.log_tabular('Q1Vals', with_min_and_max=True)
logger.log_tabular('Q2Vals', with_min_and_max=True)
logger.log_tabular('LossPi', average_only=True)
logger.log_tabular('LossQ', average_only=True)
logger.log_tabular('Time', time.time()-start_time)
logger.dump_tabular()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default='HalfCheetah-v2')
parser.add_argument('--h', type=int, default=300)
parser.add_argument('--l', type=int, default=1)
parser.add_argument('--num_runs', '-n', type=int, default=3)
parser.add_argument('--steps_per_epoch', '-s', type=int, default=5000)
parser.add_argument('--total_steps', '-t', type=int, default=int(5e4))
args = parser.parse_args()
def td3_with_actor_critic(**kwargs):
td3(ac_kwargs=dict(hidden_sizes=[args.h]*args.l),
start_steps=5000,
max_ep_len=150,
batch_size=64,
polyak=0.95,
**kwargs)
eg = ExperimentGrid(name='ex2-3_td3')
eg.add('replay_size', int(args.total_steps))
eg.add('env_name', args.env, '', True)
eg.add('seed', [10*i for i in range(args.num_runs)])
eg.add('epochs', int(args.total_steps / args.steps_per_epoch))
eg.add('steps_per_epoch', args.steps_per_epoch)
eg.add('remove_action_clip', [False, True])
eg.run(td3_with_actor_critic, datestamp=True)
|
import pytest
from numpy.testing import assert_array_almost_equal
from landlab import RasterModelGrid
from landlab.components import ErosionDeposition, FlowAccumulator, Space
@pytest.fixture
def grid():
grid = RasterModelGrid((10, 10), xy_spacing=10.0)
grid.set_closed_boundaries_at_grid_edges(True, True, True, True)
z = grid.add_zeros("node", "topographic__elevation")
grid.add_zeros("node", "soil__depth")
z += grid.x_of_node.copy() + grid.y_of_node.copy()
z[25] -= 40
z[35] -= 40
z[26] -= 40
z[36] -= 40
z[24] -= 40
z[34] -= 40
return grid
# consider the full combinatorics of solver, two phi values, ED and Space, and
# (if Space) initial soil depths of very large and zero.
@pytest.mark.parametrize("solver", ["basic", "adaptive"])
@pytest.mark.parametrize("v_s", [1.5])
@pytest.mark.parametrize("dt", [2])
def test_mass_conserve_all_closed_ErosionDeposition(grid, solver, v_s, dt):
z_init = grid.at_node["topographic__elevation"].copy()
fa = FlowAccumulator(grid)
fa.run_one_step()
ed = ErosionDeposition(grid, solver=solver, v_s=v_s)
ed.run_one_step(dt)
dz = z_init - grid.at_node["topographic__elevation"]
# For Erosion Deposition, porosity should not have any effect, because
# the component operates in terms of bulk-equivalent sediment flux,
# erosion, and deposition.
assert_array_almost_equal(dz.sum(), 0.0, decimal=10)
@pytest.mark.parametrize("phi", [0.0, 0.3])
@pytest.mark.parametrize("solver", ["basic", "adaptive"])
@pytest.mark.parametrize("H", [0, 1, 1000])
@pytest.mark.parametrize("v_s", [1.5])
@pytest.mark.parametrize("H_star", [0.1])
@pytest.mark.parametrize("dt", [2])
def test_mass_conserve_all_closed_Space(grid, H, solver, phi, v_s, H_star, dt):
grid.at_node["soil__depth"][:] = H
z_init = grid.at_node["topographic__elevation"].copy()
fa = FlowAccumulator(grid)
fa.run_one_step()
ed = Space(grid, solver=solver, phi=phi, v_s=v_s, H_star=H_star)
ed.run_one_step(dt)
# in space, everything is either bedrock or sediment. check for
# conservation.
dH = grid.at_node["soil__depth"][:] - H
    # sediment is defined as having a porosity, so all changes (up or down)
    # must be converted to mass.
dH *= 1 - phi
dBr = grid.at_node["bedrock__elevation"] - (z_init - H)
mass_change = dH + dBr
assert_array_almost_equal(mass_change.sum(), 0.0, decimal=10)
# Note that we can't make an equivalent test with a depression finder yet,
# because the depression finder can't handle a grid with no outlet.
# What we can do is make an example in which there is a big sink that traps
# almost all sediment, and then assert that all sediment is either trapped
# OR sent out of the one outlet node.
@pytest.fixture()
def grid2(grid):
grid.status_at_node[1] = grid.BC_NODE_IS_FIXED_VALUE
return grid
# consider the full combinatorics of solver, two phi values, depression finding
# or not, ED and Space, and (if Space) initial soil depths of very large and zero.
@pytest.mark.parametrize("depression_finder", [None, "DepressionFinderAndRouter"])
@pytest.mark.parametrize("solver", ["basic", "adaptive"])
@pytest.mark.parametrize("v_s", [1.5])
@pytest.mark.parametrize("dt", [2])
def test_mass_conserve_with_depression_finder_ErosionDeposition(
grid2, solver, depression_finder, v_s, dt
):
assert grid2.status_at_node[1] == grid2.BC_NODE_IS_FIXED_VALUE
z_init = grid2.at_node["topographic__elevation"].copy()
if depression_finder is None:
fa = FlowAccumulator(grid2)
else:
fa = FlowAccumulator(grid2, depression_finder=depression_finder, routing="D4")
fa.run_one_step()
ed = ErosionDeposition(grid2, solver=solver, v_s=v_s)
ed.run_one_step(dt)
dz = grid2.at_node["topographic__elevation"] - z_init
# assert that the mass loss over the surface is exported through the one
# outlet.
net_change = dz[grid2.core_nodes].sum() + (
ed._qs_in[1] * dt / grid2.cell_area_at_node[11]
)
assert_array_almost_equal(net_change, 0.0, decimal=10)
@pytest.mark.parametrize("depression_finder", [None, "DepressionFinderAndRouter"])
@pytest.mark.parametrize("phi", [0.0, 0.3])
@pytest.mark.parametrize("solver", ["basic", "adaptive"])
@pytest.mark.parametrize("H", [0, 1000])
@pytest.mark.parametrize("v_s", [1.5])
@pytest.mark.parametrize("H_star", [0.1])
@pytest.mark.parametrize("dt", [2])
def test_mass_conserve_with_depression_finder_Space(
grid2, H, solver, depression_finder, phi, v_s, H_star, dt
):
grid2.at_node["soil__depth"][:] = H
assert grid2.status_at_node[1] == grid2.BC_NODE_IS_FIXED_VALUE
z_init = grid2.at_node["topographic__elevation"].copy()
if depression_finder is None:
fa = FlowAccumulator(grid2)
else:
fa = FlowAccumulator(grid2, depression_finder=depression_finder, routing="D4")
fa.run_one_step()
ed = Space(grid2, solver=solver, phi=phi, v_s=v_s, H_star=H_star)
ed.run_one_step(dt)
# see above test for notes.
dH = grid2.at_node["soil__depth"][:] - H
dH *= 1 - phi
dBr = grid2.at_node["bedrock__elevation"] - (z_init - H)
mass_change = dH + dBr
# assert that the mass loss over the surface is exported through the one
# outlet.
net_change = mass_change[grid2.core_nodes].sum() + (
ed._qs_in[1] * dt / grid2.cell_area_at_node[11]
)
assert_array_almost_equal(net_change, 0.0, decimal=10)
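# These tests can be run selectively with pytest, e.g. (assuming this module is
# saved as test_mass_conservation.py, a hypothetical name):
#   pytest test_mass_conservation.py -k "Space and depression"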
|
# (c) Copyright 2014 Brocade Communications Systems Inc.
# All Rights Reserved.
#
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Unit tests for brcd fc san lookup service."""
import mock
from oslo_config import cfg
import paramiko
from cinder import exception
from cinder import test
from cinder.volume import configuration as conf
import cinder.zonemanager.drivers.brocade.brcd_fc_san_lookup_service \
as brcd_lookup
from cinder.zonemanager.drivers.brocade import fc_zone_constants
nsshow = '20:1a:00:05:1e:e8:e3:29'
switch_data = [' N 011a00;2,3;20:1a:00:05:1e:e8:e3:29;\
20:1a:00:05:1e:e8:e3:29;na']
nsshow_data = ['10:00:8c:7c:ff:52:3b:01', '20:24:00:02:ac:00:0a:50']
_device_map_to_verify = {
'BRCD_FAB_2': {
'initiator_port_wwn_list': ['10008c7cff523b01'],
'target_port_wwn_list': ['20240002ac000a50']}}
class TestBrcdFCSanLookupService(brcd_lookup.BrcdFCSanLookupService,
test.TestCase):
def setUp(self):
super(TestBrcdFCSanLookupService, self).setUp()
self.client = paramiko.SSHClient()
self.configuration = conf.Configuration(None)
self.configuration.set_default('fc_fabric_names', 'BRCD_FAB_2',
'fc-zone-manager')
self.configuration.fc_fabric_names = 'BRCD_FAB_2'
self.create_configuration()
# override some of the functions
def __init__(self, *args, **kwargs):
test.TestCase.__init__(self, *args, **kwargs)
def create_configuration(self):
fc_fabric_opts = []
fc_fabric_opts.append(cfg.StrOpt('fc_fabric_address',
default='10.24.49.100', help=''))
fc_fabric_opts.append(cfg.StrOpt('fc_fabric_user',
default='admin', help=''))
fc_fabric_opts.append(cfg.StrOpt('fc_fabric_password',
default='password', help='',
secret=True))
fc_fabric_opts.append(cfg.IntOpt('fc_fabric_port',
default=22, help=''))
fc_fabric_opts.append(cfg.StrOpt('principal_switch_wwn',
default='100000051e55a100', help=''))
config = conf.Configuration(fc_fabric_opts, 'BRCD_FAB_2')
self.fabric_configs = {'BRCD_FAB_2': config}
@mock.patch.object(paramiko.hostkeys.HostKeys, 'load')
def test_create_ssh_client(self, load_mock):
mock_args = {}
mock_args['known_hosts_file'] = 'dummy_host_key_file'
mock_args['missing_key_policy'] = paramiko.RejectPolicy()
ssh_client = self.create_ssh_client(**mock_args)
self.assertEqual('dummy_host_key_file', ssh_client._host_keys_filename)
        self.assertIsInstance(ssh_client._policy, paramiko.RejectPolicy)
mock_args = {}
ssh_client = self.create_ssh_client(**mock_args)
self.assertIsNone(ssh_client._host_keys_filename)
        self.assertIsInstance(ssh_client._policy, paramiko.WarningPolicy)
@mock.patch.object(brcd_lookup.BrcdFCSanLookupService,
'get_nameserver_info')
def test_get_device_mapping_from_network(self, get_nameserver_info_mock):
initiator_list = ['10008c7cff523b01']
target_list = ['20240002ac000a50', '20240002ac000a40']
with mock.patch.object(self.client, 'connect'):
get_nameserver_info_mock.return_value = (nsshow_data)
device_map = self.get_device_mapping_from_network(
initiator_list, target_list)
self.assertDictMatch(device_map, _device_map_to_verify)
@mock.patch.object(brcd_lookup.BrcdFCSanLookupService, '_get_switch_data')
def test_get_nameserver_info(self, get_switch_data_mock):
ns_info_list = []
ns_info_list_expected = ['20:1a:00:05:1e:e8:e3:29',
'20:1a:00:05:1e:e8:e3:29']
get_switch_data_mock.return_value = (switch_data)
ns_info_list = self.get_nameserver_info()
self.assertEqual(ns_info_list_expected, ns_info_list)
def test__get_switch_data(self):
cmd = fc_zone_constants.NS_SHOW
with mock.patch.object(self.client, 'exec_command') \
as exec_command_mock:
exec_command_mock.return_value = (Stream(),
Stream(nsshow),
Stream())
switch_data = self._get_switch_data(cmd)
self.assertEqual(nsshow, switch_data)
exec_command_mock.assert_called_once_with(cmd)
def test__parse_ns_output(self):
invalid_switch_data = [' N 011a00;20:1a:00:05:1e:e8:e3:29']
return_wwn_list = []
expected_wwn_list = ['20:1a:00:05:1e:e8:e3:29']
return_wwn_list = self._parse_ns_output(switch_data)
self.assertEqual(expected_wwn_list, return_wwn_list)
self.assertRaises(exception.InvalidParameterValue,
self._parse_ns_output, invalid_switch_data)
def test_get_formatted_wwn(self):
wwn_list = ['10008c7cff523b01']
return_wwn_list = []
expected_wwn_list = ['10:00:8c:7c:ff:52:3b:01']
return_wwn_list.append(self.get_formatted_wwn(wwn_list[0]))
self.assertEqual(expected_wwn_list, return_wwn_list)
class Channel(object):
def recv_exit_status(self):
return 0
class Stream(object):
def __init__(self, buffer=''):
self.buffer = buffer
self.channel = Channel()
def readlines(self):
return self.buffer
def close(self):
pass
def flush(self):
self.buffer = ''
|
# -*- coding: utf-8 -*-
"""
Learning Shapelets
==================
This example illustrates how the "Learning Shapelets" method can quickly
find a set of shapelets that results in excellent predictive performance
when used for a shapelet transform.
More information on the method can be found at:
http://fs.ismll.de/publicspace/LearningShapelets/.
"""
# Author: Romain Tavenard
# License: BSD 3 clause
import numpy
from sklearn.metrics import accuracy_score
import tensorflow as tf
import matplotlib.pyplot as plt
from tslearn.datasets import CachedDatasets
from tslearn.preprocessing import TimeSeriesScalerMinMax
from tslearn.shapelets import ShapeletModel, \
grabocka_params_to_shapelet_size_dict
from tslearn.utils import ts_size
# Set seed for determinism
numpy.random.seed(0)
# Load the Trace dataset
X_train, y_train, X_test, y_test = CachedDatasets().load_dataset("Trace")
# Normalize each of the time series in the Trace dataset
X_train = TimeSeriesScalerMinMax().fit_transform(X_train)
X_test = TimeSeriesScalerMinMax().fit_transform(X_test)
# Get statistics of the dataset
n_ts, ts_sz = X_train.shape[:2]
n_classes = len(set(y_train))
# Set the number of shapelets per size as done in the original paper
shapelet_sizes = grabocka_params_to_shapelet_size_dict(n_ts=n_ts,
ts_sz=ts_sz,
n_classes=n_classes,
l=0.1,
r=1)
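# shapelet_sizes maps each shapelet length to the number of shapelets of that length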
# Define the model using parameters provided by the authors (except that we
# use fewer iterations here)
shp_clf = ShapeletModel(n_shapelets_per_size=shapelet_sizes,
optimizer=tf.optimizers.Adam(.01),
batch_size=16,
weight_regularizer=.01,
max_iter=200,
random_state=42,
verbose=0)
shp_clf.fit(X_train, y_train)
# Make predictions and calculate accuracy score
pred_labels = shp_clf.predict(X_test)
print("Correct classification rate:", accuracy_score(y_test, pred_labels))
# Plot the different discovered shapelets
plt.figure()
for i, sz in enumerate(shapelet_sizes.keys()):
plt.subplot(len(shapelet_sizes), 1, i + 1)
plt.title("%d shapelets of size %d" % (shapelet_sizes[sz], sz))
for shp in shp_clf.shapelets_:
if ts_size(shp) == sz:
plt.plot(shp.ravel())
plt.xlim([0, max(shapelet_sizes.keys()) - 1])
plt.tight_layout()
plt.show()
# The loss history is accessible via the `model_` attribute, which is a Keras model
plt.figure()
plt.plot(numpy.arange(1, shp_clf.n_iter_ + 1), shp_clf.history_["loss"])
plt.title("Evolution of cross-entropy loss during training")
plt.xlabel("Epochs")
plt.show()
|
# -*- coding: utf-8 -*-
# Copyright 2019 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from abc import ABC, abstractmethod
import json
from collections import OrderedDict
import logging
import copy
from qiskit.aqua import (local_pluggables_types,
PluggableType,
get_pluggable_configuration,
local_pluggables,
get_backends_from_provider)
from qiskit.aqua.aqua_error import AquaError
from .jsonschema import JSONSchema
import traceback
def exception_to_string(excp):
stack = traceback.extract_stack()[:-3] + traceback.extract_tb(excp.__traceback__)
pretty = traceback.format_list(stack)
return ''.join(pretty) + '\n {} {}'.format(excp.__class__, excp)
logger = logging.getLogger(__name__)
class BaseParser(ABC):
"""Base Aqua Parser."""
_UNKNOWN = 'unknown'
_DEFAULT_PROPERTY_ORDER = [JSONSchema.NAME, _UNKNOWN]
_BACKEND_PROPERTY_ORDER = [JSONSchema.PROVIDER, JSONSchema.NAME, _UNKNOWN]
def __init__(self, jsonSchema):
"""Create InputParser object."""
self._original_sections = None
self._filename = None
self._sections = None
self._json_schema = jsonSchema
self._json_schema.populate_problem_names()
self._json_schema.commit_changes()
def _order_sections(self, sections):
sections_sorted = OrderedDict(sorted(list(sections.items()),
key=lambda x: self._section_order.index(x[0])
if x[0] in self._section_order else self._section_order.index(BaseParser._UNKNOWN)))
for section, properties in sections_sorted.items():
if isinstance(properties, dict):
_property_order = BaseParser._BACKEND_PROPERTY_ORDER if section == JSONSchema.BACKEND else BaseParser._DEFAULT_PROPERTY_ORDER
sections_sorted[section] = OrderedDict(sorted(list(properties.items()),
key=lambda x: _property_order.index(x[0])
if x[0] in _property_order
else _property_order.index(BaseParser._UNKNOWN)))
return sections_sorted
@property
def json_schema(self):
"""Getter of _json_schema."""
return self._json_schema
@abstractmethod
def parse(self):
"""Parse the data."""
pass
def is_modified(self):
"""
Returns true if data has been changed
"""
return self._original_sections != self._sections
@staticmethod
def is_pluggable_section(section_name):
section_name = JSONSchema.format_section_name(section_name)
for pluggable_type in local_pluggables_types():
if section_name == pluggable_type.value:
return True
return False
def get_section_types(self, section_name):
return self._json_schema.get_section_types(section_name)
def get_property_types(self, section_name, property_name):
return self._json_schema.get_property_types(section_name, property_name)
def get_default_section_names(self):
sections = self.get_default_sections()
return list(sections.keys()) if sections is not None else []
def get_section_default_properties(self, section_name):
return self._json_schema.get_section_default_properties(section_name)
def allows_additional_properties(self, section_name):
return self._json_schema.allows_additional_properties(section_name)
def get_property_default_values(self, section_name, property_name):
return self._json_schema.get_property_default_values(section_name, property_name)
def get_property_default_value(self, section_name, property_name):
return self._json_schema.get_property_default_value(section_name, property_name)
def get_filename(self):
"""Return the filename."""
return self._filename
@staticmethod
def get_algorithm_problems(algo_name):
return JSONSchema.get_algorithm_problems(algo_name)
def _merge_dependencies(self):
algo_name = self.get_section_property(PluggableType.ALGORITHM.value, JSONSchema.NAME)
if algo_name is None:
return
config = get_pluggable_configuration(PluggableType.ALGORITHM, algo_name)
pluggable_dependencies = config.get('depends', [])
section_names = self.get_section_names()
for pluggable_type_dict in pluggable_dependencies:
pluggable_type = pluggable_type_dict.get('pluggable_type')
if pluggable_type is None:
continue
pluggable_name = None
pluggable_defaults = pluggable_type_dict.get('default')
new_properties = {}
if pluggable_defaults is not None:
for key, value in pluggable_defaults.items():
if key == JSONSchema.NAME:
pluggable_name = value
else:
new_properties[key] = value
if pluggable_name is None:
continue
if pluggable_type not in section_names:
self.set_section(pluggable_type)
if self.get_section_property(pluggable_type, JSONSchema.NAME) is None:
self.set_section_property(pluggable_type, JSONSchema.NAME, pluggable_name)
if pluggable_name == self.get_section_property(pluggable_type, JSONSchema.NAME):
properties = self.get_section_properties(pluggable_type)
if new_properties:
new_properties.update(properties)
else:
new_properties = properties
self.set_section_properties(pluggable_type, new_properties)
@abstractmethod
def validate_merge_defaults(self):
self.merge_default_values()
self._json_schema.validate(self.get_sections())
self._validate_algorithm_problem()
@abstractmethod
def merge_default_values(self):
pass
def _validate_algorithm_problem(self):
algo_name = self.get_section_property(PluggableType.ALGORITHM.value, JSONSchema.NAME)
if algo_name is None:
return
problem_name = self.get_section_property(JSONSchema.PROBLEM, JSONSchema.NAME)
if problem_name is None:
problem_name = self.get_property_default_value(JSONSchema.PROBLEM, JSONSchema.NAME)
if problem_name is None:
raise AquaError("No algorithm 'problem' section found on input.")
problems = BaseParser.get_algorithm_problems(algo_name)
if problem_name not in problems:
raise AquaError("Problem: {} not in the list of problems: {} for algorithm: {}.".format(problem_name, problems, algo_name))
def commit_changes(self):
self._original_sections = copy.deepcopy(self._sections)
@abstractmethod
def save_to_file(self, file_name):
pass
def section_is_text(self, section_name):
section_name = JSONSchema.format_section_name(section_name).lower()
types = self.get_section_types(section_name)
if len(types) > 0:
return 'object' not in types
section = self._sections.get(section_name)
if section is None:
return False
return not isinstance(section, dict)
def get_sections(self):
return self._sections
def get_section(self, section_name):
"""Return a Section by name.
Args:
section_name (str): the name of the section, case insensitive
Returns:
Section: The section with this name
Raises:
AquaError: if the section does not exist.
"""
section_name = JSONSchema.format_section_name(section_name).lower()
try:
return self._sections[section_name]
except KeyError:
raise AquaError('No section "{0}"'.format(section_name))
def get_section_text(self, section_name):
section_name = JSONSchema.format_section_name(section_name).lower()
section = self._sections.get(section_name)
if section is None:
return ''
if isinstance(section, str):
return section
return json.dumps(section, sort_keys=True, indent=4)
def get_section_properties(self, section_name):
section_name = JSONSchema.format_section_name(section_name).lower()
section = self._sections.get(section_name)
if section is None:
return {}
return section
def get_section_property(self, section_name, property_name, default_value=None):
"""Return a property by name.
Args:
section_name (str): the name of the section, case insensitive
property_name (str): the property name in the section
default_value : default value in case it is not found
Returns:
Value: The property value
"""
section_name = JSONSchema.format_section_name(section_name)
property_name = JSONSchema.format_property_name(property_name)
if section_name in self._sections:
section = self._sections[section_name]
if property_name in section:
return section[property_name]
return default_value
def set_section(self, section_name):
"""
Args:
section_name (str): the name of the section, case insensitive
"""
section_name = JSONSchema.format_section_name(section_name)
if section_name not in self._sections:
self._sections[section_name] = '' if self.section_is_text(section_name) else OrderedDict()
self._sections = self._order_sections(self._sections)
@abstractmethod
def delete_section(self, section_name):
"""
Args:
section_name (str): the name of the section, case insensitive
"""
section_name = JSONSchema.format_section_name(section_name).lower()
if section_name not in self._sections:
return
del self._sections[section_name]
# update schema
self._json_schema.rollback_changes()
self._json_schema.update_backend_schema()
self._json_schema.update_pluggable_schemas(self)
def set_section_properties(self, section_name, new_properties):
old_properties = self.get_section_properties(section_name)
set_properties = copy.deepcopy(new_properties)
del_properties = []
for key, value in old_properties.items():
if key in set_properties:
if value == set_properties[key]:
del set_properties[key]
else:
del_properties.append(key)
# first delete
for property_name in del_properties:
self.delete_section_property(section_name, property_name)
# update name first
if JSONSchema.NAME in set_properties:
self.set_section_property(section_name, JSONSchema.NAME, set_properties[JSONSchema.NAME])
del set_properties[JSONSchema.NAME]
# update remaining properties
for property_name, value in set_properties.items():
self.set_section_property(section_name, property_name, value)
@abstractmethod
def post_set_section_property(self, section_name, property_name):
pass
def set_section_property(self, section_name, property_name, value):
"""
Args:
section_name (str): the name of the section, case insensitive
property_name (str): the name of the property
value (obj): the value of the property
"""
section_name = JSONSchema.format_section_name(section_name).lower()
property_name = JSONSchema.format_property_name(property_name)
value = self._json_schema.check_property_value(section_name, property_name, value)
types = self.get_property_types(section_name, property_name)
sections_temp = copy.deepcopy(self._sections)
BaseParser._set_section_property(sections_temp, section_name, property_name, value, types)
msg = self._json_schema.validate_property(sections_temp, section_name, property_name)
if msg is not None:
raise AquaError("{}.{}: Value '{}': '{}'".format(section_name, property_name, value, msg))
value_changed = False
if section_name not in self._sections:
value_changed = True
elif property_name not in self._sections[section_name]:
value_changed = True
else:
old_value = self.get_section_property(section_name, property_name)
value_changed = (old_value != value)
if not value_changed:
# nothing changed
return
# check if this provider is loadable and valid
if JSONSchema.BACKEND == section_name and property_name == JSONSchema.PROVIDER:
get_backends_from_provider(value)
BaseParser._set_section_property(self._sections, section_name, property_name, value, types)
if property_name == JSONSchema.NAME:
if JSONSchema.PROBLEM == section_name:
self._update_algorithm_problem()
elif JSONSchema.BACKEND == section_name:
self._json_schema.update_backend_schema()
elif BaseParser.is_pluggable_section(section_name):
self._json_schema.update_pluggable_schemas(self)
# remove properties that are not valid for this section
default_properties = self.get_section_default_properties(section_name)
if isinstance(default_properties, dict):
properties = self.get_section_properties(section_name)
for p_name in list(properties.keys()):
if p_name != JSONSchema.NAME and p_name not in default_properties:
self.delete_section_property(section_name, p_name)
self._update_dependency_sections(section_name)
else:
self.post_set_section_property(section_name, property_name)
self._sections = self._order_sections(self._sections)
def _update_algorithm_problem(self):
problem_name = self.get_section_property(JSONSchema.PROBLEM, JSONSchema.NAME)
if problem_name is None:
problem_name = self.get_property_default_value(JSONSchema.PROBLEM, JSONSchema.NAME)
if problem_name is None:
raise AquaError("No algorithm 'problem' section found on input.")
algo_name = self.get_section_property(PluggableType.ALGORITHM.value, JSONSchema.NAME)
if algo_name is not None and problem_name in BaseParser.get_algorithm_problems(algo_name):
return
for algo_name in local_pluggables(PluggableType.ALGORITHM):
if problem_name in self.get_algorithm_problems(algo_name):
# set to the first algorithm to solve the problem
self.set_section_property(PluggableType.ALGORITHM.value, JSONSchema.NAME, algo_name)
return
        # no algorithm solves this problem; remove the section
self.delete_section(PluggableType.ALGORITHM.value)
def _update_dependency_sections(self, section_name):
sections_to_be_deleted = []
prop_name = self.get_section_property(section_name, JSONSchema.NAME)
config = {} if prop_name is None else get_pluggable_configuration(section_name, prop_name)
pluggable_dependencies = config.get('depends', [])
if section_name == PluggableType.ALGORITHM.value:
sections_to_be_deleted = [name for name in self.get_section_names()
if name != PluggableType.INPUT.value and self.is_pluggable_section(name)]
classical = config.get('classical', False)
# update backend based on classical
if classical:
if JSONSchema.BACKEND in self._sections:
del self._sections[JSONSchema.BACKEND]
else:
if JSONSchema.BACKEND not in self._sections:
self.set_section_properties(JSONSchema.BACKEND, self.get_section_default_properties(JSONSchema.BACKEND))
# update dependencies recursively
self._update_dependencies(section_name, sections_to_be_deleted, pluggable_dependencies)
# remove pluggable sections not in algorithm dependency list
for name in sections_to_be_deleted:
if name in self._sections:
del self._sections[name]
# reorder sections
self._sections = self._order_sections(self._sections)
def _update_dependencies(self, section_name, sections_to_be_deleted, pluggable_dependencies):
# remove dependency pluggable type from sections to be deleted
if section_name in sections_to_be_deleted:
sections_to_be_deleted.remove(section_name)
        # update sections with dependencies recursively
for pluggable_type_dict in pluggable_dependencies:
pluggable_type = pluggable_type_dict.get('pluggable_type')
if pluggable_type is None:
continue
pluggable_name = None
pluggable_defaults = pluggable_type_dict.get('default')
if pluggable_defaults is not None:
pluggable_name = pluggable_defaults.get(JSONSchema.NAME)
if pluggable_name is not None:
if pluggable_type not in self._sections:
self.set_section_property(pluggable_type, JSONSchema.NAME, pluggable_name)
# update default values for new dependency pluggable types
default_properties = self.get_section_default_properties(pluggable_type)
if isinstance(default_properties, dict):
self.set_section_properties(pluggable_type, default_properties)
config = get_pluggable_configuration(pluggable_type, pluggable_name)
self._update_dependencies(pluggable_type, sections_to_be_deleted, config.get('depends', []))
@staticmethod
def _set_section_property(sections, section_name, property_name, value, types):
"""
Args:
section_name (str): the name of the section, case insensitive
property_name (str): the property name in the section
value : property value
types : schema types
"""
section_name = JSONSchema.format_section_name(section_name)
property_name = JSONSchema.format_property_name(property_name)
value = JSONSchema.get_value(value, types)
if section_name not in sections:
sections[section_name] = OrderedDict()
# name should come first
if JSONSchema.NAME == property_name and property_name not in sections[section_name]:
new_dict = OrderedDict([(property_name, value)])
new_dict.update(sections[section_name])
sections[section_name] = new_dict
else:
sections[section_name][property_name] = value
def delete_section_property(self, section_name, property_name):
"""
Args:
section_name (str): the name of the section, case insensitive
property_name (str): the property name in the section
"""
section_name = JSONSchema.format_section_name(section_name)
property_name = JSONSchema.format_property_name(property_name)
if section_name in self._sections and property_name in self._sections[section_name]:
del self._sections[section_name][property_name]
def delete_section_properties(self, section_name):
"""
Args:
section_name (str): the name of the section, case insensitive
"""
section_name = JSONSchema.format_section_name(section_name).lower()
if section_name in self._sections:
del self._sections[section_name]
def set_section_data(self, section_name, value):
"""
Sets a section data.
Args:
section_name (str): the name of the section, case insensitive
value : value to set
"""
section_name = JSONSchema.format_section_name(section_name)
self._sections[section_name] = self._json_schema.check_section_value(section_name, value)
def get_section_names(self):
"""Return all the names of the sections."""
return list(self._sections.keys())
|
"""
All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
its licensors.
For complete copyright and license terms please see the LICENSE at the root of this
distribution (the "License"). All use of this software is governed by the License,
or, if provided, by the license below or the license accompanying this file. Do not
remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
# setup path
import azlmbr.object
import azlmbr.math as math
# returns a Vector3 of cartesian coordinates from spherical coordinates
# phi and theta are in degrees
def spherical_to_cartesian(phi, theta, dist=1.0):
phi = math.Math_DegToRad(phi)
theta = math.Math_DegToRad(theta)
x = float(dist) * float(math.Math_Sin(phi)) * float(math.Math_Cos(theta))
y = float(dist) * float(math.Math_Sin(phi)) * float(math.Math_Sin(theta))
z = float(dist) * float(math.Math_Cos(phi))
return math.Vector3(x, y, z)
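# e.g. spherical_to_cartesian(90.0, 0.0) ~= Vector3(1.0, 0.0, 0.0)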
# returns the midpoint of two Cartesian points, normalized and then scaled to length r
def normalize_midpoint(pos1, pos2, r=1.0):
pos = pos1.Add(pos2).MultiplyFloat(0.5)
pos.Normalize()
return pos.MultiplyFloat(r)
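# e.g. normalize_midpoint(Vector3(1, 0, 0), Vector3(0, 1, 0)) ~= Vector3(0.707, 0.707, 0.0)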
|
import urllib.request
import json
from calc import calc
URL = ("https://data.nasa.gov/resource/y77d-th95.json")
class MeteoriteStats:
def get_data(self):
with urllib.request.urlopen(URL) as url:
return json.loads(url.read().decode())
def average_mass(self, data):
c = calc.Calc()
masses = [float(d['mass']) for d in data if 'mass' in d]
return c.avg(masses)
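# A minimal usage sketch (hits the live NASA open-data endpoint, so it assumes
# network access):
#   stats = MeteoriteStats()
#   print(stats.average_mass(stats.get_data()))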
|
# Generated by Django 2.1.7 on 2019-03-17 08:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('paste', '0004_auto_20190315_1134'),
]
operations = [
migrations.AlterField(
model_name='paste',
name='lang',
field=models.CharField(choices=[('c', 'C'), ('cpp', 'C++11'), ('cc14', 'C++14'), ('cc17', 'C++17'), ('py2', 'Python 2'), ('python', 'Python 3'), ('pypy', 'PyPy'), ('pypy3', 'PyPy 3'), ('java', 'Java 8'), ('pas', 'Pascal'), ('text', 'Text')], default='cpp', max_length=12, verbose_name='语言'),
),
]
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetWebApplicationFirewallPolicyResult',
'AwaitableGetWebApplicationFirewallPolicyResult',
'get_web_application_firewall_policy',
]
@pulumi.output_type
class GetWebApplicationFirewallPolicyResult:
"""
Defines web application firewall policy.
"""
def __init__(__self__, application_gateways=None, custom_rules=None, etag=None, http_listeners=None, id=None, location=None, managed_rules=None, name=None, path_based_rules=None, policy_settings=None, provisioning_state=None, resource_state=None, tags=None, type=None):
if application_gateways and not isinstance(application_gateways, list):
raise TypeError("Expected argument 'application_gateways' to be a list")
pulumi.set(__self__, "application_gateways", application_gateways)
if custom_rules and not isinstance(custom_rules, list):
raise TypeError("Expected argument 'custom_rules' to be a list")
pulumi.set(__self__, "custom_rules", custom_rules)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if http_listeners and not isinstance(http_listeners, list):
raise TypeError("Expected argument 'http_listeners' to be a list")
pulumi.set(__self__, "http_listeners", http_listeners)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if managed_rules and not isinstance(managed_rules, dict):
raise TypeError("Expected argument 'managed_rules' to be a dict")
pulumi.set(__self__, "managed_rules", managed_rules)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if path_based_rules and not isinstance(path_based_rules, list):
raise TypeError("Expected argument 'path_based_rules' to be a list")
pulumi.set(__self__, "path_based_rules", path_based_rules)
if policy_settings and not isinstance(policy_settings, dict):
raise TypeError("Expected argument 'policy_settings' to be a dict")
pulumi.set(__self__, "policy_settings", policy_settings)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_state and not isinstance(resource_state, str):
raise TypeError("Expected argument 'resource_state' to be a str")
pulumi.set(__self__, "resource_state", resource_state)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="applicationGateways")
def application_gateways(self) -> Sequence['outputs.ApplicationGatewayResponse']:
"""
A collection of references to application gateways.
"""
return pulumi.get(self, "application_gateways")
@property
@pulumi.getter(name="customRules")
def custom_rules(self) -> Optional[Sequence['outputs.WebApplicationFirewallCustomRuleResponse']]:
"""
The custom rules inside the policy.
"""
return pulumi.get(self, "custom_rules")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="httpListeners")
def http_listeners(self) -> Sequence['outputs.SubResourceResponse']:
"""
A collection of references to application gateway http listeners.
"""
return pulumi.get(self, "http_listeners")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="managedRules")
def managed_rules(self) -> 'outputs.ManagedRulesDefinitionResponse':
"""
Describes the managedRules structure.
"""
return pulumi.get(self, "managed_rules")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="pathBasedRules")
def path_based_rules(self) -> Sequence['outputs.SubResourceResponse']:
"""
A collection of references to application gateway path rules.
"""
return pulumi.get(self, "path_based_rules")
@property
@pulumi.getter(name="policySettings")
def policy_settings(self) -> Optional['outputs.PolicySettingsResponse']:
"""
The PolicySettings for policy.
"""
return pulumi.get(self, "policy_settings")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the web application firewall policy resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceState")
def resource_state(self) -> str:
"""
Resource status of the policy.
"""
return pulumi.get(self, "resource_state")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetWebApplicationFirewallPolicyResult(GetWebApplicationFirewallPolicyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWebApplicationFirewallPolicyResult(
application_gateways=self.application_gateways,
custom_rules=self.custom_rules,
etag=self.etag,
http_listeners=self.http_listeners,
id=self.id,
location=self.location,
managed_rules=self.managed_rules,
name=self.name,
path_based_rules=self.path_based_rules,
policy_settings=self.policy_settings,
provisioning_state=self.provisioning_state,
resource_state=self.resource_state,
tags=self.tags,
type=self.type)
def get_web_application_firewall_policy(policy_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWebApplicationFirewallPolicyResult:
"""
Defines web application firewall policy.
:param str policy_name: The name of the policy.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['policyName'] = policy_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20200701:getWebApplicationFirewallPolicy', __args__, opts=opts, typ=GetWebApplicationFirewallPolicyResult).value
return AwaitableGetWebApplicationFirewallPolicyResult(
application_gateways=__ret__.application_gateways,
custom_rules=__ret__.custom_rules,
etag=__ret__.etag,
http_listeners=__ret__.http_listeners,
id=__ret__.id,
location=__ret__.location,
managed_rules=__ret__.managed_rules,
name=__ret__.name,
path_based_rules=__ret__.path_based_rules,
policy_settings=__ret__.policy_settings,
provisioning_state=__ret__.provisioning_state,
resource_state=__ret__.resource_state,
tags=__ret__.tags,
type=__ret__.type)
|
# Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import re
from reframe.core.exceptions import ReframeError
def re_compile(patt):
try:
return re.compile(patt)
except re.error:
raise ReframeError(f'invalid regex: {patt!r}')
def have_name(patt):
regex = re_compile(patt)
def _fn(case):
return regex.match(case.check.name)
return _fn
def have_not_name(patt):
def _fn(case):
return not have_name(patt)(case)
return _fn
def have_tag(patt):
regex = re_compile(patt)
def _fn(case):
return any(regex.match(p) for p in case.check.tags)
return _fn
def have_not_tag(patt):
def _fn(case):
return not have_tag(patt)(case)
return _fn
def have_maintainer(patt):
regex = re_compile(patt)
def _fn(case):
return any(regex.match(p) for p in case.check.maintainers)
return _fn
def have_gpu_only():
def _fn(case):
return case.check.num_gpus_per_node > 0
return _fn
def have_cpu_only():
def _fn(case):
return case.check.num_gpus_per_node == 0
return _fn
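# A minimal sketch of composing these predicates (assumes `cases` is an
# iterable of test cases exposing `.check`, as in the functions above):
#   keep = [c for c in cases if have_tag('gpu')(c) and have_not_name('broken.*')(c)]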
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from .. import utilities, tables
class Repository(pulumi.CustomResource):
arn: pulumi.Output[str]
"""
The ARN of the repository
"""
clone_url_http: pulumi.Output[str]
"""
The URL to use for cloning the repository over HTTPS.
"""
clone_url_ssh: pulumi.Output[str]
"""
The URL to use for cloning the repository over SSH.
"""
default_branch: pulumi.Output[str]
"""
The default branch of the repository. The branch specified here needs to exist.
"""
description: pulumi.Output[str]
"""
The description of the repository. This needs to be less than 1000 characters
"""
repository_id: pulumi.Output[str]
"""
The ID of the repository
"""
repository_name: pulumi.Output[str]
"""
The name for the repository. This needs to be less than 100 characters.
"""
def __init__(__self__, resource_name, opts=None, default_branch=None, description=None, repository_name=None, __name__=None, __opts__=None):
"""
Provides a CodeCommit Repository Resource.
        > **NOTE on CodeCommit Availability**: CodeCommit is not yet rolled out
        in all regions; the available regions are listed in
        [the AWS Docs](https://docs.aws.amazon.com/general/latest/gr/rande.html#codecommit_region).
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] default_branch: The default branch of the repository. The branch specified here needs to exist.
:param pulumi.Input[str] description: The description of the repository. This needs to be less than 1000 characters
:param pulumi.Input[str] repository_name: The name for the repository. This needs to be less than 100 characters.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if not resource_name:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(resource_name, str):
raise TypeError('Expected resource name to be a string')
if opts and not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
__props__['default_branch'] = default_branch
__props__['description'] = description
if repository_name is None:
raise TypeError("Missing required property 'repository_name'")
__props__['repository_name'] = repository_name
__props__['arn'] = None
__props__['clone_url_http'] = None
__props__['clone_url_ssh'] = None
__props__['repository_id'] = None
super(Repository, __self__).__init__(
'aws:codecommit/repository:Repository',
resource_name,
__props__,
opts)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import hashlib
import imp
import os
import shutil
import sys
import tarfile
from testrunner.local import statusfile
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.objects import testcase
TEST_262_HARNESS_FILES = ["sta.js", "assert.js"]
TEST_262_SUITE_PATH = ["data", "test"]
TEST_262_HARNESS_PATH = ["data", "harness"]
TEST_262_TOOLS_PATH = ["data", "tools", "packaging"]
ALL_VARIANT_FLAGS_STRICT = dict(
(v, [flags + ["--use-strict"] for flags in flag_sets])
for v, flag_sets in testsuite.ALL_VARIANT_FLAGS.iteritems()
)
FAST_VARIANT_FLAGS_STRICT = dict(
(v, [flags + ["--use-strict"] for flags in flag_sets])
for v, flag_sets in testsuite.FAST_VARIANT_FLAGS.iteritems()
)
ALL_VARIANT_FLAGS_BOTH = dict(
(v, [flags for flags in testsuite.ALL_VARIANT_FLAGS[v] +
ALL_VARIANT_FLAGS_STRICT[v]])
for v in testsuite.ALL_VARIANT_FLAGS
)
FAST_VARIANT_FLAGS_BOTH = dict(
(v, [flags for flags in testsuite.FAST_VARIANT_FLAGS[v] +
FAST_VARIANT_FLAGS_STRICT[v]])
for v in testsuite.FAST_VARIANT_FLAGS
)
ALL_VARIANTS = {
'nostrict': testsuite.ALL_VARIANT_FLAGS,
'strict': ALL_VARIANT_FLAGS_STRICT,
'both': ALL_VARIANT_FLAGS_BOTH,
}
FAST_VARIANTS = {
'nostrict': testsuite.FAST_VARIANT_FLAGS,
'strict': FAST_VARIANT_FLAGS_STRICT,
'both': FAST_VARIANT_FLAGS_BOTH,
}
class Test262VariantGenerator(testsuite.VariantGenerator):
def GetFlagSets(self, testcase, variant):
if testcase.outcomes and statusfile.OnlyFastVariants(testcase.outcomes):
variant_flags = FAST_VARIANTS
else:
variant_flags = ALL_VARIANTS
test_record = self.suite.GetTestRecord(testcase)
if "noStrict" in test_record:
return variant_flags["nostrict"][variant]
if "onlyStrict" in test_record:
return variant_flags["strict"][variant]
return variant_flags["both"][variant]
class Test262TestSuite(testsuite.TestSuite):
def __init__(self, name, root):
super(Test262TestSuite, self).__init__(name, root)
self.testroot = os.path.join(self.root, *TEST_262_SUITE_PATH)
self.harnesspath = os.path.join(self.root, *TEST_262_HARNESS_PATH)
self.harness = [os.path.join(self.harnesspath, f)
for f in TEST_262_HARNESS_FILES]
self.harness += [os.path.join(self.root, "harness-adapt.js")]
self.ignition_script_filter = "--ignition-script-filter=" + self.testroot
self.ParseTestRecord = None
def ListTests(self, context):
tests = []
for dirname, dirs, files in os.walk(self.testroot):
for dotted in [x for x in dirs if x.startswith(".")]:
dirs.remove(dotted)
if context.noi18n and "intl402" in dirs:
dirs.remove("intl402")
dirs.sort()
files.sort()
for filename in files:
if filename.endswith(".js"):
fullpath = os.path.join(dirname, filename)
relpath = fullpath[len(self.testroot) + 1 : -3]
testname = relpath.replace(os.path.sep, "/")
case = testcase.TestCase(self, testname)
tests.append(case)
return tests
def GetFlagsForTestCase(self, testcase, context):
# TODO(rmcilroy) Remove ignition filter modification once ignition can
# support the test262 test harness code.
flags = testcase.flags
if '--ignition' in flags:
flags += [self.ignition_script_filter]
return (flags + context.mode_flags + self.harness +
self.GetIncludesForTest(testcase) + ["--harmony"] +
[os.path.join(self.testroot, testcase.path + ".js")])
def _VariantGeneratorFactory(self):
return Test262VariantGenerator
def LoadParseTestRecord(self):
if not self.ParseTestRecord:
root = os.path.join(self.root, *TEST_262_TOOLS_PATH)
f = None
try:
(f, pathname, description) = imp.find_module("parseTestRecord", [root])
module = imp.load_module("parseTestRecord", f, pathname, description)
self.ParseTestRecord = module.parseTestRecord
except:
raise ImportError("Cannot load parseTestRecord; you may need to "
"--download-data for test262")
finally:
if f:
f.close()
return self.ParseTestRecord
def GetTestRecord(self, testcase):
if not hasattr(testcase, "test_record"):
ParseTestRecord = self.LoadParseTestRecord()
testcase.test_record = ParseTestRecord(self.GetSourceForTest(testcase),
testcase.path)
return testcase.test_record
def GetIncludesForTest(self, testcase):
test_record = self.GetTestRecord(testcase)
if "includes" in test_record:
includes = [os.path.join(self.harnesspath, f)
for f in test_record["includes"]]
else:
includes = []
return includes
def GetSourceForTest(self, testcase):
filename = os.path.join(self.testroot, testcase.path + ".js")
with open(filename) as f:
return f.read()
def IsNegativeTest(self, testcase):
test_record = self.GetTestRecord(testcase)
return "negative" in test_record
def IsFailureOutput(self, output, testpath):
if output.exit_code != 0:
return True
return "FAILED!" in output.stdout
def HasUnexpectedOutput(self, testcase):
outcome = self.GetOutcome(testcase)
if (statusfile.FAIL_SLOPPY in testcase.outcomes and
"--use-strict" not in testcase.flags):
return outcome != statusfile.FAIL
    return outcome not in (testcase.outcomes or [statusfile.PASS])
def DownloadData(self):
print "Test262 download is deprecated. It's part of DEPS."
# Clean up old directories and archive files.
directory_old_name = os.path.join(self.root, "data.old")
if os.path.exists(directory_old_name):
shutil.rmtree(directory_old_name)
archive_files = [f for f in os.listdir(self.root)
if f.startswith("tc39-test262-")]
if len(archive_files) > 0:
print "Clobber outdated test archives ..."
for f in archive_files:
os.remove(os.path.join(self.root, f))
def GetSuite(name, root):
return Test262TestSuite(name, root)
|
import asyncio, ssl, sys
import mrworkserver, mrpacker
num = 0
users = {}
async def on_start(ws):
print("on_start")
ws.gather = asyncio.ensure_future( gather(ws) )
async def on_stop(ws):
print("on_stop, num =",num)
async def callback(ws, msgs):
  global num  # num is module-level; without this, `num += 1` raises UnboundLocalError
  for m in msgs:
    num += 1
    print("Processing", num)
def setcb(ws, k, v):
users[k] = mrpacker.pack(v)
def fetchcb(ws, o):
return mrpacker.pack( {"name":"mark"} )
async def gather(ws):
while True:
await asyncio.sleep(5)
ws.process_messages()
# In gather mode you must periodically call ws.process_messages to drain the
# collected messages; with gather=False your callback is invoked as soon as
# messages are received
ws = mrworkserver.WorkServer(gather=True,callback=callback,fetch_callback=fetchcb)
ws.setcb = setcb
ws.on_start = on_start
ws.on_stop = on_stop
port = 7100
print (sys.argv)
if len(sys.argv) == 2:
port = int(sys.argv[1])
#sc = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
#sc.load_cert_chain(certfile='cert/server.crt', keyfile='cert/server.key')
ws.run(host="127.0.0.1",port=port) #ssl=sc)
|
import numpy as np
import os
from mpEntropy import mpSystem
import matplotlib as mpl
mpl.use('Agg')  # select the non-interactive backend before pyplot is imported
from matplotlib.pyplot import cm
import matplotlib.pyplot as plt
from scipy.signal import savgol_filter
# This is a workaround until scipy fixes the issue
import warnings
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
# Load sysVar
sysVar = mpSystem("../interact_0.ini", plotOnly=True)
# Create plot folder
pltfolder = "./epsplots/"
if not os.path.exists(pltfolder):
os.mkdir(pltfolder)
print("Plotting", end='')
# styles and stuff
avgstyle = 'dashed'
avgsize = 0.6
expectstyle = 'solid'
expectsize = 1
legend_size = 10
font_size = 10
# https://scipy.github.io/old-wiki/pages/Cookbook/Matplotlib/LaTeX_Examples.html
fig_width_pt = 246.0 # Get this from LaTeX using \showthe\columnwidth
inches_per_pt = 1.0 / 72.27 # Convert pt to inches
golden_mean = (np.sqrt(5) - 1.0) / 2.0 # Aesthetic ratio
fig_width = fig_width_pt * inches_per_pt # width in inches
fig_height = fig_width * golden_mean # height in inches
fig_size = [fig_width, fig_height]
# padding in units of fontsize
padding = 0.32
params = {
'axes.labelsize': 10,
'font.size': 10,
'legend.fontsize': 10,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'lines.linewidth': 0.3,
'figure.figsize': fig_size,
'legend.frameon': False,
'legend.loc': 'best',
'mathtext.default': 'rm' # see http://matplotlib.org/users/customizing.html
}
plt.rcParams['agg.path.chunksize'] = 0
plt.rcParams.update(params)
plt.rc('text', usetex=True)
plt.rc('font', **{'family': 'sans-serif', 'sans-serif': ['Arial']})
loavgpercent = sysVar.plotLoAvgPerc # percentage of time evolution to start averaging
loavgind = int(loavgpercent * sysVar.dataPoints) # index to start at when calculating average and stddev
loavgtime = np.round(loavgpercent * (sysVar.deltaT * sysVar.steps * sysVar.plotTimeScale), 2)
if sysVar.boolPlotAverages:
print(' with averaging from Jt=%.2f' % loavgtime, end='')
fwidth = sysVar.plotSavgolFrame
ford = sysVar.plotSavgolOrder
bool_eigenvalues = False # plot eigenvalues and decomposition?
if os.path.isfile('../data/hamiltonian_eigvals.txt'):
bool_eigenvalues = True
energy_array = np.loadtxt('../data/hamiltonian_eigvals.txt')
bool_total = False # plot total energy?
if os.path.isfile('../data/total_energy.txt'):
bool_total = True
total_energy_array = np.loadtxt('../data/total_energy.txt')
elif os.path.isfile('../data/energy.txt'): # old file path
bool_total = True
total_energy_array = np.loadtxt('../data/energy.txt')
# eigenvalues and spectral decomposition
if bool_eigenvalues:
fig, ax1 = plt.subplots()
energy_markersize = 0.7
energy_barsize = 0.06
if sysVar.dim != 1:
energy_markersize *= 2.0 / np.log10(sysVar.dim)
energy_barsize *= (4.0 / np.log10(sysVar.dim))
index_maximum = np.argmax(energy_array[:, 2])
ax1.plot(energy_array[:, 0], energy_array[:, 1], linestyle='none', marker='.', markersize=energy_markersize,
markeredgewidth=0, color='blue')
ax1.set_ylabel(r'E / J')
ax1.set_xlabel(r'Index $n$')
plt.grid(False)
ax1.set_xlim(xmin=-(len(energy_array[:, 0]) * (5.0 / 100)))
tmp_ticks = list(ax1.get_xticks())
tmp_ticks.pop(2)
ax1.set_xticks(tmp_ticks + [int(index_maximum)])
plt.tight_layout(padding)
if np.shape(energy_array)[0] > 2:
ax2 = ax1.twinx()
ax2.bar(energy_array[:, 0], energy_array[:, 2], alpha=0.8, color='red', width=energy_barsize, align='center')
ax2.set_ylabel(r'$|c_n|^2$')
# inlay with small region around maximum
ax_inlay = plt.axes([0.40, 0.65, 0.25, 0.28])
index_range = int(np.floor(sysVar.dim / 200))
index_lo = index_maximum - index_range
index_hi = index_maximum + index_range
decomp_max = np.max(energy_array[index_lo:index_hi, 2])
decomp_min = np.min(energy_array[index_lo:index_hi, 2]) / decomp_max
ax_inlay.bar(energy_array[index_lo:index_hi, 0], energy_array[index_lo:index_hi, 2] / decomp_max, color='red',
width=energy_barsize * 10, align='center')
ax_inlay.set_xticks([energy_array[index_lo, 0], energy_array[index_hi, 0]])
ax_inlay.set_yticks([np.round(decomp_min), 1])
###
plt.savefig(pltfolder + 'energy_eigenvalues.eps', format='eps', dpi=1000)
plt.clf()
print('.', end='', flush=True)
# Eigenvalue decomposition with energy x-axis
for i in range(0, len(energy_array)):
if energy_array[i, 2]/np.max(energy_array[:, 2]) > 0.01:
lo_en_ind = i
break
    for i in range(1, len(energy_array) + 1):  # scan from the last eigenvalue backwards
        if energy_array[-i, 2]/np.max(energy_array[:, 2]) > 0.01:
            hi_en_ind = len(energy_array) - i + 1  # +1 since the slice end is exclusive
            break
index_maximum = np.argmax(energy_array[:, 2])
index_range = int(np.floor(sysVar.dim / 200))
index_lo = index_maximum - index_range
index_hi = index_maximum + index_range
decomp_max = np.max(energy_array[index_lo:index_hi, 2])
decomp_min = np.min(energy_array[index_lo:index_hi, 2]) / decomp_max
plt.bar(energy_array[lo_en_ind:hi_en_ind, 1], energy_array[lo_en_ind:hi_en_ind, 2]/decomp_max, alpha=0.8,
color='red', width=energy_barsize, align='center')
plt.xlabel(r'E / J')
plt.ylabel(r'$|c_E|^2 / \max(|c_E|^2)$')
plt.tight_layout(padding)
ax_inlay = plt.axes([0.60, 0.65, 0.3, 0.3])
ax_inlay.bar(energy_array[index_lo:index_hi, 1], energy_array[index_lo:index_hi, 2] / decomp_max, color='red',
width=energy_barsize, align='center')
ax_inlay.set_xticks([np.round(energy_array[index_lo, 1],1), np.round(energy_array[index_hi, 1],1)])
ax_inlay.set_yticks([np.round(decomp_min), 1])
###
plt.savefig(pltfolder + 'energy_decomposition.eps', format='eps', dpi=1000)
plt.clf()
print('.', end='', flush=True)
# Total energy
if bool_total:
en0 = total_energy_array[0, 1]
total_energy_array[:, 1] -= en0
en0_magnitude = np.floor(np.log10(np.abs(en0)))
en0 /= np.power(10, en0_magnitude)
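    # Worked example: en0 = -123.4 gives en0_magnitude = 2 and a rescaled
    # en0 of -1.234, which the figtext below renders as "E_0 / J = -1.23 * 10^2".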
magnitude = np.floor(np.log10(np.max(np.abs(total_energy_array[:, 1]))))
plt.plot(total_energy_array[:, 0] * sysVar.plotTimeScale, total_energy_array[:, 1] / (np.power(10, magnitude)))
plt.figtext(0.9, 0.85, r'$E_0 / J = %.2f \cdot 10^{%i}$' % (en0, en0_magnitude), horizontalalignment='right',
verticalalignment='bottom')
plt.ylabel(r'$E_{tot} - E_0 / (J \cdot 10^{%i})$' % magnitude)
plt.xlabel(r'$J\,t$')
plt.grid(False)
plt.tight_layout(padding)
###
plt.savefig(pltfolder + 'energy_total.eps', format='eps', dpi=1000)
plt.clf()
print('.', end='', flush=True)
print(" done!")
|
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cloudify import ctx
from cloudify import exceptions as cfy_exc
from cloudify.decorators import operation
from vcloud_plugin_common import (with_vca_client, wait_for_task,
get_vcloud_config, get_mandatory)
import collections
from network_plugin import (check_ip, is_valid_ip_range, is_separate_ranges,
is_ips_in_same_subnet, save_gateway_configuration,
get_network_name, is_network_exists)
VCLOUD_NETWORK_NAME = 'vcloud_network_name'
ADD_POOL = 1
DELETE_POOL = 2
@operation
@with_vca_client
def create(vca_client, **kwargs):
"""
create new vcloud air network, e.g.:
{
'use_external_resource': False,
'resource_id': 'secret_network',
'network': {
'dhcp': {
'dhcp_range': "10.1.1.128-10.1.1.255"
},
'static_range': "10.1.1.2-10.1.1.127",
'gateway_ip': "10.1.1.1",
'edge_gateway': 'gateway',
'name': 'secret_network',
"netmask": '255.255.255.0',
"dns": ["8.8.8.8", "4.4.4.4"]
}
}
"""
vdc_name = get_vcloud_config()['vdc']
if ctx.node.properties['use_external_resource']:
network_name = ctx.node.properties['resource_id']
if not is_network_exists(vca_client, network_name):
raise cfy_exc.NonRecoverableError(
"Can't find external resource: {0}".format(network_name))
ctx.instance.runtime_properties[VCLOUD_NETWORK_NAME] = network_name
ctx.logger.info(
"External resource {0} has been used".format(network_name))
return
net_prop = ctx.node.properties["network"]
network_name = get_network_name(ctx.node.properties)
if network_name in _get_network_list(vca_client,
get_vcloud_config()['vdc']):
raise cfy_exc.NonRecoverableError(
"Network {0} already exists, but parameter "
"'use_external_resource' is 'false' or absent"
.format(network_name))
    ip = _split_addresses(net_prop['static_range'])
gateway_name = net_prop['edge_gateway']
if not vca_client.get_gateway(vdc_name, gateway_name):
raise cfy_exc.NonRecoverableError(
"Gateway {0} not found".format(gateway_name))
start_address = ip.start
end_address = ip.end
gateway_ip = net_prop["gateway_ip"]
netmask = net_prop["netmask"]
dns1 = ""
dns2 = ""
dns_list = net_prop.get("dns")
if dns_list:
dns1 = dns_list[0]
if len(dns_list) > 1:
dns2 = dns_list[1]
dns_suffix = net_prop.get("dns_suffix")
success, result = vca_client.create_vdc_network(
vdc_name, network_name, gateway_name, start_address,
end_address, gateway_ip, netmask, dns1, dns2, dns_suffix)
if success:
ctx.logger.info("Network {0} has been successfully created."
.format(network_name))
else:
raise cfy_exc.NonRecoverableError(
"Could not create network {0}: {1}".format(network_name, result))
wait_for_task(vca_client, result)
ctx.instance.runtime_properties[VCLOUD_NETWORK_NAME] = network_name
_dhcp_operation(vca_client, network_name, ADD_POOL)
@operation
@with_vca_client
def delete(vca_client, **kwargs):
"""
delete vcloud air network
"""
if ctx.node.properties['use_external_resource'] is True:
del ctx.instance.runtime_properties[VCLOUD_NETWORK_NAME]
ctx.logger.info("Network was not deleted - external resource has"
" been used")
return
network_name = get_network_name(ctx.node.properties)
_dhcp_operation(vca_client, network_name, DELETE_POOL)
success, task = vca_client.delete_vdc_network(
get_vcloud_config()['vdc'], network_name)
if success:
        ctx.logger.info(
            "Network {0} has been successfully deleted.".format(network_name))
else:
raise cfy_exc.NonRecoverableError(
"Could not delete network {0}".format(network_name))
wait_for_task(vca_client, task)
@operation
@with_vca_client
def creation_validation(vca_client, **kwargs):
"""
check network description from node description
"""
network_name = get_network_name(ctx.node.properties)
ctx.logger.info("Validation cloudify.vcloud.nodes.Network node: {0}"
.format(network_name))
if is_network_exists(vca_client, network_name):
if ctx.node.properties.get('use_external_resource'):
            # TODO: check that the default gateway exists
            return
        else:
            raise cfy_exc.NonRecoverableError(
                "Network already exists: {0}".format(network_name))
net_prop = get_mandatory(ctx.node.properties, "network")
gateway_name = get_mandatory(net_prop, 'edge_gateway')
if not vca_client.get_gateway(get_vcloud_config()['vdc'], gateway_name):
raise cfy_exc.NonRecoverableError(
"Gateway {0} not found".format(gateway_name))
    static_ip = _split_addresses(get_mandatory(net_prop, 'static_range'))
check_ip(static_ip.start)
check_ip(static_ip.end)
dns_list = net_prop.get("dns")
if dns_list:
for ip in dns_list:
check_ip(ip)
gateway_ip = check_ip(get_mandatory(net_prop, "gateway_ip"))
netmask = check_ip(get_mandatory(net_prop, "netmask"))
ips = [gateway_ip, static_ip.start, static_ip.end]
dhcp = net_prop.get("dhcp")
if dhcp:
dhcp_range = get_mandatory(net_prop["dhcp"], "dhcp_range")
        dhcp_ip = _split_addresses(dhcp_range)
        if not is_separate_ranges(static_ip, dhcp_ip):
            raise cfy_exc.NonRecoverableError(
                "static_range and dhcp_range overlap.")
ips.extend([dhcp_ip.start, dhcp_ip.end])
    if not is_ips_in_same_subnet(ips, netmask):
        raise cfy_exc.NonRecoverableError(
            "IP addresses are in different subnets.")
def _dhcp_operation(vca_client, network_name, operation):
    """
    Update the DHCP settings for a network.
    """
dhcp_settings = ctx.node.properties['network'].get('dhcp')
if dhcp_settings is None:
return
gateway_name = ctx.node.properties["network"]['edge_gateway']
gateway = vca_client.get_gateway(get_vcloud_config()['vdc'], gateway_name)
if not gateway:
raise cfy_exc.NonRecoverableError(
"Gateway {0} not found!".format(gateway_name))
    if operation == ADD_POOL:
        ip = _split_addresses(dhcp_settings['dhcp_range'])
        low_ip_address = check_ip(ip.start)
        high_ip_address = check_ip(ip.end)
        default_lease = dhcp_settings.get('default_lease')
        max_lease = dhcp_settings.get('max_lease')
        gateway.add_dhcp_pool(network_name, low_ip_address, high_ip_address,
                              default_lease, max_lease)
        ctx.logger.info("DHCP rule successfully created for network {0}"
                        .format(network_name))
    if operation == DELETE_POOL:
        gateway.delete_dhcp_pool(network_name)
        ctx.logger.info("DHCP rule successfully deleted for network {0}"
                        .format(network_name))
if not save_gateway_configuration(gateway, vca_client):
return ctx.operation.retry(message='Waiting for gateway.',
retry_after=10)
def _split_addresses(address_range):
    """
    Split a network address range in the form 1.1.1.1-2.2.2.2 into a
    separate (start, end) tuple.
    """
    addresses = [ip.strip() for ip in address_range.split('-')]
    IPRange = collections.namedtuple('IPRange', 'start end')
    try:
        start = check_ip(addresses[0])
        end = check_ip(addresses[1])
        if not is_valid_ip_range(start, end):
            raise cfy_exc.NonRecoverableError(
                "Start address {0} is greater than end address: {1}"
                .format(start, end))
        return IPRange(start=start, end=end)
    except IndexError:
        raise cfy_exc.NonRecoverableError("Can't parse IP range: {0}"
                                          .format(address_range))
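# Illustrative example (assuming check_ip returns the validated address
# unchanged): _split_addresses("10.1.1.2-10.1.1.127") yields
# IPRange(start='10.1.1.2', end='10.1.1.127').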
def _get_network_list(vca_client, vdc_name):
    """
    List all available networks for the current vdc.
    """
vdc = vca_client.get_vdc(vdc_name)
if not vdc:
raise cfy_exc.NonRecoverableError(
"Vdc {0} not found.".format(vdc_name))
return [net.name for net in vdc.AvailableNetworks.Network]
|
'''
BSD Licence Copyright (c) 2016, Science & Technology Facilities Council (STFC)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the Science & Technology Facilities Council (STFC)
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Created on 5 May 2012
@author: Maurizio Nagni
'''
class OSParam(object):
    '''
    Describes a single OpenSearch query parameter: its HTTP name, the
    corresponding query-template term, and the namespace it belongs to.
    '''
def __init__(self, par_name, term_name, default=None, required=False,
namespace=None, namespace_prefix=None):
'''
Constructor
@param par_name: http parameter's name
@param term_name: query template's parameter name
@param default: the default value
@param required: is this parameter required? Default = false
@param namespace: the namespace where this parameter is defined
@param namespace_prefix: the prefix of the namespace where this
parameter is defined
'''
self.par_name = par_name
self.term_name = term_name
self.required = required
self.default = default
self.namespace = namespace
self.namespace_prefix = namespace_prefix
|
from __future__ import division, absolute_import, print_function
import sys
import time
from datetime import date
import numpy as np
from numpy.testing import (
run_module_suite, assert_, assert_equal, assert_allclose, assert_raises,
)
from numpy.lib._iotools import (
LineSplitter, NameValidator, StringConverter,
has_nested_fields, easy_dtype, flatten_dtype
)
class TestLineSplitter(object):
"Tests the LineSplitter class."
def test_no_delimiter(self):
"Test LineSplitter w/o delimiter"
strg = b" 1 2 3 4 5 # test"
test = LineSplitter()(strg)
assert_equal(test, [b'1', b'2', b'3', b'4', b'5'])
test = LineSplitter('')(strg)
assert_equal(test, [b'1', b'2', b'3', b'4', b'5'])
def test_space_delimiter(self):
"Test space delimiter"
strg = b" 1 2 3 4 5 # test"
test = LineSplitter(b' ')(strg)
assert_equal(test, [b'1', b'2', b'3', b'4', b'', b'5'])
test = LineSplitter(b' ')(strg)
assert_equal(test, [b'1 2 3 4', b'5'])
def test_tab_delimiter(self):
"Test tab delimiter"
strg = b" 1\t 2\t 3\t 4\t 5 6"
test = LineSplitter(b'\t')(strg)
assert_equal(test, [b'1', b'2', b'3', b'4', b'5 6'])
strg = b" 1 2\t 3 4\t 5 6"
test = LineSplitter(b'\t')(strg)
assert_equal(test, [b'1 2', b'3 4', b'5 6'])
def test_other_delimiter(self):
"Test LineSplitter on delimiter"
strg = b"1,2,3,4,,5"
test = LineSplitter(b',')(strg)
assert_equal(test, [b'1', b'2', b'3', b'4', b'', b'5'])
#
strg = b" 1,2,3,4,,5 # test"
test = LineSplitter(b',')(strg)
assert_equal(test, [b'1', b'2', b'3', b'4', b'', b'5'])
def test_constant_fixed_width(self):
"Test LineSplitter w/ fixed-width fields"
strg = b" 1 2 3 4 5 # test"
test = LineSplitter(3)(strg)
assert_equal(test, [b'1', b'2', b'3', b'4', b'', b'5', b''])
#
strg = b" 1 3 4 5 6# test"
test = LineSplitter(20)(strg)
assert_equal(test, [b'1 3 4 5 6'])
#
strg = b" 1 3 4 5 6# test"
test = LineSplitter(30)(strg)
assert_equal(test, [b'1 3 4 5 6'])
def test_variable_fixed_width(self):
strg = b" 1 3 4 5 6# test"
test = LineSplitter((3, 6, 6, 3))(strg)
assert_equal(test, [b'1', b'3', b'4 5', b'6'])
#
strg = b" 1 3 4 5 6# test"
test = LineSplitter((6, 6, 9))(strg)
assert_equal(test, [b'1', b'3 4', b'5 6'])
# -----------------------------------------------------------------------------
class TestNameValidator(object):
def test_case_sensitivity(self):
"Test case sensitivity"
names = ['A', 'a', 'b', 'c']
test = NameValidator().validate(names)
assert_equal(test, ['A', 'a', 'b', 'c'])
test = NameValidator(case_sensitive=False).validate(names)
assert_equal(test, ['A', 'A_1', 'B', 'C'])
test = NameValidator(case_sensitive='upper').validate(names)
assert_equal(test, ['A', 'A_1', 'B', 'C'])
test = NameValidator(case_sensitive='lower').validate(names)
assert_equal(test, ['a', 'a_1', 'b', 'c'])
# check exceptions
assert_raises(ValueError, NameValidator, case_sensitive='foobar')
def test_excludelist(self):
"Test excludelist"
names = ['dates', 'data', 'Other Data', 'mask']
validator = NameValidator(excludelist=['dates', 'data', 'mask'])
test = validator.validate(names)
assert_equal(test, ['dates_', 'data_', 'Other_Data', 'mask_'])
def test_missing_names(self):
"Test validate missing names"
namelist = ('a', 'b', 'c')
validator = NameValidator()
assert_equal(validator(namelist), ['a', 'b', 'c'])
namelist = ('', 'b', 'c')
assert_equal(validator(namelist), ['f0', 'b', 'c'])
namelist = ('a', 'b', '')
assert_equal(validator(namelist), ['a', 'b', 'f0'])
namelist = ('', 'f0', '')
assert_equal(validator(namelist), ['f1', 'f0', 'f2'])
def test_validate_nb_names(self):
"Test validate nb names"
namelist = ('a', 'b', 'c')
validator = NameValidator()
assert_equal(validator(namelist, nbfields=1), ('a',))
assert_equal(validator(namelist, nbfields=5, defaultfmt="g%i"),
['a', 'b', 'c', 'g0', 'g1'])
def test_validate_wo_names(self):
"Test validate no names"
namelist = None
validator = NameValidator()
assert_(validator(namelist) is None)
assert_equal(validator(namelist, nbfields=3), ['f0', 'f1', 'f2'])
# -----------------------------------------------------------------------------
def _bytes_to_date(s):
if sys.version_info[0] >= 3:
return date(*time.strptime(s.decode('latin1'), "%Y-%m-%d")[:3])
else:
return date(*time.strptime(s, "%Y-%m-%d")[:3])
class TestStringConverter(object):
"Test StringConverter"
def test_creation(self):
"Test creation of a StringConverter"
converter = StringConverter(int, -99999)
assert_equal(converter._status, 1)
assert_equal(converter.default, -99999)
def test_upgrade(self):
"Tests the upgrade method."
converter = StringConverter()
assert_equal(converter._status, 0)
# test int
assert_equal(converter.upgrade(b'0'), 0)
assert_equal(converter._status, 1)
# On systems where integer defaults to 32-bit, the statuses will be
# offset by one, so we check for this here.
import numpy.core.numeric as nx
status_offset = int(nx.dtype(nx.integer).itemsize < nx.dtype(nx.int64).itemsize)
# test int > 2**32
assert_equal(converter.upgrade(b'17179869184'), 17179869184)
assert_equal(converter._status, 1 + status_offset)
# test float
assert_allclose(converter.upgrade(b'0.'), 0.0)
assert_equal(converter._status, 2 + status_offset)
# test complex
assert_equal(converter.upgrade(b'0j'), complex('0j'))
assert_equal(converter._status, 3 + status_offset)
# test str
assert_equal(converter.upgrade(b'a'), b'a')
assert_equal(converter._status, len(converter._mapper) - 1)
def test_missing(self):
"Tests the use of missing values."
converter = StringConverter(missing_values=(b'missing',
b'missed'))
converter.upgrade(b'0')
assert_equal(converter(b'0'), 0)
assert_equal(converter(b''), converter.default)
assert_equal(converter(b'missing'), converter.default)
assert_equal(converter(b'missed'), converter.default)
        # conversion of an unrecognized missing value must fail
        assert_raises(ValueError, converter, b'miss')
def test_upgrademapper(self):
"Tests updatemapper"
dateparser = _bytes_to_date
StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1))
convert = StringConverter(dateparser, date(2000, 1, 1))
test = convert(b'2001-01-01')
assert_equal(test, date(2001, 1, 1))
test = convert(b'2009-01-01')
assert_equal(test, date(2009, 1, 1))
test = convert(b'')
assert_equal(test, date(2000, 1, 1))
def test_string_to_object(self):
"Make sure that string-to-object functions are properly recognized"
conv = StringConverter(_bytes_to_date)
assert_equal(conv._mapper[-2][0](0), 0j)
assert_(hasattr(conv, 'default'))
def test_keep_default(self):
"Make sure we don't lose an explicit default"
converter = StringConverter(None, missing_values=b'',
default=-999)
converter.upgrade(b'3.14159265')
assert_equal(converter.default, -999)
assert_equal(converter.type, np.dtype(float))
#
converter = StringConverter(
None, missing_values=b'', default=0)
converter.upgrade(b'3.14159265')
assert_equal(converter.default, 0)
assert_equal(converter.type, np.dtype(float))
def test_keep_default_zero(self):
"Check that we don't lose a default of 0"
converter = StringConverter(int, default=0,
missing_values=b"N/A")
assert_equal(converter.default, 0)
def test_keep_missing_values(self):
"Check that we're not losing missing values"
converter = StringConverter(int, default=0,
missing_values=b"N/A")
assert_equal(
converter.missing_values, set([b'', b'N/A']))
def test_int64_dtype(self):
"Check that int64 integer types can be specified"
converter = StringConverter(np.int64, default=0)
val = b"-9223372036854775807"
assert_(converter(val) == -9223372036854775807)
val = b"9223372036854775807"
assert_(converter(val) == 9223372036854775807)
def test_uint64_dtype(self):
"Check that uint64 integer types can be specified"
converter = StringConverter(np.uint64, default=0)
val = b"9223372043271415339"
assert_(converter(val) == 9223372043271415339)
class TestMiscFunctions(object):
def test_has_nested_dtype(self):
"Test has_nested_dtype"
ndtype = np.dtype(float)
assert_equal(has_nested_fields(ndtype), False)
ndtype = np.dtype([('A', '|S3'), ('B', float)])
assert_equal(has_nested_fields(ndtype), False)
ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])])
assert_equal(has_nested_fields(ndtype), True)
def test_easy_dtype(self):
"Test ndtype on dtypes"
# Simple case
ndtype = float
assert_equal(easy_dtype(ndtype), np.dtype(float))
# As string w/o names
ndtype = "i4, f8"
assert_equal(easy_dtype(ndtype),
np.dtype([('f0', "i4"), ('f1', "f8")]))
# As string w/o names but different default format
assert_equal(easy_dtype(ndtype, defaultfmt="field_%03i"),
np.dtype([('field_000', "i4"), ('field_001', "f8")]))
# As string w/ names
ndtype = "i4, f8"
assert_equal(easy_dtype(ndtype, names="a, b"),
np.dtype([('a', "i4"), ('b', "f8")]))
# As string w/ names (too many)
ndtype = "i4, f8"
assert_equal(easy_dtype(ndtype, names="a, b, c"),
np.dtype([('a', "i4"), ('b', "f8")]))
# As string w/ names (not enough)
ndtype = "i4, f8"
assert_equal(easy_dtype(ndtype, names=", b"),
np.dtype([('f0', "i4"), ('b', "f8")]))
# ... (with different default format)
assert_equal(easy_dtype(ndtype, names="a", defaultfmt="f%02i"),
np.dtype([('a', "i4"), ('f00', "f8")]))
# As list of tuples w/o names
ndtype = [('A', int), ('B', float)]
assert_equal(easy_dtype(ndtype), np.dtype([('A', int), ('B', float)]))
# As list of tuples w/ names
assert_equal(easy_dtype(ndtype, names="a,b"),
np.dtype([('a', int), ('b', float)]))
# As list of tuples w/ not enough names
assert_equal(easy_dtype(ndtype, names="a"),
np.dtype([('a', int), ('f0', float)]))
# As list of tuples w/ too many names
assert_equal(easy_dtype(ndtype, names="a,b,c"),
np.dtype([('a', int), ('b', float)]))
# As list of types w/o names
ndtype = (int, float, float)
assert_equal(easy_dtype(ndtype),
np.dtype([('f0', int), ('f1', float), ('f2', float)]))
# As list of types w names
ndtype = (int, float, float)
assert_equal(easy_dtype(ndtype, names="a, b, c"),
np.dtype([('a', int), ('b', float), ('c', float)]))
# As simple dtype w/ names
ndtype = np.dtype(float)
assert_equal(easy_dtype(ndtype, names="a, b, c"),
np.dtype([(_, float) for _ in ('a', 'b', 'c')]))
# As simple dtype w/o names (but multiple fields)
ndtype = np.dtype(float)
assert_equal(
easy_dtype(ndtype, names=['', '', ''], defaultfmt="f%02i"),
np.dtype([(_, float) for _ in ('f00', 'f01', 'f02')]))
def test_flatten_dtype(self):
"Testing flatten_dtype"
# Standard dtype
dt = np.dtype([("a", "f8"), ("b", "f8")])
dt_flat = flatten_dtype(dt)
assert_equal(dt_flat, [float, float])
# Recursive dtype
dt = np.dtype([("a", [("aa", '|S1'), ("ab", '|S2')]), ("b", int)])
dt_flat = flatten_dtype(dt)
assert_equal(dt_flat, [np.dtype('|S1'), np.dtype('|S2'), int])
# dtype with shaped fields
dt = np.dtype([("a", (float, 2)), ("b", (int, 3))])
dt_flat = flatten_dtype(dt)
assert_equal(dt_flat, [float, int])
dt_flat = flatten_dtype(dt, True)
assert_equal(dt_flat, [float] * 2 + [int] * 3)
# dtype w/ titles
dt = np.dtype([(("a", "A"), "f8"), (("b", "B"), "f8")])
dt_flat = flatten_dtype(dt)
assert_equal(dt_flat, [float, float])
if __name__ == "__main__":
run_module_suite()
|
from parser import OneOf, character, greedy, greedy1, pack, satisfy, token
def use(templateResult):
return templateResult[0][0]
def template():
return greedy(literal().orElse(substitution()))\
.map(lambda result: Concatenation(result))
def literal():
return greedy1(escapedCharacter().orElse(notAnOpeningBracket()))\
.map(lambda result: ''.join(result))\
.map(lambda verbatim: Literal(verbatim))
def escapedCharacter():
return OneOf([
token('\\\\').map(lambda _: '\\'),
token('\\{').map(lambda _: '{'),
])
def notAnOpeningBracket():
return satisfy(lambda character: character != '{')
def notAnClosingBracket():
return satisfy(lambda character: character != '}')
def substitution():
return pack(openingBracket(), identifier(), closingBracket())\
.map(lambda identifier: Variable(identifier))
def identifier():
    return greedy1(notAnClosingBracket())\
        .map(lambda result: ''.join(result))
def openingBracket():
return character('{')
def closingBracket():
return character('}')
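# Informal sketch of the grammar the combinators above implement:
#   template     := ( literal | substitution )*
#   literal      := ( '\\' | '\{' | <any char except '{'> )+
#   substitution := '{' <any char except '}'>+ '}'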
class Template:
def __call__(self, context):
raise NotImplementedError()
class ValueMissingFromContext(Exception):
pass
class Literal(Template):
def __init__(self, literal):
self.literal = literal
def __call__(self, context):
return self.literal
class Variable(Template):
def __init__(self, name):
self.name = name
def __call__(self, context):
if self.name in context:
return context[self.name]
else:
raise ValueMissingFromContext(f'expected \'{self.name}\' to be in context')
class Concatenation(Template):
def __init__(self, templates):
self.templates = templates
def __call__(self, context):
return ''.join([template(context) for template in self.templates])
if __name__ == '__main__':
context = { 'subject': 'World' }
assert Literal('Hello, World!')(context) == 'Hello, World!'
assert Variable('subject')(context) == 'World'
try:
Variable('missing from context')(context)
assert False
except ValueMissingFromContext as e:
assert True
assert Concatenation([])(context) == ''
assert Concatenation([Literal('Hello, '), Variable('subject'), Literal('!')])(context) == 'Hello, World!'
    assert use(literal()('Hello, World!'))(context) == 'Hello, World!'
    assert use(literal()('\\\\'))(context) == '\\'
    assert use(literal()('\\{'))(context) == '{'
    assert use(substitution()('{subject}'))(context) == 'World'
    assert use(template()('Hello, {subject}!'))(context) == 'Hello, World!'
|
"""
Unit tests
"""
from django.test import TestCase
from django.test.utils import override_settings
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import LANGUAGE_SESSION_KEY
from mapentity.factories import SuperUserFactory
from geotrek.authent.models import UserProfile
@override_settings(LOGIN_URL='/login/')
class LoginTestCase(TestCase):
def test_login(self):
response = self.client.get('/')
_next = (settings.FORCE_SCRIPT_NAME or '') + '/'
self.assertRedirects(response, '/login/?next=' + _next)
class UserProfileTest(TestCase):
def setUp(self):
self.user = SuperUserFactory(password="Bar")
success = self.client.login(username=self.user.username, password="Bar")
self.assertTrue(success)
def test_profile(self):
self.assertTrue(isinstance(self.user.profile, UserProfile))
self.assertEqual(self.user.profile.structure.name, settings.DEFAULT_STRUCTURE_NAME)
self.assertEqual(self.user.profile.language, settings.LANGUAGE_CODE)
def test_language(self):
response = self.client.get(reverse('core:path_list'))
self.assertEqual(200, response.status_code)
self.assertContains(response, "Logout")
# Change user lang
self.assertNotEqual(settings.LANGUAGE_CODE, "fr")
userprofile = UserProfile.objects.get(user=self.user)
userprofile.language = "fr"
userprofile.save()
self.assertEqual(self.user.profile.language, "fr")
# No effect if no logout
response = self.client.get(reverse('core:path_list'))
self.assertContains(response, "Logout")
self.client.logout()
self.client.login(username=self.user.username, password="Bar")
response = self.client.get(reverse('core:path_list'))
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], "fr")
self.assertContains(response, "Déconnexion")
def test_link_to_adminsite_visible_to_staff(self):
self.assertTrue(self.user.is_staff)
response = self.client.get(reverse('core:path_list'))
self.assertContains(response, '<a href="/admin/">Admin</a>')
def test_link_to_adminsite_not_visible_to_others(self):
self.user.is_staff = False
self.user.save()
self.client.login(username=self.user.username, password="Bar")
response = self.client.get(reverse('core:path_list'))
self.assertNotContains(response, '<a href="/admin/">Admin</a>')
|
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.http import Http404, HttpResponseRedirect
from django.urls import reverse_lazy
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.views.generic import ListView, DetailView, CreateView
from django.shortcuts import render, get_object_or_404, redirect
from ..forms import ApplyJobForm
from ..models import Job, Applicant
# Global home page for all users, members or not
def home_init(request):
return render(request,'jobsapp/home_initial.html')
class HomeView(ListView):
model = Job
template_name = 'home.html'
context_object_name = 'jobs'
def get_queryset(self):
return self.model.objects.all()[:6]
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['trendings'] = self.model.objects.filter(created_at__month=timezone.now().month)[:3]
return context
class SearchView(ListView):
model = Job
template_name = 'jobs/search.html'
context_object_name = 'jobs'
def get_queryset(self):
return self.model.objects.filter(location__contains=self.request.GET['location'],
title__contains=self.request.GET['position'])
class JobListView(ListView):
model = Job
template_name = 'jobs/jobs.html'
context_object_name = 'jobs'
paginate_by = 5
class JobDetailsView(DetailView):
model = Job
template_name = 'jobs/details.html'
context_object_name = 'job'
pk_url_kwarg = 'id'
def get_object(self, queryset=None):
obj = super(JobDetailsView, self).get_object(queryset=queryset)
if obj is None:
raise Http404("Job doesn't exists")
return obj
def get(self, request, *args, **kwargs):
try:
self.object = self.get_object()
        except Http404:
            # redirect here
            raise Http404("Job doesn't exist")
context = self.get_context_data(object=self.object)
return self.render_to_response(context)
class ApplyJobView(CreateView):
model = Applicant
form_class = ApplyJobForm
slug_field = 'job_id'
slug_url_kwarg = 'job_id'
@method_decorator(login_required(login_url=reverse_lazy('accounts:login')))
def dispatch(self, request, *args, **kwargs):
return super().dispatch(self.request, *args, **kwargs)
    def post(self, request, *args, **kwargs):
        form = self.get_form()
        if form.is_valid():
            # the success message is added in form_valid, once we know the
            # application was actually saved
            return self.form_valid(form)
        else:
            return HttpResponseRedirect(reverse_lazy('jobs:home'))
def get_success_url(self):
return reverse_lazy('jobs:jobs-detail', kwargs={'id': self.kwargs['job_id']})
# def get_form_kwargs(self):
# kwargs = super(ApplyJobView, self).get_form_kwargs()
# print(kwargs)
# kwargs['job'] = 1
# return kwargs
    def form_valid(self, form):
        # check if the user already applied
        applicant = Applicant.objects.filter(user_id=self.request.user.id, job_id=self.kwargs['job_id'])
        if applicant.exists():
            messages.info(self.request, 'You already applied for this job')
            return HttpResponseRedirect(self.get_success_url())
        # attach the user; super().form_valid() saves the form exactly once
        form.instance.user = self.request.user
        messages.info(self.request, 'Successfully applied for the job!')
        return super().form_valid(form)
|
# coding=utf-8
import Adafruit_DHT
import sys
import time
from datetime import datetime
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
# Might come in handy for publishing a topic when a sensor read fails
import paho.mqtt.client as mqtt
dbPath = r'/home/pi/smart-home-automation/webapp/app.db'
engine = create_engine('sqlite:///%s' % dbPath, echo=True)
Base = declarative_base(engine)
class Lettura(Base):
    """A single temperature/humidity reading (schema autoloaded from the db)."""
    __tablename__ = 'lettura'
    __table_args__ = {'autoload': True}
class Sensore(Base):
    """A configured DHT sensor (schema autoloaded from the db)."""
    __tablename__ = 'sensore'
    __table_args__ = {'autoload': True}
def loadSession():
    """Create a SQLAlchemy session bound to the app database."""
    Session = sessionmaker(bind=engine)
    session = Session()
    return session
session = loadSession()
sensor = Adafruit_DHT.DHT22
sensori = session.query(Sensore).all()
while True:
for sensore in sensori:
humidity, temperature = Adafruit_DHT.read_retry(sensor, sensore.pin)
if humidity is not None and temperature is not None:
print('Temp={0:0.1f}* Humidity={1:0.1f}%'.format(temperature, humidity))
nuova_lettura=Lettura(timestamp=datetime.now(), temperatura=temperature, umidita=humidity, sensore_id=sensore.id)
        else:
            print('Read failed')
            sys.exit(1)
        # After each successful reading, write the measurements to the db
session.add(nuova_lettura)
session.commit()
time.sleep(60)
|
import numpy as np
from numpy.fft import fft, ifft, fftfreq, rfftfreq
from astropy.io import ascii,fits
from scipy.interpolate import InterpolatedUnivariateSpline, interp1d
from scipy.integrate import trapz
from scipy.special import j1
import multiprocessing as mp
import sys
import gc
import os
import bz2
import h5py
from functools import partial
import itertools
from collections import OrderedDict
from .spectrum import create_log_lam_grid, calculate_dv, calculate_dv_dict, Base1DSpectrum
from . import constants as C
def chunk_list(mylist, n=mp.cpu_count()):
'''
Divide a lengthy parameter list into chunks for parallel processing and
backfill if necessary.
:param mylist: a lengthy list of parameter combinations
:type mylist: 1-D list
:param n: number of chunks to divide list into. Default is ``mp.cpu_count()``
:type n: integer
:returns: **chunks** (*2-D list* of shape (n, -1)) a list of chunked parameter lists.
'''
length = len(mylist)
size = int(length / n)
chunks = [mylist[0+size*i : size*(i+1)] for i in range(n)] #fill with evenly divisible
leftover = length - size*n
edge = size*n
for i in range(leftover): #backfill each with the last item
chunks[i%n].append(mylist[edge+i])
return chunks
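# Illustrative example: chunk_list(list(range(10)), n=3) first builds three
# evenly sized chunks [0, 1, 2], [3, 4, 5], [6, 7, 8] and then backfills the
# leftover item 9 into the first chunk, giving [[0, 1, 2, 9], [3, 4, 5], [6, 7, 8]].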
def determine_chunk_log(wl, wl_min, wl_max):
'''
Take in a wavelength array and then, given two minimum bounds, determine
the boolean indices that will allow us to truncate this grid to near the
requested bounds while forcing the wl length to be a power of 2.
:param wl: wavelength array
:type wl: np.ndarray
:param wl_min: minimum required wavelength
:type wl_min: float
:param wl_max: maximum required wavelength
:type wl_max: float
:returns: a np.ndarray boolean array used to index into the wl array.
'''
# wl_min and wl_max must of course be within the bounds of wl
assert wl_min >= np.min(wl) and wl_max <= np.max(wl), "wl_min {} and wl_max"\
" {} are not within the bounds of the grid.".format(wl_min, wl_max)
# Find the smallest length synthetic spectrum that is a power of 2 in length
# and longer than the number of points contained between wl_min and wl_max
len_wl = len(wl)
npoints = np.sum((wl >= wl_min) & (wl <= wl_max))
chunk = len_wl
inds = (0, chunk)
# This loop will exit with chunk being the smallest power of 2 that is
# larger than npoints
while chunk > npoints:
if chunk/2 > npoints:
chunk = chunk//2
else:
break
    assert isinstance(chunk, int), "Chunk is not an integer! Chunk is {}".format(chunk)
if chunk < len_wl:
# Now that we have determined the length of the chunk of the synthetic
# spectrum, determine indices that straddle the data spectrum.
# Find the index that corresponds to the wl at the center of the data spectrum
center_wl = (wl_min + wl_max)/2.
center_ind = (np.abs(wl - center_wl)).argmin()
#Take a chunk that straddles either side.
inds = (center_ind - chunk//2, center_ind + chunk//2)
ind = (np.arange(len_wl) >= inds[0]) & (np.arange(len_wl) < inds[1])
else:
print("keeping grid as is")
ind = np.ones_like(wl, dtype="b")
assert (min(wl[ind]) <= wl_min) and (max(wl[ind]) >= wl_max), "Model"\
"Interpolator chunking ({:.2f}, {:.2f}) didn't encapsulate full"\
" wl range ({:.2f}, {:.2f}).".format(min(wl[ind]), max(wl[ind]), wl_min, wl_max)
return ind
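# Illustrative example: for a wl grid of 4096 points with 1000 points between
# wl_min and wl_max, the loop above halves 4096 -> 2048 -> 1024 and stops
# there (512 < 1000), so a 1024-point chunk centered on the requested range
# is selected.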
class RawGridInterface:
'''
A base class to handle interfacing with synthetic spectral libraries.
:param name: name of the spectral library
:type name: string
:param points: a dictionary of lists describing the grid points at which
spectra exist (assumes grid is square, not ragged).
:type points: dict
:param air: Are the wavelengths measured in air?
:type air: bool
:param wl_range: the starting and ending wavelength ranges of the grid to
truncate to.
:type wl_range: list of len 2 [min, max]
:param base: path to the root of the files on disk.
:type base: string
'''
def __init__(self, name, points, air=True, wl_range=[3000,13000], base=None):
self.name = name
self.points = {}
assert type(points) is dict, "points must be a dictionary."
for key, value in points.items():
if key in C.grid_set:
self.points[key] = value
else:
raise KeyError("{0} is not an allowed parameter, skipping".format(key))
self.air = air
self.wl_range = wl_range
self.base = base
def check_params(self, parameters):
'''
Determine if a set of parameters is a subset of allowed parameters,
and then determine if those parameters are allowed in the grid.
:param parameters: parameter set to check
:type parameters: dict
:raises C.GridError: if parameters.keys() is not a subset of :data:`C.grid_set`
:raises C.GridError: if the parameter values are outside of the grid bounds
'''
if not set(parameters.keys()) <= C.grid_set:
raise C.GridError("{} not in allowable grid parameters {}".format(parameters.keys(), C.grid_set))
for key,value in parameters.items():
if value not in self.points[key]:
raise C.GridError("{} not in the grid points {}".format(value, sorted(self.points[key])))
def load_file(self, parameters, norm=True):
'''
Load a synthetic spectrum from disk and :meth:`check_params`
:param parameters: stellar parameters describing a spectrum
:type parameters: dict
.. note::
This method is designed to be extended by the inheriting class
'''
self.check_params(parameters)
def load_flux(self, parameters, norm=True):
'''
Load just the synthetic flux from the disk and :meth:`check_params`
:param parameters: stellar parameters describing a spectrum
:type parameters: dict
.. note::
This method is designed to be extended by the inheriting class
'''
class PHOENIXGridInterface(RawGridInterface):
'''
An Interface to the PHOENIX/Husser synthetic library.
:param norm: normalize the spectrum to solar luminosity?
:type norm: bool
'''
def __init__(self, air=True, norm=True, wl_range=[3000, 54000],
base="libraries/raw/PHOENIX/"):
super().__init__(name="PHOENIX",
points={"temp":
np.array([2300, 2400, 2500, 2600, 2700, 2800, 2900, 3000, 3100, 3200,
3300, 3400, 3500, 3600, 3700, 3800, 3900, 4000, 4100, 4200, 4300, 4400,
4500, 4600, 4700, 4800, 4900, 5000, 5100, 5200, 5300, 5400, 5500, 5600,
5700, 5800, 5900, 6000, 6100, 6200, 6300, 6400, 6500, 6600, 6700, 6800,
6900, 7000, 7200, 7400, 7600, 7800, 8000, 8200, 8400, 8600, 8800, 9000,
9200, 9400, 9600, 9800, 10000, 10200, 10400, 10600, 10800, 11000, 11200,
11400, 11600, 11800, 12000]),
"logg":np.arange(0.0, 6.1, 0.5),
"Z":np.arange(-2., 1.1, 0.5),
"alpha":np.array([-0.2, 0.0, 0.2, 0.4, 0.6, 0.8])},
air=air, wl_range=wl_range, base=base) #wl_range used to be [2999, 13001]
self.norm = norm #Normalize to 1 solar luminosity?
self.Z_dict = {-2:"-2.0", -1.5:"-1.5", -1:'-1.0', -0.5:'-0.5',
0.0: '-0.0', 0.5: '+0.5', 1: '+1.0'}
self.alpha_dict = {-0.4:".Alpha=-0.40", -0.2:".Alpha=-0.20",
0.0: "", 0.2:".Alpha=+0.20", 0.4:".Alpha=+0.40",
0.6:".Alpha=+0.60", 0.8:".Alpha=+0.80"}
# if air is true, convert the normally vacuum file to air wls.
try:
wl_file = fits.open(self.base + "WAVE_PHOENIX-ACES-AGSS-COND-2011.fits")
except OSError:
raise C.GridError("Wavelength file improperly specified.")
w_full = wl_file[0].data
wl_file.close()
if self.air:
self.wl_full = vacuum_to_air(w_full)
else:
self.wl_full = w_full
self.ind = (self.wl_full >= self.wl_range[0]) & (self.wl_full <= self.wl_range[1])
self.wl = self.wl_full[self.ind]
self.rname = self.base + "Z{Z:}{alpha:}/lte{temp:0>5.0f}-{logg:.2f}{Z:}{alpha:}" \
".PHOENIX-ACES-AGSS-COND-2011-HiRes.fits"
def load_file(self, parameters):
'''
Load a file from disk and return it as a spectrum object.
:param parameters: stellar parameters
:type parameters: dict
:raises C.GridError: if the file cannot be found on disk.
:returns: :obj:`model.Base1DSpectrum`
'''
super().load_file(parameters) #Check to make sure that the keys are allowed and that the values are in the grid
str_parameters = parameters.copy()
#Rewrite Z
Z = parameters["Z"]
str_parameters["Z"] = self.Z_dict[Z]
#Rewrite alpha, allow alpha to be missing from parameters and set to 0
try:
alpha = parameters["alpha"]
except KeyError:
alpha = 0.0
parameters["alpha"] = alpha
str_parameters["alpha"] = self.alpha_dict[alpha]
fname = self.rname.format(**str_parameters)
#Still need to check that file is in the grid, otherwise raise a C.GridError
#Read all metadata in from the FITS header, and append to spectrum
try:
flux_file = fits.open(fname)
f = flux_file[0].data
hdr = flux_file[0].header
flux_file.close()
except OSError:
raise C.GridError("{} is not on disk.".format(fname))
#If we want to normalize the spectra, we must do it now since later we won't have the full EM range
if self.norm:
f *= 1e-8 #convert from erg/cm^2/s/cm to erg/cm^2/s/A
F_bol = trapz(f, self.wl_full)
f = f * (C.F_sun / F_bol) #bolometric luminosity is always 1 L_sun
#Add temp, logg, Z, alpha, norm to the metadata
header = parameters
header["norm"] = self.norm
#Keep only the relevant PHOENIX keywords, which start with PHX
for key, value in hdr.items():
if key[:3] == "PHX":
header[key] = value
return Base1DSpectrum(self.wl, f[self.ind], metadata=header, air=self.air)
def load_flux(self, parameters, norm=True):
'''
Load just the flux and header information.
:param parameters: stellar parameters
:type parameters: dict
:raises C.GridError: if the file cannot be found on disk.
:returns: tuple (flux_array, header_dict)
'''
super().load_file(parameters) #Check to make sure that the keys are allowed and that the values are in the grid
str_parameters = parameters.copy()
#Rewrite Z
Z = parameters["Z"]
str_parameters["Z"] = self.Z_dict[Z]
#Rewrite alpha, allow alpha to be missing from parameters and set to 0
try:
alpha = parameters["alpha"]
except KeyError:
alpha = 0.0
parameters["alpha"] = alpha
str_parameters["alpha"] = self.alpha_dict[alpha]
fname = self.rname.format(**str_parameters)
#Still need to check that file is in the grid, otherwise raise a C.GridError
#Read all metadata in from the FITS header, and append to spectrum
try:
flux_file = fits.open(fname)
f = flux_file[0].data
hdr = flux_file[0].header
flux_file.close()
except OSError:
raise C.GridError("{} is not on disk.".format(fname))
#If we want to normalize the spectra, we must do it now since later we won't have the full EM range
if self.norm:
f *= 1e-8 #convert from erg/cm^2/s/cm to erg/cm^2/s/A
F_bol = trapz(f, self.wl_full)
f = f * (C.F_sun / F_bol) #bolometric luminosity is always 1 L_sun
#Add temp, logg, Z, alpha, norm to the metadata
header = parameters
header["norm"] = self.norm
header["air"] = self.air
#Keep only the relevant PHOENIX keywords, which start with PHX
for key, value in hdr.items():
if key[:3] == "PHX":
header[key] = value
return (f[self.ind], header)
class KuruczGridInterface(RawGridInterface):
'''Kurucz grid interface.
Spectra are stored in ``f_nu`` in a filename like
``t03500g00m25ap00k2v070z1i00.fits``, ``ap00`` means zero alpha enhancement,
and ``k2`` is the microturbulence, while ``z1`` is the macroturbulence.
These particular values are roughly the ones appropriate for the Sun.
'''
def __init__(self, air=True, norm=True, wl_range=[5000, 5400], base="libraries/raw/Kurucz/"):
super().__init__(name="Kurucz",
points={"temp" : np.arange(3500, 9751, 250),
"logg": np.arange(0.0, 5.1, 0.5),
"Z": np.array([-2.5, -2.0, -1.5, -1.0, -0.5, 0.0, 0.5]),
"alpha": np.array([0.0])},
air=air, wl_range=wl_range, base=base)
self.Z_dict = {-2.5:"m25", -2.0:"m20", -1.5:"m15", -1.0:"m10", -0.5:"m05", 0.0:"p00", 0.5:"p05"}
self.norm = norm #Convert to f_lam and average to 1, or leave in f_nu?
self.rname = base + "t{temp:0>5.0f}/g{logg:0>2.0f}/t{temp:0>5.0f}g{logg:0>2.0f}{Z}ap00k2v000z1i00.fits"
self.wl_full = np.load("wave_grids/kurucz_raw_wl.npy")
self.ind = (self.wl_full >= self.wl_range[0]) & (self.wl_full <= self.wl_range[1])
self.wl = self.wl_full[self.ind]
def load_flux(self, parameters, norm=True):
'''
        Load the flux and header information.
:param parameters: stellar parameters
:type parameters: dict
:raises C.GridError: if the file cannot be found on disk.
:returns: tuple (flux_array, header_dict)
'''
super().load_file(parameters) #Check to make sure that the keys are allowed and that the values are in the grid
str_parameters = parameters.copy()
#Rewrite Z
Z = parameters["Z"]
str_parameters["Z"] = self.Z_dict[Z]
#Multiply logg by 10
str_parameters["logg"] = 10 * parameters['logg']
fname = self.rname.format(**str_parameters)
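        # Illustrative example: {"temp": 5750, "logg": 4.5, "Z": 0.0} maps to
        # ".../t05750/g45/t05750g45p00ap00k2v000z1i00.fits" ("p00" from Z_dict,
        # logg multiplied by 10 as above).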
#Still need to check that file is in the grid, otherwise raise a C.GridError
#Read all metadata in from the FITS header, and append to spectrum
try:
flux_file = fits.open(fname)
f = flux_file[0].data
hdr = flux_file[0].header
flux_file.close()
except OSError:
raise C.GridError("{} is not on disk.".format(fname))
#We cannot normalize the spectra, since we don't have a full wl range, so instead we set the average
#flux to be 1
#Also, we should convert from f_nu to f_lam
        if self.norm:
            f *= C.c_ang / self.wl ** 2  # Convert from f_nu to f_lambda
            f /= np.average(f)  # divide by the mean flux, so avg(f) = 1
#Add temp, logg, Z, alpha, norm to the metadata
header = parameters
header["norm"] = self.norm
header["air"] = self.air
#Keep the relevant keywords
for key, value in hdr.items():
header[key] = value
return (f[self.ind], header)
def load_file(self, parameters):
'''
Load a file from the disk.
:param parameters: stellar parameters
:type parameters: dict
:raises C.GridError: if the file cannot be found on disk.
:returns: :obj:`model.Base1DSpectrum`
'''
super().load_file(parameters) #Check to make sure that the keys are allowed and that the values are in the grid
raise NotImplementedError("No load_file routine for Kurucz")
class BTSettlGridInterface(RawGridInterface):
'''BTSettl grid interface. Unlike the PHOENIX and Kurucz grids, the
individual files of the BTSettl grid do not always have the same wavelength
sampling. Therefore, each call of :meth:`load_flux` will interpolate the
flux onto a LogLambda spaced grid that ranges between `wl_range` and has a
velocity spacing of 0.08 km/s or better.
If you have a choice, it's probably easier to use the Husser PHOENIX grid.
'''
def __init__(self, air=True, norm=True, wl_range=[2999, 13000], base="libraries/raw/BTSettl/"):
super().__init__(name="BTSettl",
points={"temp":np.arange(3000, 7001, 100),
"logg":np.arange(2.5, 5.6, 0.5),
"Z":np.arange(-0.5, 0.6, 0.5),
"alpha": np.array([0.0])},
air=air, wl_range=wl_range, base=base)
self.norm = norm #Normalize to 1 solar luminosity?
self.rname = self.base + "CIFIST2011/M{Z:}/lte{temp:0>3.0f}-{logg:.1f}{Z:}.BT-Settl.spec.7.bz2"
# self.Z_dict = {-2:"-2.0", -1.5:"-1.5", -1:'-1.0', -0.5:'-0.5', 0.0: '-0.0', 0.5: '+0.5', 1: '+1.0'}
self.Z_dict = {-0.5:'-0.5a+0.2', 0.0: '-0.0a+0.0', 0.5: '+0.5a0.0'}
wl_dict = create_log_lam_grid(wl_start=self.wl_range[0], wl_end=self.wl_range[1], min_vc=0.08/C.c_kms)
self.wl = wl_dict['wl']
def load_flux(self, parameters):
'''
Because of the crazy format of the BTSettl, we need to sort the wl to make sure
everything is unique, and we're not screwing ourselves with the spline.
'''
super().load_file(parameters) #Check to make sure that the keys are allowed and that the values are in the grid
str_parameters = parameters.copy()
#Rewrite Z
Z = parameters["Z"]
str_parameters["Z"] = self.Z_dict[Z]
#Multiply temp by 0.01
str_parameters["temp"] = 0.01 * parameters['temp']
fname = self.rname.format(**str_parameters)
file = bz2.BZ2File(fname, 'r')
lines = file.readlines()
strlines = [line.decode('utf-8') for line in lines]
file.close()
data = ascii.read(strlines, col_starts=[0, 13], col_ends=[12, 25], Reader=ascii.FixedWidthNoHeader)
wl = data['col1']
fl_str = data['col2']
fl = idl_float(fl_str) #convert because of "D" exponent, unreadable in Python
fl = 10 ** (fl - 8.) #now in ergs/cm^2/s/A
#"Clean" the wl and flux points. Remove duplicates, sort in increasing wl
wl, ind = np.unique(wl, return_index=True)
fl = fl[ind]
if self.norm:
F_bol = trapz(fl, wl)
fl = fl * (C.F_sun / F_bol)
# the bolometric luminosity is always 1 L_sun
# truncate the spectrum to the wl range of interest
# at this step, make the range a little more so that the next stage of
# spline interpolation is properly in bounds
ind = (wl >= (self.wl_range[0] - 50.)) & (wl <= (self.wl_range[1] + 50.))
wl = wl[ind]
fl = fl[ind]
if self.air:
#Shift the wl that correspond to the raw spectrum
wl = vacuum_to_air(wl)
#Now interpolate wl, fl onto self.wl
interp = InterpolatedUnivariateSpline(wl, fl, k=5)
fl_interp = interp(self.wl)
return fl_interp
def load_file(self, parameters):
'''
Load a file from the disk.
:param parameters: stellar parameters
:type parameters: dict
:raises C.GridError: if the file cannot be found on disk.
:returns: :obj:`model.Base1DSpectrum`
'''
super().load_file(parameters) #Check to make sure that the keys are allowed and that the values are in the grid
raise NotImplementedError("No load_file routine for BTSettl")
class HDF5Creator:
'''
Create a HDF5 grid to store all of the spectra from a RawGridInterface,
along with metadata.
'''
def __init__(self, GridInterface, filename, Instrument, ranges={"temp":(0,np.inf),
"logg":(-np.inf,np.inf), "Z":(-np.inf, np.inf), "alpha":(-np.inf, np.inf)}):
'''
:param GridInterface: :obj:`RawGridInterface` object or subclass thereof
to access raw spectra on disk.
:param filename: where to create the HDF5 file. Suffix ``*.hdf5`` recommended.
:param Instrument: the instrument to convolve/truncate the grid. If you
want a high-res grid, use the NullInstrument.
:param ranges: lower and upper limits for each stellar parameter,
in order to truncate the number of spectra in the grid.
:type ranges: dict of keywords mapped to 2-tuples
This object is designed to be run in serial.
'''
self.GridInterface = GridInterface
self.filename = filename #only store the name to the HDF5 file, because
# otherwise the object cannot be parallelized
self.Instrument = Instrument
# The flux formatting key will always have alpha in the name, regardless
# of whether or not the library uses it as a parameter.
self.flux_name = "t{temp:.0f}g{logg:.1f}z{Z:.1f}a{alpha:.1f}"
# Take only those points of the GridInterface that fall within the ranges specified
self.points = {}
for key, value in ranges.items():
valid_points = self.GridInterface.points[key]
low,high = value
ind = (valid_points >= low) & (valid_points <= high)
self.points[key] = valid_points[ind]
# the raw wl from the spectral library
self.wl_native = self.GridInterface.wl #raw grid
self.dv_native = calculate_dv(self.wl_native)
self.hdf5 = h5py.File(self.filename, "w")
self.hdf5.attrs["grid_name"] = GridInterface.name
self.hdf5.flux_group = self.hdf5.create_group("flux")
self.hdf5.flux_group.attrs["unit"] = "erg/cm^2/s/A"
# We'll need a few wavelength grids
# 1. The original synthetic grid: ``self.wl_native``
# 2. A finely spaced log-lambda grid respecting the ``dv`` of
# ``self.wl_native``, onto which we can interpolate the flux values
        #    in preparation for the FFT: ``self.wl_FFT``
# [ DO FFT ]
# 3. A log-lambda spaced grid onto which we can downsample the result
# of the FFT, spaced with a ``dv`` such that we respect the remaining
# Fourier modes: ``self.wl_final``
wl_min, wl_max = self.Instrument.wl_range
# Calculate wl_FFT
if (self.wl_native[0] > wl_min) or (self.wl_native[-1] < wl_max):
wl_dict = create_log_lam_grid(self.dv_native, self.wl_native[0], self.wl_native[-1])
# otherwise use the edges specified by the instrument, plus a little more.
else:
#use the dv that preserves the quality of the raw PHOENIX grid
wl_dict = create_log_lam_grid(self.dv_native, wl_min - 50., wl_max + 50.)
self.wl_FFT = wl_dict["wl"]
self.dv_FFT = calculate_dv_dict(wl_dict)
print("FFT grid stretches from {} to {}".format(self.wl_FFT[0], self.wl_FFT[-1]))
print("wl_FFT dv is {} km/s".format(self.dv_FFT))
# The Fourier coordinate
self.ss = rfftfreq(len(self.wl_FFT), d=self.dv_FFT)
# The instrumental taper
sigma = self.Instrument.FWHM / 2.35 # in km/s
# Instrumentally broaden the spectrum by multiplying with a Gaussian in Fourier space
self.taper = np.exp(-2 * (np.pi ** 2) * (sigma ** 2) * (self.ss ** 2))
# Create the final wavelength grid, onto which we will interpolate the
# Fourier filtered wavelengths
# an upper limit on the final dv
dv_temp = self.Instrument.FWHM/self.Instrument.oversampling
wl_dict = create_log_lam_grid(dv_temp, wl_min, wl_max)
self.wl_final = wl_dict["wl"]
self.dv_final = calculate_dv_dict(wl_dict)
#Create the wl dataset separately using float64 due to rounding errors w/ interpolation.
wl_dset = self.hdf5.create_dataset("wl", (len(self.wl_final),), dtype="f8", compression='gzip', compression_opts=9)
wl_dset[:] = self.wl_final
wl_dset.attrs["air"] = self.GridInterface.air
wl_dset.attrs["dv"] = self.dv_final
def process_flux(self, parameters):
'''
Take a flux file from the raw grid, process it according to the
instrument, and insert it into the HDF5 file.
:param parameters: the stellar parameters
:type parameters: dict
.. note::
This function assumes that it's going to get a dictionary of
parameters that includes ``(temp, logg, Z, alpha)``, regardless of
whether the :attr:`GridInterface` actually has alpha or not.
        :raises AssertionError: if the ``parameters`` dictionary does not
            have exactly 4 keys.
:returns: a tuple of (parameters, flux, header). If the flux could
not be loaded, returns (None, None, None).
'''
assert len(parameters.keys()) == 4, "Must pass dictionary with keys " \
"(temp, logg, Z, alpha)"
print("Processing", parameters)
try:
flux, header = self.GridInterface.load_flux(parameters)
# Interpolate the native spectrum to a log-lam FFT grid
interp = InterpolatedUnivariateSpline(self.wl_native, flux, k=5)
fl = interp(self.wl_FFT)
del interp
gc.collect()
# Do the FFT
FF = np.fft.rfft(fl)
# apply instrumental taper
FF_tap = FF * self.taper
# do IFFT
fl_tapered = np.fft.irfft(FF_tap)
# downsample to the final grid
interp = InterpolatedUnivariateSpline(self.wl_FFT, fl_tapered, k=5)
fl_final = interp(self.wl_final)
del interp
gc.collect()
return (parameters, fl_final, header)
except C.GridError as e:
print("No file with parameters {}. C.GridError: {}".format(parameters, e))
return (None, None, None)
def process_grid(self):
'''
Run :meth:`process_flux` for all of the spectra within the `ranges`
and store the processed spectra in the HDF5 file.
Only executed in serial.
'''
# Take all parameter permutations in self.points and create a list
param_list = [] #list of parameter dictionaries
keys,values = self.points.keys(),self.points.values()
# use itertools.product to create permutations of all possible values
for i in itertools.product(*values):
param_list.append(dict(zip(keys,i)))
print("Total of {} files to process.".format(len(param_list)))
for param in param_list:
parameters, fl, header = self.process_flux(param)
if parameters is None:
continue
# The PHOENIX spectra are stored as float32, and so we do the same here.
flux = self.hdf5["flux"].create_dataset(self.flux_name.format(**parameters),
shape=(len(fl),), dtype="f", compression='gzip',
compression_opts=9)
flux[:] = fl
# Store header keywords as attributes in HDF5 file
for key,value in header.items():
if key != "" and value != "": #check for empty FITS kws
flux.attrs[key] = value
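# --- Illustrative sketch (not part of the original module) ---
# A minimal, hedged demonstration of the FFT-based instrumental broadening
# performed in HDF5Creator.process_flux: multiplying the real FFT of a
# uniformly (log-lambda) sampled flux by the Gaussian taper
# exp(-2 * pi^2 * sigma^2 * ss^2) is the Fourier-space equivalent of
# convolving the spectrum with a Gaussian LSF of standard deviation sigma.
# The dv and FWHM values in the commented usage are hypothetical.
def _fft_broaden_sketch(fl, dv, FWHM):
    '''Convolve a uniformly sampled flux with a Gaussian of the given FWHM (km/s).'''
    sigma = FWHM / 2.35  # FWHM -> sigma, as in HDF5Creator.__init__
    ss = rfftfreq(len(fl), d=dv)  # Fourier coordinate, cycles per (km/s)
    taper = np.exp(-2 * (np.pi ** 2) * (sigma ** 2) * (ss ** 2))
    return np.fft.irfft(np.fft.rfft(fl) * taper, n=len(fl))
# Usage (hypothetical values):
# _broadened = _fft_broaden_sketch(np.ones(4096), dv=0.5, FWHM=6.8)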
class HDF5Interface:
'''
Connect to an HDF5 file that stores spectra.
'''
def __init__(self, filename, ranges={"temp":(0,np.inf),
"logg":(-np.inf,np.inf),
"Z":(-np.inf, np.inf),
"alpha":(-np.inf, np.inf)}):
'''
:param filename: the name of the HDF5 file
        :type filename: string
:param ranges: optionally select a smaller part of the grid to use.
:type ranges: dict
'''
self.filename = filename
self.flux_name = "t{temp:.0f}g{logg:.1f}z{Z:.1f}a{alpha:.1f}"
with h5py.File(self.filename, "r") as hdf5:
self.wl = hdf5["wl"][:]
self.wl_header = dict(hdf5["wl"].attrs.items())
grid_points = []
for key in hdf5["flux"].keys():
#assemble all temp, logg, Z, alpha keywords into a giant list
hdr = hdf5['flux'][key].attrs
params = {k: hdr[k] for k in C.grid_set}
#Check whether the parameters are within the range
for kk,vv in params.items():
low, high = ranges[kk]
if (vv < low) or (vv > high):
break
else:
#If all parameters have passed successfully through the ranges, allow.
grid_points.append(params)
self.list_grid_points = grid_points
#determine the bounding regions of the grid by sorting the grid_points
temp, logg, Z, alpha = [],[],[],[]
for param in self.list_grid_points:
temp.append(param['temp'])
logg.append(param['logg'])
Z.append(param['Z'])
alpha.append(param['alpha'])
self.bounds = { "temp": (min(temp),max(temp)),
"logg": (min(logg), max(logg)),
"Z": (min(Z), max(Z)),
"alpha":(min(alpha),max(alpha))}
self.points = { "temp": np.unique(temp),
"logg": np.unique(logg),
"Z": np.unique(Z),
"alpha": np.unique(alpha)}
self.ind = None #Overwritten by other methods using this as part of a ModelInterpolator
def load_flux(self, parameters):
'''
Load just the flux from the grid, with possibly an index truncation.
:param parameters: the stellar parameters
:type parameters: dict
:raises KeyError: if spectrum is not found in the HDF5 file.
:returns: flux array
'''
key = self.flux_name.format(**parameters)
with h5py.File(self.filename, "r") as hdf5:
try:
if self.ind is not None:
fl = hdf5['flux'][key][self.ind[0]:self.ind[1]]
else:
fl = hdf5['flux'][key][:]
except KeyError as e:
raise C.GridError(e)
#Note: will raise a KeyError if the file is not found.
return fl
@property
def fluxes(self):
'''
Iterator to loop over all of the spectra stored in the grid, for PCA.
'''
for grid_point in self.list_grid_points:
yield self.load_flux(grid_point)
def load_flux_hdr(self, parameters):
'''
Just like load_flux, but also returns the header
'''
key = self.flux_name.format(**parameters)
with h5py.File(self.filename, "r") as hdf5:
try:
hdr = dict(hdf5['flux'][key].attrs)
if self.ind is not None:
fl = hdf5['flux'][key][self.ind[0]:self.ind[1]]
else:
fl = hdf5['flux'][key][:]
except KeyError as e:
raise C.GridError(e)
#Note: will raise a KeyError if the file is not found.
return (fl, hdr)
class IndexInterpolator:
'''
Object to return fractional distance between grid points of a single grid variable.
:param parameter_list: list of parameter values
:type parameter_list: 1-D list
'''
def __init__(self, parameter_list):
self.parameter_list = np.unique(parameter_list)
self.index_interpolator = interp1d(self.parameter_list, np.arange(len(self.parameter_list)), kind='linear')
def __call__(self, value):
'''
Evaluate the interpolator at a parameter.
:param value:
:type value: float
:raises C.InterpolationError: if *value* is out of bounds.
:returns: ((low_val, high_val), (frac_low, frac_high)), the lower and higher bounding points in the grid
and the fractional distance (0 - 1) between them and the value.
'''
try:
index = self.index_interpolator(value)
except ValueError as e:
raise C.InterpolationError("Requested value {} is out of bounds. {}".format(value, e))
        high = int(np.ceil(index))
        low = int(np.floor(index))
frac_index = index - low
return ((self.parameter_list[low], self.parameter_list[high]), ((1 - frac_index), frac_index))
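# --- Illustrative sketch (not part of the original module) ---
# Hedged usage example for IndexInterpolator, using a hypothetical temperature
# grid. The returned weights are the linear-interpolation fractions and always
# sum to 1; Interpolator.interpolate combines them across parameters.
# _ii = IndexInterpolator([5000, 5100, 5200])
# _ii(5025)  # -> ((5000, 5100), (0.75, 0.25))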
class Interpolator:
'''
Quickly and efficiently interpolate a synthetic spectrum for use in an MCMC simulation. Caches spectra for
easier memory load.
:param interface: :obj:`HDF5Interface` (recommended) or :obj:`RawGridInterface` to load spectra
:param DataSpectrum: data spectrum that you are trying to fit. Used for truncating the synthetic spectra to the relevant region for speed.
:type DataSpectrum: :obj:`spectrum.DataSpectrum`
:param cache_max: maximum number of spectra to hold in cache
:type cache_max: int
:param cache_dump: how many spectra to purge from the cache once :attr:`cache_max` is reached
:type cache_dump: int
    :param trilinear: if **True**, interpolate only in temp, logg, and [Fe/H], rather than in [alpha/Fe] as well.
    :type trilinear: bool
    Setting :attr:`trilinear` to **True** is useful when you want to do a run with [Fe/H] > 0.0
'''
def __init__(self, interface, DataSpectrum, cache_max=256, cache_dump=64, trilinear=False, log=True):
'''
Param log decides how to chunk up the returned spectrum. If we are using a pre-instrument convolved grid,
then we want to use log=True.
If we are using the raw synthetic grid, then we want to use log=False.
'''
self.interface = interface
self.DataSpectrum = DataSpectrum
#If alpha only includes one value, then do trilinear interpolation
(alow, ahigh) = self.interface.bounds['alpha']
if (alow == ahigh) or trilinear:
self.parameters = C.grid_set - set(("alpha",))
else:
self.parameters = C.grid_set
self.wl = self.interface.wl
self.wl_dict = self.interface.wl_header
if log:
self._determine_chunk_log()
else:
self._determine_chunk()
self.setup_index_interpolators()
self.cache = OrderedDict([])
self.cache_max = cache_max
self.cache_dump = cache_dump #how many to clear once the maximum cache has been reached
def _determine_chunk_log(self):
'''
Using the DataSpectrum, determine the minimum chunksize that we can use and then truncate the synthetic
wavelength grid and the returned spectra.
        Assumes the HDF5Interface is log-lambda spaced; otherwise there is no point in
        a grid with 2^n points, since you would need to interpolate in wl space afterwards anyway.
'''
wave_grid = self.interface.wl
wl_min, wl_max = np.min(self.DataSpectrum.wls), np.max(self.DataSpectrum.wls)
#Length of the raw synthetic spectrum
len_wg = len(wave_grid)
#ind_wg = np.arange(len_wg) #Labels of pixels
#Length of the data
len_data = np.sum((self.wl > wl_min) & (self.wl < wl_max)) #How much of the synthetic spectrum do we need?
#Find the smallest length synthetic spectrum that is a power of 2 in length and larger than the data spectrum
chunk = len_wg
self.interface.ind = (0, chunk) #Set to be the full spectrum
while chunk > len_data:
if chunk/2 > len_data:
chunk = chunk//2
else:
break
        assert isinstance(chunk, int), "Chunk is no longer an integer! Chunk is {}".format(chunk)
if chunk < len_wg:
# Now that we have determined the length of the chunk of the synthetic spectrum, determine indices
# that straddle the data spectrum.
# What index corresponds to the wl at the center of the data spectrum?
median_wl = np.median(self.DataSpectrum.wls)
median_ind = (np.abs(wave_grid - median_wl)).argmin()
#Take the chunk that straddles either side.
ind = (median_ind - chunk//2, median_ind + chunk//2)
self.wl = self.wl[ind[0]:ind[1]]
assert min(self.wl) < wl_min and max(self.wl) > wl_max, "ModelInterpolator chunking ({:.2f}, {:.2f}) " \
"didn't encapsulate full DataSpectrum range ({:.2f}, {:.2f}).".format(min(self.wl),
max(self.wl), wl_min, wl_max)
self.interface.ind = ind
print("Determine Chunk Log: Wl is {}".format(len(self.wl)))
def _determine_chunk(self):
'''
Using the DataSpectrum, set the bounds of the interpolator to +/- 5 Ang
'''
wave_grid = self.interface.wl
wl_min, wl_max = np.min(self.DataSpectrum.wls), np.max(self.DataSpectrum.wls)
ind_low = (np.abs(wave_grid - (wl_min - 5.))).argmin()
ind_high = (np.abs(wave_grid - (wl_max + 5.))).argmin()
self.wl = self.wl[ind_low:ind_high]
assert min(self.wl) < wl_min and max(self.wl) > wl_max, "ModelInterpolator chunking ({:.2f}, {:.2f}) " \
"didn't encapsulate full DataSpectrum range ({:.2f}, {:.2f}).".format(min(self.wl),
max(self.wl), wl_min, wl_max)
self.interface.ind = (ind_low, ind_high)
print("Wl is {}".format(len(self.wl)))
def __call__(self, parameters):
'''
Interpolate a spectrum
:param parameters: stellar parameters
:type parameters: dict
Automatically pops :attr:`cache_dump` items from cache if full.
'''
if len(self.cache) > self.cache_max:
            for _ in range(self.cache_dump):
                self.cache.popitem(last=False)
self.cache_counter = 0
return self.interpolate(parameters)
def setup_index_interpolators(self):
#create an interpolator between grid points indices. Given a temp, produce fractional index between two points
self.index_interpolators = {key:IndexInterpolator(self.interface.points[key]) for key in self.parameters}
lenF = self.interface.ind[1] - self.interface.ind[0]
self.fluxes = np.empty((2**len(self.parameters), lenF)) #8 rows, for temp, logg, Z
def interpolate(self, parameters):
'''
Interpolate a spectrum without clearing cache. Recommended to use :meth:`__call__` instead.
:param parameters: stellar parameters
:type parameters: dict
:raises C.InterpolationError: if parameters are out of bounds.
        :returns: the interpolated flux, assembled as a weighted sum of the fluxes at the bounding grid points.
'''
#Here it really would be nice to return things in a predictable order
# (temp, logg, Z)
odict = OrderedDict()
for key in ("temp", "logg", "Z"):
odict[key] = parameters[key]
try:
edges = OrderedDict()
for key,value in odict.items():
edges[key] = self.index_interpolators[key](value)
except C.InterpolationError as e:
raise C.InterpolationError("Parameters {} are out of bounds. {}".format(parameters, e))
#Edges is a dictionary of {"temp": ((6000, 6100), (0.2, 0.8)), "logg": (())..}
names = [key for key in edges.keys()] #list of ["temp", "logg", "Z"],
params = [edges[key][0] for key in names] #[(6000, 6100), (4.0, 4.5), ...]
weights = [edges[key][1] for key in names] #[(0.2, 0.8), (0.4, 0.6), ...]
param_combos = itertools.product(*params) #Selects all the possible combinations of parameters
#[(6000, 4.0, 0.0), (6100, 4.0, 0.0), (6000, 4.5, 0.0), ...]
weight_combos = itertools.product(*weights)
#[(0.2, 0.4, 1.0), (0.8, 0.4, 1.0), ...]
parameter_list = [dict(zip(names, param)) for param in param_combos]
if "alpha" not in parameters.keys():
[param.update({"alpha":C.var_default["alpha"]}) for param in parameter_list]
key_list = [self.interface.flux_name.format(**param) for param in parameter_list]
weight_list = np.array([np.prod(weight) for weight in weight_combos])
assert np.allclose(np.sum(weight_list), np.array(1.0)), "Sum of weights must equal 1, {}".format(np.sum(weight_list))
#Assemble flux vector from cache
for i,param in enumerate(parameter_list):
key = key_list[i]
if key not in self.cache.keys():
try:
fl = self.interface.load_flux(param) #This method allows loading only the relevant region from HDF5
except KeyError as e:
raise C.InterpolationError("Parameters {} not in master HDF5 grid. {}".format(param, e))
self.cache[key] = fl
#Note: if we are dealing with a ragged grid, a C.GridError will be raised here because a Z=+1, alpha!=0 spectrum can't be found.
self.fluxes[i,:] = self.cache[key]*weight_list[i]
return np.sum(self.fluxes, axis=0)
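# --- Illustrative sketch (not part of the original module) ---
# How the corner weights in Interpolator.interpolate arise: the per-parameter
# (low, high) weight pairs are expanded with itertools.product, and the product
# of each combination is the weight of one bounding spectrum. The pairs below
# are hypothetical.
# _weights = [(0.2, 0.8), (0.4, 0.6), (1.0, 0.0)]  # temp, logg, Z
# _corners = [w1 * w2 * w3 for w1, w2, w3 in itertools.product(*_weights)]
# sum(_corners)  # == 1.0, as asserted in interpolate()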
#Convert R to FWHM in km/s by \Delta v = c/R
class Instrument:
'''
Object describing an instrument. This will be used by other methods for
processing raw synthetic spectra.
:param name: name of the instrument
:type name: string
:param FWHM: the FWHM of the instrumental profile in km/s
:type FWHM: float
:param wl_range: wavelength range of instrument
:type wl_range: 2-tuple (low, high)
:param oversampling: how many samples fit across the :attr:`FWHM`
:type oversampling: float
Upon initialization, calculates a ``wl_dict`` with the properties of the
instrument.
'''
def __init__(self, name, FWHM, wl_range, oversampling=4.):
self.name = name
self.FWHM = FWHM #km/s
self.oversampling = oversampling
self.wl_range = wl_range
self.wl_dict = create_log_lam_grid(self.FWHM/self.oversampling, *self.wl_range)
#Take the starting and ending wavelength ranges, the FWHM,
# and oversampling value and generate an outwl grid that can be resampled to.
def __str__(self):
'''
Prints the relevant properties of the instrument.
'''
return "Instrument Name: {}, FWHM: {:.1f}, oversampling: {}, " \
"wl_range: {}".format(self.name, self.FWHM, self.oversampling, self.wl_range)
class TRES(Instrument):
'''TRES instrument'''
def __init__(self, name="TRES", FWHM=6.8, wl_range=(3500, 9500)):
super().__init__(name=name, FWHM=FWHM, wl_range=wl_range)
#sets the FWHM and wl_range
class Reticon(Instrument):
'''Reticon Instrument'''
def __init__(self, name="Reticon", FWHM=8.5, wl_range=(5145,5250)):
super().__init__(name=name, FWHM=FWHM, wl_range=wl_range)
class KPNO(Instrument):
    '''KPNO Instrument'''
def __init__(self, name="KPNO", FWHM=14.4, wl_range=(6250,6650)):
super().__init__(name=name, FWHM=FWHM, wl_range=wl_range)
class SPEX(Instrument):
'''SPEX Instrument'''
def __init__(self, name="SPEX", FWHM=150., wl_range=(7500, 54000)):
super().__init__(name=name, FWHM=FWHM, wl_range=wl_range)
class SPEX_SXD(Instrument):
'''SPEX Instrument short mode'''
def __init__(self, name="SPEX", FWHM=150., wl_range=(7500, 26000)):
super().__init__(name=name, FWHM=FWHM, wl_range=wl_range)
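# --- Illustrative sketch (not part of the original module) ---
# Instruments are instantiated with sensible defaults and expose the FWHM,
# wavelength range, and an oversampled log-lambda wl_dict; e.g.:
# _tres = TRES()
# print(_tres)  # Instrument Name: TRES, FWHM: 6.8, oversampling: 4.0, wl_range: (3500, 9500)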
def vacuum_to_air(wl):
'''
Converts vacuum wavelengths to air wavelengths using the Ciddor 1996 formula.
:param wl: input vacuum wavelengths
:type wl: np.array
:returns: **wl_air** (*np.array*) - the wavelengths converted to air wavelengths
.. note::
CA Prieto recommends this as more accurate than the IAU standard.'''
sigma = (1e4 / wl) ** 2
f = 1.0 + 0.05792105 / (238.0185 - sigma) + 0.00167917 / (57.362 - sigma)
return wl / f
def calculate_n(wl):
'''
Calculate *n*, the refractive index of light at a given wavelength.
:param wl: input wavelength (in vacuum)
:type wl: np.array
:return: **n_air** (*np.array*) - the refractive index in air at that wavelength
'''
sigma = (1e4 / wl) ** 2
f = 1.0 + 0.05792105 / (238.0185 - sigma) + 0.00167917 / (57.362 - sigma)
    new_wl = wl / f
    n = wl / new_wl
    return n
def vacuum_to_air_SLOAN(wl):
'''
Converts vacuum wavelengths to air wavelengths using the outdated SLOAN definition.
:param wl:
The input wavelengths to convert
From the SLOAN website:
AIR = VAC / (1.0 + 2.735182E-4 + 131.4182 / VAC^2 + 2.76249E8 / VAC^4)'''
air = wl / (1.0 + 2.735182E-4 + 131.4182 / wl ** 2 + 2.76249E8 / wl ** 4)
return air
def air_to_vacuum(wl):
'''
Convert air wavelengths to vacuum wavelengths.
    :param wl: input air wavelengths
:type wl: np.array
:return: **wl_vac** (*np.array*) - the wavelengths converted to vacuum.
.. note::
It is generally not recommended to do this, as the function is imprecise.
'''
sigma = 1e4 / wl
vac = wl + wl * (6.4328e-5 + 2.94981e-2 / (146 - sigma ** 2) + 2.5540e-4 / (41 - sigma ** 2))
return vac
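# --- Illustrative sketch (not part of the original module) ---
# Hedged round-trip check for the two conversions above. Because air_to_vacuum
# uses a different (and less precise) formula than the Ciddor 1996 relation in
# vacuum_to_air, the round trip does not recover the input exactly, which is
# why converting air -> vacuum is discouraged in the docstring.
# _wl_vac = np.array([5000.0, 6500.0])            # vacuum wavelengths in Angstroms
# _round = air_to_vacuum(vacuum_to_air(_wl_vac))  # close to _wl_vac, but not exact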
def get_wl_kurucz(filename):
'''The Kurucz grid is log-linear spaced.'''
flux_file = fits.open(filename)
hdr = flux_file[0].header
num = len(flux_file[0].data)
p = np.arange(num)
w1 = hdr['CRVAL1']
dw = hdr['CDELT1']
wl = 10 ** (w1 + dw * p)
return wl
@np.vectorize
def idl_float(idl_num):
'''
idl_float(idl_num)
    Convert an IDL *string* number in scientific notation to a float.
:param idl_num:
the idl number in sci_notation'''
#replace 'D' with 'E', convert to float
    return float(idl_num.replace("D", "E"))
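# --- Illustrative sketch (not part of the original module) ---
# Example of the "D" exponent fix performed by idl_float on the IDL-style
# strings found in BT-Settl files (values are hypothetical):
# idl_float("1.2345D-08")            # -> 1.2345e-08 (as a NumPy scalar, via np.vectorize)
# idl_float(["1.0D+00", "2.5D+01"])  # vectorized -> array([ 1. , 25. ])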
def load_BTSettl(temp, logg, Z, norm=False, trunc=False, air=False):
rname = "BT-Settl/CIFIST2011/M{Z:}/lte{temp:0>3.0f}-{logg:.1f}{Z:}.BT-Settl.spec.7.bz2".format(temp=0.01 * temp,
logg=logg, Z=Z)
file = bz2.BZ2File(rname, 'r')
lines = file.readlines()
strlines = [line.decode('utf-8') for line in lines]
file.close()
data = ascii.read(strlines, col_starts=[0, 13], col_ends=[12, 25], Reader=ascii.FixedWidthNoHeader)
wl = data['col1']
fl_str = data['col2']
fl = idl_float(fl_str) #convert because of "D" exponent, unreadable in Python
fl = 10 ** (fl - 8.) #now in ergs/cm^2/s/A
if norm:
F_bol = trapz(fl, wl)
fl = fl * (C.F_sun / F_bol)
#this also means that the bolometric luminosity is always 1 L_sun
if trunc:
#truncate to only the wl of interest
ind = (wl > 3000) & (wl < 13000)
wl = wl[ind]
fl = fl[ind]
if air:
wl = vacuum_to_air(wl)
return [wl, fl]
def load_flux_full(temp, logg, Z, alpha=None, norm=False, vsini=0, grid="PHOENIX"):
'''Load a raw PHOENIX or kurucz spectrum based upon temp, logg, and Z. Normalize to C.F_sun if desired.'''
if grid == "PHOENIX":
if alpha is not None:
rname = "raw_grids/PHOENIX/Z{Z:}{alpha:}/lte{temp:0>5.0f}-{logg:.2f}{Z:}{alpha:}" \
".PHOENIX-ACES-AGSS-COND-2011-HiRes.fits".format(Z=Z, temp=temp, logg=logg, alpha=alpha)
else:
rname = "raw_grids/PHOENIX/Z{Z:}/lte{temp:0>5.0f}-{logg:.2f}{Z:}" \
".PHOENIX-ACES-AGSS-COND-2011-HiRes.fits".format(Z=Z, temp=temp, logg=logg)
elif grid == "kurucz":
rname = "raw_grids/Kurucz/TRES/t{temp:0>5.0f}g{logg:.0f}{Z:}v{vsini:0>3.0f}.fits".format(temp=temp,
logg=10 * logg, Z=Z, vsini=vsini)
    else:
        raise ValueError("No grid %s" % grid)
flux_file = fits.open(rname)
f = flux_file[0].data
if norm:
f *= 1e-8 #convert from erg/cm^2/s/cm to erg/cm^2/s/A
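        # NOTE: w_full (the raw wavelength grid) is assumed to be defined at module level.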
F_bol = trapz(f, w_full)
f = f * (C.F_sun / F_bol)
#this also means that the bolometric luminosity is always 1 L_sun
if grid == "kurucz":
f *= C.c_ang / wave_grid_kurucz_raw ** 2 #Convert from f_nu to f_lambda
flux_file.close()
#print("Loaded " + rname)
return f
def create_fits(filename, fl, CRVAL1, CDELT1, dict=None):
'''Assumes that wl is already log lambda spaced'''
hdu = fits.PrimaryHDU(fl)
head = hdu.header
head["DISPTYPE"] = 'log lambda'
head["DISPUNIT"] = 'log angstroms'
head["CRPIX1"] = 1.
head["CRVAL1"] = CRVAL1
head["CDELT1"] = CDELT1
head["DC-FLAG"] = 1
if dict is not None:
for key, value in dict.items():
head[key] = value
hdu.writeto(filename)
class MasterToFITSIndividual:
'''
Object used to create one FITS file at a time.
:param interpolator: an :obj:`Interpolator` object referenced to the master grid.
:param instrument: an :obj:`Instrument` object containing the properties of the final spectra
'''
def __init__(self, interpolator, instrument):
self.interpolator = interpolator
self.instrument = instrument
self.filename = "t{temp:0>5.0f}g{logg:0>2.0f}{Z_flag}{Z:0>2.0f}v{vsini:0>3.0f}.fits"
#Create a master wl_dict which correctly oversamples the instrumental kernel
self.wl_dict = self.instrument.wl_dict
self.wl = self.wl_dict["wl"]
def process_spectrum(self, parameters, out_unit, out_dir=""):
'''
Creates a FITS file with given parameters
:param parameters: stellar parameters :attr:`temp`, :attr:`logg`, :attr:`Z`, :attr:`vsini`
:type parameters: dict
        :param out_unit: the output flux unit. Choices: `f_lam`, `f_nu`, `f_nu_log`, or `counts/pix`. `counts/pix` will do spline integration.
:param out_dir: optional directory to prepend to output filename, which is chosen automatically for parameter values.
Smoothly handles the *C.InterpolationError* if parameters cannot be interpolated from the grid and prints a message.
'''
#Preserve the "popping of parameters"
parameters = parameters.copy()
#Load the correct C.grid_set value from the interpolator into a LogLambdaSpectrum
if parameters["Z"] < 0:
zflag = "m"
else:
zflag = "p"
filename = out_dir + self.filename.format(temp=parameters["temp"], logg=10*parameters["logg"],
Z=np.abs(10*parameters["Z"]), Z_flag=zflag, vsini=parameters["vsini"])
vsini = parameters.pop("vsini")
try:
spec = self.interpolator(parameters)
# Using the ``out_unit``, determine if we should also integrate while doing the downsampling
if out_unit=="counts/pix":
integrate=True
else:
integrate=False
# Downsample the spectrum to the instrumental resolution.
spec.instrument_and_stellar_convolve(self.instrument, vsini, integrate)
spec.write_to_FITS(out_unit, filename)
except C.InterpolationError as e:
print("{} cannot be interpolated from the grid.".format(parameters))
print("Processed spectrum {}".format(parameters))
class MasterToFITSGridProcessor:
'''
Create one or many FITS files from a master HDF5 grid. Assume that we are not going to need to interpolate
any values.
:param interface: an :obj:`HDF5Interface` object referenced to the master grid.
:param points: lists of output parameters (assumes regular grid)
:type points: dict of lists
:param flux_unit: format of output spectra {"f_lam", "f_nu", "ADU"}
:type flux_unit: string
:param outdir: output directory
:param processes: how many processors to use in parallel
Basically, this object is doing a one-to-one conversion of the PHOENIX spectra. No interpolation necessary,
preserving all of the header keywords.
'''
def __init__(self, interface, instrument, points, flux_unit, outdir, alpha=False, integrate=False, processes=mp.cpu_count()):
self.interface = interface
self.instrument = instrument
self.points = points #points is a dictionary with which values to spit out for each parameter
self.filename = "t{temp:0>5.0f}g{logg:0>2.0f}{Z_flag}{Z:0>2.0f}v{vsini:0>3.0f}.fits"
self.flux_unit = flux_unit
self.integrate = integrate
self.outdir = outdir
self.processes = processes
self.pids = []
self.alpha = alpha
self.vsini_points = self.points.pop("vsini")
names = self.points.keys()
#Creates a list of parameter dictionaries [{"temp":8500, "logg":3.5, "Z":0.0}, {"temp":8250, etc...}, etc...]
#which does not contain vsini
self.param_list = [dict(zip(names,params)) for params in itertools.product(*self.points.values())]
#Create a master wl_dict which correctly oversamples the instrumental kernel
self.wl_dict = self.instrument.wl_dict
self.wl = self.wl_dict["wl"]
#Check that temp, logg, Z are within the bounds of the interface
for key,value in self.points.items():
min_val, max_val = self.interface.bounds[key]
assert np.min(self.points[key]) >= min_val,"Points below interface bound {}={}".format(key, min_val)
assert np.max(self.points[key]) <= max_val,"Points above interface bound {}={}".format(key, max_val)
#Create a temporary grid to resample to that matches the bounds of the instrument.
low, high = self.instrument.wl_range
self.temp_grid = create_log_lam_grid(wl_start=low, wl_end=high, min_vc=0.1)['wl']
def process_spectrum_vsini(self, parameters):
'''
Create a set of FITS files with given stellar parameters temp, logg, Z and all combinations of `vsini`.
:param parameters: stellar parameters
:type parameters: dict
Smoothly handles the *KeyError* if parameters cannot be drawn from the interface and prints a message.
'''
try:
#Check to see if alpha, otherwise append alpha=0 to the parameter list.
if not self.alpha:
parameters.update({"alpha": 0.0})
print(parameters)
if parameters["Z"] < 0:
zflag = "m"
else:
zflag = "p"
#This is a Base1DSpectrum
base_spec = self.interface.load_file(parameters)
master_spec = base_spec.to_LogLambda(instrument=self.instrument, min_vc=0.1/C.c_kms) #convert the Base1DSpectrum to a LogLamSpectrum
#Now process the spectrum for all values of vsini
for vsini in self.vsini_points:
spec = master_spec.copy()
#Downsample the spectrum to the instrumental resolution, integrate to give counts/pixel
spec.instrument_and_stellar_convolve(self.instrument, vsini, integrate=self.integrate)
#Update spectrum with vsini
spec.metadata.update({"vsini":vsini})
filename = self.outdir + self.filename.format(temp=parameters["temp"], logg=10*parameters["logg"],
Z=np.abs(10*parameters["Z"]), Z_flag=zflag, vsini=vsini)
spec.write_to_FITS(self.flux_unit, filename)
except KeyError as e:
print("{} cannot be loaded from the interface.".format(parameters))
def process_chunk(self, chunk):
'''
Process a chunk of parameters to FITS
:param chunk: stellar parameter dicts
:type chunk: 1-D list
'''
print("Process {} processing chunk {}".format(os.getpid(), chunk))
for param in chunk:
self.process_spectrum_vsini(param)
def process_all(self):
'''
Process all parameters in :attr:`points` to FITS by chopping them into chunks.
'''
print("Total of {} FITS files to create.".format(len(self.vsini_points) * len(self.param_list)))
chunks = chunk_list(self.param_list, n=self.processes)
for chunk in chunks:
p = mp.Process(target=self.process_chunk, args=(chunk,))
p.start()
self.pids.append(p)
for p in self.pids:
#Make sure all threads have finished
p.join()
def main():
pass
if __name__ == "__main__":
main()
|
# ----------------------------------------------------------------------------
# Copyright (c) 2017-2019, Ben Kaehler.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
|
from datetime import datetime
import sqlalchemy as sql
from web import sql_database as db
from sqlalchemy.orm import relationship
class Queue(db.Model):
id = sql.Column(sql.Integer, primary_key=True)
song_id = sql.Column(sql.Integer, sql.ForeignKey(
'song.id'), nullable=False, unique=True)
song = relationship(
'database.Song.Song', lazy="joined")
approval_pending = sql.Column(sql.Boolean, nullable=False, default=True)
upvotes = sql.Column(sql.Integer, nullable=False, default=1)
downvotes = sql.Column(sql.Integer, nullable=False, default=0)
insertion_time = sql.Column(
sql.DateTime, nullable=False, default=datetime.now)
played_time = sql.Column(
sql.DateTime, nullable=True)
is_next_song = sql.Column(sql.Boolean, nullable=False, default=False)
    is_default_song = sql.Column(sql.Boolean, nullable=False, default=False)
voted_by = sql.Column(sql.Text, nullable=False, default="")
|
#!/usr/bin/env python
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script deletes and recreates the prow configmaps
# USE AT YOUR OWN RISK! This is a break-glass tool.
# See September 25th, 2018 in docs/post-mortems.md
#
# USAGE: have KUBECONFIG pointed at your prow cluster then from test-infra root:
#
# hack/recreate_prow_configmaps.py [--wet]
#
from __future__ import print_function
from argparse import ArgumentParser
import os
import sys
import subprocess
def recreate_prow_config(wet, configmap_name, path):
print('recreating prow config:')
cmd = (
'kubectl create configmap %s'
' --from-file=config.yaml=%s'
        ' --dry-run -o yaml | kubectl replace -f -'
) % (configmap_name, path)
real_cmd = ['/bin/sh', '-c', cmd]
print(real_cmd)
if wet:
subprocess.check_call(real_cmd)
def recreate_plugins_config(wet, configmap_name, path):
print('recreating plugins config:')
cmd = (
'kubectl create configmap %s'
' --from-file=plugins.yaml=%s'
        ' --dry-run -o yaml | kubectl replace -f -'
) % (configmap_name, path)
real_cmd = ['/bin/sh', '-c', cmd]
print(real_cmd)
if wet:
subprocess.check_call(real_cmd)
def recreate_job_config(wet, job_configmap, job_config_dir):
print('recreating jobs config:')
# delete configmap (apply has size limit)
cmd = ["kubectl", "delete", "configmap", job_configmap]
print(cmd)
if wet:
subprocess.check_call(cmd)
# regenerate
cmd = ["kubectl", "create", "configmap", job_configmap]
for root, _, files in os.walk(job_config_dir):
for name in files:
if name.endswith(".yaml"):
cmd.append("--from-file=%s=%s" % (name, os.path.join(root, name)))
print(cmd)
if wet:
subprocess.check_call(cmd)
def main():
parser = ArgumentParser()
# jobs config
parser.add_argument("--job-configmap", default="job-config", help="name of prow jobs configmap")
parser.add_argument(
"--job-config-dir", default="config/jobs",
help="root dir of prow jobs configmap")
# prow config
parser.add_argument("--prow-configmap", default="config",
help="name of prow primary configmap")
parser.add_argument(
"--prow-config-path", default="prow/config.yaml",
help="path to the primary prow config")
# plugins config
parser.add_argument("--plugins-configmap", default="plugins",
help="name of prow plugins configmap")
parser.add_argument(
"--plugins-config-path", default="prow/plugins.yaml",
help="path to the prow plugins config")
# wet or dry?
parser.add_argument("--wet", action="store_true")
args = parser.parse_args()
# debug the current context
out = subprocess.check_output(['kubectl', 'config', 'current-context'])
print('Current KUBECONFIG context: ' + out)
# require additional confirmation in --wet mode
prompt = '!' * 65 + (
"\n!! WARNING THIS WILL RECREATE **ALL** PROW CONFIGMAPS. !!"
"\n!! ARE YOU SURE YOU WANT TO DO THIS? IF SO, ENTER 'YES'. !! "
) + '\n' + '!' * 65 + '\n\n: '
if args.wet:
if raw_input(prompt) != "YES":
print("you did not enter 'YES'")
sys.exit(-1)
# first prow config
recreate_prow_config(args.wet, args.prow_configmap, args.prow_config_path)
print('')
# then plugins config
recreate_plugins_config(args.wet, args.plugins_configmap, args.plugins_config_path)
print('')
# finally jobs config
recreate_job_config(args.wet, args.job_configmap, args.job_config_dir)
if __name__ == '__main__':
main()
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AccountBackupsOperations:
"""AccountBackupsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.netapp.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def list(
self,
resource_group_name: str,
account_name: str,
**kwargs
) -> "_models.BackupsList":
"""List Backups for a Netapp Account.
List all Backups for a Netapp Account.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param account_name: The name of the NetApp account.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BackupsList, or the result of cls(response)
:rtype: ~azure.mgmt.netapp.models.BackupsList
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackupsList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('BackupsList', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/accountBackups'} # type: ignore
async def get(
self,
resource_group_name: str,
account_name: str,
backup_name: str,
**kwargs
) -> "_models.Backup":
"""Get Backup for a Netapp Account.
Get Backup for a Netapp Account.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param account_name: The name of the NetApp account.
:type account_name: str
:param backup_name: The name of the backup.
:type backup_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Backup, or the result of cls(response)
:rtype: ~azure.mgmt.netapp.models.Backup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Backup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'backupName': self._serialize.url("backup_name", backup_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Backup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/accountBackups/{backupName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
account_name: str,
backup_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'backupName': self._serialize.url("backup_name", backup_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/accountBackups/{backupName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
account_name: str,
backup_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Delete Backup for a Netapp Account.
Delete Backup for a Netapp Account.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param account_name: The name of the NetApp account.
:type account_name: str
:param backup_name: The name of the backup.
:type backup_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
account_name=account_name,
backup_name=backup_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'backupName': self._serialize.url("backup_name", backup_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/accountBackups/{backupName}'} # type: ignore
|
# Generated by Django 2.0.10 on 2019-04-30 22:51
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('meal_app', '0005_auto_20190430_2250'),
]
operations = [
migrations.RenameField(
model_name='meal',
old_name='date_fieled',
new_name='date_field',
),
]
|
#!/usr/bin/python3
import argparse
from core import main
parser = argparse.ArgumentParser(description='Classify some data.')
parser.add_argument('-k', type=int, default=1, help='k Nearest Neighbor classifier')
parser.add_argument('-d', '--distance', choices=['euclidean', 'hamming+', 'linear_mahalanobis', 'quadratic_mahalanobis'],
default='euclidean', help='Distance metric algorithm')
parser.add_argument('-t', '--training_set', type=str, help='Filename of the training set, must be a CSV file')
parser.add_argument('-i', '--input', type=str, help='Filename of the input to classify, must be a CSV file')
parser.add_argument('-o', '--output', type=str, default='out', help='Output file')
parser.add_argument('-c', '--category', type=int, help='Column of the class')
parser.add_argument('-I', '--ignore', action='append', type=int, help='Columns to be ignored')
parser.add_argument('-s', '--spiral', type=str, choices=['single', 'double'], help='Number of spirals')
parser.add_argument('-g', '--grid_size', type=int, help='Grid size (used for spirals only)')
parser.add_argument('-n', '--noise', type=int, default=0, help='Noise for the spiral(s).')
parser.add_argument('-v', '--verbose', action='store_true', help='Print additional statistics')
parser.add_argument('-p', '--plot', action='store_true', help='Plot the classified patterns (if 2D)')
parser.add_argument('-l', '--slice', type=float, help='Automatically pick the given approximate percentage of the input set to use as the training set')
parser.add_argument('-S', '--save_image', action='store_true', help='Save the resulting spiral as an image')
parser.add_argument('-V', '--voronoi', action='store_true', help='Plot the corresponding voronoi diagram')
parser.add_argument("-C", "--compare", nargs='+', choices=['l2norm', 'quadratic_mahalanobis', 'linear_mahalanobis'], help="Compare with other algorithms")
args = parser.parse_args()
main(parser, args)
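# --- Illustrative sketch (not part of the original file) ---
# A hypothetical invocation (script and file names are assumptions): classify
# input.csv with 3-NN and Euclidean distance, using column 4 as the class label:
#   ./classify.py -k 3 -d euclidean -t train.csv -i input.csv -c 4 -o results -v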
|
from flask import render_template,url_for,redirect,request
from . import main
from app import db, photos
from sqlalchemy import and_
from flask_login import login_required,current_user
from .forms import updateForm, findMatches, photoForm
from ..models import Quality, User
from werkzeug.utils import secure_filename
#display categories on the landing page
@main.route('/')
def index():
""" View root page function that returns index page """
title = 'Home | cupidR'
return render_template('index.html', title = title)
@main.route('/profile/<uname>', methods=['GET', 'POST'])
def profile(uname):
""" View function that returns profile page """
users=None
form = findMatches()
user = User.query.filter_by(username=uname).first()
if form.validate_on_submit():
        matches = Quality.query.filter_by(gender=form.gender.data, complexion=form.complexion.data, personality=form.personality.data).all()
        users = []
        for match in matches:
            users.extend(User.query.filter_by(id=match.user_id).all())
# return redirect(url_for('main.profile', uname=uname, users=users))
return render_template('profile/profile.html', form=form, users=users)
@main.route('/profile/<uname>/edit', methods=['GET', 'POST'])
def update(uname):
form = updateForm()
if form.validate_on_submit():
user = User.query.filter_by(username=uname).first()
qualities = Quality(age=form.age.data, gender=form.gender.data, complexion=form.complexion.data, personality=form.personality.data, height=form.height.data, user_id=user.id)
db.session.add(qualities)
db.session.commit()
return redirect(url_for('main.profile', uname=uname))
title = 'Update profile'
return render_template('profile/update.html',form=form, uname=uname, title=title)
@main.route('/profile/<uname>/find', methods=['GET', 'POST'])
def find(uname):
    title = 'Find matches'
    form = findMatches()
    return render_template('profile/find.html', form=form, title=title)
@main.route('/user/<uname>/update_photo', methods=['POST'])
@login_required
def update_photo(uname):
user = User.query.filter_by(username=uname).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
return redirect(url_for('main.profile', uname=uname))
|
#!/usr/local/bin/python
# coding: utf-8
#
# EXPERIMENTAL TOC STRUCTURE!!!
#
from IIIFpres import iiifpapi3
import csv
from collections import defaultdict
import requests
iiifpapi3.BASE_URL = "https://dlib.biblhertz.it/iiif/bncrges1323/" # this is the path where the manifest must be accessible
# some of the resources use @, which might cause a conflict; we ignore the error
iiifpapi3.INVALID_URI_CHARACTERS = iiifpapi3.INVALID_URI_CHARACTERS.replace("@","")
manifest = iiifpapi3.Manifest()
manifest.set_id(extendbase_url="manifest.json")
manifest.add_label("en","Zucchi, Philosophia magnetica")
manifest.add_behavior("paged")
manifest.add_behavior("continuous")
manifest.set_navDate("2021-11-16T18:17:44.573+01:00")
manifest.set_rights("http://creativecommons.org/licenses/by-nc/4.0/")
manifest.set_requiredStatement(label="Attribution",language_l="en",value="Provided by BHMPI Rome",language_v="en")
manifest.add_metadata(label="author", value="Niccolò Zucchi", language_l="en", language_v="en")
manifest.add_metadata(label="title", value="Philosophia magnetica per principia propria proposita et ad prima in suo genere promota", language_l="en", language_v="none")
manifest.add_metadata(label="date", value="c. 1653", language_l="en", language_v="none")
manifest.add_metadata(label="held by", value="Rom, Biblioteca Nazionale Centrale Vittorio Emanuele II", language_l="en", language_v="none")
manifest.add_metadata(label="shelfmark", value="Fondo Gesuitico 1323, fols. 59r–78r", language_l="en", language_v="none")
manifest.add_metadata(label="catalogue", value="<a href=\"http://aleph.mpg.de/F/?func=find-b&local_base=kub01&find_code=idn&request=BV0000000\">Kubikat </a>", language_l="en", language_v="none")
manifest.add_metadata(label="hosted by", value="<span><a href=\"https://www.biblhertz.it\">BHMPI Rome</a></span>", language_l="en", language_v="none")
manifest.add_metadata(label="part of", value="<span><a href=\"https://ch-sander.github.io/raramagnetica/\">rara magnetica</a> by Christoph Sander</span>", language_l="en", language_v="none")
manifest.add_metadata(label="identifier", value="ark:/30440/02/bncrges1323", language_l="en", language_v="none")
prov = manifest.add_provider()
prov.set_id("https://www.biblhertz.it/en/mission")
prov.set_type()
prov.add_label(language='en',text="Bibliotheca Hertziana – Max Planck Institute for Art History")
homp = prov.add_homepage()
homp.set_id("https://www.biblhertz.it/")
homp.set_type("Text")
homp.add_label("en","Bibliotheca Hertziana")
homp.set_format("text/html")
homp.set_language("en")
logo = prov.add_logo()
logo.set_id("https://dlib2.biblhertz.it/iiif/3/rsc@bhmpi.jp2/full/200,/0/default.jpg")
serv = logo.add_service()
serv.set_id("https://dlib2.biblhertz.it/iiif/3/rsc@bhmpi.jp2")
serv.set_type("ImageService3")
serv.set_profile("level2")
start = manifest.set_start()
start.set_id("https://dlib2.biblhertz.it/iiif/3/bncrges1323/canvas/p0005") # this must be provided
start.set_type("Canvas")
thumbnailurl = "https://dlib2.biblhertz.it/iiif/3/bncrges1323@0001.jp2" # this must be provided or we can choose the first image
thum = manifest.add_thumbnail()
thum.set_id("%s/80,100/0/default.jpg" %thumbnailurl)
thum.set_type("Image")
thum.set_format("image/jpeg")
#thum.set_height(300)
#thum.set_width(219)
tserv = thum.add_service()
tserv.set_id(thumbnailurl)
tserv.set_type("ImageService3")
tserv.set_profile("level2")
manifest.structures = []
rng = manifest.add_range_to_structures()
rng.set_id(extendbase_url="range/")
rng.add_label('en',"Tables of Contents")
strdic = {-1:rng}
last_label = None
rngind = defaultdict(int) # COUNTER
idx = 0
lastcanvasid = None
lastcanvaslabel = None
lastrangeid = None
with open('metadata_v4.csv') as csv_file, open('imageurllist.txt') as url_list:
data = csv.DictReader(csv_file, delimiter=',')
lastlevel = 0
for d in data:
if d['canvas label'] != last_label:
last_label = d['canvas label']
idx+=1
# when you use a proxy you might have to use the original link e.g. "http://localhost:1080/iipsrv/iipsrv.fcgi?iiif=/imageapi//m0171_0/m0171_0visn20_0001a21.jp2/info.json"
iiifimageurl = next(url_list).strip()
imageinfo = requests.get(iiifimageurl,verify=False)
jsoninfo = imageinfo.json()
imgwidth = jsoninfo['width']
imgheight = jsoninfo['height']
canvas = manifest.add_canvas_to_items()
canvas.set_id(extendbase_url="canvas/p%s"%idx) # in this case we use the base url
canvas.set_height(imgheight) # this can be retrieved from the images or using image api
canvas.set_width(imgwidth) # this can be retrieved from the images or using image api
canvas.add_label("en",d['canvas label'])
annopage = canvas.add_annotationpage_to_items()
annopage.set_id(extendbase_url="page/p%s/1" %str(idx).zfill(4))
annotation = annopage.add_annotation_to_items(target=canvas.id)
annotation.set_id(extendbase_url="annotation/p%s-image"%str(idx).zfill(4))
annotation.set_motivation("painting")
annotation.body.set_id("".join((iiifimageurl,"/full/max/0/default.jpg"))) # this will be the url
annotation.body.set_type("Image")
annotation.body.set_format("image/jpeg")
annotation.body.set_width(imgwidth) # this can be retrieved from the images or using image api
annotation.body.set_height(imgheight) # this can be retrieved from the images or using image api
s = annotation.body.add_service()
s.set_id(iiifimageurl) # this will be the url
s.set_type("ImageService3")
s.set_profile("level1")
if d['structure'] != "":
if d['level'] == "":
raise ValueError("Please specify a level for: %s, %s" %(d['structure'],d['canvas label']))
currentlevel = int(d['level'])
previouslevel = currentlevel - 1
if currentlevel < lastlevel: # this is the case of a new chapter
for lv in list(rngind):
if lv > currentlevel:
del rngind[lv] # we have to reset the counters
# for TOC hack
if currentlevel > lastlevel and rngind[currentlevel] == 0:
print("ENTERED!!!")
strdic[currentlevel] = strdic[previouslevel].add_range_to_items()
strdic[currentlevel].set_id(extendbase_url=lastrangeid+"/0")
strdic[currentlevel].add_label('none',"[Incipit]")
                # or we could repeat the last label
# strdic[currentlevel].add_label('none',lastcanvaslabel)
strdic[currentlevel].add_canvas_to_items(canvas_id=lastcanvasid)
rngind[currentlevel] +=1
strdic[currentlevel] = strdic[previouslevel].add_range_to_items()
currentpath = "/".join(["r%s"%rngind[i] for i in sorted(rngind)])
strdic[currentlevel].set_id(extendbase_url="range/"+currentpath)
strdic[currentlevel].add_label('none',d['structure'])
strdic[currentlevel].add_canvas_to_items(canvas_id=canvas.id)
lastlevel = currentlevel
lastcanvasid = canvas.id
lastcanvaslabel = d['structure']
lastrangeid = "range/"+currentpath
if __name__ == "__main__":
manifest.json_save("manifest.json")
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
from elasticsearch import Elasticsearch
class ESClient(object):
def __init__(self, hosts):
self.hosts = hosts
self.es = None
self.is_init = False
self.init()
def init(self):
if not isinstance(self.hosts, list):
self.hosts = [self.hosts]
try:
self.es = Elasticsearch(hosts=self.hosts, timeout=10)
if self.es.ping():
self.is_init = True
except Exception as error:
raise Exception("{}".format(error))
def stats(self):
try:
return self.es.cluster.stats()
except Exception as error:
raise Exception("{}".format(error))
def search(self, index, doc_type, body, timeout="5s"):
try:
return self.es.search(index=index, doc_type=doc_type, body=body, ignore_unavailable=True, timeout=timeout)
except Exception as error:
raise Exception("{}".format(error))
def get(self, index, doc_type, data_id):
try:
return self.es.get(index=index, doc_type=doc_type, id=data_id)
except Exception as error:
raise Exception("{}".format(error))
def delete(self, index):
try:
self.es.indices.delete(index=index, ignore=[400, 404])
except Exception as error:
raise Exception("{}".format(error))
class ESGenerate(object):
def __init__(self, must=None, should=None, must_not=None, include=None, size=0, from_size=None,
sort_dict=None, aggs_dict=None, multi_match=None, highlight=None):
self.search_body = dict()
self.query_must_condition = must if isinstance(must, list) else []
self.query_should_condition = should if should and isinstance(should, list) else None
self.query_must_not_condition = must_not if must_not and isinstance(must_not, list) else None
self.include = include if isinstance(include, list) else []
self.size = size if isinstance(size, int) else None
self.from_size = from_size if isinstance(from_size, int) else None
self.sort_dict = [sort_dict] if isinstance(sort_dict, dict) else None
self.aggs_dict = aggs_dict if aggs_dict and isinstance(aggs_dict, dict) else None
self.multi_match = multi_match if multi_match and isinstance(multi_match, dict) else None
self.highlight = highlight if highlight and isinstance(highlight, dict) else None
def __call__(self):
return self.generate()
def generate(self):
if self.query_should_condition:
self.query_must_condition += [{"bool": {"should": self.query_should_condition}}]
if self.query_must_not_condition:
self.query_must_condition += [{"bool": {"must_not": self.query_must_not_condition}}]
if self.include:
self.search_body["_source"] = self.include
        if self.from_size is not None and self.from_size >= 0:
self.search_body["from"] = self.from_size
if self.sort_dict:
self.search_body["sort"] = self.sort_dict
if self.aggs_dict:
self.search_body["aggs"] = self.aggs_dict
if self.highlight:
self.search_body["highlight"] = self.highlight
if self.size is not None:
self.search_body["size"] = self.size
if self.multi_match:
self.search_body["query"] = {"multi_match": self.multi_match}
else:
self.search_body["query"] = {"bool": {"must": self.query_must_condition}}
return self.search_body
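# Illustrative usage sketch (not part of the original module); the host,
# index, and field names below are hypothetical placeholders.
if __name__ == "__main__":
    client = ESClient("http://localhost:9200")
    body = ESGenerate(
        must=[{"term": {"status": "active"}}],
        include=["title", "status"],
        size=10,
        sort_dict={"created_at": {"order": "desc"}},
    )()
    if client.is_init:
        print(client.search(index="articles", doc_type="_doc", body=body))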
|
import os
import requests
def save_audio(audio_url, filename, dirname='audio'):
    """Download audio_url into dirname/filename; return the path, or '' on failure."""
    try:
        response = requests.get(audio_url)
        response.raise_for_status()
        os.makedirs(dirname, exist_ok=True)
        filename = os.path.join(dirname, filename)
        with open(filename, 'wb') as f:
            f.write(response.content)
        return filename
    except (requests.RequestException, OSError):
        return ""
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""An I/O event loop for non-blocking sockets.
Typical applications will use a single `IOLoop` object, in the
`IOLoop.instance` singleton. The `IOLoop.start` method should usually
be called at the end of the ``main()`` function. Atypical applications may
use more than one `IOLoop`, such as one `IOLoop` per thread, or per `unittest`
case.
In addition to I/O events, the `IOLoop` can also schedule time-based events.
`IOLoop.add_timeout` is a non-blocking alternative to `time.sleep`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import errno
import functools
import heapq
import itertools
import logging
import numbers
import os
import select
import sys
import threading
import time
import traceback
from tornado.concurrent import TracebackFuture, is_future
from tornado.log import app_log, gen_log
from tornado import stack_context
from tornado.util import Configurable, errno_from_exception, timedelta_to_seconds
try:
import signal
except ImportError:
signal = None
try:
import thread # py2
except ImportError:
import _thread as thread # py3
from tornado.platform.auto import set_close_exec, Waker
_POLL_TIMEOUT = 3600.0
class TimeoutError(Exception):
pass
class IOLoop(Configurable):
"""A level-triggered I/O loop.
We use ``epoll`` (Linux) or ``kqueue`` (BSD and Mac OS X) if they
are available, or else we fall back on select(). If you are
implementing a system that needs to handle thousands of
simultaneous connections, you should use a system that supports
either ``epoll`` or ``kqueue``.
Example usage for a simple TCP server::
import errno
import functools
import ioloop
import socket
def connection_ready(sock, fd, events):
while True:
try:
connection, address = sock.accept()
                except socket.error as e:
if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN):
raise
return
connection.setblocking(0)
handle_connection(connection, address)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
sock.bind(("", port))
sock.listen(128)
io_loop = ioloop.IOLoop.instance()
callback = functools.partial(connection_ready, sock)
io_loop.add_handler(sock.fileno(), callback, io_loop.READ)
io_loop.start()
"""
# Constants from the epoll module
_EPOLLIN = 0x001
_EPOLLPRI = 0x002
_EPOLLOUT = 0x004
_EPOLLERR = 0x008
_EPOLLHUP = 0x010
_EPOLLRDHUP = 0x2000
_EPOLLONESHOT = (1 << 30)
_EPOLLET = (1 << 31)
# Our events map exactly to the epoll events
NONE = 0
READ = _EPOLLIN
WRITE = _EPOLLOUT
ERROR = _EPOLLERR | _EPOLLHUP
# Global lock for creating global IOLoop instance
_instance_lock = threading.Lock()
_current = threading.local()
@staticmethod
def instance():
"""Returns a global `IOLoop` instance.
Most applications have a single, global `IOLoop` running on the
main thread. Use this method to get this instance from
another thread. To get the current thread's `IOLoop`, use `current()`.
"""
if not hasattr(IOLoop, "_instance"):
with IOLoop._instance_lock:
if not hasattr(IOLoop, "_instance"):
# New instance after double check
IOLoop._instance = IOLoop()
return IOLoop._instance
@staticmethod
def initialized():
"""Returns true if the singleton instance has been created."""
return hasattr(IOLoop, "_instance")
def install(self):
"""Installs this `IOLoop` object as the singleton instance.
This is normally not necessary as `instance()` will create
an `IOLoop` on demand, but you may want to call `install` to use
a custom subclass of `IOLoop`.
"""
assert not IOLoop.initialized()
IOLoop._instance = self
@staticmethod
def clear_instance():
"""Clear the global `IOLoop` instance.
.. versionadded:: 4.0
"""
if hasattr(IOLoop, "_instance"):
del IOLoop._instance
@staticmethod
def current():
"""Returns the current thread's `IOLoop`.
If an `IOLoop` is currently running or has been marked as current
by `make_current`, returns that instance. Otherwise returns
`IOLoop.instance()`, i.e. the main thread's `IOLoop`.
A common pattern for classes that depend on ``IOLoops`` is to use
a default argument to enable programs with multiple ``IOLoops``
but not require the argument for simpler applications::
class MyClass(object):
def __init__(self, io_loop=None):
self.io_loop = io_loop or IOLoop.current()
In general you should use `IOLoop.current` as the default when
constructing an asynchronous object, and use `IOLoop.instance`
when you mean to communicate to the main thread from a different
one.
"""
current = getattr(IOLoop._current, "instance", None)
if current is None:
return IOLoop.instance()
return current
def make_current(self):
"""Makes this the `IOLoop` for the current thread.
An `IOLoop` automatically becomes current for its thread
when it is started, but it is sometimes useful to call
`make_current` explicitly before starting the `IOLoop`,
so that code run at startup time can find the right
instance.
"""
IOLoop._current.instance = self
@staticmethod
def clear_current():
IOLoop._current.instance = None
@classmethod
def configurable_base(cls):
return IOLoop
@classmethod
def configurable_default(cls):
if hasattr(select, "epoll"):
from tornado.platform.epoll import EPollIOLoop
return EPollIOLoop
if hasattr(select, "kqueue"):
# Python 2.6+ on BSD or Mac
from tornado.platform.kqueue import KQueueIOLoop
return KQueueIOLoop
from tornado.platform.select import SelectIOLoop
return SelectIOLoop
def initialize(self):
pass
def close(self, all_fds=False):
"""Closes the `IOLoop`, freeing any resources used.
If ``all_fds`` is true, all file descriptors registered on the
IOLoop will be closed (not just the ones created by the
`IOLoop` itself).
Many applications will only use a single `IOLoop` that runs for the
entire lifetime of the process. In that case closing the `IOLoop`
is not necessary since everything will be cleaned up when the
process exits. `IOLoop.close` is provided mainly for scenarios
such as unit tests, which create and destroy a large number of
``IOLoops``.
An `IOLoop` must be completely stopped before it can be closed. This
means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must
be allowed to return before attempting to call `IOLoop.close()`.
Therefore the call to `close` will usually appear just after
the call to `start` rather than near the call to `stop`.
.. versionchanged:: 3.1
If the `IOLoop` implementation supports non-integer objects
for "file descriptors", those objects will have their
           ``close`` method called when ``all_fds`` is true.
"""
raise NotImplementedError()
def add_handler(self, fd, handler, events):
"""Registers the given handler to receive the given events for ``fd``.
The ``fd`` argument may either be an integer file descriptor or
a file-like object with a ``fileno()`` method (and optionally a
``close()`` method, which may be called when the `IOLoop` is shut
down).
The ``events`` argument is a bitwise or of the constants
``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.
When an event occurs, ``handler(fd, events)`` will be run.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def update_handler(self, fd, events):
"""Changes the events we listen for ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def remove_handler(self, fd):
"""Stop listening for events on ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def set_blocking_signal_threshold(self, seconds, action):
"""Sends a signal if the `IOLoop` is blocked for more than
        ``seconds`` seconds.
Pass ``seconds=None`` to disable. Requires Python 2.6 on a unixy
platform.
The action parameter is a Python signal handler. Read the
documentation for the `signal` module for more information.
If ``action`` is None, the process will be killed if it is
blocked for too long.
"""
raise NotImplementedError()
def set_blocking_log_threshold(self, seconds):
"""Logs a stack trace if the `IOLoop` is blocked for more than
        ``seconds`` seconds.
Equivalent to ``set_blocking_signal_threshold(seconds,
self.log_stack)``
"""
self.set_blocking_signal_threshold(seconds, self.log_stack)
def log_stack(self, signal, frame):
"""Signal handler to log the stack trace of the current thread.
For use with `set_blocking_signal_threshold`.
"""
gen_log.warning('IOLoop blocked for %f seconds in\n%s',
self._blocking_signal_threshold,
''.join(traceback.format_stack(frame)))
def start(self):
"""Starts the I/O loop.
The loop will run until one of the callbacks calls `stop()`, which
will make the loop stop after the current event iteration completes.
"""
raise NotImplementedError()
def _setup_logging(self):
"""The IOLoop catches and logs exceptions, so it's
important that log output be visible. However, python's
default behavior for non-root loggers (prior to python
3.2) is to print an unhelpful "no handlers could be
found" message rather than the actual log entry, so we
must explicitly configure logging if we've made it this
far without anything.
This method should be called from start() in subclasses.
"""
if not any([logging.getLogger().handlers,
logging.getLogger('tornado').handlers,
logging.getLogger('tornado.application').handlers]):
logging.basicConfig()
def stop(self):
"""Stop the I/O loop.
If the event loop is not currently running, the next call to `start()`
will return immediately.
To use asynchronous methods from otherwise-synchronous code (such as
unit tests), you can start and stop the event loop like this::
ioloop = IOLoop()
async_method(ioloop=ioloop, callback=ioloop.stop)
ioloop.start()
``ioloop.start()`` will return after ``async_method`` has run
its callback, whether that callback was invoked before or
after ``ioloop.start``.
Note that even after `stop` has been called, the `IOLoop` is not
completely stopped until `IOLoop.start` has also returned.
Some work that was scheduled before the call to `stop` may still
be run before the `IOLoop` shuts down.
"""
raise NotImplementedError()
def run_sync(self, func, timeout=None):
"""Starts the `IOLoop`, runs the given function, and stops the loop.
If the function returns a `.Future`, the `IOLoop` will run
until the future is resolved. If it raises an exception, the
`IOLoop` will stop and the exception will be re-raised to the
caller.
The keyword-only argument ``timeout`` may be used to set
a maximum duration for the function. If the timeout expires,
a `TimeoutError` is raised.
This method is useful in conjunction with `tornado.gen.coroutine`
to allow asynchronous calls in a ``main()`` function::
@gen.coroutine
def main():
# do stuff...
if __name__ == '__main__':
IOLoop.instance().run_sync(main)
"""
future_cell = [None]
def run():
try:
result = func()
except Exception:
future_cell[0] = TracebackFuture()
future_cell[0].set_exc_info(sys.exc_info())
else:
if is_future(result):
future_cell[0] = result
else:
future_cell[0] = TracebackFuture()
future_cell[0].set_result(result)
self.add_future(future_cell[0], lambda future: self.stop())
self.add_callback(run)
if timeout is not None:
timeout_handle = self.add_timeout(self.time() + timeout, self.stop)
self.start()
if timeout is not None:
self.remove_timeout(timeout_handle)
if not future_cell[0].done():
raise TimeoutError('Operation timed out after %s seconds' % timeout)
return future_cell[0].result()
def time(self):
"""Returns the current time according to the `IOLoop`'s clock.
The return value is a floating-point number relative to an
unspecified time in the past.
By default, the `IOLoop`'s time function is `time.time`. However,
it may be configured to use e.g. `time.monotonic` instead.
Calls to `add_timeout` that pass a number instead of a
`datetime.timedelta` should use this function to compute the
appropriate time, so they can work no matter what time function
is chosen.
"""
return time.time()
def add_timeout(self, deadline, callback, *args, **kwargs):
"""Runs the ``callback`` at the time ``deadline`` from the I/O loop.
Returns an opaque handle that may be passed to
`remove_timeout` to cancel.
``deadline`` may be a number denoting a time (on the same
scale as `IOLoop.time`, normally `time.time`), or a
`datetime.timedelta` object for a deadline relative to the
current time. Since Tornado 4.0, `call_later` is a more
convenient alternative for the relative case since it does not
require a timedelta object.
Note that it is not safe to call `add_timeout` from other threads.
Instead, you must use `add_callback` to transfer control to the
`IOLoop`'s thread, and then call `add_timeout` from there.
Subclasses of IOLoop must implement either `add_timeout` or
`call_at`; the default implementations of each will call
the other. `call_at` is usually easier to implement, but
subclasses that wish to maintain compatibility with Tornado
versions prior to 4.0 must use `add_timeout` instead.
.. versionchanged:: 4.0
Now passes through ``*args`` and ``**kwargs`` to the callback.
"""
if isinstance(deadline, numbers.Real):
return self.call_at(deadline, callback, *args, **kwargs)
elif isinstance(deadline, datetime.timedelta):
return self.call_at(self.time() + timedelta_to_seconds(deadline),
callback, *args, **kwargs)
else:
raise TypeError("Unsupported deadline %r" % deadline)
def call_later(self, delay, callback, *args, **kwargs):
"""Runs the ``callback`` after ``delay`` seconds have passed.
Returns an opaque handle that may be passed to `remove_timeout`
to cancel. Note that unlike the `asyncio` method of the same
name, the returned object does not have a ``cancel()`` method.
See `add_timeout` for comments on thread-safety and subclassing.
.. versionadded:: 4.0
"""
return self.call_at(self.time() + delay, callback, *args, **kwargs)
def call_at(self, when, callback, *args, **kwargs):
"""Runs the ``callback`` at the absolute time designated by ``when``.
``when`` must be a number using the same reference point as
`IOLoop.time`.
Returns an opaque handle that may be passed to `remove_timeout`
to cancel. Note that unlike the `asyncio` method of the same
name, the returned object does not have a ``cancel()`` method.
See `add_timeout` for comments on thread-safety and subclassing.
.. versionadded:: 4.0
"""
return self.add_timeout(when, callback, *args, **kwargs)
def remove_timeout(self, timeout):
"""Cancels a pending timeout.
The argument is a handle as returned by `add_timeout`. It is
safe to call `remove_timeout` even if the callback has already
been run.
"""
raise NotImplementedError()
def add_callback(self, callback, *args, **kwargs):
"""Calls the given callback on the next I/O loop iteration.
It is safe to call this method from any thread at any time,
except from a signal handler. Note that this is the **only**
method in `IOLoop` that makes this thread-safety guarantee; all
other interaction with the `IOLoop` must be done from that
`IOLoop`'s thread. `add_callback()` may be used to transfer
control from other threads to the `IOLoop`'s thread.
To add a callback from a signal handler, see
`add_callback_from_signal`.
"""
raise NotImplementedError()
def add_callback_from_signal(self, callback, *args, **kwargs):
"""Calls the given callback on the next I/O loop iteration.
Safe for use from a Python signal handler; should not be used
otherwise.
Callbacks added with this method will be run without any
`.stack_context`, to avoid picking up the context of the function
that was interrupted by the signal.
"""
raise NotImplementedError()
def spawn_callback(self, callback, *args, **kwargs):
"""Calls the given callback on the next IOLoop iteration.
Unlike all other callback-related methods on IOLoop,
``spawn_callback`` does not associate the callback with its caller's
``stack_context``, so it is suitable for fire-and-forget callbacks
that should not interfere with the caller.
.. versionadded:: 4.0
"""
with stack_context.NullContext():
self.add_callback(callback, *args, **kwargs)
def add_future(self, future, callback):
"""Schedules a callback on the ``IOLoop`` when the given
`.Future` is finished.
The callback is invoked with one argument, the
`.Future`.
"""
assert is_future(future)
callback = stack_context.wrap(callback)
future.add_done_callback(
lambda future: self.add_callback(callback, future))
def _run_callback(self, callback):
"""Runs a callback with error handling.
For use in subclasses.
"""
try:
ret = callback()
if ret is not None and is_future(ret):
# Functions that return Futures typically swallow all
# exceptions and store them in the Future. If a Future
# makes it out to the IOLoop, ensure its exception (if any)
# gets logged too.
self.add_future(ret, lambda f: f.result())
except Exception:
self.handle_callback_exception(callback)
def handle_callback_exception(self, callback):
"""This method is called whenever a callback run by the `IOLoop`
throws an exception.
By default simply logs the exception as an error. Subclasses
may override this method to customize reporting of exceptions.
The exception itself is not passed explicitly, but is available
in `sys.exc_info`.
"""
app_log.error("Exception in callback %r", callback, exc_info=True)
def split_fd(self, fd):
"""Returns an (fd, obj) pair from an ``fd`` parameter.
We accept both raw file descriptors and file-like objects as
input to `add_handler` and related methods. When a file-like
object is passed, we must retain the object itself so we can
close it correctly when the `IOLoop` shuts down, but the
poller interfaces favor file descriptors (they will accept
file-like objects and call ``fileno()`` for you, but they
always return the descriptor itself).
This method is provided for use by `IOLoop` subclasses and should
not generally be used by application code.
.. versionadded:: 4.0
"""
try:
return fd.fileno(), fd
except AttributeError:
return fd, fd
def close_fd(self, fd):
"""Utility method to close an ``fd``.
If ``fd`` is a file-like object, we close it directly; otherwise
we use `os.close`.
This method is provided for use by `IOLoop` subclasses (in
        implementations of ``IOLoop.close(all_fds=True)``) and should
not generally be used by application code.
.. versionadded:: 4.0
"""
try:
try:
fd.close()
except AttributeError:
os.close(fd)
except OSError:
pass
class PollIOLoop(IOLoop):
"""Base class for IOLoops built around a select-like function.
For concrete implementations, see `tornado.platform.epoll.EPollIOLoop`
(Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or
`tornado.platform.select.SelectIOLoop` (all platforms).
"""
def initialize(self, impl, time_func=None):
super(PollIOLoop, self).initialize()
self._impl = impl
if hasattr(self._impl, 'fileno'):
set_close_exec(self._impl.fileno())
self.time_func = time_func or time.time
self._handlers = {}
self._events = {}
self._callbacks = []
self._callback_lock = threading.Lock()
self._timeouts = []
self._cancellations = 0
self._running = False
self._stopped = False
self._closing = False
self._thread_ident = None
self._blocking_signal_threshold = None
self._timeout_counter = itertools.count()
# Create a pipe that we send bogus data to when we want to wake
# the I/O loop when it is idle
self._waker = Waker()
self.add_handler(self._waker.fileno(),
lambda fd, events: self._waker.consume(),
self.READ)
def close(self, all_fds=False):
with self._callback_lock:
self._closing = True
self.remove_handler(self._waker.fileno())
if all_fds:
for fd, handler in self._handlers.values():
self.close_fd(fd)
self._waker.close()
self._impl.close()
self._callbacks = None
self._timeouts = None
def add_handler(self, fd, handler, events):
fd, obj = self.split_fd(fd)
self._handlers[fd] = (obj, stack_context.wrap(handler))
self._impl.register(fd, events | self.ERROR)
def update_handler(self, fd, events):
fd, obj = self.split_fd(fd)
self._impl.modify(fd, events | self.ERROR)
def remove_handler(self, fd):
fd, obj = self.split_fd(fd)
self._handlers.pop(fd, None)
self._events.pop(fd, None)
try:
self._impl.unregister(fd)
except Exception:
gen_log.debug("Error deleting fd from IOLoop", exc_info=True)
def set_blocking_signal_threshold(self, seconds, action):
if not hasattr(signal, "setitimer"):
gen_log.error("set_blocking_signal_threshold requires a signal module "
"with the setitimer method")
return
self._blocking_signal_threshold = seconds
if seconds is not None:
signal.signal(signal.SIGALRM,
action if action is not None else signal.SIG_DFL)
def start(self):
if self._running:
raise RuntimeError("IOLoop is already running")
self._setup_logging()
if self._stopped:
self._stopped = False
return
old_current = getattr(IOLoop._current, "instance", None)
IOLoop._current.instance = self
self._thread_ident = thread.get_ident()
self._running = True
# signal.set_wakeup_fd closes a race condition in event loops:
# a signal may arrive at the beginning of select/poll/etc
# before it goes into its interruptible sleep, so the signal
# will be consumed without waking the select. The solution is
# for the (C, synchronous) signal handler to write to a pipe,
# which will then be seen by select.
#
# In python's signal handling semantics, this only matters on the
# main thread (fortunately, set_wakeup_fd only works on the main
# thread and will raise a ValueError otherwise).
#
# If someone has already set a wakeup fd, we don't want to
# disturb it. This is an issue for twisted, which does its
# SIGCHLD processing in response to its own wakeup fd being
# written to. As long as the wakeup fd is registered on the IOLoop,
# the loop will still wake up and everything should work.
old_wakeup_fd = None
if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix':
# requires python 2.6+, unix. set_wakeup_fd exists but crashes
# the python process on windows.
try:
old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno())
if old_wakeup_fd != -1:
# Already set, restore previous value. This is a little racy,
# but there's no clean get_wakeup_fd and in real use the
# IOLoop is just started once at the beginning.
signal.set_wakeup_fd(old_wakeup_fd)
old_wakeup_fd = None
except ValueError: # non-main thread
pass
try:
while True:
# Prevent IO event starvation by delaying new callbacks
# to the next iteration of the event loop.
with self._callback_lock:
callbacks = self._callbacks
self._callbacks = []
# Add any timeouts that have come due to the callback list.
# Do not run anything until we have determined which ones
# are ready, so timeouts that call add_timeout cannot
# schedule anything in this iteration.
due_timeouts = []
if self._timeouts:
now = self.time()
while self._timeouts:
if self._timeouts[0].callback is None:
# The timeout was cancelled. Note that the
# cancellation check is repeated below for timeouts
# that are cancelled by another timeout or callback.
heapq.heappop(self._timeouts)
self._cancellations -= 1
elif self._timeouts[0].deadline <= now:
due_timeouts.append(heapq.heappop(self._timeouts))
else:
break
if (self._cancellations > 512
and self._cancellations > (len(self._timeouts) >> 1)):
# Clean up the timeout queue when it gets large and it's
# more than half cancellations.
self._cancellations = 0
self._timeouts = [x for x in self._timeouts
if x.callback is not None]
heapq.heapify(self._timeouts)
for callback in callbacks:
self._run_callback(callback)
for timeout in due_timeouts:
if timeout.callback is not None:
self._run_callback(timeout.callback)
# Closures may be holding on to a lot of memory, so allow
# them to be freed before we go into our poll wait.
callbacks = callback = due_timeouts = timeout = None
if self._callbacks:
# If any callbacks or timeouts called add_callback,
# we don't want to wait in poll() before we run them.
poll_timeout = 0.0
elif self._timeouts:
# If there are any timeouts, schedule the first one.
# Use self.time() instead of 'now' to account for time
# spent running callbacks.
poll_timeout = self._timeouts[0].deadline - self.time()
poll_timeout = max(0, min(poll_timeout, _POLL_TIMEOUT))
else:
# No timeouts and no callbacks, so use the default.
poll_timeout = _POLL_TIMEOUT
if not self._running:
break
if self._blocking_signal_threshold is not None:
# clear alarm so it doesn't fire while poll is waiting for
# events.
signal.setitimer(signal.ITIMER_REAL, 0, 0)
try:
event_pairs = self._impl.poll(poll_timeout)
except Exception as e:
# Depending on python version and IOLoop implementation,
# different exception types may be thrown and there are
# two ways EINTR might be signaled:
# * e.errno == errno.EINTR
# * e.args is like (errno.EINTR, 'Interrupted system call')
if errno_from_exception(e) == errno.EINTR:
continue
else:
raise
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL,
self._blocking_signal_threshold, 0)
# Pop one fd at a time from the set of pending fds and run
# its handler. Since that handler may perform actions on
# other file descriptors, there may be reentrant calls to
# this IOLoop that update self._events
self._events.update(event_pairs)
while self._events:
fd, events = self._events.popitem()
try:
fd_obj, handler_func = self._handlers[fd]
handler_func(fd_obj, events)
except (OSError, IOError) as e:
if errno_from_exception(e) == errno.EPIPE:
# Happens when the client closes the connection
pass
else:
self.handle_callback_exception(self._handlers.get(fd))
except Exception:
self.handle_callback_exception(self._handlers.get(fd))
fd_obj = handler_func = None
finally:
# reset the stopped flag so another start/stop pair can be issued
self._stopped = False
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL, 0, 0)
IOLoop._current.instance = old_current
if old_wakeup_fd is not None:
signal.set_wakeup_fd(old_wakeup_fd)
def stop(self):
self._running = False
self._stopped = True
self._waker.wake()
def time(self):
return self.time_func()
def call_at(self, deadline, callback, *args, **kwargs):
timeout = _Timeout(
deadline,
functools.partial(stack_context.wrap(callback), *args, **kwargs),
self)
heapq.heappush(self._timeouts, timeout)
return timeout
def remove_timeout(self, timeout):
# Removing from a heap is complicated, so just leave the defunct
# timeout object in the queue (see discussion in
# http://docs.python.org/library/heapq.html).
# If this turns out to be a problem, we could add a garbage
# collection pass whenever there are too many dead timeouts.
timeout.callback = None
self._cancellations += 1
def add_callback(self, callback, *args, **kwargs):
with self._callback_lock:
if self._closing:
raise RuntimeError("IOLoop is closing")
list_empty = not self._callbacks
self._callbacks.append(functools.partial(
stack_context.wrap(callback), *args, **kwargs))
if list_empty and thread.get_ident() != self._thread_ident:
# If we're in the IOLoop's thread, we know it's not currently
# polling. If we're not, and we added the first callback to an
# empty list, we may need to wake it up (it may wake up on its
# own, but an occasional extra wake is harmless). Waking
# up a polling IOLoop is relatively expensive, so we try to
# avoid it when we can.
self._waker.wake()
def add_callback_from_signal(self, callback, *args, **kwargs):
with stack_context.NullContext():
if thread.get_ident() != self._thread_ident:
# if the signal is handled on another thread, we can add
# it normally (modulo the NullContext)
self.add_callback(callback, *args, **kwargs)
else:
# If we're on the IOLoop's thread, we cannot use
# the regular add_callback because it may deadlock on
# _callback_lock. Blindly insert into self._callbacks.
# This is safe because the GIL makes list.append atomic.
# One subtlety is that if the signal interrupted the
# _callback_lock block in IOLoop.start, we may modify
# either the old or new version of self._callbacks,
# but either way will work.
self._callbacks.append(functools.partial(
stack_context.wrap(callback), *args, **kwargs))
class _Timeout(object):
"""An IOLoop timeout, a UNIX timestamp and a callback"""
# Reduce memory overhead when there are lots of pending callbacks
__slots__ = ['deadline', 'callback', 'tiebreaker']
def __init__(self, deadline, callback, io_loop):
if not isinstance(deadline, numbers.Real):
raise TypeError("Unsupported deadline %r" % deadline)
self.deadline = deadline
self.callback = callback
self.tiebreaker = next(io_loop._timeout_counter)
# Comparison methods to sort by deadline, with object id as a tiebreaker
# to guarantee a consistent ordering. The heapq module uses __le__
# in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons
# use __lt__).
def __lt__(self, other):
return ((self.deadline, self.tiebreaker) <
(other.deadline, other.tiebreaker))
def __le__(self, other):
return ((self.deadline, self.tiebreaker) <=
(other.deadline, other.tiebreaker))
class PeriodicCallback(object):
"""Schedules the given callback to be called periodically.
The callback is called every ``callback_time`` milliseconds.
`start` must be called after the `PeriodicCallback` is created.
"""
def __init__(self, callback, callback_time, io_loop=None):
self.callback = callback
if callback_time <= 0:
raise ValueError("Periodic callback must have a positive callback_time")
self.callback_time = callback_time
self.io_loop = io_loop or IOLoop.current()
self._running = False
self._timeout = None
def start(self):
"""Starts the timer."""
self._running = True
self._next_timeout = self.io_loop.time()
self._schedule_next()
def stop(self):
"""Stops the timer."""
self._running = False
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
def _run(self):
if not self._running:
return
try:
return self.callback()
except Exception:
self.io_loop.handle_callback_exception(self.callback)
finally:
self._schedule_next()
def _schedule_next(self):
if self._running:
current_time = self.io_loop.time()
while self._next_timeout <= current_time:
self._next_timeout += self.callback_time / 1000.0
self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
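# Illustrative usage sketch (not part of Tornado itself): schedule a periodic
# heartbeat on the singleton IOLoop; the 1000 ms interval is arbitrary.
if __name__ == "__main__":
    def heartbeat():
        gen_log.info("still alive")
    PeriodicCallback(heartbeat, 1000).start()
    IOLoop.instance().start()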
|
# Copyright 2008-2020 Yannick Versley
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import re
import sys
class SimpleFolder(object):
def est_length(self, n):
'''
estimate the number of trees in each fold
'''
return n
def apply_filter(self, tree_no, fold_no, t):
return (tree_no, fold_no, t)
class RangeSel(SimpleFolder):
def __init__(self, ranges):
self.ranges = ranges
def est_length(self, n):
s = 0
for (start, end) in self.ranges:
s += end - start + 1
return [s]
def apply_filter(self, tree_no, fold_no, t):
ranges = self.ranges
while ranges and tree_no > ranges[0][1]:
del ranges[0]
if not ranges:
return None
if tree_no < ranges[0][0]:
return None
return (tree_no, 0, t)
class FoldOnly(SimpleFolder):
def __init__(self, folds):
if isinstance(folds, int):
self.folds = {folds}
else:
self.folds = set(folds)
def est_length(self, n):
if n[0] is None:
return [None]
s = 0
new_folds = set()
for fold in self.folds:
if fold >= len(n):
print("Map fold# %d to %d"%(fold, fold%len(n)), file=sys.stderr)
fold = fold%len(n)
s += n[fold]
new_folds.add(fold)
self.folds = new_folds
return [s]
def apply_filter(self, tree_no, fold_no, t):
if fold_no in self.folds:
return (tree_no, 0, t)
else:
return None
class FoldExcept(SimpleFolder):
def __init__(self, folds):
if isinstance(folds, int):
self.folds = {folds}
else:
self.folds = set(folds)
def est_length(self, n):
if n[0] is None:
return [None]
s = sum(n)
new_folds = set()
for fold in self.folds:
if fold >= len(n):
print("Map fold# %d to %d"%(fold, fold%len(n)), file=sys.stderr)
fold = fold%len(n)
s -= n[fold]
new_folds.add(fold)
self.folds = new_folds
return [s]
def apply_filter(self, tree_no, fold_no, t):
if fold_no not in self.folds:
return (tree_no, 0, t)
else:
return None
class FoldAlternating(SimpleFolder):
def __init__(self, num_folds):
self.num_folds = num_folds
def est_length(self, n):
if n[0] is None:
return [None] * self.num_folds
total = sum(n)
        div = int(total) // self.num_folds  # integer division; '/' would yield a float on Python 3
mod = int(total) % self.num_folds
result = []
for i in range(self.num_folds):
if i < mod:
result.append(div+1)
else:
result.append(div)
return result
def apply_filter(self, tree_no, fold_no, t):
return (tree_no, tree_no%self.num_folds, t)
class FoldSlices(SimpleFolder):
def __init__(self, num_folds):
self.num_folds = num_folds
def est_length(self, n):
if n[0] is None:
raise ValueError('Need to know number of trees. Use range() if in doubt')
total = sum(n)
self.num_total = total
nf = self.num_folds
result = []
for i in range(nf):
            result.append((i + 1) * total // nf - i * total // nf)
return result
def apply_filter(self, tree_no, fold_no, t):
return (tree_no,
                (tree_no * self.num_folds) // self.num_total,
t)
class Folder(object):
def __init__(self):
self.xform = []
def apply_filter(self, trees):
# first: see if we need to determine length
est_len = [None]
try:
for xf in self.xform:
est_len = xf.est_length(est_len)
except ValueError:
trees = list(trees)
est_len = [len(trees)]
for xf in self.xform:
est_len = xf.est_length(est_len)
if est_len[0] is not None:
print("Estimated output size: %d"%(sum(est_len),), file=sys.stderr)
for i, t in enumerate(trees):
result = self.apply_all(i, 0, t)
if result is None:
continue
            yield result[2]  # yield the (possibly transformed) tree
def apply_all(self, tno, fold, t):
for xf in self.xform:
result = xf.apply_filter(tno, fold, t)
if result is None:
return None
else:
tno, fold, t = result
return tno, fold, t
tokens_table = [(code, re.compile(rgx)) for
(code, rgx) in [
('ranges', r'([0-9]+-[0-9]+(?:,[0-9]+-[0-9]+)*)'),
('range', r'range\(([0-9]+),([0-9]+)\)'),
('alternating', r'alternating\(([0-9]+)\)'),
('slices', r'slices\(([0-9]+)\)'),
('only', r'only\(([0-9]+(?:,[0-9]+)*)\)'),
('except', r'except\(([0-9]+(?:,[0-9]+)*)\)'),
('trainfold', r'(train|test)(dev|final)([0-9]+)/([0-9]+)')]]
def interpret(code, arg):
if code == 'ranges':
# convert to 0-based indices
def split_part(part):
x = part.split('-')
return (int(x[0])-1, int(x[1])-1)
ranges = [split_part(part)
for part in arg[0].split(',')]
return [RangeSel(ranges)]
elif code == 'range':
# convert to 0-based indices
return [RangeSel([[int(arg[0])-1, int(arg[1])-1]])]
elif code == 'alternating':
return [FoldAlternating(int(arg[0]))]
elif code == 'slices':
return [FoldSlices(int(arg[0]))]
elif code == 'only':
return [FoldOnly([int(x) for x in arg[0].split(',')])]
elif code == 'except':
return [FoldExcept([int(x) for x in arg[0].split(',')])]
elif code == 'trainfold':
# n-1: test fold
# n-2: devtest fold
# others: train
n_folds = int(arg[3])
offset = int(arg[2]) - 1
finaltest_fold = (n_folds + offset - 1) % n_folds
devtest_fold = (n_folds + offset - 2) % n_folds
if arg[1] == 'dev':
if arg[0] == 'train':
sel = FoldExcept([devtest_fold, finaltest_fold])
else:
sel = FoldOnly([devtest_fold])
else:
if arg[0] == 'train':
sel = FoldExcept([finaltest_fold])
else:
sel = FoldOnly([finaltest_fold])
return [FoldAlternating(int(arg[3])), sel]
else:
assert False, code
def parse_foldspec(spec):
f = Folder()
idx = 0
while idx < len(spec):
if spec[idx] in './;:':
idx += 1
continue
for code, rgx in tokens_table:
m = rgx.match(spec, idx)
if m:
x = interpret(code, m.groups())
f.xform += x
idx = m.end()
return f
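# Illustrative sketch (not part of the original module): 'traindev3/10' deals
# trees round-robin into 10 folds and keeps the 8 training folds for the dev
# setting with offset 3; the trees here are stand-in strings.
if __name__ == "__main__":
    folder = parse_foldspec('traindev3/10')
    for t in folder.apply_filter(['tree%d' % i for i in range(20)]):
        print(t)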
def do_recombine(tree_seqs, init=1):
'''
given trees that were distributed in round-robin fashion,
produces a sequence of trees from the re-combined sequences.
To combine testfinal folds, use init=1, for testdev
folds, use init=2
'''
iters = [iter(trees) for trees in tree_seqs]
n_iters = len(tree_seqs)
i = (init + n_iters)%n_iters
    while True:
        try:
            yield next(iters[i])
        except StopIteration:
            # PEP 479 (Python 3.7+): StopIteration must not leak out of a
            # generator; end cleanly once a sequence is exhausted.
            return
        i = (i + 1) % n_iters
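# Illustrative: if trees 0..5 were dealt round-robin starting at sequence 1
# (seqs == [[2, 5], [0, 3], [1, 4]]), do_recombine(seqs, init=1) restores the
# original order 0, 1, 2, 3, 4, 5.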
|
# -*- coding: utf-8 -*-
"""
gc3-query.__init__.py [9/8/2018 1:34 PM]
~~~~~~~~~~~~~~~~
<DESCR SHORT>
<DESCR>
"""
################################################################################
## Standard Library Imports
import sys, os
################################################################################
## Third-Party Imports
from dataclasses import dataclass
################################################################################
## Project Imports
from gc3_query.lib import *
_debug, _info, _warning, _error, _critical = get_logging(name=__name__)
|
# suorganizer/urls.py
# Django modules
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('blog.urls', namespace='blog')),
path('', include('organizer.urls', namespace='organizer')),
]
|
#!/usr/bin/env python3
"""
pass.py
Find hardcoded passwords on source code of your project.
python pass.py path/to/project
"""
import os
import sys
import re
import fnmatch
import json
from argparse import ArgumentParser
DEFAULT_BAD_WORDS = ['token', 'oauth', 'secret', 'pass', 'password', 'senha']
DEFAULT_ANALYZERS = [r' *[:=] *["\'][^"\']{4,}["\']', r'[:=][^"\'& ,;{()<\n]{4,}'] # str and url based.
def check_exclude_pattern(checkers, line):
"""Regex checker function used to ignore false positives."""
for pattern in checkers:
if pattern.match(line):
return True
return False
def can_analyze_file(include_paths, exclude_paths, path):
"""Glob checker function used to specify or ignore paths and files."""
if include_paths and not any(fnmatch.fnmatch(path, p) for p in include_paths):
return False
    if exclude_paths and any(fnmatch.fnmatch(path, p) for p in exclude_paths):
return False
return True
def build_bad_words(words):
"""Builds a regex pattern based on the bad words provided."""
bad_words = []
for word in words:
rule = '(?:'
for upper, lower in zip(word.upper(), word.lower()):
rule += f'[{upper}{lower}]'
rule += ')'
bad_words.append(rule)
return '|'.join(bad_words)
def build_regex_analyzers(rules, bad_words):
"""
Merges the regex patterns from the bad words
with the analyzers in order to create the
final regex pattern to be used.
"""
analyzers = []
for rule in rules:
analyzers.append(
re.compile(f'(?:{bad_words})(?:[a-zA-Z_][a-zA-Z0-9_]*)?{rule}')
)
return analyzers
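# Illustrative: build_bad_words(['pass']) returns '(?:[Pp][Aa][Ss][Ss])', so a
# compiled analyzer matches case-insensitive identifiers followed by an
# assignment, e.g. the line: password = "hunter22"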
def check_file_handler(path, max_length, analyzers, patterns):
"""
Check all lines of a single file.
Also checks for max line length and for false positives.
"""
result = []
try:
with open(path, 'r') as handler:
            for i, line in enumerate(handler, start=1):  # report 1-based line numbers
# Checking for max line length.
if len(line) > max_length:
continue
for checker in analyzers: # All analyzers run in every line.
data = checker.findall(line)
# Check if it's a false positive.
if data and not check_exclude_pattern(patterns, line):
result.append({
'file': path,
'target': data[0],
'line': i,
'string': line.strip(),
})
except UnicodeDecodeError:
# Ignore non text files.
pass
return result
def start_digging(root_path, limit, max_length, analyzers, patterns, include_paths, exclude_paths):
"""Start walking to all folders and subfolders in order to reach all files."""
counter = 0
result = []
for root, subfolder_list, file_list in os.walk(root_path):
for file in file_list:
path = os.path.join(root, file)
# Apply include/exclude glob rules.
if not can_analyze_file(include_paths, exclude_paths, path):
continue
# File counter.
            if counter >= limit:  # stop once the file limit is reached
return counter, result
counter += 1
# Send file to be analyzed by the handler.
result += check_file_handler(path, max_length, analyzers, patterns)
return counter, result
if __name__ == "__main__":
parser = ArgumentParser(description='Check for hardcoded passwords and tokens in your project.')
parser.add_argument('--bad-words', type=open, dest='bad_words',
help='File containing which WORDS to analyze, one word per line. \
If not provided, will fallback to the default bad words list.'
)
parser.add_argument('--ignore-patterns', type=open, dest='ignore_patterns',
help='File containing regex patterns of which TARGETS to ignore.'
)
parser.add_argument('--include-paths', type=open, dest='include_file',
help='File containing glob patterns of which FILES to analyze. \
WARNING: This option has precedence over the option "--exclude-paths".'
)
parser.add_argument('--exclude-paths', type=open, dest='exclude_file',
help='File containing glob patterns of which FILES to ignore.'
)
parser.add_argument('--max-length', type=int, default=1000, dest='max_length',
                        help='The maximum length of a line to analyze.'
)
parser.add_argument('--max-checks', type=int, default=sys.maxsize, dest='max_checks',
                        help='Max number of files to analyze.'
)
parser.add_argument('--json', action='store_true', dest='json',
help='Output result in a pretty JSON format.'
)
parser.add_argument('path', type=str,
help='Path to the project.'
)
args = parser.parse_args()
# Preparing the bad word list.
bad_words = []
if args.bad_words:
bad_words = args.bad_words.read().splitlines()
args.bad_words.close()
# Preparing for target patterns to ignore.
ignore_patterns = []
if args.ignore_patterns:
for pattern in args.ignore_patterns:
ignore_patterns.append(re.compile(pattern))
args.ignore_patterns.close()
# Checking for paths to include in the results.
include_paths = []
if args.include_file:
include_paths = args.include_file.read().splitlines()
args.include_file.close()
# Checking for paths to exclude from results.
exclude_paths = []
if args.exclude_file:
exclude_paths = args.exclude_file.read().splitlines()
args.exclude_file.close()
# Building bad words.
bad_words = build_bad_words(bad_words or DEFAULT_BAD_WORDS)
# Building regex analyzers.
analyzers = build_regex_analyzers(DEFAULT_ANALYZERS, bad_words)
# Start the digging!!
counter, result = start_digging(
args.path,
args.max_checks,
args.max_length,
analyzers,
ignore_patterns,
include_paths,
exclude_paths
)
# Outputs to JSON or to stdout.
if args.json:
print(json.dumps(result, indent=2))
elif counter == 0:
print('No file found.')
print('STATUS: FAILED')
else:
for r in result:
print('File:\t', r['file'])
print('Line:\t', r['line'])
print('Target:\t', r['target'], '\n')
print(r['string'])
print('\n--------------------------------------------------------------------------------\n')
print('Found: {} | Files Checked: {} | (Hit Upper Limit? {})'.format(len(result), counter, 'Yes' if counter >= args.max_checks else 'No'))
print('STATUS: {}'.format('FAILED' if result else 'OK'))
# For CI/CD purposes.
sys.exit(1 if result else 0)
|
import json
import pexpect
from programmingalpha.Utility import getLogger
logger = getLogger(__name__)
class SimpleTokenizer(object):
    def tokenize(self, txt):
        return txt.split()
class CoreNLPTokenizer(object):
def __init__(self):
"""
Args:
classpath: Path to the corenlp directory of jars
mem: Java heap memory
"""
#for file in `find /home/zhangzy/stanford-corenlp-full-2018-10-05/ -name "*.jar"`; do export
#CLASSPATH="$CLASSPATH:`realpath $file`"; done
import os
path="/home/LAB/zhangzy/stanford-corenlp-full-2018-10-05/"
files=os.listdir(path)
jars=[]
for f in files:
if f[-4:]==".jar":
jars.append(os.path.join(path,f))
self.classpath = ":".join(jars)
self.mem = "4g"
self._launch()
logger.info("init core_nlp tokenizer finished!")
def _launch(self):
"""Start the CoreNLP jar with pexpect."""
annotators = ['tokenize', 'ssplit']
annotators = ','.join(annotators)
options = ','.join(['untokenizable=noneDelete',
'invertible=true'])
cmd = ['java', '-mx' + self.mem, '-cp', '"%s"' % self.classpath,
'edu.stanford.nlp.pipeline.StanfordCoreNLP', '-annotators',
annotators, '-tokenize.options', options,
'-outputFormat', 'json', '-prettyPrint', 'false']
# We use pexpect to keep the subprocess alive and feed it commands.
# Because we don't want to get hit by the max terminal buffer size,
# we turn off canonical input processing to have unlimited bytes.
self.corenlp = pexpect.spawn('/bin/bash', maxread=100000, timeout=60)
self.corenlp.setecho(False)
self.corenlp.sendline('stty -icanon')
self.corenlp.sendline(' '.join(cmd))
self.corenlp.delaybeforesend = 0
self.corenlp.delayafterread = 0
self.corenlp.expect_exact('NLP>', searchwindowsize=100)
@staticmethod
def _convert(token):
if token == '-LRB-':
return '('
if token == '-RRB-':
return ')'
if token == '-LSB-':
return '['
if token == '-RSB-':
return ']'
if token == '-LCB-':
return '{'
if token == '-RCB-':
return '}'
return token
def tokenize(self, text):
# Since we're feeding text to the commandline, we're waiting on seeing
# the NLP> prompt. Hacky!
if 'NLP>' in text:
raise RuntimeError('Bad token (NLP>) in text!')
# Sending q will cause the process to quit -- manually override
if text.lower().strip() == 'q':
token = text.strip()
return [token]
# Minor cleanup before tokenizing.
clean_text = text.replace('\n', ' ')
self.corenlp.sendline(clean_text.encode('utf-8'))
self.corenlp.expect_exact('NLP>', searchwindowsize=100)
# Skip to start of output (may have been stderr logging messages)
output = self.corenlp.before
start = output.find(b'{\r\n "sentences":')
output = json.loads(output[start:].decode('utf-8'))
tokens = tuple([self._convert(t["word"]) for s in output['sentences'] for t in s['tokens']])
#tokens = tuple([t["word"] for s in output['sentences'] for t in s['tokens']])
return tokens
class SpacyTokenizer(object):
def __init__(self):
"""
Args:
model: spaCy model to use (either path, or keyword like 'en').
"""
import spacy
model = 'en'
nlp_kwargs = {'parser': False}
nlp_kwargs['tagger'] = False
nlp_kwargs['entity'] = False
self.nlp = spacy.load(model, **nlp_kwargs)
logger.info("init spacy tokenizer finished!")
def tokenize(self, text):
# We don't treat new lines as tokens.
clean_text = text.replace('\n', ' ')
tokens = tuple(map(lambda t:t.text,self.nlp.tokenizer(clean_text)))
return tokens
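# Illustrative sketch (not part of the original module): all three tokenizers
# expose the same tokenize() interface, so they are interchangeable.
if __name__ == "__main__":
    print(SimpleTokenizer().tokenize("Hello world , this is a test ."))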
|
from typing import Tuple
import torch
from torch import nn
from torch.nn import functional as F
from utils import Tensor, assert_shape, build_grid, conv_transpose_out_shape
class SlotAttention(nn.Module):
"""Slot attention module that iteratively performs cross-attention.
Args:
slot_agnostic (bool): If True, all slots share trained embedding.
            If False, we train embeddings separately for each slot.
Defaults to True (as in the paper).
random_slot (bool): If True, we train mu and sigma for slot embedding,
and sample slot from the Gaussian when forward pass. If False, we
train slot embedding itself (similar to the learnable positional
embedding in DETR), so that we use the same embedding to interact
with input image features. Defaults to True (as in the paper).
"""
def __init__(self,
in_features,
num_iterations,
num_slots,
slot_size,
mlp_hidden_size,
learnable_slot=False,
slot_agnostic=True,
random_slot=True,
epsilon=1e-6):
super().__init__()
self.in_features = in_features
self.num_iterations = num_iterations
self.num_slots = num_slots
self.slot_size = slot_size # number of hidden layers in slot dimensions
self.mlp_hidden_size = mlp_hidden_size
self.learnable_slot = learnable_slot
self.slot_agnostic = slot_agnostic
self.random_slot = random_slot
self.epsilon = epsilon
self.norm_inputs = nn.LayerNorm(self.in_features)
        # LayerNorm normalizes over the feature (slot_size) dimension of each slot.
self.norm_slots = nn.LayerNorm(self.slot_size)
self.norm_mlp = nn.LayerNorm(self.slot_size)
# Linear maps for the attention module.
self.project_q = nn.Linear(self.slot_size, self.slot_size, bias=False)
self.project_k = nn.Linear(in_features, self.slot_size, bias=False)
self.project_v = nn.Linear(in_features, self.slot_size, bias=False)
# Slot update functions.
self.gru = nn.GRUCell(self.slot_size, self.slot_size)
self.mlp = nn.Sequential(
nn.Linear(self.slot_size, self.mlp_hidden_size),
nn.ReLU(),
nn.Linear(self.mlp_hidden_size, self.slot_size),
)
trainable_slot_num = 1 if self.slot_agnostic else self.num_slots
slot_init_func = self.register_parameter if \
learnable_slot else self.register_buffer
if self.random_slot:
# train the mean and std of slot embedding
slot_init_func(
"slots_mu",
torch.nn.Parameter(
nn.init.xavier_uniform_(
torch.zeros((1, trainable_slot_num, self.slot_size)),
gain=nn.init.calculate_gain("linear"))),
)
slot_init_func(
"slots_log_sigma",
torch.nn.Parameter(
nn.init.xavier_uniform_(
torch.zeros((1, trainable_slot_num, self.slot_size)),
gain=nn.init.calculate_gain("linear"))),
)
else:
# train slot embedding itself
# should definitely be one trainable embedding for each slot
assert not slot_agnostic, 'cannot use the same emb for each slot!'
slot_init_func(
"slots_mu",
torch.nn.Parameter(
nn.init.xavier_normal_( # TODO: mind the init method here?
torch.zeros((1, self.num_slots, self.slot_size)),
gain=nn.init.calculate_gain("linear"))),
)
def forward(self, inputs: Tensor):
# `inputs` has shape [batch_size, num_inputs, inputs_size].
batch_size, num_inputs, inputs_size = inputs.shape
inputs = self.norm_inputs(inputs) # Apply layer norm to the input.
# Shape: [batch_size, num_inputs, slot_size].
k = self.project_k(inputs)
# Shape: [batch_size, num_inputs, slot_size].
v = self.project_v(inputs)
# Initialize the slots. Shape: [batch_size, num_slots, slot_size].
if self.random_slot:
# if in testing mode, fix random seed to get same slot embedding
if not self.training:
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
slots_init = torch.randn(
(1, self.num_slots,
self.slot_size)).repeat(batch_size, 1, 1)
# in training mode, sample from Gaussian with learned mean and std
else:
slots_init = torch.randn(
(batch_size, self.num_slots, self.slot_size))
slots_init = slots_init.type_as(inputs)
slots = self.slots_mu + self.slots_log_sigma.exp() * slots_init
else:
# use the learned embedding itself, no sampling, no randomness
slots = self.slots_mu.repeat(batch_size, 1, 1)
# Multiple rounds of attention.
for _ in range(self.num_iterations):
slots_prev = slots
slots = self.norm_slots(slots)
# Attention.
q = self.project_q(
slots) # Shape: [batch_size, num_slots, slot_size].
attn_norm_factor = self.slot_size**-0.5
attn_logits = attn_norm_factor * torch.matmul(k, q.transpose(2, 1))
attn = F.softmax(attn_logits, dim=-1)
# `attn` has shape: [batch_size, num_inputs, num_slots].
# Weighted mean.
attn = attn + self.epsilon
attn = attn / torch.sum(attn, dim=1, keepdim=True)
updates = torch.matmul(attn.transpose(1, 2), v)
# `updates` has shape: [batch_size, num_slots, slot_size].
# Slot update.
# GRU is expecting inputs of size (N,H)
# so flatten batch and slots dimension
slots = self.gru(
updates.view(batch_size * self.num_slots, self.slot_size),
slots_prev.view(batch_size * self.num_slots, self.slot_size),
)
slots = slots.view(batch_size, self.num_slots, self.slot_size)
slots = slots + self.mlp(self.norm_mlp(slots))
return slots
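# Illustrative sketch (not part of the original module; assumes the repo's
# utils helpers are importable): run slot attention over dummy features.
if __name__ == "__main__":
    sa = SlotAttention(in_features=64, num_iterations=3, num_slots=7,
                       slot_size=64, mlp_hidden_size=128)
    feats = torch.randn(2, 256, 64)  # [batch_size, num_inputs, in_features]
    print(sa(feats).shape)  # -> torch.Size([2, 7, 64])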
class SlotAttentionModel(nn.Module):
def __init__(
self,
resolution: Tuple[int, int],
num_slots: int,
num_iterations: int,
in_channels: int = 3,
kernel_size: int = 5,
slot_size: int = 64,
hidden_dims: Tuple[int, ...] = (64, 64, 64, 64),
decoder_resolution: Tuple[int, int] = (8, 8),
empty_cache: bool = False,
        use_relu: bool = False,  # TODO: official code uses ReLU
slot_mlp_size: int = 128,
learnable_slot: bool = False,
slot_agnostic: bool = True,
random_slot: bool = True,
use_entropy_loss: bool = False,
):
super().__init__()
self.resolution = resolution
self.num_slots = num_slots
self.num_iterations = num_iterations
self.in_channels = in_channels
self.kernel_size = kernel_size
self.slot_size = slot_size
self.empty_cache = empty_cache
self.hidden_dims = hidden_dims
self.decoder_resolution = decoder_resolution
self.out_features = self.hidden_dims[-1]
modules = []
channels = self.in_channels
# Build Encoder
for h_dim in self.hidden_dims:
modules.append(
nn.Sequential(
nn.Conv2d(
channels,
out_channels=h_dim,
kernel_size=self.kernel_size,
stride=1,
padding=self.kernel_size // 2,
),
nn.ReLU() if use_relu else nn.LeakyReLU(),
))
channels = h_dim
self.encoder = nn.Sequential(*modules)
self.encoder_pos_embedding = SoftPositionEmbed(self.in_channels,
self.out_features,
resolution)
self.encoder_out_layer = nn.Sequential(
nn.Linear(self.out_features, self.out_features),
nn.ReLU() if use_relu else nn.LeakyReLU(),
nn.Linear(self.out_features, self.out_features),
)
# Build Decoder
modules = []
in_size = decoder_resolution[0]
out_size = in_size
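        # NOTE: when i == 0, hidden_dims[i - 1] wraps around to the last entry;
        # channel counts only line up because all hidden_dims are equal.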
for i in range(len(self.hidden_dims) - 1, -1, -1):
modules.append(
nn.Sequential(
nn.ConvTranspose2d(
self.hidden_dims[i],
self.hidden_dims[i - 1],
kernel_size=5,
stride=2,
padding=2,
output_padding=1,
),
nn.ReLU() if use_relu else nn.LeakyReLU(),
))
out_size = conv_transpose_out_shape(out_size, 2, 2, 5, 1)
assert_shape(
resolution,
(out_size, out_size),
message="Output shape of decoder did not match input resolution. "
"Try changing `decoder_resolution`.",
)
# same convolutions
modules.append(
nn.Sequential(
nn.ConvTranspose2d(
self.out_features,
self.out_features,
kernel_size=5,
stride=1,
padding=2,
output_padding=0,
),
nn.ReLU() if use_relu else nn.LeakyReLU(),
nn.ConvTranspose2d(
self.out_features,
4,
kernel_size=3,
stride=1,
padding=1,
output_padding=0,
),
))
self.decoder = nn.Sequential(*modules)
self.decoder_pos_embedding = SoftPositionEmbed(self.in_channels,
self.out_features,
self.decoder_resolution)
self.slot_attention = SlotAttention(
in_features=self.out_features,
num_iterations=self.num_iterations,
num_slots=self.num_slots,
slot_size=self.slot_size,
mlp_hidden_size=slot_mlp_size,
learnable_slot=learnable_slot,
slot_agnostic=slot_agnostic,
random_slot=random_slot,
)
self.use_entropy_loss = use_entropy_loss # -p*log(p)
def forward(self, x):
if self.empty_cache:
torch.cuda.empty_cache()
batch_size, num_channels, height, width = x.shape
encoder_out = self.encoder(x)
encoder_out = self.encoder_pos_embedding(encoder_out)
# `encoder_out` has shape: [batch_size, filter_size, height, width]
encoder_out = torch.flatten(encoder_out, start_dim=2, end_dim=3)
# `encoder_out` has shape: [batch_size, filter_size, height*width]
encoder_out = encoder_out.permute(0, 2, 1)
encoder_out = self.encoder_out_layer(encoder_out)
# `encoder_out` has shape: [batch_size, height*width, filter_size]
# (batch_size, self.num_slots, self.slot_size)
slots = self.slot_attention(encoder_out)
# `slots` has shape: [batch_size, num_slots, slot_size].
batch_size, num_slots, slot_size = slots.shape
# spatial broadcast
slots = slots.view(batch_size * num_slots, slot_size, 1, 1)
decoder_in = slots.repeat(1, 1, self.decoder_resolution[0],
self.decoder_resolution[1])
out = self.decoder_pos_embedding(decoder_in)
out = self.decoder(out)
# `out` has shape: [batch_size*num_slots, num_channels+1, height, width].
out = out.view(batch_size, num_slots, num_channels + 1, height, width)
recons = out[:, :, :num_channels, :, :]
masks = out[:, :, -1:, :, :]
masks = F.softmax(masks, dim=1)
recon_combined = torch.sum(recons * masks, dim=1)
return recon_combined, recons, masks, slots
def loss_function(self, input):
recon_combined, recons, masks, slots = self.forward(input)
loss = F.mse_loss(recon_combined, input)
loss_dict = {
'recon_loss': loss,
}
# masks: [B, num_slots, 1, H, W], apply entropy loss
if self.use_entropy_loss:
masks = masks[:, :, 0] # [B, num_slots, H, W]
entropy_loss = (-masks * torch.log(masks + 1e-6)).sum(1).mean()
loss_dict['entropy'] = entropy_loss
return loss_dict
class SoftPositionEmbed(nn.Module):
def __init__(self, num_channels: int, hidden_size: int,
resolution: Tuple[int, int]):
super().__init__()
self.dense = nn.Linear(
in_features=num_channels + 1, out_features=hidden_size)
self.register_buffer("grid", build_grid(resolution))
def forward(self, inputs: Tensor):
emb_proj = self.dense(self.grid).permute(0, 3, 1, 2)
return inputs + emb_proj
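# Minimal end-to-end sketch (hedged; with the defaults above, the (8, 8)
# decoder_resolution is doubled by four stride-2 layers up to 128x128):
#   model = SlotAttentionModel(resolution=(128, 128), num_slots=7,
#                              num_iterations=3)
#   recon_combined, recons, masks, slots = model(torch.randn(1, 3, 128, 128))
#   # recon_combined: [1, 3, 128, 128], recons: [1, 7, 3, 128, 128],
#   # masks: [1, 7, 1, 128, 128]; note `slots` is returned in its spatially
#   # broadcast form [batch*num_slots, slot_size, 1, 1].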
|
# Version: 0.11
"""
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, and pypy
[![Build Status](https://travis-ci.org/warner/python-versioneer.png?branch=master)](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere in your $PATH
* run `versioneer-installer` in your source tree: this installs `versioneer.py`
* follow the instructions below (also in the `versioneer.py` docstring)
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example 'git describe --tags --dirty --always' reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time. However,
when you use "setup.py build" or "setup.py sdist", `_version.py` in the new
copy is replaced by a small static file that contains just the generated
version data.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the "git archive" command. As a result, generated tarballs will
contain enough information to get the proper version.
## Installation
First, decide on values for the following configuration variables:
* `VCS`: the version control system you use. Currently accepts "git".
* `versionfile_source`:
A project-relative pathname into which the generated version strings should
be written. This is usually a `_version.py` next to your project's main
`__init__.py` file. If your project uses `src/myproject/__init__.py`, this
should be `src/myproject/_version.py`. This file should be checked in to
your VCS as usual: the copy created below by `setup.py versioneer` will
include code that parses expanded VCS keywords in generated tarballs. The
'build' and 'sdist' commands will replace it with a copy that has just the
calculated version string.
* `versionfile_build`:
Like `versionfile_source`, but relative to the build directory instead of
the source directory. These will differ when your setup.py uses
'package_dir='. If you have `package_dir={'myproject': 'src/myproject'}`,
then you will probably have `versionfile_build='myproject/_version.py'` and
`versionfile_source='src/myproject/_version.py'`.
* `tag_prefix`:
a string, like 'PROJECTNAME-', which appears at the start of all VCS tags.
If your tags look like 'myproject-1.2.0', then you should use
tag_prefix='myproject-'. If you use unprefixed tags like '1.2.0', this
should be an empty string.
* `parentdir_prefix`:
a string, frequently the same as tag_prefix, which appears at the start of
all unpacked tarball filenames. If your tarball unpacks into
'myproject-1.2.0', this should be 'myproject-'.
This tool provides one script, named `versioneer-installer`. That script does
one thing: write a copy of `versioneer.py` into the current directory.
To versioneer-enable your project:
* 1: Run `versioneer-installer` to copy `versioneer.py` into the top of your
source tree.
* 2: add the following lines to the top of your `setup.py`, with the
configuration values you decided earlier:
import versioneer
versioneer.VCS = 'git'
versioneer.versionfile_source = 'src/myproject/_version.py'
versioneer.versionfile_build = 'myproject/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'myproject-' # dirname like 'myproject-1.2.0'
* 3: add the following arguments to the setup() call in your setup.py:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
* 4: now run `setup.py versioneer`, which will create `_version.py`, and
will modify your `__init__.py` to define `__version__` (by calling a
function from `_version.py`). It will also modify your `MANIFEST.in` to
include both `versioneer.py` and the generated `_version.py` in sdist
tarballs.
* 5: commit these changes to your VCS. To make sure you won't forget,
`setup.py versioneer` will mark everything it touched for addition.
## Post-Installation Usage
Once established, all uses of your tree from a VCS checkout should get the
current version string. All generated tarballs should include an embedded
version string (so users who unpack them will not need a VCS tool installed).
If you distribute your project through PyPI, then the release process should
boil down to two steps:
* 1: git tag 1.0
* 2: python setup.py register sdist upload
If you distribute it through github (i.e. users use github to generate
tarballs with `git archive`), the process is:
* 1: git tag 1.0
* 2: git push; git push --tags
Currently, all version strings must be based upon a tag. Versioneer will
report "unknown" until your tree has at least one tag in its history. This
restriction will be fixed eventually (see issue #12).
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different keys for different flavors
of the version string:
* `['version']`: condensed tag+distance+shortid+dirty identifier. For git,
this uses the output of `git describe --tags --dirty --always` but strips
the tag_prefix. For example "0.11-2-g1076c97-dirty" indicates that the tree
is like the "1076c97" commit but has uncommitted changes ("-dirty"), and
that this commit is two revisions ("-2-") beyond the "0.11" tag. For
released software (exactly equal to a known tag), the identifier will only
contain the stripped tag, e.g. "0.11".
* `['full']`: detailed revision identifier. For Git, this is the full SHA1
commit id, followed by "-dirty" if the tree contains uncommitted changes,
e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac-dirty".
Some variants are more useful than others. Including `full` in a bug report
should allow developers to reconstruct the exact code being tested (or
indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
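For example, a checkout that is two revisions past the "0.11" tag and has
uncommitted local changes would report:
    {'version': '0.11-2-g1076c97-dirty',
     'full': '1076c978a8d3cfc70f408fe5974aa6c092c949ac-dirty'}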
In the future, this will also include a
[PEP-0440](http://legacy.python.org/dev/peps/pep-0440/) -compatible flavor
(e.g. `1.2.post0.dev123`). This loses a lot of information (and has no room
for a hash-based revision id), but is safe to use in a `setup.py`
"`version=`" argument. It also enables tools like *pip* to compare version
strings and evaluate compatibility constraint declarations.
The `setup.py versioneer` command adds the following text to your
`__init__.py` to place a basic version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* re-run `versioneer-installer` in your source tree to replace your copy of
`versioneer.py`
* edit `setup.py`, if necessary, to include any new configuration settings
indicated by the release notes
* re-run `setup.py versioneer` to replace `SRC/_version.py`
* commit any changed files
### Upgrading from 0.10 to 0.11
You must add a `versioneer.VCS = "git"` to your `setup.py` before re-running
`setup.py versioneer`. This will enable the use of additional version-control
systems (SVN, etc) in the future.
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is hereby released into the
public domain. The `_version.py` that it creates is also in the public
domain.
"""
import os, sys, re, subprocess, errno
from distutils.core import Command
from distutils.command.sdist import sdist as _sdist
from distutils.command.build import build as _build
# these configuration settings will be overridden by setup.py after it
# imports us
versionfile_source = None
versionfile_build = None
tag_prefix = None
parentdir_prefix = None
VCS = None
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % args[0])
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % args[0])
return None
return stdout
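# Usage sketch: `commands` lists candidate executables tried in order (e.g.
# ["git.cmd", "git.exe"] on Windows, as in git_versions_from_vcs below); the
# first one that spawns wins:
#   stdout = run_command(["git"], ["describe", "--tags"], cwd=root)
# The return value is the command's stdout as a string, or None if no
# candidate could be started or the command exited non-zero.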
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.11 (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
# these strings are filled in when 'setup.py versioneer' creates _version.py
tag_prefix = "%(TAG_PREFIX)s"
parentdir_prefix = "%(PARENTDIR_PREFIX)s"
versionfile_source = "%(VERSIONFILE_SOURCE)s"
import os, sys, re, subprocess, errno
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% args[0])
print(e)
return None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% args[0])
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%%s', but '%%s' doesn't start with prefix '%%s'" %%
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs,"r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
if not keywords:
return {} # keyword-finding function failed to find keywords
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs-tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return { "version": r,
"full": keywords["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": keywords["full"].strip(),
"full": keywords["full"].strip() }
def git_versions_from_vcs(tag_prefix, root, verbose=False):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %%s" %% root)
return {}
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%%s' doesn't start with prefix '%%s'" %% (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
keywords = { "refnames": git_refnames, "full": git_full }
ver = git_versions_from_keywords(keywords, tag_prefix, verbose)
if ver:
return ver
try:
root = os.path.abspath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in range(len(versionfile_source.split(os.sep))):
root = os.path.dirname(root)
except NameError:
return default
return (git_versions_from_vcs(tag_prefix, root, verbose)
or versions_from_parentdir(parentdir_prefix, root, verbose)
or default)
'''
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs,"r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
if not keywords:
return {} # keyword-finding function failed to find keywords
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return { "version": r,
"full": keywords["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": keywords["full"].strip(),
"full": keywords["full"].strip() }
def git_versions_from_vcs(tag_prefix, root, verbose=False):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
return {}
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def do_vcs_install(manifest_in, versionfile_source, ipy):
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source, ipy]
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
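# Net effect of do_vcs_install(): .gitattributes gains an entry like
#   src/myproject/_version.py export-subst
# (the path comes from versionfile_source), so 'git archive' expands the
# $Format$ keywords in exported tarballs, and the touched files are
# 'git add'ed so the change is not forgotten.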
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.11) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
version_version = '%(version)s'
version_full = '%(full)s'
def get_versions(default={}, verbose=False):
return {'version': version_version, 'full': version_full}
"""
DEFAULT = {"version": "unknown", "full": "unknown"}
def versions_from_file(filename):
versions = {}
try:
with open(filename) as f:
for line in f.readlines():
mo = re.match("version_version = '([^']+)'", line)
if mo:
versions["version"] = mo.group(1)
mo = re.match("version_full = '([^']+)'", line)
if mo:
versions["full"] = mo.group(1)
except EnvironmentError:
return {}
return versions
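# versions_from_file() parses the static file written by
# write_to_version_file() below; e.g. a _version.py containing
#   version_version = '0.11'
#   version_full = '1076c978a8d3cfc70f408fe5974aa6c092c949ac'
# yields {'version': '0.11', 'full': '1076c978a8d3cfc70f408fe5974aa6c092c949ac'}.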
def write_to_version_file(filename, versions):
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % versions)
print("set %s to '%s'" % (filename, versions["version"]))
def get_root():
try:
return os.path.dirname(os.path.abspath(__file__))
except NameError:
return os.path.dirname(os.path.abspath(sys.argv[0]))
def vcs_function(vcs, suffix):
return getattr(sys.modules[__name__], '%s_%s' % (vcs, suffix), None)
def get_versions(default=DEFAULT, verbose=False):
# returns dict with two keys: 'version' and 'full'
assert versionfile_source is not None, "please set versioneer.versionfile_source"
assert tag_prefix is not None, "please set versioneer.tag_prefix"
assert parentdir_prefix is not None, "please set versioneer.parentdir_prefix"
assert VCS is not None, "please set versioneer.VCS"
# I am in versioneer.py, which must live at the top of the source tree,
# which we use to compute the root directory. py2exe/bbfreeze/non-CPython
# don't have __file__, in which case we fall back to sys.argv[0] (which
# ought to be the setup.py script). We prefer __file__ since that's more
# robust in cases where setup.py was invoked in some weird way (e.g. pip)
root = get_root()
versionfile_abs = os.path.join(root, versionfile_source)
# extract version from first of _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = vcs_function(VCS, "get_keywords")
versions_from_keywords_f = vcs_function(VCS, "versions_from_keywords")
if get_keywords_f and versions_from_keywords_f:
vcs_keywords = get_keywords_f(versionfile_abs)
ver = versions_from_keywords_f(vcs_keywords, tag_prefix)
if ver:
if verbose: print("got version from expanded keyword %s" % ver)
return ver
ver = versions_from_file(versionfile_abs)
if ver:
if verbose: print("got version from file %s %s" % (versionfile_abs,ver))
return ver
versions_from_vcs_f = vcs_function(VCS, "versions_from_vcs")
if versions_from_vcs_f:
ver = versions_from_vcs_f(tag_prefix, root, verbose)
if ver:
if verbose: print("got version from VCS %s" % ver)
return ver
ver = versions_from_parentdir(parentdir_prefix, root, verbose)
if ver:
if verbose: print("got version from parentdir %s" % ver)
return ver
if verbose: print("got version from default %s" % default)
return default
def get_version(verbose=False):
return get_versions(verbose=verbose)["version"]
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
ver = get_version(verbose=True)
print("Version is currently: %s" % ver)
class cmd_build(_build):
def run(self):
versions = get_versions(verbose=True)
_build.run(self)
# now locate _version.py in the new build/ directory and replace it
# with an updated value
target_versionfile = os.path.join(self.build_lib, versionfile_build)
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
with open(target_versionfile, "w") as f:
f.write(SHORT_VERSION_PY % versions)
if 'cx_Freeze' in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
class cmd_build_exe(_build_exe):
def run(self):
versions = get_versions(verbose=True)
target_versionfile = versionfile_source
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
with open(target_versionfile, "w") as f:
f.write(SHORT_VERSION_PY % versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(versionfile_source, "w") as f:
assert VCS is not None, "please set versioneer.VCS"
LONG = LONG_VERSION_PY[VCS]
f.write(LONG % {"DOLLAR": "$",
"TAG_PREFIX": tag_prefix,
"PARENTDIR_PREFIX": parentdir_prefix,
"VERSIONFILE_SOURCE": versionfile_source,
})
class cmd_sdist(_sdist):
def run(self):
versions = get_versions(verbose=True)
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory (remembering
# that it may be a hardlink) and replace it with an updated value
target_versionfile = os.path.join(base_dir, versionfile_source)
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
with open(target_versionfile, "w") as f:
f.write(SHORT_VERSION_PY % self._versioneer_generated_versions)
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
class cmd_update_files(Command):
description = "install/upgrade Versioneer files: __init__.py SRC/_version.py"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
print(" creating %s" % versionfile_source)
with open(versionfile_source, "w") as f:
assert VCS is not None, "please set versioneer.VCS"
LONG = LONG_VERSION_PY[VCS]
f.write(LONG % {"DOLLAR": "$",
"TAG_PREFIX": tag_prefix,
"PARENTDIR_PREFIX": parentdir_prefix,
"VERSIONFILE_SOURCE": versionfile_source,
})
ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(get_root(), "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
versionfile_source)
with open(manifest_in, "a") as f:
f.write("include %s\n" % versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-time keyword
# substitution.
do_vcs_install(manifest_in, versionfile_source, ipy)
def get_cmdclass():
cmds = {'version': cmd_version,
'versioneer': cmd_update_files,
'build': cmd_build,
'sdist': cmd_sdist,
}
if 'cx_Freeze' in sys.modules: # cx_freeze enabled?
cmds['build_exe'] = cmd_build_exe
del cmds['build']
return cmds
|
import uuid
from django.db import models
from electionnight.fields import MarkdownField
class PageContentBlock(models.Model):
"""
A block of content for an individual page.
"""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
page = models.ForeignKey(
"PageContent", related_name="blocks", on_delete=models.PROTECT
)
content_type = models.ForeignKey(
"PageContentType", related_name="+", on_delete=models.PROTECT
)
content = MarkdownField()
created = models.DateTimeField(auto_now_add=True, editable=False)
updated = models.DateTimeField(auto_now=True, editable=False)
class Meta:
unique_together = ("page", "content_type")
def __str__(self):
return self.content_type.name
|
#import h5py
import numpy as np
import scipy.io as sio
import torch
from sklearn import preprocessing
import sys
import h5py
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def map_label(label, classes):
mapped_label = torch.LongTensor(label.size())
for i in range(classes.size(0)):
mapped_label[label==classes[i]] = i
return mapped_label
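# Example (illustrative): with classes = tensor([5, 9]), the original labels
# are remapped to contiguous ids 0 and 1:
#   map_label(torch.LongTensor([5, 9, 5]), torch.LongTensor([5, 9]))
#   # -> tensor([0, 1, 0])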
class Logger(object):
def __init__(self, filename):
self.filename = filename
f = open(self.filename+'.log', "a")
f.close()
def write(self, message):
f = open(self.filename+'.log', "a")
f.write(message)
f.close()
class DATA_LOADER(object):
def __init__(self, opt):
self.read_matdataset(opt)
self.index_in_epoch = 0
self.epochs_completed = 0
def process_few_shot_train(self, data, attsplits, num):
labels = data["labels"]
from copy import deepcopy
copy_labels = deepcopy(labels).reshape(-1,1)
att = attsplits["att"]
test_seen_loc = attsplits["test_seen_loc"]
test_unseen_loc = attsplits["test_unseen_loc"]
seen_classes = np.unique(np.ravel(labels)[test_seen_loc - 1]).tolist()
copy_labels[test_seen_loc-1] = -1
add_seen_index = []
for i in seen_classes:
# print(np.where(copy_labels == i))
add_seen_index += np.where(copy_labels == i)[0].tolist()[0:num]
# print(add_seen_index)
trainval_loc = np.array(add_seen_index).reshape(-1, 1) + 1
print(trainval_loc.shape)
if trainval_loc.shape[0] < 1024:
n = int(1024/trainval_loc.shape[0] + 1)
trainval_loc = np.repeat(trainval_loc, n, axis=0)
print(trainval_loc.shape)
myLabel = {}
myLabel["att"] = att
myLabel["test_unseen_loc"] = test_unseen_loc
myLabel["test_seen_loc"] = test_seen_loc
myLabel["trainval_loc"] = trainval_loc
return data, myLabel
def process_few_shot_test(self, data, attsplits, num):
labels = data["labels"]
att = attsplits["att"]
test_seen_loc = attsplits["test_seen_loc"]
test_unseen_loc = attsplits["test_unseen_loc"]
trainval_loc = attsplits["trainval_loc"]
unseen_classes = np.unique(np.ravel(labels)[test_unseen_loc - 1]).tolist()
# print(unseen_classes)
add_unseen_index = []
for i in unseen_classes:
# print('*',i, np.where(labels.T == i),labels.T.shape)
if (labels.shape[1] == 1):
add_unseen_index += np.where(labels.T == i)[1].tolist()[0:num]
else:
add_unseen_index += np.where(labels == i)[1].tolist()[0:num]
# print(len(add_unseen_index))
trainval_loc = np.row_stack([trainval_loc, np.array(add_unseen_index).reshape(-1, 1) + 1])
# print(add_unseen_index)
for i in add_unseen_index:
# print('&',i, np.where(test_unseen_loc == i + 1))
ind = np.where(test_unseen_loc == i + 1)[0][0]
# print(ind)
test_unseen_loc = np.delete(test_unseen_loc, ind, 0)
myLabel = {}
myLabel["att"] = att
myLabel["test_unseen_loc"] = test_unseen_loc
myLabel["test_seen_loc"] = test_seen_loc
myLabel["trainval_loc"] = trainval_loc
return data, myLabel
def read_matdataset(self, opt):
matcontent1 = sio.loadmat(opt.dataroot + "/" + opt.dataset + "/" + opt.image_embedding + ".mat")
matcontent = sio.loadmat(opt.dataroot + "/" + opt.dataset + "/" + opt.class_embedding + "_splits.mat")
if opt.num_shots > 0:
if opt.few_train:
matcontent1, matcontent = self.process_few_shot_train(matcontent1, matcontent, opt.num_shots)
else:
matcontent1, matcontent = self.process_few_shot_test(matcontent1, matcontent, opt.num_shots)
feature = matcontent1['features'].T
label = matcontent1['labels'].astype(int).squeeze() - 1
trainval_loc = matcontent['trainval_loc'].squeeze() - 1
# train_loc = matcontent['train_loc'].squeeze() - 1
# val_unseen_loc = matcontent['val_loc'].squeeze() - 1
test_seen_loc = matcontent['test_seen_loc'].squeeze() - 1
test_unseen_loc = matcontent['test_unseen_loc'].squeeze() - 1
self.attribute = torch.from_numpy(matcontent['att'].T).float()
self.attribute /= self.attribute.pow(2).sum(1).sqrt().unsqueeze(1).expand(self.attribute.size(0),
self.attribute.size(1))
if not opt.validation:
if opt.preprocessing:
if opt.standardization:
print('standardization...')
scaler = preprocessing.StandardScaler()
else:
scaler = preprocessing.MinMaxScaler()
_train_feature = scaler.fit_transform(feature[trainval_loc])
_test_seen_feature = scaler.transform(feature[test_seen_loc])
_test_unseen_feature = scaler.transform(feature[test_unseen_loc])
self.train_feature = torch.from_numpy(_train_feature).float()
mx = self.train_feature.max()
self.train_feature.mul_(1 / mx)
self.train_label = torch.from_numpy(label[trainval_loc]).long()
self.test_unseen_feature = torch.from_numpy(_test_unseen_feature).float()
self.test_unseen_feature.mul_(1 / mx)
self.test_unseen_label = torch.from_numpy(label[test_unseen_loc]).long()
self.test_seen_feature = torch.from_numpy(_test_seen_feature).float()
self.test_seen_feature.mul_(1 / mx)
self.test_seen_label = torch.from_numpy(label[test_seen_loc]).long()
else:
self.train_feature = torch.from_numpy(feature[trainval_loc]).float()
self.train_label = torch.from_numpy(label[trainval_loc]).long()
self.test_unseen_feature = torch.from_numpy(feature[test_unseen_loc]).float()
self.test_unseen_label = torch.from_numpy(label[test_unseen_loc]).long()
self.test_seen_feature = torch.from_numpy(feature[test_seen_loc]).float()
self.test_seen_label = torch.from_numpy(label[test_seen_loc]).long()
# else:
# self.train_feature = torch.from_numpy(feature[train_loc]).float()
# self.train_label = torch.from_numpy(label[train_loc]).long()
# self.test_unseen_feature = torch.from_numpy(feature[val_unseen_loc]).float()
# self.test_unseen_label = torch.from_numpy(label[val_unseen_loc]).long()
#
self.seenclasses = torch.from_numpy(np.unique(self.train_label.numpy()))
self.unseenclasses = torch.from_numpy(np.unique(self.test_unseen_label.numpy()))
self.test_seenclasses = torch.from_numpy(np.unique(self.test_seen_label.numpy()))
self.ntrain = self.train_feature.size()[0]
self.ntest_seen = self.test_seen_feature.size()[0]
self.ntest_unseen = self.test_unseen_feature.size()[0]
self.ntrain_class = self.ntest_seen + self.ntest_unseen
self.ntest_class = self.unseenclasses.size(0)
self.train_class = self.seenclasses.clone()
self.train_mapped_label = map_label(self.train_label, self.seenclasses)
def next_seen_batch(self, seen_batch):
idx = torch.randperm(self.ntrain)[0:seen_batch]
batch_feature = self.train_feature[idx]
batch_label = self.train_label[idx]
batch_att = self.attribute[batch_label]
return batch_feature, batch_att
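# Usage sketch (hedged): `opt` is an argparse-style namespace; the attributes
# read above are dataroot, dataset, image_embedding, class_embedding,
# num_shots, few_train, validation, preprocessing and standardization, e.g.
#   opt = argparse.Namespace(dataroot='data', dataset='AWA1',  # hypothetical paths/names
#                            image_embedding='res101', class_embedding='att',
#                            num_shots=0, few_train=False, validation=False,
#                            preprocessing=True, standardization=False)
#   data = DATA_LOADER(opt)
#   batch_feature, batch_att = data.next_seen_batch(64)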
|
class ChangeMetricsClass(object):
    instanceName = ''
    LOC = 0
    LOC_touched = 0
    Number_of_Revisions = 0
    Fix_Count = 0
    Authors = 0
    LOC_added = 0
    Max_LOC_added = 0
    Average_LOC_added = 0
    Churn = 0
    Max_Churn = 0
    Average_Churn = 0
    Change_Set_Size = 0
    Max_Change_Set_Size = 0
    Average_Change_Set_Size = 0
    Release_Length = 0
    Weighted_Release_Length = 0
    def __init__(self):
        self.instanceName = ''
        self.LOC = 0
        self.LOC_touched = 0
        self.Number_of_Revisions = 0
        self.Fix_Count = 0
        self.Authors = 0
        self.LOC_added = 0
        self.Max_LOC_added = 0
        self.Average_LOC_added = 0
        self.Churn = 0
        self.Max_Churn = 0
        self.Average_Churn = 0
        self.Change_Set_Size = 0
        self.Max_Change_Set_Size = 0
        self.Average_Change_Set_Size = 0
        self.Release_Length = 0
        self.Weighted_Release_Length = 0
    ### Flatten the metrics into a single row
    def metrics2list(self):
        list_oneInstance = [self.instanceName, self.LOC, self.LOC_touched, self.Number_of_Revisions, self.Fix_Count,
                            self.Authors, self.LOC_added, self.Max_LOC_added, self.Average_LOC_added, self.Churn,
                            self.Max_Churn, self.Average_Churn, self.Change_Set_Size, self.Max_Change_Set_Size,
                            self.Average_Change_Set_Size, self.Release_Length, self.Weighted_Release_Length]
        return list_oneInstance
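# Usage sketch: populate the metric fields on an instance, then flatten to a
# CSV-ready row (17 values, instanceName first):
#   m = ChangeMetricsClass()
#   m.instanceName = 'src/Foo.java'  # hypothetical file name
#   m.LOC = 120
#   row = m.metrics2list()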
|
from ..abstract_base_classes.table_cruder_default import TableCRUDerDefault
from ....models.daily_traded_volume_money import DailyTradedVolumeMoney
__all__ = ['DailyTradedVolumeMoneyTableCRUDer']
class DailyTradedVolumeMoneyTableCRUDer(TableCRUDerDefault):
def __init__(self, daily_traded_volume_money_table, db_connection):
self._db_connection = db_connection
self._table_name = daily_traded_volume_money_table.get_table_name()
self._column_names = daily_traded_volume_money_table.get_column_names()
self._column_types = daily_traded_volume_money_table.get_column_types()
self._primary_key = daily_traded_volume_money_table.get_primary_key()
self._constraints = daily_traded_volume_money_table.get_constraints()
self._key = daily_traded_volume_money_table.get_key()
self._class = DailyTradedVolumeMoney
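# Construction sketch (hedged; the table object and its getters are defined
# elsewhere in this package):
#   cruder = DailyTradedVolumeMoneyTableCRUDer(daily_traded_volume_money_table,
#                                              db_connection)
# The class only forwards the table's metadata (name, columns, keys,
# constraints) to TableCRUDerDefault and binds the DailyTradedVolumeMoney model.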
|
"""Implements the Raster extension.
https://github.com/stac-extensions/raster
"""
import enum
from typing import Any, Dict, Generic, Iterable, List, Optional, TypeVar, cast
import pystac
from pystac.extensions.base import (
ExtensionManagementMixin,
PropertiesExtension,
SummariesExtension,
)
from pystac.utils import get_required, map_opt
T = TypeVar("T", pystac.Item, pystac.Asset)
SCHEMA_URI = "https://stac-extensions.github.io/raster/v1.0.0/schema.json"
BANDS_PROP = "raster:bands"
class Sampling(str, enum.Enum):
def __str__(self) -> str:
return str(self.value)
AREA = "area"
POINT = "point"
class DataType(str, enum.Enum):
def __str__(self) -> str:
return str(self.value)
INT8 = "int8"
INT16 = "int16"
INT32 = "int32"
INT64 = "int64"
UINT8 = "uint8"
UINT16 = "uint16"
UINT32 = "uint32"
UINT64 = "uint64"
FLOAT16 = "float16"
FLOAT32 = "float32"
FLOAT64 = "float64"
CINT16 = "cint16"
CINT32 = "cint32"
CFLOAT32 = "cfloat32"
CFLOAT64 = "cfloat64"
OTHER = "other"
class Statistics:
"""Represents statistics information attached to a band in the raster extension.
Use Statistics.create to create a new Statistics instance.
"""
def __init__(self, properties: Dict[str, Optional[float]]) -> None:
self.properties = properties
def apply(
self,
minimum: Optional[float] = None,
maximum: Optional[float] = None,
mean: Optional[float] = None,
stddev: Optional[float] = None,
valid_percent: Optional[float] = None,
) -> None:
"""
        Sets the properties for this Statistics instance.
Args:
minimum : Minimum value of all the pixels in the band.
maximum : Maximum value of all the pixels in the band.
mean : Mean value of all the pixels in the band.
stddev : Standard Deviation value of all the pixels in the band.
            valid_percent : Percentage of valid (not nodata) pixels.
""" # noqa
self.minimum = minimum
self.maximum = maximum
self.mean = mean
self.stddev = stddev
self.valid_percent = valid_percent
@classmethod
def create(
cls,
minimum: Optional[float] = None,
maximum: Optional[float] = None,
mean: Optional[float] = None,
stddev: Optional[float] = None,
valid_percent: Optional[float] = None,
) -> "Statistics":
"""
        Creates a new Statistics instance.
Args:
minimum : Minimum value of all the pixels in the band.
maximum : Maximum value of all the pixels in the band.
mean : Mean value of all the pixels in the band.
stddev : Standard Deviation value of all the pixels in the band.
            valid_percent : Percentage of valid (not nodata) pixels.
""" # noqa
b = cls({})
b.apply(
minimum=minimum,
maximum=maximum,
mean=mean,
stddev=stddev,
valid_percent=valid_percent,
)
return b
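    # Example (illustrative):
    #   Statistics.create(minimum=0.0, maximum=255.0, mean=100.0,
    #                     stddev=3.5, valid_percent=98.0).to_dict()
    #   # -> {'minimum': 0.0, 'maximum': 255.0, 'mean': 100.0,
    #   #     'stddev': 3.5, 'valid_percent': 98.0}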
@property
def minimum(self) -> Optional[float]:
"""Get or sets the minimum pixel value
Returns:
Optional[float]
"""
return self.properties.get("minimum")
@minimum.setter
def minimum(self, v: Optional[float]) -> None:
if v is not None:
self.properties["minimum"] = v
else:
self.properties.pop("minimum", None)
@property
def maximum(self) -> Optional[float]:
"""Get or sets the maximum pixel value
Returns:
Optional[float]
"""
return self.properties.get("maximum")
@maximum.setter
def maximum(self, v: Optional[float]) -> None:
if v is not None:
self.properties["maximum"] = v
else:
self.properties.pop("maximum", None)
@property
def mean(self) -> Optional[float]:
"""Get or sets the mean pixel value
Returns:
Optional[float]
"""
return self.properties.get("mean")
@mean.setter
def mean(self, v: Optional[float]) -> None:
if v is not None:
self.properties["mean"] = v
else:
self.properties.pop("mean", None)
@property
def stddev(self) -> Optional[float]:
"""Get or sets the standard deviation pixel value
Returns:
Optional[float]
"""
return self.properties.get("stddev")
@stddev.setter
def stddev(self, v: Optional[float]) -> None:
if v is not None:
self.properties["stddev"] = v
else:
self.properties.pop("stddev", None)
@property
def valid_percent(self) -> Optional[float]:
"""Get or sets the Percentage of valid (not nodata) pixel
Returns:
Optional[float]
"""
return self.properties.get("valid_percent")
@valid_percent.setter
def valid_percent(self, v: Optional[float]) -> None:
if v is not None:
self.properties["valid_percent"] = v
else:
self.properties.pop("valid_percent", None)
def to_dict(self) -> Dict[str, Any]:
"""Returns the dictionary representing the JSON of those Statistics.
Returns:
dict: The wrapped dict of the Statistics that can be written out as JSON.
"""
return self.properties
@staticmethod
def from_dict(d: Dict[str, Any]) -> "Statistics":
        Constructs a Statistics from a dict.
Returns:
Statistics: The Statistics deserialized from the JSON dict.
"""
return Statistics(properties=d)
class Histogram:
"""Represents pixel distribution information attached to a band in the raster extension.
    Use Histogram.create to create a new Histogram.
"""
def __init__(self, properties: Dict[str, Any]) -> None:
self.properties = properties
def apply(
self,
count: int,
min: float,
max: float,
buckets: List[int],
) -> None:
"""
        Sets the properties for this Histogram.
Args:
count : number of buckets of the distribution.
min : minimum value of the distribution.
Also the mean value of the first bucket.
max : maximum value of the distribution.
Also the mean value of the last bucket.
            buckets : Array of integers giving the number
                of pixels included in each bucket.
""" # noqa
self.count = count
self.min = min
self.max = max
self.buckets = buckets
@classmethod
def create(
cls,
count: int,
min: float,
max: float,
buckets: List[int],
) -> "Histogram":
"""
        Creates a new Histogram.
Args:
count : number of buckets of the distribution.
min : minimum value of the distribution.
Also the mean value of the first bucket.
max : maximum value of the distribution.
Also the mean value of the last bucket.
            buckets : Array of integers giving the number
                of pixels included in each bucket.
""" # noqa
b = cls({})
b.apply(
count=count,
min=min,
max=max,
buckets=buckets,
)
return b
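    # Example (illustrative): a 3-bucket distribution over [0, 30]:
    #   Histogram.create(count=3, min=0.0, max=30.0, buckets=[10, 5, 2]).to_dict()
    #   # -> {'count': 3, 'min': 0.0, 'max': 30.0, 'buckets': [10, 5, 2]}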
@property
def count(self) -> int:
"""Get or sets the number of buckets of the distribution.
Returns:
int
"""
return get_required(self.properties["count"], self, "count")
@count.setter
def count(self, v: int) -> None:
self.properties["count"] = v
@property
def min(self) -> float:
"""Get or sets the minimum value of the distribution.
Returns:
float
"""
return get_required(self.properties["min"], self, "min")
@min.setter
def min(self, v: float) -> None:
self.properties["min"] = v
@property
def max(self) -> float:
"""Get or sets the maximum value of the distribution.
Returns:
float
"""
return get_required(self.properties["max"], self, "max")
@max.setter
def max(self, v: float) -> None:
self.properties["max"] = v
@property
def buckets(self) -> List[int]:
"""Get or sets the Array of integer indicating
the number of pixels included in the bucket.
Returns:
List[int]
"""
return get_required(self.properties["buckets"], self, "buckets")
@buckets.setter
def buckets(self, v: List[int]) -> None:
self.properties["buckets"] = v
def to_dict(self) -> Dict[str, Any]:
"""Returns the dictionary representing the JSON of this histogram.
Returns:
dict: The wrapped dict of the Histogram that can be written out as JSON.
"""
return self.properties
@staticmethod
def from_dict(d: Dict[str, Any]) -> "Histogram":
        Constructs a Histogram from a dict.
Returns:
Histogram: The Histogram deserialized from the JSON dict.
"""
return Histogram(properties=d)
class RasterBand:
"""Represents a Raster Band information attached to an Item
that implements the raster extension.
    Use RasterBand.create to create a new RasterBand.
"""
def __init__(self, properties: Dict[str, Any]) -> None:
self.properties = properties
def apply(
self,
nodata: Optional[float] = None,
sampling: Optional[Sampling] = None,
data_type: Optional[DataType] = None,
bits_per_sample: Optional[float] = None,
spatial_resolution: Optional[float] = None,
statistics: Optional[Statistics] = None,
unit: Optional[str] = None,
scale: Optional[float] = None,
offset: Optional[float] = None,
histogram: Optional[Histogram] = None,
) -> None:
"""
Sets the properties for this raster Band.
Args:
nodata : Pixel values used to identify pixels that are nodata in the assets.
sampling : One of area or point. Indicates whether a pixel value should be assumed
to represent a sampling over the region of the pixel or a point sample at the center of the pixel.
            data_type : The data type of the band.
                One of the data types as described in <https://github.com/stac-extensions/raster/#data-types>.
            bits_per_sample : The actual number of bits used for this band.
                Normally only present when the number of bits is non-standard for the datatype,
                such as when a 1 bit TIFF is represented as a byte
            spatial_resolution : Average spatial resolution (in meters) of the pixels in the band.
            statistics: Statistics of all the pixels in the band
            unit: unit denomination of the pixel value
            scale: multiplicative factor of the pixel value to transform into the value
                (i.e. translate digital number to reflectance).
            offset: number to be added to the pixel value (after scaling) to transform into the value
                (i.e. translate digital number to reflectance).
            histogram: Histogram distribution information of the pixel values in the band
""" # noqa
self.nodata = nodata
self.sampling = sampling
self.data_type = data_type
self.bits_per_sample = bits_per_sample
self.spatial_resolution = spatial_resolution
self.statistics = statistics
self.unit = unit
self.scale = scale
self.offset = offset
self.histogram = histogram
@classmethod
def create(
cls,
nodata: Optional[float] = None,
sampling: Optional[Sampling] = None,
data_type: Optional[DataType] = None,
bits_per_sample: Optional[float] = None,
spatial_resolution: Optional[float] = None,
statistics: Optional[Statistics] = None,
unit: Optional[str] = None,
scale: Optional[float] = None,
offset: Optional[float] = None,
histogram: Optional[Histogram] = None,
) -> "RasterBand":
"""
Creates a new band.
Args:
nodata : Pixel values used to identify pixels that are nodata in the assets.
sampling : One of area or point. Indicates whether a pixel value should be assumed
to represent a sampling over the region of the pixel or a point sample at the center of the pixel.
            data_type : The data type of the band.
                One of the data types as described in <https://github.com/stac-extensions/raster/#data-types>.
            bits_per_sample : The actual number of bits used for this band.
                Normally only present when the number of bits is non-standard for the datatype,
                such as when a 1 bit TIFF is represented as a byte
            spatial_resolution : Average spatial resolution (in meters) of the pixels in the band.
            statistics: Statistics of all the pixels in the band
            unit: unit denomination of the pixel value
            scale: multiplicative factor of the pixel value to transform into the value
                (i.e. translate digital number to reflectance).
            offset: number to be added to the pixel value (after scaling) to transform into the value
                (i.e. translate digital number to reflectance).
            histogram: Histogram distribution information of the pixel values in the band
""" # noqa
b = cls({})
b.apply(
nodata=nodata,
sampling=sampling,
data_type=data_type,
bits_per_sample=bits_per_sample,
spatial_resolution=spatial_resolution,
statistics=statistics,
unit=unit,
scale=scale,
offset=offset,
histogram=histogram,
)
return b
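    # Example (illustrative): a uint8 band with a nodata value; to_dict()
    # yields the mapping used as one "raster:bands" entry:
    #   band = RasterBand.create(nodata=0, data_type=DataType.UINT8,
    #                            spatial_resolution=30.0)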
@property
def nodata(self) -> Optional[float]:
"""Get or sets the nodata pixel value
Returns:
Optional[float]
"""
return self.properties.get("nodata")
@nodata.setter
def nodata(self, v: Optional[float]) -> None:
if v is not None:
self.properties["nodata"] = v
else:
self.properties.pop("nodata", None)
@property
def sampling(self) -> Optional[Sampling]:
"""Get or sets the property indicating whether a pixel value should be assumed
to represent a sampling over the region of the pixel or a point sample
at the center of the pixel.
Returns:
Optional[Sampling]
""" # noqa
return self.properties.get("sampling")
@sampling.setter
def sampling(self, v: Optional[Sampling]) -> None:
if v is not None:
self.properties["sampling"] = v
else:
self.properties.pop("sampling", None)
@property
def data_type(self) -> Optional[DataType]:
"""Get or sets the data type of the band.
Returns:
Optional[DataType]
"""
return self.properties.get("data_type")
@data_type.setter
def data_type(self, v: Optional[DataType]) -> None:
if v is not None:
self.properties["data_type"] = v
else:
self.properties.pop("data_type", None)
@property
def bits_per_sample(self) -> Optional[float]:
"""Get or sets the actual number of bits used for this band.
Returns:
float
"""
return self.properties.get("bits_per_sample")
@bits_per_sample.setter
def bits_per_sample(self, v: Optional[float]) -> None:
if v is not None:
self.properties["bits_per_sample"] = v
else:
self.properties.pop("bits_per_sample", None)
@property
def spatial_resolution(self) -> Optional[float]:
"""Get or sets the average spatial resolution (in meters) of the pixels in the band.
Returns:
[float]
"""
return self.properties.get("spatial_resolution")
@spatial_resolution.setter
def spatial_resolution(self, v: Optional[float]) -> None:
if v is not None:
self.properties["spatial_resolution"] = v
else:
self.properties.pop("spatial_resolution", None)
@property
    def statistics(self) -> Optional[Statistics]:
        """Get or sets the statistics of all the pixels in the band.
        Returns:
            Optional[Statistics]
        """
        return map_opt(Statistics.from_dict, self.properties.get("statistics"))
@statistics.setter
def statistics(self, v: Optional[Statistics]) -> None:
if v is not None:
self.properties["statistics"] = v.to_dict()
else:
self.properties.pop("statistics", None)
@property
def unit(self) -> Optional[str]:
"""Get or sets the unit denomination of the pixel value
Returns:
[str]
"""
return self.properties.get("unit")
@unit.setter
def unit(self, v: Optional[str]) -> None:
if v is not None:
self.properties["unit"] = v
else:
self.properties.pop("unit", None)
@property
def scale(self) -> Optional[float]:
"""Get or sets the multiplicator factor of the pixel value to transform
into the value (i.e. translate digital number to reflectance).
Returns:
[float]
"""
return self.properties.get("scale")
@scale.setter
def scale(self, v: Optional[float]) -> None:
if v is not None:
self.properties["scale"] = v
else:
self.properties.pop("scale", None)
@property
def offset(self) -> Optional[float]:
"""Get or sets the number to be added to the pixel value (after scaling)
to transform into the value (i.e. translate digital number to reflectance).
Returns:
[float]
"""
return self.properties.get("offset")
@offset.setter
def offset(self, v: Optional[float]) -> None:
if v is not None:
self.properties["offset"] = v
else:
self.properties.pop("offset", None)
@property
def histogram(self) -> Optional[Histogram]:
"""Get or sets the histogram distribution information of the pixels values in the band
Returns:
[Histogram]
"""
return Histogram.from_dict(get_opt(self.properties.get("histogram")))
@histogram.setter
def histogram(self, v: Optional[Histogram]) -> None:
if v is not None:
self.properties["histogram"] = v.to_dict()
else:
self.properties.pop("histogram", None)
def __repr__(self) -> str:
return "<Raster Band>"
def to_dict(self) -> Dict[str, Any]:
"""Returns the dictionary representing the JSON of this Band.
Returns:
dict: The wrapped dict of the Band that can be written out as JSON.
"""
return self.properties
class RasterExtension(
Generic[T], PropertiesExtension, ExtensionManagementMixin[pystac.Item]
):
"""An abstract class that can be used to extend the properties of an
:class:`~pystac.Item` or :class:`~pystac.Asset` with properties from
the :stac-ext:`Raster Extension <raster>`. This class is generic over
the type of STAC Object to be extended (e.g. :class:`~pystac.Item`,
:class:`~pystac.Asset`).
This class will generally not be used directly. Instead, use the concrete
implementation associated with the STAC Object you want to extend (e.g.
:class:`~ItemRasterExtension` to extend an :class:`~pystac.Item`).
"""
def apply(self, bands: List[RasterBand]) -> None:
"""Applies raster extension properties to the extended :class:`pystac.Item` or
:class:`pystac.Asset`.
Args:
bands : a list of :class:`~pystac.RasterBand` objects that represent
the available raster bands.
"""
self.bands = bands
@property
def bands(self) -> Optional[List[RasterBand]]:
"""Gets or sets a list of available bands where each item is a :class:`~RasterBand`
object (or ``None`` if no bands have been set). If not available the field
should not be provided.
"""
return self._get_bands()
@bands.setter
def bands(self, v: Optional[List[RasterBand]]) -> None:
self._set_property(
BANDS_PROP, map_opt(lambda bands: [b.to_dict() for b in bands], v)
)
def _get_bands(self) -> Optional[List[RasterBand]]:
return map_opt(
lambda bands: [RasterBand(b) for b in bands],
self._get_property(BANDS_PROP, List[Dict[str, Any]]),
)
@classmethod
def get_schema_uri(cls) -> str:
return SCHEMA_URI
@staticmethod
def ext(obj: T) -> "RasterExtension[T]":
if isinstance(obj, pystac.Asset):
return cast(RasterExtension[T], AssetRasterExtension(obj))
else:
raise pystac.ExtensionTypeError(
f"Raster extension does not apply to type {type(obj)}"
)
@staticmethod
def summaries(obj: pystac.Collection) -> "SummariesRasterExtension":
return SummariesRasterExtension(obj)
class AssetRasterExtension(RasterExtension[pystac.Asset]):
"""A concrete implementation of :class:`RasterExtension` on an :class:`~pystac.Asset`
that extends the Asset fields to include properties defined in the
:stac-ext:`Raster Extension <raster>`.
This class should generally not be instantiated directly. Instead, call
:meth:`RasterExtension.ext` on an :class:`~pystac.Asset` to extend it.
"""
asset_href: str
"""The ``href`` value of the :class:`~pystac.Asset` being extended."""
properties: Dict[str, Any]
"""The :class:`~pystac.Asset` fields, including extension properties."""
additional_read_properties: Optional[Iterable[Dict[str, Any]]] = None
"""If present, this will be a list containing 1 dictionary representing the
properties of the owning :class:`~pystac.Item`."""
def __init__(self, asset: pystac.Asset):
self.asset_href = asset.href
self.properties = asset.properties
if asset.owner and isinstance(asset.owner, pystac.Item):
self.additional_read_properties = [asset.owner.properties]
def __repr__(self) -> str:
return "<AssetRasterExtension Asset href={}>".format(self.asset_href)
class SummariesRasterExtension(SummariesExtension):
"""A concrete implementation of :class:`~SummariesExtension` that extends
the ``summaries`` field of a :class:`~pystac.Collection` to include properties
defined in the :stac-ext:`Raster Extension <raster>`.
"""
@property
def bands(self) -> Optional[List[RasterBand]]:
"""Get or sets a list of :class:`~pystac.Band` objects that represent
the available bands.
"""
return map_opt(
lambda bands: [RasterBand(b) for b in bands],
self.summaries.get_list(BANDS_PROP),
)
@bands.setter
def bands(self, v: Optional[List[RasterBand]]) -> None:
self._set_summary(BANDS_PROP, map_opt(lambda x: [b.to_dict() for b in x], v))
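
# --- Editorial usage sketch (not part of the original module) ---
# A minimal illustration of how the classes above fit together, assuming a
# pystac.Asset whose owning Item already lists SCHEMA_URI in its
# ``stac_extensions``. The name ``item``, the asset key "data", and the
# DataType member name are hypothetical:
#
#   asset = item.assets["data"]
#   raster = RasterExtension.ext(asset)
#   band = RasterBand({})
#   band.nodata = 0.0
#   band.data_type = DataType.UINT16
#   band.spatial_resolution = 10.0
#   raster.bands = [band]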
|
import authority
from kitsune.forums.models import Forum
class ForumPermission(authority.permissions.BasePermission):
label = 'forums_forum'
checks = ('thread_edit', 'thread_sticky', 'thread_locked', 'thread_delete',
'post_edit', 'post_delete', 'thread_move', 'view_in', 'post_in')
# view_in: view the forum, its threads, and its posts
# post_in: make new threads and posts in a forum
authority.sites.register(Forum, ForumPermission)
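
# Editorial usage sketch: with django-authority, a registered permission class
# is checked per object, e.g. (``user`` and ``forum`` are hypothetical):
#
#   permission = ForumPermission(user)
#   if permission.has_perm('forums_forum.view_in', forum):
#       ...  # the user may view this forum's threads and posts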
|
# Generated by Django 2.1.2 on 2018-11-26 16:13
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('tran', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='article',
name='user',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
migrations.AddField(
model_name='translation',
name='user',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
|
# Python Version: 3.x
import functools
import pathlib
import subprocess
from logging import getLogger
from typing import *
from onlinejudge_verify.config import get_config
from onlinejudge_verify.languages.models import Language, LanguageEnvironment
logger = getLogger(__name__)
class NimLanguageEnvironment(LanguageEnvironment):
compile_to: str
NIMFLAGS: List[str]
def __init__(self, *, compile_to: str, NIMFLAGS: List[str]):
self.compile_to = compile_to
self.NIMFLAGS = NIMFLAGS
def compile(self, path: pathlib.Path, *, basedir: pathlib.Path, tempdir: pathlib.Path) -> None:
command = ["nim", self.compile_to, "-p:.", f"-o:{str(tempdir /'a.out')}", f"--nimcache:{str(tempdir)}"] + self.NIMFLAGS + [str(path)]
logger.info('$ %s', ' '.join(command))
subprocess.check_call(command)
def get_execute_command(self, path: pathlib.Path, *, basedir: pathlib.Path, tempdir: pathlib.Path) -> List[str]:
return [str(tempdir / "a.out")]
@functools.lru_cache(maxsize=None)
def _list_direct_dependencies(path: pathlib.Path, *, basedir: pathlib.Path) -> List[pathlib.Path]:
items: List[str] = []
with open(basedir / path, 'rb') as fh:
for line in fh.read().decode().splitlines():
line = line.strip()
if line.startswith('include'):
items += line[7:].strip().split(',')
elif line.startswith('import'):
line = line[6:]
i = line.find(' except ')
if i >= 0:
line = line[:i]
items += line.split(',')
elif line.startswith('from'):
i = line.find(' import ')
if i >= 0:
                    items.append(line[4:i])
dependencies = [path.resolve()]
for item in items:
item = item.strip()
if item.startswith("\""):
item = item[1:len(item) - 1]
else:
item += ".nim"
item_ = pathlib.Path(item)
if item_.exists():
dependencies.append(item_)
return list(set(dependencies))
class NimLanguage(Language):
config: Dict[str, Any]
def __init__(self, *, config: Optional[Dict[str, Any]] = None):
if config is None:
self.config = get_config().get('languages', {}).get('nim', {})
else:
self.config = config
def list_dependencies(self, path: pathlib.Path, *, basedir: pathlib.Path) -> List[pathlib.Path]:
dependencies = []
visited: Set[pathlib.Path] = set()
stk = [path.resolve()]
while stk:
path = stk.pop()
if path in visited:
continue
visited.add(path)
for child in _list_direct_dependencies(path, basedir=basedir):
dependencies.append(child)
stk.append(child)
return list(set(dependencies))
def bundle(self, path: pathlib.Path, *, basedir: pathlib.Path) -> bytes:
raise NotImplementedError
def is_verification_file(self, path: pathlib.Path, *, basedir: pathlib.Path) -> bool:
return path.name.endswith("_test.nim")
def list_environments(self, path: pathlib.Path, *, basedir: pathlib.Path) -> List[NimLanguageEnvironment]:
default_compile_to = 'cpp'
default_NIMFLAGS = ['-d:release', '--opt:speed']
envs = []
if 'environments' not in self.config:
envs.append(NimLanguageEnvironment(compile_to=default_compile_to, NIMFLAGS=default_NIMFLAGS))
else:
for env in self.config['environments']:
compile_to = env.get('compile_to', default_compile_to)
NIMFLAGS: List[str] = env.get('NIMFLAGS', default_NIMFLAGS)
if not isinstance(NIMFLAGS, list):
                    raise RuntimeError('NIMFLAGS must be a list')
envs.append(NimLanguageEnvironment(compile_to=compile_to, NIMFLAGS=NIMFLAGS))
return envs
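
# Editorial usage sketch: resolving the dependency closure of a verification
# file (the path is hypothetical):
#
#   lang = NimLanguage(config={})
#   target = pathlib.Path('example_test.nim')
#   if lang.is_verification_file(target, basedir=pathlib.Path('.')):
#       deps = lang.list_dependencies(target, basedir=pathlib.Path('.'))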
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-11-14 06:33
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mridata', '0026_data_tags_manager'),
]
operations = [
migrations.RemoveField(
model_name='data',
name='tags',
),
]
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateLinkResourcesOperations:
"""PrivateLinkResourcesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.recoveryservices.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
vault_name: str,
**kwargs
) -> AsyncIterable["_models.PrivateLinkResources"]:
"""Returns the list of private link resources that need to be created for Backup and SiteRecovery.
Returns the list of private link resources that need to be created for Backup and SiteRecovery.
:param resource_group_name: The name of the resource group where the recovery services vault is
present.
:type resource_group_name: str
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateLinkResources or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.recoveryservices.models.PrivateLinkResources]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateLinkResources"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PrivateLinkResources', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/privateLinkResources'} # type: ignore
async def get(
self,
resource_group_name: str,
vault_name: str,
private_link_resource_name: str,
**kwargs
) -> "_models.PrivateLinkResource":
"""Returns a specified private link resource that need to be created for Backup and SiteRecovery.
Returns a specified private link resource that need to be created for Backup and SiteRecovery.
:param resource_group_name: The name of the resource group where the recovery services vault is
present.
:type resource_group_name: str
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:param private_link_resource_name:
:type private_link_resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkResource, or the result of cls(response)
:rtype: ~azure.mgmt.recoveryservices.models.PrivateLinkResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateLinkResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
'privateLinkResourceName': self._serialize.url("private_link_resource_name", private_link_resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateLinkResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/privateLinkResources/{privateLinkResourceName}'} # type: ignore
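
# Editorial usage sketch: these operations are normally reached through the
# generated management client rather than instantiated directly, e.g.
# (the client class name and credential setup are assumed):
#
#   client = RecoveryServicesClient(credential, subscription_id)
#   async for resource in client.private_link_resources.list('my-rg', 'my-vault'):
#       print(resource)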
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
import re
from typing import Any, Dict, Optional
from airflow.exceptions import AirflowException
from airflow.providers.http.hooks.http import HttpHook
class DiscordWebhookHook(HttpHook):
"""
This hook allows you to post messages to Discord using incoming webhooks.
Takes a Discord connection ID with a default relative webhook endpoint. The
default endpoint can be overridden using the webhook_endpoint parameter
(https://discordapp.com/developers/docs/resources/webhook).
Each Discord webhook can be pre-configured to use a specific username and
avatar_url. You can override these defaults in this hook.
:param http_conn_id: Http connection ID with host as "https://discord.com/api/" and
default webhook endpoint in the extra field in the form of
{"webhook_endpoint": "webhooks/{webhook.id}/{webhook.token}"}
:type http_conn_id: str
:param webhook_endpoint: Discord webhook endpoint in the form of
"webhooks/{webhook.id}/{webhook.token}"
:type webhook_endpoint: str
:param message: The message you want to send to your Discord channel
(max 2000 characters)
:type message: str
:param username: Override the default username of the webhook
:type username: str
:param avatar_url: Override the default avatar of the webhook
:type avatar_url: str
    :param tts: Whether this is a text-to-speech message
:type tts: bool
:param proxy: Proxy to use to make the Discord webhook call
:type proxy: str
"""
def __init__(self,
http_conn_id: Optional[str] = None,
webhook_endpoint: Optional[str] = None,
message: str = "",
username: Optional[str] = None,
avatar_url: Optional[str] = None,
tts: bool = False,
proxy: Optional[str] = None,
*args,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self.http_conn_id = http_conn_id
self.webhook_endpoint = self._get_webhook_endpoint(http_conn_id, webhook_endpoint)
self.message = message
self.username = username
self.avatar_url = avatar_url
self.tts = tts
self.proxy = proxy
def _get_webhook_endpoint(self, http_conn_id: Optional[str], webhook_endpoint: Optional[str]) -> str:
"""
Given a Discord http_conn_id, return the default webhook endpoint or override if a
webhook_endpoint is manually supplied.
:param http_conn_id: The provided connection ID
:param webhook_endpoint: The manually provided webhook endpoint
:return: Webhook endpoint (str) to use
"""
if webhook_endpoint:
endpoint = webhook_endpoint
elif http_conn_id:
conn = self.get_connection(http_conn_id)
extra = conn.extra_dejson
endpoint = extra.get('webhook_endpoint', '')
else:
raise AirflowException('Cannot get webhook endpoint: No valid Discord '
'webhook endpoint or http_conn_id supplied.')
# make sure endpoint matches the expected Discord webhook format
if not re.match('^webhooks/[0-9]+/[a-zA-Z0-9_-]+$', endpoint):
raise AirflowException('Expected Discord webhook endpoint in the form '
'of "webhooks/{webhook.id}/{webhook.token}".')
return endpoint
def _build_discord_payload(self) -> str:
"""
Construct the Discord JSON payload. All relevant parameters are combined here
to a valid Discord JSON payload.
:return: Discord payload (str) to send
"""
payload: Dict[str, Any] = {}
if self.username:
payload['username'] = self.username
if self.avatar_url:
payload['avatar_url'] = self.avatar_url
payload['tts'] = self.tts
if len(self.message) <= 2000:
payload['content'] = self.message
else:
raise AirflowException('Discord message length must be 2000 or fewer '
'characters.')
return json.dumps(payload)
def execute(self) -> None:
"""
Execute the Discord webhook call
"""
proxies = {}
if self.proxy:
# we only need https proxy for Discord
proxies = {'https': self.proxy}
discord_payload = self._build_discord_payload()
self.run(endpoint=self.webhook_endpoint,
data=discord_payload,
headers={'Content-type': 'application/json'},
extra_options={'proxies': proxies})
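
# Editorial usage sketch: a minimal call, assuming an Airflow connection
# "discord_default" whose host is "https://discord.com/api/" and whose extra
# field carries {"webhook_endpoint": "webhooks/{webhook.id}/{webhook.token}"}:
#
#   hook = DiscordWebhookHook(http_conn_id='discord_default',
#                             message='Deployment finished',
#                             username='airflow-bot')
#   hook.execute()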
|
#!/usr/bin/python
# Copyright (C) 2018-2021 aitos.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This python script generates Ethereum C language interface functions from a contract ABI (Solidity).
# Not every contract ABI can be converted to a C interface, because C lacks object-oriented programming
# capability. If the tool fails to generate the interface, you may have to construct the contract call
# manually.
# The generated C API is named "<ContractName><ContractApiName>", with a transaction pointer argument
# followed by the contract arguments.
#
# For a state-less contract call, the generated C API returns a HEX string representing what is received
# from the blockchain network. If the call is successful, the string is the return value of the contract
# function. The return value string has to be parsed manually as per the contract prototype. If the call
# fails, it returns NULL.
#
# For a value transfer or a stateful contract call, i.e. a transaction, the generated C API returns a HEX
# string representing the transaction hash. If the transaction fails, it returns NULL.
import sys
import json
import os.path
import string
generated_declaration_block_str = '''/******************************************************************************
This file is generated from contract ABI. DO NOT modify it by hand.
******************************************************************************/
'''
generated_include_block_str = '''
// Generated C function interface from smart contract ABI
#include "boatiotsdk.h"
#include "sha3.h"
'''
# Types specially for Solidity
generated_types_for_solidity_str = '''
// Types specially defined for mapping from Solidity
typedef BUINT8 Bbytes1[1];
typedef BUINT8 Bbytes2[2];
typedef BUINT8 Bbytes3[3];
typedef BUINT8 Bbytes4[4];
typedef BUINT8 Bbytes5[5];
typedef BUINT8 Bbytes6[6];
typedef BUINT8 Bbytes7[7];
typedef BUINT8 Bbytes8[8];
typedef BUINT8 Bbytes9[9];
typedef BUINT8 Bbytes10[10];
typedef BUINT8 Bbytes11[11];
typedef BUINT8 Bbytes12[12];
typedef BUINT8 Bbytes13[13];
typedef BUINT8 Bbytes14[14];
typedef BUINT8 Bbytes15[15];
typedef BUINT8 Bbytes16[16];
typedef BUINT8 Bbytes17[17];
typedef BUINT8 Bbytes18[18];
typedef BUINT8 Bbytes19[19];
typedef BUINT8 Bbytes20[20];
typedef BUINT8 Bbytes21[21];
typedef BUINT8 Bbytes22[22];
typedef BUINT8 Bbytes23[23];
typedef BUINT8 Bbytes24[24];
typedef BUINT8 Bbytes25[25];
typedef BUINT8 Bbytes26[26];
typedef BUINT8 Bbytes27[27];
typedef BUINT8 Bbytes28[28];
typedef BUINT8 Bbytes29[29];
typedef BUINT8 Bbytes30[30];
typedef BUINT8 Bbytes31[31];
typedef BUINT8 Bbytes32[32];
typedef Bbytes16 BUINT128;
typedef Bbytes16 SUINT128;
typedef Bbytes32 BUINT256;
typedef Bbytes32 SUINT256;
'''
# Map from type used in smart contract to type in C
# Types not listed are not supported
type_mapping = {
# Variable-length types not supported yet
'string' :'BCHAR*',
'bytes' :'BUINT8*',
#Fixed-length types
'address' :'BoatAddress',
'bool' :'BUINT8',
'uint8' :'BUINT8',
'uint16' :'BUINT16',
'uint32' :'BUINT32',
'uint64' :'BUINT64',
'uint128' :'BUINT128',
'uint256' :'BUINT256',
'int8' :'BSINT8',
'int16' :'BSINT16',
'int32' :'BSINT32',
'int64' :'BSINT64',
'int128' :'BSINT128',
'int256' :'BSINT256',
'bytes1' :'Bbytes1',
'bytes2' :'Bbytes2',
'bytes3' :'Bbytes3',
'bytes4' :'Bbytes4',
'bytes5' :'Bbytes5',
'bytes6' :'Bbytes6',
'bytes7' :'Bbytes7',
'bytes8' :'Bbytes8',
'bytes9' :'Bbytes9',
'bytes10' :'Bbytes10',
'bytes11' :'Bbytes11',
'bytes12' :'Bbytes12',
'bytes13' :'Bbytes13',
'bytes14' :'Bbytes14',
'bytes15' :'Bbytes15',
'bytes16' :'Bbytes16',
'bytes17' :'Bbytes17',
'bytes18' :'Bbytes18',
'bytes19' :'Bbytes19',
'bytes20' :'Bbytes20',
'bytes21' :'Bbytes21',
'bytes22' :'Bbytes22',
'bytes23' :'Bbytes23',
'bytes24' :'Bbytes24',
'bytes25' :'Bbytes25',
'bytes26' :'Bbytes26',
'bytes27' :'Bbytes27',
'bytes28' :'Bbytes28',
'bytes29' :'Bbytes29',
'bytes30' :'Bbytes30',
'bytes31' :'Bbytes31',
'bytes32' :'Bbytes32'
}
class CFunctionGen():
def __init__(self, abi_file_name, output_path):
self.abi_object = None
self.c_file_content = ''
self.h_file_content = ''
with open(abi_file_name) as file_handle:
self.abi_object = json.load(file_handle)
self.abi_file_name = os.path.basename(abi_file_name)
#print(self.abi_object);
self.output_path = output_path
def require_endian_change(self, abitype):
types_to_change_endian = {
'bool' :'BUINT8',
'uint8' :'BUINT8',
'uint16' :'BUINT16',
'uint32' :'BUINT32',
'uint64' :'BUINT64',
'uint128' :'BUINT128',
'uint256' :'BUINT256',
'int8' :'BSINT8',
'int16' :'BSINT16',
'int32' :'BSINT32',
'int64' :'BSINT64',
'int128' :'BSINT128',
'int256' :'BSINT256'
}
if abitype in types_to_change_endian.keys():
return True
else:
return False
def is_array_type(self, abitype):
types_of_array = {
'address' :'BoatAddress',
'uint128' :'BUINT128',
'int128' :'BSINT128',
'uint256' :'BUINT256',
'int256' :'BSINT256',
'bytes1' :'Bbytes1',
'bytes2' :'Bbytes2',
'bytes3' :'Bbytes3',
'bytes4' :'Bbytes4',
'bytes5' :'Bbytes5',
'bytes6' :'Bbytes6',
'bytes7' :'Bbytes7',
'bytes8' :'Bbytes8',
'bytes9' :'Bbytes9',
'bytes10' :'Bbytes10',
'bytes11' :'Bbytes11',
'bytes12' :'Bbytes12',
'bytes13' :'Bbytes13',
'bytes14' :'Bbytes14',
'bytes15' :'Bbytes15',
'bytes16' :'Bbytes16',
'bytes17' :'Bbytes17',
'bytes18' :'Bbytes18',
'bytes19' :'Bbytes19',
'bytes20' :'Bbytes20',
'bytes21' :'Bbytes21',
'bytes22' :'Bbytes22',
'bytes23' :'Bbytes23',
'bytes24' :'Bbytes24',
'bytes25' :'Bbytes25',
'bytes26' :'Bbytes26',
'bytes27' :'Bbytes27',
'bytes28' :'Bbytes28',
'bytes29' :'Bbytes29',
'bytes30' :'Bbytes30',
'bytes31' :'Bbytes31',
'bytes32' :'Bbytes32'
}
if abitype in types_of_array.keys():
return True
else:
return False
def is_nonFixedSize_type(self, abitype):
if abitype == 'string':
return True
elif abitype == 'bytes':
return True
else:
return False
def is_needFlagInputLen_type(self, abitype):
if abitype == 'bytes':
return True
elif abitype.find('[]') != -1:
return True
else:
return False
    # Check whether the function has any non-fixed-size input type
def is_has_nonFixed_type(self, abi_item):
inputs = abi_item['inputs']
inputs_len = len(inputs)
i = 0
while i < inputs_len:
inputType = inputs[i]['type']
if self.is_nonFixedSize_type(inputType) == True:
return True
i += 1
return False
def gen_input_name(self, abi_item):
if len(abi_item['name']) == 0:
#print 'input name is null'
return type_mapping[abi_item['type']] + 'Value'
else:
return abi_item['name']
def gen_nonFixed_mallocSize_exp(self, abi_item, spaceNum = 38):
inputs = abi_item['inputs']
inputs_len = len(inputs)
nonFixedSize_type_malloc = ''
i = 0
while i < inputs_len:
inputType = inputs[i]['type']
inputName = self.gen_input_name(inputs[i])
if self.is_nonFixedSize_type(inputType) == True:
if len(nonFixedSize_type_malloc) != 0:
nonFixedSize_type_malloc += ' ' * spaceNum
if self.is_needFlagInputLen_type(inputType) == True:
nonFixedSize_type_malloc += 'BOAT_ROUNDUP(' + inputName + 'Len , 32) + 32' + ' + \\\n'
else:
nonFixedSize_type_malloc += 'BOAT_ROUNDUP(' + 'strlen(' + inputName + '), 32) + 32' + ' + \\\n'
else:
pass
i += 1
if len(nonFixedSize_type_malloc) != 0:
nonFixedSize_type_malloc += ' ' * spaceNum
return nonFixedSize_type_malloc
    # Generate the offset location of a non-fixed-size type.
    # Returns a string expression.
def get_nonFixed_offset(self, abi_item, index):
inputs = abi_item['inputs']
inputs_len = len(inputs)
offset_int = inputs_len * 32
offset_str = ''
i = 0
while i < index:
inputType = inputs[i]['type']
inputName = self.gen_input_name(inputs[i])
if self.is_nonFixedSize_type(inputType) == True:
if len(offset_str) != 0:
offset_str += ' ' * 27
if self.is_needFlagInputLen_type(inputType) == True:
offset_str += 'BOAT_ROUNDUP(' + inputName + 'Len , 32) + 32 ' + '+ \\\n'
else:
offset_str += 'BOAT_ROUNDUP(' + 'strlen(' + inputName + '), 32) + 32 ' + '+ \\\n'
else:
pass
i += 1
return offset_str[0:len(offset_str) - 2] + str(offset_int)
    # Generate the non-fixed data length expression
def get_nonFixed_length(self, abi_item,input_nonFixed_type):
inputs = abi_item['inputs']
inputs_len = len(inputs)
length_str = ''
i = 0
while i < inputs_len:
inputType = inputs[i]['type']
inputName = self.gen_input_name(inputs[i])
if self.is_nonFixedSize_type(inputType) == True:
if input_nonFixed_type == inputType:
if self.is_needFlagInputLen_type(inputType) == True:
length_str += 'BOAT_ROUNDUP(' + inputName + 'Len , 32)'
else:
length_str += 'BOAT_ROUNDUP(' + 'strlen(' + inputName + '), 32)'
break
else:
pass
i += 1
return length_str
def generate_c_funcs(self):
if self.abi_object != None:
self.c_file_content += generated_declaration_block_str
self.c_file_content += generated_include_block_str
self.c_file_content += generated_types_for_solidity_str
self.h_file_content += generated_declaration_block_str
self.h_file_content += generated_include_block_str
self.h_file_content += generated_types_for_solidity_str
for abi_item in self.abi_object['abi']:
if abi_item['type'] == 'function':
self.generate_func_prototype(abi_item)
self.generate_func_body(abi_item)
def generate_func_prototype(self, abi_item):
inputName_str = ''
# Extract type of return value
if len(abi_item['outputs']) == 0:
retval_str = 'BCHAR *' #'void'
else:
#retval_str = type_mapping[abi_item['outputs'][0]['type']]
# For stateful transaction, returns Tx Hash;
# For state-less function call, returns a string representing the return value
retval_str = 'BCHAR *'
# Extract function name (Prefix with ABI file name because multiple contracts may have same function names)
func_name_str = self.abi_file_name.replace('.json','')
func_name_str = func_name_str.replace('.','_') + '_' + abi_item['name']
# Extract function arguments
inputs = abi_item['inputs']
inputs_len = len(inputs)
input_str = '('
input_str += 'BoatEthTx *tx_ptr'
if inputs_len != 0:
input_str += ', '
i = 0
while i < inputs_len:
input = inputs[i]
inputName_str = self.gen_input_name(input)
try:
input_str += type_mapping[input['type']] + ' ' + inputName_str
                # For type 'bytes' and <type>[], add an input parameter to indicate the input length
if self.is_needFlagInputLen_type(input['type']) == True:
input_str += ', BUINT32 ' + inputName_str + 'Len'
except:
print(abi_item['name'] + '(): Solidity type (' + input['type'] + ') is incompatible with C interface auto generator.')
print('You may have to manually construct the transaction.')
quit(-1)
if i != inputs_len -1:
input_str += ', '
i = i+1
input_str += ')'
# Generate function prototype
self.c_file_content += retval_str + ' ' + func_name_str + input_str + '\n'
self.h_file_content += retval_str + ' ' + func_name_str + input_str + ';\n'
def generate_func_body(self, abi_item):
func_body_str = '{\n'
inputName_str = ''
# Generate local variables
if abi_item['constant'] == True:
func_body_str += ' BCHAR *call_result_str = NULL;\n'
else:
func_body_str += ' static BCHAR tx_hash_str[67] = \"\";\n'
func_body_str += ' BoatFieldVariable data_field;\n'
        func_body_str += ' BCHAR *function_prototype_str;\n'
func_body_str += ' BUINT8 field_bytes32[32];\n'
func_body_str += ' BUINT8 *data_offset_ptr;\n'
if self.is_has_nonFixed_type(abi_item) == True:
func_body_str += ' BUINT32 data_offset_location;\n'
func_body_str += ' BUINT32 nonFixed_filled_length;\n'
func_body_str += ' BUINT32 nonFixed_actual_length;\n'
func_body_str += ' boat_try_declare;\n\n'
inputs = abi_item['inputs']
# input param check
inputs_param = abi_item['inputs']
inputs_param_len = len(inputs_param)
inputs_param_body_str_tmp = ''
if inputs_param_len != 0:
i = 0
while i < inputs_param_len:
if self.is_nonFixedSize_type(inputs_param[i]['type']) or self.is_array_type(inputs_param[i]['type']):
inputParamName_str = self.gen_input_name(inputs_param[i])
inputs_param_body_str_tmp += '|| (' + inputParamName_str + ' == NULL)'
i = i+1
if len(inputs_param_body_str_tmp) == 0:
func_body_str += ' if( tx_ptr == NULL )\n'
else:
func_body_str += ' if( (tx_ptr == NULL) ' + inputs_param_body_str_tmp +' )\n'
else:
func_body_str += ' if( tx_ptr == NULL )\n'
func_body_str += ' {\n'
        func_body_str += ' BoatLog(BOAT_LOG_CRITICAL, \"A NULL input parameter was detected.\");\n'
func_body_str += ' return NULL;\n'
func_body_str += ' }\n\n'
# Set Nonce
if abi_item['constant'] != True:
func_body_str += ' boat_try(BoatEthTxSetNonce(tx_ptr, BOAT_ETH_NONCE_AUTO));\n\n'
# Construct solidity function prototype
# Solidity function name
sol_func_proto_str = abi_item['name'] + '('
# Extract solidity function inputs
inputs_len = len(inputs)
nonFixed_size_str = self.gen_nonFixed_mallocSize_exp(abi_item)
nonFixed_filedLen_str = self.gen_nonFixed_mallocSize_exp(abi_item, 27)
fixed_size_str = '(' + str(inputs_len) + ' * 32 + 4)'
func_body_str += ' data_field.field_ptr = BoatMalloc(' + nonFixed_size_str + fixed_size_str + ');\n'
func_body_str += ' if(data_field.field_ptr == NULL) boat_throw(BOAT_ERROR, cleanup)\n'
func_body_str += ' data_field.field_len = ' + nonFixed_filedLen_str + fixed_size_str + ';\n'
func_body_str += ' data_offset_ptr = data_field.field_ptr;\n\n'
i = 0
while i < inputs_len:
input = inputs[i]
sol_func_proto_str += input['type']
if i != inputs_len -1:
sol_func_proto_str += ','
i = i+1
sol_func_proto_str += ')'
# Generate C code for function selector
        func_body_str += ' function_prototype_str = \"' + sol_func_proto_str + '\";\n'
        func_body_str += ' keccak_256((BUINT8 *)function_prototype_str, strlen(function_prototype_str), field_bytes32);\n'
func_body_str += ' memcpy(data_offset_ptr, field_bytes32, 4);\n'
func_body_str += ' data_offset_ptr += 4;\n\n'
# Assemble function arguments
inputIndex = 0
for input in inputs:
inputIndex += 1 #only be used for third param of get_nonFixed_offset(...)
try:
param_size_str = 'sizeof(' + type_mapping[input['type']] + ')'
except:
print(abi_item['name'] + '(): Solidity type (' + input['type'] + ') is incompatible with C interface auto generator.')
print('You may have to manually construct the transaction.')
quit(-1)
# Only prefix "&" to native C integer types to obtain its address. No "&" for an array type.
if self.is_array_type(input['type']) == True:
c_address_sign = ''
else:
c_address_sign = '&'
inputName_str = self.gen_input_name(input)
if self.require_endian_change(input['type']) == True:
func_body_str += ' UtilityChangeEndian(' + c_address_sign + inputName_str + ', ' + param_size_str + ');\n'
func_body_str += ' memset(data_offset_ptr, 0x00, 32);\n'
func_body_str += ' memcpy(data_offset_ptr+(32-' + param_size_str + '), ' + c_address_sign + inputName_str + ', ' + param_size_str + ');\n'
else:
if self.is_nonFixedSize_type(input['type']) == True:
#fill offset location value
offset = self.get_nonFixed_offset(abi_item, inputIndex - 1)
func_body_str += ' //param \'' + inputName_str + '\' offset location filled\n'
func_body_str += ' data_offset_location = ' + offset + ';\n'
func_body_str += ' UtilityChangeEndian(&data_offset_location, sizeof(BUINT32));\n'
func_body_str += ' memset(data_offset_ptr, 0x00, 32);\n'
func_body_str += ' memcpy(data_offset_ptr + 32 - sizeof(BUINT32), &data_offset_location, sizeof(BUINT32));\n'
else:
func_body_str += ' memset(data_offset_ptr, 0x00, 32);\n'
func_body_str += ' memcpy(data_offset_ptr, ' + c_address_sign + inputName_str + ', ' + param_size_str + ');\n'
func_body_str += ' data_offset_ptr += 32;\n\n'
#non-fixed data fill
i = 0
while i < inputs_len:
input = inputs[i]
inputName_str = self.gen_input_name(input)
nonFixed_length = self.get_nonFixed_length(abi_item, input['type'])
if len(nonFixed_length) != 0:
func_body_str += ' //non-fixed param \'' + inputName_str + '\' data filled\n'
#non-fixed data length
if self.is_needFlagInputLen_type(input['type']) == True:
func_body_str += ' nonFixed_actual_length = ' + inputName_str + 'Len' + ';\n'
func_body_str += ' UtilityChangeEndian(&nonFixed_actual_length, sizeof(BUINT32));\n'
func_body_str += ' memset(data_offset_ptr, 0x00, 32);\n'
func_body_str += ' memcpy(data_offset_ptr + 32 - sizeof(BUINT32), &nonFixed_actual_length, sizeof(BUINT32));\n'
func_body_str += ' data_offset_ptr += 32;\n\n'
func_body_str += ' nonFixed_filled_length = ' + nonFixed_length + ';\n'
func_body_str += ' memset(data_offset_ptr, 0x00, nonFixed_filled_length);\n'
func_body_str += ' memcpy(data_offset_ptr, ' + inputName_str + ', ' + inputName_str + 'Len' + ');\n'
func_body_str += ' data_offset_ptr += nonFixed_filled_length;\n\n'
else:
func_body_str += ' nonFixed_actual_length = ' + 'strlen(' + inputName_str + ');\n'
func_body_str += ' UtilityChangeEndian(&nonFixed_actual_length, sizeof(BUINT32));\n'
func_body_str += ' memset(data_offset_ptr, 0x00, 32);\n'
func_body_str += ' memcpy(data_offset_ptr + 32 - sizeof(BUINT32), &nonFixed_actual_length, sizeof(BUINT32));\n'
func_body_str += ' data_offset_ptr += 32;\n\n'
func_body_str += ' nonFixed_filled_length = ' + nonFixed_length + ';\n'
func_body_str += ' memset(data_offset_ptr, 0x00, nonFixed_filled_length);\n'
func_body_str += ' memcpy(data_offset_ptr, ' + inputName_str + ', ' + 'strlen(' + inputName_str + ')' + ');\n'
func_body_str += ' data_offset_ptr += nonFixed_filled_length;\n\n'
i = i+1
if abi_item['constant'] == True:
            # for state-less function call
            func_body_str += ' call_result_str = BoatEthCallContractFunc(tx_ptr, function_prototype_str, data_field.field_ptr+4, data_field.field_len-4);\n\n'
else:
# for stateful transaction
func_body_str += ' boat_try(BoatEthTxSetData(tx_ptr, &data_field));\n\n'
func_body_str += ' boat_try(BoatEthTxSend(tx_ptr));\n\n'
func_body_str += ' UtilityBin2Hex(tx_hash_str, tx_ptr->tx_hash.field, tx_ptr->tx_hash.field_len, BIN2HEX_LEFTTRIM_QUANTITY, BIN2HEX_PREFIX_0x_YES, BOAT_FALSE);\n\n'
# Cleanup Label
func_body_str += '''
boat_catch(cleanup)
{
BoatLog(BOAT_LOG_VERBOSE, "Exception: %d", boat_exception);
if(data_field.field_ptr != NULL) BoatFree(data_field.field_ptr);
return(NULL);
}
'''
func_body_str += '\n BoatFree(data_field.field_ptr);\n'
if abi_item['constant'] == True:
func_body_str += ' return(call_result_str);\n'
else:
func_body_str += ' return(tx_hash_str);\n'
func_body_str += '\n}\n\n'
self.c_file_content += func_body_str
def save_c_file(self):
if self.abi_object != None:
c_file_name = self.output_path + '/' + self.abi_file_name.replace('.json','.c')
with open(c_file_name, 'w') as c_file_handle:
c_file_handle.write(self.c_file_content)
h_file_name = self.output_path + '/' + self.abi_file_name.replace('.json','.h')
with open(h_file_name, 'w') as h_file_handle:
h_file_handle.write(self.h_file_content)
def main():
argc = len(sys.argv)
if argc <= 1 or argc >= 4:
print('Usage: ' + sys.argv[0] + ' <ABI File> ' + '[<Output path>]')
else:
if argc == 2:
output_path = './'
else:
output_path = sys.argv[2]
c_func_object = CFunctionGen(sys.argv[1], output_path)
c_func_object.generate_c_funcs()
c_func_object.save_c_file()
#print(c_func_object.h_file_content)
#print(c_func_object.c_file_content)
if __name__ == '__main__':
main()
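
# Editorial usage sketch: invoked from the command line against a
# solc-generated ABI JSON file (the file names are hypothetical):
#
#   $ python <this script> MyContract.json ./generated
#
# which writes MyContract.c and MyContract.h under ./generated.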
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Inception-v3 expressed in TensorFlow-Slim.
Usage:
# Parameters for BatchNorm.
batch_norm_params = {
# Decay for the batch_norm moving averages.
'decay': BATCHNORM_MOVING_AVERAGE_DECAY,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
}
# Set weight_decay for weights in Conv and FC layers.
with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0.00004):
with slim.arg_scope([slim.ops.conv2d],
stddev=0.1,
activation=tf.nn.relu,
batch_norm_params=batch_norm_params):
# Force all Variables to reside on the CPU.
with slim.arg_scope([slim.variables.variable], device='/cpu:0'):
logits, endpoints = slim.inception.inception_v3(
images,
dropout_keep_prob=0.8,
num_classes=num_classes,
is_training=for_training,
restore_logits=restore_logits,
scope=scope)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from inception.slim import ops
from inception.slim import scopes
FLAGS = tf.app.flags.FLAGS
def inception_v3(inputs,
dropout_keep_prob=0.8,
num_classes=1000,
is_training=True,
restore_logits=True,
scope=''):
"""Latest Inception from http://arxiv.org/abs/1512.00567.
"Rethinking the Inception Architecture for Computer Vision"
Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
Zbigniew Wojna
Args:
inputs: a tensor of size [batch_size, height, width, channels].
dropout_keep_prob: dropout keep_prob.
num_classes: number of predicted classes.
is_training: whether is training or not.
restore_logits: whether or not the logits layers should be restored.
Useful for fine-tuning a model with different num_classes.
scope: Optional scope for op_scope.
Returns:
a list containing 'logits', 'aux_logits' Tensors.
"""
# end_points will collect relevant activations for external use, for example
# summaries or losses.
end_points = {}
with tf.op_scope([inputs], scope, 'inception_v3'):
tf.set_random_seed(FLAGS.DANITER_SEED)
with scopes.arg_scope([ops.conv2d, ops.fc, ops.batch_norm, ops.dropout],
is_training=is_training):
with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
stride=1, padding='VALID'):
# 299 x 299 x 3
end_points['conv0'] = ops.conv2d(inputs, 32, [3, 3], stride=2,
scope='conv0')
# 149 x 149 x 32
end_points['conv1'] = ops.conv2d(end_points['conv0'], 32, [3, 3],
scope='conv1')
# 147 x 147 x 32
end_points['conv2'] = ops.conv2d(end_points['conv1'], 64, [3, 3],
padding='SAME', scope='conv2')
# 147 x 147 x 64
end_points['pool1'] = ops.max_pool(end_points['conv2'], [3, 3],
stride=2, scope='pool1')
# 73 x 73 x 64
end_points['conv3'] = ops.conv2d(end_points['pool1'], 80, [1, 1],
scope='conv3')
# 73 x 73 x 80.
end_points['conv4'] = ops.conv2d(end_points['conv3'], 192, [3, 3],
scope='conv4')
# 71 x 71 x 192.
end_points['pool2'] = ops.max_pool(end_points['conv4'], [3, 3],
stride=2, scope='pool2')
# 35 x 35 x 192.
net = end_points['pool2']
# Inception blocks
with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
stride=1, padding='SAME'):
# mixed: 35 x 35 x 256.
with tf.variable_scope('mixed_35x35x256a'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 64, [1, 1])
with tf.variable_scope('branch5x5'):
branch5x5 = ops.conv2d(net, 48, [1, 1])
branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = ops.conv2d(net, 64, [1, 1])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 32, [1, 1])
net = tf.concat(3, [branch1x1, branch5x5, branch3x3dbl, branch_pool])
end_points['mixed_35x35x256a'] = net
# mixed_1: 35 x 35 x 288.
with tf.variable_scope('mixed_35x35x288a'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 64, [1, 1])
with tf.variable_scope('branch5x5'):
branch5x5 = ops.conv2d(net, 48, [1, 1])
branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = ops.conv2d(net, 64, [1, 1])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
net = tf.concat(3, [branch1x1, branch5x5, branch3x3dbl, branch_pool])
end_points['mixed_35x35x288a'] = net
# mixed_2: 35 x 35 x 288.
with tf.variable_scope('mixed_35x35x288b'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 64, [1, 1])
with tf.variable_scope('branch5x5'):
branch5x5 = ops.conv2d(net, 48, [1, 1])
branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = ops.conv2d(net, 64, [1, 1])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
net = tf.concat(3, [branch1x1, branch5x5, branch3x3dbl, branch_pool])
end_points['mixed_35x35x288b'] = net
# mixed_3: 17 x 17 x 768.
with tf.variable_scope('mixed_17x17x768a'):
with tf.variable_scope('branch3x3'):
branch3x3 = ops.conv2d(net, 384, [3, 3], stride=2, padding='VALID')
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = ops.conv2d(net, 64, [1, 1])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3],
stride=2, padding='VALID')
with tf.variable_scope('branch_pool'):
branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID')
net = tf.concat(3, [branch3x3, branch3x3dbl, branch_pool])
end_points['mixed_17x17x768a'] = net
# mixed4: 17 x 17 x 768.
with tf.variable_scope('mixed_17x17x768b'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 192, [1, 1])
with tf.variable_scope('branch7x7'):
branch7x7 = ops.conv2d(net, 128, [1, 1])
branch7x7 = ops.conv2d(branch7x7, 128, [1, 7])
branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
with tf.variable_scope('branch7x7dbl'):
branch7x7dbl = ops.conv2d(net, 128, [1, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [1, 7])
branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
net = tf.concat(3, [branch1x1, branch7x7, branch7x7dbl, branch_pool])
end_points['mixed_17x17x768b'] = net
# mixed_5: 17 x 17 x 768.
with tf.variable_scope('mixed_17x17x768c'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 192, [1, 1])
with tf.variable_scope('branch7x7'):
branch7x7 = ops.conv2d(net, 160, [1, 1])
branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
with tf.variable_scope('branch7x7dbl'):
branch7x7dbl = ops.conv2d(net, 160, [1, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
net = tf.concat(3, [branch1x1, branch7x7, branch7x7dbl, branch_pool])
end_points['mixed_17x17x768c'] = net
# mixed_6: 17 x 17 x 768.
with tf.variable_scope('mixed_17x17x768d'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 192, [1, 1])
with tf.variable_scope('branch7x7'):
branch7x7 = ops.conv2d(net, 160, [1, 1])
branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
with tf.variable_scope('branch7x7dbl'):
branch7x7dbl = ops.conv2d(net, 160, [1, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
net = tf.concat(3, [branch1x1, branch7x7, branch7x7dbl, branch_pool])
end_points['mixed_17x17x768d'] = net
# mixed_7: 17 x 17 x 768.
with tf.variable_scope('mixed_17x17x768e'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 192, [1, 1])
with tf.variable_scope('branch7x7'):
branch7x7 = ops.conv2d(net, 192, [1, 1])
branch7x7 = ops.conv2d(branch7x7, 192, [1, 7])
branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
with tf.variable_scope('branch7x7dbl'):
branch7x7dbl = ops.conv2d(net, 192, [1, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
net = tf.concat(3, [branch1x1, branch7x7, branch7x7dbl, branch_pool])
end_points['mixed_17x17x768e'] = net
# Auxiliary Head logits
aux_logits = tf.identity(end_points['mixed_17x17x768e'])
with tf.variable_scope('aux_logits'):
aux_logits = ops.avg_pool(aux_logits, [5, 5], stride=3,
padding='VALID')
aux_logits = ops.conv2d(aux_logits, 128, [1, 1], scope='proj')
# Shape of feature map before the final layer.
shape = aux_logits.get_shape()
aux_logits = ops.conv2d(aux_logits, 768, shape[1:3], stddev=0.01,
padding='VALID')
aux_logits = ops.flatten(aux_logits)
aux_logits = ops.fc(aux_logits, num_classes, activation=None,
stddev=0.001, restore=restore_logits)
end_points['aux_logits'] = aux_logits
# mixed_8: 8 x 8 x 1280.
      # Note that the scope below is deliberately left unchanged so as not to
      # invalidate previous checkpoints.
      # (TODO) Fix the scope when appropriate.
with tf.variable_scope('mixed_17x17x1280a'):
with tf.variable_scope('branch3x3'):
branch3x3 = ops.conv2d(net, 192, [1, 1])
branch3x3 = ops.conv2d(branch3x3, 320, [3, 3], stride=2,
padding='VALID')
with tf.variable_scope('branch7x7x3'):
branch7x7x3 = ops.conv2d(net, 192, [1, 1])
branch7x7x3 = ops.conv2d(branch7x7x3, 192, [1, 7])
branch7x7x3 = ops.conv2d(branch7x7x3, 192, [7, 1])
branch7x7x3 = ops.conv2d(branch7x7x3, 192, [3, 3],
stride=2, padding='VALID')
with tf.variable_scope('branch_pool'):
branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID')
net = tf.concat(3, [branch3x3, branch7x7x3, branch_pool])
end_points['mixed_17x17x1280a'] = net
# mixed_9: 8 x 8 x 2048.
with tf.variable_scope('mixed_8x8x2048a'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 320, [1, 1])
with tf.variable_scope('branch3x3'):
branch3x3 = ops.conv2d(net, 384, [1, 1])
branch3x3 = tf.concat(3, [ops.conv2d(branch3x3, 384, [1, 3]),
ops.conv2d(branch3x3, 384, [3, 1])])
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = ops.conv2d(net, 448, [1, 1])
branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
branch3x3dbl = tf.concat(3, [ops.conv2d(branch3x3dbl, 384, [1, 3]),
ops.conv2d(branch3x3dbl, 384, [3, 1])])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
net = tf.concat(3, [branch1x1, branch3x3, branch3x3dbl, branch_pool])
end_points['mixed_8x8x2048a'] = net
# mixed_10: 8 x 8 x 2048.
with tf.variable_scope('mixed_8x8x2048b'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 320, [1, 1])
with tf.variable_scope('branch3x3'):
branch3x3 = ops.conv2d(net, 384, [1, 1])
branch3x3 = tf.concat(3, [ops.conv2d(branch3x3, 384, [1, 3]),
ops.conv2d(branch3x3, 384, [3, 1])])
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = ops.conv2d(net, 448, [1, 1])
branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
branch3x3dbl = tf.concat(3, [ops.conv2d(branch3x3dbl, 384, [1, 3]),
ops.conv2d(branch3x3dbl, 384, [3, 1])])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
net = tf.concat(3, [branch1x1, branch3x3, branch3x3dbl, branch_pool])
end_points['mixed_8x8x2048b'] = net
# Final pooling and prediction
with tf.variable_scope('logits'):
shape = net.get_shape()
net = ops.avg_pool(net, shape[1:3], padding='VALID', scope='pool')
# 1 x 1 x 2048
net = ops.dropout(net, dropout_keep_prob, scope='dropout', seed=FLAGS.DANITER_SEED)
net = ops.flatten(net, scope='flatten')
# 2048
logits = ops.fc(net, num_classes, activation=None, scope='logits',
restore=restore_logits)
# 1000
end_points['logits'] = logits
end_points['predictions'] = tf.nn.softmax(logits, name='predictions')
return logits, end_points
def inception_v3_parameters(weight_decay=0.00004, stddev=0.1,
batch_norm_decay=0.9997, batch_norm_epsilon=0.001):
"""Yields the scope with the default parameters for inception_v3.
Args:
weight_decay: the weight decay for weights variables.
    stddev: standard deviation of the truncated Gaussian weight distribution.
batch_norm_decay: decay for the moving average of batch_norm momentums.
batch_norm_epsilon: small float added to variance to avoid dividing by zero.
Yields:
    an arg_scope with the parameters needed for inception_v3.
"""
# Set weight_decay for weights in Conv and FC layers.
with scopes.arg_scope([ops.conv2d, ops.fc],
weight_decay=weight_decay):
# Set stddev, activation and parameters for batch_norm.
with scopes.arg_scope([ops.conv2d],
stddev=stddev,
activation=tf.nn.relu,
seed=FLAGS.DANITER_SEED,
batch_norm_params={
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon}) as arg_scope:
yield arg_scope
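
# Editorial usage sketch: the second return value of ``inception_v3`` maps
# layer names to activations, which is handy for auxiliary losses or summaries
# (``images`` is a hypothetical [batch, 299, 299, 3] float tensor):
#
#   logits, end_points = inception_v3(images, num_classes=1001,
#                                     is_training=True)
#   aux_logits = end_points['aux_logits']    # auxiliary classifier head
#   predictions = end_points['predictions']  # softmax over ``logits``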
|
from django.urls import path
from .views import ArticleListView, ArticleDetailView
urlpatterns = [
    path('', ArticleListView.as_view()),
    path('<pk>', ArticleDetailView.as_view()),
]
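
# Editorial note: with these patterns, "/" resolves to ArticleListView and
# "/<pk>" (e.g. "/42") resolves to ArticleDetailView with kwargs {'pk': '42'}.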
|
# -*- coding: utf-8 -*-
# Copyright 2018-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import logging
import os
import shutil
from typing import TYPE_CHECKING, Optional
from synapse.config._base import Config
from synapse.logging.context import defer_to_thread, run_in_background
from synapse.util.async_helpers import maybe_awaitable
from ._base import FileInfo, Responder
from .media_storage import FileResponder
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from synapse.server import HomeServer
class StorageProvider(metaclass=abc.ABCMeta):
"""A storage provider is a service that can store uploaded media and
retrieve them.
"""
@abc.abstractmethod
async def store_file(self, path: str, file_info: FileInfo) -> None:
"""Store the file described by file_info. The actual contents can be
retrieved by reading the file in file_info.upload_path.
Args:
path: Relative path of file in local cache
file_info: The metadata of the file.
"""
@abc.abstractmethod
async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
"""Attempt to fetch the file described by file_info and stream it
into writer.
Args:
path: Relative path of file in local cache
file_info: The metadata of the file.
Returns:
Returns a Responder if the provider has the file, otherwise returns None.
"""
class StorageProviderWrapper(StorageProvider):
"""Wraps a storage provider and provides various config options
Args:
backend: The storage provider to wrap.
store_local: Whether to store new local files or not.
        store_synchronous: Whether to wait for the file to be successfully
            uploaded, or to do the upload in the background.
store_remote: Whether remote media should be uploaded
"""
def __init__(
self,
backend: StorageProvider,
store_local: bool,
store_synchronous: bool,
store_remote: bool,
):
self.backend = backend
self.store_local = store_local
self.store_synchronous = store_synchronous
self.store_remote = store_remote
def __str__(self) -> str:
return "StorageProviderWrapper[%s]" % (self.backend,)
async def store_file(self, path: str, file_info: FileInfo) -> None:
if not file_info.server_name and not self.store_local:
return None
if file_info.server_name and not self.store_remote:
return None
if self.store_synchronous:
# store_file is supposed to return an Awaitable, but guard
# against improper implementations.
await maybe_awaitable(self.backend.store_file(path, file_info)) # type: ignore
else:
# TODO: Handle errors.
async def store():
try:
return await maybe_awaitable(
self.backend.store_file(path, file_info)
)
except Exception:
logger.exception("Error storing file")
run_in_background(store)
async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
# store_file is supposed to return an Awaitable, but guard
# against improper implementations.
return await maybe_awaitable(self.backend.fetch(path, file_info))
class FileStorageProviderBackend(StorageProvider):
"""A storage provider that stores files in a directory on a filesystem.
Args:
hs
config: The config returned by `parse_config`.
"""
def __init__(self, hs: "HomeServer", config: str):
self.hs = hs
self.cache_directory = hs.config.media_store_path
self.base_directory = config
def __str__(self):
return "FileStorageProviderBackend[%s]" % (self.base_directory,)
async def store_file(self, path: str, file_info: FileInfo) -> None:
"""See StorageProvider.store_file"""
primary_fname = os.path.join(self.cache_directory, path)
backup_fname = os.path.join(self.base_directory, path)
dirname = os.path.dirname(backup_fname)
if not os.path.exists(dirname):
os.makedirs(dirname)
await defer_to_thread(
self.hs.get_reactor(), shutil.copyfile, primary_fname, backup_fname
)
async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
"""See StorageProvider.fetch"""
backup_fname = os.path.join(self.base_directory, path)
if os.path.isfile(backup_fname):
return FileResponder(open(backup_fname, "rb"))
return None
@staticmethod
def parse_config(config: dict) -> str:
"""Called on startup to parse config supplied. This should parse
the config and raise if there is a problem.
The returned value is passed into the constructor.
In this case we only care about a single param, the directory, so let's
just pull that out.
"""
return Config.ensure_directory(config["directory"])
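# Illustrative only (not part of Synapse): a minimal sketch of the
# StorageProvider contract implemented above. The class name and behaviour
# are hypothetical.
#
#   class NullStorageProvider(StorageProvider):
#       """Accepts every file but stores nothing and never serves anything."""
#
#       async def store_file(self, path: str, file_info: FileInfo) -> None:
#           logger.debug("Discarding %s", path)
#
#       async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
#           return None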
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import em
import getpass
import os
import unittest
from pathlib import Path
import pwd
import shlex
from tempfile import TemporaryDirectory
from rocker.core import list_plugins
from rocker.core import write_files
from rocker.extensions import name_to_argument
from rocker.extensions import RockerExtension
from test_extension import plugin_load_parser_correctly
class ExtensionsTest(unittest.TestCase):
def test_name_to_argument(self):
self.assertEqual(name_to_argument('asdf'), '--asdf')
self.assertEqual(name_to_argument('as_df'), '--as-df')
self.assertEqual(name_to_argument('as-df'), '--as-df')
class TestFileInjection(RockerExtension):
name = 'test_file_injection'
@classmethod
def get_name(cls):
return cls.name
def get_files(self, cliargs):
all_files = {}
all_files['test_file.txt'] = """The quick brown fox jumped over the lazy dog.
%s""" % cliargs
all_files['/absolute.txt'] = """Absolute file path should be skipped"""
return all_files
@staticmethod
def register_arguments(parser, defaults={}):
parser.add_argument('--test-file-injection',
action='store_true',
default=defaults.get('test_file_injection', False),
help="Enable test_file_injection extension")
class FileInjectionExtensionTest(unittest.TestCase):
def setUp(self):
# Work around interference between empy Interpreter
# stdout proxy and test runner. empy installs a proxy on stdout
# to be able to capture the information.
# And the test runner creates a new stdout object for each test.
        # This breaks empy as it assumes that the proxy persists
        # between instances of the Interpreter class.
# empy will error with the exception
# "em.Error: interpreter stdout proxy lost"
em.Interpreter._wasProxyInstalled = False
def test_file_injection(self):
extensions = [TestFileInjection()]
mock_cliargs = {'test_key': 'test_value'}
with TemporaryDirectory() as td:
write_files(extensions, mock_cliargs, td)
with open(os.path.join(td, 'test_file.txt'), 'r') as fh:
content = fh.read()
self.assertIn('quick brown', content)
self.assertIn('test_key', content)
self.assertIn('test_value', content)
self.assertFalse(os.path.exists('/absolute.txt'))
|
"""
usage:
pbxproj folder [options] <project> <path> [--target <target>...]
[--exclude <regex>...]
[(--recursive | -r)]
[(--no-create-groups | -G)]
[(--weak | -w)]
[(--no-embed | -E)]
[(--sign-on-copy | -s)]
[(--ignore-unknown-types | -i)]
[(--no-create-build-files | -C)]
[(--header-scope <scope> | -H <scope>)]
pbxproj folder [options] (--delete | -D) <project> <path> [--tree <tree>]
positional arguments:
<project> Project path to the .xcodeproj folder.
<path> Path of the file to add to the project.
generic options:
-h, --help This message.
-t, --target <target> Target name(s) to be modified. If there is no target specified, all targets are
modified.
-b, --backup Creates a backup before start processing the command.
delete options:
-D, --delete Delete the file.
--tree <tree> Tree to add the file relative to. Available options: <absolute>, <group>,
SOURCE_ROOT, SDKROOT, DEVELOPER_DIR, BUILT_PRODUCTS_DIR. [default: SOURCE_ROOT]
add options:
-e, --exclude <regex> Pattern to exclude during the insertion of a folder. The pattern applies to file
names and folder names.
-r, --recursive Add folders and files recursively.
-G, --no-create-groups Add the folder as a file reference instead of creating group(s).
-w, --weak Add the weak flag when libraries or frameworks are added. Linking optional.
-E, --no-embed Do not embed frameworks when added.
-s, --sign-on-copy Sign frameworks when copied/embedded.
-i, --ignore-unknown-types Ignore unknown file types when added.
-C, --no-create-build-files Do not create build file phases when adding a file.
    -H, --header-scope <scope> Add header file using the given scope. Available options: public, private or
                               project. [default: project]
"""
from pbxproj.pbxcli import *
from pbxproj.pbxextensions.ProjectFiles import FileOptions
from docopt import docopt
def execute(project, args):
# make a decision of what function to call based on the -D flag
if args['--delete']:
return _remove(project, args)
else:
return _add(project, args)
def _add(project, args):
if '--header-scope' not in args or args['--header-scope'] not in ['public', 'private', 'project']:
header_scope = 'project'
else:
header_scope = args['--header-scope']
options = FileOptions(create_build_files=not args['--no-create-build-files'],
weak=args['--weak'],
ignore_unknown_type=args['--ignore-unknown-types'],
embed_framework=not args['--no-embed'],
code_sign_on_copy=args['--sign-on-copy'],
header_scope=header_scope)
build_files = project.add_folder(args['<path>'], excludes=args['--exclude'], recursive=args['--recursive'],
create_groups=not args['--no-create-groups'], target_name=args['--target'],
file_options=options)
# print some information about the build files created.
if build_files is None:
raise Exception('No files were added to the project.')
if not build_files:
return 'Folder added to the project, no build file sections created.'
info = {}
for build_file in build_files:
if build_file.isa not in info:
info[build_file.isa] = 0
info[build_file.isa] += 1
summary = 'Folder added to the project.'
for k in info:
summary += '\n{0} {1} sections created.'.format(info[k], k)
return summary
def _remove(project, args):
if project.remove_files_by_path(args['<path>'], tree=args['--tree'], target_name=args['--target']):
return 'Folder removed from the project.'
raise Exception('An error occurred removing one of the files.')
def main():
command_parser(execute)(docopt(__doc__))
if __name__ == '__main__':
main()
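# Illustrative invocations (hypothetical project and paths, not taken from
# the usage text above):
#
#   pbxproj folder MyApp.xcodeproj ./Vendor --recursive --target MyApp
#   pbxproj folder --delete MyApp.xcodeproj ./Vendor --tree SOURCE_ROOT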
|
#
# BSD 3-Clause License
#
# Copyright (c) 2020, Jonathan Bac
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import numpy as np
import itertools
import numbers
import multiprocessing as mp
from sklearn.utils.validation import check_random_state
from sklearn.neighbors import NearestNeighbors
from scipy.special import gammainc
from inspect import getmembers, isclass
import skdim
def get_estimators():
local_class_list = [o[1] for o in getmembers(skdim.lid) if isclass(o[1])]
global_class_list = [o[1] for o in getmembers(skdim.gid) if isclass(o[1])]
local_estimators = dict(
zip([str(e).split(".")[-1][:-2] for e in local_class_list], local_class_list)
)
global_estimators = dict(
zip([str(e).split(".")[-1][:-2] for e in global_class_list], global_class_list)
)
return local_estimators, global_estimators
def indComb(NN):
pt1 = np.tile(range(NN), NN)
pt2 = np.repeat(range(NN), NN)
un = pt1 > pt2
pt1 = pt1[un]
pt2 = pt2[un]
return pt1, pt2, np.hstack((pt2[:, None], pt1[:, None]))
def indnComb(NN, n):
if n == 1:
return np.arange(NN).reshape((-1, 1))
prev = indnComb(NN, n - 1)
lastind = prev[:, -1]
ind_cf1 = np.repeat(lastind, NN)
ind_cf2 = np.tile(np.arange(NN), len(lastind))
# ind_cf2 = np.arange(NN)
# for i in range(len(lastind)-1):
# ind_cf2 = np.concatenate((ind_cf2,np.arange(NN)))
new_ind = np.where(ind_cf1 < ind_cf2)[0]
new_ind1 = (new_ind - 1) // NN
new_ind2 = new_ind % NN
new_ind2[new_ind2 == 0] = NN
return np.hstack((prev[new_ind1, :], np.arange(NN)[new_ind2].reshape((-1, 1))))
def efficient_indnComb(n, k, random_generator_):
"""
memory-efficient indnComb:
uniformly takes 5000 samples from itertools.combinations(n,k)
"""
ncomb = binom_coeff(n, k)
pop = itertools.combinations(range(n), k)
targets = set(random_generator_.choice(ncomb, min(ncomb, 5000), replace=False))
return np.array(
list(itertools.compress(pop, map(targets.__contains__, itertools.count())))
)
def lens(vectors):
return np.sqrt(np.sum(vectors ** 2, axis=1))
def hyperBall(n_points, n_dim, radius=1, center=None, random_state=None):
    random_state_ = check_random_state(random_state)
    # Avoid a mutable default argument; an empty/missing center means origin.
    if center is None or len(center) == 0:
        center = np.zeros(n_dim)
r = radius
x = random_state_.normal(size=(n_points, n_dim))
ssq = np.sum(x ** 2, axis=1)
fr = r * gammainc(n_dim / 2, ssq / 2) ** (1 / n_dim) / np.sqrt(ssq)
frtiled = np.tile(fr.reshape(n_points, 1), (1, n_dim))
p = center + np.multiply(x, frtiled)
return p
def proxy(tup):
function, X, Dict = tup
return function(X, **Dict)
def get_nn(X, k, n_jobs=1):
"""Compute the k-nearest neighbors of a dataset np.array (n_samples x n_dims)"""
neigh = NearestNeighbors(n_neighbors=k, n_jobs=n_jobs)
neigh.fit(X)
dists, inds = neigh.kneighbors(return_distance=True)
return dists, inds
def asPointwise(data, class_instance, precomputed_knn=None, n_neighbors=100, n_jobs=1):
"""Use a global estimator as a pointwise one by creating kNN neighborhoods"""
if precomputed_knn is not None:
knn = precomputed_knn
else:
_, knn = get_nn(data, k=n_neighbors, n_jobs=n_jobs)
if n_jobs > 1:
pool = mp.Pool(n_jobs)
results = pool.map(class_instance.fit, [data[i, :] for i in knn])
pool.close()
return [i.dimension_ for i in results]
else:
return [class_instance.fit(data[i, :]).dimension_ for i in knn]
def mean_local_id(local_id, knnidx):
"""
Compute point mean local ID: the mean ID of all neighborhoods in which a point appears
Parameters
----------
local_id : list or np.array
list of local ID for each point
knnidx : np.array
indices of kNN for each point returned by function get_nn
    Returns
-------
mean_neighborhoods_LID : np.array
list of mean local ID for each point
"""
mean_neighborhoods_LID = np.zeros(len(local_id))
for point_i in range(len(local_id)):
# get all points which have this point in their neighbourhoods
all_neighborhoods_with_point_i = np.append(
np.where(knnidx == point_i)[0], point_i
)
# get the mean local ID of these points
mean_neighborhoods_LID[point_i] = local_id[
all_neighborhoods_with_point_i
].mean()
return mean_neighborhoods_LID
def binom_coeff(n, k):
"""
Taken from : https://stackoverflow.com/questions/26560726/python-binomial-coefficient
Compute the number of ways to choose $k$ elements out of a pile of $n.$
Use an iterative approach with the multiplicative formula:
$$\frac{n!}{k!(n - k)!} =
\frac{n(n - 1)\dots(n - k + 1)}{k(k-1)\dots(1)} =
\prod_{i = 1}^{k}\frac{n + 1 - i}{i}$$
Also rely on the symmetry: $C_n^k = C_n^{n - k},$ so the product can
be calculated up to $\min(k, n - k).$
:param n: the size of the pile of elements
:param k: the number of elements to take from the pile
:return: the number of ways to choose k elements out of a pile of n
"""
# When k out of sensible range, should probably throw an exception.
# For compatibility with scipy.special.{comb, binom} returns 0 instead.
if k < 0 or k > n:
return 0
if k == 0 or k == n:
return 1
total_ways = 1
for i in range(min(k, n - k)):
total_ways = total_ways * (n - i) // (i + 1)
return total_ways
def check_random_generator(seed):
"""Turn seed into a numpy.random._generator.Generator' instance
Parameters
----------
seed : None | int | instance of RandomState
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.default_rng()
if isinstance(seed, numbers.Integral):
return np.random.default_rng(seed)
if isinstance(seed, np.random._generator.Generator):
return seed
raise ValueError(
"%r cannot be used to seed a numpy.random._generator.Generator"
" instance" % seed
)
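if __name__ == "__main__":
    # Small illustrative self-checks (not part of the original module).
    assert binom_coeff(5, 2) == 10
    assert binom_coeff(5, 0) == binom_coeff(5, 5) == 1
    # Sampling 100 points in a 3-ball of radius 1: every norm stays <= 1.
    ball = hyperBall(100, 3, radius=1, random_state=0)
    assert ball.shape == (100, 3)
    assert np.all(lens(ball) <= 1.0 + 1e-12)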
|
import argparse
class Namespace(argparse.Namespace):
def __init__(self, prototype=None, **kwargs):
super().__init__(**kwargs)
self.prototype = prototype
def has_own_property(self, prop):
return prop in self.__dict__
def __getattribute__(self, attr):
try:
return super().__getattribute__(attr)
except AttributeError:
if self.prototype is None:
raise
return self.prototype.__getattribute__(attr)
def __getitem__(self, item):
try:
return self.__dict__[item]
except KeyError:
if self.prototype is None:
raise
return self.prototype[item]
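if __name__ == "__main__":
    # Illustrative prototype-chain lookup (not part of the original module).
    base = Namespace(color="blue")
    child = Namespace(prototype=base, size=3)
    assert child.size == 3                      # own attribute
    assert child.color == "blue"                # falls back to the prototype
    assert child["color"] == "blue"             # item access falls back too
    assert child.has_own_property("size")
    assert not child.has_own_property("color")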
|
from django.apps import AppConfig
class BofanConfig(AppConfig):
name = 'bofan'
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.dialogflow_v2.services.session_entity_types import pagers
from google.cloud.dialogflow_v2.types import entity_type
from google.cloud.dialogflow_v2.types import session_entity_type
from google.cloud.dialogflow_v2.types import (
session_entity_type as gcd_session_entity_type,
)
from google.protobuf import field_mask_pb2 # type: ignore
from .transports.base import SessionEntityTypesTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import SessionEntityTypesGrpcTransport
from .transports.grpc_asyncio import SessionEntityTypesGrpcAsyncIOTransport
class SessionEntityTypesClientMeta(type):
"""Metaclass for the SessionEntityTypes client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[SessionEntityTypesTransport]]
_transport_registry["grpc"] = SessionEntityTypesGrpcTransport
_transport_registry["grpc_asyncio"] = SessionEntityTypesGrpcAsyncIOTransport
def get_transport_class(
cls, label: str = None,
) -> Type[SessionEntityTypesTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class SessionEntityTypesClient(metaclass=SessionEntityTypesClientMeta):
"""Service for managing
[SessionEntityTypes][google.cloud.dialogflow.v2.SessionEntityType].
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "dialogflow.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
SessionEntityTypesClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
SessionEntityTypesClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> SessionEntityTypesTransport:
"""Returns the transport used by the client instance.
Returns:
SessionEntityTypesTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def session_entity_type_path(project: str, session: str, entity_type: str,) -> str:
"""Returns a fully-qualified session_entity_type string."""
return "projects/{project}/agent/sessions/{session}/entityTypes/{entity_type}".format(
project=project, session=session, entity_type=entity_type,
)
@staticmethod
def parse_session_entity_type_path(path: str) -> Dict[str, str]:
"""Parses a session_entity_type path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/agent/sessions/(?P<session>.+?)/entityTypes/(?P<entity_type>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, SessionEntityTypesTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the session entity types client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, SessionEntityTypesTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
if is_mtls:
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, SessionEntityTypesTransport):
# transport is a SessionEntityTypesTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=(
Transport == type(self).get_transport_class("grpc")
or Transport == type(self).get_transport_class("grpc_asyncio")
),
)
def list_session_entity_types(
self,
request: Union[session_entity_type.ListSessionEntityTypesRequest, dict] = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListSessionEntityTypesPager:
r"""Returns the list of all session entity types in the
specified session.
This method doesn't work with Google Assistant
integration. Contact Dialogflow support if you need to
use session entities with Google Assistant integration.
Args:
request (Union[google.cloud.dialogflow_v2.types.ListSessionEntityTypesRequest, dict]):
The request object. The request message for
[SessionEntityTypes.ListSessionEntityTypes][google.cloud.dialogflow.v2.SessionEntityTypes.ListSessionEntityTypes].
parent (str):
Required. The session to list all session entity types
from. Format:
``projects/<Project ID>/agent/sessions/<Session ID>`` or
``projects/<Project ID>/agent/environments/<Environment ID>/users/<User ID>/ sessions/<Session ID>``.
If ``Environment ID`` is not specified, we assume
default 'draft' environment. If ``User ID`` is not
specified, we assume default '-' user.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflow_v2.services.session_entity_types.pagers.ListSessionEntityTypesPager:
The response message for
[SessionEntityTypes.ListSessionEntityTypes][google.cloud.dialogflow.v2.SessionEntityTypes.ListSessionEntityTypes].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a session_entity_type.ListSessionEntityTypesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, session_entity_type.ListSessionEntityTypesRequest):
request = session_entity_type.ListSessionEntityTypesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.list_session_entity_types
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListSessionEntityTypesPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def get_session_entity_type(
self,
request: Union[session_entity_type.GetSessionEntityTypeRequest, dict] = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> session_entity_type.SessionEntityType:
r"""Retrieves the specified session entity type.
This method doesn't work with Google Assistant
integration. Contact Dialogflow support if you need to
use session entities with Google Assistant integration.
Args:
request (Union[google.cloud.dialogflow_v2.types.GetSessionEntityTypeRequest, dict]):
The request object. The request message for
[SessionEntityTypes.GetSessionEntityType][google.cloud.dialogflow.v2.SessionEntityTypes.GetSessionEntityType].
name (str):
Required. The name of the session entity type. Format:
``projects/<Project ID>/agent/sessions/<Session ID>/entityTypes/<Entity Type Display Name>``
or
``projects/<Project ID>/agent/environments/<Environment ID>/users/<User ID>/sessions/<Session ID>/entityTypes/<Entity Type Display Name>``.
If ``Environment ID`` is not specified, we assume
default 'draft' environment. If ``User ID`` is not
specified, we assume default '-' user.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflow_v2.types.SessionEntityType:
A session represents a conversation between a Dialogflow agent and an
end-user. You can create special entities, called
session entities, during a session. Session entities
can extend or replace custom entity types and only
exist during the session that they were created for.
All session data, including session entities, is
stored by Dialogflow for 20 minutes.
For more information, see the [session entity
guide](\ https://cloud.google.com/dialogflow/docs/entities-session).
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a session_entity_type.GetSessionEntityTypeRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, session_entity_type.GetSessionEntityTypeRequest):
request = session_entity_type.GetSessionEntityTypeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_session_entity_type]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def create_session_entity_type(
self,
request: Union[
gcd_session_entity_type.CreateSessionEntityTypeRequest, dict
] = None,
*,
parent: str = None,
session_entity_type: gcd_session_entity_type.SessionEntityType = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcd_session_entity_type.SessionEntityType:
r"""Creates a session entity type.
If the specified session entity type already exists,
overrides the session entity type.
This method doesn't work with Google Assistant
integration. Contact Dialogflow support if you need to
use session entities with Google Assistant integration.
Args:
request (Union[google.cloud.dialogflow_v2.types.CreateSessionEntityTypeRequest, dict]):
The request object. The request message for
[SessionEntityTypes.CreateSessionEntityType][google.cloud.dialogflow.v2.SessionEntityTypes.CreateSessionEntityType].
parent (str):
Required. The session to create a session entity type
for. Format:
``projects/<Project ID>/agent/sessions/<Session ID>`` or
``projects/<Project ID>/agent/environments/<Environment ID>/users/<User ID>/ sessions/<Session ID>``.
If ``Environment ID`` is not specified, we assume
default 'draft' environment. If ``User ID`` is not
specified, we assume default '-' user.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
session_entity_type (google.cloud.dialogflow_v2.types.SessionEntityType):
Required. The session entity type to
create.
This corresponds to the ``session_entity_type`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflow_v2.types.SessionEntityType:
A session represents a conversation between a Dialogflow agent and an
end-user. You can create special entities, called
session entities, during a session. Session entities
can extend or replace custom entity types and only
exist during the session that they were created for.
All session data, including session entities, is
stored by Dialogflow for 20 minutes.
For more information, see the [session entity
guide](\ https://cloud.google.com/dialogflow/docs/entities-session).
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, session_entity_type])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a gcd_session_entity_type.CreateSessionEntityTypeRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, gcd_session_entity_type.CreateSessionEntityTypeRequest
):
request = gcd_session_entity_type.CreateSessionEntityTypeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if session_entity_type is not None:
request.session_entity_type = session_entity_type
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.create_session_entity_type
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def update_session_entity_type(
self,
request: Union[
gcd_session_entity_type.UpdateSessionEntityTypeRequest, dict
] = None,
*,
session_entity_type: gcd_session_entity_type.SessionEntityType = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcd_session_entity_type.SessionEntityType:
r"""Updates the specified session entity type.
This method doesn't work with Google Assistant
integration. Contact Dialogflow support if you need to
use session entities with Google Assistant integration.
Args:
request (Union[google.cloud.dialogflow_v2.types.UpdateSessionEntityTypeRequest, dict]):
The request object. The request message for
[SessionEntityTypes.UpdateSessionEntityType][google.cloud.dialogflow.v2.SessionEntityTypes.UpdateSessionEntityType].
session_entity_type (google.cloud.dialogflow_v2.types.SessionEntityType):
Required. The session entity type to
update.
This corresponds to the ``session_entity_type`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Optional. The mask to control which
fields get updated.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflow_v2.types.SessionEntityType:
A session represents a conversation between a Dialogflow agent and an
end-user. You can create special entities, called
session entities, during a session. Session entities
can extend or replace custom entity types and only
exist during the session that they were created for.
All session data, including session entities, is
stored by Dialogflow for 20 minutes.
For more information, see the [session entity
guide](\ https://cloud.google.com/dialogflow/docs/entities-session).
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([session_entity_type, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a gcd_session_entity_type.UpdateSessionEntityTypeRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, gcd_session_entity_type.UpdateSessionEntityTypeRequest
):
request = gcd_session_entity_type.UpdateSessionEntityTypeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if session_entity_type is not None:
request.session_entity_type = session_entity_type
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.update_session_entity_type
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("session_entity_type.name", request.session_entity_type.name),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def delete_session_entity_type(
self,
request: Union[session_entity_type.DeleteSessionEntityTypeRequest, dict] = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes the specified session entity type.
This method doesn't work with Google Assistant
integration. Contact Dialogflow support if you need to
use session entities with Google Assistant integration.
Args:
request (Union[google.cloud.dialogflow_v2.types.DeleteSessionEntityTypeRequest, dict]):
The request object. The request message for
[SessionEntityTypes.DeleteSessionEntityType][google.cloud.dialogflow.v2.SessionEntityTypes.DeleteSessionEntityType].
name (str):
Required. The name of the entity type to delete. Format:
``projects/<Project ID>/agent/sessions/<Session ID>/entityTypes/<Entity Type Display Name>``
or
``projects/<Project ID>/agent/environments/<Environment ID>/users/<User ID>/sessions/<Session ID>/entityTypes/<Entity Type Display Name>``.
If ``Environment ID`` is not specified, we assume
default 'draft' environment. If ``User ID`` is not
specified, we assume default '-' user.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a session_entity_type.DeleteSessionEntityTypeRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, session_entity_type.DeleteSessionEntityTypeRequest):
request = session_entity_type.DeleteSessionEntityTypeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.delete_session_entity_type
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-dialogflow",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("SessionEntityTypesClient",)
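# Illustrative usage sketch (hypothetical project and session IDs; assumes
# application-default credentials are configured -- not part of the
# generated client):
#
#   client = SessionEntityTypesClient()
#   parent = "projects/my-project/agent/sessions/my-session"
#   for session_entity_type in client.list_session_entity_types(parent=parent):
#       print(session_entity_type.name)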
|
from django import forms
from django.shortcuts import get_object_or_404
from django.utils.text import slugify
from text_unidecode import unidecode
from ...product.models import Category
class CategoryForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
self.parent_pk = kwargs.pop('parent_pk')
super(CategoryForm, self).__init__(*args, **kwargs)
if self.instance.parent and self.instance.parent.hidden:
self.fields['hidden'].widget.attrs['disabled'] = True
class Meta:
model = Category
exclude = ['slug']
def save(self, commit=True):
self.instance.slug = slugify(unidecode(self.instance.name))
if self.parent_pk:
self.instance.parent = get_object_or_404(
Category, pk=self.parent_pk)
if self.instance.parent and self.instance.parent.hidden:
self.instance.hidden = True
super(CategoryForm, self).save(commit=commit)
self.instance.set_hidden_descendants(self.cleaned_data['hidden'])
return self.instance
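# Illustrative view-side usage (hypothetical request and pk values, not part
# of the original module):
#
#   form = CategoryForm(request.POST or None, parent_pk=parent_pk)
#   if form.is_valid():
#       category = form.save()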
|
# std
import re
from typing import List
# ours
from jekyll_relative_url_check.abstract import RelativeURLHook
class MarkdownRelativeURLHook(RelativeURLHook):
def __init__(self):
super().__init__()
self.absolute_url_regexs: List[re.Pattern] = [
re.compile(r"\[[^]]*]\(/[^)]*\)")
]
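if __name__ == "__main__":
    # Illustrative check (not part of the original module): the pattern above
    # matches Markdown links whose target is an absolute path.
    pattern = re.compile(r"\[[^]]*]\(/[^)]*\)")
    assert pattern.search("[home](/index.html)")     # absolute link: match
    assert not pattern.search("[home](index.html)")  # relative link: no match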
|
from __future__ import print_function
import matplotlib
from matplotlib import pyplot as plt
plt.switch_backend('agg')
import os
import json
import numpy as np
def extract(file_path):
if not os.path.isfile(file_path):
return -1, -1, -1
with open(file_path, 'r') as f:
lines = f.readlines()
test_roc, test_precision, test_NEF = -1, -1, -1
for line in lines:
if 'test precision' in line:
line = line.strip().split(':')
test_precision = float(line[1])
if 'test roc' in line:
line = line.strip().split(':')
test_roc = float(line[1])
if 'ratio: 0.01, NEF:' in line:
line = line.strip().replace('NEF:', '').split(',')
test_NEF = float(line[1])
return test_roc, test_precision, test_NEF
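# Illustrative examples of the log lines extract() looks for (hypothetical
# values, not from any real output file):
#
#   test precision: 0.812
#   test roc: 0.905
#   ratio: 0.01, NEF: 0.437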
if __name__ == '__main__':
model_list = [
'random_forest_classification',
'xgboost_classification', 'xgboost_regression',
'single_deep_classification', 'single_deep_regression'
]
model_process_num_list = {
'random_forest_classification': [139, 69, 111, 212, 210, 148, 28, 61, 124, 130, 131, 141, 14, 38, 165, 65, 123, 94, 3, 88, 72],
'xgboost_classification': [140, 967, 960, 807, 263, 694, 440, 47, 116, 792, 663, 32, 564, 950, 735, 84, 364, 605, 431, 55, 388],
'xgboost_regression': [187, 6, 514, 507, 880, 440, 605, 718, 754, 409, 586, 214, 753, 65, 294, 911, 721, 81, 321, 545, 280],
'single_deep_classification': [356, 404, 215, 93, 254, 88, 423, 47, 363, 132, 5, 385, 370, 29, 415, 54, 124, 183, 180, 416],
'single_deep_regression': [199, 323, 114, 123, 47, 175, 17, 178, 106, 265, 67, 157, 369, 115, 191, 20, 27, 108, 270, 45],
'ensemble': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
'ensemble_02': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
}
# for model in model_list:
# print('Model: {}'.format(model))
# number = len(model_process_num_list[model])
# hyper_parameter_result_roc = []
# hyper_parameter_result_precision = []
# hyper_parameter_result_NEF = []
#
# for running_process in model_process_num_list[model]:
# test_roc_list, test_precision_list, test_NEF_list = [], [], []
# for idx in range(4):
# file_path = '../output/{}/{}_{}_{}.out'.format(model, model, running_process, idx)
# test_roc, test_precision, test_NEF = extract(file_path)
# if test_roc == -1 and test_precision == -1:
# print('missing file: {}'.format(file_path))
# if test_roc != -1:
# test_roc_list.append(test_roc)
# if test_precision != -1:
# test_precision_list.append(test_precision)
# if test_NEF != -1:
# test_NEF_list.append(test_NEF)
#
# hyper_parameter_result_roc.append(np.mean(test_roc_list))
# hyper_parameter_result_precision.append(np.mean(test_precision_list))
# hyper_parameter_result_NEF.append(np.mean(test_NEF_list))
#
# for running_process, roc, pr, NEF in zip(model_process_num_list[model], hyper_parameter_result_roc, hyper_parameter_result_precision, hyper_parameter_result_NEF):
# print('{}\t{}\t{}\t{}'.format(running_process, roc, pr, NEF))
# print()
    print('On The Last Fold')
model_list = [
'random_forest_classification',
'xgboost_classification', 'xgboost_regression',
'single_deep_classification', 'single_deep_regression',
'baseline',
'ensemble', 'ensemble_02'
]
model_process_num_list = {
'random_forest_classification': [139],
'xgboost_classification': [140],
'xgboost_regression': [187],
'single_deep_classification': [356],
'single_deep_regression': [199],
'baseline': [0],
'ensemble': [0],
'ensemble_02': [0],
}
def update_name(name):
if name == 'random_forest_classification':
name = 'RF-C'
if name == 'xgboost_classification':
name = 'XGB-C'
if name == 'xgboost_regression':
name = 'XGB-R'
if name == 'single_deep_classification':
name = 'NN-C'
if name == 'single_deep_regression':
name = 'NN-R'
if name == 'ensemble':
name = 'Ensemble, model-based'
if name =='ensemble_02':
name = 'Ensemble, max-vote'
if name == 'baseline':
name = 'Similarity Baseline'
return name
name_list, roc_list, pr_list, NEF_list = [], [], [], []
for model in model_list:
print('Model: {}'.format(model))
number = len(model_process_num_list[model])
for running_process in model_process_num_list[model]:
if model == 'ensemble' or model == 'ensemble_02':
file_path = '../output/{}/{}.out'.format(model, running_process)
else:
file_path = '../output/{}/{}_{}_4.out'.format(model, model, running_process)
test_roc, test_pr, test_NEF = extract(file_path)
print('{}\t{}'.format(running_process, test_NEF))
name_list.append(update_name(model))
roc_list.append(test_roc)
pr_list.append(test_pr)
NEF_list.append(test_NEF)
print()
for name,roc,pr,NEF in zip(name_list, roc_list, pr_list, NEF_list):
print('{}&{:.3f}&{:.3f}&{:.3f}\\\\'.format(name, roc, pr, NEF))
|
n1 = int(input('Enter the first number: '))
n2 = int(input('Enter the second number: '))
n3 = int(input('Enter the third number: '))
menor = n1
if n2 < n3 and n2 < n1:
menor = n2
if n3 < n2 and n3 < n1:
menor = n3
maior = n1
if n2 > n3 and n2 > n1:
maior = n2
if n3 > n2 and n3 > n1:
maior = n3
print('{} is the smallest'.format(menor))
print('{} is the largest'.format(maior))
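# Equivalent one-liners using builtins (illustrative alternative, not in the
# original):
#   menor, maior = min(n1, n2, n3), max(n1, n2, n3)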
|
# stdlib
from typing import Dict
from typing import Type
# third party
import statsmodels
# syft relative
from ...generate_wrapper import GenerateWrapper
from ...lib.python.primitive_factory import PrimitiveFactory
from ...lib.python.string import String
from ...proto.lib.statsmodels.family_pb2 import FamilyProto
FAMILY_2_STR: Dict[Type[statsmodels.genmod.families.family.Family], str] = {
statsmodels.genmod.families.family.Binomial: "Binomial",
statsmodels.genmod.families.family.Gamma: "Gamma",
statsmodels.genmod.families.family.Gaussian: "Gaussian",
statsmodels.genmod.families.family.InverseGaussian: "InverseGaussian",
statsmodels.genmod.families.family.NegativeBinomial: "NegativeBinomial",
statsmodels.genmod.families.family.Poisson: "Poisson",
statsmodels.genmod.families.family.Tweedie: "Tweedie",
}
LINK_2_STR: Dict[Type[statsmodels.genmod.families.links.Link], str] = {
statsmodels.genmod.families.links.log: "log",
statsmodels.genmod.families.links.logit: "logit",
statsmodels.genmod.families.links.cauchy: "cauchy",
statsmodels.genmod.families.links.cloglog: "cloglog",
statsmodels.genmod.families.links.identity: "identity",
statsmodels.genmod.families.links.inverse_power: "inverse_power",
statsmodels.genmod.families.links.inverse_squared: "inverse_squared",
statsmodels.genmod.families.links.nbinom: "nbinom",
statsmodels.genmod.families.links.probit: "probit",
statsmodels.genmod.families.links.Power: "Power",
statsmodels.genmod.families.links.NegativeBinomial: "NegativeBinomial",
statsmodels.genmod.families.links.CDFLink: "CDFLink",
statsmodels.genmod.families.links.sqrt: "sqrt",
}
STR_2_FAMILY: Dict[str, Type[statsmodels.genmod.families.family.Family]] = {
v: k for k, v in FAMILY_2_STR.items()
}
STR_2_LINK: Dict[str, Type[statsmodels.genmod.families.links.Link]] = {
v: k for k, v in LINK_2_STR.items()
}
def object2proto(obj: Type[statsmodels.genmod.families.family.Family]) -> FamilyProto:
family_name = FAMILY_2_STR[type(obj)]
link_name = LINK_2_STR[type(obj.link)]
family_name_prim = PrimitiveFactory.generate_primitive(value=family_name)
link_name_prim = PrimitiveFactory.generate_primitive(value=link_name)
family_name_proto = family_name_prim._object2proto()
link_name_proto = link_name_prim._object2proto()
return FamilyProto(family=family_name_proto, link=link_name_proto)
def proto2object(proto: FamilyProto) -> Type[statsmodels.genmod.families.family.Family]:
family_name = str(String._proto2object(proto=proto.family))
link_name = str(String._proto2object(proto=proto.link))
obj = STR_2_FAMILY[family_name](link=STR_2_LINK[link_name])
return obj
for fam in FAMILY_2_STR.keys():
GenerateWrapper(
wrapped_type=fam,
        import_path="statsmodels.genmod.families.family." + fam.__name__,
protobuf_scheme=FamilyProto,
type_object2proto=object2proto,
type_proto2object=proto2object,
)
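# Illustrative round trip (assumes the syft protobuf schemes above are
# importable; not part of the original module):
#
#   family = statsmodels.genmod.families.family.Gaussian()
#   proto = object2proto(family)
#   restored = proto2object(proto)
#   assert isinstance(restored, statsmodels.genmod.families.family.Gaussian)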
|
# Copyright (c) 2015 Clinton Knight. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unified driver for NetApp storage systems.
Supports multiple storage systems of different families and driver modes.
"""
from oslo_log import log
from oslo_utils import importutils
from manila import exception
from manila.i18n import _
from manila.share import driver
from manila.share.drivers.netapp import options
from manila.share.drivers.netapp import utils as na_utils
LOG = log.getLogger(__name__)
MULTI_SVM = 'multi_svm'
SINGLE_SVM = 'single_svm'
DATAONTAP_CMODE_PATH = 'manila.share.drivers.netapp.dataontap.cluster_mode'
# Add new drivers here, no other code changes required.
NETAPP_UNIFIED_DRIVER_REGISTRY = {
'ontap_cluster':
{
MULTI_SVM: DATAONTAP_CMODE_PATH +
'.drv_multi_svm.NetAppCmodeMultiSvmShareDriver',
SINGLE_SVM: DATAONTAP_CMODE_PATH +
'.drv_single_svm.NetAppCmodeSingleSvmShareDriver',
},
}
NETAPP_UNIFIED_DRIVER_DEFAULT_MODE = {
'ontap_cluster': MULTI_SVM,
}
class NetAppDriver(object):
""""NetApp unified share storage driver.
Acts as a factory to create NetApp storage drivers based on the
storage family and driver mode configured.
"""
REQUIRED_FLAGS = ['netapp_storage_family', 'driver_handles_share_servers']
def __new__(cls, *args, **kwargs):
config = kwargs.get('configuration', None)
if not config:
raise exception.InvalidInput(
reason=_('Required configuration not found.'))
config.append_config_values(driver.share_opts)
config.append_config_values(options.netapp_proxy_opts)
na_utils.check_flags(NetAppDriver.REQUIRED_FLAGS, config)
app_version = na_utils.OpenStackInfo().info()
LOG.info('OpenStack OS Version Info: %s', app_version)
kwargs['app_version'] = app_version
driver_mode = NetAppDriver._get_driver_mode(
config.netapp_storage_family, config.driver_handles_share_servers)
return NetAppDriver._create_driver(config.netapp_storage_family,
driver_mode,
*args, **kwargs)
@staticmethod
def _get_driver_mode(storage_family, driver_handles_share_servers):
if driver_handles_share_servers is None:
driver_mode = NETAPP_UNIFIED_DRIVER_DEFAULT_MODE.get(
storage_family.lower())
if driver_mode:
LOG.debug('Default driver mode %s selected.', driver_mode)
else:
raise exception.InvalidInput(
reason=_('Driver mode was not specified and a default '
'value could not be determined from the '
'specified storage family.'))
elif driver_handles_share_servers:
driver_mode = MULTI_SVM
else:
driver_mode = SINGLE_SVM
return driver_mode
@staticmethod
def _create_driver(storage_family, driver_mode, *args, **kwargs):
""""Creates an appropriate driver based on family and mode."""
storage_family = storage_family.lower()
fmt = {'storage_family': storage_family, 'driver_mode': driver_mode}
LOG.info('Requested unified config: %(storage_family)s and '
'%(driver_mode)s.', fmt)
family_meta = NETAPP_UNIFIED_DRIVER_REGISTRY.get(storage_family)
if family_meta is None:
raise exception.InvalidInput(
reason=_('Storage family %s is not supported.')
% storage_family)
driver_loc = family_meta.get(driver_mode)
if driver_loc is None:
raise exception.InvalidInput(
reason=_('Driver mode %(driver_mode)s is not supported '
'for storage family %(storage_family)s.') % fmt)
kwargs['netapp_mode'] = 'proxy'
driver = importutils.import_object(driver_loc, *args, **kwargs)
LOG.info('NetApp driver of family %(storage_family)s and mode '
'%(driver_mode)s loaded.', fmt)
return driver
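# Illustrative sketch (not part of the driver): how the registry above
# resolves a driver class path for each supported mode of 'ontap_cluster'.
if __name__ == "__main__":
    for mode, path in NETAPP_UNIFIED_DRIVER_REGISTRY['ontap_cluster'].items():
        print('%s -> %s' % (mode, path))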
|
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: paganin_filter
:platform: Unix
:synopsis: A plugin to apply the Paganin filter.
.. moduleauthor:: Nghia Vo <scientificsoftware@diamond.ac.uk>
"""
import math
import logging
import numpy as np
import pyfftw.interfaces.scipy_fftpack as fft
from savu.plugins.filters.base_filter import BaseFilter
from savu.plugins.driver.cpu_plugin import CpuPlugin
from savu.plugins.utils import register_plugin, dawn_compatible
@dawn_compatible
@register_plugin
class PaganinFilter(BaseFilter, CpuPlugin):
def __init__(self):
logging.debug("initialising Paganin Filter")
logging.debug("Calling super to make sure that all superclases are " +
" initialised")
super(PaganinFilter, self).__init__("PaganinFilter")
self.filtercomplex = None
self.count = 0
def set_filter_padding(self, in_pData, out_pData):
in_data = self.get_in_datasets()[0]
det_x = in_data.get_data_dimension_by_axis_label('detector_x')
det_y = in_data.get_data_dimension_by_axis_label('detector_y')
pad_det_y = '%s.%s' % (det_y, self.parameters['Padtopbottom'])
pad_det_x = '%s.%s' % (det_x, self.parameters['Padleftright'])
mode = self.parameters['Padmethod']
pad_dict = {'pad_directions': [pad_det_x, pad_det_y], 'pad_mode': mode}
in_pData[0].padding = pad_dict
out_pData[0].padding = pad_dict
def pre_process(self):
self._setup_paganin(*self.get_plugin_in_datasets()[0].get_shape())
def _setup_paganin(self, height, width):
micron = 10 ** (-6)
keV = 1000.0
distance = self.parameters['Distance']
energy = self.parameters['Energy'] * keV
resolution = self.parameters['Resolution'] * micron
wavelength = (1240.0 / energy) * 10.0 ** (-9)
ratio = self.parameters['Ratio']
height1 = height + 2 * self.parameters['Padtopbottom']
width1 = width + 2 * self.parameters['Padleftright']
centery = np.ceil(height1 / 2.0) - 1.0
centerx = np.ceil(width1 / 2.0) - 1.0
# Define the paganin filter
dpx = 1.0 / (width1 * resolution)
dpy = 1.0 / (height1 * resolution)
pxlist = (np.arange(width1) - centerx) * dpx
pylist = (np.arange(height1) - centery) * dpy
pxx = np.zeros((height1, width1), dtype=np.float32)
pxx[:, 0:width1] = pxlist
pyy = np.zeros((height1, width1), dtype=np.float32)
pyy[0:height1, :] = np.reshape(pylist, (height1, 1))
pd = (pxx * pxx + pyy * pyy) * wavelength * distance * math.pi
filter1 = 1.0 + ratio * pd
self.filtercomplex = filter1 + filter1 * 1j
def _paganin(self, data):
pci1 = fft.fft2(np.float32(data))
pci2 = fft.fftshift(pci1) / self.filtercomplex
fpci = np.abs(fft.ifft2(pci2))
result = -0.5 * self.parameters['Ratio'] * np.log(
fpci + self.parameters['increment'])
return result
def process_frames(self, data):
        proj = np.nan_to_num(data[0])  # replace NaNs with zeros
        proj[proj == 0] = 1.0  # avoid zero-intensity pixels before the log
return self._paganin(proj)
def get_max_frames(self):
return 'single'
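# Standalone sketch of the filter construction in _setup_paganin (numpy only;
# the parameter values below are made-up examples, not plugin defaults).
def _paganin_filter_demo(height=64, width=64, distance=1.0, energy_kev=20.0,
                         resolution_um=1.0, ratio=100.0):
    energy = energy_kev * 1000.0
    resolution = resolution_um * 1e-6
    wavelength = (1240.0 / energy) * 1e-9
    centery = np.ceil(height / 2.0) - 1.0
    centerx = np.ceil(width / 2.0) - 1.0
    dpx = 1.0 / (width * resolution)
    dpy = 1.0 / (height * resolution)
    px = (np.arange(width) - centerx) * dpx
    py = (np.arange(height) - centery) * dpy
    pd = (px[None, :] ** 2 + py[:, None] ** 2) * wavelength * distance * math.pi
    return 1.0 + ratio * pd  # real part of the complex filter used above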
|
from random import choice, choices, randint, random, sample, shuffle
PHONEMES = {}
PHO_CON = []
PHO_VOW = []
class Phoneme:
def __init__(self, key, **kwargs):
self.start = [key]
self.mid = [key]
self.end = [key]
self.vowel = False
self.no_start = False
self.no_mid = False
self.no_end = False
for keyword in kwargs:
self.__setattr__(keyword, kwargs[keyword])
PHONEMES[key] = self
if self.vowel:
PHO_VOW.append(self)
else:
PHO_CON.append(self)
Phoneme('A', start=['a'], mid=['a','ai','a_e'], end=['a','ae','ay'], vowel=True)
Phoneme('a', vowel=True)
Phoneme('b')
Phoneme('k', start=['c','k'], end=['c','ck','k'])
Phoneme('d')
Phoneme('E', start=['e', 'ee', 'ea'], mid=['e','e_e','ea','ie'],
end=['e','ie','y'], vowel=True)
Phoneme('e', mid=['e','ea'], no_end=True, vowel=True)
Phoneme('f', start=['f','ph'], mid=['f','ph'], end=['f','ph'])
Phoneme('g')
Phoneme('h')
Phoneme('I', start=['i','ai'], mid=['i_e','igh','y'], end=['ai','ie','igh','ye'],
vowel=True)
Phoneme('i', no_end=True, vowel=True)
Phoneme('j', start=['j','g'], mid=['dge','j','g'], end=['dge'])
Phoneme('l')
Phoneme('m', end=['m','mn'])
Phoneme('n', start=['n','gn','kn'])
Phoneme('O', start=['o', 'oa'], mid=['o','o_e','oa'], end=['o','oe','oah'],
vowel=True)
Phoneme('o', vowel=True)
Phoneme('p')
Phoneme('kw', start=['qu'], no_mid=True, no_end=True)
Phoneme('r', start=['r','wr'])
Phoneme('s', start=['s','sc','ce','ci','cy'], mid=['s','c'], end=['s','ce'])
Phoneme('t')
Phoneme('U', end=['u_e','ew'], vowel=True)
Phoneme('u', start=['a','u'], mid=['u','o'], no_end=True, vowel=True)
Phoneme('v')
Phoneme('w')
Phoneme('ks', mid=['x','xc'], end=['x'], no_start=True)
Phoneme('y')
Phoneme('z')
Phoneme('oo', mid=['oo','u'], end=['oo','u_e','ew'], vowel=True)
Phoneme('oi', mid=['oi','oy'], end=['oy'], no_start=True, vowel=True)
Phoneme('ou', start=['ou','ow'], mid=['ou'], end=['ow'], vowel=True)
Phoneme('aw', start=['aw','au'], mid=['aw','au'], end=['aw','au'], vowel=True)
Phoneme('ar', mid=['_ar'], end=['_ar'])
Phoneme('sh')
Phoneme('wh', no_mid=True, no_end=True)
Phoneme('ch', mid=['ch','tch'], end=['ch','tch'])
Phoneme('th')
Phoneme('ng', mid=['ng','nk'], end=['ng','nk'], no_start=True)
Phoneme('er', start=['er','ur'], mid=['_er','_ir','_ur'], end=['_er','_ir','_ur'])
def generate_name(theme, syllables=2):
next_vowel = False
if random() < 0.5:
name = choices(theme.c_start, theme.c_start_weight)[0]
next_vowel = True
else:
name = choices(theme.v_start, theme.v_start_weight)[0]
syllables -= 1
for i in range(syllables):
if next_vowel:
if i == syllables - 1:
name += choices(theme.v_end, theme.v_end_weight)[0]
else:
name += choices(theme.v_mid, theme.v_mid_weight)[0]
else:
if i == syllables - 1:
name += choices(theme.c_end, theme.c_end_weight)[0]
else:
name += choices(theme.c_mid, theme.c_mid_weight)[0]
next_vowel = not next_vowel
    # fill placeholder '_' slots (e.g. '_ar', '_er') with a consonant grapheme;
    # the chosen grapheme may itself contain '_', so strip any that remain
    name = name.replace('_', choices(theme.c_mid, theme.c_mid_weight)[0])
    name = name.replace('_', '')
return name.title()
def generate_dialogue(theme):
words = []
length = randint(3,9)
for i in range(length):
words.append(generate_name(theme,choice([2,3,3,4,4,5])))
if i > 0 and not caps:
words[i] = words[i].lower()
caps = False
if i == length - 1 or random() < 0.25:
punc = choice(',.?!')
if punc == ',' and i == length - 1:
punc = '.'
words[i] = words[i] + punc
if punc != ',':
caps = True
return ' '.join(words)
class Theme:
def __init__(self):
self.reset()
def reset(self):
self.consonants = sample(PHO_CON, 9)
self.vowels = sample(PHO_VOW, 5)
self.c_start = []
self.c_start_weight = []
self.c_mid = []
self.c_mid_weight = []
self.c_end = []
self.c_end_weight = []
self.v_start = []
self.v_start_weight = []
self.v_mid = []
self.v_mid_weight = []
self.v_end = []
self.v_end_weight = []
w = 1
for p in self.consonants:
if not p.no_start:
self.c_start.append(choice(p.start))
self.c_start_weight.append(w)
if not p.no_mid:
self.c_mid.append(choice(p.mid))
self.c_mid_weight.append(w)
if not p.no_end:
self.c_end.append(choice(p.end))
self.c_end_weight.append(w)
w += 1
w = 1
for p in self.vowels:
if not p.no_start:
self.v_start.append(choice(p.start))
self.v_start_weight.append(w)
if not p.no_mid:
self.v_mid.append(choice(p.mid))
self.v_mid_weight.append(w)
if not p.no_end:
self.v_end.append(choice(p.end))
self.v_end_weight.append(w)
w += 1
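if __name__ == "__main__":
    # Quick demo: build a random theme, print a few generated names and a
    # line of nonsense dialogue (all output is random by construction).
    theme = Theme()
    for _ in range(5):
        print(generate_name(theme, syllables=randint(2, 4)))
    print(generate_dialogue(theme))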
|
# -*- coding: utf-8 -*-
import unittest
import pykintone
from pykintone.model import kintoneModel
import tests.envs as envs
class TestAppModelSimple(kintoneModel):
def __init__(self):
super(TestAppModelSimple, self).__init__()
self.my_key = ""
self.stringField = ""
class TestComment(unittest.TestCase):
def test_comment(self):
app = pykintone.load(envs.FILE_PATH).app()
model = TestAppModelSimple()
model.my_key = "comment_test"
model.stringField = "comment_test_now"
result = app.create(model)
        self.assertTrue(result.ok)  # confirm the record for the comment test was created
_record_id = result.record_id
# create comment
        r_created = app.comment(_record_id).create("コメントのテスト")  # "comment test"
self.assertTrue(r_created.ok)
        # requires that an Administrator user is registered in kintone
        r_created_m = app.comment(_record_id).create("メンションのテスト", [("Administrator", "USER")])  # "mention test"
self.assertTrue(r_created_m.ok)
# select comment
r_selected = app.comment(_record_id).select(True, 0, 10)
self.assertTrue(r_selected.ok)
        self.assertEqual(2, len(r_selected.raw_comments))
comments = r_selected.comments()
        self.assertEqual(1, len(comments[-1].mentions))
# delete comment
for c in comments:
r_deleted = app.comment(_record_id).delete(c.comment_id)
self.assertTrue(r_deleted.ok)
r_selected = app.comment(_record_id).select()
self.assertEqual(0, len(r_selected.raw_comments))
        # clean up: delete the record created for this test
app.delete(_record_id)
|
#!/usr/bin/python3
#-*- coding: utf-8 -*-
"""
Python module: 'img.py'
author: Julien Straubhaar
date: jan-2018
Definition of classes for images and point sets (gslib), and relative
functions.
"""
import os
import numpy as np
import matplotlib.pyplot as plt
# ============================================================================
class Img(object):
"""
Defines an image as a 3D grid with variable(s) / attribute(s):
nx, ny, nz: (int) number of grid cells in each direction
sx, sy, sz: (float) cell size in each direction
ox, oy, oz: (float) origin of the grid (bottom-lower-left corner)
nv: (int) number of variable(s) / attribute(s)
val: ((nv,nz,ny,nx) array) attribute(s) / variable(s) values
varname: (list of string (or string)) variable names
name: (string) name of the image
"""
def __init__(self,
nx=0, ny=0, nz=0,
sx=1.0, sy=1.0, sz=1.0,
ox=0.0, oy=0.0, oz=0.0,
nv=0, val=np.nan, varname=None,
name=""):
"""
Init function for the class:
:param val: (int/float or tuple/list/ndarray) value(s) of the new
variable:
if type is int/float: constant variable
if tuple/list/ndarray: must contain nv*nx*ny*nz values,
which are put in the image (after reshape if needed)
"""
self.nx = int(nx)
self.ny = int(ny)
self.nz = int(nz)
self.sx = float(sx)
self.sy = float(sy)
self.sz = float(sz)
self.ox = float(ox)
self.oy = float(oy)
self.oz = float(oz)
self.nv = int(nv)
valarr = np.asarray(val, dtype=float) # numpy.ndarray (possibly 0-dimensional)
if valarr.size == 1:
valarr = valarr.flat[0] * np.ones(nx*ny*nz*nv)
elif valarr.size != nx*ny*nz*nv:
            print('ERROR: val does not have an acceptable size')
return
self.val = valarr.reshape(nv, nz, ny, nx)
if varname is None:
self.varname = ["V{:d}".format(i) for i in range(nv)]
else:
            varname = list(np.asarray(varname).reshape(-1))
            if len(varname) != nv:
                print('ERROR: varname does not have an acceptable size')
                return
            self.varname = varname
self.name = name
# ------------------------------------------------------------------------
def __str__(self):
"""Returns name of the image: string representation of Image object"""
return self.name
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def set_default_varname(self):
"""
Sets default variable names: varname = ('V0', 'V1',...).
"""
self.varname = ["V{:d}".format(i) for i in range(self.nv)]
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def set_varname(self, varname=None, ind=-1):
"""
Sets name of the variable of the given index (if varname is None:
'V' appended by the variable index is used as varame).
"""
if ind < 0:
ii = self.nv + ind
else:
ii = ind
if ii < 0 or ii >= self.nv:
print("Nothing is done! (invalid index)")
return
if varname is None:
varname = "V{:d}".format(ii)
self.varname[ii] = varname
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def set_dimension(self, nx, ny, nz, newval=np.nan):
"""
Sets dimensions and update shape of values array (by possible
truncation or extension):
:param nx, ny, nz: (int) dimensions (number of cells) in x, y, z
direction
:param newval: (float) new value to insert if the array of values
has to be extended
"""
# Truncate val array (along reduced dimensions)
self.val = self.val[:, 0:nz, 0:ny, 0:nx]
# Extend val array when needed:
for (n, i) in zip([nx, ny, nz], [3, 2, 1]):
if n > self.val.shape[i]:
s = [j for j in self.val.shape]
s[i] = n - self.val.shape[i]
self.val = np.concatenate((self.val, newval * np.ones(s)), i)
# Update nx, ny, nz
self.nx = nx
self.ny = ny
self.nz = nz
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def set_spacing(self, sx, sy, sz):
"""
Sets cell size (sx, sy, sz).
"""
self.sx = float(sx)
self.sy = float(sy)
self.sz = float(sz)
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def set_origin(self, ox, oy, oz):
"""
Sets grid origin (ox, oy, oz).
"""
self.ox = float(ox)
self.oy = float(oy)
self.oz = float(oz)
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def set_grid(self, nx, ny, nz, sx, sy, sz, ox, oy, oz, newval=np.nan):
"""
Sets grid (dimension, cell size, and origin).
"""
self.set_dimension(nx, ny, nz, newval)
self.set_spacing(sx, sy, sz)
self.set_origin(ox, oy, oz)
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def resize(self,
ix0=0, ix1=None,
iy0=0, iy1=None,
iz0=0, iz1=None,
iv0=0, iv1=None,
newval=np.nan,
newvarname=""):
"""
Resizes the image.
According to the x(, y, z) direction, the slice from ix0 to ix1-1
(iy0 to iy1-1, iz0 to iz1-1) is considered (if None, ix1(, iy1, iz1)
        is set to nx(, ny, nz)), moving the origin from ox(, oy, oz)
to ox+ix0*sx(, oy+iy0*sy, oz+iz0*sz), and inserting value newval at
possible new locations:
:param ix0, ix1: (int or None) indices for x direction ix0 < ix1
:param iy0, iy1: (int or None) indices for y direction iy0 < iy1
:param iz0, iz1: (int or None) indices for z direction iz0 < iz1
:param iv0, iv1: (int or None) indices for v direction iv0 < iv1
:param newval: (float) new value to insert at possible new location
:param newvarname: (string) prefix for new variable name(s)
"""
if ix1 is None:
ix1 = self.nx
if iy1 is None:
iy1 = self.ny
if iz1 is None:
iz1 = self.nz
if iv1 is None:
iv1 = self.nv
if ix0 >= ix1:
print("Nothing is done! (invalid indices along x)")
return
if iy0 >= iy1:
print("Nothing is done! (invalid indices along y)")
return
if iz0 >= iz1:
print("Nothing is done! (invalid indices along z)")
return
if iv0 >= iv1:
print("Nothing is done! (invalid indices along v)")
return
initShape = self.val.shape
# Compute number of cell(s) to prepend (n0) and to append (n1) in each
# direction
n0 = -np.minimum([iv0, iz0, iy0, ix0], 0) # element-wise minimum
        n1 = np.maximum(np.array([iv1, iz1, iy1, ix1]) - self.val.shape, 0) # element-wise maximum
# Truncate val array (along reduced dimensions)
self.val = self.val[np.max([iv0, 0]):iv1,
np.max([iz0, 0]):iz1,
np.max([iy0, 0]):iy1,
np.max([ix0, 0]):ix1]
# Extend val array when needed:
for i in range(4):
s0 = [j for j in self.val.shape]
s1 = [j for j in self.val.shape]
s0[i] = n0[i]
s1[i] = n1[i]
self.val = np.concatenate((newval * np.ones(s0), self.val, newval * np.ones(s1)), i)
# Update varname
# n0 = -np.min([iv0, 0]) # number of new variable(s) to prepend
# n1 = np.max([iv1-initshape[0], 0]) # number of new variable(s) to append
        self.varname = [newvarname + '{}'.format(i) for i in range(n0[0])] +\
                       [self.varname[i]
                        for i in range(np.max([iv0, 0]), np.min([iv1, initShape[0]]))] +\
                       [newvarname + '{}'.format(n0[0]+i) for i in range(n1[0])]
# Update nx, ny, nz, nv
self.nx = self.val.shape[3]
self.ny = self.val.shape[2]
self.nz = self.val.shape[1]
self.nv = self.val.shape[0]
# Update ox, oy, oz
self.ox = self.ox + ix0 * self.sx
self.oy = self.oy + iy0 * self.sy
self.oz = self.oz + iz0 * self.sz
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def insert_var(self, val=np.nan, varname=None, ind=0):
"""
Inserts a variable at a given index:
:param val: (int/float or tuple/list/ndarray) value(s) of the new
variable:
if type is int/float: constant variable
if tuple/list/ndarray: must contain nx*ny*nz values
:param varname: (string or None) name of the new variable
:param ind: (int) index where the new variable is inserted
"""
if ind < 0:
ii = self.nv + ind
else:
ii = ind
if ii < 0 or ii > self.nv:
print("Nothing is done! (invalid index)")
return
valarr = np.asarray(val, dtype=float) # numpy.ndarray (possibly 0-dimensional)
if valarr.size == 1:
valarr = valarr.flat[0] * np.ones(self.nxyz())
elif valarr.size != self.nxyz():
            print('ERROR: val does not have an acceptable size')
return
# Extend val
self.val = np.concatenate((self.val[0:ii,...],
valarr.reshape(1, self.nz, self.ny, self.nx),
self.val[ii:,...]),
0)
# Extend varname list
if varname is None:
varname = "V{:d}".format(self.nv)
self.varname.insert(ii, varname)
# Update nv
self.nv = self.nv + 1
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def append_var(self, val=np.nan, varname=None):
"""
Appends one variable:
:param val: (int/float or tuple/list/ndarray) value(s) of the new
variable:
if type is int/float: constant variable
if tuple/list/ndarray: must contain nx*ny*nz values
:param varname: (string or None) name of the new variable
"""
self.insert_var(val=val, varname=varname, ind=self.nv)
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def remove_var(self, ind=-1):
"""
Removes one variable (of given index).
"""
if ind < 0:
ii = self.nv + ind
else:
ii = ind
if ii < 0 or ii >= self.nv:
print("Nothing is done! (invalid index)")
return
# Update val array
        iv = [i for i in range(self.nv)]
del iv[ii]
self.val = self.val[iv,...]
# Update varname list
del self.varname[ii]
# Update nv
self.nv = self.nv - 1
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def remove_allvar(self):
"""
Removes all variables.
"""
# Update val array
self.val = np.zeros((0, self.nz, self.ny, self.nx))
# Update varname list
self.varname = []
# Update nv
self.nv = 0
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def set_var(self, val=np.nan, varname=None, ind=-1):
"""
Sets one variable (of given index):
:param val: (int/float or tuple/list/ndarray) value(s) of the new
variable:
if type is int/float: constant variable
if tuple/list/ndarray: must contain nx*ny*nz values
:param varname: (string or None) name of the variable
:param ind: (int) index where the variable to be set
"""
if ind < 0:
ii = self.nv + ind
else:
ii = ind
if ii < 0 or ii >= self.nv:
print("Nothing is done! (invalid index)")
return
valarr = np.asarray(val, dtype=float) # numpy.ndarray (possibly 0-dimensional)
if valarr.size == 1:
valarr = valarr.flat[0] * np.ones(self.nxyz())
elif valarr.size != self.nxyz():
            print('ERROR: val does not have an acceptable size')
return
# Set variable of index ii
self.val[ii,...] = valarr.reshape(self.nz, self.ny, self.nx)
# Set variable name of index ii
if varname is not None:
self.varname[ii] = varname
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def extract_var(self, indlist):
"""
Extracts variable(s) (of given index-es):
:param indlist: (int or list of ints) index or list of index-es of the
variable(s) to be extracted (kept)
"""
indlist = list(np.asarray(indlist).flatten())
indlist = [self.nv + i if i < 0 else i for i in indlist]
if sum([i >= self.nv or i < 0 for i in indlist]) > 0:
print("Nothing is done! (invalid index list)")
return
# Update val array
self.val = self.val[indlist,...]
# Update varname list
self.varname = [self.varname[i] for i in indlist]
# Update nv
self.nv = len(indlist)
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
    def get_unique_one_var(self, ind=0):
"""
Gets unique values of one variable (of given index):
:param ind: (int) index of the variable
:return: (1-dimensional array) unique values of the variable
"""
if ind < 0:
ii = self.nv + ind
else:
ii = ind
if ii < 0 or ii >= self.nv:
print("Nothing is done! (invalid index)")
return
        uval = [val for val in np.unique(self.val[ii,...]).reshape(-1)
if ~np.isnan(val)]
return (uval)
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def get_prop_one_var(self, ind=0, density=True):
"""
Gets proportions (density or count) of unique values of one
variable (of given index):
:param ind: (int) index of the variable
:param density: (bool) computes densities if True and counts otherwise
:return: (list (of length 2) of 1-dimensional array) out:
out[0]: (1-dimensional array) unique values of
the variable
out[1]: (1-dimensional array) densities or counts of
the unique values
"""
if ind < 0:
ii = self.nv + ind
else:
ii = ind
if ii < 0 or ii >= self.nv:
print("Nothing is done! (invalid index)")
return
        uv, cv = np.unique(self.val[ii,...], return_counts=True)
cv = cv[~np.isnan(uv)]
uv = uv[~np.isnan(uv)]
if density:
cv = cv / np.sum(cv)
return ([uv, cv])
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def get_unique(self):
"""
Gets unique values among all variables:
:return: (1-dimensional array) unique values found in any variable
"""
uval = [val for val in np.unique(self.val).reshape(-1)
if ~np.isnan(val)]
return (uval)
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def get_prop(self, density=True):
"""
Gets proportions (density or count) of unique values for each variable:
:param density: (bool) computes densities if True and counts otherwise
:return: (list (of length 2) of array) out:
out[0]: (1-dimensional array) unique values found in
any variable
out[1]: ((self.nv, len(out[0])) array) densities or
counts of the unique values:
out[i,j]: density or count of the j-th unique
value for the i-th variable
"""
        uv_all = np.asarray(self.get_unique())
n = len(uv_all)
cv_all = np.zeros(shape=(self.nv, n))
for i in range(self.nv):
uv, cv = self.get_prop_one_var(ind=i, density=density)
for j in range(n):
cv_all[i, uv_all==uv[j]] = cv[j]
return ([uv_all, cv_all])
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def flipx(self):
"""
Flips variable values according to x direction.
"""
self.val = self.val[:,:,:,::-1]
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def flipy(self):
"""
Flips variable values according to y direction.
"""
self.val = self.val[:,:,::-1,:]
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def flipz(self):
"""
Flips variable values according to z direction.
"""
self.val = self.val[:,::-1,:,:]
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def flipv(self):
"""
Flips variable values according to v direction.
"""
self.val = self.val[::-1,:,:,:]
self.varname = self.varname[::-1]
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def permxy(self):
"""
Permutes x and y directions.
"""
newval = np.zeros((self.nv, self.nz, self.nx, self.ny))
for i in range(self.nv):
for j in range(self.nz):
newval[i, j, :, :] = self.val[i, j, :, :].T
self.val = newval
ntmp, stmp, otmp = self.nx, self.sx, self.ox
self.nx, self.sx, self.ox = self.ny, self.sy, self.oy
self.ny, self.sy, self.oy = ntmp, stmp, otmp
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def permxz(self):
"""
Permutes x and z directions.
"""
newval = np.zeros((self.nv, self.nx, self.ny, self.nz))
for i in range(self.nv):
for j in range(self.ny):
newval[i, :, j, :] = self.val[i, :, j, :].T
self.val = newval
ntmp, stmp, otmp = self.nx, self.sx, self.ox
self.nx, self.sx, self.ox = self.nz, self.sz, self.oz
self.nz, self.sz, self.oz = ntmp, stmp, otmp
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def permyz(self):
"""
Permutes y and z directions.
"""
newval = np.zeros((self.nv, self.ny, self.nz, self.nx))
for i in range(self.nv):
for j in range(self.nx):
newval[i, :, :, j] = self.val[i, :, :, j].T
self.val = newval
ntmp, stmp, otmp = self.ny, self.sy, self.oy
self.ny, self.sy, self.oy = self.nz, self.sz, self.oz
self.nz, self.sz, self.oz = ntmp, stmp, otmp
# ------------------------------------------------------------------------
def nxyzv(self):
return (self.nx * self.ny * self.nz * self.nv)
def nxyz(self):
return (self.nx * self.ny * self.nz)
def nxy(self):
return (self.nx * self.ny)
def nxz(self):
return (self.nx * self.nz)
def nyz(self):
return (self.ny * self.nz)
def xmin(self):
return (self.ox)
def ymin(self):
return (self.oy)
def zmin(self):
return (self.oz)
def xmax(self):
return (self.ox + self.nx * self.sx)
def ymax(self):
return (self.oy + self.ny * self.sy)
def zmax(self):
return (self.oz + self.nz * self.sz)
def x(self):
"""
Returns 1-dimensional array of x coordinates.
"""
return (self.ox + 0.5 * self.sx + self.sx * np.arange(self.nx))
def y(self):
"""
Returns 1-dimensional array of y coordinates.
"""
return (self.oy + 0.5 * self.sy + self.sy * np.arange(self.ny))
def z(self):
"""
Returns 1-dimensional array of z coordinates.
"""
return (self.oz + 0.5 * self.sz + self.sz * np.arange(self.nz))
def vmin(self):
return (np.nanmin(self.val.reshape(self.nv,self.nxyz()),axis=1))
def vmax(self):
return (np.nanmax(self.val.reshape(self.nv,self.nxyz()),axis=1))
# ============================================================================
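# ----------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the original API): build a
# 2-variable image on a 4 x 3 x 1 grid and exercise a few methods above.
def _img_demo():
    im = Img(nx=4, ny=3, nz=1, nv=2, val=0.0, name="demo")
    im.set_var(val=np.arange(12, dtype=float), varname="ramp", ind=0)
    im.append_var(val=1.5, varname="constant")  # nv becomes 3
    im.flipx()                                  # reverse the x axis in place
    return im.get_unique()                      # unique values over all vars
# ----------------------------------------------------------------------------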
# ============================================================================
class PointSet(object):
"""
Defines a point set:
npt: (int) size of the point set (number of points)
nv: (int) number of variables (including x, y, z coordinates)
val: ((nv,npt) array) attribute(s) / variable(s) values
varname: (list of string (or string)) variable names
name: (string) name of the point set
"""
def __init__(self,
npt=0,
nv=0, val=np.nan, varname=None,
name=""):
"""
        Init function for the class:
:param val: (int/float or tuple/list/ndarray) value(s) of the new
variable:
if type is int/float: constant variable
if tuple/list/ndarray: must contain npt values
"""
self.npt = int(npt)
self.nv = int(nv)
valarr = np.asarray(val, dtype=float) # numpy.ndarray (possibly 0-dimensional)
if valarr.size == 1:
valarr = valarr.flat[0] * np.ones(npt*nv)
elif valarr.size != npt*nv:
            print('ERROR: val does not have an acceptable size')
return
self.val = valarr.reshape(nv, npt)
if varname is None:
self.varname = []
if nv > 0:
self.varname.append("X")
if nv > 1:
self.varname.append("Y")
if nv > 2:
self.varname.append("Z")
if nv > 3:
for i in range(3,nv):
self.varname.append("V{:d}".format(i-3))
else:
            varname = list(np.asarray(varname).reshape(-1))
            if len(varname) != nv:
                print('ERROR: varname does not have an acceptable size')
                return
            self.varname = varname
self.name = name
# ------------------------------------------------------------------------
def set_default_varname(self):
"""
Sets default variable names: 'X', 'Y', 'Z', 'V0', 'V1', ...
"""
self.varname = []
if self.nv > 0:
self.varname.append("X")
if self.nv > 1:
self.varname.append("Y")
if self.nv > 2:
self.varname.append("Z")
if self.nv > 3:
for i in range(3,self.nv):
self.varname.append("V{:d}".format(i-3))
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def set_varname(self, varname=None, ind=-1):
"""
Sets name of the variable of the given index (if varname is None:
'V' appended by the variable index is used as varname).
"""
if ind < 0:
ii = self.nv + ind
else:
ii = ind
if ii < 0 or ii >= self.nv:
print("Nothing is done! (invalid index)")
return
if varname is None:
varname = "V{:d}".format(ii)
self.varname[ii] = varname
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def insert_var(self, val=np.nan, varname=None, ind=0):
"""
Inserts a variable at a given index:
:param val: (int/float or tuple/list/ndarray) value(s) of the new
variable:
if type is int/float: constant variable
if tuple/list/ndarray: must contain npt values
:param varname: (string or None) name of the new variable
:param ind: (int) index where the variable to be set
"""
if ind < 0:
ii = self.nv + ind
else:
ii = ind
if ii < 0 or ii > self.nv:
print("Nothing is done! (invalid index)")
return
valarr = np.asarray(val, dtype=float) # numpy.ndarray (possibly 0-dimensional)
if valarr.size == 1:
valarr = valarr.flat[0] * np.ones(self.npt)
elif valarr.size != self.npt:
            print('ERROR: val does not have an acceptable size')
return
# Extend val
self.val = np.concatenate((self.val[0:ii,...],
valarr.reshape(1, self.npt),
self.val[ii:,...]),
0)
# Extend varname list
if varname is None:
varname = "V{:d}".format(self.nv)
self.varname.insert(ii, varname)
# Update nv
self.nv = self.nv + 1
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def append_var(self, val=np.nan, varname=None):
"""
Appends one variable:
:param val: (int/float or tuple/list/ndarray) value(s) of the new
variable:
if type is int/float: constant variable
if tuple/list/ndarray: must contain npt values
:param varname: (string or None) name of the new variable
"""
self.insert_var(val=val, varname=varname, ind=self.nv)
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def remove_var(self, ind=-1):
"""
Removes one variable (of given index).
"""
if ind < 0:
ii = self.nv + ind
else:
ii = ind
if ii < 0 or ii >= self.nv:
print("Nothing is done! (invalid index)")
return
# Update val array
        iv = [i for i in range(self.nv)]
del iv[ii]
self.val = self.val[iv,...]
# Update varname list
del self.varname[ii]
# Update nv
self.nv = self.nv - 1
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def remove_allvar(self):
"""
Removes all variables.
"""
# Update val array
self.val = np.zeros((0, self.npt))
# Update varname list
self.varname = []
# Update nv
self.nv = 0
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def set_var(self, val=np.nan, varname=None, ind=-1):
"""
Sets one variable (of given index):
:param val: (int/float or tuple/list/ndarray) value(s) of the new
variable:
if type is int/float: constant variable
if tuple/list/ndarray: must contain npt values
:param varname: (string or None) name of the new variable
:param ind: (int) index where the variable to be set
"""
if ind < 0:
ii = self.nv + ind
else:
ii = ind
if ii < 0 or ii >= self.nv:
print("Nothing is done! (invalid index)")
return
valarr = np.asarray(val, dtype=float) # numpy.ndarray (possibly 0-dimensional)
if valarr.size == 1:
valarr = valarr.flat[0] * np.ones(self.npt)
elif valarr.size != self.npt:
            print('ERROR: val does not have an acceptable size')
return
# Set variable of index ii
self.val[ii,...] = valarr.reshape(self.npt)
# Set variable name of index ii
if varname is not None:
self.varname[ii] = varname
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def extract_var(self, indlist):
"""
Extracts variable(s) (of given index-es):
:param indlist: (int or list of ints) index or list of index-es of the
variable(s) to be extracted (kept)
"""
indlist = list(np.asarray(indlist).flatten())
indlist = [self.nv + i if i < 0 else i for i in indlist]
if sum([i >= self.nv or i < 0 for i in indlist]) > 0:
print("Nothing is done! (invalid index list)")
return
# Update val array
self.val = self.val[indlist,...]
# Update varname list
self.varname = [self.varname[i] for i in indlist]
# Update nv
self.nv = len(indlist)
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
    def get_unique(self, ind=0):
"""
Gets unique values of one variable (of given index):
:param ind: (int) index of the variable
:return: (1-dimensional array) unique values of the variable
"""
if ind < 0:
ii = self.nv + ind
else:
ii = ind
if ii < 0 or ii >= self.nv:
print("Nothing is done! (invalid index)")
return
        return (np.unique(self.val[ii,...]))
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def get_prop_one_var(self, ind=0, density=True):
"""
Gets proportions (density or count) of unique values of one
variable (of given index):
:param ind: (int) index of the variable
:param density: (bool) computes densities if True and counts otherwise
:return: (list (of length 2) of 1-dimensional array) out:
out[0]: (1-dimensional array) unique values of
the variable
out[1]: (1-dimensional array) densities or counts of
the unique values
"""
if ind < 0:
ii = self.nv + ind
else:
ii = ind
if ii < 0 or ii >= self.nv:
print("Nothing is done! (invalid index)")
return
        uv, cv = np.unique(self.val[ii,...], return_counts=True)
cv = cv[~np.isnan(uv)]
uv = uv[~np.isnan(uv)]
if density:
cv = cv / np.sum(cv)
return ([uv, cv])
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def to_dict(self):
"""
Returns PointSet as a dictionary
"""
return {name: values for name, values in zip(self.varname, self.val)}
# ------------------------------------------------------------------------
def x(self):
return(self.val[0])
def y(self):
return(self.val[1])
def z(self):
return(self.val[2])
def xmin(self):
return (np.min(self.val[0]))
def ymin(self):
return (np.min(self.val[1]))
def zmin(self):
return (np.min(self.val[2]))
def xmax(self):
return (np.max(self.val[0]))
def ymax(self):
return (np.max(self.val[1]))
def zmax(self):
return (np.max(self.val[2]))
# ============================================================================
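# ----------------------------------------------------------------------------
# Minimal usage sketch (illustrative): a 3-point set with coordinates and one
# extra variable, exported as a dictionary.
def _pointset_demo():
    ps = PointSet(npt=3, nv=4, val=0.0, name="demo")  # vars: X, Y, Z, V0
    ps.set_var(val=[0.0, 1.0, 2.0], varname="X", ind=0)
    ps.append_var(val=[10.0, 20.0, 30.0], varname="V1")
    return ps.to_dict()
# ----------------------------------------------------------------------------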
# ----------------------------------------------------------------------------
def copyImg(im, varIndList=None):
"""
Copies an image (Img class), with all variables or a subset of variables:
:param im: (Img class) input image
:param varIndList: (sequence of int or None) index-es of the variables
to copy (default None: all variables), note that for
copying one variable, specify "varIndList=(iv,)"
:return: (Img class) a copy of the input image
(not a reference to)
"""
if varIndList is not None:
# Check if each index is valid
if sum([iv in range(im.nv) for iv in varIndList]) != len(varIndList):
print("ERROR: invalid index-es")
return
else:
varIndList = range(im.nv)
imOut = Img(nx=im.nx, ny=im.ny, nz=im.nz,
sx=im.sx, sy=im.sy, sz=im.sz,
ox=im.ox, oy=im.oy, oz=im.oz,
nv=len(varIndList),
name=im.name)
for i, iv in enumerate(varIndList):
imOut.set_var(val=im.val[iv,...], varname=im.varname[iv], ind=i)
return (imOut)
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def readImageGslib(filename, missing_value=None):
"""
Reads an image from a file (gslib format):
--- file (ascii) ---
Nx Ny Nz [Sx Sy Sz [Ox Oy Oz]]
nvar
name_of_variable_1
...
name_of_variable_nvar
Z1(0) ... Znvar(0)
...
Z1(Ng-1) ... Znvar(Ng-1)
--- file (ascii) ---
:param filename: (string) name of the file
:param missing_value: (float or None) value that will be replaced by nan
:return: (Img class) image
"""
# Check if the file exists
if not os.path.isfile(filename):
print("ERROR: invalid filename ({})".format(filename))
return
# Open the file in read mode
with open(filename,'r') as ff:
# Read 1st line
line1 = ff.readline()
# Read 2nd line
line2 = ff.readline()
# Set number of variables
nv = int(line2)
# Set variable name (next nv lines)
varname = [ff.readline().replace("\n",'') for i in range(nv)]
# Read the rest of the file
valarr = np.loadtxt(ff)
# Convert line1 as list
g = [x for x in line1.split()]
# Set grid
nx, ny, nz = [int(n) for n in g[0:3]]
sx, sy, sz = [1.0, 1.0, 1.0]
ox, oy, oz = [0.0, 0.0, 0.0]
if len(g) >= 6:
sx, sy, sz = [float(n) for n in g[3:6]]
if len(g) >= 9:
ox, oy, oz = [float(n) for n in g[6:9]]
# Replace missing_value by np.nan
if missing_value is not None:
np.putmask(valarr, valarr == missing_value, np.nan)
# Set image
im = Img(nx, ny, nz, sx, sy, sz, ox, oy, oz, nv, valarr.T, varname, filename)
return (im)
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def readImageVtk(filename, missing_value=None):
"""
Reads an image from a file (vtk format):
:param filename: (string) name of the file
:param missing_value: (float or None) value that will be replaced by nan
:return: (Img class) image
"""
# Check if the file exists
if not os.path.isfile(filename):
print("ERROR: invalid filename ({})".format(filename))
return
# Open the file in read mode
with open(filename,'r') as ff:
# Read lines 1 to 10
header = [ff.readline() for i in range(10)]
# Read the rest of the file
valarr = np.loadtxt(ff)
# Set grid
nx, ny, nz = [int(n) for n in header[4].split()[1:4]]
sx, sy, sz = [float(n) for n in header[6].split()[1:4]]
ox, oy, oz = [float(n) for n in header[5].split()[1:4]]
# Set variable
tmp = header[8].split()
if len(tmp) > 3:
nv = int(tmp[3])
else:
nv = 1
varname = tmp[1].split('/')
# Replace missing_value by np.nan
if missing_value is not None:
np.putmask(valarr, valarr == missing_value, np.nan)
# Set image
im = Img(nx, ny, nz, sx, sy, sz, ox, oy, oz, nv, valarr.T, varname, filename)
return (im)
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def readImagePgm(filename, missing_value=None, varname=['pgmValue']):
"""
Reads an image from a file (pgm format):
:param filename: (string) name of the file
:param missing_value: (float or None) value that will be replaced by nan
:return: (Img class) image
"""
# Check if the file exists
if not os.path.isfile(filename):
print("ERROR: invalid filename ({})".format(filename))
return
# Open the file in read mode
with open(filename,'r') as ff:
# Read 1st line
line = ff.readline()
if line[:2] != 'P2':
print("ERROR: invalid format (first line)")
return
# Read 2nd line
line = ff.readline()
while line[0] == '#':
# Read next line
line = ff.readline()
# Set dimension
nx, ny = [int(x) for x in line.split()]
# Read next line
line = ff.readline()
if line[:3] != '255':
print("ERROR: invalid format (number of colors / max val)")
return
# Read the rest of the file
vv = [x.split() for x in ff.readlines()]
# Set grid
nz = 1 # nx, ny already set
sx, sy, sz = [1.0, 1.0, 1.0]
ox, oy, oz = [0.0, 0.0, 0.0]
# Set variable
nv = 1
    # varname: given as an argument
# Set variable array
valarr = np.array([int(x) for line in vv for x in line], dtype=float).reshape(-1, nv)
# Replace missing_value by np.nan
if missing_value is not None:
np.putmask(valarr, valarr == missing_value, np.nan)
# Set image
im = Img(nx, ny, nz, sx, sy, sz, ox, oy, oz, nv, valarr, varname, filename)
return (im)
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def readImagePpm(filename, missing_value=None, varname=['ppmR', 'ppmG', 'ppmB']):
"""
Reads an image from a file (ppm format):
:param filename: (string) name of the file
:param missing_value: (float or None) value that will be replaced by nan
:return: (Img class) image
"""
# Check if the file exists
if not os.path.isfile(filename):
print("ERROR: invalid filename ({})".format(filename))
return
# Open the file in read mode
with open(filename,'r') as ff:
# Read 1st line
line = ff.readline()
if line[:2] != 'P3':
print("ERROR: invalid format (first line)")
return
# Read 2nd line
line = ff.readline()
while line[0] == '#':
# Read next line
line = ff.readline()
# Set dimension
nx, ny = [int(x) for x in line.split()]
# Read next line
line = ff.readline()
if line[:3] != '255':
print("ERROR: invalid format (number of colors / max val)")
return
# Read the rest of the file
vv = [x.split() for x in ff.readlines()]
# Set grid
nz = 1 # nx, ny already set
sx, sy, sz = [1.0, 1.0, 1.0]
ox, oy, oz = [0.0, 0.0, 0.0]
# Set variable
nv = 3
    # varname: given as an argument
# Set variable array
valarr = np.array([int(x) for line in vv for x in line], dtype=float).reshape(-1, nv)
# Replace missing_value by np.nan
if missing_value is not None:
np.putmask(valarr, valarr == missing_value, np.nan)
# Set image
im = Img(nx, ny, nz, sx, sy, sz, ox, oy, oz, nv, valarr.T, varname, filename)
return (im)
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def imCategFromPgm(filename, flip_vertical=True, cmap='binary'):
"""
Reads an image from a file (pgm format (ASCII), e.g. created by Gimp):
:param filename: (string) name of the file
    :param flip_vertical: (bool) if True: flip the image vertically after reading the image
    :param cmap: (string or matplotlib colormap) colormap used to assign a
        color to each pgm value
    :return: (tuple) (im, col, pgm)
im: (Img class) image with categories 0, 1, ..., n-1 as values
col : list of colors (rgba tuple, for each category) (length n)
pgm : list of initial pgm values (length n)
"""
# Read image
    im = readImagePgm(filename)
if flip_vertical:
# Flip image vertically
im.flipy()
# Set cmap function
if isinstance(cmap, str):
cmap_func = plt.get_cmap(cmap)
else:
cmap_func = cmap
# Get colors and set color codes
v = im.val.reshape(-1)
pgm, code = np.unique(v, return_inverse=True)
col = [cmap_func(c/255.) for c in pgm]
# Set image
    im = Img(im.nx, im.ny, im.nz, im.sx, im.sy, im.sz, im.ox, im.oy, im.oz, nv=1, val=code, varname='code')
return (im, col, pgm)
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def imCategFromPpm(filename, flip_vertical=True):
"""
Reads an image from a file (ppm format (ASCII), e.g. created by Gimp):
:param filename: (string) name of the file
:param flip_vertical: (bool) if True: flip the image vertically after reading the image
    :return: (tuple) (im, col, rgb)
im: (Img class) image with categories 0, 1, ..., n-1 as values
col : list of colors (rgba tuple, for each category) (length n)
rgb : list of initial rgb values (length n)
"""
# Read image
    im = readImagePpm(filename)
if flip_vertical:
# Flip image vertically
im.flipy()
# Get colors and set color codes
v = np.array((1, 256, 256**2)).dot(im.val.reshape(3,-1))
x, code = np.unique(v, return_inverse=True)
x, ired = np.divmod(x, 256)
iblue, igreen = np.divmod(x, 256)
rgb = np.array((ired, igreen, iblue)).T
col = [[c/255. for c in irgb] for irgb in rgb]
# Set image
    im = Img(im.nx, im.ny, im.nz, im.sx, im.sy, im.sz, im.ox, im.oy, im.oz, nv=1, val=code, varname='code')
return (im, col, rgb)
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def writeImageGslib(im, filename, missing_value=None, fmt="%.10g"):
"""
Writes an image in a file (gslib format):
:param im: (Img class) image to be written
:param filename: (string) name of the file
:param missing_value: (float or None) nan values will be replaced
by missing_value before writing
:param fmt: (string) single format for variable values, of the
form: '%[flag]width[.precision]specifier'
"""
# Write 1st line in string shead
shead = "{} {} {} {} {} {} {} {} {}\n".format(
im.nx, im.ny, im.nz, im.sx, im.sy, im.sz, im.ox, im.oy, im.oz)
# Append 2nd line
shead = shead + "{}\n".format(im.nv)
# Append variable name(s) (next line(s))
for s in im.varname:
shead = shead + "{}\n".format(s)
# Replace np.nan by missing_value
if missing_value is not None:
np.putmask(im.val, np.isnan(im.val), missing_value)
# Open the file in write binary mode
with open(filename,'wb') as ff:
ff.write(shead.encode())
# Write variable values
np.savetxt(ff, im.val.reshape(im.nv, -1).T, delimiter=' ', fmt=fmt)
# Replace missing_value by np.nan (restore)
if missing_value is not None:
np.putmask(im.val, im.val == missing_value, np.nan)
# ----------------------------------------------------------------------------
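# ----------------------------------------------------------------------------
# Round-trip sketch (illustrative; the file name below is an arbitrary
# example): write a small image in gslib format and read it back.
def _gslib_roundtrip_demo(filename="demo.gslib"):
    im = Img(nx=2, ny=2, nz=1, nv=1, val=[1.0, 2.0, 3.0, 4.0], varname=["V0"])
    writeImageGslib(im, filename)
    return readImageGslib(filename)
# ----------------------------------------------------------------------------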
# ----------------------------------------------------------------------------
def writeImageVtk(im, filename, missing_value=None, fmt="%.10g",
data_type='float', version=3.4, name=None):
"""
Writes an image in a file (vtk format):
:param im: (Img class) image to be written
:param filename: (string) name of the file
:param missing_value: (float or None) nan values will be replaced
by missing_value before writing
:param fmt: (string) single format for variable values, of the
form: '%[flag]width[.precision]specifier'
:param data_type: (string) data type (can be 'float', 'int', ...)
:param version: (float) version number (for data file)
:param name: (string or None) name to be written at line 2
if None, im.name is used
"""
if name is None:
name = im.name
# Set header (10 first lines)
shead = (
"# vtk DataFile Version {0}\n"
"{1}\n"
"ASCII\n"
"DATASET STRUCTURED_POINTS\n"
"DIMENSIONS {2} {3} {4}\n"
"ORIGIN {5} {6} {7}\n"
"SPACING {8} {9} {10}\n"
"POINT_DATA {11}\n"
"SCALARS {12} {13} {14}\n"
"LOOKUP_TABLE default\n"
).format(version,
name,
im.nx, im.ny, im.nz,
im.ox, im.oy, im.oz,
im.sx, im.sy, im.sz,
im.nxyz(),
'/'.join(im.varname), data_type, im.nv)
# Replace np.nan by missing_value
if missing_value is not None:
np.putmask(im.val, np.isnan(im.val), missing_value)
# Open the file in write binary mode
with open(filename,'wb') as ff:
ff.write(shead.encode())
# Write variable values
np.savetxt(ff, im.val.reshape(im.nv, -1).T, delimiter=' ', fmt=fmt)
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def writeImagePgm(im, filename, missing_value=None, fmt="%.10g"):
"""
Writes an image in a file (pgm format):
:param im: (Img class) image to be written
:param filename: (string) name of the file
:param missing_value: (float or None) nan values will be replaced
by missing_value before writing
:param fmt: (string) single format for variable values, of the
form: '%[flag]width[.precision]specifier'
"""
# Write 1st line in string shead
shead = "P2\n# {0} {1} {2} {3} {4} {5} {6} {7} {8}\n{0} {1}\n255\n".format(
im.nx, im.ny, im.nz, im.sx, im.sy, im.sz, im.ox, im.oy, im.oz)
# Replace np.nan by missing_value
if missing_value is not None:
np.putmask(im.val, np.isnan(im.val), missing_value)
# Open the file in write binary mode
with open(filename,'wb') as ff:
ff.write(shead.encode())
# Write variable values
np.savetxt(ff, im.val.reshape(im.nv, -1).T, delimiter=' ', fmt=fmt)
# Replace missing_value by np.nan (restore)
if missing_value is not None:
np.putmask(im.val, im.val == missing_value, np.nan)
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def writeImagePpm(im, filename, missing_value=None, fmt="%.10g"):
"""
Writes an image in a file (ppm format):
:param im: (Img class) image to be written
:param filename: (string) name of the file
:param missing_value: (float or None) nan values will be replaced
by missing_value before writing
:param fmt: (string) single format for variable values, of the
form: '%[flag]width[.precision]specifier'
"""
# Write 1st line in string shead
shead = "P3\n# {0} {1} {2} {3} {4} {5} {6} {7} {8}\n{0} {1}\n255\n".format(
im.nx, im.ny, im.nz, im.sx, im.sy, im.sz, im.ox, im.oy, im.oz)
# Replace np.nan by missing_value
if missing_value is not None:
np.putmask(im.val, np.isnan(im.val), missing_value)
# Open the file in write binary mode
with open(filename,'wb') as ff:
ff.write(shead.encode())
# Write variable values
np.savetxt(ff, im.val.reshape(im.nv, -1).T, delimiter=' ', fmt=fmt)
# Replace missing_value by np.nan (restore)
if missing_value is not None:
np.putmask(im.val, im.val == missing_value, np.nan)
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def isImageDimensionEqual (im1, im2):
"""
Checks if grid dimensions of two images are equal.
"""
return (im1.nx == im2.nx and im1.ny == im2.ny and im1.nz == im2.nz)
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def gatherImages (imlist, varInd=None, remVarFromInput=False):
"""
Gathers images:
:param imlist: (list) images to be gathered, they should have
the same grid dimensions
:param varInd: (int or None)
if None: all variables of each image from imlist
are put in the output image
else: only the variable of index varInd is put in
the output image
:param remVarFromInput: (bool) if True, gathered variables are removed
from the source (input image)
:return: (Img class) output image containing variables of images of imlist
"""
for i in range(1,len(imlist)):
if not isImageDimensionEqual(imlist[0], imlist[i]):
print("ERROR: grid dimensions differ, nothing done!")
return
if varInd is not None:
if varInd < 0:
print("ERROR: invalid index (negative), nothing done!")
return
for i in range(len(imlist)):
if varInd >= imlist[i].nv:
print("ERROR: invalid index, nothing done!")
return
im = Img(nx=imlist[0].nx, ny=imlist[0].ny, nz=imlist[0].nz,
sx=imlist[0].sx, sy=imlist[0].sy, sz=imlist[0].sz,
ox=imlist[0].ox, oy=imlist[0].oy, oz=imlist[0].oz,
nv=0, val=0.0)
if varInd is not None:
for i in range(len(imlist)):
im.append_var(val=imlist[i].val[varInd,...])
if remVarFromInput:
imlist[i].remove_var(varInd)
else:
for i in range(len(imlist)):
for j in range(imlist[i].nv):
im.append_var(val=imlist[i].val[j,...])
if remVarFromInput:
imlist[i].remove_allvar()
return (im)
# ----------------------------------------------------------------------------
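# ----------------------------------------------------------------------------
# Example sketch (illustrative): gather two single-variable images defined on
# the same grid into one two-variable image.
def _gather_demo():
    imA = Img(nx=2, ny=2, nz=1, nv=1, val=0.0)
    imB = Img(nx=2, ny=2, nz=1, nv=1, val=1.0)
    return gatherImages([imA, imB])  # resulting image has nv == 2
# ----------------------------------------------------------------------------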
# ----------------------------------------------------------------------------
def imageContStat (im, op='mean', **kwargs):
"""
Computes "pixel-wise" statistics over every variable of an image:
:param im: (Img class) input image
:param op: (string) statistic operator, can be:
'max': max
'mean': mean
'min': min
'std': standard deviation
'var': variance
'quantile': quantile
this operator requires the keyword argument
q=<sequence of quantile to compute>
    :param kwargs: additional keyword arguments passed to np.<op>
function, typically: ddof=1 if op is 'std' or 'var'
:return: (Img class) image with same grid as the input image and
one variable being the pixel-wise statistics according to 'op'
over every variable of the input image
"""
if op == 'max':
func = np.nanmax
varname = [op]
elif op == 'mean':
func = np.nanmean
varname = [op]
elif op == 'min':
func = np.nanmin
varname = [op]
elif op == 'std':
func = np.nanstd
varname = [op]
elif op == 'var':
func = np.nanvar
varname = [op]
elif op == 'quantile':
func = np.nanquantile
if 'q' not in kwargs:
print("ERROR: keyword argument 'q' required for op='quantile', nothing done!")
return
varname = [op + '_' + str(v) for v in kwargs['q']]
else:
print("ERROR: unkown operation {}, nothing done!".format(op))
return
imOut = Img(nx=im.nx, ny=im.ny, nz=im.nz,
sx=im.sx, sy=im.sy, sz=im.sz,
ox=im.ox, oy=im.oy, oz=im.oz,
nv=0, val=0.0)
vv = func(im.val.reshape(im.nv,-1), axis=0, **kwargs)
vv = vv.reshape(-1, im.nxyz())
for v, name in zip(vv, varname):
imOut.append_var(v, varname=name)
return (imOut)
# ----------------------------------------------------------------------------
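# ----------------------------------------------------------------------------
# Example sketch (illustrative): pixel-wise mean over the two variables of a
# small image; every pixel of the single output variable equals 2.0.
def _stat_demo():
    im = Img(nx=2, ny=2, nz=1, nv=2, val=[1.0] * 4 + [3.0] * 4)
    return imageContStat(im, op='mean')
# ----------------------------------------------------------------------------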
# ----------------------------------------------------------------------------
def imageCategProp (im, categ):
"""
Computes "pixel-wise" proportions of given categories over every
variable of an image:
:param im: (Img class) input image
:param categ: (sequence) list of value(s) for which the proportions
are computed
:return: (Img class) image with same grid as the input image and as many
variable(s) as given by 'categ', being the pixel-wise
proportions of each category in 'categ', over every variable
of the input image
"""
categarr = np.array(categ,dtype=float).reshape(-1)
imOut = Img(nx=im.nx, ny=im.ny, nz=im.nz,
sx=im.sx, sy=im.sy, sz=im.sz,
ox=im.ox, oy=im.oy, oz=im.oz,
nv=0, val=0.0)
for code in categarr:
x = im.val.reshape(im.nv,-1) == code
x = np.asarray(x,dtype=float)
np.putmask(x, np.isnan(im.val.reshape(im.nv,-1)), np.nan)
imOut.append_var(np.mean(x, axis=0))
return (imOut)
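# Illustrative usage sketch (added example, not part of the original module):
# pixel-wise proportions of categories 0 and 1 over the two variables of a
# categorical image; each proportion equals 0.5 at every pixel here.
def _example_imageCategProp():
    im = Img(nx=2, ny=2, nz=1, sx=1.0, sy=1.0, sz=1.0,
             ox=0.0, oy=0.0, oz=0.0, nv=2, val=0.0)
    im.val[1, ...] = 1.0  # variable 0 is all 0, variable 1 is all 1
    return imageCategProp(im, categ=(0, 1))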
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def imageEntropy (im, varIndList=None):
"""
Computes "pixel-wise" entropy for proprotions given as variables in an
image:
:param im: (Img class) input image
:param varIndList: (sequence of int or None) index-es of the variables
to take into account (default None: all variables)
    :return: (Img class) an image with one variable containing the
                 entropy computed from the variables given in input;
                 at pixel i, it is defined as:
                 Ent(i) = - sum_{v} p_v(i) * log_n(p_v(i))
                 where v loops on each variable and n is the number
                 of variables. Note that sum_{v} p_v(i) should
                 be equal to 1
"""
if varIndList is not None:
# Check if each index is valid
if sum([iv in range(im.nv) for iv in varIndList]) != len(varIndList):
print("ERROR: invalid index-es")
return
else:
varIndList = range(im.nv)
if len(varIndList) < 2:
print("ERROR: at least 2 indexes should be given")
return
imOut = Img(nx=im.nx, ny=im.ny, nz=im.nz,
sx=im.sx, sy=im.sy, sz=im.sz,
ox=im.ox, oy=im.oy, oz=im.oz,
nv=1, val=np.nan,
name=im.name)
t = 1. / np.log(len(varIndList))
for iz in range(im.nz):
for iy in range(im.ny):
for ix in range(im.nx):
s = 0
e = 0
ok = True
for iv in varIndList:
p = im.val[iv][iz][iy][ix]
if np.isnan(p) or p < 0:
ok = False
break
s = s + p
if p > 1.e-10:
e = e - p*np.log(p)
if ok and abs(s-1.0) > 1.e-5:
ok = False
if ok:
imOut.val[0][iz][iy][ix] = t*e
return (imOut)
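# Illustrative usage sketch (added example, not part of the original module):
# entropy of two proportion variables that sum to 1 at every pixel; with
# p = (0.5, 0.5) the normalized entropy reaches its maximum, 1.0.
def _example_imageEntropy():
    im = Img(nx=2, ny=2, nz=1, sx=1.0, sy=1.0, sz=1.0,
             ox=0.0, oy=0.0, oz=0.0, nv=2, val=0.5)
    return imageEntropy(im)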
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def copyPointSet(ps, varIndList=None):
"""
Copies point set, with all variables or a subset of variables:
:param ps: (PointSet class) input point set
:param varIndList: (sequence of int or None) index-es of the variables
to copy (default None: all variables), note that for
copying one variable, specify "varIndList=(iv,)"
:return: (PointSet class) a copy of the input point set
(not a reference to)
"""
if varIndList is not None:
# Check if each index is valid
if sum([iv in range(ps.nv) for iv in varIndList]) != len(varIndList):
print("ERROR: invalid index-es")
return
else:
varIndList = range(ps.nv)
psOut = PointSet(npt=ps.npt, nv=len(varIndList), val=0.0, name=ps.name)
for i, iv in enumerate(varIndList):
psOut.set_var(val=ps.val[iv,...], varname=ps.varname[iv], ind=i)
return (psOut)
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def readPointSetGslib(filename, missing_value=None):
"""
Reads a point set from a file (gslib format):
--- file (ascii) ---
npoint
nvar+3
name_for_x_coordinate
name_for_y_coordinate
name_for_z_coordinate
name_of_variable_1
...
name_of_variable_nvar
x(1) y(1) z(1) Z1(1) ... Znvar(1)
...
x(npoint) y(npoint) z(npoint) Z1(npoint) ... Znvar(npoint)
--- file (ascii) ---
:param filename: (string) name of the file
:param missing_value: (float or None) value that will be replaced by nan
:return: (PointSet class) point set
"""
# Check if the file exists
if not os.path.isfile(filename):
print("ERROR: invalid filename ({})".format(filename))
return
# Open the file in read mode
with open(filename,'r') as ff:
# Read 1st line
line1 = ff.readline()
# Read 2nd line
line2 = ff.readline()
# Set number of variables
nv = int(line2)
# Set variable name (next nv lines)
varname = [ff.readline().replace("\n",'') for i in range(nv)]
# Read the rest of the file
valarr = np.loadtxt(ff)
# Set number of point(s)
npt = int(line1)
# Replace missing_value by np.nan
if missing_value is not None:
np.putmask(valarr, valarr == missing_value, np.nan)
# Set point set
ps = PointSet(npt=npt, nv=nv, val=valarr.T, varname=varname)
return (ps)
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def writePointSetGslib(ps, filename, missing_value=None, fmt="%.10g"):
"""
Writes a point set in a file (gslib format):
:param ps: (PointSet class) point set to be written
:param filename: (string) name of the file
:param missing_value: (float or None) nan values will be replaced
by missing_value before writing
:param fmt: (string) single format for variable values, of the
form: '%[flag]width[.precision]specifier'
"""
# Write 1st line in string shead
shead = "{}\n".format(ps.npt)
# Append 2nd line
shead = shead + "{}\n".format(ps.nv)
# Append variable name(s) (next line(s))
for s in ps.varname:
shead = shead + "{}\n".format(s)
# Replace np.nan by missing_value
if missing_value is not None:
np.putmask(ps.val, np.isnan(ps.val), missing_value)
# Open the file in write binary mode
with open(filename,'wb') as ff:
ff.write(shead.encode())
# Write variable values
np.savetxt(ff, ps.val.reshape(ps.nv, -1).T, delimiter=' ', fmt=fmt)
# Replace missing_value by np.nan (restore)
if missing_value is not None:
np.putmask(ps.val, ps.val == missing_value, np.nan)
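# Illustrative usage sketch (added example, not part of the original module;
# note that it writes a small file to disk when called): writes a point set
# to a gslib file and reads it back; nan values would be encoded in the file
# with missing_value=-9999.
def _example_pointSetGslibRoundTrip(filename='ps_example.gslib'):
    ps = PointSet(npt=2, nv=4, val=0.0, varname=['X', 'Y', 'Z', 'V'])
    writePointSetGslib(ps, filename, missing_value=-9999)
    return readPointSetGslib(filename, missing_value=-9999)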
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def imageToPointSet(im):
"""
Returns a point set corresponding to the input image:
:param im: (Img class) input image
:return: (PointSet class) point set corresponding to the input image
"""
# Initialize point set
ps = PointSet(npt=im.nxyz(), nv=3+im.nv, val=0.0)
# Set x-coordinate
t = im.x()
v = []
for i in range(im.nyz()):
v.append(t)
ps.set_var(val=v, varname='X', ind=0)
# Set y-coordinate
t = np.repeat(im.y(), im.nx)
v = []
for i in range(im.nz):
v.append(t)
ps.set_var(val=v, varname='Y', ind=1)
# Set z-coordinate
v = np.repeat(im.z(), im.nxy())
ps.set_var(val=v, varname='Z', ind=2)
# Set next variable(s)
for i in range(im.nv):
ps.set_var(val=im.val[i,...], varname=im.varname[i], ind=3+i)
return (ps)
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def pointSetToImage(ps, nx, ny, nz, sx=1.0, sy=1.0, sz=1.0, ox=0.0, oy=0.0, oz=0.0, job=0):
"""
Returns an image corresponding to the input point set and grid:
:param ps: (PointSet class) input point set, with x, y, z-coordinates as
                     first three variables
:param nx, ny, nz: (int) number of grid cells in each direction
:param sx, sy, sz: (float) cell size in each direction
:param ox, oy, oz: (float) origin of the grid (bottom-lower-left corner)
:param job: (int)
                        if 0: an error occurs if a point is located outside of
                            the image grid, otherwise all points are integrated
                            in the image
                        if 1: points located outside of the image grid are
                            ignored (no error occurs), and all points located
                            within the image grid are integrated in the image
:return: (Img class) image corresponding to the input point set and grid
"""
if ps.nv < 3:
print("ERROR: invalid number of variable (should be > 3)")
return
# Initialize image
im = Img(nx=nx, ny=ny, nz=nz,
sx=sx, sy=sy, sz=sz,
ox=ox, oy=oy, oz=oz,
nv=ps.nv-3, val=np.nan,
varname=[ps.varname[3+i] for i in range(ps.nv-3)])
# Get index of point in the image
xmin, xmax = im.xmin(), im.xmax()
ymin, ymax = im.ymin(), im.ymax()
zmin, zmax = im.zmin(), im.zmax()
ix = np.array(np.floor((ps.val[0]-xmin)/sx),dtype=int)
iy = np.array(np.floor((ps.val[1]-ymin)/sy),dtype=int)
iz = np.array(np.floor((ps.val[2]-zmin)/sz),dtype=int)
# ix = [np.floor((x-xmin)/sx + 0.5) for x in ps.val[0]]
# iy = [np.floor((y-ymin)/sy + 0.5) for y in ps.val[1]]
# iz = [np.floor((z-zmin)/sz + 0.5) for z in ps.val[2]]
for i in range(ps.npt):
if ix[i] == nx:
if (ps.val[0,i]-xmin)/sx - nx < 1.e-10:
ix[i] = nx-1
if iy[i] == ny:
if (ps.val[1,i]-ymin)/sy - ny < 1.e-10:
iy[i] = ny-1
if iz[i] == nz:
if (ps.val[2,i]-zmin)/sz - nz < 1.e-10:
iz[i] = nz-1
# Check which index is out of the image grid
# iout = np.any([np.array(ix < 0), np.array(ix >= nx),
# np.array(iy < 0), np.array(iy >= ny),
# np.array(iz < 0), np.array(iz >= nz)],
# 0)
iout = np.any(np.array((ix < 0, ix >= nx,
iy < 0, iy >= ny,
iz < 0, iz >= nz)), 0)
if not job and sum(iout) > 0:
print ("ERROR: point out of the image grid!")
return
# Set values in the image
for i in range(ps.npt): # ps.npt is equal to iout.size
if not iout[i]:
im.val[:,iz[i], iy[i], ix[i]] = ps.val[3:ps.nv,i]
return (im)
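# Illustrative usage sketch (added example, not part of the original module):
# converts an image to a point set and back to an image on the same grid;
# the variable values are preserved through the round trip.
def _example_imagePointSetRoundTrip():
    im = Img(nx=2, ny=2, nz=1, sx=1.0, sy=1.0, sz=1.0,
             ox=0.0, oy=0.0, oz=0.0, nv=1, val=1.0)
    ps = imageToPointSet(im)
    return pointSetToImage(ps, nx=2, ny=2, nz=1)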
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def pointToGridIndex(x, y, z, nx, ny, nz, sx=1.0, sy=1.0, sz=1.0, ox=0.0, oy=0.0, oz=0.0):
"""
    Convert real point coordinates to grid index:
:param x, y, z: (float) coordinates of a point
:param nx, ny, nz: (int) number of grid cells in each direction
:param sx, sy, sz: (float) cell size in each direction
:param ox, oy, oz: (float) origin of the grid (bottom-lower-left corner)
:return: [ix, iy, iz]:
(list of int of size 3) ix, iy, iz are the grid node index
in x-, y-, z-axis direction respectively
Warning: no check if the node is within the grid
Note: x, y, z can be ndarray of same shape, then
ix, iy, iz in output are ndarray of that shape
"""
    # Use floor (not int truncation) so that negative offsets map to the
    # correct cell and so that ndarray inputs are supported, as documented
    ix = np.array(np.floor((x-ox)/sx), dtype=int)
    iy = np.array(np.floor((y-oy)/sy), dtype=int)
    iz = np.array(np.floor((z-oz)/sz), dtype=int)
    return ([ix, iy, iz])
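# Illustrative usage sketch (added example, not part of the original module):
# with origin (0, 0, 0) and unit cells, the point (2.5, 0.5, 0.0) falls in
# the grid cell of index ix=2, iy=0, iz=0.
def _example_pointToGridIndex():
    return pointToGridIndex(2.5, 0.5, 0.0, nx=4, ny=3, nz=2)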
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def gridIndexToSingleGridIndex(ix, iy, iz, nx, ny, nz):
"""
Convert a grid index (3 indices) into a single grid index:
:param ix, iy, iz: (int) grid index in x-, y-, z-axis direction
:param nx, ny, nz: (int) number of grid cells in each direction
:return: i: (int) single grid index
Note: ix, iy, iz can be ndarray of same shape, then
i in output is ndarray of that shape
"""
return (ix + nx * (iy + ny * iz))
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def singleGridIndexToGridIndex(i, nx, ny, nz):
"""
    Convert a single grid index into a grid index (3 indices):
:param i: (int) single grid index
:param nx, ny, nz: (int) number of grid cells in each direction
:return: [ix, iy, iz]:
(list of 3 int) grid index in x-, y-, z-axis direction
Note: i can be a ndarray, then
ix, iy, iz in output are ndarray (of same shape)
"""
nxy = nx*ny
iz = i//nxy
j = i%nxy
iy = j//nx
ix = j%nx
return ([ix, iy, iz])
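# Illustrative usage sketch (added example, not part of the original module):
# converting (ix, iy, iz) to a single grid index and back recovers the
# original triplet: i = 1 + 4*(2 + 3*0) = 9, which maps back to [1, 2, 0].
def _example_gridIndexRoundTrip():
    i = gridIndexToSingleGridIndex(1, 2, 0, nx=4, ny=3, nz=2)
    return singleGridIndexToGridIndex(i, nx=4, ny=3, nz=2)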
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def sampleFromPointSet(point_set, size, seed=None, mask=None):
"""
    Samples random points from a PointSet object
    and returns a new PointSet:
:param point_set: (PointSet) PointSet object to sample from
    :param size: (int) number of points to be sampled
    :param seed: (int) optional random seed
    :param mask: (PointSet) PointSet of the same size indicating where to sample;
                 points where mask == 0 will not be taken into account
:return: PointSet:
A PointSet object
"""
# Initialise the seed; will randomly reseed the generator if None
np.random.seed(seed)
if mask is not None:
indices = np.where(mask.val[3,:] != 0)[0]
else:
indices = point_set.npt
# Sample only some points from the point set
sampled_indices = np.random.choice(indices, size, replace=False)
# Return the new object
return PointSet(npt=size,
nv=point_set.nv,
val=point_set.val[:,sampled_indices],
varname=point_set.varname,
name=point_set.name)
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def sampleFromImage(image, size, seed=None, mask=None):
"""
    Samples random points from an Img object
    and returns a PointSet:
:param image: (Img) Img object to sample from
:param size: (int) number of points to be sampled
:param seed: (int) optional random seed
    :param mask: (Img) Img of the same size indicating where to sample;
                 points where mask == 0 will not be taken into account
:return: PointSet:
A PointSet object
"""
# Create point set from image
point_set = imageToPointSet(image)
if mask is not None:
mask = imageToPointSet(mask)
return sampleFromPointSet(point_set, size, seed, mask)
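# Illustrative usage sketch (added example, not part of the original module):
# samples 3 points from a single-variable image, with a fixed seed for
# reproducibility.
def _example_sampleFromImage():
    im = Img(nx=2, ny=2, nz=1, sx=1.0, sy=1.0, sz=1.0,
             ox=0.0, oy=0.0, oz=0.0, nv=1, val=1.0)
    return sampleFromImage(im, size=3, seed=42)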
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def extractRandomPointFromImage (im, npt, seed=None):
"""
    Extracts random points from an image (at center of grid cells) and returns
    the corresponding point set:
:param im: (Img class) input image
:param npt: (int) number of points to extract
(if greater than the number of image grid cells,
npt is set to this latter)
    :param seed: (int) seed number for initializing the random number generator (if not None)
    :return: (PointSet class) point set containing the extracted points
"""
if npt <= 0:
print("ERROR: number of points negative or zero (npt={}), nothing done!".format(npt))
return
if npt >= im.nxyz():
return imageToPointSet(im)
if seed is not None:
np.random.seed(seed)
# Get random single grid indices
ind_grid = np.random.choice(np.arange(im.nxyz()), size=npt, replace=False)
# Get grid indices along each axis
ind_ixyz = np.array([singleGridIndexToGridIndex(i, im.nx, im.ny, im.nz) for i in ind_grid])
# Get points coordinates
x = im.ox + (ind_ixyz[:,0]+0.5)*im.sx
y = im.oy + (ind_ixyz[:,1]+0.5)*im.sy
z = im.oz + (ind_ixyz[:,2]+0.5)*im.sz
# Get value of every variable at points
v = np.array([im.val.reshape(im.nv,-1)[:,i] for i in ind_grid])
# Initialize point set
ps = PointSet(npt=npt, nv=3+im.nv, val=0.0)
# Set points coordinates
ps.set_var(val=x, varname='X', ind=0)
ps.set_var(val=y, varname='Y', ind=1)
ps.set_var(val=z, varname='Z', ind=2)
# Set next variable(s)
for i in range(im.nv):
ps.set_var(val=v[:,i], varname=im.varname[i], ind=3+i)
return (ps)
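# Illustrative usage sketch (added example, not part of the original module):
# extracts 2 random cell-center points from a single-variable image; the
# output point set has the variables X, Y, Z plus the image variable.
def _example_extractRandomPointFromImage():
    im = Img(nx=2, ny=2, nz=1, sx=1.0, sy=1.0, sz=1.0,
             ox=0.0, oy=0.0, oz=0.0, nv=1, val=1.0)
    return extractRandomPointFromImage(im, npt=2, seed=42)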
# ----------------------------------------------------------------------------
if __name__ == "__main__":
print("Module 'geone.img' example:")
print(" run the module 'geone.imgplot'...")
|
# Tests need to be a package, otherwise ipyparallel will not find them
# when trying to import the tests in the subprocesses.
# Therefore, LEAVE THIS FILE HERE.
|
# Copyright (c) 2016 Orange.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.tests.fullstack.resources import config as neutron_cfg
class ML2ConfigFixture(neutron_cfg.ML2ConfigFixture):
def __init__(self, env_desc, host_desc, temp_dir, tenant_network_types):
super(ML2ConfigFixture, self).__init__(
env_desc, host_desc, temp_dir, tenant_network_types)
if env_desc.bagpipe_ml2:
self.config['ml2']['type_drivers'] = 'route_target'
self.config.update({
'ml2_type_route_target': {
'rt_nn_ranges': '100:199'
}
})
|
import warnings
import pytest
from evalml.tuners import GridSearchTuner, NoParamsException
from evalml.tuners.tuner import Tuner
def test_grid_search_tuner_inheritance():
assert issubclass(GridSearchTuner, Tuner)
def test_grid_search_tuner_unique_values(dummy_pipeline_hyperparameters):
tuner = GridSearchTuner(dummy_pipeline_hyperparameters)
generated_parameters = []
for i in range(10):
params = tuner.propose()
generated_parameters.append(params)
assert len(generated_parameters) == 10
for i in range(10):
assert generated_parameters[i].keys() == dummy_pipeline_hyperparameters.keys()
assert generated_parameters[i]['Mock Classifier'].keys() == dummy_pipeline_hyperparameters['Mock Classifier'].keys()
def test_grid_search_tuner_no_params(dummy_pipeline_hyperparameters_small):
tuner = GridSearchTuner(dummy_pipeline_hyperparameters_small)
error_text = "Grid search has exhausted all possible parameters."
with pytest.raises(NoParamsException, match=error_text):
for i in range(10):
tuner.propose()
def test_grid_search_tuner_basic(dummy_pipeline_hyperparameters,
dummy_pipeline_hyperparameters_unicode):
tuner = GridSearchTuner(dummy_pipeline_hyperparameters)
proposed_params = tuner.propose()
assert proposed_params == {
'Mock Classifier': {
'param a': 0,
'param b': 0.0,
'param c': 'option a',
'param d': 'option a'
}
}
tuner.add(proposed_params, 0.5)
tuner = GridSearchTuner(dummy_pipeline_hyperparameters_unicode)
proposed_params = tuner.propose()
assert proposed_params == {
'Mock Classifier': {
'param a': 0,
'param b': 0.0,
'param c': 'option a 💩',
'param d': 'option a'
}
}
tuner.add(proposed_params, 0.5)
def test_grid_search_tuner_space_types():
tuner = GridSearchTuner({'Mock Classifier': {'param a': (0, 10)}})
proposed_params = tuner.propose()
assert proposed_params == {'Mock Classifier': {'param a': 0}}
tuner = GridSearchTuner({'Mock Classifier': {'param a': (0, 10.0)}})
proposed_params = tuner.propose()
assert proposed_params == {'Mock Classifier': {'param a': 0}}
def test_grid_search_tuner_invalid_space():
bound_error_text = "Upper bound must be greater than lower bound. Parameter lower bound is 1 and upper bound is 0"
with pytest.raises(ValueError, match=bound_error_text):
GridSearchTuner({'Mock Classifier': {'param a': (1, 0)}})
def test_grid_search_tuner_valid_space():
GridSearchTuner({'Mock Classifier': {'param a': 1}})
GridSearchTuner({'Mock Classifier': {'param a': "param_value"}})
tuner = GridSearchTuner({'Mock Classifier': {'param a': 3.200}})
proposed_params = tuner.propose()
assert proposed_params == {'Mock Classifier': {}}
def test_grid_search_tuner_raises_deprecated_random_state_warning():
with warnings.catch_warnings(record=True) as warn:
warnings.simplefilter("always")
GridSearchTuner({'Mock Classifier': {'param a': (0, 2)}}, random_state=13)
assert str(warn[0].message).startswith(
"Argument 'random_state' has been deprecated in favor of 'random_seed'")
|
# pyportal_weather.py updated for CircuitPython v7.1.0 2022-01-04
import sys
import time
import board
from adafruit_pyportal import PyPortal
cwd = ("/"+__file__).rsplit('/', 1)[0] # the current working directory (where this file is)
sys.path.append(cwd)
import openweather_graphics # pylint: disable=wrong-import-position
# Get wifi details and more from a secrets.py file
try:
from secrets import secrets
except ImportError:
print("WiFi secrets are kept in secrets.py, please add them there!")
raise
# Use cityname, country code where countrycode is ISO3166 format.
# E.g. "New York, US" or "London, GB"
LOCATION = "Richland, WA, US"
# Set up where we'll be fetching data from
DATA_SOURCE = "http://api.openweathermap.org/data/2.5/weather?q="+LOCATION
DATA_SOURCE += "&appid="+secrets['openweather_token']
# You'll need to get a token from openweathermap.org, looks like 'b6907d289e10d714a6e88b30761fae22'
DATA_LOCATION = []
# Initialize the pyportal object and let us know what data to fetch and where
# to display it
pyportal = PyPortal(url=DATA_SOURCE,
json_path=DATA_LOCATION,
status_neopixel=board.NEOPIXEL,
default_bg=0x000000)
pyportal.set_backlight(0.75)
gfx = openweather_graphics.OpenWeather_Graphics(pyportal.splash, am_pm=True, celsius=False)
localtime_refresh = None
weather_refresh = None
pyportal.play_file("storm_tracker.wav", wait_to_finish=True) # True to disable speaker after playing
while True:
# only query the online time once per hour (and on first run)
    if (not localtime_refresh) or (time.monotonic() - localtime_refresh) > 3600:
try:
print("Getting time from internet!")
pyportal.get_local_time()
            localtime_refresh = time.monotonic()
except (ValueError, RuntimeError) as e: # ValueError added from quote.py change
print("Some error occured, retrying! -", e)
continue
# only query the weather every 10 minutes (and on first run)
if (not weather_refresh) or (time.monotonic() - weather_refresh) > 600:
try:
value = pyportal.fetch()
print("Response is", value)
gfx.display_weather(value)
weather_refresh = time.monotonic()
except (ValueError, RuntimeError) as e: # ValueError added from quote.py change
print("Some error occured, retrying! -", e)
continue
gfx.update_time()
time.sleep(30) # wait 30 seconds before updating anything again
|
import requests
import config
class Wallpaper:
def __init__(self, path, keyword="8k", page_num=1):
self.URL = f"https://api.pexels.com/v1/search?query={keyword}&orientation=landscape&page={page_num}&size=large&per_page=1"
self.HEADERS = {"Authorization": f"{config.api_key}"}
self.JSON = self.get_json()
self.path = path
def get_json(self):
response = requests.get(self.URL, headers=self.HEADERS)
return response.json()
def download(self):
json_object = self.JSON
response = requests.get(json_object['photos'][0]['src']['large2x'])
# filename = json_object['photos'][0]['alt']
with open(f"{self.path}/wallpaper.jpeg", "wb") as f:
f.write(response.content)
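# Minimal usage sketch (illustrative; assumes a config.py defining `api_key`
# with a valid Pexels API key, and that the current directory is writable):
if __name__ == "__main__":
    wallpaper = Wallpaper(path=".", keyword="mountains", page_num=1)
    wallpaper.download()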
|