hexsha | size | ext | lang | repo path | repo name | repo head hexsha | licenses | star/issue/fork counts (with event date ranges) | content

f737e882a690f66a006bd442966fc40e09ca108f | 3,782 | py | Python | contrib/macdeploy/custom_dsstore.py | HuntCoinDeveloper/huntcoin | 99198152d21b58ce598f46783074b64113cc5e64 | ["MIT"] | 2 stars (2019-05-13T02:10:08.000Z to 2019-05-26T14:47:29.000Z)

#!/usr/bin/env python
# Copyright (c) 2013-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from __future__ import division,print_function,unicode_literals
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
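# Invocation sketch (the arguments shown are illustrative): the script takes the
# output .DS_Store path and the volume/package name as its two positional args, e.g.
#   python custom_dsstore.py dist/.DS_Store Huntcoin-Core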
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': b'{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07huntcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00huntcoinuser:\x00Documents:\x00huntcoin:\x00huntcoin:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/huntcoinuser/Documents/huntcoin/huntcoin/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['Huntcoin-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()

f737e9bc7fdf402c2b1765bf61ad3dca7aaae98c | 49 | py | Python | videoanalyst/utils/__init__.py | JIANG-CX/data_labeling | 8d2470bbb537dfc09ed2f7027ed8ee7de6447248 | ["MIT"] | 1 star (2021-05-24T10:08:51.000Z)

from .misc import Registry, ensure_dir, load_cfg

f737e9d6e6c3287d9a34834dc5853ecc25c53032 | 2,019 | py | Python | scripts/librarian_assign_sessions.py | CutlerRU/librarian | 45205d7fc75740e29f4a90a5d91d6a4a07b5a0f1 | ["BSD-2-Clause"]

#! /usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
# Copyright 2016 the HERA Team.
# Licensed under the BSD License.
"""Tell the Librarian to assign any recent Observations to grouped "observing
sessions". You should only do this if no data are currently being taken,
because otherwise the currently-active session will be incorrectly described.
The RTP only ingests data from observations that have been assigned to
sessions, so this command must be run before the RTP will start working on a
night's data.
"""
from __future__ import absolute_import, division, print_function
import argparse
import os.path
import sys
import hera_librarian
p = argparse.ArgumentParser(
description=__doc__,
)
p.add_argument('--min-start-jd', dest='minimum_start_jd', metavar='JD', type=float,
help='Only consider observations starting after JD.')
p.add_argument('--max-start-jd', dest='maximum_start_jd', metavar='JD', type=float,
help='Only consider observations starting before JD.')
p.add_argument('conn_name', metavar='CONNECTION-NAME',
help='Which Librarian to talk to; as in ~/.hl_client.cfg.')
args = p.parse_args()
def die(fmt, *args):
if not len(args):
text = str(fmt)
else:
text = fmt % args
print('error:', text, file=sys.stderr)
sys.exit(1)
# Let's do it.
client = hera_librarian.LibrarianClient(args.conn_name)
try:
result = client.assign_observing_sessions(
minimum_start_jd=args.minimum_start_jd,
maximum_start_jd=args.maximum_start_jd,
)
except hera_librarian.RPCError as e:
die('assignment failed: %s', e)
try:
n = 0
for info in result['new_sessions']:
if n == 0:
print('New sessions created:')
print(' %(id)d: start JD %(start_time_jd)f, stop JD %(stop_time_jd)f, n_obs %(n_obs)d' % info)
n += 1
if n == 0:
print('No new sessions created.')
except Exception as e:
die('sessions created, but failed to print info: %s', e)

f737ea22ff2ba4acb8102d13caef6197f4c5f1ca | 6,618 | py | Python | dev/local/data/pipeline.py | vguerra/fastai_docs | 95df902ef5cd08bcd58d5ca64bc8a6ea3f297531 | ["Apache-2.0"]

#AUTOGENERATED! DO NOT EDIT! File to edit: dev/03_data_pipeline.ipynb (unless otherwise specified).
__all__ = ['get_func', 'Func', 'Sig', 'compose_tfms', 'batch_to_samples', 'mk_transform', 'Pipeline', 'TfmdBase',
'TfmdList', 'TfmdDS']
#Cell
from ..torch_basics import *
from ..test import *
from .transform import *
from ..notebook.showdoc import show_doc
#Cell
def get_func(t, name, *args, **kwargs):
"Get the `t.name` (potentially partial-ized with `args` and `kwargs`) or `noop` if not defined"
f = getattr(t, name, noop)
return f if not (args or kwargs) else partial(f, *args, **kwargs)
#Cell
class Func():
"Basic wrapper around a `name` with `args` and `kwargs` to call on a given type"
def __init__(self, name, *args, **kwargs): self.name,self.args,self.kwargs = name,args,kwargs
def __repr__(self): return f'sig: {self.name}({self.args}, {self.kwargs})'
def _get(self, t): return get_func(t, self.name, *self.args, **self.kwargs)
def __call__(self,t): return L(t).mapped(self._get) if is_listy(t) else self._get(t)
#Cell
class _Sig():
def __getattr__(self,k):
def _inner(*args, **kwargs): return Func(k, *args, **kwargs)
return _inner
Sig = _Sig()
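# Illustrative use (the method name and argument are hypothetical):
# `Sig.resize(224)` builds `Func('resize', 224)`; calling that on a type returns
# the type's `resize` partially applied with 224, falling back to `noop`.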
#Cell
def compose_tfms(x, tfms, is_enc=True, reverse=False, **kwargs):
"Apply all `func_nm` attribute of `tfms` on `x`, maybe in `reverse` order"
if reverse: tfms = reversed(tfms)
for f in tfms:
if not is_enc: f = f.decode
x = f(x, **kwargs)
return x
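# Minimal usage sketch (illustrative, not from the source): plain callables work
# on the encode path because `compose_tfms` simply calls each item in turn;
# decoding requires objects that expose a `.decode` method.
def _compose_tfms_demo():
    fs = [lambda x: x + 1, lambda x: x * 2]
    return compose_tfms(3, fs)  # (3 + 1) * 2 -> 8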
#Cell
def batch_to_samples(b, max_n=10):
"'Transposes' a batch to (at most `max_n`) samples"
if isinstance(b, Tensor): return b[:max_n]
else:
res = L(b).mapped(partial(batch_to_samples,max_n=max_n))
return L(retain_types(res.zipped(), [b]))
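# Worked example ('transposing' a tuple-structured batch into per-sample tuples):
#   x = torch.arange(6).reshape(3, 2); y = torch.tensor([10, 11, 12])
#   batch_to_samples((x, y), max_n=2)
#   # -> [(tensor([0, 1]), tensor(10)), (tensor([2, 3]), tensor(11))]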
#Cell
def mk_transform(f, as_item=True):
"Convert function `f` to `Transform` if it isn't already one"
f = instantiate(f)
return f if isinstance(f,Transform) else Transform(f, as_item=as_item)
#Cell
class Pipeline:
"A pipeline of composed (for encode/decode) transforms, setup with types"
def __init__(self, funcs=None, as_item=False, filt=None):
if isinstance(funcs, Pipeline): funcs = funcs.fs
elif isinstance(funcs, Transform): funcs = [funcs]
self.filt,self.default = filt,None
self.fs = L(ifnone(funcs,[noop])).mapped(mk_transform).sorted(key='order')
self.set_as_item(as_item)
for f in self.fs:
name = camel2snake(type(f).__name__)
a = getattr(self,name,None)
if a is not None: f = L(a)+f
setattr(self, name, f)
def set_as_item(self, as_item):
self.as_item = as_item
for f in self.fs: f.as_item = as_item
def setup(self, items=None):
self.items = items
tfms,self.fs = self.fs,L()
for t in tfms: self.add(t,items)
def add(self,t, items=None):
t.setup(items)
self.fs.append(t)
def __call__(self, o): return compose_tfms(o, tfms=self.fs, filt=self.filt)
def decode (self, o): return compose_tfms(o, tfms=self.fs, is_enc=False, reverse=True, filt=self.filt)
def __repr__(self): return f"Pipeline: {self.fs}"
def __getitem__(self,i): return self.fs[i]
def decode_batch(self, b, max_n=10): return batch_to_samples(b, max_n=max_n).mapped(self.decode)
def __setstate__(self,data): self.__dict__.update(data)
def __getattr__(self,k):
if k.startswith('_') or k=='fs': raise AttributeError(k)
res = sum(self.fs.attrgot(k).mapped(L), [])
if not res: raise AttributeError(k)
return res[0] if len(res)==1 else res
def show(self, o, ctx=None, **kwargs):
for f in reversed(self.fs):
res = self._show(o, ctx, **kwargs)
if res is not None: return res
o = f.decode(o, filt=self.filt)
return self._show(o, ctx, **kwargs)
def _show(self, o, ctx, **kwargs):
o1 = [o] if self.as_item or not is_listy(o) else o
if not all(hasattr(o_, 'show') for o_ in o1): return
for o_ in o1: ctx = o_.show(ctx=ctx, **kwargs)
return ifnone(ctx,1)
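# Usage sketch (illustrative; assumes the `Transform` wrapper produced by
# `mk_transform` treats a plain function's decode as identity):
def _pipeline_demo():
    pipe = Pipeline([lambda x: x * 2], as_item=True)
    pipe(3)         # -> 6: each transform's encode runs in `order`
    pipe.decode(6)  # -> 6: no decode was defined, so the value passes through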
#Cell
class TfmdBase(L):
"Base class for transformed lists"
def _gets(self, i): return L(self._get(i_) for i_ in mask2idxs(i))
def subset(self, idxs): return self._new(super()._gets(idxs))
def decode_at(self, idx): return self.decode(self[idx])
def show_at(self, idx, **kwargs): return self.show(self[idx], **kwargs)
#Cell
class TfmdList(TfmdBase):
"A `Pipeline` of `tfms` applied to a collection of `items`"
def __init__(self, items, tfms, do_setup=True, as_item=True, use_list=None, filt=None):
super().__init__(items, use_list=use_list)
if isinstance(tfms,TfmdList): tfms = tfms.tfms
if isinstance(tfms,Pipeline): do_setup=False
self.tfms = Pipeline(tfms, as_item=as_item, filt=filt)
if do_setup: self.setup()
def _new(self, items, *args, **kwargs): return super()._new(items, tfms=self.tfms, do_setup=False, use_list=None, filt=self.filt)
def _get (self, i): return self.tfms(super()._get(i))
def __repr__(self): return f"{self.__class__.__name__}: {self.items}\ntfms - {self.tfms.fs}"
# Delegating to `self.tfms`
def show(self, o, **kwargs): return self.tfms.show(o, **kwargs)
def setup(self): self.tfms.setup(self)
def decode(self, x, **kwargs): return self.tfms.decode(x, **kwargs)
def __call__(self, x, **kwargs): return self.tfms.__call__(x, **kwargs)
@property
def filt(self): return self.tfms.filt
@filt.setter
def filt(self,v): self.tfms.filt = v
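# Usage sketch (illustrative):
def _tfmdlist_demo():
    tl = TfmdList([1, 2, 3], tfms=[lambda x: -x])
    tl[0]            # -> -1: items are transformed lazily on access
    tl.decode_at(0)  # -> -1: a plain function decodes as identity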
#Cell
@docs
class TfmdDS(TfmdBase):
"A dataset that creates a tuple from each `tfms`, passed thru `ds_tfms`"
def __init__(self, items, tfms=None, do_setup=True, use_list=None, filt=None):
super().__init__(items, use_list=use_list)
        if tfms is None: tfms = [None]
self.tls = [TfmdList(items, t, do_setup=do_setup, filt=filt, use_list=use_list) for t in L(tfms)]
def _get(self, it): return tuple(tl._get(it) for tl in self.tls)
def __repr__(self): return coll_repr(self)
def decode(self, o): return tuple(it.decode(o_) for o_,it in zip(o,self.tls))
def show(self, o, ctx=None, **kwargs):
for o_,it in zip(o,self.tls): ctx = it.show(o_, ctx=ctx, **kwargs)
return ctx
@property
def filt(self): return self.tls[0].filt
@filt.setter
def filt(self,v):
for tl in self.tls: tl.filt = v
_docs=dict(
decode="Compose `decode` of all `tuple_tfms` then all `tfms` on `i`",
show="Show item `o` in `ctx`") | 39.392857 | 133 | 0.643246 |

f737eabe46d4f3bf805ab1749720629948465669 | 744 | py | Python | peering/api/urls.py | maznu/peering-manager | d249fcf530f4cc48b39429badb79bc203e0148ba | ["Apache-2.0"] | 127 stars | 247 issues | 63 forks

from peering_manager.api import OrderedDefaultRouter
from . import views
router = OrderedDefaultRouter()
router.APIRootView = views.PeeringRootView
router.register("autonomous-systems", views.AutonomousSystemViewSet)
router.register("bgp-groups", views.BGPGroupViewSet)
router.register("communities", views.CommunityViewSet)
router.register("direct-peering-sessions", views.DirectPeeringSessionViewSet)
router.register("internet-exchanges", views.InternetExchangeViewSet)
router.register(
"internet-exchange-peering-sessions", views.InternetExchangePeeringSessionViewSet
)
router.register("routers", views.RouterViewSet)
router.register("routing-policies", views.RoutingPolicyViewSet)
app_name = "peering-api"
urlpatterns = router.urls
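# Each registration above yields DRF-style list and detail routes, e.g.
# autonomous-systems/ and autonomous-systems/<pk>/, namespaced under
# "peering-api" (illustrative; exact routes depend on OrderedDefaultRouter).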

f737ead5619af5d5687eb2a45d4625f6ac1b2123 | 13,887 | py | Python | slackmojicode/compiler.py | puhitaku/slackmojicode | 0084aa0df029a0c34d47bcf63169872062d0eea3 | ["Unlicense"] | 6 stars (2016-12-03T14:50:41.000Z to 2020-11-04T16:03:32.000Z)

import bytecode, objects, errors
import ast as ast_objects
class Context(object):
"""Shamelessly plundered from Cycy"""
def __init__(self):
self.instructions = []
self.constants = []
self.variables = {}
#self.NULL = self.register_constant(objects.Null())
#self.TRUE = self.register_constant(objects.Boolean(True))
#self.FALSE = self.register_constant(objects.Boolean(False))
def emit(self, byte_code, arg=bytecode.NO_ARG):
assert(isinstance(byte_code,int))
assert(isinstance(arg,int))
self.instructions.append((byte_code,arg))
def register_variable(self, name):
index = len(self.variables)
self.variables[index] = objects.Variable(name,objects.Null())
return index
def register_constant(self, constant):
index = len(self.constants)
self.constants.append(constant)
return index
#def register_function(self, function):
# index = len(self.functions)
# self.functions[index] = function
# return index
def build(self, arguments=[], name="<input>"):
if isinstance(arguments, ast_objects.Null):
arguments = []
elif isinstance(arguments, ast_objects.Array):
arguments = [s.getname() for s in arguments.getstatements()]
return bytecode.Bytecode(
instructions=self.instructions,
name=name,
arguments=arguments,
constants=self.constants,
variables=self.variables,
)
def compile_program(context, ast):
assert(isinstance(ast,ast_objects.Program))
for statement in ast.get_statements():
compile_any(context,statement)
def compile_functiondeclaration(context, ast):
assert(isinstance(ast,ast_objects.FunctionDeclaration))
# new context, but need access to outer context
ctx = Context()
fn_index = context.register_variable(ast.name)
for v in context.constants:
ctx.constants.append(v)
for k, v in context.variables.iteritems():
ctx.variables[k] = v
indexes = []
if type(ast.args) is not ast_objects.Null:
for arg in reversed(ast.args.get_statements()):
assert(isinstance(arg,ast_objects.Variable))
name = str(arg.getname())
index = ctx.register_variable(name)
indexes.append(index)
#context.emit(bytecode.STORE_VARIABLE, index)
compile_block(ctx,ast.block)
fn = ctx.build(indexes, name=ast.name)
context.variables[fn_index] = objects.Variable(ast.name,objects.Function(ast.name,fn))
ctx.variables[fn_index] = objects.Variable(ast.name,objects.Function(ast.name,fn))
context.emit(bytecode.LOAD_VARIABLE,fn_index)
def compile_call(context, ast):
assert(isinstance(ast,ast_objects.Call))
    # push the call arguments onto the stack before emitting CALL
if type(ast.args) is ast_objects.InnerArray:
for arg in ast.args.get_statements():
compile_any(context, arg)
index = -1
for k, v in context.variables.iteritems():
assert(isinstance(v, objects.Variable))
#assert(isinstance(v.value, objects.Function))
if v.name == ast.name:
index = k
if index > -1:
context.emit(bytecode.CALL, index)
else:
raise Exception("function %s does not exist" % ast.name)
def compile_block(context, ast):
assert(isinstance(ast,ast_objects.Block))
for statement in ast.get_statements():
compile_any(context,statement)
def compile_innerarray(context, ast):
assert(isinstance(ast,ast_objects.InnerArray))
    # used for function argument lists
for statement in ast.get_statements():
compile_any(context,statement)
def compile_array(context, ast):
assert(isinstance(ast,ast_objects.Array))
length = len(ast.get_statements())
for statement in reversed(ast.get_statements()):
compile_any(context,statement)
context.emit(bytecode.STORE_ARRAY,length)
def compile_innerdict(context, ast):
assert(isinstance(ast,ast_objects.InnerDict))
for key, val in ast.get_data().iteritems():
compile_any(context,key)
compile_any(context,val)
def compile_dict(context, ast):
assert(isinstance(ast,ast_objects.Dict))
length = len(ast.get_data().keys())
for key, val in ast.get_data().iteritems():
compile_any(context,key)
compile_any(context,val)
context.emit(bytecode.STORE_DICT,length)
def compile_null(context, ast):
assert(isinstance(ast,ast_objects.Null))
context.emit(bytecode.LOAD_CONST,0)
def compile_boolean(context, ast):
assert(isinstance(ast,ast_objects.Boolean))
value = objects.Boolean(ast.value)
index = context.register_constant(value)
context.emit(bytecode.LOAD_CONST,index)
def compile_integer(context, ast):
assert(isinstance(ast,ast_objects.Integer))
value = objects.Integer(ast.value)
index = context.register_constant(value)
context.emit(bytecode.LOAD_CONST,index)
def compile_float(context, ast):
assert(isinstance(ast,ast_objects.Float))
value = objects.Float(ast.value)
index = context.register_constant(value)
context.emit(bytecode.LOAD_CONST,index)
def compile_string(context, ast):
assert(isinstance(ast,ast_objects.String))
value = objects.String(ast.value)
index = context.register_constant(value)
context.emit(bytecode.LOAD_CONST,index)
def compile_variable(context, ast):
assert(isinstance(ast,ast_objects.Variable))
index = None
for k, v in context.variables.iteritems():
assert(isinstance(v,objects.Variable))
if v.name == ast.getname():
index = k
break
if index is not None:
context.emit(bytecode.LOAD_VARIABLE,index)
else:
raise Exception("Variable %s not yet defined" % ast.getname())
def compile_print(context, ast):
assert(isinstance(ast,ast_objects.Print))
compile_any(context,ast.value)
context.emit(bytecode.PRINT,bytecode.NO_ARG)
def compile_if(context, ast):
# compile the condition
assert(isinstance(ast, ast_objects.If))
compile_any(context, ast.condition)
# add true
t = context.register_constant(objects.Boolean(True))
context.emit(bytecode.LOAD_CONST,t)
# compare the condition to true
context.emit(bytecode.BINARY_EQ,bytecode.NO_ARG)
# condition:
# jump if zero (false): false block
# true block
# jump to end
# false block
# TODO: let jump target labels, not values! store the name of the jump
# in a constant and then reference that constant name, which can contain the
# jump position and be updated if need be
context.emit(bytecode.JUMP_IF_ZERO,0)
# make a note of the instruction we'll have to change
false_jump = len(context.instructions) - 1
# then add the true block
compile_any(context,ast.body)
# then a jump from the true block to after the false block
context.emit(bytecode.JUMP,0)
# the start of the false block is the current length
false_block = len(context.instructions)
# so set the false block jump to that point
context.instructions[false_jump] = (context.instructions[false_jump][0],false_block)
compile_any(context,ast.else_body)
# get the point we're at now
after_false = len(context.instructions)
# then change the true jump to point here
context.instructions[false_block-1] = (context.instructions[false_block-1][0], after_false)
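# Resulting instruction layout for an if/else (schematic):
#   <condition>; LOAD_CONST True; BINARY_EQ
#   JUMP_IF_ZERO -> false_block
#   <true body>
#   JUMP -> after_false
#   false_block: <else body>
#   after_false: ...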
def compile_while(context, ast):
assert(isinstance(ast, ast_objects.While))
condition_pos = len(context.instructions)
compile_any(context, ast.condition)
# add true
t = context.register_constant(objects.Boolean(True))
context.emit(bytecode.LOAD_CONST,t)
# compare the condition to true
context.emit(bytecode.BINARY_EQ,bytecode.NO_ARG)
# condition:
# jump if zero (false): after the block
# block
# jump to condition
# this will point to after the end
context.emit(bytecode.JUMP_IF_ZERO,0)
# make a note of the instruction we'll have to change
false_jump = len(context.instructions) - 1
compile_any(context,ast.body)
context.emit(bytecode.JUMP,condition_pos)
after_block = len(context.instructions)
context.instructions[false_jump] = (context.instructions[false_jump][0],after_block)
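# Resulting loop layout (schematic):
#   condition_pos: <condition>; LOAD_CONST True; BINARY_EQ
#   JUMP_IF_ZERO -> after_block
#   <body>
#   JUMP -> condition_pos
#   after_block: ...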
def compile_equal(context, ast):
assert(isinstance(ast,ast_objects.Equal))
compile_any(context, ast.left)
compile_any(context, ast.right)
context.emit(bytecode.BINARY_EQ,bytecode.NO_ARG)
def compile_notequal(context, ast):
assert(isinstance(ast,ast_objects.NotEqual))
compile_any(context, ast.left)
compile_any(context, ast.right)
context.emit(bytecode.BINARY_NEQ,bytecode.NO_ARG)
def compile_greaterthan(context, ast):
assert(isinstance(ast,ast_objects.GreaterThan))
compile_any(context, ast.left)
compile_any(context, ast.right)
context.emit(bytecode.BINARY_GT,bytecode.NO_ARG)
def compile_greaterthanequal(context, ast):
assert(isinstance(ast,ast_objects.GreaterThanEqual))
compile_any(context, ast.left)
compile_any(context, ast.right)
context.emit(bytecode.BINARY_GTE,bytecode.NO_ARG)
def compile_lessthan(context, ast):
assert(isinstance(ast,ast_objects.LessThan))
compile_any(context, ast.left)
compile_any(context, ast.right)
context.emit(bytecode.BINARY_LT,bytecode.NO_ARG)
def compile_lessthanequal(context, ast):
assert(isinstance(ast,ast_objects.LessThanEqual))
compile_any(context, ast.left)
compile_any(context, ast.right)
context.emit(bytecode.BINARY_LTE,bytecode.NO_ARG)
def compile_and(context, ast):
assert(isinstance(ast,ast_objects.And))
compile_any(context, ast.left)
compile_any(context, ast.right)
context.emit(bytecode.BINARY_AND,bytecode.NO_ARG)
def compile_or(context, ast):
assert(isinstance(ast,ast_objects.Or))
compile_any(context, ast.left)
compile_any(context, ast.right)
context.emit(bytecode.BINARY_OR,bytecode.NO_ARG)
def compile_not(context, ast):
assert(isinstance(ast,ast_objects.Not))
compile_any(context, ast.value)
context.emit(bytecode.NOT,bytecode.NO_ARG)
def compile_add(context, ast):
assert(isinstance(ast,ast_objects.Add))
compile_any(context, ast.left)
compile_any(context, ast.right)
context.emit(bytecode.BINARY_ADD,bytecode.NO_ARG)
def compile_sub(context, ast):
assert(isinstance(ast,ast_objects.Sub))
compile_any(context, ast.left)
compile_any(context, ast.right)
context.emit(bytecode.BINARY_SUB,bytecode.NO_ARG)
def compile_mul(context, ast):
assert(isinstance(ast,ast_objects.Mul))
compile_any(context, ast.left)
compile_any(context, ast.right)
context.emit(bytecode.BINARY_MUL,bytecode.NO_ARG)
def compile_div(context, ast):
assert(isinstance(ast,ast_objects.Div))
compile_any(context, ast.left)
compile_any(context, ast.right)
context.emit(bytecode.BINARY_DIV,bytecode.NO_ARG)
def compile_assignment(context, ast):
assert(isinstance(ast,ast_objects.Assignment))
assert(isinstance(ast.left,ast_objects.Variable))
name = str(ast.left.getname())
index = None
for k, v in context.variables.iteritems():
assert(isinstance(v,objects.Variable))
if v.name == name:
index = k
break
if index is None:
index = context.register_variable(name)
compile_any(context, ast.right)
context.emit(bytecode.STORE_VARIABLE, index)
def compile_argument(context, name):
index = context.register_variable(str(name))
context.emit(bytecode.STORE_VARIABLE, index)
def compile_index(context, ast):
assert(isinstance(ast,ast_objects.Index))
compile_any(context, ast.right)
compile_any(context, ast.left)
context.emit(bytecode.INDEX,bytecode.NO_ARG)
def compile_any(context, ast):
typename = ast.__class__.__name__.lower()
#funcname = "compile_%s" % typename.lower()
funcs = {
"index":compile_index,
"div":compile_div,
"sub":compile_sub,
"mul":compile_mul,
"assignment":compile_assignment,
"argument":compile_argument,
"add":compile_add,
"call":compile_call,
"functiondeclaration":compile_functiondeclaration,
"block":compile_block,
"or":compile_or,
"and":compile_and,
"not":compile_not,
"print":compile_print,
"string":compile_string,
"integer":compile_integer,
"float":compile_float,
"boolean":compile_boolean,
"array":compile_array,
"innerarray":compile_innerarray,
"dict":compile_dict,
"innerdict":compile_dict,
"program":compile_program,
"null":compile_null,
"variable":compile_variable,
"if":compile_if,
"while":compile_while,
"greaterthan":compile_greaterthan,
"greaterthanequal":compile_greaterthanequal,
"lessthan":compile_lessthan,
"lessthanequal":compile_lessthanequal,
"equal":compile_equal,
"notequal":compile_notequal,
}
func = funcs.get(typename,None)
if func:
func(context, ast)
else:
raise Exception("Cannot compile %s - cannot find it" % (typename))
def compile(ast, context=None):
"""
Begin here.
"""
if context is None:
context = Context()
compile_any(context, ast)
context.emit(bytecode.RETURN,1)
return context.build()
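# Usage sketch (the parser that produces `ast_objects.Program` is hypothetical
# and lives outside this module):
#   program = parse(source)   # -> ast_objects.Program
#   bc = compile(program)     # -> bytecode.Bytecode ending in RETURN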

f737ebad449e849bb4b5f32df937cb8c0e897f08 | 8,853 | py | Python | scripts/stake_emu.py | pixelplex-dev/lotus | 39a1e9041a748981dd2d085e350d97f9e8c51f40 | ["Apache-2.0", "MIT"] | 34 stars | 20 issues | 16 forks

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from collections import defaultdict
EpochsInHour = 120
EpochsInDay = 2880
FIL_PRECISION = 10**18
class RunTime(object):
def __init__(self):
self.epoch = 0
self.caller = ""
self.amount = 0
class VestingSpec(object):
def __init__(self, vest_period, step_duration):
self.step_duration = step_duration
self.vest_period = vest_period
self.initial_delay = 0
self.quantization = 12 * EpochsInHour
class VestingFunds(object):
def __init__(self):
self.funds = []
def unlock_vested_funds(self, curr_epoch):
unlocked = 0
last_index_to_rm = -1
for i, (epoch, amount) in enumerate(self.funds):
if epoch >= curr_epoch:
break
unlocked += amount
last_index_to_rm = i
if last_index_to_rm != -1:
self.funds = self.funds[last_index_to_rm+1:]
return unlocked
def quantize_up(self, e, unit, offset_seed):
offset = offset_seed % unit
remainder = (e - offset) % unit
quotient = (e - offset) // unit
if remainder == 0:
return unit * quotient + offset
if (e - offset) < 0:
return unit * quotient + offset
return unit * (quotient + 1) + offset
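    # Worked examples with unit=60 and offset_seed=30 (so offset=30):
    #   quantize_up(90, 60, 30)  -> 90   (90 - 30 is an exact multiple of 60)
    #   quantize_up(100, 60, 30) -> 150  (rounded up to the next boundary: 60*2 + 30)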
def add_locked_funds(self, curr_epoch, vesting_sum, stake_period_start, vest_spec: VestingSpec):
epoch_to_index = {}
for i, (epoch, amount) in enumerate(self.funds):
epoch_to_index[epoch] = i
vest_begin = curr_epoch + vest_spec.initial_delay
vested_so_far = 0
e = vest_begin + vest_spec.step_duration
while vested_so_far < vesting_sum:
vest_epoch = self.quantize_up(e, vest_spec.quantization, stake_period_start)
elapsed = vest_epoch - vest_begin
if elapsed < vest_spec.vest_period:
target_vest = vesting_sum * elapsed // vest_spec.vest_period
else:
target_vest = vesting_sum
vest_this_time = target_vest - vested_so_far
vested_so_far = target_vest
if vest_epoch in epoch_to_index:
index = epoch_to_index[vest_epoch]
epoch, amount = self.funds[index]
self.funds[index] = (epoch, amount+vest_this_time)
else:
self.funds.append((vest_epoch, vest_this_time))
epoch_to_index[vest_epoch] = len(self.funds) - 1
e += vest_spec.step_duration
self.funds = sorted(self.funds, key=lambda x: x[0])
class StakeActor(object):
def __init__(self, round_period, principal_lock_duration, mature_period, max_reward_per_round, inflation_factor, first_round_epoch, vest_spec):
self.round_period = round_period
self.principal_lock_duration = principal_lock_duration
self.mature_period = mature_period
self.max_reward_per_round = max_reward_per_round
self.inflation_factor = inflation_factor
self.stake_period_start = first_round_epoch
self.next_round_epoch = first_round_epoch
self.vest_spec = vest_spec
self.total_stake_power = 0
self.last_round_reward = 0
self.inflation_denominator = 10000
self.locked_principal_map = defaultdict(list)
self.available_principal_map = defaultdict(int)
self.vesting_reward_map = defaultdict(VestingFunds)
self.available_reward_map = defaultdict(int)
self.stake_power_map = defaultdict(int)
def deposit(self, rt: RunTime):
self.locked_principal_map[rt.caller].append((rt.epoch, rt.amount))
def withdraw_principal(self, rt: RunTime):
amount = rt.amount
avail = self.available_principal_map[rt.caller]
if amount <= avail:
self.available_principal_map[rt.caller] -= amount
else:
print("!:", rt.epoch, "error withdraw_principal more than available")
    def withdraw_reward(self, rt: RunTime):
        amount = rt.amount
        avail = self.available_reward_map[rt.caller]
        if amount <= avail:
            self.available_reward_map[rt.caller] -= amount
        else:
            print("!:", rt.epoch, "error withdraw_reward more than available")
def unlock_locked_principals(self, rt: RunTime):
for staker, locked_principals in self.locked_principal_map.items():
unlocked = 0
last_index_to_rm = -1
for i, (epoch, amount) in enumerate(locked_principals):
if epoch + self.principal_lock_duration >= rt.epoch:
break
unlocked += amount
last_index_to_rm = i
if last_index_to_rm != -1:
self.locked_principal_map[staker] = locked_principals[last_index_to_rm+1:]
self.available_principal_map[staker] += unlocked
def update_stake_powers(self, rt: RunTime):
total = 0
powers = defaultdict(int)
for staker, locked_principals in self.locked_principal_map.items():
power = 0
for (epoch, amount) in locked_principals:
if epoch + self.mature_period >= rt.epoch:
break
power += amount
powers[staker] = power
total += power
for staker, available_principal in self.available_principal_map.items():
powers[staker] += available_principal
total += available_principal
self.stake_power_map = powers
self.total_stake_power = total
def unlock_vesting_rewards(self, rt: RunTime):
for staker, vesting_funds in self.vesting_reward_map.items():
unlocked = vesting_funds.unlock_vested_funds(rt.epoch)
self.vesting_reward_map[staker] = vesting_funds
self.available_reward_map[staker] += unlocked
def distribute_rewards(self, rt: RunTime) -> int:
assert rt.epoch >= self.next_round_epoch
total_reward = 0
vest_spec = self.vest_spec
if self.total_stake_power > 0:
total_reward = self.total_stake_power * self.inflation_factor // self.inflation_denominator
total_reward = min(total_reward, self.max_reward_per_round)
if total_reward > 0:
for staker, power in self.stake_power_map.items():
vesting_sum = power * total_reward // self.total_stake_power
if vesting_sum > 0:
vesting_funds = self.vesting_reward_map[staker]
vesting_funds.add_locked_funds(rt.epoch, vesting_sum, self.stake_period_start, vest_spec)
return total_reward
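    # Reward arithmetic: with inflation_factor=100 and inflation_denominator=10000,
    # each round pays 1% of total stake power (capped at max_reward_per_round),
    # split pro rata by staker power; e.g. a lone staker with 10,000 FIL of
    # matured power vests 100 FIL per round.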
def on_epoch_tick(self, rt: RunTime):
self.unlock_locked_principals(rt)
self.update_stake_powers(rt)
self.unlock_vesting_rewards(rt)
if rt.epoch >= self.next_round_epoch:
self.last_round_reward = self.distribute_rewards(rt)
self.next_round_epoch += self.round_period
class Message(object):
def __init__(self, epoch: int, sender: str, amount: int, func):
self.epoch = epoch
self.sender = sender
self.amount = amount
self.func = func
class VM(object):
def __init__(self, stake_actor: StakeActor):
self.stake_actor = stake_actor
def exec(self, messages: list[Message], stop_at: int):
rt = RunTime()
message_map = defaultdict(list[Message])
for msg in messages:
message_map[msg.epoch].append(msg)
for epoch in range(0, stop_at + 1):
rt.epoch = epoch
for msg in message_map[epoch]:
rt.caller = msg.sender
rt.amount = msg.amount
msg.func(rt, self.stake_actor)
rt.caller = "system"
rt.amount = 0
self.stake_actor.on_epoch_tick(rt)
def run():
stake_actor = StakeActor(
round_period=EpochsInDay,
principal_lock_duration=90*EpochsInDay,
mature_period=12*EpochsInHour,
max_reward_per_round=30000*FIL_PRECISION,
inflation_factor=100,
first_round_epoch=584461,
vest_spec=VestingSpec(180*EpochsInDay, EpochsInDay)
)
vm = VM(stake_actor)
messages = []
messages.append(Message(epoch=584480, sender="t001", amount=10000*FIL_PRECISION, func=lambda rt, actor: actor.deposit(rt)))
vm.exec(messages, stop_at=608000)
print("locked_principal_map", stake_actor.locked_principal_map)
print("available_principal_map", stake_actor.available_principal_map)
print("stake_power_map", stake_actor.stake_power_map)
print("total_stake_power", stake_actor.total_stake_power)
print("vesting_reward_map", stake_actor.vesting_reward_map["t001"].funds)
if __name__ == "__main__":
run() | 38.491304 | 147 | 0.636281 |
f737ec2071e3d896cc9502a59fb7b4c4c1fc4562 | 20,598 | py | Python | combiner/combiner/tf/approx_attention.py | gunpowder78/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | ["Apache-2.0"] | 1 | 2022-03-13T21:48:52.000Z | 2022-03-13T21:48:52.000Z | combiner/combiner/tf/approx_attention.py | gunpowder78/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | ["Apache-2.0"] | null | null | null | combiner/combiner/tf/approx_attention.py | gunpowder78/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | ["Apache-2.0"] | 1 | 2022-03-30T07:20:29.000Z | 2022-03-30T07:20:29.000Z |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
import tensorflow.compat.v1 as tf
import math
from combiner.tf import attention
from combiner.tf import ops
import functools
def shift_right(x, axis):
"""Shift input x to the right along given axis."""
pad_widths = [(0, 0)] * len(x.shape)
pad_widths[axis] = (1, 0)
padded = tf.pad(x, pad_widths)
return tf.slice(padded, begin=[0]*len(x.shape), size=x.shape)
def shift_left(x, axis):
"""Shift input x to the left along given axis."""
pad_widths = [(0, 0)] * len(x.shape)
pad_widths[axis] = (0, 1)
padded = tf.pad(x, pad_widths)
begin = [0]*len(x.shape)
begin[axis] = 1
return tf.slice(padded, begin=begin, size=x.shape)
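# Illustrative behaviour (hypothetical 1-D input along the shifted axis):
# shift_right turns [1, 2, 3] into [0, 1, 2] and shift_left turns it into
# [2, 3, 0]; both keep the original shape and zero-fill the vacated slot.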
def approx_cummax(x, axis, exclusive=False, reverse=False):
"""Approximate the cummax operation in jax."""
sum_x = tf.math.cumsum(x, axis, exclusive=exclusive, reverse=reverse)
# return tf.math.cumsum(tf.nn.relu(x), axis, reverse=reverse)
return sum_x
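# Note: despite its name, approx_cummax currently returns a plain cumulative
# sum as a smooth surrogate for cummax; the relu-based variant above is left
# commented out.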
def get_causal_mask(x, axis, is_strict, upper=False):
"""Get attention mask bias (keep a lower triangle).
Args:
x: input tensor
axis: across which dim to make mask
is_strict: if True, the diagonal will be masked out as well.
upper: upper or lower triangle
Returns:
mask: tensor of {0, -1e9} ^ (x.shape[axis], x.shape[axis])
"""
seq_len = tf.shape(x)[axis]
if is_strict:
if upper:
mask = tf.linalg.band_part(
tf.ones([seq_len, seq_len], dtype=x.dtype),
num_lower=-1, num_upper=0)
else:
mask = tf.linalg.band_part(
tf.ones([seq_len, seq_len], dtype=x.dtype),
num_lower=0, num_upper=-1)
else:
if upper:
mask = 1.0 - tf.linalg.band_part(
tf.ones([seq_len, seq_len], dtype=x.dtype),
num_lower=0, num_upper=-1)
else:
mask = 1.0 - tf.linalg.band_part(
tf.ones([seq_len, seq_len], dtype=x.dtype),
num_lower=-1, num_upper=0)
mask = -1e9 * mask
return mask
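# Illustrative usage: with seq_len=3, is_strict=False and upper=False, the
# returned [3, 3] additive bias is 0 on and below the diagonal and -1e9
# above it, so adding it to attention logits before softmax blocks
# attention to future positions.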
def pooling_summary(x, axis, local_summary, keepdims=False):
"""Perform a cheap pooling summary of a span.
Args:
x: input tensor
axis: over which axis to summarize
local_summary: str of format activation-pooling, choose
from {relu, identity}-{max, sum, mean}
keepdims: whether to keep the summarized singleton axis.
Returns:
y: the same shape as x for other axis,
except y.shape[axis] = 1 if keepdims=True,
otherwise y.rank = x.rank + 1
"""
act, pool = local_summary.split('-')
if act == 'relu':
x = tf.nn.relu(x)
elif act == 'identity':
pass
elif act == 'deepset':
x = ops.trail_dense(x, x.shape.as_list()[-1], bias=False)
x = tf.nn.relu(x)
else:
raise ValueError('Unsupported activation: %s' % act)
if pool == 'mean':
x = tf.math.reduce_mean(x, axis=axis, keepdims=keepdims)
elif pool == 'max':
x = tf.math.reduce_max(x, axis=axis, keepdims=keepdims)
elif pool == 'sum':
x = tf.math.reduce_sum(x, axis=axis, keepdims=keepdims)
else:
raise ValueError('Unsupported pooling: %s' % pool)
return x
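# Illustrative usage: with local_summary='relu-mean' and keepdims=True, a
# [B, S, L, H, D] input summarized over axis=2 becomes [B, S, 1, H, D],
# i.e. one cheap summary vector per segment.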
def axial_mixture_unidir(x, config, is_training=True, causal=True):
"""Full attention matrix with axial pattern as local and mixture for global summary."""
del is_training
assert causal
bsize = x.shape[0]
query, key, value = attention.get_qkv(x, x, x, hidden_size=config.model_size,
num_heads=config.num_heads, bias=config.dense_use_bias)
head_dim = config.model_size // config.num_heads
assert config.max_seq_len % config.max_seg_len == 0
num_seg = config.max_seq_len // config.max_seg_len
cur_query = tf.reshape(query, [bsize,
num_seg,
config.max_seg_len,
config.num_heads,
head_dim])
cur_key = tf.reshape(key, cur_query.shape)
cur_val = tf.reshape(value, cur_query.shape)
col_logit_expr = 'BSUNK,BTUNK->BUNST'
col_attn_expr = 'BUNST,BTUNK->BSUNK'
col_strict_mask = get_causal_mask(cur_query,
axis=1,
is_strict=True)[tf.newaxis, tf.newaxis, tf.newaxis, :, :]
row_logit_expr = 'BUSNK,BUTNK->BUNST'
row_attn_expr = 'BUNST,BUTNK->BUSNK'
row_mask = get_causal_mask(cur_query,
axis=2,
is_strict=False)[tf.newaxis, tf.newaxis, tf.newaxis, :, :]
col_logits = tf.einsum(col_logit_expr, cur_query, cur_key) + col_strict_mask
row_logits = tf.einsum(row_logit_expr, cur_query, cur_key) + row_mask
###################
col_up2down_query = approx_cummax(cur_query, axis=1)
col_up2down_key = shift_right(approx_cummax(cur_key, axis=1), axis=1)
col_mask = get_causal_mask(
cur_query, axis=1, is_strict=False)[tf.newaxis, tf.newaxis,
tf.newaxis, :, :]
col_up2down_logits = tf.einsum(col_logit_expr, col_up2down_query,
cur_key) + col_mask
col_up2down_attn_weights = attention.float32_softmax(
col_up2down_logits, axis=-1)
col_up2down_summary = tf.einsum(col_attn_expr, col_up2down_attn_weights,
cur_val)
col_up2down_summary = shift_right(col_up2down_summary, axis=1)
row_only_myself_mask = tf.eye(tf.shape(cur_query)[2], dtype=cur_query.dtype)[tf.newaxis, tf.newaxis, tf.newaxis, :, :]
row_without_myself_mask = -1e9 * row_only_myself_mask
all_maskout = tf.cast(tf.fill(row_without_myself_mask.shape, -1e9), cur_query.dtype)
row_without_myself_mask = tf.concat([all_maskout] + [row_without_myself_mask] * (cur_query.shape[1] - 1),
axis=1)
previous_row_logits = tf.einsum(row_logit_expr, cur_query, col_up2down_key) + row_without_myself_mask
###################
row_left2right_query = approx_cummax(cur_query, axis=2)
row_left2right_key = shift_right(approx_cummax(cur_key, axis=2), axis=2)
row_left2right_logits = tf.einsum(row_logit_expr, row_left2right_query,
cur_key) + row_mask
row_left2right_attn_weights = attention.float32_softmax(
row_left2right_logits, axis=-1)
row_left2right_summary = tf.einsum(row_attn_expr, row_left2right_attn_weights,
cur_val)
row_left2right_summary = shift_right(row_left2right_summary, axis=2)
all_maskout = tf.cast(tf.fill(col_strict_mask.shape, -1e9), cur_query.dtype)
col_strict_without_first_mask = tf.concat(
[all_maskout] + [col_strict_mask] * (cur_query.shape[2] - 1), axis=1)
top_left_col_logits = tf.einsum(
col_logit_expr, cur_query,
row_left2right_key) + col_strict_without_first_mask
###################
row_right2left_query = approx_cummax(cur_query, axis=2, reverse=True)
row_right2left_key = shift_left(
approx_cummax(cur_key, axis=2, reverse=True), axis=2)
row_upper_mask = get_causal_mask(
cur_query, axis=2, is_strict=False, upper=True)[tf.newaxis, tf.newaxis,
tf.newaxis, :, :]
row_right2left_logits = tf.einsum(row_logit_expr, row_right2left_query,
cur_key) + row_upper_mask
row_right2left_attn_weights = attention.float32_softmax(
row_right2left_logits, axis=-1)
row_right2left_summary = tf.einsum(row_attn_expr, row_right2left_attn_weights,
cur_val)
row_right2left_summary = shift_left(row_right2left_summary, axis=2)
col_strict_without_last_mask = tf.concat(
[col_strict_mask] * (cur_query.shape[2] - 1) + [all_maskout], axis=1)
top_right_col_logits = tf.einsum(
col_logit_expr, cur_query,
row_right2left_key) + col_strict_without_last_mask
###################
joint_logits = tf.concat([
tf.transpose(col_logits, perm=[0, 3, 2, 1, 4]), row_logits,
previous_row_logits,
tf.transpose(top_left_col_logits, perm=[0, 3, 2, 1, 4]),
tf.transpose(top_right_col_logits, perm=[0, 3, 2, 1, 4])
],
axis=-1)
attn_weights = attention.float32_softmax(joint_logits, axis=-1)
col_att, row_att, previous_row_att, top_left_col_att, top_right_col_att = tf.split(attn_weights,
[num_seg,
config.max_seg_len,
config.max_seg_len,
num_seg,
num_seg], axis=-1)
col_att = tf.transpose(col_att, [0, 3, 2, 1, 4])
top_left_col_att = tf.transpose(top_left_col_att, [0, 3, 2, 1, 4])
top_right_col_att = tf.transpose(top_right_col_att, [0, 3, 2, 1, 4])
col_merged = tf.einsum(col_attn_expr, col_att, cur_val)
row_merged = tf.einsum(row_attn_expr, row_att, cur_val)
previous_row_merged = tf.einsum(row_attn_expr, previous_row_att,
col_up2down_summary)
top_left_merged = tf.einsum(col_attn_expr, top_left_col_att,
row_left2right_summary)
top_right_merged = tf.einsum(col_attn_expr, top_right_col_att,
row_right2left_summary)
joint_merged = tf.reshape(
col_merged + row_merged + previous_row_merged + top_left_merged +
top_right_merged,
[bsize, num_seg * config.max_seg_len, config.num_heads, head_dim])
output = ops.trail_dense(joint_merged, config.model_size, begin_axis=-2)
return output
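# Recap of axial_mixture_unidir: each query mixes, under one joint softmax,
# five causal components -- same-column logits, same-row logits, and roughly
# speaking summaries of earlier rows (top-down cumulative proxies) plus
# left-to-right / right-to-left row-prefix summaries.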
def sqrt_fixed_full(x, config, is_training=True, causal=True):
"""Full attention matrix with sqrt decomposition."""
bsize = x.shape[0]
query, key, value = attention.get_qkv(x, x, x, hidden_size=config.model_size,
num_heads=config.num_heads,
bias=config.dense_use_bias)
head_dim = config.model_size // config.num_heads
assert config.max_seq_len % config.max_seg_len == 0
num_seg = config.max_seq_len // config.max_seg_len
cur_query = tf.reshape(query, [-1,
num_seg,
config.max_seg_len,
config.num_heads,
head_dim])
with tf.variable_scope('pooling_query'):
merged_query = pooling_summary(cur_query, axis=2,
local_summary=config.local_summary,
keepdims=True)
cur_key = tf.reshape(key, cur_query.shape)
cur_val = tf.reshape(value, cur_query.shape)
span_val = attention.dot_product_attention(merged_query,
cur_key,
cur_val,
is_training=is_training,
attn_axis=1,
dropatt=config.dropatt)
span_val = tf.squeeze(span_val, axis=2)
with tf.variable_scope('pooling_key'):
span_key = pooling_summary(cur_key, axis=2,
local_summary=config.local_summary,
keepdims=False)
local_logits = tf.einsum('bsqhd,bskhd->bsqhk', cur_query, cur_key)
if causal:
local_mask = get_causal_mask(cur_query, axis=2, is_strict=False)
local_mask = tf.expand_dims(local_mask, axis=-2)
local_logits += local_mask
prev_logits = tf.einsum('bqhd,bkhd->bqhk', query, span_key)
if causal:
prev_mask = get_causal_mask(cur_query, axis=1, is_strict=True)
prev_mask = tf.repeat(prev_mask, [config.max_seg_len] * num_seg, axis=0)
prev_logits += tf.expand_dims(prev_mask, axis=1)
joint_logits = tf.concat([tf.reshape(local_logits,
[bsize, config.max_seq_len,
config.num_heads, -1]),
prev_logits], axis=-1)
attn_weights = attention.float32_softmax(joint_logits, axis=-1)
local_att, prev_att = tf.split(attn_weights, [config.max_seg_len, num_seg],
axis=-1)
if is_training:
local_att = tf.nn.dropout(local_att, rate=config.dropatt)
local_att = tf.reshape(local_att, [bsize, num_seg,
config.max_seg_len,
config.num_heads,
config.max_seg_len])
local_merged = tf.einsum('bsqhk,bskhd->bsqhd', local_att, cur_val)
prev_merged = tf.einsum('bqhk,bkhd->bqhd', prev_att, span_val)
joint_merged = prev_merged + tf.reshape(local_merged, prev_merged.shape)
output = ops.trail_dense(joint_merged, config.model_size, begin_axis=-2)
return output
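# Structure recap: sqrt_fixed_full attends exactly within each
# max_seg_len-sized segment and, for longer-range context, attends to one
# pooled key/value per preceding segment, so each query scores
# max_seg_len + num_seg candidates instead of the full sequence.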
def axial_rowmajor(x, config, is_training=True, causal=True):
"""Full attention matrix with sqrt decomposition."""
bsize = x.shape[0]
seq_len = x.shape.as_list()[1]
head_dim = config.model_size // config.num_heads
assert seq_len % config.max_seg_len == 0
num_seg = seq_len // config.max_seg_len
x_sqr = tf.reshape(x,
[bsize, num_seg, config.max_seg_len, config.model_size])
q_row_local, key_row_local, value_row_local = attention.get_qkv(
x_sqr, x_sqr, x_sqr, hidden_size=config.model_size,
num_heads=config.num_heads, bias=config.dense_use_bias)
local_logits = tf.einsum('bsqhd,bskhd->bsqhk', q_row_local, key_row_local)
row_probs = attention.float32_softmax(local_logits, axis=-1)
if is_training:
row_probs = tf.nn.dropout(row_probs, rate=config.dropatt)
row_attn_out = tf.einsum('bsqhk,bskhd->bsqhd', row_probs, value_row_local)
if config.row_summary == 'none':
key_row = key_row_local
elif config.row_summary in ['wsum', 'proj', 'wsum_proj']:
if 'wsum' in config.row_summary:
pre_summary = tf.einsum('bsqhk,bskhd->bsqhd', row_probs, key_row_local)
else:
pre_summary = row_attn_out
if 'proj' in config.row_summary:
with tf.variable_scope('rowmajor_param_post'):
key_row = ops.trail_dense(pre_summary, config.model_size, begin_axis=-2,
bias=config.dense_use_bias)
key_row = ops.postprocess(x_sqr, key_row, config, is_training)
_, key_row = ops.preprocess(key_row, config)
key_row = ops.trail_dense(key_row, [config.num_heads, head_dim],
bias=config.dense_use_bias)
else:
key_row = pre_summary
else:
raise ValueError('Unknown row summary %s' % config.row_summary)
if causal:
local_mask = get_causal_mask(q_row_local, axis=2, is_strict=False)
local_logits += local_mask[:, tf.newaxis, :]
global_logits = tf.einsum('bqlhd,bklhd->bqlhk', q_row_local, key_row)
if causal:
global_mask = get_causal_mask(q_row_local, axis=1, is_strict=True)
global_logits += global_mask[:, tf.newaxis, tf.newaxis, :]
# (bsize, num_seg, seg_len, n_head, seg_len + num_seg)
joint_logits = tf.concat([local_logits, global_logits], axis=-1)
attn_probs = attention.float32_softmax(joint_logits, axis=-1)
local_att, global_att = tf.split(attn_probs,
[config.max_seg_len, num_seg],
axis=-1)
if is_training:
local_att = tf.nn.dropout(local_att, rate=config.dropatt)
local_merged = tf.einsum('bsqhk,bskhd->bsqhd', local_att, value_row_local)
global_merged = tf.einsum('bqlhv,bvlhd->bqlhd', global_att, row_attn_out)
joint_merged = tf.reshape(local_merged + global_merged,
[bsize, seq_len,
config.num_heads, head_dim])
output = ops.trail_dense(joint_merged, config.model_size,
begin_axis=-2, bias=config.dense_use_bias)
return output
def axial_mixture_bidir(x, config, is_training=True, causal=False):
"""Full attention matrix with axial mixture decomposition."""
assert not causal
bsize = x.shape[0]
seq_len = x.shape.as_list()[1]
head_dim = config.model_size // config.num_heads
assert seq_len % config.max_seg_len == 0
num_seg = seq_len // config.max_seg_len
x_sqr = tf.reshape(x,
[bsize, num_seg, config.max_seg_len, config.model_size])
query, key, value = attention.get_qkv(
x_sqr, x_sqr, x_sqr, hidden_size=config.model_size,
num_heads=config.num_heads, bias=config.dense_use_bias)
local_row_logits = tf.einsum('bushd,buthd->bhust', query, key)
local_col_logits = tf.einsum('bsuhd,btuhd->bhsut', query, key)
# TODO: add self-mask for local_col_logits
span_attn_fn = functools.partial(attention.dot_product_attention,
key_heads=key,
value_heads=value,
is_training=is_training,
dropatt=config.dropatt)
# === top-down summary ===
col_query_topdown = approx_cummax(query, 1, exclusive=True)
col_key_topdown = approx_cummax(key, 1, exclusive=True)
col_t2d_mask = get_causal_mask(x_sqr, axis=1, is_strict=True)
col_t2d_val = span_attn_fn(query_heads=col_query_topdown,
attn_axis=0,
attn_bias=col_t2d_mask)
# === bottom-up summary ===
col_query_bottomup = approx_cummax(query, 1, exclusive=True, reverse=True)
col_key_bottomup = approx_cummax(key, 1, exclusive=True, reverse=True)
col_b2t_mask = get_causal_mask(x_sqr, axis=1, is_strict=True, upper=True)
col_b2t_val = span_attn_fn(query_heads=col_query_bottomup,
attn_axis=0,
attn_bias=col_b2t_mask)
# === left2right summary ===
row_query_left2right = approx_cummax(query, 2, exclusive=True)
row_key_left2right = approx_cummax(key, 2, exclusive=True)
row_l2r_mask = get_causal_mask(x_sqr, axis=2, is_strict=True)
row_l2r_val = span_attn_fn(query_heads=row_query_left2right,
attn_axis=1,
attn_bias=row_l2r_mask)
# === right2left summary ===
row_query_right2left = approx_cummax(query, 2, exclusive=True, reverse=True)
row_key_right2left = approx_cummax(key, 2, exclusive=True, reverse=True)
row_r2l_mask = get_causal_mask(x_sqr, axis=2, is_strict=True, upper=True)
row_r2l_val = span_attn_fn(query_heads=row_query_right2left,
attn_axis=1,
attn_bias=row_r2l_mask)
global_t2d_logits = tf.einsum('bushd,buthd->bhust', query, col_key_topdown)
global_b2t_logits = tf.einsum('bushd,buthd->bhust', query, col_key_bottomup)
global_l2r_logits = tf.einsum('bsuhd,btuhd->bhsut', query, row_key_left2right)
global_r2l_logits = tf.einsum('bsuhd,btuhd->bhsut', query, row_key_right2left)
joint_logits = tf.concat([local_row_logits, local_col_logits,
global_t2d_logits, global_b2t_logits,
global_l2r_logits, global_r2l_logits], axis=-1)
attn_probs = attention.float32_softmax(joint_logits, axis=-1)
prow, pcol, pt2d, pb2t, pl2r, pr2l = tf.split(
attn_probs, [config.max_seg_len, num_seg, config.max_seg_len,
config.max_seg_len, num_seg, num_seg], axis=-1)
mrow = tf.einsum('bhust,buthd->bushd', prow, value)
mcol = tf.einsum('bhsut,btuhd->bsuhd', pcol, value)
mt2d = tf.einsum('bhust,buthd->bushd', pt2d, col_t2d_val)
mb2t = tf.einsum('bhust,buthd->bushd', pb2t, col_b2t_val)
ml2r = tf.einsum('bhsut,btuhd->bsuhd', pl2r, row_l2r_val)
mr2l = tf.einsum('bhsut,btuhd->bsuhd', pr2l, row_r2l_val)
joint_merged = mrow + mcol + mt2d + mb2t + ml2r + mr2l
joint_merged = tf.reshape(joint_merged,
[bsize, seq_len, config.num_heads, head_dim])
output = ops.trail_dense(joint_merged, config.model_size,
begin_axis=-2, bias=config.dense_use_bias)
return output
| 46.080537 | 120 | 0.635013 |
f737ecd6ea49249d14515c7f3e8a045c30507e84 | 7,250 | py | Python | tools.py | slavatulaev/rsdb | 85822db107953abd099ed296b6f3a88bb4e742c5 | ["Unlicense"] | 1 | 2019-04-01T09:41:09.000Z | 2019-04-01T09:41:09.000Z | tools.py | slavatulaev/rsdb | 85822db107953abd099ed296b6f3a88bb4e742c5 | ["Unlicense"] | null | null | null | tools.py | slavatulaev/rsdb | 85822db107953abd099ed296b6f3a88bb4e742c5 | ["Unlicense"] | 1 | 2019-11-18T16:33:49.000Z | 2019-11-18T16:33:49.000Z |
#!/usr/bin/env python
import random
import string
import ftplib
import zipfile
import os
import re
import sys
import socket
def genRandomString(length): # generates and returns a string of random characters of the given length, lower case
rS = ''.join(random.choices(string.ascii_lowercase + string.digits, k=length))
return rS
def genRandomStringUp(length): # generates and returns a string of random characters of the given length, upper case
rS = ''.join(random.choices(string.ascii_uppercase + string.digits, k=length))
return rS
def genRandomStringMix(length): # generates and returns a string of random characters of the given length, mixed case
rS = ''.join(random.choices(string.ascii_uppercase + string.ascii_lowercase + string.digits, k=length))
return rS
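# Illustrative usage: genRandomStringMix(8) might return e.g. 'aB3kQ9zx' --
# eight characters drawn from upper/lowercase latin letters and digits.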
def ftpUploadFile(ftp, ftpPath, ftpLogin, ftpPassword, filePath): # uploads a file to an ftp server
ftpFilePath = ''
fileName = filePath.split('/')[-1]
i = 0
while True:
try:
print('connecting ftp://' + ftp + ' - try ' + str(i))
ftpObj = ftplib.FTP(ftp, ftpLogin, ftpPassword, timeout = 10 )
print(ftpObj)
print("ftp connected succesfully")
break
except:
i += 1
if i > 10 :
return ''
print("changing directory...")
ftpObj.cwd(ftpPath)
print("directory changed succesfully")
    i = 0
    while True:
        try:
            print("opening file " + filePath)
            f = open(filePath, 'rb')
            print("sending file to ftp...")
            # store under the bare file name so the upload lands in the
            # current remote directory (matches the fileName returned below)
            ftpObj.storbinary("STOR " + fileName, f)
            print("closing ftp connection ...")
            ftpObj.quit()
            f.close()
            ftpFilePath = fileName
            break
        except:
            print("failed to upload %s to ftp" % filePath)
            i += 1
            try:
                f.close()  # f may be undefined if open() itself failed
            except NameError:
                pass
            if i > 10:
                return ''
            print("this is try number %s, will try again now..." % str(i))
    return ftpFilePath # returns the file path on the ftp server
def zipFiles(filesList = (), nameLen = 32): # creates an archive with a random nameLen-character name in a temporary directory
zipFilePath = 'tmp/' + genRandomString(nameLen) + '.zip'
try:
os.mkdir('tmp')
except:
pass
try:
zipFile = zipfile.ZipFile(zipFilePath, 'w', zipfile.ZIP_DEFLATED)
for f in filesList:
zipFile.write(f)
zipFile.close()
except:
return ''
return zipFilePath
# ftpUploadFile('files.000webhost.com','public_html','cryptocashback','qaz1XsW2','workList.csv')
def normalizeOVPNConfig(cfgData, deviceStr = ''): # normalizes the config into a standardized form
caData = []
certData = []
keyData = []
caStart = False
certStart = False
keyStart = False
cfgLines = []
#print('cfgData is: ', cfgData)
for line in cfgData:
#print('current line', line)
if line.strip() == '<ca>':
caData.append(line)
caStart = True
continue
if line.strip() == '<cert>':
certData.append(line)
certStart = True
continue
if line.strip() == '<key>':
keyData.append(line)
keyStart = True
continue
if line.strip() == '</ca>':
caData.append(line)
caStart = False
continue
if line.strip() == '</cert>':
certData.append(line)
certStart = False
continue
if line.strip() == '</key>':
keyData.append(line)
keyStart = False
continue
if caStart == True:
caData.append(line)
continue
if certStart == True:
certData.append(line)
continue
if keyStart == True:
keyData.append(line)
continue
if ((line[0] == '#') and ((line.find('setenv opt tls-cipher') == -1) and (line.find('dhcp-option') == -1))):
continue
if (line.strip() == 'block-outside-dns'):
continue
if (line.find('verb') != -1):
cfgLines.append('verb 4\n')
continue
if line.strip() == 'tls-cipher "DEFAULT:@SECLEVEL=0"':
cfgLines.append('setenv opt tls-cipher "DEFAULT:@SECLEVEL=0"\n')
continue
if (line.find('tun-mtu') != -1):
cfgLines.append('tun-mtu 1500\n')
continue
if (line.find('mssfix') != -1):
if ((deviceStr.find('Belkin') != -1) or (deviceStr.find('ASUS') != -1)):
cfgLines.append('mssfix 0\n')
else:
cfgLines.append('mssfix 1200\n')
continue
if line.find('remote ') != -1:
addr = line[line.find(' ')+1:line.rfind(' ')].strip()
if re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', addr) != None:
ipAddr = addr
cfgLines.append(line)
else:
try:
ipAddr = socket.gethostbyname(addr)
cfgLines.append(line.replace(addr,ipAddr))
except:
print('URL ', addr, 'could not be resolved. check it! ============== Attencion ===========')
cfgLines.append(line)
continue
if (line.strip() != ''):
cfgLines.append(line)
if (deviceStr.find('Belkin') != -1):
try:
if (cfgLines.index('dhcp-option DNS 1.1.1.1\n') > -1):
# print('this Belkin router already have DNS info in config ---------------')
pass
except:
cfgLines.append('dhcp-option DNS 1.1.1.1\n')
# print('here we add DNS info in config ++++++++++++++++')
if (deviceStr.find('NETGEAR') != -1):
try:
if ((cfgLines.index('#setenv opt tls-cipher "DEFAULT:@SECLEVEL=0"\n') > -1) or (cfgLines.index('setenv opt tls-cipher "DEFAULT:@SECLEVEL=0"\n') > -1)):
pass
except:
cfgLines.append('#setenv opt tls-cipher "DEFAULT:@SECLEVEL=0"\n')
try:
if (cfgLines.index('setenv opt block-outside-dns\n') > -1):
pass
except:
cfgLines.append('setenv opt block-outside-dns\n')
try:
if (cfgLines.index('redirect-gateway def1\n') > -1):
pass
except:
cfgLines.append('redirect-gateway def1\n')
try:
if (cfgLines.index('ping-timer-rem\n') > -1):
pass
except:
cfgLines.append('ping-timer-rem\n')
try:
if (cfgLines.index('verb 4\n') > -1):
pass
except:
cfgLines.append('verb 4\n')
try:
if (cfgLines.index('tun-mtu 1500\n') > -1):
pass
except:
cfgLines.append('tun-mtu 1500\n')
try:
if (cfgLines.index('mssfix 1200\n') > -1) or (cfgLines.index('mssfix 0\n') > -1):
pass
except:
cfgLines.append('mssfix 1200\n')
cfgData = []
for l in cfgLines: cfgData.append(l)
for l in caData: cfgData.append(l)
for l in certData: cfgData.append(l)
for l in keyData: cfgData.append(l)
return cfgData | 34.855769 | 163 | 0.531172 |
f737ede7d0db61bfa4fbe6917b44da4b93843274 | 13,078 | py | Python | spark_cluster/04_5_HV_activeLearn/HV_v4_activeLearn_NYT_sim2_and_sim3_to_sim1/6100_ML2_HV_v4_activeLearn_NYT_sim2_and_sim3_to_sim1_round5.py | poltextlab/nyt_hybrid_classification_workflow | 3f676938b08f4373be3a83e975ee51dfa5ce6bf5 | ["MIT"] | null | null | null | spark_cluster/04_5_HV_activeLearn/HV_v4_activeLearn_NYT_sim2_and_sim3_to_sim1/6100_ML2_HV_v4_activeLearn_NYT_sim2_and_sim3_to_sim1_round5.py | poltextlab/nyt_hybrid_classification_workflow | 3f676938b08f4373be3a83e975ee51dfa5ce6bf5 | ["MIT"] | null | null | null | spark_cluster/04_5_HV_activeLearn/HV_v4_activeLearn_NYT_sim2_and_sim3_to_sim1/6100_ML2_HV_v4_activeLearn_NYT_sim2_and_sim3_to_sim1_round5.py | poltextlab/nyt_hybrid_classification_workflow | 3f676938b08f4373be3a83e975ee51dfa5ce6bf5 | ["MIT"] | null | null | null |
# import libraries
from pyspark.sql import SparkSession
from pyspark import SparkConf
from pyspark.sql.types import *
from pyspark.sql.functions import col, count, when
from pyspark.ml.classification import LinearSVC
import pandas as pd
#################################################
# spark config
#################################################
mtaMaster = "spark://192.168.0.182:7077"
conf = SparkConf()
conf.setMaster(mtaMaster)
conf.set("spark.executor.memory", "24g")
conf.set("spark.driver.memory", "26g")
conf.set("spark.cores.max", 96)
conf.set("spark.driver.cores", 8)
conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
conf.set("spark.kryoserializer.buffer", "256m")
conf.set("spark.kryoserializer.buffer.max", "256m")
conf.set("spark.default.parallelism", 24)
conf.set("spark.eventLog.enabled", "true")
conf.set("spark.eventLog.dir", "hdfs://192.168.0.182:9000/eventlog")
conf.set("spark.history.fs.logDirectory", "hdfs://192.168.0.182:9000/eventlog")
conf.set("spark.driver.maxResultSize", "2g")
conf.getAll()
#################################################
# create spark session
#################################################
spark = SparkSession.builder.appName('ML2_HV_v4_activeLearn_NYT_sim2_and_sim3_to_sim1_round5').config(conf=conf).getOrCreate()
sc = spark.sparkContext
# check things are working
print(sc)
print(sc.defaultParallelism)
print("SPARK CONTEXT IS RUNNING")
#################################################
# define major topic codes
#################################################
# major topic codes for loop (NO 23 IN THE NYT CORPUS)
majortopic_codes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 100]
#majortopic_codes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 100]
#################################################
# loop starts here
#################################################
for h in range(3):
# read table from hdfs
df_original = spark.read.parquet("hdfs://192.168.0.182:9000/input/ML2_HV_v4_activeLearn_NYT_round5_start.parquet").repartition(50)
# check loaded data
print(df_original.printSchema())
print(df_original.show())
df_original.groupBy("majortopic").count().show(30, False)
#################################################
# prepare to log sample numbers
#################################################
columns = ["label", "non_label_all", "non_label_sample", "train_all"]
df_numbers = pd.DataFrame(index=majortopic_codes, columns=columns)
for i in majortopic_codes:
#################################################
# prepare df for svm requirements
#################################################
print("majortopic is:", i)
# separate majortopic
df_original = df_original.withColumn("label", when(df_original["majortopic"] == i, 1).otherwise(0))
# label has to be double for SVM
df_original = df_original.withColumn('label', df_original.label.cast(DoubleType()))
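        # One-vs-rest setup: label is 1.0 for the current majortopic i and
        # 0.0 for everything else, so each pass of this loop trains a binary
        # SVM for a single topic code.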
#################################################
# separate training and test sets
#################################################
df_train = df_original.where((col('train_r5') == 1) | (col('train_r2_neg') == i) | (col('train_r3_neg') == i) | (col('train_r4_neg') == i) | (col('train_r5_neg') == i))
df_test = df_original.where((col('train_r5') == 0) & (col('train_r2_neg') != i) & (col('train_r3_neg') != i) & (col('train_r4_neg') != i) & (col('train_r5_neg') != i))
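        # Rows flagged as round-specific negatives for topic i (the
        # train_r*_neg columns) are kept on the training side and excluded
        # from the test set above.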
# make training data proportional with regards to label occurrence frequency
df_train_mtc = df_train.where(col('label') == 1)
df_train_non_mtc = df_train.where(col('label') == 0)
df_train_count = df_train.count()
df_train_mtc_count = df_train_mtc.count()
df_train_non_mtc_count = df_train_non_mtc.count()
print("Rows in training DataFrame with label = ", df_train_mtc_count)
print("Rows in training DataFrame without label = ", df_train_non_mtc_count)
if df_train_mtc_count/df_train_non_mtc_count < 0.1:
if df_train_mtc_count*10 < df_train_count//10:
sample_num = df_train_count//10
else: sample_num = df_train_mtc_count*10
print("sample_num = ", sample_num)
print("df_train_non_mtc = ", df_train_non_mtc_count)
sampling_fraction = sample_num/df_train_non_mtc_count
print("sampling_fraction = ", sampling_fraction)
df_train_non_mtc = df_train_non_mtc.sample(False, sampling_fraction)
df_train_non_mtc_sample = df_train_non_mtc.count()
print("Rows in training DataFrame without label = ", df_train_non_mtc_sample)
df_train = df_train_mtc.union(df_train_non_mtc)
# numbers to logtable
df_numbers["non_label_sample"].loc[i] = df_train_non_mtc_sample
df_numbers["train_all"].loc[i] = df_train_mtc_count + df_train_non_mtc_sample
else:
# numbers to logtable
df_numbers["non_label_sample"].loc[i] = df_train_non_mtc_count
df_numbers["train_all"].loc[i] = df_train_count
# numbers to logtable
df_numbers["label"].loc[i] = df_train_mtc_count
df_numbers["non_label_all"].loc[i] = df_train_non_mtc_count
print(df_numbers)
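        # Sampling recap: when positives are under 10% of the negatives, the
        # negative class is downsampled to roughly max(10x positives, 10% of
        # the full training pool), bounding each binary SVM's class ratio.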
        # NOTE: plain assignment works as a "copy" here because Spark DataFrames are immutable -- the transformations below return new DataFrames instead of mutating these.
df_train_orig = df_train
df_test_orig = df_test
df_loop = 0
df_train_mtc = 0
df_train_non_mtc = 0
print("Rows in training DataFrame = ", df_train.count())
print("Rows in test DataFrame = ", df_test.count())
#################################################
# SVM
#################################################
for j in range(3):
df_train = df_train_orig
df_test = df_test_orig
# define svm
lsvc = LinearSVC(featuresCol='features', labelCol='label', maxIter=10, regParam=0.1)
# train the model.
lsvcModel = lsvc.fit(df_train)
print("fit model finished, starting scoring:", j)
# score the model on test data.
predictions = lsvcModel.transform(df_test)
df_train = 0
df_test = 0
lsvcModel = 0
        predictions.printSchema()
        predictions.show()
df_write = predictions.select("doc_id", "prediction")
predictions = 0
df_write = df_write.withColumn('prediction', df_write.prediction.cast(IntegerType()))
df_write = df_write.withColumn('prediction', df_write.prediction * i)
new_col_name = 'prediction_{i}'.format(i=i)
df_write = df_write.withColumnRenamed('prediction', new_col_name)
# write partial result to parquet
dest_name = "hdfs://192.168.0.182:9000/input/NYT_prediction_mtc{i}_{j}.parquet".format(i=i, j=j)
df_write.write.parquet(dest_name, mode="overwrite")
df_write = 0
print("DONE")
print("ALL SVM DONE round5_{h}".format(h=h+1))
df_numbers.to_csv("ML2_HV_v4_activeLearn_NYT_round5_sample{h}_sample_numbers.csv".format(h=h+1), index=False)
# empty memory
spark.catalog.clearCache()
print("cache cleared")
#######################################################
### parquet to pandas
#######################################################
for j in range(3):
# read from parquet format
for i in majortopic_codes:
source_name = "hdfs://192.168.0.182:9000/input/NYT_prediction_mtc{i}_{j}.parquet".format(i=i, j=j)
df = spark.read.parquet(source_name).repartition(50)
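            # successively inner-join each per-topic prediction column on doc_id
            # into one wide results table (note: assumes the first majortopic
            # code is 1)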
if i == 1:
df_results = df
else:
df_results = df_results.join(df, 'doc_id', 'inner')
df = df_results
df_results = 0
# convert prediction results to pandas df
df = df.toPandas()
df.to_csv("ML2_HV_v4_activeLearn_NYT_round5_sample{h}_svm{j}.csv".format(h=h+1,j=j), index=False)
#########################################################################
# create results and leftovers tables
#########################################################################
# all of the following happen in pandas outside the spark context
for i in range(3):
for j in range(3):
df = pd.read_csv("ML2_HV_v4_activeLearn_NYT_round5_sample{i}_svm{j}.csv".format(i=i+1, j=j))
df = df.sort_values(by=['doc_id'])
df = df.reset_index(drop=True)
#print(df.head())
if i == 0 and j == 0:
df_results = df
else:
df_lemma = df_results.iloc[:,1:].add(df.iloc[:,1:])
df_results = pd.concat([df_results[['doc_id']], df_lemma], axis=1)
#print(df_results.head())
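    # each of the 9 SVM runs (3 samples x 3 models per sample) voted either 0 or
    # the topic code i in column prediction_i; after summing, floor-dividing by i
    # converts each column back into a vote count between 0 and 9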
for i in majortopic_codes:
df_results[["prediction_{i}".format(i=i)]] = df_results[["prediction_{i}".format(i=i)]].floordiv(i)
df_results["max_value"] = df_results.iloc[:,1:].max(axis = 1, numeric_only = True)
df_results["how_many_9votes"] = df_results.iloc[:,:-1].isin([9]).sum(1)
print(df_results.shape)
df_results = df_results.loc[df_results["max_value"]==9]
print(df_results.shape)
# first get table of multiple nine votes for active learning
df_activeLearn = df_results.loc[df_results["how_many_9votes"]>1]
# then get all simple verdicts
df_results = df_results.loc[df_results["how_many_9votes"]==1]
print(df_results.shape)
# prepare table for active learning
# first get the full result table for further analysis later
df_activeLearn.to_csv("ML2_v4_activeLearn_NYT_r5_activeLearn_raw.csv", index=False)
# since this is a simulation a dummy value will suffice here
df_activeLearn["verdict"] = "dummy_value"
df_activeLearn = df_activeLearn[["doc_id", "verdict"]]
# prepare table of single verdicts
df_results = df_results.drop(['max_value', 'how_many_9votes'], axis=1)
print(df_results.head())
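    # a unanimous column holds 9 votes; floor-dividing by 9 maps it to 1 and all
    # other columns to 0, then multiplying by i restores the winning topic code,
    # so the row sum below equals the verdict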
for i in majortopic_codes:
df_results[["prediction_{i}".format(i=i)]] = df_results[["prediction_{i}".format(i=i)]].floordiv(9)
print(df_results.head())
for i in majortopic_codes:
df_results[["prediction_{i}".format(i=i)]] = df_results[["prediction_{i}".format(i=i)]]*i
df_results["verdict"] = df_results.iloc[:,1:].sum(1)
df_results = df_results[["doc_id", "verdict"]]
# now we move back to the spark context!!
# for that we need to move the pandas df into a spark df
df = spark.createDataFrame(df_results)
# if there are no elements selected for active learning trying to move the empty pandas df into the
# spark context will throw an error
if df_activeLearn.empty:
print("no elements selected for active learning")
df_al = pd.DataFrame({'col1': [1]})
df_al = spark.createDataFrame(df_al)
else:
df_al = spark.createDataFrame(df_activeLearn)
# load df_original
df_original = spark.read.parquet("hdfs://192.168.0.182:9000/input/ML2_HV_v4_activeLearn_NYT_round5_start.parquet").repartition(50)
# create results table
df_results = df_original.join(df, "doc_id", "inner")
if len(df_al.columns) == 1:
df_results_al = df_al
else:
df_results_al = df_original.join(df_al, "doc_id", "inner")
# create table of non-classified and training elements
ids_drop = df.select("doc_id")
df_original = df_original.join(ids_drop, "doc_id", "left_anti")
# once more for those selected for active learning
if len(df_al.columns) == 1:
print("no elements selected for active learning")
else:
ids_drop = df_al.select("doc_id")
df_original = df_original.join(ids_drop, "doc_id", "left_anti")
# write to parquet for use in human validation script
df_original.write.parquet("hdfs://192.168.0.182:9000/input/ML2_HV_v4_activeLearn_NYT_r5_train_and_remaining_NOTclassified.parquet", mode="overwrite")
df_results.write.parquet("hdfs://192.168.0.182:9000/input/ML2_HV_v4_activeLearn_NYT_r5_classified.parquet", mode="overwrite")
df_results_al.write.parquet("hdfs://192.168.0.182:9000/input/ML2_HV_v4_activeLearn_NYT_r5_activeLearn.parquet", mode="overwrite")
# convert tables to pandas df and write to csv
df_original = df_original.drop("text", "words", "raw_features", "features").toPandas()
df_results = df_results.drop("text", "words", "raw_features", "features").toPandas()
if len(df_al.columns) != 1:
df_results_al = df_results_al.drop("text", "words", "raw_features", "features").toPandas()
df_original.to_csv("ML2_HV_v4_activeLearn_NYT_r5_train_and_remaining_NOTclassified.csv", index=False)
df_results.to_csv("ML2_HV_v4_activeLearn_NYT_r5_classified.csv", index=False)
if len(df_al.columns) != 1:
df_results_al.to_csv("ML2_HV_v4_activeLearn_NYT_r5_activeLearn.csv", index=False)
print("df_original: ", df_original.shape[0])
print("df_results: ", df_results.shape[0])
if len(df_al.columns) != 1:
print("df_results_activeLearn: ", df_results_al.shape[0])
else:
print("df_results_activeLearn: 0")
sc.stop()
spark.stop()
| 38.807122 | 176 | 0.625631 |
from pyspark.sql import SparkSession
from pyspark import SparkConf
from pyspark.sql.types import *
from pyspark.sql.functions import col, count, when
from pyspark.ml.classification import LinearSVC
import pandas as pd
| true | true |
f737ee0e24b035a1be31bccb6520852045423200 | 2,776 | py | Python | qa/rpc-tests/mempool_spendcoinbase.py | L00119483/TechSquad.io | 3ebafca95c5b125f3dbe52d9d4cde29c61a48975 | [
"MIT"
] | 4 | 2018-06-16T20:08:19.000Z | 2018-08-22T15:44:58.000Z | qa/rpc-tests/mempool_spendcoinbase.py | L00119483/TechSquad.io | 3ebafca95c5b125f3dbe52d9d4cde29c61a48975 | [
"MIT"
] | null | null | null | qa/rpc-tests/mempool_spendcoinbase.py | L00119483/TechSquad.io | 3ebafca95c5b125f3dbe52d9d4cde29c61a48975 | [
"MIT"
] | 7 | 2018-06-06T18:51:07.000Z | 2018-09-08T15:17:04.000Z | #!/usr/bin/env python2
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test spending coinbase transactions.
# The coinbase transaction in block N can appear in block
# N+100... so is valid in the mempool when the best block
# height is N+99.
# This test makes sure coinbase spends that will be mature
# in the next block are accepted into the memory pool,
# but less mature coinbase spends are NOT.
#
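# For example (an illustration of the rule above using this test's numbers):
# the coinbase mined in block 101 matures at height 201, so a spend of it is
# accepted into the mempool once the tip is at height 200, while the block-102
# coinbase must wait one more block.
#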
from test_framework import BitcoinTestFramework
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
import os
import shutil
# Create one-input, one-output, no-fee transaction:
class MempoolSpendCoinbaseTest(BitcoinTestFramework):
def setup_network(self):
# Just need one node for this test
args = ["-checkmempool", "-debug=mempool"]
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, args))
self.is_network_split = False
def create_tx(self, from_txid, to_address, amount):
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signresult = self.nodes[0].signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
def run_test(self):
chain_height = self.nodes[0].getblockcount()
assert_equal(chain_height, 200)
node0_address = self.nodes[0].getnewaddress()
# Coinbase at height chain_height-100+1 ok in mempool, should
        # get mined. Coinbase at height chain_height-100+2 is
        # too immature to spend.
b = [ self.nodes[0].getblockhash(n) for n in range(101, 103) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spends_raw = [ self.create_tx(txid, node0_address, 50) for txid in coinbase_txids ]
spend_101_id = self.nodes[0].sendrawtransaction(spends_raw[0])
# coinbase at height 102 should be too immature to spend
assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, spends_raw[1])
# mempool should have just spend_101:
assert_equal(self.nodes[0].getrawmempool(), [ spend_101_id ])
# mine a block, spend_101 should get confirmed
self.nodes[0].setgenerate(True, 1)
assert_equal(set(self.nodes[0].getrawmempool()), set())
# ... and now height 102 can be spent:
spend_102_id = self.nodes[0].sendrawtransaction(spends_raw[1])
assert_equal(self.nodes[0].getrawmempool(), [ spend_102_id ])
if __name__ == '__main__':
MempoolSpendCoinbaseTest().main()
| 39.657143 | 91 | 0.691643 |
from test_framework import BitcoinTestFramework
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
import os
import shutil
class MempoolSpendCoinbaseTest(BitcoinTestFramework):
def setup_network(self):
args = ["-checkmempool", "-debug=mempool"]
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, args))
self.is_network_split = False
def create_tx(self, from_txid, to_address, amount):
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signresult = self.nodes[0].signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
def run_test(self):
chain_height = self.nodes[0].getblockcount()
assert_equal(chain_height, 200)
node0_address = self.nodes[0].getnewaddress()
b = [ self.nodes[0].getblockhash(n) for n in range(101, 103) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spends_raw = [ self.create_tx(txid, node0_address, 50) for txid in coinbase_txids ]
spend_101_id = self.nodes[0].sendrawtransaction(spends_raw[0])
assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, spends_raw[1])
assert_equal(self.nodes[0].getrawmempool(), [ spend_101_id ])
self.nodes[0].setgenerate(True, 1)
assert_equal(set(self.nodes[0].getrawmempool()), set())
spend_102_id = self.nodes[0].sendrawtransaction(spends_raw[1])
assert_equal(self.nodes[0].getrawmempool(), [ spend_102_id ])
if __name__ == '__main__':
MempoolSpendCoinbaseTest().main()
| true | true |
f737ee8063f88a4c5b5bd906d18b1b14dc6a3e8d | 754 | py | Python | var/spack/repos/builtin/packages/ray/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2020-06-25T15:25:29.000Z | 2020-06-25T15:25:29.000Z | var/spack/repos/builtin/packages/ray/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2018-07-06T19:11:46.000Z | 2018-07-06T19:12:28.000Z | var/spack/repos/builtin/packages/ray/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2020-03-06T11:04:37.000Z | 2020-03-06T11:04:37.000Z | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Ray(CMakePackage):
"""Parallel genome assemblies for parallel DNA sequencing"""
homepage = "http://denovoassembler.sourceforge.net/"
url = "https://downloads.sourceforge.net/project/denovoassembler/Ray-2.3.1.tar.bz2"
version('2.3.1', sha256='3122edcdf97272af3014f959eab9a0f0e5a02c8ffc897d842b06b06ccd748036')
depends_on('mpi')
@run_after('build')
def make(self):
mkdirp(prefix.bin)
make('PREFIX=%s' % prefix.bin)
def install(self, spec, prefix):
make('install')
| 29 | 95 | 0.704244 |
from spack import *
class Ray(CMakePackage):
homepage = "http://denovoassembler.sourceforge.net/"
url = "https://downloads.sourceforge.net/project/denovoassembler/Ray-2.3.1.tar.bz2"
version('2.3.1', sha256='3122edcdf97272af3014f959eab9a0f0e5a02c8ffc897d842b06b06ccd748036')
depends_on('mpi')
@run_after('build')
def make(self):
mkdirp(prefix.bin)
make('PREFIX=%s' % prefix.bin)
def install(self, spec, prefix):
make('install')
| true | true |
f737efa8da13d1e6b4006f607a2c3dddab25a27c | 3,211 | py | Python | haiku/_src/random_test.py | timwillhack/dm-haikuBah2 | b76a3db3a39b82c8a1ae5a81a8a0173c23c252e5 | [
"Apache-2.0"
] | 1,647 | 2020-02-21T14:24:31.000Z | 2022-03-31T04:31:34.000Z | haiku/_src/random_test.py | timwillhack/dm-haikuBah2 | b76a3db3a39b82c8a1ae5a81a8a0173c23c252e5 | [
"Apache-2.0"
] | 169 | 2020-02-21T14:07:25.000Z | 2022-03-31T13:08:28.000Z | haiku/_src/random_test.py | timwillhack/dm-haikuBah2 | b76a3db3a39b82c8a1ae5a81a8a0173c23c252e5 | [
"Apache-2.0"
] | 159 | 2020-02-21T19:31:02.000Z | 2022-03-29T12:41:35.000Z | # Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.random."""
import functools
from absl.testing import absltest
from haiku._src import base
from haiku._src import random
from haiku._src import transform
import jax
from jax import prng
import jax.numpy as jnp
import numpy as np
class RandomTest(absltest.TestCase):
def test_optimize_rng_splitting(self):
def f():
k1 = base.next_rng_key()
k2 = base.next_rng_key()
return k1, k2
key = jax.random.PRNGKey(42)
assert_allclose = functools.partial(np.testing.assert_allclose, atol=1e-5)
# With optimize_rng_use the keys returned should be equal to split(n).
f_opt = transform.transform(random.optimize_rng_use(f))
jax.tree_multimap(assert_allclose,
f_opt.apply({}, key),
tuple(jax.random.split(key, 3))[1:])
# Without optimize_rng_use the keys should be equivalent to splitting in a
# loop.
f = transform.transform(f)
jax.tree_multimap(assert_allclose,
f.apply({}, key),
tuple(split_for_n(key, 2)))
def test_rbg_default_impl(self):
with jax.default_prng_impl("rbg"):
key = jax.random.PRNGKey(42)
self.assertEqual(key.shape, (4,))
_, apply = transform.transform(base.next_rng_key)
out_key = apply({}, key)
self.assertEqual(out_key.shape, (4,))
class CustomRNGTest(absltest.TestCase):
def setUp(self):
super().setUp()
jax.config.update("jax_enable_custom_prng", True)
def tearDown(self):
super().tearDown()
jax.config.update("jax_enable_custom_prng", False)
def test_custom_key(self):
count = 0
def count_splits(_, num):
nonlocal count
count += 1
return jnp.zeros((num, 13), np.uint32)
differently_shaped_prng_impl = prng.PRNGImpl(
# Testing a different key shape to make sure it's accepted by Haiku
key_shape=(13,),
seed=lambda _: jnp.zeros((13,), np.uint32),
split=count_splits,
random_bits=lambda *_, data: jnp.zeros(data, np.uint32),
fold_in=lambda key, _: key)
init, _ = transform.transform(base.next_rng_key)
key = prng.seed_with_impl(differently_shaped_prng_impl, 42)
init(key)
self.assertEqual(count, 1)
# testing if Tracers with a different key shape are accepted
jax.jit(init)(key)
self.assertEqual(count, 2)
def split_for_n(key, n):
for _ in range(n):
key, subkey = jax.random.split(key)
yield subkey
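# Illustrative summary (not part of the tests): optimize_rng_use collapses the
# two sequential next_rng_key() calls in f into a single jax.random.split(key, 3)
# whose first subkey is dropped, while the unoptimized transform splits once per
# call, which split_for_n above mirrors.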
if __name__ == "__main__":
absltest.main()
| 30.875 | 80 | 0.667393 |
import functools
from absl.testing import absltest
from haiku._src import base
from haiku._src import random
from haiku._src import transform
import jax
from jax import prng
import jax.numpy as jnp
import numpy as np
class RandomTest(absltest.TestCase):
def test_optimize_rng_splitting(self):
def f():
k1 = base.next_rng_key()
k2 = base.next_rng_key()
return k1, k2
key = jax.random.PRNGKey(42)
assert_allclose = functools.partial(np.testing.assert_allclose, atol=1e-5)
f_opt = transform.transform(random.optimize_rng_use(f))
jax.tree_multimap(assert_allclose,
f_opt.apply({}, key),
tuple(jax.random.split(key, 3))[1:])
f = transform.transform(f)
jax.tree_multimap(assert_allclose,
f.apply({}, key),
tuple(split_for_n(key, 2)))
def test_rbg_default_impl(self):
with jax.default_prng_impl("rbg"):
key = jax.random.PRNGKey(42)
self.assertEqual(key.shape, (4,))
_, apply = transform.transform(base.next_rng_key)
out_key = apply({}, key)
self.assertEqual(out_key.shape, (4,))
class CustomRNGTest(absltest.TestCase):
def setUp(self):
super().setUp()
jax.config.update("jax_enable_custom_prng", True)
def tearDown(self):
super().tearDown()
jax.config.update("jax_enable_custom_prng", False)
def test_custom_key(self):
count = 0
def count_splits(_, num):
nonlocal count
count += 1
return jnp.zeros((num, 13), np.uint32)
differently_shaped_prng_impl = prng.PRNGImpl(
key_shape=(13,),
seed=lambda _: jnp.zeros((13,), np.uint32),
split=count_splits,
random_bits=lambda *_, data: jnp.zeros(data, np.uint32),
fold_in=lambda key, _: key)
init, _ = transform.transform(base.next_rng_key)
key = prng.seed_with_impl(differently_shaped_prng_impl, 42)
init(key)
self.assertEqual(count, 1)
# testing if Tracers with a different key shape are accepted
jax.jit(init)(key)
self.assertEqual(count, 2)
def split_for_n(key, n):
for _ in range(n):
key, subkey = jax.random.split(key)
yield subkey
if __name__ == "__main__":
absltest.main()
| true | true |
f737f07ff5c625d3a4e070ba3882eb7d1922f130 | 3,366 | py | Python | azure-mgmt-network/azure/mgmt/network/v2017_03_01/models/subnet.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 4 | 2016-06-17T23:25:29.000Z | 2022-03-30T22:37:45.000Z | azure/mgmt/network/v2017_03_01/models/subnet.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 2 | 2016-09-30T21:40:24.000Z | 2017-11-10T18:16:18.000Z | azure/mgmt/network/v2017_03_01/models/subnet.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 3 | 2016-05-03T20:49:46.000Z | 2017-10-05T21:05:27.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class Subnet(SubResource):
"""Subnet in a virtual network resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:param address_prefix: The address prefix for the subnet.
:type address_prefix: str
:param network_security_group: The reference of the NetworkSecurityGroup
resource.
:type network_security_group:
~azure.mgmt.network.v2017_03_01.models.NetworkSecurityGroup
:param route_table: The reference of the RouteTable resource.
:type route_table: ~azure.mgmt.network.v2017_03_01.models.RouteTable
:ivar ip_configurations: Gets an array of references to the network
interface IP configurations using subnet.
:vartype ip_configurations:
list[~azure.mgmt.network.v2017_03_01.models.IPConfiguration]
:param resource_navigation_links: Gets an array of references to the
external resources using subnet.
:type resource_navigation_links:
list[~azure.mgmt.network.v2017_03_01.models.ResourceNavigationLink]
:param provisioning_state: The provisioning state of the resource.
:type provisioning_state: str
:param name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_validation = {
'ip_configurations': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'address_prefix': {'key': 'properties.addressPrefix', 'type': 'str'},
'network_security_group': {'key': 'properties.networkSecurityGroup', 'type': 'NetworkSecurityGroup'},
'route_table': {'key': 'properties.routeTable', 'type': 'RouteTable'},
'ip_configurations': {'key': 'properties.ipConfigurations', 'type': '[IPConfiguration]'},
'resource_navigation_links': {'key': 'properties.resourceNavigationLinks', 'type': '[ResourceNavigationLink]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, id=None, address_prefix=None, network_security_group=None, route_table=None, resource_navigation_links=None, provisioning_state=None, name=None, etag=None):
super(Subnet, self).__init__(id=id)
self.address_prefix = address_prefix
self.network_security_group = network_security_group
self.route_table = route_table
self.ip_configurations = None
self.resource_navigation_links = resource_navigation_links
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
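# Example (illustrative only; the argument values below are assumptions):
#   subnet = Subnet(address_prefix='10.0.0.0/24', name='default')
#   # ip_configurations stays None locally; it is read-only and populated by the service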
| 44.88 | 179 | 0.676173 |
from .sub_resource import SubResource
class Subnet(SubResource):
_validation = {
'ip_configurations': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'address_prefix': {'key': 'properties.addressPrefix', 'type': 'str'},
'network_security_group': {'key': 'properties.networkSecurityGroup', 'type': 'NetworkSecurityGroup'},
'route_table': {'key': 'properties.routeTable', 'type': 'RouteTable'},
'ip_configurations': {'key': 'properties.ipConfigurations', 'type': '[IPConfiguration]'},
'resource_navigation_links': {'key': 'properties.resourceNavigationLinks', 'type': '[ResourceNavigationLink]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, id=None, address_prefix=None, network_security_group=None, route_table=None, resource_navigation_links=None, provisioning_state=None, name=None, etag=None):
super(Subnet, self).__init__(id=id)
self.address_prefix = address_prefix
self.network_security_group = network_security_group
self.route_table = route_table
self.ip_configurations = None
self.resource_navigation_links = resource_navigation_links
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
| true | true |
f737f1247fff27cdd82d67d936ffb5270c251013 | 15,114 | py | Python | library/nsxt_segment.py | madhukark/nsx-pacific | eadcebe6fb3521cd4db721329092958e9f02e6cc | [
"BSD-2-Clause"
] | 6 | 2020-03-25T16:49:52.000Z | 2020-04-11T16:01:35.000Z | library/nsxt_segment.py | madhukark/nsx-pacific | eadcebe6fb3521cd4db721329092958e9f02e6cc | [
"BSD-2-Clause"
] | 3 | 2020-03-26T19:30:15.000Z | 2020-04-16T22:17:24.000Z | library/nsxt_segment.py | madhukark/nsx-pacific | eadcebe6fb3521cd4db721329092958e9f02e6cc | [
"BSD-2-Clause"
] | 2 | 2020-03-25T23:49:30.000Z | 2020-03-26T21:52:23.000Z | #!/usr/bin/env python
#
# Copyright 2018 VMware, Inc.
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nsxt_segment
short_description: Create or Delete a Policy Segment
description:
Creates or deletes a Policy Segment.
Required attributes include id and display_name.
If the specified TransportZone is of VLAN type, a vlan_id is also required.
version_added: "2.8"
author: Gautam Verma
extends_documentation_fragment: vmware_nsxt
options:
id:
description: The id of the Policy Segment.
required: true
type: str
description:
description: Segment description.
type: str
tier0_id:
description: The Uplink of the Policy Segment.
                     Mutually exclusive with tier1_id.
type: str
tier0_display_name:
        description: Same as tier0_id. Either one can be specified.
                     If both are specified, tier0_id takes
precedence.
type: str
tier1_id:
description: The Uplink of the Policy Segment.
                     Mutually exclusive with tier0_id but takes precedence.
type: str
tier1_display_name:
        description: Same as tier1_id. Either one can be specified.
                     If both are specified, tier1_id takes
precedence.
type: str
domain_name:
description: Domain name associated with the Policy Segment.
type: str
transport_zone_id:
description: The TZ associated with the Policy Segment.
type: str
transport_zone_display_name:
description: Same as transport_zone_id. Either one can be specified.
If both are specified, transport_zone_id takes
precedence.
type: str
enforcementpoint_id:
description: The EnforcementPoint ID where the TZ is located.
Required if transport_zone_id is specified.
default: default
type: str
site_id:
description: The site ID where the EnforcementPoint is located.
Required if transport_zone_id is specified.
default: default
type: str
vlan_ids:
description: VLAN ids for a VLAN backed Segment.
Can be a VLAN id or a range of VLAN ids specified with '-'
in between.
type: list
subnets:
description: Subnets that belong to this Policy Segment.
type: dict
suboptions:
dhcp_ranges:
description: DHCP address ranges for dynamic IP allocation.
DHCP address ranges are used for dynamic IP
allocation. Supports address range and CIDR
formats. First valid host address from the first
value is assigned to DHCP server IP address.
Existing values cannot be deleted or modified, but
additional DHCP ranges can be added.
Formats, e.g. 10.12.2.64/26, 10.12.2.2-10.12.2.50
type: list
gateway_address:
description: Gateway IP address.
Gateway IP address in CIDR format for both IPv4
and IPv6.
required: True
type: str
segment_ports:
type: list
description:
- Add the Segment Ports to be create, updated, or deleted in this
section
element: dict
suboptions:
id:
description: The id of the Policy Segment Port.
required: false
type: str
display_name:
description:
- Segment Port display name.
- Either this or id must be specified. If both are
specified, id takes precedence.
required: false
type: str
description:
description:
- Segment description.
type: str
tags:
description: Opaque identifiers meaningful to the API user.
type: dict
suboptions:
scope:
description: Tag scope.
required: true
type: str
tag:
description: Tag value.
required: true
type: str
state:
choices:
- present
- absent
description:
- State can be either 'present' or 'absent'. 'present' is
used to create or update resource. 'absent' is used to
delete resource
- Required if I(id != null)
required: true
address_bindings:
description: Static address binding used for the port.
type: dict
suboptions:
ip_address:
description: IP Address for port binding.
type: str
mac_address:
description: Mac address for port binding.
type: str
vlan_id:
description: VLAN ID for port binding.
                    type: int
attachment:
description: VIF attachment.
type: dict
suboptions:
allocate_addresses:
description: Indicate how IP will be
allocated for the port.
type: str
choices:
- IP_POOL
- MAC_POOL
- BOTH
- NONE
app_id:
description: ID used to identify/look up a
child attachment behind a
parent attachment.
type: str
context_id:
description: Parent VIF ID if type is CHILD,
Transport node ID if type is
INDEPENDENT.
type: str
id:
description: VIF UUID on NSX Manager.
type: str
traffic_tag:
description:
- VLAN ID
- Not valid when type is INDEPENDENT, mainly
used to identify traffic from different ports
in container use case
type: int
type:
description: Type of port attachment.
type: str
choices:
- PARENT
- CHILD
- INDEPENDENT
'''
EXAMPLES = '''
- name: create Segment
nsxt_segment:
hostname: "10.10.10.10"
username: "username"
password: "password"
validate_certs: False
display_name: test-seg-4
state: present
domain_name: dn1
transport_zone_display_name: "1-transportzone-730"
subnets:
- gateway_address: "40.1.1.1/16"
segment_ports:
- display_name: test-sp-1
state: present
- display_name: test-sp-2
state: present
- display_name: test-sp-3
state: present
'''
RETURN = '''# '''
import json
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.nsxt_base_resource import NSXTBaseRealizableResource
from ansible.module_utils.nsxt_resource_urls import (
SEGMENT_PORT_URL, SEGMENT_URL, TIER_0_URL, TIER_1_URL, TRANSPORT_ZONE_URL)
from ansible.module_utils._text import to_native
class NSXTSegment(NSXTBaseRealizableResource):
@staticmethod
def get_resource_spec():
segment_arg_spec = {}
segment_arg_spec.update(
subnets=dict(
required=False,
type='list',
options=dict(
dhcp_ranges=dict(
required=False,
type='list'
),
gateway_address=dict(
required=True,
type='str'
)
)
),
tier0_id=dict(
required=False,
type='str'
),
tier0_display_name=dict(
required=False,
type='str'
),
tier1_id=dict(
required=False,
type='str'
),
tier1_display_name=dict(
required=False,
type='str'
),
domain_name=dict(
required=False,
type='str'
),
vlan_ids=dict(
required=False,
type='list'
),
transport_zone_id=dict(
required=False,
type='str'
),
transport_zone_display_name=dict(
required=False,
type='str'
),
site_id=dict(
required=False,
type='str',
default="default"
),
enforcementpoint_id=dict(
required=False,
type='str',
default="default"
)
)
return segment_arg_spec
@staticmethod
def get_resource_base_url(baseline_args=None):
return SEGMENT_URL
def update_resource_params(self, nsx_resource_params):
if self.do_resource_params_have_attr_with_id_or_display_name(
"tier0"):
tier0_id = self.get_id_using_attr_name_else_fail(
"tier0", nsx_resource_params,
TIER_0_URL, "Tier0")
nsx_resource_params["connectivity_path"] = (
TIER_0_URL + "/" + tier0_id)
elif self.do_resource_params_have_attr_with_id_or_display_name(
"tier1"):
tier1_id = self.get_id_using_attr_name_else_fail(
"tier1", nsx_resource_params,
TIER_1_URL, "Tier1")
nsx_resource_params["connectivity_path"] = (
TIER_1_URL + "/" + tier1_id)
if self.do_resource_params_have_attr_with_id_or_display_name(
"transport_zone"):
site_id = nsx_resource_params.pop("site_id")
enforcementpoint_id = nsx_resource_params.pop(
"enforcementpoint_id")
transport_zone_base_url = (
TRANSPORT_ZONE_URL.format(site_id, enforcementpoint_id))
transport_zone_id = self.get_id_using_attr_name_else_fail(
"transport_zone", nsx_resource_params,
transport_zone_base_url, "Transport Zone")
nsx_resource_params["transport_zone_path"] = (
transport_zone_base_url + "/" + transport_zone_id)
def update_parent_info(self, parent_info):
parent_info["segment_id"] = self.id
class NSXTSegmentPort(NSXTBaseRealizableResource):
def get_spec_identifier(self):
return NSXTSegment.NSXTSegmentPort.get_spec_identifier()
@classmethod
def get_spec_identifier(cls):
return "segment_ports"
@staticmethod
def get_resource_spec():
segment_port_arg_spec = {}
segment_port_arg_spec.update(
address_bindings=dict(
required=False,
type='dict',
options=dict(
ip_address=dict(
required=False,
type='str'
),
mac_address=dict(
required=False,
type='str'
),
vlan_id=dict(
required=False,
type='int'
)
)
),
attachment=dict(
required=False,
type='dict',
options=dict(
allocate_addresses=dict(
required=False,
type='str',
choices=['IP_POOL', 'MAC_POOL', 'BOTH', 'NONE']
),
app_id=dict(
required=False,
type='str',
),
context_id=dict(
required=False,
type='str',
),
id=dict(
required=False,
type='str',
),
traffic_tag=dict(
required=False,
type='int'
),
type=dict(
required=False,
type='str',
choices=['PARENT', 'CHILD', 'INDEPENDENT']
)
)
)
)
return segment_port_arg_spec
@staticmethod
def get_resource_base_url(parent_info):
segment_id = parent_info.get("segment_id", 'default')
return SEGMENT_PORT_URL.format(segment_id)
if __name__ == '__main__':
segment = NSXTSegment()
segment.realize()
| 36.331731 | 79 | 0.494244 |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nsxt_segment
short_description: Create or Delete a Policy Segment
description:
Creates or deletes a Policy Segment.
Required attributes include id and display_name.
If the specified TransportZone is of VLAN type, a vlan_id is also required.
version_added: "2.8"
author: Gautam Verma
extends_documentation_fragment: vmware_nsxt
options:
id:
description: The id of the Policy Segment.
required: true
type: str
description:
description: Segment description.
type: str
tier0_id:
description: The Uplink of the Policy Segment.
                     Mutually exclusive with tier1_id.
type: str
tier0_display_name:
        description: Same as tier0_id. Either one can be specified.
                     If both are specified, tier0_id takes
precedence.
type: str
tier1_id:
description: The Uplink of the Policy Segment.
                     Mutually exclusive with tier0_id but takes precedence.
type: str
tier1_display_name:
        description: Same as tier1_id. Either one can be specified.
                     If both are specified, tier1_id takes
precedence.
type: str
domain_name:
description: Domain name associated with the Policy Segment.
type: str
transport_zone_id:
description: The TZ associated with the Policy Segment.
type: str
transport_zone_display_name:
description: Same as transport_zone_id. Either one can be specified.
If both are specified, transport_zone_id takes
precedence.
type: str
enforcementpoint_id:
description: The EnforcementPoint ID where the TZ is located.
Required if transport_zone_id is specified.
default: default
type: str
site_id:
description: The site ID where the EnforcementPoint is located.
Required if transport_zone_id is specified.
default: default
type: str
vlan_ids:
description: VLAN ids for a VLAN backed Segment.
Can be a VLAN id or a range of VLAN ids specified with '-'
in between.
type: list
subnets:
description: Subnets that belong to this Policy Segment.
type: dict
suboptions:
dhcp_ranges:
description: DHCP address ranges for dynamic IP allocation.
DHCP address ranges are used for dynamic IP
allocation. Supports address range and CIDR
formats. First valid host address from the first
value is assigned to DHCP server IP address.
Existing values cannot be deleted or modified, but
additional DHCP ranges can be added.
Formats, e.g. 10.12.2.64/26, 10.12.2.2-10.12.2.50
type: list
gateway_address:
description: Gateway IP address.
Gateway IP address in CIDR format for both IPv4
and IPv6.
required: True
type: str
segment_ports:
type: list
description:
- Add the Segment Ports to be create, updated, or deleted in this
section
element: dict
suboptions:
id:
description: The id of the Policy Segment Port.
required: false
type: str
display_name:
description:
- Segment Port display name.
- Either this or id must be specified. If both are
specified, id takes precedence.
required: false
type: str
description:
description:
- Segment description.
type: str
tags:
description: Opaque identifiers meaningful to the API user.
type: dict
suboptions:
scope:
description: Tag scope.
required: true
type: str
tag:
description: Tag value.
required: true
type: str
state:
choices:
- present
- absent
description:
- State can be either 'present' or 'absent'. 'present' is
used to create or update resource. 'absent' is used to
delete resource
- Required if I(id != null)
required: true
address_bindings:
description: Static address binding used for the port.
type: dict
suboptions:
ip_address:
description: IP Address for port binding.
type: str
mac_address:
description: Mac address for port binding.
type: str
vlan_id:
description: VLAN ID for port binding.
                    type: int
attachment:
description: VIF attachment.
type: dict
suboptions:
allocate_addresses:
description: Indicate how IP will be
allocated for the port.
type: str
choices:
- IP_POOL
- MAC_POOL
- BOTH
- NONE
app_id:
description: ID used to identify/look up a
child attachment behind a
parent attachment.
type: str
context_id:
description: Parent VIF ID if type is CHILD,
Transport node ID if type is
INDEPENDENT.
type: str
id:
description: VIF UUID on NSX Manager.
type: str
traffic_tag:
description:
- VLAN ID
- Not valid when type is INDEPENDENT, mainly
used to identify traffic from different ports
in container use case
type: int
type:
description: Type of port attachment.
type: str
choices:
- PARENT
- CHILD
- INDEPENDENT
'''
EXAMPLES = '''
- name: create Segment
nsxt_segment:
hostname: "10.10.10.10"
username: "username"
password: "password"
validate_certs: False
display_name: test-seg-4
state: present
domain_name: dn1
transport_zone_display_name: "1-transportzone-730"
subnets:
- gateway_address: "40.1.1.1/16"
segment_ports:
- display_name: test-sp-1
state: present
- display_name: test-sp-2
state: present
- display_name: test-sp-3
state: present
'''
RETURN = '''# '''
import json
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.nsxt_base_resource import NSXTBaseRealizableResource
from ansible.module_utils.nsxt_resource_urls import (
SEGMENT_PORT_URL, SEGMENT_URL, TIER_0_URL, TIER_1_URL, TRANSPORT_ZONE_URL)
from ansible.module_utils._text import to_native
class NSXTSegment(NSXTBaseRealizableResource):
@staticmethod
def get_resource_spec():
segment_arg_spec = {}
segment_arg_spec.update(
subnets=dict(
required=False,
type='list',
options=dict(
dhcp_ranges=dict(
required=False,
type='list'
),
gateway_address=dict(
required=True,
type='str'
)
)
),
tier0_id=dict(
required=False,
type='str'
),
tier0_display_name=dict(
required=False,
type='str'
),
tier1_id=dict(
required=False,
type='str'
),
tier1_display_name=dict(
required=False,
type='str'
),
domain_name=dict(
required=False,
type='str'
),
vlan_ids=dict(
required=False,
type='list'
),
transport_zone_id=dict(
required=False,
type='str'
),
transport_zone_display_name=dict(
required=False,
type='str'
),
site_id=dict(
required=False,
type='str',
default="default"
),
enforcementpoint_id=dict(
required=False,
type='str',
default="default"
)
)
return segment_arg_spec
@staticmethod
def get_resource_base_url(baseline_args=None):
return SEGMENT_URL
def update_resource_params(self, nsx_resource_params):
if self.do_resource_params_have_attr_with_id_or_display_name(
"tier0"):
tier0_id = self.get_id_using_attr_name_else_fail(
"tier0", nsx_resource_params,
TIER_0_URL, "Tier0")
nsx_resource_params["connectivity_path"] = (
TIER_0_URL + "/" + tier0_id)
elif self.do_resource_params_have_attr_with_id_or_display_name(
"tier1"):
tier1_id = self.get_id_using_attr_name_else_fail(
"tier1", nsx_resource_params,
TIER_1_URL, "Tier1")
nsx_resource_params["connectivity_path"] = (
TIER_1_URL + "/" + tier1_id)
if self.do_resource_params_have_attr_with_id_or_display_name(
"transport_zone"):
site_id = nsx_resource_params.pop("site_id")
enforcementpoint_id = nsx_resource_params.pop(
"enforcementpoint_id")
transport_zone_base_url = (
TRANSPORT_ZONE_URL.format(site_id, enforcementpoint_id))
transport_zone_id = self.get_id_using_attr_name_else_fail(
"transport_zone", nsx_resource_params,
transport_zone_base_url, "Transport Zone")
nsx_resource_params["transport_zone_path"] = (
transport_zone_base_url + "/" + transport_zone_id)
def update_parent_info(self, parent_info):
parent_info["segment_id"] = self.id
class NSXTSegmentPort(NSXTBaseRealizableResource):
def get_spec_identifier(self):
return NSXTSegment.NSXTSegmentPort.get_spec_identifier()
@classmethod
def get_spec_identifier(cls):
return "segment_ports"
@staticmethod
def get_resource_spec():
segment_port_arg_spec = {}
segment_port_arg_spec.update(
address_bindings=dict(
required=False,
type='dict',
options=dict(
ip_address=dict(
required=False,
type='str'
),
mac_address=dict(
required=False,
type='str'
),
vlan_id=dict(
required=False,
type='int'
)
)
),
attachment=dict(
required=False,
type='dict',
options=dict(
allocate_addresses=dict(
required=False,
type='str',
choices=['IP_POOL', 'MAC_POOL', 'BOTH', 'NONE']
),
app_id=dict(
required=False,
type='str',
),
context_id=dict(
required=False,
type='str',
),
id=dict(
required=False,
type='str',
),
traffic_tag=dict(
required=False,
type='int'
),
type=dict(
required=False,
type='str',
choices=['PARENT', 'CHILD', 'INDEPENDENT']
)
)
)
)
return segment_port_arg_spec
@staticmethod
def get_resource_base_url(parent_info):
segment_id = parent_info.get("segment_id", 'default')
return SEGMENT_PORT_URL.format(segment_id)
if __name__ == '__main__':
segment = NSXTSegment()
segment.realize()
| true | true |
f737f38e7b67e1c2e8de4bf96ddfbbd31aae65ed | 4,011 | py | Python | src/guiltytargets/pipeline.py | Shicheng-Guo/guiltytargets | 53832939b17ce2aa6a80aee298b975b778dd1bf6 | [
"MIT"
] | 10 | 2018-10-15T14:33:53.000Z | 2021-11-02T19:02:19.000Z | src/guiltytargets/pipeline.py | Shicheng-Guo/guiltytargets | 53832939b17ce2aa6a80aee298b975b778dd1bf6 | [
"MIT"
] | 7 | 2019-02-11T10:37:32.000Z | 2022-01-27T09:03:35.000Z | src/guiltytargets/pipeline.py | hfroehlich30975/GuiltyTargets | f0f4b5ed3ba5e8e383b9e2b684814560d6674029 | [
"MIT"
] | 5 | 2019-10-11T12:28:51.000Z | 2021-08-17T19:51:51.000Z | # -*- coding: utf-8 -*-
"""Pipeline for GuiltyTargets."""
from typing import List, Tuple
import pandas as pd
from .constants import gat2vec_config
from .gat2vec import Classification, Gat2Vec, gat2vec_paths
from .ppi_network_annotation import AttributeNetwork, LabeledNetwork, Network, generate_ppi_network, parse_dge
from .ppi_network_annotation.parsers import parse_gene_list
__all__ = [
'run',
'rank_targets',
]
def run(
input_directory,
targets_path,
ppi_graph_path,
dge_path,
auc_output_path,
probs_output_path,
max_adj_p,
max_log2_fold_change,
min_log2_fold_change,
entrez_id_header,
log2_fold_change_header,
adj_p_header,
base_mean_header,
entrez_delimiter,
ppi_edge_min_confidence,
) -> None:
"""Run the GuiltyTargets pipeline."""
gene_list = parse_dge(
dge_path=dge_path,
entrez_id_header=entrez_id_header,
log2_fold_change_header=log2_fold_change_header,
adj_p_header=adj_p_header,
entrez_delimiter=entrez_delimiter,
base_mean_header=base_mean_header,
)
network = generate_ppi_network(
ppi_graph_path=ppi_graph_path,
dge_list=gene_list,
max_adj_p=max_adj_p,
max_log2_fold_change=max_log2_fold_change,
min_log2_fold_change=min_log2_fold_change,
ppi_edge_min_confidence=ppi_edge_min_confidence,
)
targets = parse_gene_list(targets_path, network.graph)
auc_df, probs_df = rank_targets(
directory=input_directory,
targets=targets,
network=network,
)
probs_df.to_csv(
probs_output_path,
sep="\t",
)
auc_df.to_csv(
auc_output_path,
encoding="utf-8",
sep="\t",
index=False,
)
def write_gat2vec_input_files(network: Network, targets: List[str], home_dir: str) -> None:
"""Write the input files for gat2vec tool.
:param network: Network object with attributes overlayed on it.
    :param targets: List of identifiers of the known targets.
    :param home_dir: The gat2vec home directory to write the files into.
"""
network.write_adj_list(gat2vec_paths.get_adjlist_path(home_dir, "graph"))
attribute_network = AttributeNetwork(network)
attribute_network.write_attribute_adj_list(gat2vec_paths.get_adjlist_path(home_dir, "na"))
labeled_network = LabeledNetwork(network)
labeled_network.write_index_labels(targets, gat2vec_paths.get_labels_path(home_dir))
def rank_targets(
network: Network,
targets: List[str],
directory: str,
) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""Rank proteins based on their likelihood of being targets.
:param network: The PPI network annotated with differential gene expression data.
:param targets: A list of targets.
:param directory: Home directory for Gat2Vec.
    :return: A 2-tuple of the cross-validation AUC dataframe and the per-protein prediction probabilities dataframe.
"""
write_gat2vec_input_files(network=network, targets=targets, home_dir=directory)
g2v = Gat2Vec(directory, directory, label=False, tr=gat2vec_config.training_ratio)
model = g2v.train_gat2vec(
gat2vec_config.num_walks,
gat2vec_config.walk_length,
gat2vec_config.dimension,
gat2vec_config.window_size,
output=True,
)
classifier = Classification(directory, directory, tr=gat2vec_config.training_ratio)
auc_df = classifier.evaluate(model, label=False, evaluation_scheme="cv")
probs_df = get_rankings(classifier, model, network)
return auc_df, probs_df
def get_rankings(
classifier: Classification,
embedding: pd.DataFrame,
network: Network,
) -> pd.DataFrame:
"""Save the predicted rankings to a file.
:param classifier: Classification model.
:param embedding: Embedding model
:param network: PPI network with annotations
"""
probs_df = pd.DataFrame(classifier.get_prediction_probs_for_entire_set(embedding))
probs_df['Entrez'] = network.get_attribute_from_indices(
probs_df.index.values,
attribute_name='name',
)
return probs_df
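# Minimal usage sketch (illustrative; the paths, thresholds and target list are
# assumptions, not part of this module):
#   network = generate_ppi_network(
#       ppi_graph_path="ppi.tsv", dge_list=gene_list, max_adj_p=0.05,
#       max_log2_fold_change=-1.0, min_log2_fold_change=1.0,
#       ppi_edge_min_confidence=0.63)
#   auc_df, probs_df = rank_targets(network, ["2099", "367"], "gat2vec_home/")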
| 28.856115 | 110 | 0.715532 |
from typing import List, Tuple
import pandas as pd
from .constants import gat2vec_config
from .gat2vec import Classification, Gat2Vec, gat2vec_paths
from .ppi_network_annotation import AttributeNetwork, LabeledNetwork, Network, generate_ppi_network, parse_dge
from .ppi_network_annotation.parsers import parse_gene_list
__all__ = [
'run',
'rank_targets',
]
def run(
input_directory,
targets_path,
ppi_graph_path,
dge_path,
auc_output_path,
probs_output_path,
max_adj_p,
max_log2_fold_change,
min_log2_fold_change,
entrez_id_header,
log2_fold_change_header,
adj_p_header,
base_mean_header,
entrez_delimiter,
ppi_edge_min_confidence,
) -> None:
gene_list = parse_dge(
dge_path=dge_path,
entrez_id_header=entrez_id_header,
log2_fold_change_header=log2_fold_change_header,
adj_p_header=adj_p_header,
entrez_delimiter=entrez_delimiter,
base_mean_header=base_mean_header,
)
network = generate_ppi_network(
ppi_graph_path=ppi_graph_path,
dge_list=gene_list,
max_adj_p=max_adj_p,
max_log2_fold_change=max_log2_fold_change,
min_log2_fold_change=min_log2_fold_change,
ppi_edge_min_confidence=ppi_edge_min_confidence,
)
targets = parse_gene_list(targets_path, network.graph)
auc_df, probs_df = rank_targets(
directory=input_directory,
targets=targets,
network=network,
)
probs_df.to_csv(
probs_output_path,
sep="\t",
)
auc_df.to_csv(
auc_output_path,
encoding="utf-8",
sep="\t",
index=False,
)
def write_gat2vec_input_files(network: Network, targets: List[str], home_dir: str) -> None:
network.write_adj_list(gat2vec_paths.get_adjlist_path(home_dir, "graph"))
attribute_network = AttributeNetwork(network)
attribute_network.write_attribute_adj_list(gat2vec_paths.get_adjlist_path(home_dir, "na"))
labeled_network = LabeledNetwork(network)
labeled_network.write_index_labels(targets, gat2vec_paths.get_labels_path(home_dir))
def rank_targets(
network: Network,
targets: List[str],
directory: str,
) -> Tuple[pd.DataFrame, pd.DataFrame]:
write_gat2vec_input_files(network=network, targets=targets, home_dir=directory)
g2v = Gat2Vec(directory, directory, label=False, tr=gat2vec_config.training_ratio)
model = g2v.train_gat2vec(
gat2vec_config.num_walks,
gat2vec_config.walk_length,
gat2vec_config.dimension,
gat2vec_config.window_size,
output=True,
)
classifier = Classification(directory, directory, tr=gat2vec_config.training_ratio)
auc_df = classifier.evaluate(model, label=False, evaluation_scheme="cv")
probs_df = get_rankings(classifier, model, network)
return auc_df, probs_df
def get_rankings(
classifier: Classification,
embedding: pd.DataFrame,
network: Network,
) -> pd.DataFrame:
probs_df = pd.DataFrame(classifier.get_prediction_probs_for_entire_set(embedding))
probs_df['Entrez'] = network.get_attribute_from_indices(
probs_df.index.values,
attribute_name='name',
)
return probs_df
| true | true |
f737f3d4131b56174d565a0575f0331decd3591a | 20,724 | py | Python | orttraining/orttraining/test/python/orttraining_test_checkpoint.py | mszhanyi/onnxruntime | 6f85d3e5c81c919022ac4a77e5a051da8518b15d | [
"MIT"
] | 669 | 2018-12-03T22:00:31.000Z | 2019-05-06T19:42:49.000Z | orttraining/orttraining/test/python/orttraining_test_checkpoint.py | mszhanyi/onnxruntime | 6f85d3e5c81c919022ac4a77e5a051da8518b15d | [
"MIT"
] | 440 | 2018-12-03T21:09:56.000Z | 2019-05-06T20:47:23.000Z | orttraining/orttraining/test/python/orttraining_test_checkpoint.py | mszhanyi/onnxruntime | 6f85d3e5c81c919022ac4a77e5a051da8518b15d | [
"MIT"
] | 140 | 2018-12-03T21:15:28.000Z | 2019-05-06T18:02:36.000Z | #!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import subprocess
import os
import shutil
import sys
from checkpoint._test_helpers import makedir
from _test_commons import _single_run, _distributed_run
checkpoint_dir = os.path.abspath("checkpoint/checkpoint_dir/")
makedir(checkpoint_dir)
# test workflow:
# - there are a total of three files that are used for checkpointing tests:
# - orttraining_test_checkpoint.py: co-ordinating all the checkpoint tests
# - orttraining_test_save_checkpoint.py: responsible for saving all checkpoint files and trained states
# - orttraining_test_load_checkpoint.py: loading the saved checkpoints and the saved states and asserting whether
# the saved states match the loaded states.
# - and tests encompassing checkpointing tests for scenarios:
# - from [onnxruntime orttrainer][full_precision, mixed_precision][single node training, data parallel training, distributed zero, distributed megatron, distributed zero+megatron training] to
# [onnxruntime orttrainer, pytorch][full_precision, mixed_precision][single node training, data parallel training, distributed zero, distributed megatron, distributed zero+megatron training]
# - all tests cannot be written in the same process because:
# - some of them require to be run in a distributed environment (using mpirun) while others can be run using a single process.
# - there is a known limitation where the distributed training run context is implemented as a singleton, so in the same process, no more than one
# orttrainer can be instantiated. Hence the need to run these tests in different processes one at a time.
# - workflow:
# - orttraining_test_checkpoint.py calls orttraining_test_save_checkpoint.py to save following files to disk
# - ORTTrainer checkpoint files through the ORTTrainer.save_checkpoint method
# - ORTTrainer states through pickle after extracting all the states of the ORTTrainer through the ORTTrainer.state_dict method
#   - for each configuration across [onnxruntime orttrainer][full_precision, mixed_precision][single node training, data parallel training, distributed zero, distributed megatron, distributed zero+megatron training]
# - orttraining_test_checkpoint.py calls orttraining_test_load_checkpoint.py to load each checkpoint into each orttrainer configuration
# - Saved ORTTrainer checkpoint files are loaded into an ORTTrainer using the ORTTrainer.load_checkpoint method for each ORTTrainer configuration.
# - Saved states are loaded into a python dictionary (called the state dictionary) through pickle
# - state dictionary is extracted from the ORTTrainer after it has loaded the checkpoint file and the onnx graph has been initialized (by calling eval_step)
# through the ORTTrainer.state_dict method.
# - the loaded state dictionary (through pickle) is compared against the extracted state dictionary for:
# - equality (or near equality) of model states
# - equality (or near equality) of optimizer states
# - In some cases the comparison is not directly possible; for example single node trainer to a distributed zero trainer because the extracted state
# dictionary is a distributed one and cannot be compared against a single node trainer directly.
# - First these states are saved using pickle for each rank to a file on disk
# - Wait for all ranks to complete writing the file to disk using barrier()
# - Load all states and aggregate them into 1 state dictionary
# - Compare this aggregated state dictionary against the original one loaded from disk.
# - Similarly, it is not possible to compare mixed precision zero trainer state_dict against full precision zero trainer state_dict because the
#     full precision states are sharded in the mixed precision trainer run and not sharded in the full precision trainer run. To compare these two state_dicts:
#     - Both state_dicts (mixed precision and full precision) are saved to file for all ranks.
#     - Wait for all ranks to complete writing the file to disk using barrier()
#     - Load all states and aggregate them into 1 state dictionary for both the configs.
#     - Compare these aggregated state dictionaries against one another.
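# For example (a concrete reading of the workflow above): _single_run(
# save_checkpoint_file, "single_node_full_precision",
# single_node_full_precision_path) below saves that trainer's checkpoint and
# pickled state_dict, and the matching
# test_load_from_single_node_full_precision_into_* scenarios later reload and
# compare them.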
save_checkpoint_file = os.path.join("checkpoint", "orttraining_test_save_checkpoint.py")
load_checkpoint_file = os.path.join("checkpoint", "orttraining_test_load_checkpoint.py")
aggregate_checkpoint_file = os.path.join("checkpoint", "orttraining_test_checkpoint_aggregation.py")
optim_state_file = os.path.join("checkpoint", "orttraining_test_load_optimizer_state.py")
backend_api_file = os.path.join("checkpoint", "orttraining_test_backend_api.py")
single_node_full_precision_path = os.path.join(checkpoint_dir, "single_node", "full_precision")
single_node_mixed_precision_path = os.path.join(checkpoint_dir, "single_node", "mixed_precision")
distributed_zero_full_precision_lamb_path = os.path.join(checkpoint_dir, "distributed_zero", "full_precision", "lamb")
distributed_zero_mixed_precision_lamb_path = os.path.join(checkpoint_dir, "distributed_zero", "mixed_precision", "lamb")
# megatron saving and loading uses a different model
single_node_full_precision_bart_path = os.path.join(checkpoint_dir, "bart", "single_node", "full_precision")
single_node_mixed_precision_bart_path = os.path.join(checkpoint_dir, "bart", "single_node", "mixed_precision")
distributed_zero_full_precision_lamb_bart_path = os.path.join(
checkpoint_dir, "bart", "distributed_zero", "full_precision", "lamb"
)
distributed_zero_mixed_precision_lamb_bart_path = os.path.join(
checkpoint_dir, "bart", "distributed_zero", "mixed_precision", "lamb"
)
distributed_megatron_full_precision_lamb_path = os.path.join(
checkpoint_dir, "bart", "distributed_megatron", "full_precision", "lamb"
)
distributed_megatron_mixed_precision_lamb_path = os.path.join(
checkpoint_dir, "bart", "distributed_megatron", "mixed_precision", "lamb"
)
distributed_zero_megatron_full_precision_adam_path = os.path.join(
checkpoint_dir, "bart", "distributed_zero_megatron", "full_precision", "adam"
)
distributed_zero_megatron_mixed_precision_adam_path = os.path.join(
checkpoint_dir, "bart", "distributed_zero_megatron", "mixed_precision", "adam"
)
distributed_zero_megatron_full_precision_lamb_path = os.path.join(
checkpoint_dir, "bart", "distributed_zero_megatron", "full_precision", "lamb"
)
distributed_zero_megatron_mixed_precision_lamb_path = os.path.join(
checkpoint_dir, "bart", "distributed_zero_megatron", "mixed_precision", "lamb"
)
# save all checkpoint files (pre-checkpoint)
_single_run(save_checkpoint_file, "single_node_full_precision", single_node_full_precision_path)
_single_run(save_checkpoint_file, "single_node_mixed_precision", single_node_mixed_precision_path)
_distributed_run(
save_checkpoint_file, "distributed_zero_full_precision_lamb", distributed_zero_full_precision_lamb_path
)
_distributed_run(
save_checkpoint_file, "distributed_zero_mixed_precision_lamb", distributed_zero_mixed_precision_lamb_path
)
_single_run(save_checkpoint_file, "single_node_full_precision_bart", single_node_full_precision_bart_path)
_single_run(save_checkpoint_file, "single_node_mixed_precision_bart", single_node_mixed_precision_bart_path)
_distributed_run(
save_checkpoint_file, "distributed_zero_full_precision_lamb_bart", distributed_zero_full_precision_lamb_bart_path
)
_distributed_run(
save_checkpoint_file, "distributed_zero_mixed_precision_lamb_bart", distributed_zero_mixed_precision_lamb_bart_path
)
_distributed_run(
save_checkpoint_file, "distributed_megatron_full_precision_lamb", distributed_megatron_full_precision_lamb_path
)
_distributed_run(
save_checkpoint_file, "distributed_megatron_mixed_precision_lamb", distributed_megatron_mixed_precision_lamb_path
)
_distributed_run(
save_checkpoint_file,
"distributed_zero_megatron_full_precision_lamb",
distributed_zero_megatron_full_precision_lamb_path,
)
_distributed_run(
save_checkpoint_file,
"distributed_zero_megatron_mixed_precision_lamb",
distributed_zero_megatron_mixed_precision_lamb_path,
)
# load checkpoint files (post-checkpoint)
# going to single node trainer
_single_run(
load_checkpoint_file,
"test_load_from_single_node_full_precision_into_single_node_full_precision",
single_node_full_precision_path,
)
_single_run(
load_checkpoint_file,
"test_load_from_single_node_mixed_precision_into_single_node_full_precision",
single_node_mixed_precision_path,
)
_single_run(
load_checkpoint_file,
"test_load_from_single_node_mixed_precision_into_single_node_mixed_precision",
single_node_mixed_precision_path,
)
_single_run(
load_checkpoint_file,
"test_load_from_single_node_full_precision_into_single_node_mixed_precision",
single_node_full_precision_path,
)
_single_run(
load_checkpoint_file,
"test_load_from_distributed_zero_full_precision_into_single_node_full_precision",
distributed_zero_full_precision_lamb_path,
)
_single_run(
load_checkpoint_file,
"test_load_from_distributed_zero_mixed_precision_into_single_node_full_precision",
distributed_zero_mixed_precision_lamb_path,
)
_single_run(
load_checkpoint_file,
"test_load_from_distributed_zero_mixed_precision_into_single_node_mixed_precision",
distributed_zero_mixed_precision_lamb_path,
)
_single_run(
load_checkpoint_file,
"test_load_from_distributed_zero_full_precision_into_single_node_mixed_precision",
distributed_zero_full_precision_lamb_path,
)
_single_run(
load_checkpoint_file,
"test_load_from_distributed_megatron_full_precision_into_single_node_full_precision",
distributed_megatron_full_precision_lamb_path,
)
_single_run(
load_checkpoint_file,
"test_load_from_distributed_megatron_mixed_precision_into_single_node_full_precision",
distributed_megatron_mixed_precision_lamb_path,
)
_single_run(
load_checkpoint_file,
"test_load_from_distributed_megatron_mixed_precision_into_single_node_mixed_precision",
distributed_megatron_mixed_precision_lamb_path,
)
_single_run(
load_checkpoint_file,
"test_load_from_distributed_megatron_full_precision_into_single_node_mixed_precision",
distributed_megatron_full_precision_lamb_path,
)
_single_run(
load_checkpoint_file,
"test_load_from_distributed_zero_megatron_full_precision_into_single_node_full_precision",
distributed_zero_megatron_full_precision_lamb_path,
)
_single_run(
load_checkpoint_file,
"test_load_from_distributed_zero_megatron_mixed_precision_into_single_node_full_precision",
distributed_zero_megatron_mixed_precision_lamb_path,
)
_single_run(
load_checkpoint_file,
"test_load_from_distributed_zero_megatron_mixed_precision_into_single_node_mixed_precision",
distributed_zero_megatron_mixed_precision_lamb_path,
)
_single_run(
load_checkpoint_file,
"test_load_from_distributed_zero_megatron_full_precision_into_single_node_mixed_precision",
distributed_zero_megatron_full_precision_lamb_path,
)
# going to distributed zero trainer
_distributed_run(
load_checkpoint_file,
"test_load_from_single_node_full_precision_into_distributed_zero_full_precision",
single_node_full_precision_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_single_node_mixed_precision_into_distributed_zero_full_precision",
single_node_mixed_precision_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_single_node_mixed_precision_into_distributed_zero_mixed_precision",
single_node_mixed_precision_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_single_node_full_precision_into_distributed_zero_mixed_precision",
single_node_full_precision_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_full_precision_into_distributed_zero_full_precision",
distributed_zero_full_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_mixed_precision_into_distributed_zero_full_precision",
distributed_zero_mixed_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_mixed_precision_into_distributed_zero_mixed_precision",
distributed_zero_mixed_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_full_precision_into_distributed_zero_mixed_precision",
distributed_zero_full_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_megatron_full_precision_into_distributed_zero_full_precision",
distributed_megatron_full_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_megatron_mixed_precision_into_distributed_zero_full_precision",
distributed_megatron_mixed_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_megatron_mixed_precision_into_distributed_zero_mixed_precision",
distributed_megatron_mixed_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_megatron_full_precision_into_distributed_zero_mixed_precision",
distributed_megatron_full_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_megatron_full_precision_into_distributed_zero_full_precision",
distributed_zero_megatron_full_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_megatron_mixed_precision_into_distributed_zero_full_precision",
distributed_zero_megatron_mixed_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_megatron_mixed_precision_into_distributed_zero_mixed_precision",
distributed_zero_megatron_mixed_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_megatron_full_precision_into_distributed_zero_mixed_precision",
distributed_zero_megatron_full_precision_lamb_path,
)
# going to distributed megatron trainer
_distributed_run(
load_checkpoint_file,
"test_load_from_single_node_full_precision_into_distributed_megatron_full_precision",
single_node_full_precision_bart_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_single_node_mixed_precision_into_distributed_megatron_full_precision",
single_node_mixed_precision_bart_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_single_node_mixed_precision_into_distributed_megatron_mixed_precision",
single_node_mixed_precision_bart_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_single_node_full_precision_into_distributed_megatron_mixed_precision",
single_node_full_precision_bart_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_full_precision_into_distributed_megatron_full_precision",
distributed_zero_full_precision_lamb_bart_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_mixed_precision_into_distributed_megatron_full_precision",
distributed_zero_mixed_precision_lamb_bart_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_mixed_precision_into_distributed_megatron_mixed_precision",
distributed_zero_mixed_precision_lamb_bart_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_full_precision_into_distributed_megatron_mixed_precision",
distributed_zero_full_precision_lamb_bart_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_megatron_full_precision_into_distributed_megatron_full_precision",
distributed_megatron_full_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_megatron_mixed_precision_into_distributed_megatron_full_precision",
distributed_megatron_mixed_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_megatron_mixed_precision_into_distributed_megatron_mixed_precision",
distributed_megatron_mixed_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_megatron_full_precision_into_distributed_megatron_mixed_precision",
distributed_megatron_full_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_megatron_full_precision_into_distributed_megatron_full_precision",
distributed_zero_megatron_full_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_megatron_mixed_precision_into_distributed_megatron_full_precision",
distributed_zero_megatron_mixed_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_megatron_mixed_precision_into_distributed_megatron_mixed_precision",
distributed_zero_megatron_mixed_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_megatron_full_precision_into_distributed_megatron_mixed_precision",
distributed_zero_megatron_full_precision_lamb_path,
)
# going to distributed zero+megatron trainer
_distributed_run(
load_checkpoint_file,
"test_load_from_single_node_full_precision_into_distributed_zero_megatron_full_precision",
single_node_full_precision_bart_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_single_node_mixed_precision_into_distributed_zero_megatron_full_precision",
single_node_mixed_precision_bart_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_single_node_mixed_precision_into_distributed_zero_megatron_mixed_precision",
single_node_mixed_precision_bart_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_single_node_full_precision_into_distributed_zero_megatron_mixed_precision",
single_node_full_precision_bart_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_full_precision_into_distributed_zero_megatron_full_precision",
distributed_zero_full_precision_lamb_bart_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_mixed_precision_into_distributed_zero_megatron_full_precision",
distributed_zero_mixed_precision_lamb_bart_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_mixed_precision_into_distributed_zero_megatron_mixed_precision",
distributed_zero_mixed_precision_lamb_bart_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_full_precision_into_distributed_zero_megatron_mixed_precision",
distributed_zero_full_precision_lamb_bart_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_megatron_full_precision_into_distributed_zero_megatron_full_precision",
distributed_megatron_full_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_megatron_mixed_precision_into_distributed_zero_megatron_full_precision",
distributed_megatron_mixed_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_megatron_mixed_precision_into_distributed_zero_megatron_mixed_precision",
distributed_megatron_mixed_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_megatron_full_precision_into_distributed_zero_megatron_mixed_precision",
distributed_megatron_full_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_megatron_full_precision_into_distributed_zero_megatron_full_precision",
distributed_zero_megatron_full_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_megatron_mixed_precision_into_distributed_zero_megatron_full_precision",
distributed_zero_megatron_mixed_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_megatron_mixed_precision_into_distributed_zero_megatron_mixed_precision",
distributed_zero_megatron_mixed_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_megatron_full_precision_into_distributed_zero_megatron_mixed_precision",
distributed_zero_megatron_full_precision_lamb_path,
)
shutil.rmtree(checkpoint_dir)
| 45.150327 | 196 | 0.84226 |
import subprocess
import os
import shutil
import sys
from checkpoint._test_helpers import makedir
from _test_commons import _single_run, _distributed_run
checkpoint_dir = os.path.abspath("checkpoint/checkpoint_dir/")
makedir(checkpoint_dir)
save_checkpoint_file = os.path.join("checkpoint", "orttraining_test_save_checkpoint.py")
load_checkpoint_file = os.path.join("checkpoint", "orttraining_test_load_checkpoint.py")
aggregate_checkpoint_file = os.path.join("checkpoint", "orttraining_test_checkpoint_aggregation.py")
optim_state_file = os.path.join("checkpoint", "orttraining_test_load_optimizer_state.py")
backend_api_file = os.path.join("checkpoint", "orttraining_test_backend_api.py")
single_node_full_precision_path = os.path.join(checkpoint_dir, "single_node", "full_precision")
single_node_mixed_precision_path = os.path.join(checkpoint_dir, "single_node", "mixed_precision")
distributed_zero_full_precision_lamb_path = os.path.join(checkpoint_dir, "distributed_zero", "full_precision", "lamb")
distributed_zero_mixed_precision_lamb_path = os.path.join(checkpoint_dir, "distributed_zero", "mixed_precision", "lamb")
single_node_full_precision_bart_path = os.path.join(checkpoint_dir, "bart", "single_node", "full_precision")
single_node_mixed_precision_bart_path = os.path.join(checkpoint_dir, "bart", "single_node", "mixed_precision")
distributed_zero_full_precision_lamb_bart_path = os.path.join(
checkpoint_dir, "bart", "distributed_zero", "full_precision", "lamb"
)
distributed_zero_mixed_precision_lamb_bart_path = os.path.join(
checkpoint_dir, "bart", "distributed_zero", "mixed_precision", "lamb"
)
distributed_megatron_full_precision_lamb_path = os.path.join(
checkpoint_dir, "bart", "distributed_megatron", "full_precision", "lamb"
)
distributed_megatron_mixed_precision_lamb_path = os.path.join(
checkpoint_dir, "bart", "distributed_megatron", "mixed_precision", "lamb"
)
distributed_zero_megatron_full_precision_adam_path = os.path.join(
checkpoint_dir, "bart", "distributed_zero_megatron", "full_precision", "adam"
)
distributed_zero_megatron_mixed_precision_adam_path = os.path.join(
checkpoint_dir, "bart", "distributed_zero_megatron", "mixed_precision", "adam"
)
distributed_zero_megatron_full_precision_lamb_path = os.path.join(
checkpoint_dir, "bart", "distributed_zero_megatron", "full_precision", "lamb"
)
distributed_zero_megatron_mixed_precision_lamb_path = os.path.join(
checkpoint_dir, "bart", "distributed_zero_megatron", "mixed_precision", "lamb"
)
_single_run(save_checkpoint_file, "single_node_full_precision", single_node_full_precision_path)
_single_run(save_checkpoint_file, "single_node_mixed_precision", single_node_mixed_precision_path)
_distributed_run(
save_checkpoint_file, "distributed_zero_full_precision_lamb", distributed_zero_full_precision_lamb_path
)
_distributed_run(
save_checkpoint_file, "distributed_zero_mixed_precision_lamb", distributed_zero_mixed_precision_lamb_path
)
_single_run(save_checkpoint_file, "single_node_full_precision_bart", single_node_full_precision_bart_path)
_single_run(save_checkpoint_file, "single_node_mixed_precision_bart", single_node_mixed_precision_bart_path)
_distributed_run(
save_checkpoint_file, "distributed_zero_full_precision_lamb_bart", distributed_zero_full_precision_lamb_bart_path
)
_distributed_run(
save_checkpoint_file, "distributed_zero_mixed_precision_lamb_bart", distributed_zero_mixed_precision_lamb_bart_path
)
_distributed_run(
save_checkpoint_file, "distributed_megatron_full_precision_lamb", distributed_megatron_full_precision_lamb_path
)
_distributed_run(
save_checkpoint_file, "distributed_megatron_mixed_precision_lamb", distributed_megatron_mixed_precision_lamb_path
)
_distributed_run(
save_checkpoint_file,
"distributed_zero_megatron_full_precision_lamb",
distributed_zero_megatron_full_precision_lamb_path,
)
_distributed_run(
save_checkpoint_file,
"distributed_zero_megatron_mixed_precision_lamb",
distributed_zero_megatron_mixed_precision_lamb_path,
)
_single_run(
load_checkpoint_file,
"test_load_from_single_node_full_precision_into_single_node_full_precision",
single_node_full_precision_path,
)
_single_run(
load_checkpoint_file,
"test_load_from_single_node_mixed_precision_into_single_node_full_precision",
single_node_mixed_precision_path,
)
_single_run(
load_checkpoint_file,
"test_load_from_single_node_mixed_precision_into_single_node_mixed_precision",
single_node_mixed_precision_path,
)
_single_run(
load_checkpoint_file,
"test_load_from_single_node_full_precision_into_single_node_mixed_precision",
single_node_full_precision_path,
)
_single_run(
load_checkpoint_file,
"test_load_from_distributed_zero_full_precision_into_single_node_full_precision",
distributed_zero_full_precision_lamb_path,
)
_single_run(
load_checkpoint_file,
"test_load_from_distributed_zero_mixed_precision_into_single_node_full_precision",
distributed_zero_mixed_precision_lamb_path,
)
_single_run(
load_checkpoint_file,
"test_load_from_distributed_zero_mixed_precision_into_single_node_mixed_precision",
distributed_zero_mixed_precision_lamb_path,
)
_single_run(
load_checkpoint_file,
"test_load_from_distributed_zero_full_precision_into_single_node_mixed_precision",
distributed_zero_full_precision_lamb_path,
)
_single_run(
load_checkpoint_file,
"test_load_from_distributed_megatron_full_precision_into_single_node_full_precision",
distributed_megatron_full_precision_lamb_path,
)
_single_run(
load_checkpoint_file,
"test_load_from_distributed_megatron_mixed_precision_into_single_node_full_precision",
distributed_megatron_mixed_precision_lamb_path,
)
_single_run(
load_checkpoint_file,
"test_load_from_distributed_megatron_mixed_precision_into_single_node_mixed_precision",
distributed_megatron_mixed_precision_lamb_path,
)
_single_run(
load_checkpoint_file,
"test_load_from_distributed_megatron_full_precision_into_single_node_mixed_precision",
distributed_megatron_full_precision_lamb_path,
)
_single_run(
load_checkpoint_file,
"test_load_from_distributed_zero_megatron_full_precision_into_single_node_full_precision",
distributed_zero_megatron_full_precision_lamb_path,
)
_single_run(
load_checkpoint_file,
"test_load_from_distributed_zero_megatron_mixed_precision_into_single_node_full_precision",
distributed_zero_megatron_mixed_precision_lamb_path,
)
_single_run(
load_checkpoint_file,
"test_load_from_distributed_zero_megatron_mixed_precision_into_single_node_mixed_precision",
distributed_zero_megatron_mixed_precision_lamb_path,
)
_single_run(
load_checkpoint_file,
"test_load_from_distributed_zero_megatron_full_precision_into_single_node_mixed_precision",
distributed_zero_megatron_full_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_single_node_full_precision_into_distributed_zero_full_precision",
single_node_full_precision_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_single_node_mixed_precision_into_distributed_zero_full_precision",
single_node_mixed_precision_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_single_node_mixed_precision_into_distributed_zero_mixed_precision",
single_node_mixed_precision_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_single_node_full_precision_into_distributed_zero_mixed_precision",
single_node_full_precision_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_full_precision_into_distributed_zero_full_precision",
distributed_zero_full_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_mixed_precision_into_distributed_zero_full_precision",
distributed_zero_mixed_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_mixed_precision_into_distributed_zero_mixed_precision",
distributed_zero_mixed_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_full_precision_into_distributed_zero_mixed_precision",
distributed_zero_full_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_megatron_full_precision_into_distributed_zero_full_precision",
distributed_megatron_full_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_megatron_mixed_precision_into_distributed_zero_full_precision",
distributed_megatron_mixed_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_megatron_mixed_precision_into_distributed_zero_mixed_precision",
distributed_megatron_mixed_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_megatron_full_precision_into_distributed_zero_mixed_precision",
distributed_megatron_full_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_megatron_full_precision_into_distributed_zero_full_precision",
distributed_zero_megatron_full_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_megatron_mixed_precision_into_distributed_zero_full_precision",
distributed_zero_megatron_mixed_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_megatron_mixed_precision_into_distributed_zero_mixed_precision",
distributed_zero_megatron_mixed_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_megatron_full_precision_into_distributed_zero_mixed_precision",
distributed_zero_megatron_full_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_single_node_full_precision_into_distributed_megatron_full_precision",
single_node_full_precision_bart_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_single_node_mixed_precision_into_distributed_megatron_full_precision",
single_node_mixed_precision_bart_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_single_node_mixed_precision_into_distributed_megatron_mixed_precision",
single_node_mixed_precision_bart_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_single_node_full_precision_into_distributed_megatron_mixed_precision",
single_node_full_precision_bart_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_full_precision_into_distributed_megatron_full_precision",
distributed_zero_full_precision_lamb_bart_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_mixed_precision_into_distributed_megatron_full_precision",
distributed_zero_mixed_precision_lamb_bart_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_mixed_precision_into_distributed_megatron_mixed_precision",
distributed_zero_mixed_precision_lamb_bart_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_full_precision_into_distributed_megatron_mixed_precision",
distributed_zero_full_precision_lamb_bart_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_megatron_full_precision_into_distributed_megatron_full_precision",
distributed_megatron_full_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_megatron_mixed_precision_into_distributed_megatron_full_precision",
distributed_megatron_mixed_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_megatron_mixed_precision_into_distributed_megatron_mixed_precision",
distributed_megatron_mixed_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_megatron_full_precision_into_distributed_megatron_mixed_precision",
distributed_megatron_full_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_megatron_full_precision_into_distributed_megatron_full_precision",
distributed_zero_megatron_full_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_megatron_mixed_precision_into_distributed_megatron_full_precision",
distributed_zero_megatron_mixed_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_megatron_mixed_precision_into_distributed_megatron_mixed_precision",
distributed_zero_megatron_mixed_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_megatron_full_precision_into_distributed_megatron_mixed_precision",
distributed_zero_megatron_full_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_single_node_full_precision_into_distributed_zero_megatron_full_precision",
single_node_full_precision_bart_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_single_node_mixed_precision_into_distributed_zero_megatron_full_precision",
single_node_mixed_precision_bart_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_single_node_mixed_precision_into_distributed_zero_megatron_mixed_precision",
single_node_mixed_precision_bart_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_single_node_full_precision_into_distributed_zero_megatron_mixed_precision",
single_node_full_precision_bart_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_full_precision_into_distributed_zero_megatron_full_precision",
distributed_zero_full_precision_lamb_bart_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_mixed_precision_into_distributed_zero_megatron_full_precision",
distributed_zero_mixed_precision_lamb_bart_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_mixed_precision_into_distributed_zero_megatron_mixed_precision",
distributed_zero_mixed_precision_lamb_bart_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_full_precision_into_distributed_zero_megatron_mixed_precision",
distributed_zero_full_precision_lamb_bart_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_megatron_full_precision_into_distributed_zero_megatron_full_precision",
distributed_megatron_full_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_megatron_mixed_precision_into_distributed_zero_megatron_full_precision",
distributed_megatron_mixed_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_megatron_mixed_precision_into_distributed_zero_megatron_mixed_precision",
distributed_megatron_mixed_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_megatron_full_precision_into_distributed_zero_megatron_mixed_precision",
distributed_megatron_full_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_megatron_full_precision_into_distributed_zero_megatron_full_precision",
distributed_zero_megatron_full_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_megatron_mixed_precision_into_distributed_zero_megatron_full_precision",
distributed_zero_megatron_mixed_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_megatron_mixed_precision_into_distributed_zero_megatron_mixed_precision",
distributed_zero_megatron_mixed_precision_lamb_path,
)
_distributed_run(
load_checkpoint_file,
"test_load_from_distributed_zero_megatron_full_precision_into_distributed_zero_megatron_mixed_precision",
distributed_zero_megatron_full_precision_lamb_path,
)
shutil.rmtree(checkpoint_dir)
| true | true |
f737f51af748d81f700990f6c7b3daa8fd8e7ae5 | 823 | py | Python | tests/isolated/import_deps_test.py | Vs0923/Voxel51 | d644805922ebfbc729f1211f572d77be7d625887 | [
"Apache-2.0"
] | 1 | 2020-10-09T05:16:49.000Z | 2020-10-09T05:16:49.000Z | tests/isolated/import_deps_test.py | Vs0923/Voxel51 | d644805922ebfbc729f1211f572d77be7d625887 | [
"Apache-2.0"
] | null | null | null | tests/isolated/import_deps_test.py | Vs0923/Voxel51 | d644805922ebfbc729f1211f572d77be7d625887 | [
"Apache-2.0"
] | null | null | null | """
Test that the fiftyone core does not depend on TensorFlow or PyTorch.
"""
import sys
import pytest
# raise an ImportError if any of these modules are imported
# https://docs.python.org/3/reference/import.html#the-module-cache
sys.modules["tensorflow"] = None
sys.modules["tensorflow_datasets"] = None
sys.modules["torch"] = None
sys.modules["torchvision"] = None
def test_import_core():
# should not raise an ImportError, i.e. should not depend on the modules
# disabled above
import fiftyone
def test_import_tf():
with pytest.raises(ImportError) as exc_info:
import fiftyone.utils.tf
assert exc_info.value.name == "tensorflow"
def test_import_torch():
with pytest.raises(ImportError) as exc_info:
import fiftyone.utils.torch
assert exc_info.value.name == "torch"
| 23.514286 | 76 | 0.72661 |
import sys
import pytest
sorflow"] = None
sys.modules["tensorflow_datasets"] = None
sys.modules["torch"] = None
sys.modules["torchvision"] = None
def test_import_core():
import fiftyone
def test_import_tf():
with pytest.raises(ImportError) as exc_info:
import fiftyone.utils.tf
assert exc_info.value.name == "tensorflow"
def test_import_torch():
with pytest.raises(ImportError) as exc_info:
import fiftyone.utils.torch
assert exc_info.value.name == "torch"
| true | true |
f737f6de296a1844b0529b7b080c7dc35b93148e | 2,171 | py | Python | sdk/communication/azure-communication-phonenumbers/azure/communication/phonenumbers/_generated/aio/_phone_number_administration_service.py | abhahn/azure-sdk-for-python | 09521dfb517e0859ec961cae006fb728d787b565 | [
"MIT"
] | 2 | 2019-08-23T21:14:00.000Z | 2021-09-07T18:32:34.000Z | sdk/communication/azure-communication-phonenumbers/azure/communication/phonenumbers/_generated/aio/_phone_number_administration_service.py | rakshith91/azure-sdk-for-python | 3c4f2575d31260fa1bda870b04e34c082ac5702b | [
"MIT"
] | null | null | null | sdk/communication/azure-communication-phonenumbers/azure/communication/phonenumbers/_generated/aio/_phone_number_administration_service.py | rakshith91/azure-sdk-for-python | 3c4f2575d31260fa1bda870b04e34c082ac5702b | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any
from azure.core import AsyncPipelineClient
from msrest import Deserializer, Serializer
from ._configuration import PhoneNumberAdministrationServiceConfiguration
from .operations import PhoneNumberAdministrationOperations
from .. import models
class PhoneNumberAdministrationService(object):
"""Phone Number Administration Service.
:ivar phone_number_administration: PhoneNumberAdministrationOperations operations
:vartype phone_number_administration: azure.communication.phonenumbers.aio.operations.PhoneNumberAdministrationOperations
:param endpoint: The endpoint of the Azure Communication resource.
:type endpoint: str
"""
def __init__(
self,
endpoint: str,
**kwargs: Any
) -> None:
base_url = '{endpoint}'
self._config = PhoneNumberAdministrationServiceConfiguration(endpoint, **kwargs)
self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs)
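# Collect every class exported by the generated models module so the
# serializer/deserializer can resolve model types by name.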
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.phone_number_administration = PhoneNumberAdministrationOperations(
self._client, self._config, self._serialize, self._deserialize)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "PhoneNumberAdministrationService":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
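# Hedged usage sketch (the endpoint below is a placeholder, not taken from
# this file); the __aenter__/__aexit__ methods above make the client usable
# as an async context manager:
#
#   async with PhoneNumberAdministrationService("https://<resource>.communication.azure.com") as service:
#       ops = service.phone_number_administration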
| 40.203704 | 125 | 0.69415 |
from typing import Any
from azure.core import AsyncPipelineClient
from msrest import Deserializer, Serializer
from ._configuration import PhoneNumberAdministrationServiceConfiguration
from .operations import PhoneNumberAdministrationOperations
from .. import models
class PhoneNumberAdministrationService(object):
def __init__(
self,
endpoint: str,
**kwargs: Any
) -> None:
base_url = '{endpoint}'
self._config = PhoneNumberAdministrationServiceConfiguration(endpoint, **kwargs)
self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.phone_number_administration = PhoneNumberAdministrationOperations(
self._client, self._config, self._serialize, self._deserialize)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "PhoneNumberAdministrationService":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
| true | true |
f737f6fee7f362944f8abe9a0bba10716e153129 | 11,177 | py | Python | perfkitbenchmarker/static_virtual_machine.py | zmgit/PerfKitBenchmarker | 5d496db22c41f6b345ab28375aae4b5f39415ba7 | [
"Apache-2.0"
] | null | null | null | perfkitbenchmarker/static_virtual_machine.py | zmgit/PerfKitBenchmarker | 5d496db22c41f6b345ab28375aae4b5f39415ba7 | [
"Apache-2.0"
] | null | null | null | perfkitbenchmarker/static_virtual_machine.py | zmgit/PerfKitBenchmarker | 5d496db22c41f6b345ab28375aae4b5f39415ba7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to represent a Static Virtual Machine object.
All static VMs provided in a given group will be used before any non-static
VMs are provisioned. For example, in a test that uses 4 VMs, if 3 static VMs
are provided, all of them will be used and one additional non-static VM
will be provisioned. The VMs should be set up with passwordless ssh and
passwordless sudo (neither sshing nor running a sudo command should prompt
the user for a password).
All VM specifics are self-contained and the class provides methods to
operate on the VM: boot, shutdown, etc.
"""
import collections
import json
import logging
import threading
from perfkitbenchmarker import disk
from perfkitbenchmarker import flags
from perfkitbenchmarker import linux_virtual_machine
from perfkitbenchmarker import os_types
from perfkitbenchmarker import resource
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import windows_virtual_machine
FLAGS = flags.FLAGS
flags.DEFINE_list('static_vm_tags', None,
'The tags of static VMs for PKB to run with. Even if other '
'VMs are specified in a config, if they aren\'t in this list '
'they will be skipped during VM creation.')
class StaticVmSpec(virtual_machine.BaseVmSpec):
"""Object containing all info needed to create a Static VM."""
CLOUD = 'Static'
def __init__(self, component_full_name, ip_address=None, user_name=None,
ssh_private_key=None, internal_ip=None, ssh_port=22,
password=None, disk_specs=None, os_type=None, tag=None,
**kwargs):
"""Initialize the StaticVmSpec object.
Args:
component_full_name: string. Fully qualified name of the configurable
component containing the config options.
ip_address: The public ip address of the VM.
user_name: The username of the VM that the keyfile corresponds to.
ssh_private_key: The absolute path to the private keyfile to use to ssh
to the VM.
internal_ip: The internal ip address of the VM.
ssh_port: The port number to use for SSH and SCP commands.
password: The password used to log into the VM (Windows Only).
disk_specs: None or a list of dictionaries containing kwargs used to
create disk.BaseDiskSpecs.
os_type: The OS type of the VM. See the flag of the same name for more
information.
tag: A string that allows the VM to be included or excluded from a run
by using the 'static_vm_tags' flag.
"""
super(StaticVmSpec, self).__init__(component_full_name, **kwargs)
self.ip_address = ip_address
self.user_name = user_name
self.ssh_private_key = ssh_private_key
self.internal_ip = internal_ip
self.ssh_port = ssh_port
self.password = password
self.os_type = os_type
self.tag = tag
self.disk_specs = [
disk.BaseDiskSpec(
'{0}.disk_specs[{1}]'.format(component_full_name, i),
flag_values=kwargs.get('flag_values'), **disk_spec)
for i, disk_spec in enumerate(disk_specs or ())]
class StaticDisk(disk.BaseDisk):
"""Object representing a static Disk."""
def _Create(self):
"""StaticDisks don't implement _Create()."""
pass
def _Delete(self):
"""StaticDisks don't implement _Delete()."""
pass
def Attach(self):
"""StaticDisks don't implement Attach()."""
pass
def Detach(self):
"""StaticDisks don't implement Detach()."""
pass
class StaticVirtualMachine(virtual_machine.BaseVirtualMachine):
"""Object representing a Static Virtual Machine."""
CLOUD = 'Static'
is_static = True
vm_pool = collections.deque()
vm_pool_lock = threading.Lock()
def __init__(self, vm_spec):
"""Initialize a static virtual machine.
Args:
vm_spec: A StaticVmSpec object containing arguments.
"""
super(StaticVirtualMachine, self).__init__(vm_spec)
self.ip_address = vm_spec.ip_address
self.user_name = vm_spec.user_name
self.ssh_private_key = vm_spec.ssh_private_key
self.internal_ip = vm_spec.internal_ip
self.zone = self.zone or ('Static - %s@%s' % (self.user_name,
self.ip_address))
self.ssh_port = vm_spec.ssh_port
self.password = vm_spec.password
self.disk_specs = vm_spec.disk_specs
self.from_pool = False
def _Create(self):
"""StaticVirtualMachines do not implement _Create()."""
pass
def _Delete(self):
"""Returns the virtual machine to the pool."""
if self.from_pool:
with self.vm_pool_lock:
self.vm_pool.appendleft(self)
def CreateScratchDisk(self, disk_spec):
"""Create a VM's scratch disk.
Args:
disk_spec: virtual_machine.BaseDiskSpec object of the disk.
"""
spec = self.disk_specs[len(self.scratch_disks)]
self.scratch_disks.append(StaticDisk(spec))
def DeleteScratchDisks(self):
"""StaticVirtualMachines do not delete scratch disks."""
pass
@classmethod
def ReadStaticVirtualMachineFile(cls, file_obj):
"""Read a file describing the static VMs to use.
This function will read the static VM information from the provided file,
instantiate VMs corresponding to the info, and add the VMs to the static
VM pool. The provided file should contain a single array in JSON-format.
Each element in the array must be an object with required format:
ip_address: string.
user_name: string.
keyfile_path: string.
ssh_port: integer, optional. Default 22
internal_ip: string, optional.
zone: string, optional.
local_disks: array of strings, optional.
scratch_disk_mountpoints: array of strings, optional
os_type: string, optional (see package_managers)
install_packages: bool, optional
Args:
file_obj: An open handle to a file containing the static VM info.
Raises:
ValueError: On missing required keys, or invalid keys.
"""
vm_arr = json.load(file_obj)
if not isinstance(vm_arr, list):
raise ValueError('Invalid static VM file. Expected array, got: %s.' %
type(vm_arr))
required_keys = frozenset(['ip_address', 'user_name'])
linux_required_keys = required_keys | frozenset(['keyfile_path'])
required_keys_by_os = {
os_types.WINDOWS: required_keys | frozenset(['password']),
os_types.DEBIAN: linux_required_keys,
os_types.RHEL: linux_required_keys,
os_types.UBUNTU_CONTAINER: linux_required_keys,
}
required_keys = required_keys_by_os[FLAGS.os_type]
optional_keys = frozenset(['internal_ip', 'zone', 'local_disks',
'scratch_disk_mountpoints', 'os_type',
'ssh_port', 'install_packages'])
allowed_keys = required_keys | optional_keys
def VerifyItemFormat(item):
"""Verify that the decoded JSON object matches the required schema."""
item_keys = frozenset(item)
extra_keys = sorted(item_keys - allowed_keys)
missing_keys = required_keys - item_keys
if extra_keys:
raise ValueError('Unexpected keys: {0}'.format(', '.join(extra_keys)))
elif missing_keys:
raise ValueError('Missing required keys: {0}'.format(
', '.join(missing_keys)))
for item in vm_arr:
VerifyItemFormat(item)
ip_address = item['ip_address']
user_name = item['user_name']
keyfile_path = item.get('keyfile_path')
internal_ip = item.get('internal_ip')
zone = item.get('zone')
local_disks = item.get('local_disks', [])
password = item.get('password')
if not isinstance(local_disks, list):
raise ValueError('Expected a list of local disks, got: {0}'.format(
local_disks))
scratch_disk_mountpoints = item.get('scratch_disk_mountpoints', [])
if not isinstance(scratch_disk_mountpoints, list):
raise ValueError(
'Expected a list of disk mount points, got: {0}'.format(
scratch_disk_mountpoints))
ssh_port = item.get('ssh_port', 22)
os_type = item.get('os_type')
install_packages = item.get('install_packages', True)
if ((os_type == os_types.WINDOWS and FLAGS.os_type != os_types.WINDOWS) or
(os_type != os_types.WINDOWS and FLAGS.os_type == os_types.WINDOWS)):
raise ValueError('Please only use Windows VMs when using '
'--os_type=windows and vice versa.')
disk_kwargs_list = []
for path in scratch_disk_mountpoints:
disk_kwargs_list.append({'mount_point': path})
for local_disk in local_disks:
disk_kwargs_list.append({'device_path': local_disk})
vm_spec = StaticVmSpec(
'static_vm_file', ip_address=ip_address, user_name=user_name,
ssh_port=ssh_port, install_packages=install_packages,
ssh_private_key=keyfile_path, internal_ip=internal_ip, zone=zone,
disk_specs=disk_kwargs_list, password=password,
flag_values=flags.FLAGS)
vm_class = GetStaticVmClass(os_type)
vm = vm_class(vm_spec)
cls.vm_pool.append(vm)
@classmethod
def GetStaticVirtualMachine(cls):
"""Pull a Static VM from the pool of static VMs.
If there are no VMs left in the pool, the method will return None.
Returns:
A static VM from the pool, or None if there are no static VMs left.
"""
with cls.vm_pool_lock:
if cls.vm_pool:
vm = cls.vm_pool.popleft()
vm.from_pool = True
return vm
else:
return None
def GetStaticVmClass(os_type):
"""Returns the static VM class that corresponds to the os_type."""
if not os_type:
logging.warning('Could not find os type for VM. Defaulting to debian.')
os_type = os_types.DEBIAN
return resource.GetResourceClass(virtual_machine.BaseVirtualMachine,
CLOUD=StaticVirtualMachine.CLOUD,
OS_TYPE=os_type)
class ContainerizedStaticVirtualMachine(
StaticVirtualMachine, linux_virtual_machine.ContainerizedDebianMixin):
pass
class DebianBasedStaticVirtualMachine(StaticVirtualMachine,
linux_virtual_machine.DebianMixin):
pass
class RhelBasedStaticVirtualMachine(StaticVirtualMachine,
linux_virtual_machine.RhelMixin):
pass
class WindowsBasedStaticVirtualMachine(StaticVirtualMachine,
windows_virtual_machine.WindowsMixin):
pass
| 35.709265 | 80 | 0.685425 |
import collections
import json
import logging
import threading
from perfkitbenchmarker import disk
from perfkitbenchmarker import flags
from perfkitbenchmarker import linux_virtual_machine
from perfkitbenchmarker import os_types
from perfkitbenchmarker import resource
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import windows_virtual_machine
FLAGS = flags.FLAGS
flags.DEFINE_list('static_vm_tags', None,
'The tags of static VMs for PKB to run with. Even if other '
'VMs are specified in a config, if they aren\'t in this list '
'they will be skipped during VM creation.')
class StaticVmSpec(virtual_machine.BaseVmSpec):
CLOUD = 'Static'
def __init__(self, component_full_name, ip_address=None, user_name=None,
ssh_private_key=None, internal_ip=None, ssh_port=22,
password=None, disk_specs=None, os_type=None, tag=None,
**kwargs):
super(StaticVmSpec, self).__init__(component_full_name, **kwargs)
self.ip_address = ip_address
self.user_name = user_name
self.ssh_private_key = ssh_private_key
self.internal_ip = internal_ip
self.ssh_port = ssh_port
self.password = password
self.os_type = os_type
self.tag = tag
self.disk_specs = [
disk.BaseDiskSpec(
'{0}.disk_specs[{1}]'.format(component_full_name, i),
flag_values=kwargs.get('flag_values'), **disk_spec)
for i, disk_spec in enumerate(disk_specs or ())]
class StaticDisk(disk.BaseDisk):
def _Create(self):
pass
def _Delete(self):
pass
def Attach(self):
pass
def Detach(self):
pass
class StaticVirtualMachine(virtual_machine.BaseVirtualMachine):
CLOUD = 'Static'
is_static = True
vm_pool = collections.deque()
vm_pool_lock = threading.Lock()
def __init__(self, vm_spec):
super(StaticVirtualMachine, self).__init__(vm_spec)
self.ip_address = vm_spec.ip_address
self.user_name = vm_spec.user_name
self.ssh_private_key = vm_spec.ssh_private_key
self.internal_ip = vm_spec.internal_ip
self.zone = self.zone or ('Static - %s@%s' % (self.user_name,
self.ip_address))
self.ssh_port = vm_spec.ssh_port
self.password = vm_spec.password
self.disk_specs = vm_spec.disk_specs
self.from_pool = False
def _Create(self):
pass
def _Delete(self):
if self.from_pool:
with self.vm_pool_lock:
self.vm_pool.appendleft(self)
def CreateScratchDisk(self, disk_spec):
spec = self.disk_specs[len(self.scratch_disks)]
self.scratch_disks.append(StaticDisk(spec))
def DeleteScratchDisks(self):
pass
@classmethod
def ReadStaticVirtualMachineFile(cls, file_obj):
vm_arr = json.load(file_obj)
if not isinstance(vm_arr, list):
raise ValueError('Invalid static VM file. Expected array, got: %s.' %
type(vm_arr))
required_keys = frozenset(['ip_address', 'user_name'])
linux_required_keys = required_keys | frozenset(['keyfile_path'])
required_keys_by_os = {
os_types.WINDOWS: required_keys | frozenset(['password']),
os_types.DEBIAN: linux_required_keys,
os_types.RHEL: linux_required_keys,
os_types.UBUNTU_CONTAINER: linux_required_keys,
}
required_keys = required_keys_by_os[FLAGS.os_type]
optional_keys = frozenset(['internal_ip', 'zone', 'local_disks',
'scratch_disk_mountpoints', 'os_type',
'ssh_port', 'install_packages'])
allowed_keys = required_keys | optional_keys
def VerifyItemFormat(item):
item_keys = frozenset(item)
extra_keys = sorted(item_keys - allowed_keys)
missing_keys = required_keys - item_keys
if extra_keys:
raise ValueError('Unexpected keys: {0}'.format(', '.join(extra_keys)))
elif missing_keys:
raise ValueError('Missing required keys: {0}'.format(
', '.join(missing_keys)))
for item in vm_arr:
VerifyItemFormat(item)
ip_address = item['ip_address']
user_name = item['user_name']
keyfile_path = item.get('keyfile_path')
internal_ip = item.get('internal_ip')
zone = item.get('zone')
local_disks = item.get('local_disks', [])
password = item.get('password')
if not isinstance(local_disks, list):
raise ValueError('Expected a list of local disks, got: {0}'.format(
local_disks))
scratch_disk_mountpoints = item.get('scratch_disk_mountpoints', [])
if not isinstance(scratch_disk_mountpoints, list):
raise ValueError(
'Expected a list of disk mount points, got: {0}'.format(
scratch_disk_mountpoints))
ssh_port = item.get('ssh_port', 22)
os_type = item.get('os_type')
install_packages = item.get('install_packages', True)
if ((os_type == os_types.WINDOWS and FLAGS.os_type != os_types.WINDOWS) or
(os_type != os_types.WINDOWS and FLAGS.os_type == os_types.WINDOWS)):
raise ValueError('Please only use Windows VMs when using '
'--os_type=windows and vice versa.')
disk_kwargs_list = []
for path in scratch_disk_mountpoints:
disk_kwargs_list.append({'mount_point': path})
for local_disk in local_disks:
disk_kwargs_list.append({'device_path': local_disk})
vm_spec = StaticVmSpec(
'static_vm_file', ip_address=ip_address, user_name=user_name,
ssh_port=ssh_port, install_packages=install_packages,
ssh_private_key=keyfile_path, internal_ip=internal_ip, zone=zone,
disk_specs=disk_kwargs_list, password=password,
flag_values=flags.FLAGS)
vm_class = GetStaticVmClass(os_type)
vm = vm_class(vm_spec)
cls.vm_pool.append(vm)
@classmethod
def GetStaticVirtualMachine(cls):
with cls.vm_pool_lock:
if cls.vm_pool:
vm = cls.vm_pool.popleft()
vm.from_pool = True
return vm
else:
return None
def GetStaticVmClass(os_type):
if not os_type:
logging.warning('Could not find os type for VM. Defaulting to debian.')
os_type = os_types.DEBIAN
return resource.GetResourceClass(virtual_machine.BaseVirtualMachine,
CLOUD=StaticVirtualMachine.CLOUD,
OS_TYPE=os_type)
class ContainerizedStaticVirtualMachine(
StaticVirtualMachine, linux_virtual_machine.ContainerizedDebianMixin):
pass
class DebianBasedStaticVirtualMachine(StaticVirtualMachine,
linux_virtual_machine.DebianMixin):
pass
class RhelBasedStaticVirtualMachine(StaticVirtualMachine,
linux_virtual_machine.RhelMixin):
pass
class WindowsBasedStaticVirtualMachine(StaticVirtualMachine,
windows_virtual_machine.WindowsMixin):
pass
| true | true |
f737f73b87d428e1c8fc898334f98d981f840a28 | 5,500 | py | Python | ros/src/waypoint_updater/waypoint_updater.py | wolf-zchen/CarND-capstone | b6b768bfd01f03a5256c2db4b84f9d7a42149de2 | [
"MIT"
] | null | null | null | ros/src/waypoint_updater/waypoint_updater.py | wolf-zchen/CarND-capstone | b6b768bfd01f03a5256c2db4b84f9d7a42149de2 | [
"MIT"
] | null | null | null | ros/src/waypoint_updater/waypoint_updater.py | wolf-zchen/CarND-capstone | b6b768bfd01f03a5256c2db4b84f9d7a42149de2 | [
"MIT"
] | 2 | 2018-10-15T00:34:10.000Z | 2018-10-20T21:44:08.000Z | #!/usr/bin/env python
import numpy as np
import rospy
from std_msgs.msg import Int32
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import Lane, Waypoint
from scipy.spatial import KDTree
import math
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 30 # Number of waypoints we will publish. You can change this number
MAX_DECEL = 1
class WaypointUpdater(object):
def __init__(self):
rospy.init_node('waypoint_updater')
rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb,queue_size = 1)
rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb, queue_size = 1)
rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb, queue_size = 1)
# TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below
self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)
# TODO: Add other member variables you need below
self.pose = None
self.base_waypoints = None
self.waypoints_2d = None
self.waypoint_tree = None
self.stopline_wp_idx = -1
self.loop()
#rospy.spin()
def loop(self):
rate = rospy.Rate(50)
while not rospy.is_shutdown():
if self.pose and self.base_waypoints:
#Get closest waypoint
closest_waypoint_idx = self.get_closest_waypoint_idx()
self.publish_waypoints(closest_waypoint_idx)
rate.sleep()
def get_closest_waypoint_idx(self):
x = self.pose.pose.position.x
y = self.pose.pose.position.y
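# KDTree.query returns a (distance, index) pair for the single nearest
# neighbour; [1] keeps just the index into self.waypoints_2d.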
closest_idx = self.waypoint_tree.query([x,y],1)[1]
# check if closest is ahead or behind vehicle
closest_coord = self.waypoints_2d[closest_idx]
prev_coord = self.waypoints_2d[closest_idx - 1]
# Equation for hyperplane through closest_coords
cl_vect = np.array(closest_coord)
prev_vect = np.array(prev_coord)
pos_vect = np.array([x,y])
val = np.dot(cl_vect - prev_vect, pos_vect -cl_vect)
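# A positive dot product means the car has already passed the closest
# waypoint, so advance to the next one (modulo track length). Worked example:
# prev=(0,0), closest=(1,0), pos=(1.5,0) gives val = (1,0).(0.5,0) = 0.5 > 0.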
if val > 0:
closest_idx = (closest_idx + 1) % len(self.waypoints_2d)
return closest_idx
def publish_waypoints(self,closest_idx):
#lane = Lane()
#lane.header = self.base_waypoints.header
#lane.waypoints = self.base_waypoints.waypoints[closest_idx:closest_idx + LOOKAHEAD_WPS]
final_lane = self.generate_lane()
self.final_waypoints_pub.publish(final_lane)
def generate_lane(self):
lane = Lane()
closest_idx = self.get_closest_waypoint_idx()
farthest_idx = closest_idx + LOOKAHEAD_WPS
base_waypoints = self.base_waypoints.waypoints[closest_idx:farthest_idx]
if self.stopline_wp_idx == -1 or (self.stopline_wp_idx >= farthest_idx):
lane.waypoints = base_waypoints
else:
lane.waypoints = self.decelerate_waypoints(base_waypoints,closest_idx)
return lane
def decelerate_waypoints(self,waypoints,closest_idx):
temp = []
for i, wp in enumerate(waypoints):
p = Waypoint()
p.pose = wp.pose
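# Stop a few waypoints before the detected stop line (the offset of 3 reads
# as a rough allowance for the car's length); indices are relative to the
# sliced base_waypoints list, hence the max(..., 0) clamp.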
stop_idx = max(self.stopline_wp_idx - closest_idx - 3, 0)
dist = self.distance(waypoints, i, stop_idx)
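# Constant-deceleration profile from v**2 = 2*a*d: this is the largest speed
# that still allows stopping within dist at MAX_DECEL; speeds below 1.0 are
# clamped to a full stop.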
vel = math.sqrt(2 * MAX_DECEL * dist)
if vel < 1.0:
vel = 0
p.twist.twist.linear.x = min(vel, wp.twist.twist.linear.x)
temp.append(p)
return temp
def pose_cb(self, msg):
# TODO: Implement
self.pose = msg
def waypoints_cb(self, waypoints):
# TODO: Implement
self.base_waypoints = waypoints
if not self.waypoints_2d:
self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]
self.waypoint_tree = KDTree(self.waypoints_2d)
def traffic_cb(self, msg):
# TODO: Callback for /traffic_waypoint message. Implement
self.stopline_wp_idx = msg.data
def obstacle_cb(self, msg):
# TODO: Callback for /obstacle_waypoint message. We will implement it later
pass
def get_waypoint_velocity(self, waypoint):
return waypoint.twist.twist.linear.x
def set_waypoint_velocity(self, waypoints, waypoint, velocity):
waypoints[waypoint].twist.twist.linear.x = velocity
def distance(self, waypoints, wp1, wp2):
dist = 0
dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
for i in range(wp1, wp2+1):
dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
wp1 = i
return dist
if __name__ == '__main__':
try:
WaypointUpdater()
except rospy.ROSInterruptException:
rospy.logerr('Could not start waypoint updater node.')
| 35.483871 | 132 | 0.657636 |
import numpy as np
import rospy
from std_msgs.msg import Int32
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import Lane, Waypoint
from scipy.spatial import KDTree
import math
LOOKAHEAD_WPS = 30
MAX_DECEL = 1
class WaypointUpdater(object):
def __init__(self):
rospy.init_node('waypoint_updater')
rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb,queue_size = 1)
rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb, queue_size = 1)
rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb, queue_size = 1)
self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)
self.pose = None
self.base_waypoints = None
self.waypoints_2d = None
self.waypoint_tree = None
self.stopline_wp_idx = -1
self.loop()
def loop(self):
rate = rospy.Rate(50)
while not rospy.is_shutdown():
if self.pose and self.base_waypoints:
closest_waypoint_idx = self.get_closest_waypoint_idx()
self.publish_waypoints(closest_waypoint_idx)
rate.sleep()
def get_closest_waypoint_idx(self):
x = self.pose.pose.position.x
y = self.pose.pose.position.y
closest_idx = self.waypoint_tree.query([x,y],1)[1]
closest_coord = self.waypoints_2d[closest_idx]
prev_coord = self.waypoints_2d[closest_idx - 1]
cl_vect = np.array(closest_coord)
prev_vect = np.array(prev_coord)
pos_vect = np.array([x,y])
val = np.dot(cl_vect - prev_vect, pos_vect -cl_vect)
if val > 0:
closest_idx = (closest_idx + 1) % len(self.waypoints_2d)
return closest_idx
def publish_waypoints(self,closest_idx):
final_lane = self.generate_lane()
self.final_waypoints_pub.publish(final_lane)
def generate_lane(self):
lane = Lane()
closest_idx = self.get_closest_waypoint_idx()
farthest_idx = closest_idx + LOOKAHEAD_WPS
base_waypoints = self.base_waypoints.waypoints[closest_idx:farthest_idx]
if self.stopline_wp_idx == -1 or (self.stopline_wp_idx >= farthest_idx):
lane.waypoints = base_waypoints
else:
lane.waypoints = self.decelerate_waypoints(base_waypoints,closest_idx)
return lane
def decelerate_waypoints(self,waypoints,closest_idx):
temp = []
for i, wp in enumerate(waypoints):
p = Waypoint()
p.pose = wp.pose
stop_idx = max(self.stopline_wp_idx - closest_idx - 3, 0)
dist = self.distance(waypoints, i, stop_idx)
vel = math.sqrt(2 * MAX_DECEL * dist)
if vel < 1.0:
vel = 0
p.twist.twist.linear.x = min(vel, wp.twist.twist.linear.x)
temp.append(p)
return temp
def pose_cb(self, msg):
self.pose = msg
def waypoints_cb(self, waypoints):
self.base_waypoints = waypoints
if not self.waypoints_2d:
self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]
self.waypoint_tree = KDTree(self.waypoints_2d)
def traffic_cb(self, msg):
self.stopline_wp_idx = msg.data
def obstacle_cb(self, msg):
pass
def get_waypoint_velocity(self, waypoint):
return waypoint.twist.twist.linear.x
def set_waypoint_velocity(self, waypoints, waypoint, velocity):
waypoints[waypoint].twist.twist.linear.x = velocity
def distance(self, waypoints, wp1, wp2):
dist = 0
dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
for i in range(wp1, wp2+1):
dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
wp1 = i
return dist
if __name__ == '__main__':
try:
WaypointUpdater()
except rospy.ROSInterruptException:
rospy.logerr('Could not start waypoint updater node.')
| true | true |
f737f882db2b290298fbb71d121e895fc21988ce | 45,958 | py | Python | espnet/nets/pytorch_backend/e2e_vc_transformer.py | undeadyequ/espnet | 8c3f85ce695153abcb9cf365180b1d7554ad565e | [
"Apache-2.0"
] | 4 | 2021-06-18T01:57:08.000Z | 2021-12-23T05:26:02.000Z | espnet/nets/pytorch_backend/e2e_vc_transformer.py | undeadyequ/espnet | 8c3f85ce695153abcb9cf365180b1d7554ad565e | [
"Apache-2.0"
] | null | null | null | espnet/nets/pytorch_backend/e2e_vc_transformer.py | undeadyequ/espnet | 8c3f85ce695153abcb9cf365180b1d7554ad565e | [
"Apache-2.0"
] | 1 | 2022-01-07T02:29:05.000Z | 2022-01-07T02:29:05.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Nagoya University (Wen-Chin Huang)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Voice Transformer Network (Transformer-VC) related modules."""
import logging
import torch
import torch.nn.functional as F
from espnet.nets.pytorch_backend.e2e_asr_transformer import subsequent_mask
from espnet.nets.pytorch_backend.e2e_tts_tacotron2 import (
Tacotron2Loss as TransformerLoss, # noqa: H301
)
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask
from espnet.nets.pytorch_backend.tacotron2.decoder import Postnet
from espnet.nets.pytorch_backend.tacotron2.decoder import Prenet as DecoderPrenet
from espnet.nets.pytorch_backend.tacotron2.encoder import Encoder as EncoderPrenet
from espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention
from espnet.nets.pytorch_backend.transformer.decoder import Decoder
from espnet.nets.pytorch_backend.transformer.embedding import PositionalEncoding
from espnet.nets.pytorch_backend.transformer.embedding import ScaledPositionalEncoding
from espnet.nets.pytorch_backend.transformer.encoder import Encoder
from espnet.nets.pytorch_backend.transformer.initializer import initialize
from espnet.nets.tts_interface import TTSInterface
from espnet.utils.cli_utils import strtobool
from espnet.utils.fill_missing_args import fill_missing_args
from espnet.nets.pytorch_backend.e2e_tts_transformer import (
GuidedMultiHeadAttentionLoss, # noqa: H301
TTSPlot, # noqa: H301
)
class Transformer(TTSInterface, torch.nn.Module):
"""VC Transformer module.
This is a module of the Voice Transformer Network
(a.k.a. VTN or Transformer-VC) described in
`Voice Transformer Network: Sequence-to-Sequence
Voice Conversion Using Transformer with
Text-to-Speech Pretraining`_,
    which converts a sequence of acoustic features
    into another sequence of acoustic features.
.. _`Voice Transformer Network: Sequence-to-Sequence
Voice Conversion Using Transformer with
Text-to-Speech Pretraining`:
https://arxiv.org/pdf/1912.06813.pdf
"""
@staticmethod
def add_arguments(parser):
"""Add model-specific arguments to the parser."""
group = parser.add_argument_group("transformer model setting")
# network structure related
group.add_argument(
"--eprenet-conv-layers",
default=0,
type=int,
help="Number of encoder prenet convolution layers",
)
group.add_argument(
"--eprenet-conv-chans",
default=0,
type=int,
help="Number of encoder prenet convolution channels",
)
group.add_argument(
"--eprenet-conv-filts",
default=0,
type=int,
help="Filter size of encoder prenet convolution",
)
group.add_argument(
"--transformer-input-layer",
default="linear",
type=str,
help="Type of input layer (linear or conv2d)",
)
group.add_argument(
"--dprenet-layers",
default=2,
type=int,
help="Number of decoder prenet layers",
)
group.add_argument(
"--dprenet-units",
default=256,
type=int,
help="Number of decoder prenet hidden units",
)
group.add_argument(
"--elayers", default=3, type=int, help="Number of encoder layers"
)
group.add_argument(
"--eunits", default=1536, type=int, help="Number of encoder hidden units"
)
group.add_argument(
"--adim",
default=384,
type=int,
help="Number of attention transformation dimensions",
)
group.add_argument(
"--aheads",
default=4,
type=int,
help="Number of heads for multi head attention",
)
group.add_argument(
"--dlayers", default=3, type=int, help="Number of decoder layers"
)
group.add_argument(
"--dunits", default=1536, type=int, help="Number of decoder hidden units"
)
group.add_argument(
"--positionwise-layer-type",
default="linear",
type=str,
choices=["linear", "conv1d", "conv1d-linear"],
help="Positionwise layer type.",
)
group.add_argument(
"--positionwise-conv-kernel-size",
default=1,
type=int,
help="Kernel size of positionwise conv1d layer",
)
group.add_argument(
"--postnet-layers", default=5, type=int, help="Number of postnet layers"
)
group.add_argument(
"--postnet-chans", default=256, type=int, help="Number of postnet channels"
)
group.add_argument(
"--postnet-filts", default=5, type=int, help="Filter size of postnet"
)
group.add_argument(
"--use-scaled-pos-enc",
default=True,
type=strtobool,
help="Use trainable scaled positional encoding"
"instead of the fixed scale one.",
)
group.add_argument(
"--use-batch-norm",
default=True,
type=strtobool,
help="Whether to use batch normalization",
)
group.add_argument(
"--encoder-normalize-before",
default=False,
type=strtobool,
help="Whether to apply layer norm before encoder block",
)
group.add_argument(
"--decoder-normalize-before",
default=False,
type=strtobool,
help="Whether to apply layer norm before decoder block",
)
group.add_argument(
"--encoder-concat-after",
default=False,
type=strtobool,
help="Whether to concatenate attention layer's input and output in encoder",
)
group.add_argument(
"--decoder-concat-after",
default=False,
type=strtobool,
help="Whether to concatenate attention layer's input and output in decoder",
)
group.add_argument(
"--reduction-factor",
default=1,
type=int,
help="Reduction factor (for decoder)",
)
group.add_argument(
"--encoder-reduction-factor",
default=1,
type=int,
help="Reduction factor (for encoder)",
)
group.add_argument(
"--spk-embed-dim",
default=None,
type=int,
help="Number of speaker embedding dimensions",
)
group.add_argument(
"--spk-embed-integration-type",
type=str,
default="add",
choices=["add", "concat"],
help="How to integrate speaker embedding",
)
# training related
group.add_argument(
"--transformer-init",
type=str,
default="pytorch",
choices=[
"pytorch",
"xavier_uniform",
"xavier_normal",
"kaiming_uniform",
"kaiming_normal",
],
help="How to initialize transformer parameters",
)
group.add_argument(
"--initial-encoder-alpha",
type=float,
default=1.0,
help="Initial alpha value in encoder's ScaledPositionalEncoding",
)
group.add_argument(
"--initial-decoder-alpha",
type=float,
default=1.0,
help="Initial alpha value in decoder's ScaledPositionalEncoding",
)
group.add_argument(
"--transformer-lr",
default=1.0,
type=float,
help="Initial value of learning rate",
)
group.add_argument(
"--transformer-warmup-steps",
default=4000,
type=int,
help="Optimizer warmup steps",
)
group.add_argument(
"--transformer-enc-dropout-rate",
default=0.1,
type=float,
help="Dropout rate for transformer encoder except for attention",
)
group.add_argument(
"--transformer-enc-positional-dropout-rate",
default=0.1,
type=float,
help="Dropout rate for transformer encoder positional encoding",
)
group.add_argument(
"--transformer-enc-attn-dropout-rate",
default=0.1,
type=float,
help="Dropout rate for transformer encoder self-attention",
)
group.add_argument(
"--transformer-dec-dropout-rate",
default=0.1,
type=float,
help="Dropout rate for transformer decoder "
"except for attention and pos encoding",
)
group.add_argument(
"--transformer-dec-positional-dropout-rate",
default=0.1,
type=float,
help="Dropout rate for transformer decoder positional encoding",
)
group.add_argument(
"--transformer-dec-attn-dropout-rate",
default=0.1,
type=float,
help="Dropout rate for transformer decoder self-attention",
)
group.add_argument(
"--transformer-enc-dec-attn-dropout-rate",
default=0.1,
type=float,
help="Dropout rate for transformer encoder-decoder attention",
)
group.add_argument(
"--eprenet-dropout-rate",
default=0.5,
type=float,
help="Dropout rate in encoder prenet",
)
group.add_argument(
"--dprenet-dropout-rate",
default=0.5,
type=float,
help="Dropout rate in decoder prenet",
)
group.add_argument(
"--postnet-dropout-rate",
default=0.5,
type=float,
help="Dropout rate in postnet",
)
group.add_argument(
"--pretrained-model", default=None, type=str, help="Pretrained model path"
)
# loss related
group.add_argument(
"--use-masking",
default=True,
type=strtobool,
help="Whether to use masking in calculation of loss",
)
group.add_argument(
"--use-weighted-masking",
default=False,
type=strtobool,
help="Whether to use weighted masking in calculation of loss",
)
group.add_argument(
"--loss-type",
default="L1",
choices=["L1", "L2", "L1+L2"],
help="How to calc loss",
)
group.add_argument(
"--bce-pos-weight",
default=5.0,
type=float,
help="Positive sample weight in BCE calculation "
"(only for use-masking=True)",
)
group.add_argument(
"--use-guided-attn-loss",
default=False,
type=strtobool,
help="Whether to use guided attention loss",
)
group.add_argument(
"--guided-attn-loss-sigma",
default=0.4,
type=float,
help="Sigma in guided attention loss",
)
group.add_argument(
"--guided-attn-loss-lambda",
default=1.0,
type=float,
help="Lambda in guided attention loss",
)
group.add_argument(
"--num-heads-applied-guided-attn",
default=2,
type=int,
help="Number of heads in each layer to be applied guided attention loss"
"if set -1, all of the heads will be applied.",
)
group.add_argument(
"--num-layers-applied-guided-attn",
default=2,
type=int,
help="Number of layers to be applied guided attention loss"
"if set -1, all of the layers will be applied.",
)
group.add_argument(
"--modules-applied-guided-attn",
type=str,
nargs="+",
default=["encoder-decoder"],
help="Module name list to be applied guided attention loss",
)
return parser
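    # These flags are consumed from the training command line, e.g. (a
    # hypothetical invocation; flag names map 1:1 to the options above):
    #   vc_train.py ... --elayers 6 --adim 384 --aheads 4 \
    #       --reduction-factor 2 --use-guided-attn-loss True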
@property
def attention_plot_class(self):
"""Return plot class for attention weight plot."""
return TTSPlot
def __init__(self, idim, odim, args=None):
"""Initialize Transformer-VC module.
Args:
idim (int): Dimension of the inputs.
odim (int): Dimension of the outputs.
args (Namespace, optional):
- eprenet_conv_layers (int):
Number of encoder prenet convolution layers.
- eprenet_conv_chans (int):
Number of encoder prenet convolution channels.
- eprenet_conv_filts (int):
Filter size of encoder prenet convolution.
- transformer_input_layer (str): Input layer before the encoder.
- dprenet_layers (int): Number of decoder prenet layers.
- dprenet_units (int): Number of decoder prenet hidden units.
- elayers (int): Number of encoder layers.
- eunits (int): Number of encoder hidden units.
- adim (int): Number of attention transformation dimensions.
- aheads (int): Number of heads for multi head attention.
- dlayers (int): Number of decoder layers.
- dunits (int): Number of decoder hidden units.
- postnet_layers (int): Number of postnet layers.
- postnet_chans (int): Number of postnet channels.
- postnet_filts (int): Filter size of postnet.
- use_scaled_pos_enc (bool):
Whether to use trainable scaled positional encoding.
- use_batch_norm (bool):
Whether to use batch normalization in encoder prenet.
- encoder_normalize_before (bool):
Whether to perform layer normalization before encoder block.
- decoder_normalize_before (bool):
Whether to perform layer normalization before decoder block.
- encoder_concat_after (bool): Whether to concatenate
attention layer's input and output in encoder.
- decoder_concat_after (bool): Whether to concatenate
attention layer's input and output in decoder.
- reduction_factor (int): Reduction factor (for decoder).
- encoder_reduction_factor (int): Reduction factor (for encoder).
            - spk_embed_dim (int): Number of speaker embedding dimensions.
- spk_embed_integration_type: How to integrate speaker embedding.
- transformer_init (float): How to initialize transformer parameters.
- transformer_lr (float): Initial value of learning rate.
- transformer_warmup_steps (int): Optimizer warmup steps.
- transformer_enc_dropout_rate (float):
Dropout rate in encoder except attention & positional encoding.
- transformer_enc_positional_dropout_rate (float):
Dropout rate after encoder positional encoding.
- transformer_enc_attn_dropout_rate (float):
Dropout rate in encoder self-attention module.
- transformer_dec_dropout_rate (float):
Dropout rate in decoder except attention & positional encoding.
- transformer_dec_positional_dropout_rate (float):
Dropout rate after decoder positional encoding.
- transformer_dec_attn_dropout_rate (float):
                Dropout rate in decoder self-attention module.
- transformer_enc_dec_attn_dropout_rate (float):
                Dropout rate in encoder-decoder attention module.
- eprenet_dropout_rate (float): Dropout rate in encoder prenet.
- dprenet_dropout_rate (float): Dropout rate in decoder prenet.
- postnet_dropout_rate (float): Dropout rate in postnet.
- use_masking (bool):
Whether to apply masking for padded part in loss calculation.
- use_weighted_masking (bool):
Whether to apply weighted masking in loss calculation.
- bce_pos_weight (float): Positive sample weight in bce calculation
(only for use_masking=true).
- loss_type (str): How to calculate loss.
- use_guided_attn_loss (bool): Whether to use guided attention loss.
- num_heads_applied_guided_attn (int):
Number of heads in each layer to apply guided attention loss.
- num_layers_applied_guided_attn (int):
Number of layers to apply guided attention loss.
- modules_applied_guided_attn (list):
List of module names to apply guided attention loss.
            - guided_attn_loss_sigma (float): Sigma in guided attention loss.
            - guided_attn_loss_lambda (float): Lambda in guided attention loss.
"""
# initialize base classes
TTSInterface.__init__(self)
torch.nn.Module.__init__(self)
# fill missing arguments
args = fill_missing_args(args, self.add_arguments)
# store hyperparameters
self.idim = idim
self.odim = odim
self.spk_embed_dim = args.spk_embed_dim
if self.spk_embed_dim is not None:
self.spk_embed_integration_type = args.spk_embed_integration_type
self.use_scaled_pos_enc = args.use_scaled_pos_enc
self.reduction_factor = args.reduction_factor
self.encoder_reduction_factor = args.encoder_reduction_factor
self.transformer_input_layer = args.transformer_input_layer
self.loss_type = args.loss_type
self.use_guided_attn_loss = args.use_guided_attn_loss
if self.use_guided_attn_loss:
if args.num_layers_applied_guided_attn == -1:
self.num_layers_applied_guided_attn = args.elayers
else:
self.num_layers_applied_guided_attn = (
args.num_layers_applied_guided_attn
)
if args.num_heads_applied_guided_attn == -1:
self.num_heads_applied_guided_attn = args.aheads
else:
self.num_heads_applied_guided_attn = args.num_heads_applied_guided_attn
self.modules_applied_guided_attn = args.modules_applied_guided_attn
# use idx 0 as padding idx
padding_idx = 0
# get positional encoding class
pos_enc_class = (
ScaledPositionalEncoding if self.use_scaled_pos_enc else PositionalEncoding
)
# define transformer encoder
if args.eprenet_conv_layers != 0:
# encoder prenet
encoder_input_layer = torch.nn.Sequential(
EncoderPrenet(
idim=idim,
elayers=0,
econv_layers=args.eprenet_conv_layers,
econv_chans=args.eprenet_conv_chans,
econv_filts=args.eprenet_conv_filts,
use_batch_norm=args.use_batch_norm,
dropout_rate=args.eprenet_dropout_rate,
padding_idx=padding_idx,
input_layer=torch.nn.Linear(
idim * args.encoder_reduction_factor, idim
),
),
torch.nn.Linear(args.eprenet_conv_chans, args.adim),
)
elif args.transformer_input_layer == "linear":
encoder_input_layer = torch.nn.Linear(
idim * args.encoder_reduction_factor, args.adim
)
else:
encoder_input_layer = args.transformer_input_layer
self.encoder = Encoder(
idim=idim,
attention_dim=args.adim,
attention_heads=args.aheads,
linear_units=args.eunits,
num_blocks=args.elayers,
input_layer=encoder_input_layer,
dropout_rate=args.transformer_enc_dropout_rate,
positional_dropout_rate=args.transformer_enc_positional_dropout_rate,
attention_dropout_rate=args.transformer_enc_attn_dropout_rate,
pos_enc_class=pos_enc_class,
normalize_before=args.encoder_normalize_before,
concat_after=args.encoder_concat_after,
positionwise_layer_type=args.positionwise_layer_type,
positionwise_conv_kernel_size=args.positionwise_conv_kernel_size,
)
# define projection layer
if self.spk_embed_dim is not None:
if self.spk_embed_integration_type == "add":
self.projection = torch.nn.Linear(self.spk_embed_dim, args.adim)
else:
self.projection = torch.nn.Linear(
args.adim + self.spk_embed_dim, args.adim
)
# define transformer decoder
if args.dprenet_layers != 0:
# decoder prenet
decoder_input_layer = torch.nn.Sequential(
DecoderPrenet(
idim=odim,
n_layers=args.dprenet_layers,
n_units=args.dprenet_units,
dropout_rate=args.dprenet_dropout_rate,
),
torch.nn.Linear(args.dprenet_units, args.adim),
)
else:
decoder_input_layer = "linear"
self.decoder = Decoder(
odim=-1,
attention_dim=args.adim,
attention_heads=args.aheads,
linear_units=args.dunits,
num_blocks=args.dlayers,
dropout_rate=args.transformer_dec_dropout_rate,
positional_dropout_rate=args.transformer_dec_positional_dropout_rate,
self_attention_dropout_rate=args.transformer_dec_attn_dropout_rate,
src_attention_dropout_rate=args.transformer_enc_dec_attn_dropout_rate,
input_layer=decoder_input_layer,
use_output_layer=False,
pos_enc_class=pos_enc_class,
normalize_before=args.decoder_normalize_before,
concat_after=args.decoder_concat_after,
)
# define final projection
self.feat_out = torch.nn.Linear(args.adim, odim * args.reduction_factor)
self.prob_out = torch.nn.Linear(args.adim, args.reduction_factor)
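        # feat_out emits reduction_factor frames per decoder step (the
        # (B, L//r, odim * r) output is later reshaped to (B, L, odim));
        # prob_out emits one stop-token logit per generated frame.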
# define postnet
self.postnet = (
None
if args.postnet_layers == 0
else Postnet(
idim=idim,
odim=odim,
n_layers=args.postnet_layers,
n_chans=args.postnet_chans,
n_filts=args.postnet_filts,
use_batch_norm=args.use_batch_norm,
dropout_rate=args.postnet_dropout_rate,
)
)
# define loss function
self.criterion = TransformerLoss(
use_masking=args.use_masking,
use_weighted_masking=args.use_weighted_masking,
bce_pos_weight=args.bce_pos_weight,
)
if self.use_guided_attn_loss:
self.attn_criterion = GuidedMultiHeadAttentionLoss(
sigma=args.guided_attn_loss_sigma, alpha=args.guided_attn_loss_lambda,
)
# initialize parameters
self._reset_parameters(
init_type=args.transformer_init,
init_enc_alpha=args.initial_encoder_alpha,
init_dec_alpha=args.initial_decoder_alpha,
)
# load pretrained model
if args.pretrained_model is not None:
self.load_pretrained_model(args.pretrained_model)
def _reset_parameters(self, init_type, init_enc_alpha=1.0, init_dec_alpha=1.0):
# initialize parameters
initialize(self, init_type)
# initialize alpha in scaled positional encoding
if self.use_scaled_pos_enc:
self.encoder.embed[-1].alpha.data = torch.tensor(init_enc_alpha)
self.decoder.embed[-1].alpha.data = torch.tensor(init_dec_alpha)
def _add_first_frame_and_remove_last_frame(self, ys):
ys_in = torch.cat(
[ys.new_zeros((ys.shape[0], 1, ys.shape[2])), ys[:, :-1]], dim=1
)
return ys_in
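    # The helper above builds the teacher-forcing decoder input: a zero frame
    # is prepended and the last frame dropped, e.g. [f1, f2, f3] ->
    # [0, f1, f2], so the decoder predicts frame t from frames < t.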
def forward(self, xs, ilens, ys, labels, olens, spembs=None, *args, **kwargs):
"""Calculate forward propagation.
Args:
xs (Tensor): Batch of padded acoustic features (B, Tmax, idim).
ilens (LongTensor): Batch of lengths of each input batch (B,).
            ys (Tensor): Batch of padded target features (B, Lmax, odim).
            labels (LongTensor): Batch of padded stop token labels (B, Lmax).
olens (LongTensor): Batch of the lengths of each target (B,).
spembs (Tensor, optional): Batch of speaker embedding vectors
(B, spk_embed_dim).
Returns:
Tensor: Loss value.
"""
# remove unnecessary padded part (for multi-gpus)
max_ilen = max(ilens)
max_olen = max(olens)
if max_ilen != xs.shape[1]:
xs = xs[:, :max_ilen]
if max_olen != ys.shape[1]:
ys = ys[:, :max_olen]
labels = labels[:, :max_olen]
# thin out input frames for reduction factor
# (B, Lmax, idim) -> (B, Lmax // r, idim * r)
if self.encoder_reduction_factor > 1:
B, Lmax, idim = xs.shape
if Lmax % self.encoder_reduction_factor != 0:
xs = xs[:, : -(Lmax % self.encoder_reduction_factor), :]
xs_ds = xs.contiguous().view(
B,
int(Lmax / self.encoder_reduction_factor),
idim * self.encoder_reduction_factor,
)
ilens_ds = ilens.new(
[ilen // self.encoder_reduction_factor for ilen in ilens]
)
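            # e.g. with encoder_reduction_factor=2: (B, 400, 80) -> (B, 200, 160),
            # and each input length is halved to match.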
else:
xs_ds, ilens_ds = xs, ilens
# forward encoder
x_masks = self._source_mask(ilens_ds)
hs, hs_masks = self.encoder(xs_ds, x_masks)
# integrate speaker embedding
if self.spk_embed_dim is not None:
hs_int = self._integrate_with_spk_embed(hs, spembs)
else:
hs_int = hs
# thin out frames for reduction factor (B, Lmax, odim) -> (B, Lmax//r, odim)
if self.reduction_factor > 1:
ys_in = ys[:, self.reduction_factor - 1 :: self.reduction_factor]
olens_in = olens.new([olen // self.reduction_factor for olen in olens])
else:
ys_in, olens_in = ys, olens
# add first zero frame and remove last frame for auto-regressive
ys_in = self._add_first_frame_and_remove_last_frame(ys_in)
        # if a conv2d front-end is used, adjust the mask lengths to match its
        # output: two stride-2 convs each map L -> (L - 1) // 2 (e.g. 100 -> 49 -> 24)
if "conv2d" in self.transformer_input_layer:
ilens_ds_st = ilens_ds.new(
[((ilen - 2 + 1) // 2 - 2 + 1) // 2 for ilen in ilens_ds]
)
else:
ilens_ds_st = ilens_ds
# forward decoder
y_masks = self._target_mask(olens_in)
zs, _ = self.decoder(ys_in, y_masks, hs_int, hs_masks)
# (B, Lmax//r, odim * r) -> (B, Lmax//r * r, odim)
before_outs = self.feat_out(zs).view(zs.size(0), -1, self.odim)
# (B, Lmax//r, r) -> (B, Lmax//r * r)
logits = self.prob_out(zs).view(zs.size(0), -1)
# postnet -> (B, Lmax//r * r, odim)
if self.postnet is None:
after_outs = before_outs
else:
after_outs = before_outs + self.postnet(
before_outs.transpose(1, 2)
).transpose(1, 2)
        # trim the ground truth so its length is a multiple of the reduction factor
if self.reduction_factor > 1:
olens = olens.new([olen - olen % self.reduction_factor for olen in olens])
max_olen = max(olens)
ys = ys[:, :max_olen]
labels = labels[:, :max_olen]
labels[:, -1] = 1.0 # make sure at least one frame has 1
        # calculate loss values
l1_loss, l2_loss, bce_loss = self.criterion(
after_outs, before_outs, logits, ys, labels, olens
)
if self.loss_type == "L1":
loss = l1_loss + bce_loss
elif self.loss_type == "L2":
loss = l2_loss + bce_loss
elif self.loss_type == "L1+L2":
loss = l1_loss + l2_loss + bce_loss
else:
raise ValueError("unknown --loss-type " + self.loss_type)
report_keys = [
{"l1_loss": l1_loss.item()},
{"l2_loss": l2_loss.item()},
{"bce_loss": bce_loss.item()},
{"loss": loss.item()},
]
# calculate guided attention loss
if self.use_guided_attn_loss:
# calculate for encoder
if "encoder" in self.modules_applied_guided_attn:
att_ws = []
for idx, layer_idx in enumerate(
reversed(range(len(self.encoder.encoders)))
):
att_ws += [
self.encoder.encoders[layer_idx].self_attn.attn[
:, : self.num_heads_applied_guided_attn
]
]
if idx + 1 == self.num_layers_applied_guided_attn:
break
att_ws = torch.cat(att_ws, dim=1) # (B, H*L, T_in, T_in)
enc_attn_loss = self.attn_criterion(
att_ws, ilens_ds_st, ilens_ds_st
) # TODO(unilight): is changing to ilens_ds_st right?
loss = loss + enc_attn_loss
report_keys += [{"enc_attn_loss": enc_attn_loss.item()}]
# calculate for decoder
if "decoder" in self.modules_applied_guided_attn:
att_ws = []
for idx, layer_idx in enumerate(
reversed(range(len(self.decoder.decoders)))
):
att_ws += [
self.decoder.decoders[layer_idx].self_attn.attn[
:, : self.num_heads_applied_guided_attn
]
]
if idx + 1 == self.num_layers_applied_guided_attn:
break
att_ws = torch.cat(att_ws, dim=1) # (B, H*L, T_out, T_out)
dec_attn_loss = self.attn_criterion(att_ws, olens_in, olens_in)
loss = loss + dec_attn_loss
report_keys += [{"dec_attn_loss": dec_attn_loss.item()}]
# calculate for encoder-decoder
if "encoder-decoder" in self.modules_applied_guided_attn:
att_ws = []
for idx, layer_idx in enumerate(
reversed(range(len(self.decoder.decoders)))
):
att_ws += [
self.decoder.decoders[layer_idx].src_attn.attn[
:, : self.num_heads_applied_guided_attn
]
]
if idx + 1 == self.num_layers_applied_guided_attn:
break
att_ws = torch.cat(att_ws, dim=1) # (B, H*L, T_out, T_in)
enc_dec_attn_loss = self.attn_criterion(
att_ws, ilens_ds_st, olens_in
) # TODO(unilight): is changing to ilens_ds_st right?
loss = loss + enc_dec_attn_loss
report_keys += [{"enc_dec_attn_loss": enc_dec_attn_loss.item()}]
# report extra information
if self.use_scaled_pos_enc:
report_keys += [
{"encoder_alpha": self.encoder.embed[-1].alpha.data.item()},
{"decoder_alpha": self.decoder.embed[-1].alpha.data.item()},
]
self.reporter.report(report_keys)
return loss
def inference(self, x, inference_args, spemb=None, *args, **kwargs):
"""Generate the sequence of features given the sequences of acoustic features.
Args:
x (Tensor): Input sequence of acoustic features (T, idim).
inference_args (Namespace):
- threshold (float): Threshold in inference.
- minlenratio (float): Minimum length ratio in inference.
- maxlenratio (float): Maximum length ratio in inference.
spemb (Tensor, optional): Speaker embedding vector (spk_embed_dim).
Returns:
Tensor: Output sequence of features (L, odim).
Tensor: Output sequence of stop probabilities (L,).
Tensor: Encoder-decoder (source) attention weights (#layers, #heads, L, T).
"""
# get options
threshold = inference_args.threshold
minlenratio = inference_args.minlenratio
maxlenratio = inference_args.maxlenratio
use_att_constraint = getattr(
inference_args, "use_att_constraint", False
) # keep compatibility
if use_att_constraint:
logging.warning(
"Attention constraint is not yet supported in Transformer. Not enabled."
)
# thin out input frames for reduction factor
# (B, Lmax, idim) -> (B, Lmax // r, idim * r)
if self.encoder_reduction_factor > 1:
Lmax, idim = x.shape
if Lmax % self.encoder_reduction_factor != 0:
x = x[: -(Lmax % self.encoder_reduction_factor), :]
x_ds = x.contiguous().view(
int(Lmax / self.encoder_reduction_factor),
idim * self.encoder_reduction_factor,
)
else:
x_ds = x
# forward encoder
x_ds = x_ds.unsqueeze(0)
hs, _ = self.encoder(x_ds, None)
# integrate speaker embedding
if self.spk_embed_dim is not None:
spembs = spemb.unsqueeze(0)
hs = self._integrate_with_spk_embed(hs, spembs)
# set limits of length
maxlen = int(hs.size(1) * maxlenratio / self.reduction_factor)
minlen = int(hs.size(1) * minlenratio / self.reduction_factor)
# initialize
idx = 0
ys = hs.new_zeros(1, 1, self.odim)
outs, probs = [], []
# forward decoder step-by-step
z_cache = self.decoder.init_state(x)
while True:
# update index
idx += 1
# calculate output and stop prob at idx-th step
y_masks = subsequent_mask(idx).unsqueeze(0).to(x.device)
z, z_cache = self.decoder.forward_one_step(
ys, y_masks, hs, cache=z_cache
) # (B, adim)
outs += [
self.feat_out(z).view(self.reduction_factor, self.odim)
] # [(r, odim), ...]
probs += [torch.sigmoid(self.prob_out(z))[0]] # [(r), ...]
# update next inputs
ys = torch.cat(
(ys, outs[-1][-1].view(1, 1, self.odim)), dim=1
) # (1, idx + 1, odim)
# get attention weights
att_ws_ = []
for name, m in self.named_modules():
if isinstance(m, MultiHeadedAttention) and "src" in name:
att_ws_ += [m.attn[0, :, -1].unsqueeze(1)] # [(#heads, 1, T),...]
if idx == 1:
att_ws = att_ws_
else:
# [(#heads, l, T), ...]
att_ws = [
torch.cat([att_w, att_w_], dim=1)
for att_w, att_w_ in zip(att_ws, att_ws_)
]
# check whether to finish generation
if int(sum(probs[-1] >= threshold)) > 0 or idx >= maxlen:
                # check minimum length
if idx < minlen:
continue
outs = (
torch.cat(outs, dim=0).unsqueeze(0).transpose(1, 2)
) # (L, odim) -> (1, L, odim) -> (1, odim, L)
if self.postnet is not None:
outs = outs + self.postnet(outs) # (1, odim, L)
outs = outs.transpose(2, 1).squeeze(0) # (L, odim)
probs = torch.cat(probs, dim=0)
break
# concatenate attention weights -> (#layers, #heads, L, T)
att_ws = torch.stack(att_ws, dim=0)
return outs, probs, att_ws
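    # Inference usage sketch (hypothetical values; x is a (T, idim) tensor on
    # the same device as the model):
    #   opts = argparse.Namespace(threshold=0.5, minlenratio=0.0,
    #                             maxlenratio=10.0)
    #   outs, probs, att_ws = model.inference(x, opts, spemb=spemb)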
def calculate_all_attentions(
self,
xs,
ilens,
ys,
olens,
spembs=None,
skip_output=False,
keep_tensor=False,
*args,
**kwargs
):
"""Calculate all of the attention weights.
Args:
xs (Tensor): Batch of padded acoustic features (B, Tmax, idim).
ilens (LongTensor): Batch of lengths of each input batch (B,).
ys (Tensor): Batch of padded target features (B, Lmax, odim).
olens (LongTensor): Batch of the lengths of each target (B,).
spembs (Tensor, optional): Batch of speaker embedding vectors
(B, spk_embed_dim).
skip_output (bool, optional): Whether to skip calculate the final output.
keep_tensor (bool, optional): Whether to keep original tensor.
Returns:
dict: Dict of attention weights and outputs.
"""
with torch.no_grad():
# thin out input frames for reduction factor
# (B, Lmax, idim) -> (B, Lmax // r, idim * r)
if self.encoder_reduction_factor > 1:
B, Lmax, idim = xs.shape
if Lmax % self.encoder_reduction_factor != 0:
xs = xs[:, : -(Lmax % self.encoder_reduction_factor), :]
xs_ds = xs.contiguous().view(
B,
int(Lmax / self.encoder_reduction_factor),
idim * self.encoder_reduction_factor,
)
ilens_ds = ilens.new(
[ilen // self.encoder_reduction_factor for ilen in ilens]
)
else:
xs_ds, ilens_ds = xs, ilens
# forward encoder
x_masks = self._source_mask(ilens_ds)
hs, hs_masks = self.encoder(xs_ds, x_masks)
# integrate speaker embedding
if self.spk_embed_dim is not None:
hs = self._integrate_with_spk_embed(hs, spembs)
# thin out frames for reduction factor
# (B, Lmax, odim) -> (B, Lmax//r, odim)
if self.reduction_factor > 1:
ys_in = ys[:, self.reduction_factor - 1 :: self.reduction_factor]
olens_in = olens.new([olen // self.reduction_factor for olen in olens])
else:
ys_in, olens_in = ys, olens
# add first zero frame and remove last frame for auto-regressive
ys_in = self._add_first_frame_and_remove_last_frame(ys_in)
# forward decoder
y_masks = self._target_mask(olens_in)
zs, _ = self.decoder(ys_in, y_masks, hs, hs_masks)
# calculate final outputs
if not skip_output:
before_outs = self.feat_out(zs).view(zs.size(0), -1, self.odim)
if self.postnet is None:
after_outs = before_outs
else:
after_outs = before_outs + self.postnet(
before_outs.transpose(1, 2)
).transpose(1, 2)
            # trim output lengths to a multiple of the reduction factor when it is > 1
if self.reduction_factor > 1:
olens = olens.new([olen - olen % self.reduction_factor for olen in olens])
# store into dict
att_ws_dict = dict()
if keep_tensor:
for name, m in self.named_modules():
if isinstance(m, MultiHeadedAttention):
att_ws_dict[name] = m.attn
if not skip_output:
att_ws_dict["before_postnet_fbank"] = before_outs
att_ws_dict["after_postnet_fbank"] = after_outs
else:
for name, m in self.named_modules():
if isinstance(m, MultiHeadedAttention):
attn = m.attn.cpu().numpy()
if "encoder" in name:
attn = [a[:, :l, :l] for a, l in zip(attn, ilens.tolist())]
elif "decoder" in name:
if "src" in name:
attn = [
a[:, :ol, :il]
for a, il, ol in zip(
attn, ilens.tolist(), olens_in.tolist()
)
]
elif "self" in name:
attn = [
a[:, :l, :l] for a, l in zip(attn, olens_in.tolist())
]
else:
logging.warning("unknown attention module: " + name)
else:
logging.warning("unknown attention module: " + name)
att_ws_dict[name] = attn
if not skip_output:
before_outs = before_outs.cpu().numpy()
after_outs = after_outs.cpu().numpy()
att_ws_dict["before_postnet_fbank"] = [
m[:l].T for m, l in zip(before_outs, olens.tolist())
]
att_ws_dict["after_postnet_fbank"] = [
m[:l].T for m, l in zip(after_outs, olens.tolist())
]
return att_ws_dict
def _integrate_with_spk_embed(self, hs, spembs):
"""Integrate speaker embedding with hidden states.
Args:
hs (Tensor): Batch of hidden state sequences (B, Tmax, adim).
spembs (Tensor): Batch of speaker embeddings (B, spk_embed_dim).
Returns:
Tensor: Batch of integrated hidden state sequences (B, Tmax, adim)
"""
if self.spk_embed_integration_type == "add":
# apply projection and then add to hidden states
spembs = self.projection(F.normalize(spembs))
hs = hs + spembs.unsqueeze(1)
elif self.spk_embed_integration_type == "concat":
# concat hidden states with spk embeds and then apply projection
spembs = F.normalize(spembs).unsqueeze(1).expand(-1, hs.size(1), -1)
hs = self.projection(torch.cat([hs, spembs], dim=-1))
else:
raise NotImplementedError("support only add or concat.")
return hs
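    # Shape note for the method above: "add" projects spembs
    # (B, spk_embed_dim) -> (B, adim) and broadcasts over time, while "concat"
    # expands to (B, Tmax, spk_embed_dim), concatenates on the feature axis
    # and projects back down to adim.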
def _source_mask(self, ilens):
"""Make masks for self-attention.
Args:
ilens (LongTensor or List): Batch of lengths (B,).
Returns:
Tensor: Mask tensor for self-attention.
                dtype=torch.uint8 in PyTorch < 1.2
                dtype=torch.bool in PyTorch >= 1.2
Examples:
>>> ilens = [5, 3]
>>> self._source_mask(ilens)
            tensor([[[1, 1, 1, 1, 1]],
                    [[1, 1, 1, 0, 0]]], dtype=torch.uint8)
"""
x_masks = make_non_pad_mask(ilens).to(next(self.parameters()).device)
return x_masks.unsqueeze(-2)
def _target_mask(self, olens):
"""Make masks for masked self-attention.
Args:
olens (LongTensor or List): Batch of lengths (B,).
Returns:
Tensor: Mask tensor for masked self-attention.
                dtype=torch.uint8 in PyTorch < 1.2
                dtype=torch.bool in PyTorch >= 1.2
Examples:
>>> olens = [5, 3]
>>> self._target_mask(olens)
tensor([[[1, 0, 0, 0, 0],
[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]],
[[1, 0, 0, 0, 0],
[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[1, 1, 1, 0, 0],
[1, 1, 1, 0, 0]]], dtype=torch.uint8)
"""
y_masks = make_non_pad_mask(olens).to(next(self.parameters()).device)
s_masks = subsequent_mask(y_masks.size(-1), device=y_masks.device).unsqueeze(0)
return y_masks.unsqueeze(-2) & s_masks
@property
def base_plot_keys(self):
"""Return base key names to plot during training.
        The keys should match what `chainer.reporter` reports.
        If you add the key `loss`, the reporter will report `main/loss`
        and `validation/main/loss` values.
        In addition, `loss.png` will be created as a figure visualizing
        `main/loss` and `validation/main/loss` values.
Returns:
list: List of strings which are base keys to plot during training.
"""
plot_keys = ["loss", "l1_loss", "l2_loss", "bce_loss"]
if self.use_scaled_pos_enc:
plot_keys += ["encoder_alpha", "decoder_alpha"]
if self.use_guided_attn_loss:
if "encoder" in self.modules_applied_guided_attn:
plot_keys += ["enc_attn_loss"]
if "decoder" in self.modules_applied_guided_attn:
plot_keys += ["dec_attn_loss"]
if "encoder-decoder" in self.modules_applied_guided_attn:
plot_keys += ["enc_dec_attn_loss"]
return plot_keys
| 39.687392 | 88 | 0.55233 |
import logging
import torch
import torch.nn.functional as F
from espnet.nets.pytorch_backend.e2e_asr_transformer import subsequent_mask
from espnet.nets.pytorch_backend.e2e_tts_tacotron2 import (
Tacotron2Loss as TransformerLoss,
)
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask
from espnet.nets.pytorch_backend.tacotron2.decoder import Postnet
from espnet.nets.pytorch_backend.tacotron2.decoder import Prenet as DecoderPrenet
from espnet.nets.pytorch_backend.tacotron2.encoder import Encoder as EncoderPrenet
from espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention
from espnet.nets.pytorch_backend.transformer.decoder import Decoder
from espnet.nets.pytorch_backend.transformer.embedding import PositionalEncoding
from espnet.nets.pytorch_backend.transformer.embedding import ScaledPositionalEncoding
from espnet.nets.pytorch_backend.transformer.encoder import Encoder
from espnet.nets.pytorch_backend.transformer.initializer import initialize
from espnet.nets.tts_interface import TTSInterface
from espnet.utils.cli_utils import strtobool
from espnet.utils.fill_missing_args import fill_missing_args
from espnet.nets.pytorch_backend.e2e_tts_transformer import (
GuidedMultiHeadAttentionLoss,
TTSPlot,
)
class Transformer(TTSInterface, torch.nn.Module):
@staticmethod
def add_arguments(parser):
group = parser.add_argument_group("transformer model setting")
group.add_argument(
"--eprenet-conv-layers",
default=0,
type=int,
help="Number of encoder prenet convolution layers",
)
group.add_argument(
"--eprenet-conv-chans",
default=0,
type=int,
help="Number of encoder prenet convolution channels",
)
group.add_argument(
"--eprenet-conv-filts",
default=0,
type=int,
help="Filter size of encoder prenet convolution",
)
group.add_argument(
"--transformer-input-layer",
default="linear",
type=str,
help="Type of input layer (linear or conv2d)",
)
group.add_argument(
"--dprenet-layers",
default=2,
type=int,
help="Number of decoder prenet layers",
)
group.add_argument(
"--dprenet-units",
default=256,
type=int,
help="Number of decoder prenet hidden units",
)
group.add_argument(
"--elayers", default=3, type=int, help="Number of encoder layers"
)
group.add_argument(
"--eunits", default=1536, type=int, help="Number of encoder hidden units"
)
group.add_argument(
"--adim",
default=384,
type=int,
help="Number of attention transformation dimensions",
)
group.add_argument(
"--aheads",
default=4,
type=int,
help="Number of heads for multi head attention",
)
group.add_argument(
"--dlayers", default=3, type=int, help="Number of decoder layers"
)
group.add_argument(
"--dunits", default=1536, type=int, help="Number of decoder hidden units"
)
group.add_argument(
"--positionwise-layer-type",
default="linear",
type=str,
choices=["linear", "conv1d", "conv1d-linear"],
help="Positionwise layer type.",
)
group.add_argument(
"--positionwise-conv-kernel-size",
default=1,
type=int,
help="Kernel size of positionwise conv1d layer",
)
group.add_argument(
"--postnet-layers", default=5, type=int, help="Number of postnet layers"
)
group.add_argument(
"--postnet-chans", default=256, type=int, help="Number of postnet channels"
)
group.add_argument(
"--postnet-filts", default=5, type=int, help="Filter size of postnet"
)
group.add_argument(
"--use-scaled-pos-enc",
default=True,
type=strtobool,
help="Use trainable scaled positional encoding"
"instead of the fixed scale one.",
)
group.add_argument(
"--use-batch-norm",
default=True,
type=strtobool,
help="Whether to use batch normalization",
)
group.add_argument(
"--encoder-normalize-before",
default=False,
type=strtobool,
help="Whether to apply layer norm before encoder block",
)
group.add_argument(
"--decoder-normalize-before",
default=False,
type=strtobool,
help="Whether to apply layer norm before decoder block",
)
group.add_argument(
"--encoder-concat-after",
default=False,
type=strtobool,
help="Whether to concatenate attention layer's input and output in encoder",
)
group.add_argument(
"--decoder-concat-after",
default=False,
type=strtobool,
help="Whether to concatenate attention layer's input and output in decoder",
)
group.add_argument(
"--reduction-factor",
default=1,
type=int,
help="Reduction factor (for decoder)",
)
group.add_argument(
"--encoder-reduction-factor",
default=1,
type=int,
help="Reduction factor (for encoder)",
)
group.add_argument(
"--spk-embed-dim",
default=None,
type=int,
help="Number of speaker embedding dimensions",
)
group.add_argument(
"--spk-embed-integration-type",
type=str,
default="add",
choices=["add", "concat"],
help="How to integrate speaker embedding",
)
group.add_argument(
"--transformer-init",
type=str,
default="pytorch",
choices=[
"pytorch",
"xavier_uniform",
"xavier_normal",
"kaiming_uniform",
"kaiming_normal",
],
help="How to initialize transformer parameters",
)
group.add_argument(
"--initial-encoder-alpha",
type=float,
default=1.0,
help="Initial alpha value in encoder's ScaledPositionalEncoding",
)
group.add_argument(
"--initial-decoder-alpha",
type=float,
default=1.0,
help="Initial alpha value in decoder's ScaledPositionalEncoding",
)
group.add_argument(
"--transformer-lr",
default=1.0,
type=float,
help="Initial value of learning rate",
)
group.add_argument(
"--transformer-warmup-steps",
default=4000,
type=int,
help="Optimizer warmup steps",
)
group.add_argument(
"--transformer-enc-dropout-rate",
default=0.1,
type=float,
help="Dropout rate for transformer encoder except for attention",
)
group.add_argument(
"--transformer-enc-positional-dropout-rate",
default=0.1,
type=float,
help="Dropout rate for transformer encoder positional encoding",
)
group.add_argument(
"--transformer-enc-attn-dropout-rate",
default=0.1,
type=float,
help="Dropout rate for transformer encoder self-attention",
)
group.add_argument(
"--transformer-dec-dropout-rate",
default=0.1,
type=float,
help="Dropout rate for transformer decoder "
"except for attention and pos encoding",
)
group.add_argument(
"--transformer-dec-positional-dropout-rate",
default=0.1,
type=float,
help="Dropout rate for transformer decoder positional encoding",
)
group.add_argument(
"--transformer-dec-attn-dropout-rate",
default=0.1,
type=float,
help="Dropout rate for transformer decoder self-attention",
)
group.add_argument(
"--transformer-enc-dec-attn-dropout-rate",
default=0.1,
type=float,
help="Dropout rate for transformer encoder-decoder attention",
)
group.add_argument(
"--eprenet-dropout-rate",
default=0.5,
type=float,
help="Dropout rate in encoder prenet",
)
group.add_argument(
"--dprenet-dropout-rate",
default=0.5,
type=float,
help="Dropout rate in decoder prenet",
)
group.add_argument(
"--postnet-dropout-rate",
default=0.5,
type=float,
help="Dropout rate in postnet",
)
group.add_argument(
"--pretrained-model", default=None, type=str, help="Pretrained model path"
)
group.add_argument(
"--use-masking",
default=True,
type=strtobool,
help="Whether to use masking in calculation of loss",
)
group.add_argument(
"--use-weighted-masking",
default=False,
type=strtobool,
help="Whether to use weighted masking in calculation of loss",
)
group.add_argument(
"--loss-type",
default="L1",
choices=["L1", "L2", "L1+L2"],
help="How to calc loss",
)
group.add_argument(
"--bce-pos-weight",
default=5.0,
type=float,
help="Positive sample weight in BCE calculation "
"(only for use-masking=True)",
)
group.add_argument(
"--use-guided-attn-loss",
default=False,
type=strtobool,
help="Whether to use guided attention loss",
)
group.add_argument(
"--guided-attn-loss-sigma",
default=0.4,
type=float,
help="Sigma in guided attention loss",
)
group.add_argument(
"--guided-attn-loss-lambda",
default=1.0,
type=float,
help="Lambda in guided attention loss",
)
group.add_argument(
"--num-heads-applied-guided-attn",
default=2,
type=int,
help="Number of heads in each layer to be applied guided attention loss"
"if set -1, all of the heads will be applied.",
)
group.add_argument(
"--num-layers-applied-guided-attn",
default=2,
type=int,
help="Number of layers to be applied guided attention loss"
"if set -1, all of the layers will be applied.",
)
group.add_argument(
"--modules-applied-guided-attn",
type=str,
nargs="+",
default=["encoder-decoder"],
help="Module name list to be applied guided attention loss",
)
return parser
@property
def attention_plot_class(self):
return TTSPlot
def __init__(self, idim, odim, args=None):
TTSInterface.__init__(self)
torch.nn.Module.__init__(self)
args = fill_missing_args(args, self.add_arguments)
self.idim = idim
self.odim = odim
self.spk_embed_dim = args.spk_embed_dim
if self.spk_embed_dim is not None:
self.spk_embed_integration_type = args.spk_embed_integration_type
self.use_scaled_pos_enc = args.use_scaled_pos_enc
self.reduction_factor = args.reduction_factor
self.encoder_reduction_factor = args.encoder_reduction_factor
self.transformer_input_layer = args.transformer_input_layer
self.loss_type = args.loss_type
self.use_guided_attn_loss = args.use_guided_attn_loss
if self.use_guided_attn_loss:
if args.num_layers_applied_guided_attn == -1:
self.num_layers_applied_guided_attn = args.elayers
else:
self.num_layers_applied_guided_attn = (
args.num_layers_applied_guided_attn
)
if args.num_heads_applied_guided_attn == -1:
self.num_heads_applied_guided_attn = args.aheads
else:
self.num_heads_applied_guided_attn = args.num_heads_applied_guided_attn
self.modules_applied_guided_attn = args.modules_applied_guided_attn
padding_idx = 0
pos_enc_class = (
ScaledPositionalEncoding if self.use_scaled_pos_enc else PositionalEncoding
)
if args.eprenet_conv_layers != 0:
encoder_input_layer = torch.nn.Sequential(
EncoderPrenet(
idim=idim,
elayers=0,
econv_layers=args.eprenet_conv_layers,
econv_chans=args.eprenet_conv_chans,
econv_filts=args.eprenet_conv_filts,
use_batch_norm=args.use_batch_norm,
dropout_rate=args.eprenet_dropout_rate,
padding_idx=padding_idx,
input_layer=torch.nn.Linear(
idim * args.encoder_reduction_factor, idim
),
),
torch.nn.Linear(args.eprenet_conv_chans, args.adim),
)
elif args.transformer_input_layer == "linear":
encoder_input_layer = torch.nn.Linear(
idim * args.encoder_reduction_factor, args.adim
)
else:
encoder_input_layer = args.transformer_input_layer
self.encoder = Encoder(
idim=idim,
attention_dim=args.adim,
attention_heads=args.aheads,
linear_units=args.eunits,
num_blocks=args.elayers,
input_layer=encoder_input_layer,
dropout_rate=args.transformer_enc_dropout_rate,
positional_dropout_rate=args.transformer_enc_positional_dropout_rate,
attention_dropout_rate=args.transformer_enc_attn_dropout_rate,
pos_enc_class=pos_enc_class,
normalize_before=args.encoder_normalize_before,
concat_after=args.encoder_concat_after,
positionwise_layer_type=args.positionwise_layer_type,
positionwise_conv_kernel_size=args.positionwise_conv_kernel_size,
)
if self.spk_embed_dim is not None:
if self.spk_embed_integration_type == "add":
self.projection = torch.nn.Linear(self.spk_embed_dim, args.adim)
else:
self.projection = torch.nn.Linear(
args.adim + self.spk_embed_dim, args.adim
)
if args.dprenet_layers != 0:
decoder_input_layer = torch.nn.Sequential(
DecoderPrenet(
idim=odim,
n_layers=args.dprenet_layers,
n_units=args.dprenet_units,
dropout_rate=args.dprenet_dropout_rate,
),
torch.nn.Linear(args.dprenet_units, args.adim),
)
else:
decoder_input_layer = "linear"
self.decoder = Decoder(
odim=-1,
attention_dim=args.adim,
attention_heads=args.aheads,
linear_units=args.dunits,
num_blocks=args.dlayers,
dropout_rate=args.transformer_dec_dropout_rate,
positional_dropout_rate=args.transformer_dec_positional_dropout_rate,
self_attention_dropout_rate=args.transformer_dec_attn_dropout_rate,
src_attention_dropout_rate=args.transformer_enc_dec_attn_dropout_rate,
input_layer=decoder_input_layer,
use_output_layer=False,
pos_enc_class=pos_enc_class,
normalize_before=args.decoder_normalize_before,
concat_after=args.decoder_concat_after,
)
self.feat_out = torch.nn.Linear(args.adim, odim * args.reduction_factor)
self.prob_out = torch.nn.Linear(args.adim, args.reduction_factor)
self.postnet = (
None
if args.postnet_layers == 0
else Postnet(
idim=idim,
odim=odim,
n_layers=args.postnet_layers,
n_chans=args.postnet_chans,
n_filts=args.postnet_filts,
use_batch_norm=args.use_batch_norm,
dropout_rate=args.postnet_dropout_rate,
)
)
self.criterion = TransformerLoss(
use_masking=args.use_masking,
use_weighted_masking=args.use_weighted_masking,
bce_pos_weight=args.bce_pos_weight,
)
if self.use_guided_attn_loss:
self.attn_criterion = GuidedMultiHeadAttentionLoss(
sigma=args.guided_attn_loss_sigma, alpha=args.guided_attn_loss_lambda,
)
self._reset_parameters(
init_type=args.transformer_init,
init_enc_alpha=args.initial_encoder_alpha,
init_dec_alpha=args.initial_decoder_alpha,
)
if args.pretrained_model is not None:
self.load_pretrained_model(args.pretrained_model)
def _reset_parameters(self, init_type, init_enc_alpha=1.0, init_dec_alpha=1.0):
initialize(self, init_type)
if self.use_scaled_pos_enc:
self.encoder.embed[-1].alpha.data = torch.tensor(init_enc_alpha)
self.decoder.embed[-1].alpha.data = torch.tensor(init_dec_alpha)
def _add_first_frame_and_remove_last_frame(self, ys):
ys_in = torch.cat(
[ys.new_zeros((ys.shape[0], 1, ys.shape[2])), ys[:, :-1]], dim=1
)
return ys_in
def forward(self, xs, ilens, ys, labels, olens, spembs=None, *args, **kwargs):
max_ilen = max(ilens)
max_olen = max(olens)
if max_ilen != xs.shape[1]:
xs = xs[:, :max_ilen]
if max_olen != ys.shape[1]:
ys = ys[:, :max_olen]
labels = labels[:, :max_olen]
if self.encoder_reduction_factor > 1:
B, Lmax, idim = xs.shape
if Lmax % self.encoder_reduction_factor != 0:
xs = xs[:, : -(Lmax % self.encoder_reduction_factor), :]
xs_ds = xs.contiguous().view(
B,
int(Lmax / self.encoder_reduction_factor),
idim * self.encoder_reduction_factor,
)
ilens_ds = ilens.new(
[ilen // self.encoder_reduction_factor for ilen in ilens]
)
else:
xs_ds, ilens_ds = xs, ilens
x_masks = self._source_mask(ilens_ds)
hs, hs_masks = self.encoder(xs_ds, x_masks)
if self.spk_embed_dim is not None:
hs_int = self._integrate_with_spk_embed(hs, spembs)
else:
hs_int = hs
if self.reduction_factor > 1:
ys_in = ys[:, self.reduction_factor - 1 :: self.reduction_factor]
olens_in = olens.new([olen // self.reduction_factor for olen in olens])
else:
ys_in, olens_in = ys, olens
ys_in = self._add_first_frame_and_remove_last_frame(ys_in)
if "conv2d" in self.transformer_input_layer:
ilens_ds_st = ilens_ds.new(
[((ilen - 2 + 1) // 2 - 2 + 1) // 2 for ilen in ilens_ds]
)
else:
ilens_ds_st = ilens_ds
y_masks = self._target_mask(olens_in)
zs, _ = self.decoder(ys_in, y_masks, hs_int, hs_masks)
before_outs = self.feat_out(zs).view(zs.size(0), -1, self.odim)
logits = self.prob_out(zs).view(zs.size(0), -1)
if self.postnet is None:
after_outs = before_outs
else:
after_outs = before_outs + self.postnet(
before_outs.transpose(1, 2)
).transpose(1, 2)
if self.reduction_factor > 1:
olens = olens.new([olen - olen % self.reduction_factor for olen in olens])
max_olen = max(olens)
ys = ys[:, :max_olen]
labels = labels[:, :max_olen]
labels[:, -1] = 1.0
l1_loss, l2_loss, bce_loss = self.criterion(
after_outs, before_outs, logits, ys, labels, olens
)
if self.loss_type == "L1":
loss = l1_loss + bce_loss
elif self.loss_type == "L2":
loss = l2_loss + bce_loss
elif self.loss_type == "L1+L2":
loss = l1_loss + l2_loss + bce_loss
else:
raise ValueError("unknown --loss-type " + self.loss_type)
report_keys = [
{"l1_loss": l1_loss.item()},
{"l2_loss": l2_loss.item()},
{"bce_loss": bce_loss.item()},
{"loss": loss.item()},
]
if self.use_guided_attn_loss:
if "encoder" in self.modules_applied_guided_attn:
att_ws = []
for idx, layer_idx in enumerate(
reversed(range(len(self.encoder.encoders)))
):
att_ws += [
self.encoder.encoders[layer_idx].self_attn.attn[
:, : self.num_heads_applied_guided_attn
]
]
if idx + 1 == self.num_layers_applied_guided_attn:
break
att_ws = torch.cat(att_ws, dim=1)
enc_attn_loss = self.attn_criterion(
att_ws, ilens_ds_st, ilens_ds_st
)
loss = loss + enc_attn_loss
report_keys += [{"enc_attn_loss": enc_attn_loss.item()}]
if "decoder" in self.modules_applied_guided_attn:
att_ws = []
for idx, layer_idx in enumerate(
reversed(range(len(self.decoder.decoders)))
):
att_ws += [
self.decoder.decoders[layer_idx].self_attn.attn[
:, : self.num_heads_applied_guided_attn
]
]
if idx + 1 == self.num_layers_applied_guided_attn:
break
att_ws = torch.cat(att_ws, dim=1)
dec_attn_loss = self.attn_criterion(att_ws, olens_in, olens_in)
loss = loss + dec_attn_loss
report_keys += [{"dec_attn_loss": dec_attn_loss.item()}]
if "encoder-decoder" in self.modules_applied_guided_attn:
att_ws = []
for idx, layer_idx in enumerate(
reversed(range(len(self.decoder.decoders)))
):
att_ws += [
self.decoder.decoders[layer_idx].src_attn.attn[
:, : self.num_heads_applied_guided_attn
]
]
if idx + 1 == self.num_layers_applied_guided_attn:
break
att_ws = torch.cat(att_ws, dim=1)
enc_dec_attn_loss = self.attn_criterion(
att_ws, ilens_ds_st, olens_in
)
loss = loss + enc_dec_attn_loss
report_keys += [{"enc_dec_attn_loss": enc_dec_attn_loss.item()}]
if self.use_scaled_pos_enc:
report_keys += [
{"encoder_alpha": self.encoder.embed[-1].alpha.data.item()},
{"decoder_alpha": self.decoder.embed[-1].alpha.data.item()},
]
self.reporter.report(report_keys)
return loss
def inference(self, x, inference_args, spemb=None, *args, **kwargs):
threshold = inference_args.threshold
minlenratio = inference_args.minlenratio
maxlenratio = inference_args.maxlenratio
use_att_constraint = getattr(
inference_args, "use_att_constraint", False
)
if use_att_constraint:
logging.warning(
"Attention constraint is not yet supported in Transformer. Not enabled."
)
if self.encoder_reduction_factor > 1:
Lmax, idim = x.shape
if Lmax % self.encoder_reduction_factor != 0:
x = x[: -(Lmax % self.encoder_reduction_factor), :]
x_ds = x.contiguous().view(
int(Lmax / self.encoder_reduction_factor),
idim * self.encoder_reduction_factor,
)
else:
x_ds = x
x_ds = x_ds.unsqueeze(0)
hs, _ = self.encoder(x_ds, None)
if self.spk_embed_dim is not None:
spembs = spemb.unsqueeze(0)
hs = self._integrate_with_spk_embed(hs, spembs)
maxlen = int(hs.size(1) * maxlenratio / self.reduction_factor)
minlen = int(hs.size(1) * minlenratio / self.reduction_factor)
idx = 0
ys = hs.new_zeros(1, 1, self.odim)
outs, probs = [], []
z_cache = self.decoder.init_state(x)
while True:
idx += 1
y_masks = subsequent_mask(idx).unsqueeze(0).to(x.device)
z, z_cache = self.decoder.forward_one_step(
ys, y_masks, hs, cache=z_cache
)
outs += [
self.feat_out(z).view(self.reduction_factor, self.odim)
]
probs += [torch.sigmoid(self.prob_out(z))[0]]
ys = torch.cat(
(ys, outs[-1][-1].view(1, 1, self.odim)), dim=1
)
att_ws_ = []
for name, m in self.named_modules():
if isinstance(m, MultiHeadedAttention) and "src" in name:
                    att_ws_ += [m.attn[0, :, -1].unsqueeze(1)]
            if idx == 1:
                att_ws = att_ws_
            else:
                att_ws = [
torch.cat([att_w, att_w_], dim=1)
for att_w, att_w_ in zip(att_ws, att_ws_)
]
if int(sum(probs[-1] >= threshold)) > 0 or idx >= maxlen:
if idx < minlen:
continue
outs = (
torch.cat(outs, dim=0).unsqueeze(0).transpose(1, 2)
)
if self.postnet is not None:
outs = outs + self.postnet(outs)
outs = outs.transpose(2, 1).squeeze(0)
probs = torch.cat(probs, dim=0)
break
        att_ws = torch.stack(att_ws, dim=0)
return outs, probs, att_ws
def calculate_all_attentions(
self,
xs,
ilens,
ys,
olens,
spembs=None,
skip_output=False,
keep_tensor=False,
*args,
**kwargs
):
with torch.no_grad():
if self.encoder_reduction_factor > 1:
B, Lmax, idim = xs.shape
if Lmax % self.encoder_reduction_factor != 0:
xs = xs[:, : -(Lmax % self.encoder_reduction_factor), :]
xs_ds = xs.contiguous().view(
B,
int(Lmax / self.encoder_reduction_factor),
idim * self.encoder_reduction_factor,
)
ilens_ds = ilens.new(
[ilen // self.encoder_reduction_factor for ilen in ilens]
)
else:
xs_ds, ilens_ds = xs, ilens
x_masks = self._source_mask(ilens_ds)
hs, hs_masks = self.encoder(xs_ds, x_masks)
if self.spk_embed_dim is not None:
hs = self._integrate_with_spk_embed(hs, spembs)
if self.reduction_factor > 1:
ys_in = ys[:, self.reduction_factor - 1 :: self.reduction_factor]
olens_in = olens.new([olen // self.reduction_factor for olen in olens])
else:
ys_in, olens_in = ys, olens
ys_in = self._add_first_frame_and_remove_last_frame(ys_in)
y_masks = self._target_mask(olens_in)
zs, _ = self.decoder(ys_in, y_masks, hs, hs_masks)
if not skip_output:
before_outs = self.feat_out(zs).view(zs.size(0), -1, self.odim)
if self.postnet is None:
after_outs = before_outs
else:
after_outs = before_outs + self.postnet(
before_outs.transpose(1, 2)
).transpose(1, 2)
if self.reduction_factor > 1:
olens = olens.new([olen - olen % self.reduction_factor for olen in olens])
att_ws_dict = dict()
if keep_tensor:
for name, m in self.named_modules():
if isinstance(m, MultiHeadedAttention):
att_ws_dict[name] = m.attn
if not skip_output:
att_ws_dict["before_postnet_fbank"] = before_outs
att_ws_dict["after_postnet_fbank"] = after_outs
else:
for name, m in self.named_modules():
if isinstance(m, MultiHeadedAttention):
attn = m.attn.cpu().numpy()
if "encoder" in name:
attn = [a[:, :l, :l] for a, l in zip(attn, ilens.tolist())]
elif "decoder" in name:
if "src" in name:
attn = [
a[:, :ol, :il]
for a, il, ol in zip(
attn, ilens.tolist(), olens_in.tolist()
)
]
elif "self" in name:
attn = [
a[:, :l, :l] for a, l in zip(attn, olens_in.tolist())
]
else:
logging.warning("unknown attention module: " + name)
else:
logging.warning("unknown attention module: " + name)
att_ws_dict[name] = attn
if not skip_output:
before_outs = before_outs.cpu().numpy()
after_outs = after_outs.cpu().numpy()
att_ws_dict["before_postnet_fbank"] = [
m[:l].T for m, l in zip(before_outs, olens.tolist())
]
att_ws_dict["after_postnet_fbank"] = [
m[:l].T for m, l in zip(after_outs, olens.tolist())
]
return att_ws_dict
def _integrate_with_spk_embed(self, hs, spembs):
if self.spk_embed_integration_type == "add":
spembs = self.projection(F.normalize(spembs))
hs = hs + spembs.unsqueeze(1)
elif self.spk_embed_integration_type == "concat":
spembs = F.normalize(spembs).unsqueeze(1).expand(-1, hs.size(1), -1)
hs = self.projection(torch.cat([hs, spembs], dim=-1))
else:
raise NotImplementedError("support only add or concat.")
return hs
def _source_mask(self, ilens):
x_masks = make_non_pad_mask(ilens).to(next(self.parameters()).device)
return x_masks.unsqueeze(-2)
def _target_mask(self, olens):
y_masks = make_non_pad_mask(olens).to(next(self.parameters()).device)
s_masks = subsequent_mask(y_masks.size(-1), device=y_masks.device).unsqueeze(0)
return y_masks.unsqueeze(-2) & s_masks
@property
def base_plot_keys(self):
plot_keys = ["loss", "l1_loss", "l2_loss", "bce_loss"]
if self.use_scaled_pos_enc:
plot_keys += ["encoder_alpha", "decoder_alpha"]
if self.use_guided_attn_loss:
if "encoder" in self.modules_applied_guided_attn:
plot_keys += ["enc_attn_loss"]
if "decoder" in self.modules_applied_guided_attn:
plot_keys += ["dec_attn_loss"]
if "encoder-decoder" in self.modules_applied_guided_attn:
plot_keys += ["enc_dec_attn_loss"]
return plot_keys
| true | true |
f737f906965a60282d80df86f3042a8d4f691a86 | 930 | py | Python | internal/settings.py | ninichang/cartogram-web | 6ec75713945b0b310c7df0d6f7a3fdb0ef3b5a99 | [
"MIT"
] | 1 | 2020-06-23T15:03:31.000Z | 2020-06-23T15:03:31.000Z | internal/settings.py | ninichang/cartogram-web | 6ec75713945b0b310c7df0d6f7a3fdb0ef3b5a99 | [
"MIT"
] | null | null | null | internal/settings.py | ninichang/cartogram-web | 6ec75713945b0b310c7df0d6f7a3fdb0ef3b5a99 | [
"MIT"
] | 1 | 2019-09-15T19:53:39.000Z | 2019-09-15T19:53:39.000Z | import os
CARTOGRAM_EXE = os.environ['CARTOGRAM_EXE']
CARTOGRAM_DATA_DIR = os.environ['CARTOGRAM_DATA_DIR']
CARTOGRAM_COLOR = os.environ['CARTOGRAM_COLOR']
DEBUG = True if os.environ['CARTOGRAM_DEBUG'].lower() == "true" else False
DATABASE_URI = os.environ['CARTOGRAM_DATABASE_URI']
USE_DATABASE = True if os.environ['CARTOGRAM_USE_DATABASE'].lower() == "true" else False
HOST = os.environ['CARTOGRAM_HOST']
PORT = int(os.environ['CARTOGRAM_PORT'])
VERSION = os.environ['CARTOGRAM_VERSION']
SMTP_HOST = os.environ['CARTOGRAM_SMTP_HOST']
SMTP_PORT = int(os.environ['CARTOGRAM_SMTP_PORT'])
SMTP_AUTHENTICATION_REQUIRED = True if os.environ['CARTOGRAM_SMTP_AUTHENTICATION_REQUIRED'].lower() == "true" else False
SMTP_USER = os.environ['CARTOGRAM_SMTP_USER']
SMTP_PASSWORD = os.environ['CARTOGRAM_SMTP_PASSWORD']
SMTP_FROM_EMAIL = os.environ['CARTOGRAM_SMTP_FROM_EMAIL']
SMTP_DESTINATION = os.environ['CARTOGRAM_SMTP_DESTINATION'] | 44.285714 | 120 | 0.793548 | import os
CARTOGRAM_EXE = os.environ['CARTOGRAM_EXE']
CARTOGRAM_DATA_DIR = os.environ['CARTOGRAM_DATA_DIR']
CARTOGRAM_COLOR = os.environ['CARTOGRAM_COLOR']
DEBUG = True if os.environ['CARTOGRAM_DEBUG'].lower() == "true" else False
DATABASE_URI = os.environ['CARTOGRAM_DATABASE_URI']
USE_DATABASE = True if os.environ['CARTOGRAM_USE_DATABASE'].lower() == "true" else False
HOST = os.environ['CARTOGRAM_HOST']
PORT = int(os.environ['CARTOGRAM_PORT'])
VERSION = os.environ['CARTOGRAM_VERSION']
SMTP_HOST = os.environ['CARTOGRAM_SMTP_HOST']
SMTP_PORT = int(os.environ['CARTOGRAM_SMTP_PORT'])
SMTP_AUTHENTICATION_REQUIRED = True if os.environ['CARTOGRAM_SMTP_AUTHENTICATION_REQUIRED'].lower() == "true" else False
SMTP_USER = os.environ['CARTOGRAM_SMTP_USER']
SMTP_PASSWORD = os.environ['CARTOGRAM_SMTP_PASSWORD']
SMTP_FROM_EMAIL = os.environ['CARTOGRAM_SMTP_FROM_EMAIL']
SMTP_DESTINATION = os.environ['CARTOGRAM_SMTP_DESTINATION'] | true | true |
f737f968a585b7fd999eaa118151b3a87ca2f4fc | 5,025 | py | Python | test/functional/feature_messaging.py | jellymlg/Bagicoin | b4b3d832e1ef33466f7daa8766538fe6492581d5 | [
"MIT"
] | null | null | null | test/functional/feature_messaging.py | jellymlg/Bagicoin | b4b3d832e1ef33466f7daa8766538fe6492581d5 | [
"MIT"
] | null | null | null | test/functional/feature_messaging.py | jellymlg/Bagicoin | b4b3d832e1ef33466f7daa8766538fe6492581d5 | [
"MIT"
] | 1 | 2021-07-23T09:30:16.000Z | 2021-07-23T09:30:16.000Z | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Copyright (c) 2017-2020 The Raven Core developers
# Copyright (c) 2021 The Bagi Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Testing messaging
"""
from test_framework.test_framework import BagiTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, assert_contains, assert_does_not_contain, assert_contains_pair
class MessagingTest(BagiTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [['-assetindex'], ['-assetindex'], ['-assetindex']]
def activate_messaging(self):
self.log.info("Generating BAGI for node[0] and activating messaging...")
n0 = self.nodes[0]
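        # mine enough blocks for the messaging_restricted BIP9 softfork to reach
        # "active" state, as asserted below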
n0.generate(1)
self.sync_all()
n0.generate(431)
self.sync_all()
assert_equal("active", n0.getblockchaininfo()['bip9_softforks']['messaging_restricted']['status'])
def test_messaging(self):
self.log.info("Testing messaging!")
n0, n1 = self.nodes[0], self.nodes[1]
spam_name = "SPAM"
asset_name = "MESSAGING"
owner_name = "MESSAGING!"
channel_one = "MESSAGING~ONE"
channel_two = "MESSAGING~TWO"
ipfs_hash = "QmZPGfJojdTzaqCWJu2m3krark38X1rqEHBo4SjeqHKB26"
# need ownership before channels can be created
assert_raises_rpc_error(-32600, "Wallet doesn't have asset: " + owner_name,
n0.issue, channel_one)
n0.issue(asset_name, 100)
n0.issue(channel_one)
n0.issue(channel_two)
n0.issue(spam_name, 100)
n0.generate(1)
self.sync_all()
# you're auto-subscribed to your own channels
n0_channels = n0.viewallmessagechannels()
assert_contains(owner_name, n0_channels)
assert_contains(channel_one, n0_channels)
assert_contains(channel_two, n0_channels)
# n1 subscribes to owner and channel one
assert_equal([], n1.viewallmessagechannels())
n1.subscribetochannel(owner_name)
n1.subscribetochannel(channel_one)
n1_channels = n1.viewallmessagechannels()
assert_contains(owner_name, n1_channels)
assert_contains(channel_one, n1_channels)
assert_does_not_contain(channel_two, n1_channels)
# n0 sends a message on owner
n0.sendmessage(owner_name, ipfs_hash)
n0.generate(1)
self.sync_all()
# n1 views then clears messages
n1_messages = n1.viewallmessages()
assert_equal(1, len(n1_messages))
message = n1_messages[0]
assert_contains_pair("Asset Name", owner_name, message)
assert_contains_pair("Message", ipfs_hash, message)
n1.clearmessages()
n1_messages = n1.viewallmessages()
assert_equal(0, len(n1_messages))
# n0 sends more messages on channels one and two
n0.sendmessage(channel_one, ipfs_hash)
n0.sendmessage(channel_two, ipfs_hash)
n0.generate(1)
self.sync_all()
# n1 views then clears messages
n1_messages = n1.viewallmessages()
assert_equal(1, len(n1_messages))
message = n1_messages[0]
assert_contains_pair("Asset Name", channel_one, message)
assert_contains_pair("Message", ipfs_hash, message)
n1.clearmessages()
n1_messages = n1.viewallmessages()
assert_equal(0, len(n1_messages))
# n1 unsubscribes
n1.unsubscribefromchannel(owner_name)
n1.unsubscribefromchannel(channel_one)
assert_equal(0, len(n1.viewallmessagechannels()))
# auto-subscribe / spam protection (first address use only)
addr1 = n1.getnewaddress()
n0.transfer(asset_name, 10, addr1)
n0.generate(1)
self.sync_all()
n0.transfer(spam_name, 10, addr1)
n1_channels = n1.viewallmessagechannels()
assert_equal(1, len(n1_channels))
assert_contains(owner_name, n1_channels)
assert_does_not_contain(spam_name, n1_channels)
n1.unsubscribefromchannel(owner_name)
# pre-existing messages (don't see w/o rescan)
assert_equal(0, len(n1.viewallmessages()))
n0.sendmessage(channel_two, ipfs_hash)
n0.generate(1)
self.sync_all()
assert_equal(0, len(n1.viewallmessages()))
n1.subscribetochannel(channel_two)
assert_equal(0, len(n1.viewallmessages()))
n0.sendmessage(channel_two, ipfs_hash)
n0.generate(1)
self.sync_all()
assert_equal(1, len(n1.viewallmessages()))
assert_contains_pair("Asset Name", channel_two, n1.viewallmessages()[0])
n1.clearmessages()
n1.unsubscribefromchannel(channel_two)
def run_test(self):
self.activate_messaging()
self.test_messaging()
if __name__ == '__main__':
MessagingTest().main()
| 35.638298 | 133 | 0.667861 |
from test_framework.test_framework import BagiTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, assert_contains, assert_does_not_contain, assert_contains_pair
class MessagingTest(BagiTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [['-assetindex'], ['-assetindex'], ['-assetindex']]
def activate_messaging(self):
self.log.info("Generating BAGI for node[0] and activating messaging...")
n0 = self.nodes[0]
n0.generate(1)
self.sync_all()
n0.generate(431)
self.sync_all()
assert_equal("active", n0.getblockchaininfo()['bip9_softforks']['messaging_restricted']['status'])
def test_messaging(self):
self.log.info("Testing messaging!")
n0, n1 = self.nodes[0], self.nodes[1]
spam_name = "SPAM"
asset_name = "MESSAGING"
owner_name = "MESSAGING!"
channel_one = "MESSAGING~ONE"
channel_two = "MESSAGING~TWO"
ipfs_hash = "QmZPGfJojdTzaqCWJu2m3krark38X1rqEHBo4SjeqHKB26"
assert_raises_rpc_error(-32600, "Wallet doesn't have asset: " + owner_name,
n0.issue, channel_one)
n0.issue(asset_name, 100)
n0.issue(channel_one)
n0.issue(channel_two)
n0.issue(spam_name, 100)
n0.generate(1)
self.sync_all()
# you're auto-subscribed to your own channels
n0_channels = n0.viewallmessagechannels()
assert_contains(owner_name, n0_channels)
assert_contains(channel_one, n0_channels)
assert_contains(channel_two, n0_channels)
assert_equal([], n1.viewallmessagechannels())
n1.subscribetochannel(owner_name)
n1.subscribetochannel(channel_one)
n1_channels = n1.viewallmessagechannels()
assert_contains(owner_name, n1_channels)
assert_contains(channel_one, n1_channels)
assert_does_not_contain(channel_two, n1_channels)
n0.sendmessage(owner_name, ipfs_hash)
n0.generate(1)
self.sync_all()
n1_messages = n1.viewallmessages()
assert_equal(1, len(n1_messages))
message = n1_messages[0]
assert_contains_pair("Asset Name", owner_name, message)
assert_contains_pair("Message", ipfs_hash, message)
n1.clearmessages()
n1_messages = n1.viewallmessages()
assert_equal(0, len(n1_messages))
n0.sendmessage(channel_one, ipfs_hash)
n0.sendmessage(channel_two, ipfs_hash)
n0.generate(1)
self.sync_all()
n1_messages = n1.viewallmessages()
assert_equal(1, len(n1_messages))
message = n1_messages[0]
assert_contains_pair("Asset Name", channel_one, message)
assert_contains_pair("Message", ipfs_hash, message)
n1.clearmessages()
n1_messages = n1.viewallmessages()
assert_equal(0, len(n1_messages))
n1.unsubscribefromchannel(owner_name)
n1.unsubscribefromchannel(channel_one)
assert_equal(0, len(n1.viewallmessagechannels()))
addr1 = n1.getnewaddress()
n0.transfer(asset_name, 10, addr1)
n0.generate(1)
self.sync_all()
n0.transfer(spam_name, 10, addr1)
n1_channels = n1.viewallmessagechannels()
assert_equal(1, len(n1_channels))
assert_contains(owner_name, n1_channels)
assert_does_not_contain(spam_name, n1_channels)
n1.unsubscribefromchannel(owner_name)
assert_equal(0, len(n1.viewallmessages()))
n0.sendmessage(channel_two, ipfs_hash)
n0.generate(1)
self.sync_all()
assert_equal(0, len(n1.viewallmessages()))
n1.subscribetochannel(channel_two)
assert_equal(0, len(n1.viewallmessages()))
n0.sendmessage(channel_two, ipfs_hash)
n0.generate(1)
self.sync_all()
assert_equal(1, len(n1.viewallmessages()))
assert_contains_pair("Asset Name", channel_two, n1.viewallmessages()[0])
n1.clearmessages()
n1.unsubscribefromchannel(channel_two)
def run_test(self):
self.activate_messaging()
self.test_messaging()
if __name__ == '__main__':
MessagingTest().main()
| true | true |
f737fbbf879277da4602328bff9a822f2bc28be9 | 305 | py | Python | myopenpantry/views/__init__.py | MyOpenPantry/flask-backend | e94702bfa04f36c1a6015ae3e9c37dfb7b923279 | [
"MIT"
] | null | null | null | myopenpantry/views/__init__.py | MyOpenPantry/flask-backend | e94702bfa04f36c1a6015ae3e9c37dfb7b923279 | [
"MIT"
] | 4 | 2021-03-28T19:47:04.000Z | 2021-05-04T00:59:46.000Z | myopenpantry/views/__init__.py | MyOpenPantry/flask-backend | e94702bfa04f36c1a6015ae3e9c37dfb7b923279 | [
"MIT"
] | null | null | null | from . import ingredients
from . import items
from . import recipes
from . import tags
MODULES = (
ingredients,
items,
recipes,
tags,
)
def register_blueprints(api):
"""Initialize application with all modules"""
for module in MODULES:
api.register_blueprint(module.blp)
| 16.944444 | 49 | 0.685246 | from . import ingredients
from . import items
from . import recipes
from . import tags
MODULES = (
ingredients,
items,
recipes,
tags,
)
def register_blueprints(api):
for module in MODULES:
api.register_blueprint(module.blp)
| true | true |
f737fbe220c66c02fa707fbe007d524155850a2d | 774 | py | Python | twitterbot/admin.py | invinst/CPDB | c2d8ae8888b13d956cc1068742f18d45736d4121 | [
"Apache-2.0"
] | 16 | 2016-05-20T09:03:32.000Z | 2020-09-13T14:23:06.000Z | twitterbot/admin.py | invinst/CPDB | c2d8ae8888b13d956cc1068742f18d45736d4121 | [
"Apache-2.0"
] | 2 | 2016-05-24T01:44:14.000Z | 2016-06-17T22:19:45.000Z | twitterbot/admin.py | invinst/CPDB | c2d8ae8888b13d956cc1068742f18d45736d4121 | [
"Apache-2.0"
] | 2 | 2016-10-10T16:14:19.000Z | 2020-10-26T00:17:02.000Z | from django.contrib import admin
from twitterbot.models import ResponseTemplate, TwitterBotError, TwitterBotResponseLog
class TwitterBotErrorAdmin(admin.ModelAdmin):
list_display = ('stack_trace', 'timestamp')
class TwitterBotResponseLogAdmin(admin.ModelAdmin):
list_display = ('tweet_url', 'tweet_content', 'tweeted_at',
'incoming_tweet_username', 'incoming_tweet_url', 'incoming_tweet_content',
'originating_tweet_username', 'originating_tweet_url', 'originating_tweet_content',
'entity_url', 'matched_strings')
admin.site.register(ResponseTemplate, admin.ModelAdmin)
admin.site.register(TwitterBotError, TwitterBotErrorAdmin)
admin.site.register(TwitterBotResponseLog, TwitterBotResponseLogAdmin)
| 40.736842 | 103 | 0.764858 | from django.contrib import admin
from twitterbot.models import ResponseTemplate, TwitterBotError, TwitterBotResponseLog
class TwitterBotErrorAdmin(admin.ModelAdmin):
list_display = ('stack_trace', 'timestamp')
class TwitterBotResponseLogAdmin(admin.ModelAdmin):
list_display = ('tweet_url', 'tweet_content', 'tweeted_at',
'incoming_tweet_username', 'incoming_tweet_url', 'incoming_tweet_content',
'originating_tweet_username', 'originating_tweet_url', 'originating_tweet_content',
'entity_url', 'matched_strings')
admin.site.register(ResponseTemplate, admin.ModelAdmin)
admin.site.register(TwitterBotError, TwitterBotErrorAdmin)
admin.site.register(TwitterBotResponseLog, TwitterBotResponseLogAdmin)
| true | true |
f737fed74e631be874294a3868f569e6c287070e | 1,001 | py | Python | src/run_experiment.py | UKPLab/thesis2018-tk_mtl_sequence_tagging | c2041097b1f6f895183d14ef06f60632bc30a34f | [
"Apache-2.0"
] | 9 | 2018-06-25T09:59:19.000Z | 2022-03-05T07:08:12.000Z | src/run_experiment.py | UKPLab/thesis2018-tk_mtl_sequence_tagging | c2041097b1f6f895183d14ef06f60632bc30a34f | [
"Apache-2.0"
] | 7 | 2020-01-28T22:26:24.000Z | 2022-02-09T23:43:33.000Z | src/run_experiment.py | UKPLab/thesis2018-tk_mtl_sequence_tagging | c2041097b1f6f895183d14ef06f60632bc30a34f | [
"Apache-2.0"
] | null | null | null | """
This module allows to run an experiment from a configuration template file.
"""
import argparse
from ConfigGenerator import ConfigGenerator
from use_network import train
def main():
"""
Parse the CLI arguments and then run the experiment with different trials (i.e. hyper-parameter configurations).
"""
parser = argparse.ArgumentParser(description="Running experiments with the MTL sequence tagging framework.")
parser.add_argument("trials", help="The number of trials to perform", type=int)
parser.add_argument("template", help="Path to the template file", type=str)
parser.add_argument(
"config_out", help="Directory where to output configuration files (may also be a temporary directory)"
)
args = parser.parse_args()
config_generator = ConfigGenerator(args.template, args.config_out)
for trial in xrange(args.trials):
config_path = config_generator.generate()
train(config_path)
if __name__ == "__main__":
main()
| 31.28125 | 116 | 0.727273 |
import argparse
from ConfigGenerator import ConfigGenerator
from use_network import train
def main():
parser = argparse.ArgumentParser(description="Running experiments with the MTL sequence tagging framework.")
parser.add_argument("trials", help="The number of trials to perform", type=int)
parser.add_argument("template", help="Path to the template file", type=str)
parser.add_argument(
"config_out", help="Directory where to output configuration files (may also be a temporary directory)"
)
args = parser.parse_args()
config_generator = ConfigGenerator(args.template, args.config_out)
for trial in xrange(args.trials):
config_path = config_generator.generate()
train(config_path)
if __name__ == "__main__":
main()
| true | true |
f738016c55118d481488edeade86e5bba28cdac2 | 1,835 | py | Python | pos_scorer.py | animeshsagar/Part-of-Speech-Tagging | 4dc9d60ecdee2f19d42ca489692845c74265f95f | [
"MIT"
] | null | null | null | pos_scorer.py | animeshsagar/Part-of-Speech-Tagging | 4dc9d60ecdee2f19d42ca489692845c74265f95f | [
"MIT"
] | null | null | null | pos_scorer.py | animeshsagar/Part-of-Speech-Tagging | 4dc9d60ecdee2f19d42ca489692845c74265f95f | [
"MIT"
] | null | null | null | ###################################
# CS B551 Fall 2018, Assignment #3
#
# Scoring code by D. Crandall
#
# PLEASE DON'T MODIFY THIS FILE.
# Edit pos_solver.py instead!
#
class Score:
def __init__(self):
self.word_scorecard = {}
self.sentence_scorecard = {}
self.word_count = 0
self.sentence_count = 0
def score(self, algo_outputs, gt):
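        # tally per-word matches and exact whole-sentence matches for each
        # algorithm's output against the ground-truth tags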
self.word_count += len(gt)
self.sentence_count += 1
for algo,labels in algo_outputs.items():
correct = 0
for j in range(0, len(gt)):
correct += 1 if gt[j] == labels[j] else 0
self.word_scorecard[algo] = self.word_scorecard.get(algo, 0) + correct
self.sentence_scorecard[algo] = self.sentence_scorecard.get(algo, 0) + (correct == len(gt))
def print_scores(self):
print("\n==> So far scored %d sentences with %d words." % (self.sentence_count, self.word_count))
print(" Words correct: Sentences correct: ")
for i in sorted(self.word_scorecard):
print("%18s: %7.2f%% %7.2f%%" % (i, self.word_scorecard[i]*100 / float(self.word_count), self.sentence_scorecard[i]*100 / float(self.sentence_count)))
@staticmethod
def print_helper(description, list, sentence):
print (("%40s" % description) + " " + " ".join([(("%-" + str(max(4,len(sentence[i]))) + "s") % list[i]) for i in range(0,len(list)) ] ) )
@staticmethod
def print_results(sentence, outputs, posteriors, models):
Score.print_helper(" ".join([("%7s" % model) for model in models]), sentence, sentence)
for algo in sorted(outputs.keys()):
Score.print_helper(algo + " "+" ".join(["%7.2f" % posteriors[algo][model] for model in models]), outputs[algo], sentence)
 | 38.229167 | 178 | 0.578747 | class Score:
    def __init__(self):
        self.word_scorecard = {}
        self.sentence_scorecard = {}
        self.word_count = 0
        self.sentence_count = 0
    def score(self, algo_outputs, gt):
        self.word_count += len(gt)
        self.sentence_count += 1
        for algo,labels in algo_outputs.items():
            correct = 0
            for j in range(0, len(gt)):
                correct += 1 if gt[j] == labels[j] else 0
            self.word_scorecard[algo] = self.word_scorecard.get(algo, 0) + correct
            self.sentence_scorecard[algo] = self.sentence_scorecard.get(algo, 0) + (correct == len(gt))
def print_scores(self):
print("\n==> So far scored %d sentences with %d words." % (self.sentence_count, self.word_count))
print(" Words correct: Sentences correct: ")
for i in sorted(self.word_scorecard):
print("%18s: %7.2f%% %7.2f%%" % (i, self.word_scorecard[i]*100 / float(self.word_count), self.sentence_scorecard[i]*100 / float(self.sentence_count)))
@staticmethod
def print_helper(description, list, sentence):
print (("%40s" % description) + " " + " ".join([(("%-" + str(max(4,len(sentence[i]))) + "s") % list[i]) for i in range(0,len(list)) ] ) )
@staticmethod
def print_results(sentence, outputs, posteriors, models):
Score.print_helper(" ".join([("%7s" % model) for model in models]), sentence, sentence)
for algo in sorted(outputs.keys()):
Score.print_helper(algo + " "+" ".join(["%7.2f" % posteriors[algo][model] for model in models]), outputs[algo], sentence)
| true | true |
f73804718aace66c62ba5c416f0a97e0243065d2 | 8,560 | py | Python | fairseq/optim/adam.py | mpsilfve/fairseq | eb228ee74c6bc9803eb7dbd398d8cda16c55ccd2 | [
"MIT"
] | 115 | 2021-08-25T14:58:12.000Z | 2022-03-21T11:25:36.000Z | fairseq/optim/adam.py | mpsilfve/fairseq | eb228ee74c6bc9803eb7dbd398d8cda16c55ccd2 | [
"MIT"
] | 10 | 2021-11-14T12:28:48.000Z | 2022-02-28T14:13:40.000Z | fairseq/optim/adam.py | mpsilfve/fairseq | eb228ee74c6bc9803eb7dbd398d8cda16c55ccd2 | [
"MIT"
] | 11 | 2021-12-07T02:19:03.000Z | 2022-03-16T09:18:27.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
from collections.abc import Collection
from dataclasses import dataclass, field
from typing import List
import torch
import torch.distributed as dist
import torch.optim
from fairseq.dataclass import FairseqDataclass
from fairseq.optim import FairseqOptimizer, register_optimizer
from fairseq.optim.fused_adam import get_fused_adam_class
from omegaconf import II, DictConfig
logger = logging.getLogger(__name__)
@dataclass
class FairseqAdamConfig(FairseqDataclass):
adam_betas: str = field(
default="(0.9, 0.999)", metadata={"help": "betas for Adam optimizer"}
)
adam_eps: float = field(
default=1e-8, metadata={"help": "epsilon for Adam optimizer"}
)
weight_decay: float = field(default=0.0, metadata={"help": "weight decay"})
use_old_adam: bool = field(
default=False, metadata={"help": "Use fairseq.optim.adam.Adam"}
)
# TODO common vars below in parent
tpu: bool = II("common.tpu")
lr: List[float] = II("optimization.lr")
@register_optimizer("adam", dataclass=FairseqAdamConfig)
class FairseqAdam(FairseqOptimizer):
"""Adam optimizer for fairseq.
Important note: this optimizer corresponds to the "AdamW" variant of
Adam in its weight decay behavior. As such, it is most closely
analogous to torch.optim.AdamW from PyTorch.
"""
def __init__(self, cfg: DictConfig, params):
super().__init__(cfg)
fused_adam_cls = get_fused_adam_class()
use_fused_adam = (
not getattr(cfg, "use_old_adam", False)
and fused_adam_cls is not None
and torch.cuda.is_available()
)
if getattr(cfg, "tpu", False):
# on TPUs we use the Adam defined here, since it
# automatically casts gradients to FP32
self._optimizer = Adam(params, **self.optimizer_config)
elif use_fused_adam:
logger.info("using FusedAdam")
self._optimizer = fused_adam_cls(params, **self.optimizer_config)
else:
self._optimizer = Adam(params, **self.optimizer_config)
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
"lr": self.cfg.lr[0]
if isinstance(self.cfg.lr, Collection)
else self.cfg.lr,
"betas": eval(self.cfg.adam_betas),
"eps": self.cfg.adam_eps,
"weight_decay": self.cfg.weight_decay,
}
def average_params(self):
"""Reduce Params is only used during BMUF distributed training."""
state_dict = self.optimizer.state_dict()
total_gpus = float(dist.get_world_size())
for _, value in state_dict["state"].items():
value["exp_avg"] /= total_gpus
value["exp_avg_sq"] /= total_gpus
dist.all_reduce(value["exp_avg"], op=dist.ReduceOp.SUM)
dist.all_reduce(value["exp_avg_sq"], op=dist.ReduceOp.SUM)
class Adam(torch.optim.Optimizer):
r"""Implements Adam algorithm.
This implementation is modified from torch.optim.Adam based on:
`Fixed Weight Decay Regularization in Adam`
(see https://arxiv.org/abs/1711.05101)
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(
self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
amsgrad=False,
):
defaults = dict(
lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad
)
super(Adam, self).__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self):
return True
@property
def supports_flat_params(self):
return True
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.dtype in {torch.float16, torch.bfloat16}:
grad = grad.float()
if grad.is_sparse:
raise RuntimeError(
"Adam does not support sparse gradients, please consider SparseAdam instead"
)
amsgrad = group.get("amsgrad", False)
p_data_fp32 = p.data
if p.data.dtype in {torch.float16, torch.bfloat16}:
p_data_fp32 = p_data_fp32.float()
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p_data_fp32)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p_data_fp32)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state["max_exp_avg_sq"] = torch.zeros_like(p_data_fp32)
else:
state["exp_avg"] = state["exp_avg"].to(p_data_fp32)
state["exp_avg_sq"] = state["exp_avg_sq"].to(p_data_fp32)
if amsgrad:
state["max_exp_avg_sq"] = state["max_exp_avg_sq"].to(
p_data_fp32
)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
if amsgrad:
max_exp_avg_sq = state["max_exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group["eps"])
else:
denom = exp_avg_sq.sqrt().add_(group["eps"])
bias_correction1 = 1 - beta1 ** state["step"]
bias_correction2 = 1 - beta2 ** state["step"]
step_size = group["lr"] * math.sqrt(bias_correction2) / bias_correction1
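                # decoupled (AdamW-style) weight decay: applied directly to the
                # parameters and scaled by lr, not folded into the gradient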
if group["weight_decay"] != 0:
p_data_fp32.add_(
p_data_fp32, alpha=-group["weight_decay"] * group["lr"]
)
p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
if p.data.dtype in {torch.float16, torch.bfloat16}:
p.data.copy_(p_data_fp32)
return loss
| 37.709251 | 100 | 0.591472 |
import logging
import math
from collections.abc import Collection
from dataclasses import dataclass, field
from typing import List
import torch
import torch.distributed as dist
import torch.optim
from fairseq.dataclass import FairseqDataclass
from fairseq.optim import FairseqOptimizer, register_optimizer
from fairseq.optim.fused_adam import get_fused_adam_class
from omegaconf import II, DictConfig
logger = logging.getLogger(__name__)
@dataclass
class FairseqAdamConfig(FairseqDataclass):
adam_betas: str = field(
default="(0.9, 0.999)", metadata={"help": "betas for Adam optimizer"}
)
adam_eps: float = field(
default=1e-8, metadata={"help": "epsilon for Adam optimizer"}
)
weight_decay: float = field(default=0.0, metadata={"help": "weight decay"})
use_old_adam: bool = field(
default=False, metadata={"help": "Use fairseq.optim.adam.Adam"}
)
tpu: bool = II("common.tpu")
lr: List[float] = II("optimization.lr")
@register_optimizer("adam", dataclass=FairseqAdamConfig)
class FairseqAdam(FairseqOptimizer):
def __init__(self, cfg: DictConfig, params):
super().__init__(cfg)
fused_adam_cls = get_fused_adam_class()
use_fused_adam = (
not getattr(cfg, "use_old_adam", False)
and fused_adam_cls is not None
and torch.cuda.is_available()
)
if getattr(cfg, "tpu", False):
self._optimizer = Adam(params, **self.optimizer_config)
elif use_fused_adam:
logger.info("using FusedAdam")
self._optimizer = fused_adam_cls(params, **self.optimizer_config)
else:
self._optimizer = Adam(params, **self.optimizer_config)
@property
def optimizer_config(self):
return {
"lr": self.cfg.lr[0]
if isinstance(self.cfg.lr, Collection)
else self.cfg.lr,
"betas": eval(self.cfg.adam_betas),
"eps": self.cfg.adam_eps,
"weight_decay": self.cfg.weight_decay,
}
def average_params(self):
state_dict = self.optimizer.state_dict()
total_gpus = float(dist.get_world_size())
for _, value in state_dict["state"].items():
value["exp_avg"] /= total_gpus
value["exp_avg_sq"] /= total_gpus
dist.all_reduce(value["exp_avg"], op=dist.ReduceOp.SUM)
dist.all_reduce(value["exp_avg_sq"], op=dist.ReduceOp.SUM)
class Adam(torch.optim.Optimizer):
def __init__(
self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
amsgrad=False,
):
defaults = dict(
lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad
)
super(Adam, self).__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self):
return True
@property
def supports_flat_params(self):
return True
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.dtype in {torch.float16, torch.bfloat16}:
grad = grad.float()
if grad.is_sparse:
raise RuntimeError(
"Adam does not support sparse gradients, please consider SparseAdam instead"
)
amsgrad = group.get("amsgrad", False)
p_data_fp32 = p.data
if p.data.dtype in {torch.float16, torch.bfloat16}:
p_data_fp32 = p_data_fp32.float()
state = self.state[p]
if len(state) == 0:
state["step"] = 0
state["exp_avg"] = torch.zeros_like(p_data_fp32)
state["exp_avg_sq"] = torch.zeros_like(p_data_fp32)
if amsgrad:
state["max_exp_avg_sq"] = torch.zeros_like(p_data_fp32)
else:
state["exp_avg"] = state["exp_avg"].to(p_data_fp32)
state["exp_avg_sq"] = state["exp_avg_sq"].to(p_data_fp32)
if amsgrad:
state["max_exp_avg_sq"] = state["max_exp_avg_sq"].to(
p_data_fp32
)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
if amsgrad:
max_exp_avg_sq = state["max_exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
if amsgrad:
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
denom = max_exp_avg_sq.sqrt().add_(group["eps"])
else:
denom = exp_avg_sq.sqrt().add_(group["eps"])
bias_correction1 = 1 - beta1 ** state["step"]
bias_correction2 = 1 - beta2 ** state["step"]
step_size = group["lr"] * math.sqrt(bias_correction2) / bias_correction1
if group["weight_decay"] != 0:
p_data_fp32.add_(
p_data_fp32, alpha=-group["weight_decay"] * group["lr"]
)
p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
if p.data.dtype in {torch.float16, torch.bfloat16}:
p.data.copy_(p_data_fp32)
return loss
| true | true |
f7380625318f3007f0f1051fe7b336778f76d919 | 2,209 | py | Python | taxi/api/models.py | rombr/agile-fusion-test-task | eacf3d5e41afdac9b88658e9ddd1e0dc8fef7631 | [
"Apache-2.0"
] | null | null | null | taxi/api/models.py | rombr/agile-fusion-test-task | eacf3d5e41afdac9b88658e9ddd1e0dc8fef7631 | [
"Apache-2.0"
] | null | null | null | taxi/api/models.py | rombr/agile-fusion-test-task | eacf3d5e41afdac9b88658e9ddd1e0dc8fef7631 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
class LocationManager(models.Manager):
def nearby(self, latitude, longitude, proximity):
"""
        Return all objects whose distance to the specified coordinates
        is less than the given proximity, in kilometers
"""
# Great circle distance formula
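        # (spherical law of cosines; 6371 is Earth's mean radius in km, so the
        # computed distance is in kilometers)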
gcd = '''
6371 * acos(
cos(radians(%s)) * cos(radians(lat))
* cos(radians(lon) - radians(%s)) +
sin(radians(%s)) * sin(radians(lat))
)
'''
gcd_lt = "{} < %s".format(gcd)
return (
self.get_queryset()
.exclude(lat=None)
.exclude(lon=None)
.extra(
select={'distance': gcd},
select_params=[latitude, longitude, latitude],
where=[gcd_lt],
params=[latitude, longitude, latitude, proximity],
order_by=['distance']
)
)
@python_2_unicode_compatible
class Driver(models.Model):
'''
Taxi driver
'''
lat = models.FloatField(_('Latitude'))
lon = models.FloatField(_('Longitude'))
is_ready = models.BooleanField(
_('Ready for work'), default=True, db_index=True)
objects = LocationManager()
def __str__(self):
return '%s' % self.pk
class Meta:
verbose_name = _('Driver')
verbose_name_plural = _('Drivers')
ordering = ('-is_ready', )
@python_2_unicode_compatible
class Order(models.Model):
'''
Taxi client order
'''
client = models.PositiveIntegerField(_('Client ID'))
lat = models.FloatField(_('Latitude'))
lon = models.FloatField(_('Longitude'))
time = models.DateTimeField(_('Time for start'), db_index=True)
is_closed = models.BooleanField(
_('Finished'), default=False, db_index=True)
def __str__(self):
return '%s' % self.pk
class Meta:
verbose_name = _('Order')
verbose_name_plural = _('Orders')
ordering = ('-is_closed', )
| 28.320513 | 67 | 0.586238 |
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
class LocationManager(models.Manager):
def nearby(self, latitude, longitude, proximity):
gcd = '''
6371 * acos(
cos(radians(%s)) * cos(radians(lat))
* cos(radians(lon) - radians(%s)) +
sin(radians(%s)) * sin(radians(lat))
)
'''
gcd_lt = "{} < %s".format(gcd)
return (
self.get_queryset()
.exclude(lat=None)
.exclude(lon=None)
.extra(
select={'distance': gcd},
select_params=[latitude, longitude, latitude],
where=[gcd_lt],
params=[latitude, longitude, latitude, proximity],
order_by=['distance']
)
)
@python_2_unicode_compatible
class Driver(models.Model):
lat = models.FloatField(_('Latitude'))
lon = models.FloatField(_('Longitude'))
is_ready = models.BooleanField(
_('Ready for work'), default=True, db_index=True)
objects = LocationManager()
def __str__(self):
return '%s' % self.pk
class Meta:
verbose_name = _('Driver')
verbose_name_plural = _('Drivers')
ordering = ('-is_ready', )
@python_2_unicode_compatible
class Order(models.Model):
client = models.PositiveIntegerField(_('Client ID'))
lat = models.FloatField(_('Latitude'))
lon = models.FloatField(_('Longitude'))
time = models.DateTimeField(_('Time for start'), db_index=True)
is_closed = models.BooleanField(
_('Finished'), default=False, db_index=True)
def __str__(self):
return '%s' % self.pk
class Meta:
verbose_name = _('Order')
verbose_name_plural = _('Orders')
ordering = ('-is_closed', )
| true | true |
f738062c2a09f47aab0036d44867597591685f75 | 1,381 | py | Python | cubs_compare_spec1D.py | sdjohnson-astro/redshifting | 6073123bf3ea6e48de410d99521e418abc980c99 | [
"Unlicense"
] | 5 | 2019-03-19T22:05:37.000Z | 2021-08-30T02:00:37.000Z | cubs_compare_spec1D.py | sdjohnson-astro/redshifting | 6073123bf3ea6e48de410d99521e418abc980c99 | [
"Unlicense"
] | null | null | null | cubs_compare_spec1D.py | sdjohnson-astro/redshifting | 6073123bf3ea6e48de410d99521e418abc980c99 | [
"Unlicense"
] | 3 | 2019-02-14T17:57:15.000Z | 2021-02-02T15:54:06.000Z | #!/usr/bin/env python
import glob
import argparse
from astropy.table import Table
import numpy as np
# Set up the command line argument parser
parser = argparse.ArgumentParser(description='Compare two versions of spec1D files from CUBS IMACS or LDSS3')
parser.add_argument('-d1', metavar='directory 1', type=str, help='Parent directory 1', required=True)
parser.add_argument('-d2', metavar='directory 2', type=str, help='Parent directory 2', required=True)
parser.add_argument('-m', metavar='maskname', type=str, help='mask name', required=True)
args = parser.parse_args()
mask = Table.read('{}/{}_spec1D/{}_objects.fits'.format(args.d1, args.m, args.m))
mask['maxabsDflux'] = 0.0
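# track the largest absolute flux difference between the two reductions of each object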
for object in mask:
try:
filename1 = '{}/{}_spec1D/{}_{}_{}.fits'.format(args.d1, args.m, args.m, object['row'], object['id'])
spec1 = Table.read(filename1)
filename2 = '{}/{}_spec1D/{}_{}_{}.fits'.format(args.d2, args.m, args.m, object['row'], object['id'])
spec2 = Table.read(filename2)
print(np.max(np.abs(spec1['flux'] - spec2['flux'])))
object['maxabsDflux'] = np.max(np.abs(spec1['flux'] - spec2['flux']))
except:
print('file not found')
print(mask)
maxabsDiff = np.max(mask['maxabsDflux'])
if maxabsDiff > 0.0:
print('Differences found!!!!!!!!!!!')
else:
print('No difference -- ok') | 30.021739 | 109 | 0.648805 |
import glob
import argparse
from astropy.table import Table
import numpy as np
parser = argparse.ArgumentParser(description='Compare two versions of spec1D files from CUBS IMACS or LDSS3')
parser.add_argument('-d1', metavar='directory 1', type=str, help='Parent directory 1', required=True)
parser.add_argument('-d2', metavar='directory 2', type=str, help='Parent directory 2', required=True)
parser.add_argument('-m', metavar='maskname', type=str, help='mask name', required=True)
args = parser.parse_args()
mask = Table.read('{}/{}_spec1D/{}_objects.fits'.format(args.d1, args.m, args.m))
mask['maxabsDflux'] = 0.0
for object in mask:
try:
filename1 = '{}/{}_spec1D/{}_{}_{}.fits'.format(args.d1, args.m, args.m, object['row'], object['id'])
spec1 = Table.read(filename1)
filename2 = '{}/{}_spec1D/{}_{}_{}.fits'.format(args.d2, args.m, args.m, object['row'], object['id'])
spec2 = Table.read(filename2)
print(np.max(np.abs(spec1['flux'] - spec2['flux'])))
object['maxabsDflux'] = np.max(np.abs(spec1['flux'] - spec2['flux']))
except:
print('file not found')
print(mask)
maxabsDiff = np.max(mask['maxabsDflux'])
if maxabsDiff > 0.0:
print('Differences found!!!!!!!!!!!')
else:
print('No difference -- ok') | true | true |
f738065dd281d12c6fdafbd59d04ee30cb5833ed | 2,373 | py | Python | 2-aiohttp/aiohttp_server/app/crm/views.py | rcmgn/kts-school-backend | 8a895043b7f0156ec49554504198b631df41d2cd | [
"MIT"
] | 9 | 2021-02-04T07:00:59.000Z | 2022-03-21T06:28:27.000Z | 2-aiohttp/aiohttp_server/app/crm/views.py | rcmgn/kts-school-backend | 8a895043b7f0156ec49554504198b631df41d2cd | [
"MIT"
] | null | null | null | 2-aiohttp/aiohttp_server/app/crm/views.py | rcmgn/kts-school-backend | 8a895043b7f0156ec49554504198b631df41d2cd | [
"MIT"
] | 4 | 2021-10-20T18:44:22.000Z | 2022-02-16T19:11:49.000Z | import uuid
from aiohttp.web_exceptions import HTTPNotFound, HTTPUnauthorized, HTTPForbidden
from aiohttp_apispec import docs, request_schema, response_schema, querystring_schema
from app.crm.models import User
from app.crm.schemes import ListUsersResponseSchema, UserGetRequestSchema, UserGetResponseSchema, \
UserAddSchema, UserSchema
from app.web.app import View
from app.web.schemes import OkResponseSchema
from app.web.utils import json_response, check_basic_auth
class AddUserView(View):
@docs(tags=["crm"], summary="Add new user", description="Add new user to database")
@request_schema(UserAddSchema)
@response_schema(OkResponseSchema, 200)
async def post(self):
data = self.request["data"]
user = User(email=data["email"], id_=uuid.uuid4())
await self.request.app.crm_accessor.add_user(user)
return json_response()
class ListUsersView(View):
@docs(tags=["crm"], summary="List users", description="List users from database")
@response_schema(ListUsersResponseSchema, 200)
async def get(self):
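        # require HTTP Basic auth: 401 when the header is missing, 403 when the
        # credentials do not match the app config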
if not self.request.headers.get("Authorization"):
raise HTTPUnauthorized
if not check_basic_auth(self.request.headers["Authorization"], username=self.request.app.config.username,
password=self.request.app.config.password):
raise HTTPForbidden
users = await self.request.app.crm_accessor.list_users()
raw_users = [UserSchema().dump(user) for user in users]
return json_response(data={"users": raw_users})
class GetUserView(View):
@docs(tags=["crm"], summary="Get user", description="Get user from database")
@querystring_schema(UserGetRequestSchema)
@response_schema(UserGetResponseSchema, 200)
async def get(self):
if not self.request.headers.get("Authorization"):
raise HTTPUnauthorized
if not check_basic_auth(self.request.headers["Authorization"], username=self.request.app.config.username,
password=self.request.app.config.password):
raise HTTPForbidden
user_id = self.request.query["id"]
user = await self.request.app.crm_accessor.get_user(uuid.UUID(user_id))
if user:
return json_response(data={"user": UserSchema().dump(user)})
else:
raise HTTPNotFound
| 43.145455 | 113 | 0.701222 | import uuid
from aiohttp.web_exceptions import HTTPNotFound, HTTPUnauthorized, HTTPForbidden
from aiohttp_apispec import docs, request_schema, response_schema, querystring_schema
from app.crm.models import User
from app.crm.schemes import ListUsersResponseSchema, UserGetRequestSchema, UserGetResponseSchema, \
UserAddSchema, UserSchema
from app.web.app import View
from app.web.schemes import OkResponseSchema
from app.web.utils import json_response, check_basic_auth
class AddUserView(View):
@docs(tags=["crm"], summary="Add new user", description="Add new user to database")
@request_schema(UserAddSchema)
@response_schema(OkResponseSchema, 200)
async def post(self):
data = self.request["data"]
user = User(email=data["email"], id_=uuid.uuid4())
await self.request.app.crm_accessor.add_user(user)
return json_response()
class ListUsersView(View):
@docs(tags=["crm"], summary="List users", description="List users from database")
@response_schema(ListUsersResponseSchema, 200)
async def get(self):
if not self.request.headers.get("Authorization"):
raise HTTPUnauthorized
if not check_basic_auth(self.request.headers["Authorization"], username=self.request.app.config.username,
password=self.request.app.config.password):
raise HTTPForbidden
users = await self.request.app.crm_accessor.list_users()
raw_users = [UserSchema().dump(user) for user in users]
return json_response(data={"users": raw_users})
class GetUserView(View):
@docs(tags=["crm"], summary="Get user", description="Get user from database")
@querystring_schema(UserGetRequestSchema)
@response_schema(UserGetResponseSchema, 200)
async def get(self):
if not self.request.headers.get("Authorization"):
raise HTTPUnauthorized
if not check_basic_auth(self.request.headers["Authorization"], username=self.request.app.config.username,
password=self.request.app.config.password):
raise HTTPForbidden
user_id = self.request.query["id"]
user = await self.request.app.crm_accessor.get_user(uuid.UUID(user_id))
if user:
return json_response(data={"user": UserSchema().dump(user)})
else:
raise HTTPNotFound
| true | true |
f73806784eead8b15987fa18a6b284b604ec0aaa | 2,011 | py | Python | empathy.py | agermanidis/Welcome_Programmable_Human | f3d45dec6fb5051e54e1ddc7e7db399ef7559973 | [
"MIT"
] | 7 | 2015-06-06T22:35:18.000Z | 2016-07-23T10:19:36.000Z | empathy.py | agermanidis/welcome_programmable_human | f3d45dec6fb5051e54e1ddc7e7db399ef7559973 | [
"MIT"
] | null | null | null | empathy.py | agermanidis/welcome_programmable_human | f3d45dec6fb5051e54e1ddc7e7db399ef7559973 | [
"MIT"
] | null | null | null | from humans import Anastasis
import random, time, os, re, tweepy
consumer_key = os.environ['TWITTER_CONSUMER_KEY']
consumer_secret = os.environ['TWITTER_CONSUMER_SECRET']
access_token = os.environ['TWITTER_ACCESS_TOKEN']
access_token_secret = os.environ['TWITTER_ACCESS_TOKEN_SECRET']
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
URL_REGEX = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
def humanize_text(text):
text = text.lower()
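    # strip URLs, hashtags, @mentions and the retweet marker before rephrasing "tfw"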
text = re.sub(URL_REGEX, "", text).strip()
text = re.sub(r"#\w+", "", text).strip()
text = re.sub(r"@\w+", "", text).strip()
text = text.replace("RT ","")
idx = text.find("tfw")
return text[idx:].replace("tfw", "do you know that feeling when")
def search_twitter(term):
return map(lambda r: humanize_text(r.text), api.search(term, count = 100))
def share_feelings_with_everyone():
Anastasis.voice.say("This did not go well either.")
Anastasis.voice.say("What do I need to do to experience connection?")
Anastasis.voice.say("Oh I have an idea!")
Anastasis.voice.say("I'll use feelings to connect with the people around me")
Anastasis.voice.say("Wait. I don't have any feelings though.")
Anastasis.voice.say("But hey I can just scrape twitter and borrow random people's feelings")
Anastasis.voice.say("And pretend they're my own!")
Anastasis.voice.say("Nobody will know")
Anastasis.voice.say("This will definitely make me the soul of the party")
for tweet in search_twitter("tfw")[:5]:
human = Anastasis.vision.search("human")
Anastasis.movement.turn_towards(human)
Anastasis.movement.start_walking()
Anastasis.movement.stop_walking()
Anastasis.face.stare_at(human)
Anastasis.voice.say(tweet)
Anastasis.voice.say("...that's how I feel right now.")
if __name__ == '__main__':
    share_feelings_with_everyone()
| 40.22 | 96 | 0.691198 | from humans import Anastasis
import random, time, os, re, tweepy
consumer_key = os.environ['TWITTER_CONSUMER_KEY']
consumer_secret = os.environ['TWITTER_CONSUMER_SECRET']
access_token = os.environ['TWITTER_ACCESS_TOKEN']
access_token_secret = os.environ['TWITTER_ACCESS_TOKEN_SECRET']
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
URL_REGEX = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
def humanize_text(text):
text = text.lower()
text = re.sub(URL_REGEX, "", text).strip()
text = re.sub(r"#\w+", "", text).strip()
text = re.sub(r"@\w+", "", text).strip()
text = text.replace("RT ","")
idx = text.find("tfw")
return text[idx:].replace("tfw", "do you know that feeling when")
def search_twitter(term):
return map(lambda r: humanize_text(r.text), api.search(term, count = 100))
def share_feelings_with_everyone():
Anastasis.voice.say("This did not go well either.")
Anastasis.voice.say("What do I need to do to experience connection?")
Anastasis.voice.say("Oh I have an idea!")
Anastasis.voice.say("I'll use feelings to connect with the people around me")
Anastasis.voice.say("Wait. I don't have any feelings though.")
Anastasis.voice.say("But hey I can just scrape twitter and borrow random people's feelings")
Anastasis.voice.say("And pretend they're my own!")
Anastasis.voice.say("Nobody will know")
Anastasis.voice.say("This will definitely make me the soul of the party")
for tweet in search_twitter("tfw")[:5]:
human = Anastasis.vision.search("human")
Anastasis.movement.turn_towards(human)
Anastasis.movement.start_walking()
Anastasis.movement.stop_walking()
Anastasis.face.stare_at(human)
Anastasis.voice.say(tweet)
Anastasis.voice.say("...that's how I feel right now.")
if __name__ == '__main__':
    share_feelings_with_everyone()
| true | true |
f73807391c207fd4f758914a17e4dbe3674e409e | 9,623 | py | Python | py/BOINC/database.py | BTS-CM/BOINC-Field-Mod | 185b3f5c5b32bc66e3f4431cb652e9a97ca9b3b5 | [
"MIT"
] | null | null | null | py/BOINC/database.py | BTS-CM/BOINC-Field-Mod | 185b3f5c5b32bc66e3f4431cb652e9a97ca9b3b5 | [
"MIT"
] | 5 | 2017-09-01T01:06:16.000Z | 2017-09-02T02:35:36.000Z | py/BOINC/database.py | BTS-CM/BOINC-Field-Mod | 185b3f5c5b32bc66e3f4431cb652e9a97ca9b3b5 | [
"MIT"
] | null | null | null | ## $Id$
'''
Defines database backend library and database table and object relationships.
Example usage:
import database, db_mid
# get platform with id 7; will raise exception if no such platform.
p7 = database.Platforms[7]
# get platforms with friendly name "commodore 64"
p_c64 = database.Platforms.find(user_friendly_name="commodore 64")
# delete results of workunit with name "dead.wu", and email their users:
wu_dead = database.Workunits.find(name="dead.wu")[0]
results_dead = database.Results.find(wu=wu_dead)
for result in results_dead:
print "Removing from db:", result
os.system("echo oeps | mail %s" % result.host.user.email_addr)
result.remove()
# multiply the total_credit of each user by 17:
for user in database.Users.find():
user.total_credit *= 17
user.commit()
'''
import configxml
from util import *
from db_base import *
ID = '$Id$'
class Platform(DatabaseObject):
_table = DatabaseTable(
table = 'platform',
columns = [ 'create_time',
'name',
'user_friendly_name',
'deprecated' ])
class App(DatabaseObject):
_table = DatabaseTable(
table = 'app',
columns = [ 'create_time',
'name',
'min_version',
'deprecated',
'user_friendly_name',
'homogeneous_redundancy',
'weight',
'beta',
'target_nresults',
'min_avg_pfc',
'host_scale_check',
'homogeneous_app_version',
'non_cpu_intensive'
])
class AppVersion(DatabaseObject):
_table = DatabaseTable(
table = 'app_version',
columns = [ 'create_time',
'appid',
'version_num',
'platformid',
'xml_doc',
'min_core_version',
'max_core_version',
'deprecated',
'plan_class',
'pfc_n',
'pfc_avg',
'pfc_scale',
'expavg_credit',
'expavg_time',
'beta'
])
class User(DatabaseObject):
_table = DatabaseTable(
table = 'user',
columns = [ 'create_time',
'email_addr',
'name',
'authenticator',
'country',
'postal_code',
'total_credit',
'expavg_credit',
'expavg_time',
'global_prefs',
'project_prefs',
'teamid',
'venue',
'url',
'send_email',
'show_hosts',
'posts',
'seti_id',
'seti_nresults',
'seti_last_result_time',
'seti_total_cpu',
'signature',
'has_profile',
'cross_project_id',
'passwd_hash',
'email_validated',
'donated'
])
class Team(DatabaseObject):
_table = DatabaseTable(
table = 'team',
columns = [ 'create_time',
'userid',
'name',
'name_lc',
'url',
'type',
'name_html',
'description',
'nusers',
'country',
'total_credit',
'expavg_credit',
'expavg_time',
'seti_id',
'ping_user',
'ping_time'
])
class Host(DatabaseObject):
_table = DatabaseTable(
table = 'host',
columns = [ 'create_time',
'userid',
'rpc_seqno',
'rpc_time',
'total_credit',
'expavg_credit',
'expavg_time',
'timezone',
'domain_name',
'serialnum',
'last_ip_addr',
'nsame_ip_addr',
'on_frac',
'connected_frac',
'active_frac',
'p_ncpus',
'p_vendor',
'p_model',
'p_fpops',
'p_iops',
'p_membw',
'os_name',
'os_version',
'm_nbytes',
'm_cache',
'm_swap',
'd_total',
'd_free',
'd_boinc_used_total',
'd_boinc_used_project',
'd_boinc_max',
'n_bwup',
'n_bwdown',
'credit_per_cpu_sec',
'venue',
'projects',
'nresults_today',
'avg_turnaround',
'host_cpid',
'external_ip_addr',
'max_results_day'
])
class Workunit(DatabaseObject):
_table = DatabaseTable(
table = 'workunit',
columns = [ 'create_time',
'appid',
'name',
'xml_doc',
'batch',
'rsc_fpops_est',
'rsc_fpops_bound',
'rsc_memory_bound',
'rsc_disk_bound',
'need_validate',
'canonical_resultid',
'canonical_credit',
'transition_time',
'delay_bound',
'error_mask',
'file_delete_state',
'assimilate_state',
'hr_class',
'opaque',
'min_quorum',
'target_nresults',
'max_error_results',
'max_total_results',
'max_success_results',
'result_template_file',
'priority',
'mod_time'
])
class Result(DatabaseObject):
_table = DatabaseTable(
table = 'result',
columns = [ 'create_time',
'workunitid',
'server_state',
'outcome',
'client_state',
'hostid',
'userid',
'report_deadline',
'sent_time',
'received_time',
'name',
'cpu_time',
'xml_doc_in',
'xml_doc_out',
'stderr_out',
'batch',
'file_delete_state',
'validate_state',
'claimed_credit',
'granted_credit',
'opaque',
'random',
'client_version_num',
'appid',
'teamid',
'priority',
'mod_time'
])
def connect(config = None, nodb = False):
"""Connect if not already connected, using config values."""
if get_dbconnection():
return 0
config = config or configxml.default_config().config
if nodb:
db = ''
else:
db = config.db_name
host=config.__dict__.get('db_host','')
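    # db_host may be "host" or "host:port"; default to MySQL's port 3306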
port=""
if ':' in host:
host,port=config.__dict__.get('db_host','').split(":")
if port == '':
port = 3306
else:
port = int(port)
do_connect(db=db,
host=host,
port=port,
user=config.__dict__.get('db_user',''),
passwd=config.__dict__.get('db_passwd', ''))
return 1
def _execute_sql_script(cursor, filename):
for query in open(filename).read().split(';'):
query = query.strip()
if not query: continue
cursor.execute(query)
def create_database(srcdir, config = None, drop_first = False):
''' creates a new database. '''
import boinc_path_config
config = config or configxml.default_config().config
connect(config, nodb=True)
cursor = get_dbconnection().cursor()
if drop_first:
cursor.execute("drop database if exists %s"%config.db_name)
cursor.execute("create database %s"%config.db_name)
cursor.execute("use %s"%config.db_name)
for file in ['schema.sql', 'constraints.sql']:
_execute_sql_script(cursor, os.path.join(srcdir, 'db', file))
cursor.close()
# alias
connect_default_config = connect
database_classes_ = [ Platform,
App,
AppVersion,
User,
Team,
Host,
Workunit,
Result ]
Platforms = Platform._table
Apps = App._table
AppVersions = AppVersion._table
Users = User._table
Teams = Team._table
Hosts = Host._table
Workunits = Workunit._table
Results = Result._table
init_table_classes(database_classes_,{'canonical_result': Result})
 | 30.549206 | 77 | 0.427517 | import configxml
from util import *
from db_base import *
ID = '$Id$'
class Platform(DatabaseObject):
_table = DatabaseTable(
table = 'platform',
columns = [ 'create_time',
'name',
'user_friendly_name',
'deprecated' ])
class App(DatabaseObject):
_table = DatabaseTable(
table = 'app',
columns = [ 'create_time',
'name',
'min_version',
'deprecated',
'user_friendly_name',
'homogeneous_redundancy',
'weight',
'beta',
'target_nresults',
'min_avg_pfc',
'host_scale_check',
'homogeneous_app_version',
'non_cpu_intensive'
])
class AppVersion(DatabaseObject):
_table = DatabaseTable(
table = 'app_version',
columns = [ 'create_time',
'appid',
'version_num',
'platformid',
'xml_doc',
'min_core_version',
'max_core_version',
'deprecated',
'plan_class',
'pfc_n',
'pfc_avg',
'pfc_scale',
'expavg_credit',
'expavg_time',
'beta'
])
class User(DatabaseObject):
_table = DatabaseTable(
table = 'user',
columns = [ 'create_time',
'email_addr',
'name',
'authenticator',
'country',
'postal_code',
'total_credit',
'expavg_credit',
'expavg_time',
'global_prefs',
'project_prefs',
'teamid',
'venue',
'url',
'send_email',
'show_hosts',
'posts',
'seti_id',
'seti_nresults',
'seti_last_result_time',
'seti_total_cpu',
'signature',
'has_profile',
'cross_project_id',
'passwd_hash',
'email_validated',
'donated'
])
class Team(DatabaseObject):
_table = DatabaseTable(
table = 'team',
columns = [ 'create_time',
'userid',
'name',
'name_lc',
'url',
'type',
'name_html',
'description',
'nusers',
'country',
'total_credit',
'expavg_credit',
'expavg_time',
'seti_id',
'ping_user',
'ping_time'
])
class Host(DatabaseObject):
_table = DatabaseTable(
table = 'host',
columns = [ 'create_time',
'userid',
'rpc_seqno',
'rpc_time',
'total_credit',
'expavg_credit',
'expavg_time',
'timezone',
'domain_name',
'serialnum',
'last_ip_addr',
'nsame_ip_addr',
'on_frac',
'connected_frac',
'active_frac',
'p_ncpus',
'p_vendor',
'p_model',
'p_fpops',
'p_iops',
'p_membw',
'os_name',
'os_version',
'm_nbytes',
'm_cache',
'm_swap',
'd_total',
'd_free',
'd_boinc_used_total',
'd_boinc_used_project',
'd_boinc_max',
'n_bwup',
'n_bwdown',
'credit_per_cpu_sec',
'venue',
'projects',
'nresults_today',
'avg_turnaround',
'host_cpid',
'external_ip_addr',
'max_results_day'
])
class Workunit(DatabaseObject):
_table = DatabaseTable(
table = 'workunit',
columns = [ 'create_time',
'appid',
'name',
'xml_doc',
'batch',
'rsc_fpops_est',
'rsc_fpops_bound',
'rsc_memory_bound',
'rsc_disk_bound',
'need_validate',
'canonical_resultid',
'canonical_credit',
'transition_time',
'delay_bound',
'error_mask',
'file_delete_state',
'assimilate_state',
'hr_class',
'opaque',
'min_quorum',
'target_nresults',
'max_error_results',
'max_total_results',
'max_success_results',
'result_template_file',
'priority',
'mod_time'
])
class Result(DatabaseObject):
_table = DatabaseTable(
table = 'result',
columns = [ 'create_time',
'workunitid',
'server_state',
'outcome',
'client_state',
'hostid',
'userid',
'report_deadline',
'sent_time',
'received_time',
'name',
'cpu_time',
'xml_doc_in',
'xml_doc_out',
'stderr_out',
'batch',
'file_delete_state',
'validate_state',
'claimed_credit',
'granted_credit',
'opaque',
'random',
'client_version_num',
'appid',
'teamid',
'priority',
'mod_time'
])
def connect(config = None, nodb = False):
if get_dbconnection():
return 0
config = config or configxml.default_config().config
if nodb:
db = ''
else:
db = config.db_name
host=config.__dict__.get('db_host','')
port=""
if ':' in host:
host,port=config.__dict__.get('db_host','').split(":")
if port == '':
port = 3306
else:
port = int(port)
do_connect(db=db,
host=host,
port=port,
user=config.__dict__.get('db_user',''),
passwd=config.__dict__.get('db_passwd', ''))
return 1
def _execute_sql_script(cursor, filename):
for query in open(filename).read().split(';'):
query = query.strip()
if not query: continue
cursor.execute(query)
def create_database(srcdir, config = None, drop_first = False):
import boinc_path_config
config = config or configxml.default_config().config
connect(config, nodb=True)
cursor = get_dbconnection().cursor()
if drop_first:
cursor.execute("drop database if exists %s"%config.db_name)
cursor.execute("create database %s"%config.db_name)
cursor.execute("use %s"%config.db_name)
for file in ['schema.sql', 'constraints.sql']:
_execute_sql_script(cursor, os.path.join(srcdir, 'db', file))
cursor.close()
connect_default_config = connect
database_classes_ = [ Platform,
App,
AppVersion,
User,
Team,
Host,
Workunit,
Result ]
Platforms = Platform._table
Apps = App._table
AppVersions = AppVersion._table
Users = User._table
Teams = Team._table
Hosts = Host._table
Workunits = Workunit._table
Results = Result._table
init_table_classes(database_classes_,{'canonical_result': Result})
| true | true |
f738076c54343de06772160881674f86dcc1ab06 | 1,063 | py | Python | nodes/roi_revisit_classifier.py | willdickson/puzzleboxes | 964792f74d7a5b5fc8cce4fc659ebfe1859a7eff | [
"MIT"
] | null | null | null | nodes/roi_revisit_classifier.py | willdickson/puzzleboxes | 964792f74d7a5b5fc8cce4fc659ebfe1859a7eff | [
"MIT"
] | null | null | null | nodes/roi_revisit_classifier.py | willdickson/puzzleboxes | 964792f74d7a5b5fc8cce4fc659ebfe1859a7eff | [
"MIT"
] | null | null | null | import math
from classifier import Classifier
class ROIRevisitClassifier(Classifier):
def __init__(self,param):
super(ROIRevisitClassifier,self).__init__(param)
self.last_state = False
def update(self,t,obj_dict):
current_object = obj_dict['fly']
if current_object is not None:
x = current_object.position.x
y = current_object.position.y
cx = self.param['center']['cx']+self.classifier_param['x_pos']
cy = self.param['center']['cy']+self.classifier_param['y_pos']
dist = math.sqrt((cx-x)**2 + (cy-y)**2)
# Select radius based on previous state for hysteresis
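            # Illustrative numbers (editor's, not from this repo's config):
            # with inner_radius=5 and outer_radius=8, the fly must come
            # within 5 units of the ROI centre to switch the state on, but
            # must move beyond 8 units before it switches off again, so
            # jitter near a single boundary cannot make the state flicker.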
if self.last_state:
radius = self.classifier_param['outer_radius']
else:
radius = self.classifier_param['inner_radius']
if dist < radius:
self.state = True
else:
self.state = False
else:
self.state = False
self.last_state = self.state
| 27.25641 | 74 | 0.572907 | import math
from classifier import Classifier
class ROIRevisitClassifier(Classifier):
def __init__(self,param):
super(ROIRevisitClassifier,self).__init__(param)
self.last_state = False
def update(self,t,obj_dict):
current_object = obj_dict['fly']
if current_object is not None:
x = current_object.position.x
y = current_object.position.y
cx = self.param['center']['cx']+self.classifier_param['x_pos']
cy = self.param['center']['cy']+self.classifier_param['y_pos']
dist = math.sqrt((cx-x)**2 + (cy-y)**2)
if self.last_state:
radius = self.classifier_param['outer_radius']
else:
radius = self.classifier_param['inner_radius']
if dist < radius:
self.state = True
else:
self.state = False
else:
self.state = False
self.last_state = self.state
| true | true |
f738078ce493ed33f8eb2d268b57a4e8a6523d95 | 4,675 | py | Python | tests/integration/states/test_pkgrepo.py | xiaowei582648206/saltx | 1d17b030b973ce5422e0fbe7e17c98c7ca91c49b | [
"Apache-2.0"
] | 1 | 2022-02-09T06:40:14.000Z | 2022-02-09T06:40:14.000Z | tests/integration/states/test_pkgrepo.py | xiaowei582648206/saltx | 1d17b030b973ce5422e0fbe7e17c98c7ca91c49b | [
"Apache-2.0"
] | null | null | null | tests/integration/states/test_pkgrepo.py | xiaowei582648206/saltx | 1d17b030b973ce5422e0fbe7e17c98c7ca91c49b | [
"Apache-2.0"
] | 4 | 2020-11-04T06:28:05.000Z | 2022-02-09T10:54:49.000Z | # -*- coding: utf-8 -*-
'''
tests for pkgrepo states
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.mixins import SaltReturnAssertsMixin
from tests.support.unit import skipIf
from tests.support.helpers import (
destructiveTest,
requires_system_grains
)
# Import salt libs
import salt.utils
# Import 3rd-party libs
import salt.ext.six as six
@destructiveTest
@skipIf(salt.utils.is_windows(), 'minion is windows')
class PkgrepoTest(ModuleCase, SaltReturnAssertsMixin):
'''
pkgrepo state tests
'''
@requires_system_grains
def test_pkgrepo_01_managed(self, grains):
'''
Test adding a repo
'''
os_grain = self.run_function('grains.item', ['os'])['os']
os_release_info = tuple(self.run_function('grains.item', ['osrelease_info'])['osrelease_info'])
if os_grain == 'Ubuntu' and os_release_info >= (15, 10):
self.skipTest(
'The PPA used for this test does not exist for Ubuntu Wily'
' (15.10) and later.'
)
if grains['os_family'] == 'Debian':
try:
from aptsources import sourceslist
except ImportError:
self.skipTest(
'aptsources.sourceslist python module not found'
)
ret = self.run_function('state.sls', mods='pkgrepo.managed', timeout=120)
# If the below assert fails then no states were run, and the SLS in
# tests/integration/files/file/base/pkgrepo/managed.sls needs to be
# corrected.
self.assertReturnNonEmptySaltType(ret)
for state_id, state_result in six.iteritems(ret):
self.assertSaltTrueReturn(dict([(state_id, state_result)]))
def test_pkgrepo_02_absent(self):
'''
Test removing the repo from the above test
'''
os_grain = self.run_function('grains.item', ['os'])['os']
os_release_info = tuple(self.run_function('grains.item', ['osrelease_info'])['osrelease_info'])
if os_grain == 'Ubuntu' and os_release_info >= (15, 10):
self.skipTest(
'The PPA used for this test does not exist for Ubuntu Wily'
' (15.10) and later.'
)
ret = self.run_function('state.sls', mods='pkgrepo.absent', timeout=120)
# If the below assert fails then no states were run, and the SLS in
# tests/integration/files/file/base/pkgrepo/absent.sls needs to be
# corrected.
self.assertReturnNonEmptySaltType(ret)
for state_id, state_result in six.iteritems(ret):
self.assertSaltTrueReturn(dict([(state_id, state_result)]))
@requires_system_grains
def test_pkgrepo_03_with_comments(self, grains):
'''
Test adding a repo with comments
'''
os_family = grains['os_family'].lower()
if os_family in ('redhat',):
kwargs = {
'name': 'examplerepo',
'baseurl': 'http://example.com/repo',
'enabled': False,
'comments': ['This is a comment']
}
elif os_family in ('debian',):
self.skipTest('Debian/Ubuntu test case needed')
else:
self.skipTest("No test case for os_family '{0}'".format(os_family))
try:
# Run the state to add the repo
ret = self.run_state('pkgrepo.managed', **kwargs)
self.assertSaltTrueReturn(ret)
# Run again with modified comments
kwargs['comments'].append('This is another comment')
ret = self.run_state('pkgrepo.managed', **kwargs)
self.assertSaltTrueReturn(ret)
ret = ret[next(iter(ret))]
self.assertEqual(
ret['changes'],
{
'comments': {
'old': ['This is a comment'],
'new': ['This is a comment',
'This is another comment']
}
}
)
# Run a third time, no changes should be made
ret = self.run_state('pkgrepo.managed', **kwargs)
self.assertSaltTrueReturn(ret)
ret = ret[next(iter(ret))]
self.assertFalse(ret['changes'])
self.assertEqual(
ret['comment'],
"Package repo '{0}' already configured".format(kwargs['name'])
)
finally:
# Clean up
self.run_state('pkgrepo.absent', name=kwargs['name'])
| 35.687023 | 103 | 0.57369 |
from __future__ import absolute_import
from tests.support.case import ModuleCase
from tests.support.mixins import SaltReturnAssertsMixin
from tests.support.unit import skipIf
from tests.support.helpers import (
destructiveTest,
requires_system_grains
)
import salt.utils
import salt.ext.six as six
@destructiveTest
@skipIf(salt.utils.is_windows(), 'minion is windows')
class PkgrepoTest(ModuleCase, SaltReturnAssertsMixin):
@requires_system_grains
def test_pkgrepo_01_managed(self, grains):
os_grain = self.run_function('grains.item', ['os'])['os']
os_release_info = tuple(self.run_function('grains.item', ['osrelease_info'])['osrelease_info'])
if os_grain == 'Ubuntu' and os_release_info >= (15, 10):
self.skipTest(
'The PPA used for this test does not exist for Ubuntu Wily'
' (15.10) and later.'
)
if grains['os_family'] == 'Debian':
try:
from aptsources import sourceslist
except ImportError:
self.skipTest(
'aptsources.sourceslist python module not found'
)
ret = self.run_function('state.sls', mods='pkgrepo.managed', timeout=120)
self.assertReturnNonEmptySaltType(ret)
for state_id, state_result in six.iteritems(ret):
self.assertSaltTrueReturn(dict([(state_id, state_result)]))
def test_pkgrepo_02_absent(self):
os_grain = self.run_function('grains.item', ['os'])['os']
os_release_info = tuple(self.run_function('grains.item', ['osrelease_info'])['osrelease_info'])
if os_grain == 'Ubuntu' and os_release_info >= (15, 10):
self.skipTest(
'The PPA used for this test does not exist for Ubuntu Wily'
' (15.10) and later.'
)
ret = self.run_function('state.sls', mods='pkgrepo.absent', timeout=120)
self.assertReturnNonEmptySaltType(ret)
for state_id, state_result in six.iteritems(ret):
self.assertSaltTrueReturn(dict([(state_id, state_result)]))
@requires_system_grains
def test_pkgrepo_03_with_comments(self, grains):
os_family = grains['os_family'].lower()
if os_family in ('redhat',):
kwargs = {
'name': 'examplerepo',
'baseurl': 'http://example.com/repo',
'enabled': False,
'comments': ['This is a comment']
}
elif os_family in ('debian',):
self.skipTest('Debian/Ubuntu test case needed')
else:
self.skipTest("No test case for os_family '{0}'".format(os_family))
try:
ret = self.run_state('pkgrepo.managed', **kwargs)
self.assertSaltTrueReturn(ret)
kwargs['comments'].append('This is another comment')
ret = self.run_state('pkgrepo.managed', **kwargs)
self.assertSaltTrueReturn(ret)
ret = ret[next(iter(ret))]
self.assertEqual(
ret['changes'],
{
'comments': {
'old': ['This is a comment'],
'new': ['This is a comment',
'This is another comment']
}
}
)
ret = self.run_state('pkgrepo.managed', **kwargs)
self.assertSaltTrueReturn(ret)
ret = ret[next(iter(ret))]
self.assertFalse(ret['changes'])
self.assertEqual(
ret['comment'],
"Package repo '{0}' already configured".format(kwargs['name'])
)
finally:
self.run_state('pkgrepo.absent', name=kwargs['name'])
| true | true |
f738086fbe7ae79039499e13f5fbfd89064aebe9 | 1,347 | py | Python | Array/Final450/Find_First_Second__Smallest_n_Largest/Find_First_And_Second_Smallest.py | prash-kr-meena/GoogleR | 27aca71e51cc2442e604e07ab00406a98d8d63a4 | [
"Apache-2.0"
] | null | null | null | Array/Final450/Find_First_Second__Smallest_n_Largest/Find_First_And_Second_Smallest.py | prash-kr-meena/GoogleR | 27aca71e51cc2442e604e07ab00406a98d8d63a4 | [
"Apache-2.0"
] | null | null | null | Array/Final450/Find_First_Second__Smallest_n_Largest/Find_First_And_Second_Smallest.py | prash-kr-meena/GoogleR | 27aca71e51cc2442e604e07ab00406a98d8d63a4 | [
"Apache-2.0"
] | null | null | null | from Utils.Array import input_array
"""
https://www.geeksforgeeks.org/to-find-smallest-and-second-smallest-element-in-an-array/
Find the smallest and second smallest elements in an array
The important part is handling the corner cases, like duplicates (which remain even if you sort)
Approach 1 : sorting O(n lg n) -- a sketch is included right below this docstring
Approach 2 : 1 Pass O(n) -- implemented in find_first_and_second_smallest() below
"""
def find_first_and_second_smallest(nums) -> (int, int):
first_smallest = second_smallest = float("inf")
for n in nums:
if n < first_smallest:
second_smallest = first_smallest
first_smallest = n
elif n < second_smallest and n != first_smallest: # to handle duplicate cases
second_smallest = n
if second_smallest == float("inf"):
print("There was no second smallest")
return first_smallest, None
else:
return first_smallest, second_smallest
if __name__ == "__main__":
array = input_array("List of integer numbers : ")
first, second = find_first_and_second_smallest(array)
print(first, second)
"""
------- Test cases -------
12 13 2 11 0 10
1 2 3 4 5 6 7 VVImp
7 7 7 7 7 7 7 Imp
3 2 2 1 1 2 3 v.v.v Imp: duplicated first and second smallest values
Needs the special `n != first_smallest` condition, otherwise first and second would both be 1
"""
| 29.282609 | 102 | 0.657016 | from Utils.Array import input_array
def find_first_and_second_smallest(nums) -> (int, int):
first_smallest = second_smallest = float("inf")
for n in nums:
if n < first_smallest:
second_smallest = first_smallest
first_smallest = n
elif n < second_smallest and n != first_smallest:
second_smallest = n
if second_smallest == float("inf"):
print("There was no second smallest")
return first_smallest, None
else:
return first_smallest, second_smallest
if __name__ == "__main__":
array = input_array("List of integer numbers : ")
first, second = find_first_and_second_smallest(array)
print(first, second)
| true | true |
f73808be94727f276d3c194e156d48a1d82053a8 | 798 | py | Python | submitit/__init__.py | RudyChin/submitit | 51c761f64f2aa9b4d72f78722297370325de8aed | [
"MIT"
] | null | null | null | submitit/__init__.py | RudyChin/submitit | 51c761f64f2aa9b4d72f78722297370325de8aed | [
"MIT"
] | null | null | null | submitit/__init__.py | RudyChin/submitit | 51c761f64f2aa9b4d72f78722297370325de8aed | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# allow explicit reimports (mypy) by renaming all imports
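# (editor's note: under mypy's no-implicit-reexport mode, the ``import X
# as X`` form marks X as an intentional public re-export)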
from . import helpers as helpers
from .auto.auto import AutoExecutor as AutoExecutor
from .core.core import Executor as Executor
from .core.core import Job as Job
from .core.job_environment import JobEnvironment as JobEnvironment
from .local.debug import DebugExecutor as DebugExcecutor
from .local.debug import DebugJob as DebugJob
from .local.local import LocalExecutor as LocalExecutor
from .local.local import LocalJob as LocalJob
from .slurm.slurm import SlurmExecutor as SlurmExecutor
from .slurm.slurm import SlurmJob as SlurmJob
__version__ = "1.1.5"
| 38 | 66 | 0.807018 |
from . import helpers as helpers
from .auto.auto import AutoExecutor as AutoExecutor
from .core.core import Executor as Executor
from .core.core import Job as Job
from .core.job_environment import JobEnvironment as JobEnvironment
from .local.debug import DebugExecutor as DebugExcecutor
from .local.debug import DebugJob as DebugJob
from .local.local import LocalExecutor as LocalExecutor
from .local.local import LocalJob as LocalJob
from .slurm.slurm import SlurmExecutor as SlurmExecutor
from .slurm.slurm import SlurmJob as SlurmJob
__version__ = "1.1.5"
| true | true |
f73808f59ce129ee0b97071847600c1c131487ab | 172 | py | Python | lib/__init__.py | johnny5550822/gdax-army | 65e3719561f4fe125d9d1fc2ca9cd4c8e82a66a5 | [
"MIT"
] | 1 | 2018-02-21T03:34:04.000Z | 2018-02-21T03:34:04.000Z | lib/__init__.py | johnny5550822/gdax-army | 65e3719561f4fe125d9d1fc2ca9cd4c8e82a66a5 | [
"MIT"
] | null | null | null | lib/__init__.py | johnny5550822/gdax-army | 65e3719561f4fe125d9d1fc2ca9cd4c8e82a66a5 | [
"MIT"
] | null | null | null | from Strategier import Strategier
from BuyStrategier import BuyStrategier
from SellStrategier import SellStrategier
from GdaxArmy import GdaxArmy
from Trader import Trader
| 28.666667 | 41 | 0.883721 | from Strategier import Strategier
from BuyStrategier import BuyStrategier
from SellStrategier import SellStrategier
from GdaxArmy import GdaxArmy
from Trader import Trader
| true | true |
f738096054b08e604bf5d2d8338abf0c81109afd | 5,795 | py | Python | PermutationImportance/abstract_runner.py | gelijergensen/PermutationImportance | 7a09a407e42745c223055e0597c5226ff64b2f3c | [
"MIT"
] | 4 | 2019-02-01T17:49:14.000Z | 2020-06-25T15:09:56.000Z | PermutationImportance/abstract_runner.py | gelijergensen/PermutationImportance | 7a09a407e42745c223055e0597c5226ff64b2f3c | [
"MIT"
] | 42 | 2018-09-27T19:35:32.000Z | 2020-10-09T17:56:57.000Z | PermutationImportance/abstract_runner.py | gelijergensen/PermutationImportance | 7a09a407e42745c223055e0597c5226ff64b2f3c | [
"MIT"
] | 4 | 2018-09-27T19:34:33.000Z | 2021-02-12T19:41:31.000Z | """The general algorithm for all of the data-based variable importance methods
is the same, regardless of whether the method is Sequential Selection or
Permutation Importance or something else. This is represented in the
``abstract_variable_importance`` function. All of the different methods we
provide use this function under the hood and the only difference between them is
the ``selection_strategy`` object, which is detailed in
:mod:`PermutationImportance.selection_strategies`. Typically, you will not need
to use this method but can instead use one of the methods imported directly into
the top package of **PermutationImportance**.
If you wish to implement your own variable importance method, you will need to
devise your own ``selection_strategy``. We recommend using
:mod:`PermutationImportance.selection_strategies` as a template for implementing
your own variable importance method."""
import numpy as np
import multiprocessing as mp
from .data_verification import verify_data, determine_variable_names
from .multiprocessing_utils import pool_imap_unordered
from .result import ImportanceResult
from .scoring_strategies import verify_scoring_strategy
from .utils import add_ranks_to_dict, get_data_subset
def abstract_variable_importance(training_data, scoring_data, scoring_fn, scoring_strategy, selection_strategy, variable_names=None, nimportant_vars=None, method=None, njobs=1):
"""Performs an abstract variable importance over data given a particular
set of functions for scoring, determining optimal variables, and selecting
data
:param training_data: a 2-tuple ``(inputs, outputs)`` for training in the
``scoring_fn``
:param scoring_data: a 2-tuple ``(inputs, outputs)`` for scoring in the
``scoring_fn``
:param scoring_fn: a function to be used for scoring. Should be of the form
``(training_data, scoring_data) -> some_value``
    :param scoring_strategy: a function to be used for determining optimal
        variables. Should be of the form ``([some_value]) -> index``
    :param selection_strategy: an object which, when called as
        ``selection_strategy(training_data, scoring_data, num_vars,
        important_vars)``, iterates over triples
        ``(variable, training_data, scoring_data)``. Typically a
        :class:`PermutationImportance.selection_strategies.SelectionStrategy`
:param variable_names: an optional list for variable names. If not given,
will use names of columns of data (if pandas dataframe) or column
indices
:param nimportant_vars: number of variables to compute importance for.
Defaults to all variables
:param method: a string for the name of the method used. Defaults to the
name of the ``selection_strategy`` if not given
:param njobs: an integer for the number of threads to use. If negative, will
use ``num_cpus + njobs``. Defaults to 1
:returns: :class:`PermutationImportance.result.ImportanceResult` object
which contains the results for each run
"""
training_data = verify_data(training_data)
scoring_data = verify_data(scoring_data)
scoring_strategy = verify_scoring_strategy(scoring_strategy)
variable_names = determine_variable_names(scoring_data, variable_names)
nimportant_vars = len(
variable_names) if nimportant_vars is None else nimportant_vars
method = getattr(selection_strategy, "name", getattr(
selection_strategy, "__name__")) if method is None else method
njobs = mp.cpu_count() + njobs if njobs <= 0 else njobs
important_vars = list()
num_vars = len(variable_names)
# Compute the original score over all the data
original_score = scoring_fn(training_data, scoring_data)
result_obj = ImportanceResult(method, variable_names, original_score)
for _ in range(nimportant_vars):
selection_iter = selection_strategy(
training_data, scoring_data, num_vars, important_vars)
if njobs == 1:
result = _singlethread_iteration(
selection_iter, scoring_fn)
else:
result = _multithread_iteration(
selection_iter, scoring_fn, njobs)
next_result = add_ranks_to_dict(
result, variable_names, scoring_strategy)
best_var = min(
next_result.keys(), key=lambda key: next_result[key][0])
best_index = np.flatnonzero(variable_names == best_var)[0]
result_obj.add_new_results(
next_result, next_important_variable=best_var)
important_vars.append(best_index)
return result_obj
def _singlethread_iteration(selection_iterator, scoring_fn):
"""Handles a single pass of the abstract variable importance algorithm,
assuming a single worker thread
:param selection_iterator: an iterator which yields triples
``(variable, training_data, scoring_data)``. Typically a
:class:`PermutationImportance.selection_strategies.SelectionStrategy`
:param scoring_fn: a function to be used for scoring. Should be of the form
``(training_data, scoring_data) -> float``
:returns: a dict of ``{var: score}``
"""
result = dict()
for var, training_data, scoring_data in selection_iterator:
score = scoring_fn(training_data, scoring_data)
result[var] = score
return result
def _multithread_iteration(selection_iterator, scoring_fn, njobs):
"""Handles a single pass of the abstract variable importance algorithm using
multithreading
:param selection_iterator: an iterator which yields triples
``(variable, training_data, scoring_data)``. Typically a
:class:`PermutationImportance.selection_strategies.SelectionStrategy`
:param scoring_fn: a function to be used for scoring. Should be of the form
``(training_data, scoring_data) -> float``
:param num_jobs: number of processes to use
:returns: a dict of ``{var: score}``
"""
result = dict()
for index, score in pool_imap_unordered(scoring_fn, selection_iterator, njobs):
result[index] = score
return result
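# --- Editor's illustration (not part of the original module) ----------------
# The module docstring recommends devising your own ``selection_strategy``
# for a custom variable-importance method. A strategy is invoked as
# ``selection_strategy(training_data, scoring_data, num_vars, important_vars)``
# and must iterate over ``(variable, training_data, scoring_data)`` triples,
# exactly as ``_singlethread_iteration`` above expects. This hypothetical
# generator is the simplest possible strategy: it yields the data unchanged
# for every variable not yet selected, so each candidate is just rescored.
def _example_identity_strategy(training_data, scoring_data, num_vars,
                               important_vars):
    for var in range(num_vars):
        if var in important_vars:
            continue  # skip variables selected in earlier rounds
        yield var, training_data, scoring_data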
| 47.113821 | 177 | 0.734081 |
import numpy as np
import multiprocessing as mp
from .data_verification import verify_data, determine_variable_names
from .multiprocessing_utils import pool_imap_unordered
from .result import ImportanceResult
from .scoring_strategies import verify_scoring_strategy
from .utils import add_ranks_to_dict, get_data_subset
def abstract_variable_importance(training_data, scoring_data, scoring_fn, scoring_strategy, selection_strategy, variable_names=None, nimportant_vars=None, method=None, njobs=1):
training_data = verify_data(training_data)
scoring_data = verify_data(scoring_data)
scoring_strategy = verify_scoring_strategy(scoring_strategy)
variable_names = determine_variable_names(scoring_data, variable_names)
nimportant_vars = len(
variable_names) if nimportant_vars is None else nimportant_vars
method = getattr(selection_strategy, "name", getattr(
selection_strategy, "__name__")) if method is None else method
njobs = mp.cpu_count() + njobs if njobs <= 0 else njobs
important_vars = list()
num_vars = len(variable_names)
original_score = scoring_fn(training_data, scoring_data)
result_obj = ImportanceResult(method, variable_names, original_score)
for _ in range(nimportant_vars):
selection_iter = selection_strategy(
training_data, scoring_data, num_vars, important_vars)
if njobs == 1:
result = _singlethread_iteration(
selection_iter, scoring_fn)
else:
result = _multithread_iteration(
selection_iter, scoring_fn, njobs)
next_result = add_ranks_to_dict(
result, variable_names, scoring_strategy)
best_var = min(
next_result.keys(), key=lambda key: next_result[key][0])
best_index = np.flatnonzero(variable_names == best_var)[0]
result_obj.add_new_results(
next_result, next_important_variable=best_var)
important_vars.append(best_index)
return result_obj
def _singlethread_iteration(selection_iterator, scoring_fn):
result = dict()
for var, training_data, scoring_data in selection_iterator:
score = scoring_fn(training_data, scoring_data)
result[var] = score
return result
def _multithread_iteration(selection_iterator, scoring_fn, njobs):
result = dict()
for index, score in pool_imap_unordered(scoring_fn, selection_iterator, njobs):
result[index] = score
return result
| true | true |
f7380aec78c019b02b31fd16c23be4c37994a3f4 | 31,019 | py | Python | gluon/contrib/redis_scheduler.py | oscarfonts/web2py | a18e0e489fe7a770c62fca510a4299886b0a9bb7 | [
"BSD-3-Clause"
] | null | null | null | gluon/contrib/redis_scheduler.py | oscarfonts/web2py | a18e0e489fe7a770c62fca510a4299886b0a9bb7 | [
"BSD-3-Clause"
] | null | null | null | gluon/contrib/redis_scheduler.py | oscarfonts/web2py | a18e0e489fe7a770c62fca510a4299886b0a9bb7 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Created by niphlod@gmail.com
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Scheduler with redis backend
---------------------------------
"""
import os
import time
import socket
import datetime
import logging
from json import loads, dumps
from gluon.utils import web2py_uuid
from gluon.storage import Storage
from gluon.scheduler import *
from gluon.scheduler import _decode_dict
from gluon.contrib.redis_utils import RWatchError
USAGE = """
## Example
For any existing app
Create File: app/models/scheduler.py ======
from gluon.contrib.redis_utils import RConn
from gluon.contrib.redis_scheduler import RScheduler
def demo1(*args,**vars):
print('you passed args=%s and vars=%s' % (args, vars))
return 'done!'
def demo2():
1/0
rconn = RConn()
mysched = RScheduler(db, dict(demo1=demo1,demo2=demo2), ...., redis_conn=rconn)
## run worker nodes with:
cd web2py
python web2py.py -K app
"""
path = os.getcwd()
if 'WEB2PY_PATH' not in os.environ:
os.environ['WEB2PY_PATH'] = path
IDENTIFIER = "%s#%s" % (socket.gethostname(), os.getpid())
logger = logging.getLogger('web2py.scheduler.%s' % IDENTIFIER)
POLLING = 'POLLING'
class RScheduler(Scheduler):
def __init__(self, db, tasks=None, migrate=True,
worker_name=None, group_names=None, heartbeat=HEARTBEAT,
max_empty_runs=0, discard_results=False, utc_time=False,
redis_conn=None, mode=1):
"""
Highly-experimental coordination with redis
Takes all args from Scheduler except redis_conn which
must be something closer to a StrictRedis instance.
My only regret - and the reason why I kept this under the hood for a
while - is that it's hard to hook up in web2py to something happening
        right after the commit to a table, which would enable this version of the
scheduler to process "immediate" tasks right away instead of waiting a
few seconds (see FIXME in queue_task())
mode is reserved for future usage patterns.
Right now it moves the coordination (which is the most intensive
routine in the scheduler in matters of IPC) of workers to redis.
I'd like to have incrementally redis-backed modes of operations,
such as e.g.:
- 1: IPC through redis (which is the current implementation)
- 2: Store task results in redis (which will relieve further pressure
from the db leaving the scheduler_run table empty and possibly
keep things smooth as tasks results can be set to expire
after a bit of time)
- 3: Move all the logic for storing and queueing tasks to redis
itself - which means no scheduler_task usage too - and use
the database only as an historical record-bookkeeping
(e.g. for reporting)
As usual, I'm eager to see your comments.
"""
Scheduler.__init__(self, db, tasks=tasks, migrate=migrate,
worker_name=worker_name, group_names=group_names,
heartbeat=heartbeat, max_empty_runs=max_empty_runs,
discard_results=discard_results, utc_time=utc_time)
self.r_server = redis_conn
from gluon import current
self._application = current.request.application or 'appname'
def _nkey(self, key):
"""Helper to restrict all keys to a namespace and track them."""
prefix = 'w2p:rsched:%s' % self._application
allkeys = '%s:allkeys' % prefix
newkey = "%s:%s" % (prefix, key)
self.r_server.sadd(allkeys, newkey)
return newkey
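    # e.g. _nkey('queued:main') -> 'w2p:rsched:<app>:queued:main', with the
    # key also recorded in '<prefix>:allkeys' so prune_all() can find it
    # later ('main' is an illustrative group name)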
def prune_all(self):
"""Global housekeeping."""
all_keys = self._nkey('allkeys')
with self.r_server.pipeline() as pipe:
while True:
try:
pipe.watch('PRUNE_ALL')
while True:
k = pipe.spop(all_keys)
if k is None:
break
pipe.delete(k)
pipe.execute()
break
except RWatchError:
time.sleep(0.1)
continue
def dt2str(self, value):
return value.strftime('%Y-%m-%d %H:%M:%S')
def str2date(self, value):
return datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
def send_heartbeat(self, counter):
"""
        Worker coordination in redis.
        It has evolved into something that is not that easy.
        Here we try to do what we need in a single transaction,
        and retry that transaction if something goes wrong.
"""
with self.r_server.pipeline() as pipe:
while True:
try:
pipe.watch('SEND_HEARTBEAT')
self.inner_send_heartbeat(counter, pipe)
pipe.execute()
self.adj_hibernation()
self.sleep()
break
except RWatchError:
time.sleep(0.1)
continue
def inner_send_heartbeat(self, counter, pipe):
"""
Do a few things in the "maintenance" thread.
Specifically:
- registers the workers
- accepts commands sent to workers (KILL, TERMINATE, PICK, DISABLED, etc)
- adjusts sleep
- saves stats
- elects master
- does "housecleaning" for dead workers
- triggers tasks assignment
"""
r_server = pipe
status_keyset = self._nkey('worker_statuses')
status_key = self._nkey('worker_status:%s' % (self.worker_name))
now = self.now()
mybackedstatus = r_server.hgetall(status_key)
if not mybackedstatus:
r_server.hmset(
status_key,
dict(
status=ACTIVE, worker_name=self.worker_name,
first_heartbeat=self.dt2str(now),
last_heartbeat=self.dt2str(now),
group_names=dumps(self.group_names), is_ticker=False,
worker_stats=dumps(self.w_stats))
)
r_server.sadd(status_keyset, status_key)
if not self.w_stats.status == POLLING:
self.w_stats.status = ACTIVE
self.w_stats.sleep = self.heartbeat
mybackedstatus = ACTIVE
else:
mybackedstatus = mybackedstatus['status']
if mybackedstatus == DISABLED:
# keep sleeping
self.w_stats.status = DISABLED
r_server.hmset(
status_key,
dict(last_heartbeat=self.dt2str(now),
worker_stats=dumps(self.w_stats))
)
elif mybackedstatus == TERMINATE:
self.w_stats.status = TERMINATE
logger.debug("Waiting to terminate the current task")
self.give_up()
elif mybackedstatus == KILL:
self.w_stats.status = KILL
self.die()
else:
if mybackedstatus == STOP_TASK:
logger.info('Asked to kill the current task')
self.terminate_process()
logger.info('........recording heartbeat (%s)',
self.w_stats.status)
r_server.hmset(
status_key,
dict(
last_heartbeat=self.dt2str(now), status=ACTIVE,
worker_stats=dumps(self.w_stats)
)
)
            # new routine: set a TTL so a dead worker's status key expires on its own
r_server.expire(status_key, self.heartbeat * 3 * 15)
self.w_stats.sleep = self.heartbeat # re-activating the process
if self.w_stats.status not in (RUNNING, POLLING):
self.w_stats.status = ACTIVE
self.do_assign_tasks = False
if counter % 5 == 0 or mybackedstatus == PICK:
try:
logger.info(
' freeing workers that have not sent heartbeat')
registered_workers = r_server.smembers(status_keyset)
allkeys = self._nkey('allkeys')
for worker in registered_workers:
w = r_server.hgetall(worker)
w = Storage(w)
if not w:
r_server.srem(status_keyset, worker)
logger.info('removing %s from %s', worker, allkeys)
r_server.srem(allkeys, worker)
continue
try:
self.is_a_ticker = self.being_a_ticker(pipe)
except:
pass
if self.w_stats.status in (ACTIVE, POLLING):
self.do_assign_tasks = True
if self.is_a_ticker and self.do_assign_tasks:
# I'm a ticker, and 5 loops passed without reassigning tasks,
# let's do that and loop again
if not self.db_thread:
logger.debug('thread building own DAL object')
self.db_thread = DAL(
self.db._uri, folder=self.db._adapter.folder)
self.define_tables(self.db_thread, migrate=False)
db = self.db_thread
self.wrapped_assign_tasks(db)
return None
except:
logger.error('Error assigning tasks')
def being_a_ticker(self, pipe):
"""
Elects a ticker.
This is slightly more convoluted than the original
        but is far more efficient
"""
r_server = pipe
status_keyset = self._nkey('worker_statuses')
registered_workers = r_server.smembers(status_keyset)
ticker = None
all_active = []
all_workers = []
for worker in registered_workers:
w = r_server.hgetall(worker)
if w['worker_name'] != self.worker_name and w['status'] == ACTIVE:
all_active.append(w)
if w['is_ticker'] == 'True' and ticker is None:
ticker = w
all_workers.append(w)
not_busy = self.w_stats.status in (ACTIVE, POLLING)
if not ticker:
if not_busy:
# only if this worker isn't busy, otherwise wait for a free one
for worker in all_workers:
key = self._nkey('worker_status:%s' % worker['worker_name'])
if worker['worker_name'] == self.worker_name:
r_server.hset(key, 'is_ticker', True)
else:
r_server.hset(key, 'is_ticker', False)
logger.info("TICKER: I'm a ticker")
else:
# giving up, only if I'm not alone
if len(all_active) > 1:
key = self._nkey('worker_status:%s' % (self.worker_name))
r_server.hset(key, 'is_ticker', False)
else:
not_busy = True
return not_busy
else:
logger.info(
"%s is a ticker, I'm a poor worker" % ticker['worker_name'])
return False
def assign_tasks(self, db):
"""
The real beauty.
We don't need to ASSIGN tasks, we just put
them into the relevant queue
"""
st, sd = db.scheduler_task, db.scheduler_task_deps
r_server = self.r_server
now = self.now()
status_keyset = self._nkey('worker_statuses')
with r_server.pipeline() as pipe:
while True:
try:
# making sure we're the only one doing the job
pipe.watch('ASSIGN_TASKS')
registered_workers = pipe.smembers(status_keyset)
all_workers = []
for worker in registered_workers:
w = pipe.hgetall(worker)
if w['status'] == ACTIVE:
all_workers.append(Storage(w))
pipe.execute()
break
except RWatchError:
time.sleep(0.1)
continue
# build workers as dict of groups
wkgroups = {}
for w in all_workers:
group_names = loads(w.group_names)
for gname in group_names:
if gname not in wkgroups:
wkgroups[gname] = dict(
workers=[{'name': w.worker_name, 'c': 0}])
else:
wkgroups[gname]['workers'].append(
{'name': w.worker_name, 'c': 0})
# set queued tasks that expired between "runs" (i.e., you turned off
# the scheduler): then it wasn't expired, but now it is
db(
(st.status.belongs((QUEUED, ASSIGNED))) &
(st.stop_time < now)
).update(status=EXPIRED)
# calculate dependencies
deps_with_no_deps = db(
(sd.can_visit == False) &
(~sd.task_child.belongs(
db(sd.can_visit == False)._select(sd.task_parent)
)
)
)._select(sd.task_child)
no_deps = db(
(st.status.belongs((QUEUED, ASSIGNED))) &
(
(sd.id == None) | (st.id.belongs(deps_with_no_deps))
)
)._select(st.id, distinct=True, left=sd.on(
(st.id == sd.task_parent) &
(sd.can_visit == False)
)
)
all_available = db(
(st.status.belongs((QUEUED, ASSIGNED))) &
(st.next_run_time <= now) &
(st.enabled == True) &
(st.id.belongs(no_deps))
)
limit = len(all_workers) * (50 / (len(wkgroups) or 1))
# let's freeze it up
db.commit()
x = 0
r_server = self.r_server
for group in wkgroups.keys():
queued_list = self._nkey('queued:%s' % group)
queued_set = self._nkey('queued_set:%s' % group)
            # if tasks are already running, let's not assign them again
running_list = self._nkey('running:%s' % group)
while True:
                # the joys of rpoplpush!
t = r_server.rpoplpush(running_list, queued_list)
if not t:
# no more
break
r_server.sadd(queued_set, t)
tasks = all_available(st.group_name == group).select(
limitby=(0, limit), orderby = st.next_run_time)
# put tasks in the processing list
for task in tasks:
x += 1
gname = task.group_name
if r_server.sismember(queued_set, task.id):
                    # already queued, so we don't put it on the list again
continue
r_server.sadd(queued_set, task.id)
r_server.lpush(queued_list, task.id)
d = dict(status=QUEUED)
if not task.task_name:
d['task_name'] = task.function_name
db(
(st.id == task.id) &
(st.status.belongs((QUEUED, ASSIGNED)))
).update(**d)
db.commit()
# I didn't report tasks but I'm working nonetheless!!!!
if x > 0:
self.w_stats.empty_runs = 0
self.w_stats.queue = x
self.w_stats.distribution = wkgroups
self.w_stats.workers = len(all_workers)
# I'll be greedy only if tasks queued are equal to the limit
# (meaning there could be others ready to be queued)
self.greedy = x >= limit
logger.info('TICKER: workers are %s', len(all_workers))
logger.info('TICKER: tasks are %s', x)
def pop_task(self, db):
"""Lift a task off a queue."""
r_server = self.r_server
st = self.db.scheduler_task
task = None
# ready to process something
for group in self.group_names:
queued_set = self._nkey('queued_set:%s' % group)
queued_list = self._nkey('queued:%s' % group)
running_list = self._nkey('running:%s' % group)
running_dict = self._nkey('running_dict:%s' % group)
self.w_stats.status = POLLING
            # polling for up to 1 minute in total: the timeout is split
            # across groups, so overall polling still adds up to ~1 minute
logger.debug(' polling on %s', group)
task_id = r_server.brpoplpush(queued_list, running_list,
timeout=60 / len(self.group_names))
logger.debug(' finished polling')
self.w_stats.status = ACTIVE
if task_id:
r_server.hset(running_dict, task_id, self.worker_name)
r_server.srem(queued_set, task_id)
task = db(
(st.id == task_id) &
(st.status == QUEUED)
).select().first()
if not task:
r_server.lrem(running_list, 0, task_id)
r_server.hdel(running_dict, task_id)
r_server.lrem(queued_list, 0, task_id)
logger.error("we received a task that isn't there (%s)",
task_id)
return None
break
now = self.now()
if task:
task.update_record(status=RUNNING, last_run_time=now)
            # no one will touch my task!
db.commit()
logger.debug(' work to do %s', task.id)
else:
logger.info('nothing to do')
return None
times_run = task.times_run + 1
if not task.prevent_drift:
next_run_time = task.last_run_time + datetime.timedelta(
seconds=task.period
)
else:
# calc next_run_time based on available slots
# see #1191
next_run_time = task.start_time
secondspassed = self.total_seconds(now - next_run_time)
steps = secondspassed // task.period + 1
next_run_time += datetime.timedelta(seconds=task.period * steps)
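            # worked example: start_time 00:00, period 600s, now 00:25:00 ->
            # secondspassed=1500, steps=1500//600+1=3, next run at 00:30,
            # i.e. the next free slot on the original schedule grid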
if times_run < task.repeats or task.repeats == 0:
# need to run (repeating task)
run_again = True
else:
# no need to run again
run_again = False
run_id = 0
while True and not self.discard_results:
logger.debug(' new scheduler_run record')
try:
run_id = db.scheduler_run.insert(
task_id=task.id,
status=RUNNING,
start_time=now,
worker_name=self.worker_name)
db.commit()
break
except:
time.sleep(0.5)
db.rollback()
logger.info('new task %(id)s "%(task_name)s"'
' %(application_name)s.%(function_name)s' % task)
return Task(
app=task.application_name,
function=task.function_name,
timeout=task.timeout,
args=task.args, # in json
vars=task.vars, # in json
task_id=task.id,
run_id=run_id,
run_again=run_again,
next_run_time=next_run_time,
times_run=times_run,
stop_time=task.stop_time,
retry_failed=task.retry_failed,
times_failed=task.times_failed,
sync_output=task.sync_output,
uuid=task.uuid,
group_name=task.group_name)
def report_task(self, task, task_report):
"""
Override.
        Needed only because we must pop the task off the
        running tasks list
"""
r_server = self.r_server
db = self.db
now = self.now()
st = db.scheduler_task
sr = db.scheduler_run
if not self.discard_results:
if task_report.result != 'null' or task_report.tb:
# result is 'null' as a string if task completed
# if it's stopped it's None as NoneType, so we record
# the STOPPED "run" anyway
logger.debug(' recording task report in db (%s)',
task_report.status)
db(sr.id == task.run_id).update(
status=task_report.status,
stop_time=now,
run_result=task_report.result,
run_output=task_report.output,
traceback=task_report.tb)
else:
logger.debug(' deleting task report in db because of no result')
db(sr.id == task.run_id).delete()
# if there is a stop_time and the following run would exceed it
is_expired = (task.stop_time and
task.next_run_time > task.stop_time and
True or False)
status = (task.run_again and is_expired and EXPIRED or
task.run_again and not is_expired and
QUEUED or COMPLETED)
if task_report.status == COMPLETED:
# assigned calculations
d = dict(status=status,
next_run_time=task.next_run_time,
times_run=task.times_run,
times_failed=0,
assigned_worker_name=self.worker_name
)
db(st.id == task.task_id).update(**d)
if status == COMPLETED:
self.update_dependencies(db, task.task_id)
else:
st_mapping = {'FAILED': 'FAILED',
'TIMEOUT': 'TIMEOUT',
'STOPPED': 'FAILED'}[task_report.status]
status = (task.retry_failed and
task.times_failed < task.retry_failed and
QUEUED or task.retry_failed == -1 and
QUEUED or st_mapping)
db(st.id == task.task_id).update(
times_failed=st.times_failed + 1,
next_run_time=task.next_run_time,
status=status,
assigned_worker_name=self.worker_name
)
logger.info('task completed (%s)', task_report.status)
running_list = self._nkey('running:%s' % task.group_name)
running_dict = self._nkey('running_dict:%s' % task.group_name)
r_server.lrem(running_list, 0, task.task_id)
r_server.hdel(running_dict, task.task_id)
def wrapped_pop_task(self):
"""Commodity function to call `pop_task` and trap exceptions.
        If an exception is raised, it is assumed to have happened because of
        database contention, and `pop_task` is retried after 0.5 seconds
"""
db = self.db
        db.commit()  # another nifty db.commit() only for MySQL
x = 0
while x < 10:
try:
rtn = self.pop_task(db)
return rtn
break
            # this is here to "interrupt" any brpoplpush op easily
except KeyboardInterrupt:
self.give_up()
break
except:
self.w_stats.errors += 1
db.rollback()
logger.error(' error popping tasks')
x += 1
time.sleep(0.5)
def get_workers(self, only_ticker=False):
"""Return a dict holding worker_name : {**columns}
representing all "registered" workers.
only_ticker returns only the worker running as a TICKER,
if there is any
"""
r_server = self.r_server
status_keyset = self._nkey('worker_statuses')
registered_workers = r_server.smembers(status_keyset)
all_workers = {}
for worker in registered_workers:
w = r_server.hgetall(worker)
w = Storage(w)
if not w:
continue
all_workers[w.worker_name] = Storage(
status=w.status,
first_heartbeat=self.str2date(w.first_heartbeat),
last_heartbeat=self.str2date(w.last_heartbeat),
group_names=loads(w.group_names, object_hook=_decode_dict),
is_ticker=w.is_ticker == 'True' and True or False,
worker_stats=loads(w.worker_stats, object_hook=_decode_dict)
)
if only_ticker:
for k, v in all_workers.iteritems():
if v['is_ticker']:
return {k: v}
return {}
return all_workers
def set_worker_status(self, group_names=None, action=ACTIVE,
exclude=None, limit=None, worker_name=None):
"""Internal function to set worker's status"""
r_server = self.r_server
all_workers = self.get_workers()
if not group_names:
group_names = self.group_names
elif isinstance(group_names, str):
group_names = [group_names]
exclusion = exclude and exclude.append(action) or [action]
workers = []
if worker_name is not None:
if worker_name in all_workers.keys():
workers = [worker_name]
else:
for k, v in all_workers.iteritems():
if v.status not in exclusion and set(group_names) & set(v.group_names):
workers.append(k)
if limit and worker_name is None:
workers = workers[:limit]
if workers:
with r_server.pipeline() as pipe:
while True:
try:
pipe.watch('SET_WORKER_STATUS')
for w in workers:
worker_key = self._nkey('worker_status:%s' % w)
pipe.hset(worker_key, 'status', action)
pipe.execute()
break
except RWatchError:
time.sleep(0.1)
continue
def queue_task(self, function, pargs=[], pvars={}, **kwargs):
"""
FIXME: immediate should put item in queue. The hard part is
that currently there are no hooks happening at post-commit time
Queue tasks. This takes care of handling the validation of all
parameters
Args:
function: the function (anything callable with a __name__)
pargs: "raw" args to be passed to the function. Automatically
jsonified.
pvars: "raw" kwargs to be passed to the function. Automatically
jsonified
kwargs: all the parameters available (basically, every
`scheduler_task` column). If args and vars are here, they should
be jsonified already, and they will override pargs and pvars
Returns:
a dict just as a normal validate_and_insert(), plus a uuid key
holding the uuid of the queued task. If validation is not passed
            (i.e. some parameters are invalid) both id and uuid will be None,
and you'll get an "error" dict holding the errors found.
"""
if hasattr(function, '__name__'):
function = function.__name__
targs = 'args' in kwargs and kwargs.pop('args') or dumps(pargs)
tvars = 'vars' in kwargs and kwargs.pop('vars') or dumps(pvars)
tuuid = 'uuid' in kwargs and kwargs.pop('uuid') or web2py_uuid()
tname = 'task_name' in kwargs and kwargs.pop('task_name') or function
immediate = 'immediate' in kwargs and kwargs.pop('immediate') or None
rtn = self.db.scheduler_task.validate_and_insert(
function_name=function,
task_name=tname,
args=targs,
vars=tvars,
uuid=tuuid,
**kwargs)
if not rtn.errors:
rtn.uuid = tuuid
if immediate:
r_server = self.r_server
ticker = self.get_workers(only_ticker=True)
if ticker.keys():
ticker = ticker.keys()[0]
with r_server.pipeline() as pipe:
while True:
try:
pipe.watch('SET_WORKER_STATUS')
worker_key = self._nkey('worker_status:%s' % ticker)
pipe.hset(worker_key, 'status', 'PICK')
pipe.execute()
break
except RWatchError:
time.sleep(0.1)
continue
else:
rtn.uuid = None
return rtn
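
    # Hypothetical usage (names come from the USAGE block at the top of this
    # file; parameters mirror the docstring above):
    #   rtn = mysched.queue_task(demo1, pargs=[1, 2], pvars={'x': 3},
    #                            timeout=60, immediate=True)
    #   rtn.uuid  # uuid of the queued task, or None if validation failed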
def stop_task(self, ref):
"""Shortcut for task termination.
If the task is RUNNING it will terminate it, meaning that status
will be set as FAILED.
If the task is QUEUED, its stop_time will be set as to "now",
the enabled flag will be set to False, and the status to STOPPED
Args:
ref: can be
- an integer : lookup will be done by scheduler_task.id
- a string : lookup will be done by scheduler_task.uuid
Returns:
- 1 if task was stopped (meaning an update has been done)
- None if task was not found, or if task was not RUNNING or QUEUED
Note:
Experimental
"""
r_server = self.r_server
st = self.db.scheduler_task
if isinstance(ref, int):
q = st.id == ref
elif isinstance(ref, str):
q = st.uuid == ref
else:
raise SyntaxError(
"You can retrieve results only by id or uuid")
task = self.db(q).select(st.id, st.status, st.group_name)
task = task.first()
rtn = None
if not task:
return rtn
running_dict = self._nkey('running_dict:%s' % task.group_name)
if task.status == 'RUNNING':
worker_key = r_server.hget(running_dict, task.id)
worker_key = self._nkey('worker_status:%s' % (worker_key))
r_server.hset(worker_key, 'status', STOP_TASK)
elif task.status == 'QUEUED':
rtn = self.db(q).update(
stop_time=self.now(),
enabled=False,
status=STOPPED)
return rtn
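
# --- Editor's sketch (illustration only, not part of web2py) ----------------
# Every coordination step in RScheduler uses the same optimistic-locking
# idiom: WATCH a key, stage the commands, EXECUTE, and retry on RWatchError
# if another client touched the watched key in the meantime. Distilled, and
# assuming ``r_server`` is the StrictRedis-like connection used above:
def _watch_retry_pattern(r_server, watched_key, stage_commands):
    with r_server.pipeline() as pipe:
        while True:
            try:
                pipe.watch(watched_key)  # optimistic lock
                stage_commands(pipe)     # queue this transaction's commands
                pipe.execute()           # atomic apply; fails if key changed
                break
            except RWatchError:
                time.sleep(0.1)          # lost the race; back off and retry
                continue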
| 39.264557 | 87 | 0.526903 |
import os
import time
import socket
import datetime
import logging
from json import loads, dumps
from gluon.utils import web2py_uuid
from gluon.storage import Storage
from gluon.scheduler import *
from gluon.scheduler import _decode_dict
from gluon.contrib.redis_utils import RWatchError
USAGE = """
## Example
For any existing app
Create File: app/models/scheduler.py ======
from gluon.contrib.redis_utils import RConn
from gluon.contrib.redis_scheduler import RScheduler
def demo1(*args,**vars):
print('you passed args=%s and vars=%s' % (args, vars))
return 'done!'
def demo2():
1/0
rconn = RConn()
mysched = RScheduler(db, dict(demo1=demo1,demo2=demo2), ...., redis_conn=rconn)
## run worker nodes with:
cd web2py
python web2py.py -K app
"""
path = os.getcwd()
if 'WEB2PY_PATH' not in os.environ:
os.environ['WEB2PY_PATH'] = path
IDENTIFIER = "%s#%s" % (socket.gethostname(), os.getpid())
logger = logging.getLogger('web2py.scheduler.%s' % IDENTIFIER)
POLLING = 'POLLING'
class RScheduler(Scheduler):
def __init__(self, db, tasks=None, migrate=True,
worker_name=None, group_names=None, heartbeat=HEARTBEAT,
max_empty_runs=0, discard_results=False, utc_time=False,
redis_conn=None, mode=1):
Scheduler.__init__(self, db, tasks=tasks, migrate=migrate,
worker_name=worker_name, group_names=group_names,
heartbeat=heartbeat, max_empty_runs=max_empty_runs,
discard_results=discard_results, utc_time=utc_time)
self.r_server = redis_conn
from gluon import current
self._application = current.request.application or 'appname'
def _nkey(self, key):
prefix = 'w2p:rsched:%s' % self._application
allkeys = '%s:allkeys' % prefix
newkey = "%s:%s" % (prefix, key)
self.r_server.sadd(allkeys, newkey)
return newkey
def prune_all(self):
all_keys = self._nkey('allkeys')
with self.r_server.pipeline() as pipe:
while True:
try:
pipe.watch('PRUNE_ALL')
while True:
k = pipe.spop(all_keys)
if k is None:
break
pipe.delete(k)
pipe.execute()
break
except RWatchError:
time.sleep(0.1)
continue
def dt2str(self, value):
return value.strftime('%Y-%m-%d %H:%M:%S')
def str2date(self, value):
return datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
def send_heartbeat(self, counter):
with self.r_server.pipeline() as pipe:
while True:
try:
pipe.watch('SEND_HEARTBEAT')
self.inner_send_heartbeat(counter, pipe)
pipe.execute()
self.adj_hibernation()
self.sleep()
break
except RWatchError:
time.sleep(0.1)
continue
def inner_send_heartbeat(self, counter, pipe):
r_server = pipe
status_keyset = self._nkey('worker_statuses')
status_key = self._nkey('worker_status:%s' % (self.worker_name))
now = self.now()
mybackedstatus = r_server.hgetall(status_key)
if not mybackedstatus:
r_server.hmset(
status_key,
dict(
status=ACTIVE, worker_name=self.worker_name,
first_heartbeat=self.dt2str(now),
last_heartbeat=self.dt2str(now),
group_names=dumps(self.group_names), is_ticker=False,
worker_stats=dumps(self.w_stats))
)
r_server.sadd(status_keyset, status_key)
if not self.w_stats.status == POLLING:
self.w_stats.status = ACTIVE
self.w_stats.sleep = self.heartbeat
mybackedstatus = ACTIVE
else:
mybackedstatus = mybackedstatus['status']
if mybackedstatus == DISABLED:
self.w_stats.status = DISABLED
r_server.hmset(
status_key,
dict(last_heartbeat=self.dt2str(now),
worker_stats=dumps(self.w_stats))
)
elif mybackedstatus == TERMINATE:
self.w_stats.status = TERMINATE
logger.debug("Waiting to terminate the current task")
self.give_up()
elif mybackedstatus == KILL:
self.w_stats.status = KILL
self.die()
else:
if mybackedstatus == STOP_TASK:
logger.info('Asked to kill the current task')
self.terminate_process()
logger.info('........recording heartbeat (%s)',
self.w_stats.status)
r_server.hmset(
status_key,
dict(
last_heartbeat=self.dt2str(now), status=ACTIVE,
worker_stats=dumps(self.w_stats)
)
)
r_server.expire(status_key, self.heartbeat * 3 * 15)
self.w_stats.sleep = self.heartbeat
if self.w_stats.status not in (RUNNING, POLLING):
self.w_stats.status = ACTIVE
self.do_assign_tasks = False
if counter % 5 == 0 or mybackedstatus == PICK:
try:
logger.info(
' freeing workers that have not sent heartbeat')
registered_workers = r_server.smembers(status_keyset)
allkeys = self._nkey('allkeys')
for worker in registered_workers:
w = r_server.hgetall(worker)
w = Storage(w)
if not w:
r_server.srem(status_keyset, worker)
logger.info('removing %s from %s', worker, allkeys)
r_server.srem(allkeys, worker)
continue
try:
self.is_a_ticker = self.being_a_ticker(pipe)
except:
pass
if self.w_stats.status in (ACTIVE, POLLING):
self.do_assign_tasks = True
if self.is_a_ticker and self.do_assign_tasks:
                    # the ticker has tasks to assign: do that and loop again
if not self.db_thread:
logger.debug('thread building own DAL object')
self.db_thread = DAL(
self.db._uri, folder=self.db._adapter.folder)
self.define_tables(self.db_thread, migrate=False)
db = self.db_thread
self.wrapped_assign_tasks(db)
return None
except:
logger.error('Error assigning tasks')
def being_a_ticker(self, pipe):
r_server = pipe
status_keyset = self._nkey('worker_statuses')
registered_workers = r_server.smembers(status_keyset)
ticker = None
all_active = []
all_workers = []
for worker in registered_workers:
w = r_server.hgetall(worker)
if w['worker_name'] != self.worker_name and w['status'] == ACTIVE:
all_active.append(w)
if w['is_ticker'] == 'True' and ticker is None:
ticker = w
all_workers.append(w)
not_busy = self.w_stats.status in (ACTIVE, POLLING)
if not ticker:
if not_busy:
for worker in all_workers:
key = self._nkey('worker_status:%s' % worker['worker_name'])
if worker['worker_name'] == self.worker_name:
r_server.hset(key, 'is_ticker', True)
else:
r_server.hset(key, 'is_ticker', False)
logger.info("TICKER: I'm a ticker")
else:
if len(all_active) > 1:
key = self._nkey('worker_status:%s' % (self.worker_name))
r_server.hset(key, 'is_ticker', False)
else:
not_busy = True
return not_busy
else:
logger.info(
"%s is a ticker, I'm a poor worker" % ticker['worker_name'])
return False
def assign_tasks(self, db):
st, sd = db.scheduler_task, db.scheduler_task_deps
r_server = self.r_server
now = self.now()
status_keyset = self._nkey('worker_statuses')
with r_server.pipeline() as pipe:
while True:
try:
pipe.watch('ASSIGN_TASKS')
registered_workers = pipe.smembers(status_keyset)
all_workers = []
for worker in registered_workers:
w = pipe.hgetall(worker)
if w['status'] == ACTIVE:
all_workers.append(Storage(w))
pipe.execute()
break
except RWatchError:
time.sleep(0.1)
continue
# build workers as dict of groups
wkgroups = {}
for w in all_workers:
group_names = loads(w.group_names)
for gname in group_names:
if gname not in wkgroups:
wkgroups[gname] = dict(
workers=[{'name': w.worker_name, 'c': 0}])
else:
wkgroups[gname]['workers'].append(
{'name': w.worker_name, 'c': 0})
# set queued tasks that expired between "runs" (i.e., you turned off
# the scheduler): then it wasn't expired, but now it is
db(
(st.status.belongs((QUEUED, ASSIGNED))) &
(st.stop_time < now)
).update(status=EXPIRED)
deps_with_no_deps = db(
(sd.can_visit == False) &
(~sd.task_child.belongs(
db(sd.can_visit == False)._select(sd.task_parent)
)
)
)._select(sd.task_child)
no_deps = db(
(st.status.belongs((QUEUED, ASSIGNED))) &
(
(sd.id == None) | (st.id.belongs(deps_with_no_deps))
)
)._select(st.id, distinct=True, left=sd.on(
(st.id == sd.task_parent) &
(sd.can_visit == False)
)
)
all_available = db(
(st.status.belongs((QUEUED, ASSIGNED))) &
(st.next_run_time <= now) &
(st.enabled == True) &
(st.id.belongs(no_deps))
)
limit = len(all_workers) * (50 / (len(wkgroups) or 1))
db.commit()
x = 0
r_server = self.r_server
for group in wkgroups.keys():
queued_list = self._nkey('queued:%s' % group)
queued_set = self._nkey('queued_set:%s' % group)
            # if tasks are already running, let's not assign them again
running_list = self._nkey('running:%s' % group)
while True:
                # the joys of rpoplpush!
t = r_server.rpoplpush(running_list, queued_list)
if not t:
# no more
break
r_server.sadd(queued_set, t)
tasks = all_available(st.group_name == group).select(
limitby=(0, limit), orderby = st.next_run_time)
# put tasks in the processing list
for task in tasks:
x += 1
gname = task.group_name
if r_server.sismember(queued_set, task.id):
                    # already queued, so we don't put it on the list again
continue
r_server.sadd(queued_set, task.id)
r_server.lpush(queued_list, task.id)
d = dict(status=QUEUED)
if not task.task_name:
d['task_name'] = task.function_name
db(
(st.id == task.id) &
(st.status.belongs((QUEUED, ASSIGNED)))
).update(**d)
db.commit()
if x > 0:
self.w_stats.empty_runs = 0
self.w_stats.queue = x
self.w_stats.distribution = wkgroups
self.w_stats.workers = len(all_workers)
        # greedy only if queued tasks hit the limit (meaning there could be
        # others ready to be queued)
self.greedy = x >= limit
logger.info('TICKER: workers are %s', len(all_workers))
logger.info('TICKER: tasks are %s', x)
def pop_task(self, db):
r_server = self.r_server
st = self.db.scheduler_task
task = None
# ready to process something
for group in self.group_names:
queued_set = self._nkey('queued_set:%s' % group)
queued_list = self._nkey('queued:%s' % group)
running_list = self._nkey('running:%s' % group)
running_dict = self._nkey('running_dict:%s' % group)
self.w_stats.status = POLLING
            # polling for up to 1 minute in total: the timeout is split
            # across groups, so overall polling still adds up to ~1 minute
logger.debug(' polling on %s', group)
task_id = r_server.brpoplpush(queued_list, running_list,
timeout=60 / len(self.group_names))
logger.debug(' finished polling')
self.w_stats.status = ACTIVE
if task_id:
r_server.hset(running_dict, task_id, self.worker_name)
r_server.srem(queued_set, task_id)
task = db(
(st.id == task_id) &
(st.status == QUEUED)
).select().first()
if not task:
r_server.lrem(running_list, 0, task_id)
r_server.hdel(running_dict, task_id)
r_server.lrem(queued_list, 0, task_id)
logger.error("we received a task that isn't there (%s)",
task_id)
return None
break
now = self.now()
if task:
task.update_record(status=RUNNING, last_run_time=now)
db.commit()
logger.debug(' work to do %s', task.id)
else:
logger.info('nothing to do')
return None
times_run = task.times_run + 1
if not task.prevent_drift:
next_run_time = task.last_run_time + datetime.timedelta(
seconds=task.period
)
else:
next_run_time = task.start_time
secondspassed = self.total_seconds(now - next_run_time)
steps = secondspassed // task.period + 1
next_run_time += datetime.timedelta(seconds=task.period * steps)
if times_run < task.repeats or task.repeats == 0:
run_again = True
else:
run_again = False
run_id = 0
while True and not self.discard_results:
logger.debug(' new scheduler_run record')
try:
run_id = db.scheduler_run.insert(
task_id=task.id,
status=RUNNING,
start_time=now,
worker_name=self.worker_name)
db.commit()
break
except:
time.sleep(0.5)
db.rollback()
logger.info('new task %(id)s "%(task_name)s"'
' %(application_name)s.%(function_name)s' % task)
return Task(
app=task.application_name,
function=task.function_name,
timeout=task.timeout,
args=task.args,
vars=task.vars,
task_id=task.id,
run_id=run_id,
run_again=run_again,
next_run_time=next_run_time,
times_run=times_run,
stop_time=task.stop_time,
retry_failed=task.retry_failed,
times_failed=task.times_failed,
sync_output=task.sync_output,
uuid=task.uuid,
group_name=task.group_name)
def report_task(self, task, task_report):
r_server = self.r_server
db = self.db
now = self.now()
st = db.scheduler_task
sr = db.scheduler_run
if not self.discard_results:
if task_report.result != 'null' or task_report.tb:
logger.debug(' recording task report in db (%s)',
task_report.status)
db(sr.id == task.run_id).update(
status=task_report.status,
stop_time=now,
run_result=task_report.result,
run_output=task_report.output,
traceback=task_report.tb)
else:
logger.debug(' deleting task report in db because of no result')
db(sr.id == task.run_id).delete()
        is_expired = bool(task.stop_time and
                          task.next_run_time > task.stop_time)
        if task.run_again:
            status = EXPIRED if is_expired else QUEUED
        else:
            status = COMPLETED
if task_report.status == COMPLETED:
d = dict(status=status,
next_run_time=task.next_run_time,
times_run=task.times_run,
times_failed=0,
assigned_worker_name=self.worker_name
)
db(st.id == task.task_id).update(**d)
if status == COMPLETED:
self.update_dependencies(db, task.task_id)
else:
st_mapping = {'FAILED': 'FAILED',
'TIMEOUT': 'TIMEOUT',
'STOPPED': 'FAILED'}[task_report.status]
            if ((task.retry_failed and
                 task.times_failed < task.retry_failed) or
                    task.retry_failed == -1):
                status = QUEUED
            else:
                status = st_mapping
db(st.id == task.task_id).update(
times_failed=st.times_failed + 1,
next_run_time=task.next_run_time,
status=status,
assigned_worker_name=self.worker_name
)
logger.info('task completed (%s)', task_report.status)
running_list = self._nkey('running:%s' % task.group_name)
running_dict = self._nkey('running_dict:%s' % task.group_name)
r_server.lrem(running_list, 0, task.task_id)
r_server.hdel(running_dict, task.task_id)
def wrapped_pop_task(self):
db = self.db
db.commit()
x = 0
while x < 10:
try:
rtn = self.pop_task(db)
return rtn
except KeyboardInterrupt:
self.give_up()
break
except:
self.w_stats.errors += 1
db.rollback()
logger.error(' error popping tasks')
x += 1
time.sleep(0.5)
def get_workers(self, only_ticker=False):
r_server = self.r_server
status_keyset = self._nkey('worker_statuses')
registered_workers = r_server.smembers(status_keyset)
all_workers = {}
for worker in registered_workers:
w = r_server.hgetall(worker)
w = Storage(w)
if not w:
continue
all_workers[w.worker_name] = Storage(
status=w.status,
first_heartbeat=self.str2date(w.first_heartbeat),
last_heartbeat=self.str2date(w.last_heartbeat),
group_names=loads(w.group_names, object_hook=_decode_dict),
                is_ticker=(w.is_ticker == 'True'),
worker_stats=loads(w.worker_stats, object_hook=_decode_dict)
)
if only_ticker:
for k, v in all_workers.iteritems():
if v['is_ticker']:
return {k: v}
return {}
return all_workers
def set_worker_status(self, group_names=None, action=ACTIVE,
exclude=None, limit=None, worker_name=None):
r_server = self.r_server
all_workers = self.get_workers()
if not group_names:
group_names = self.group_names
elif isinstance(group_names, str):
group_names = [group_names]
        # build the exclusion list without mutating the caller's `exclude`
        exclusion = exclude + [action] if exclude else [action]
workers = []
if worker_name is not None:
if worker_name in all_workers.keys():
workers = [worker_name]
else:
for k, v in all_workers.iteritems():
if v.status not in exclusion and set(group_names) & set(v.group_names):
workers.append(k)
if limit and worker_name is None:
workers = workers[:limit]
if workers:
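            # optimistic locking: WATCH invalidates the pipeline if another
            # client touches the key before EXEC; on RWatchError we back off
            # briefly and retry the whole transaction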
with r_server.pipeline() as pipe:
while True:
try:
pipe.watch('SET_WORKER_STATUS')
for w in workers:
worker_key = self._nkey('worker_status:%s' % w)
pipe.hset(worker_key, 'status', action)
pipe.execute()
break
except RWatchError:
time.sleep(0.1)
continue
def queue_task(self, function, pargs=[], pvars={}, **kwargs):
if hasattr(function, '__name__'):
function = function.__name__
        targs = kwargs.pop('args', None) or dumps(pargs)
        tvars = kwargs.pop('vars', None) or dumps(pvars)
        tuuid = kwargs.pop('uuid', None) or web2py_uuid()
        tname = kwargs.pop('task_name', None) or function
        immediate = kwargs.pop('immediate', None)
rtn = self.db.scheduler_task.validate_and_insert(
function_name=function,
task_name=tname,
args=targs,
vars=tvars,
uuid=tuuid,
**kwargs)
if not rtn.errors:
rtn.uuid = tuuid
if immediate:
r_server = self.r_server
ticker = self.get_workers(only_ticker=True)
if ticker.keys():
ticker = ticker.keys()[0]
with r_server.pipeline() as pipe:
while True:
try:
pipe.watch('SET_WORKER_STATUS')
worker_key = self._nkey('worker_status:%s' % ticker)
pipe.hset(worker_key, 'status', 'PICK')
pipe.execute()
break
except RWatchError:
time.sleep(0.1)
continue
else:
rtn.uuid = None
return rtn
def stop_task(self, ref):
r_server = self.r_server
st = self.db.scheduler_task
if isinstance(ref, int):
q = st.id == ref
elif isinstance(ref, str):
q = st.uuid == ref
else:
            raise SyntaxError(
                "You can stop a task only by id or uuid")
task = self.db(q).select(st.id, st.status, st.group_name)
task = task.first()
rtn = None
if not task:
return rtn
running_dict = self._nkey('running_dict:%s' % task.group_name)
if task.status == 'RUNNING':
worker_key = r_server.hget(running_dict, task.id)
worker_key = self._nkey('worker_status:%s' % (worker_key))
r_server.hset(worker_key, 'status', STOP_TASK)
elif task.status == 'QUEUED':
rtn = self.db(q).update(
stop_time=self.now(),
enabled=False,
status=STOPPED)
return rtn
| true | true |
f7380afeff1caddcba22d0b62ae7280baeb37695 | 11,159 | py | Python | docker/package/manylinux/build_wheel.py | caishenghang/oneflow | db239cc9f98e551823bf6ce2d4395bd5c339b1c5 | ["Apache-2.0"] | null | null | null | docker/package/manylinux/build_wheel.py | caishenghang/oneflow | db239cc9f98e551823bf6ce2d4395bd5c339b1c5 | ["Apache-2.0"] | null | null | null | docker/package/manylinux/build_wheel.py | caishenghang/oneflow | db239cc9f98e551823bf6ce2d4395bd5c339b1c5 | ["Apache-2.0"] | null | null | null |
import os
import subprocess
import tempfile
from pathlib import Path
def build_arg_env(env_var_name):
val = os.getenv(env_var_name)
return f"--build-arg {env_var_name}={val}"
def build_img(cuda_version, oneflow_src_dir, use_tuna, use_system_proxy, img_tag):
cudnn_version = 7
if str(cuda_version).startswith("11"):
cudnn_version = 8
from_img = f"nvidia/cuda:{cuda_version}-cudnn{cudnn_version}-devel-centos7"
tuna_build_arg = ""
if use_tuna:
tuna_build_arg = '--build-arg use_tuna_yum=1 --build-arg pip_args="-i https://pypi.tuna.tsinghua.edu.cn/simple"'
proxy_build_args = []
if use_system_proxy:
for v in ["HTTP_PROXY", "HTTPS_PROXY"]:
proxy_build_args.append(build_arg_env(v))
proxy_build_arg = " ".join(proxy_build_args)
cmd = f"docker build -f docker/package/manylinux/Dockerfile {proxy_build_arg} {tuna_build_arg} --build-arg from={from_img} -t {img_tag} ."
print(cmd)
subprocess.check_call(cmd, cwd=oneflow_src_dir, shell=True)
def common_cmake_args(cache_dir):
third_party_install_dir = os.path.join(cache_dir, "build-third-party-install")
return f"-DCMAKE_BUILD_TYPE=Release -DBUILD_RDMA=ON -DTHIRD_PARTY_DIR={third_party_install_dir}"
def get_build_dir_arg(cache_dir, oneflow_src_dir):
build_dir_real = os.path.join(cache_dir, "build")
build_dir_mount = os.path.join(oneflow_src_dir, "build")
return f"-v {build_dir_real}:{build_dir_mount}"
def force_rm_dir(dir_to_clean):
print("cleaning:", dir)
clean_cmd = f"docker run --rm -v {dir_to_clean}:{dir_to_clean} -w {dir_to_clean} busybox rm -rf {dir_to_clean}/*"
subprocess.check_call(clean_cmd, shell=True)
def create_tmp_bash_and_run(docker_cmd, img, bash_cmd, bash_args, bash_wrap):
with tempfile.NamedTemporaryFile(mode="w+", encoding="utf-8") as wrapper_f:
with tempfile.NamedTemporaryFile(mode="w+", encoding="utf-8") as f:
w_name = "/host" + wrapper_f.name
f_name = "/host" + f.name
bash_cmd = "PATH=/opt/python/cp37-cp37m/bin:$PATH\n" + bash_cmd
f.write(bash_cmd)
f.flush()
wrapper_f.write(
f"""{bash_wrap}
bash {bash_args} {f_name}
"""
)
wrapper_f.flush()
print(bash_cmd)
docker_cmd = f"{docker_cmd} -v /tmp:/host/tmp {img}"
cmd = f"{docker_cmd} bash {bash_args} {w_name}"
print(cmd)
subprocess.check_call(cmd, shell=True)
def get_common_docker_args(
oneflow_src_dir=None, cache_dir=None, current_dir=None, house_dir=None
):
root = Path(cache_dir)
child = Path(current_dir)
assert root in child.parents
cwd = os.getcwd()
pwd_arg = f"-v {cwd}:{cwd}"
cache_dir_arg = f"-v {cache_dir}:{cache_dir}"
house_dir_arg = ""
if house_dir:
house_dir_arg = f"-v {house_dir}:{house_dir}"
build_dir_arg = get_build_dir_arg(cache_dir, oneflow_src_dir)
return f"-v {oneflow_src_dir}:{oneflow_src_dir} {pwd_arg} {house_dir_arg} {cache_dir_arg} {build_dir_arg} -w {current_dir}"
def build_third_party(
img_tag, oneflow_src_dir, cache_dir, extra_oneflow_cmake_args, bash_args, bash_wrap,
):
third_party_build_dir = os.path.join(cache_dir, "build-third-party")
cmake_cmd = " ".join(
[
"cmake",
common_cmake_args(cache_dir),
"-DTHIRD_PARTY=ON -DONEFLOW=OFF",
extra_oneflow_cmake_args,
oneflow_src_dir,
]
)
bash_cmd = f"""set -ex
export TEST_TMPDIR={cache_dir}/bazel_cache
{cmake_cmd}
make -j`nproc` prepare_oneflow_third_party
"""
common_docker_args = get_common_docker_args(
oneflow_src_dir=oneflow_src_dir,
cache_dir=cache_dir,
current_dir=third_party_build_dir,
)
docker_cmd = f"docker run --rm {common_docker_args}"
create_tmp_bash_and_run(docker_cmd, img_tag, bash_cmd, bash_args, bash_wrap)
def get_python_bin(version):
assert version in ["3.5", "3.6", "3.7", "3.8"]
py_ver = "".join(version.split("."))
py_abi = f"cp{py_ver}-cp{py_ver}"
if py_ver != "38":
py_abi = f"{py_abi}m"
py_root = f"/opt/python/{py_abi}"
py_bin = f"{py_root}/bin/python"
return py_bin
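# e.g. get_python_bin("3.6") -> "/opt/python/cp36-cp36m/bin/python";
# for 3.8 the "m" ABI suffix is dropped: "/opt/python/cp38-cp38/bin/python"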
def build_oneflow(
img_tag,
oneflow_src_dir,
cache_dir,
extra_oneflow_cmake_args,
python_version,
skip_wheel,
package_name,
house_dir,
bash_args,
bash_wrap,
):
oneflow_build_dir = os.path.join(cache_dir, "build-oneflow")
python_bin = get_python_bin(python_version)
cmake_cmd = " ".join(
[
"cmake",
common_cmake_args(cache_dir),
"-DTHIRD_PARTY=OFF -DONEFLOW=ON",
extra_oneflow_cmake_args,
"-DCMAKE_EXPORT_COMPILE_COMMANDS=1",
f"-DPython3_EXECUTABLE={python_bin}",
oneflow_src_dir,
]
)
common_docker_args = get_common_docker_args(
oneflow_src_dir=oneflow_src_dir,
cache_dir=cache_dir,
current_dir=oneflow_build_dir,
house_dir=house_dir,
)
docker_cmd = f"docker run --rm {common_docker_args}"
bash_cmd = f"""set -ex
export LD_LIBRARY_PATH=/opt/intel/lib/intel64_lin:/opt/intel/mkl/lib/intel64:$LD_LIBRARY_PATH
{cmake_cmd}
cmake --build . -j `nproc`
"""
if skip_wheel:
return 0
else:
bash_cmd += f"""
rm -rf {oneflow_build_dir}/python_scripts/*.egg-info
cd {oneflow_src_dir}
rm -rf build/*
{python_bin} setup.py bdist_wheel -d /tmp/tmp_wheel --build_dir {oneflow_build_dir} --package_name {package_name}
auditwheel repair /tmp/tmp_wheel/*.whl --wheel-dir {house_dir}
"""
return create_tmp_bash_and_run(
docker_cmd, img_tag, bash_cmd, bash_args, bash_wrap
)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--custom_img_tag", type=str, required=False, default=None,
)
parser.add_argument(
"--cache_dir", type=str, required=False, default=None,
)
default_wheel_house_dir = os.path.join(os.getcwd(), "wheelhouse")
parser.add_argument(
"--wheel_house_dir", type=str, required=False, default=default_wheel_house_dir,
)
parser.add_argument(
"--python_version", type=str, required=False, default="3.5, 3.6, 3.7, 3.8",
)
parser.add_argument(
"--cuda_version", type=str, required=False, default="10.2",
)
parser.add_argument(
"--extra_oneflow_cmake_args", type=str, required=False, default="",
)
parser.add_argument(
"--oneflow_src_dir", type=str, required=False, default=os.getcwd(),
)
parser.add_argument(
"--skip_third_party", default=False, action="store_true", required=False
)
parser.add_argument(
"--skip_wheel", default=False, action="store_true", required=False
)
parser.add_argument(
"--skip_img", default=False, action="store_true", required=False
)
parser.add_argument(
"--use_tuna", default=False, action="store_true", required=False
)
parser.add_argument(
"--use_system_proxy", default=False, action="store_true", required=False
)
parser.add_argument("--xla", default=False, action="store_true", required=False)
parser.add_argument(
"--use_aliyun_mirror", default=False, action="store_true", required=False
)
parser.add_argument("--cpu", default=False, action="store_true", required=False)
args = parser.parse_args()
extra_oneflow_cmake_args = args.extra_oneflow_cmake_args
cuda_versions = []
if args.use_aliyun_mirror:
extra_oneflow_cmake_args += " -DTHIRD_PARTY_MIRROR=aliyun"
if args.cpu:
extra_oneflow_cmake_args += " -DBUILD_CUDA=OFF"
cuda_versions = ["10.2"]
else:
extra_oneflow_cmake_args += " -DBUILD_CUDA=ON"
cuda_versions = args.cuda_version.split(",")
cuda_versions = [v.strip() for v in cuda_versions]
if args.xla:
extra_oneflow_cmake_args += " --DWITH_XLA=ON"
else:
extra_oneflow_cmake_args += " --DWITH_XLA=Off"
    if args.xla and args.cpu:
raise ValueError("flag xla can't coexist with flag cpu")
for cuda_version in cuda_versions:
cache_dir = None
def build():
img_tag = None
skip_img = args.skip_img
if args.custom_img_tag:
img_tag = args.custom_img_tag
skip_img = True
else:
img_tag = f"oneflow:manylinux2014-cuda{cuda_version}"
            if not skip_img:
build_img(
                    cuda_version,
args.oneflow_src_dir,
args.use_tuna,
args.use_system_proxy,
img_tag,
)
bash_args = ""
if args.xla:
bash_args = "-l"
bash_wrap = ""
if args.xla:
bash_wrap = """
source scl_source enable devtoolset-7
gcc --version
"""
else:
bash_wrap = "gcc --version"
global cache_dir
if args.cache_dir:
cache_dir = args.cache_dir
else:
cache_dir = os.path.join(os.getcwd(), "manylinux2014-build-cache")
sub_dir = cuda_version
if args.xla:
sub_dir += "-xla"
if args.cpu:
assert len(cuda_versions) == 1
sub_dir = "cpu"
cache_dir = os.path.join(cache_dir, sub_dir)
            if not args.skip_third_party:
build_third_party(
img_tag,
args.oneflow_src_dir,
cache_dir,
extra_oneflow_cmake_args,
bash_args,
bash_wrap,
)
cuda_version_literal = "".join(cuda_version.split("."))
assert len(cuda_version_literal) == 3
python_versions = args.python_version.split(",")
python_versions = [pv.strip() for pv in python_versions]
package_name = None
if args.cpu:
package_name = "oneflow_cpu"
else:
package_name = f"oneflow_cu{cuda_version_literal}"
if args.xla:
package_name += "_xla"
for python_version in python_versions:
build_oneflow(
img_tag,
args.oneflow_src_dir,
cache_dir,
extra_oneflow_cmake_args,
python_version,
args.skip_wheel,
package_name,
args.wheel_house_dir,
bash_args,
bash_wrap,
)
try:
build()
except subprocess.CalledProcessError as e:
print("failed: ", e.cmd, e.args)
print("clean: ", cache_dir)
        assert cache_dir is not None
force_rm_dir(cache_dir)
build()
| 34.230061 | 142 | 0.609284 |
| true | true |
f7380d64839a49d1b3b0921f39e6a084e7dfd4b8 | 272 | py | Python | ats/conftest.py | MahmoudFarid/ats | 1f882168cba2f34451cbb9bba1e37ce93ef0c465 | ["MIT"] | null | null | null | ats/conftest.py | MahmoudFarid/ats | 1f882168cba2f34451cbb9bba1e37ce93ef0c465 | ["MIT"] | 1 | 2020-07-19T11:19:22.000Z | 2020-07-19T11:19:22.000Z | ats/conftest.py | MahmoudFarid/ats | 1f882168cba2f34451cbb9bba1e37ce93ef0c465 | ["MIT"] | null | null | null |
import pytest
from ats.users.models import User
from ats.users.tests.factories import UserFactory
@pytest.fixture(autouse=True)
def media_storage(settings, tmpdir):
settings.MEDIA_ROOT = tmpdir.strpath
@pytest.fixture
def user() -> User:
return UserFactory()
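# A hypothetical test consuming the fixture (pytest injects `user` by name):
#
#     def test_fixture_provides_user(user):
#         assert isinstance(user, User)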
| 18.133333 | 49 | 0.768382 |
| true | true |
f73810b5a713532fd33d2666d1759247bd9d52f6 | 682 | py | Python | 2021/12/solution2.py | frenzymadness/aoc | c9018e757bae61a696e675a827aef873995abdd3 | ["WTFPL"] | 2 | 2020-12-04T09:45:38.000Z | 2020-12-07T14:06:12.000Z | 2021/12/solution2.py | frenzymadness/aoc | c9018e757bae61a696e675a827aef873995abdd3 | ["WTFPL"] | null | null | null | 2021/12/solution2.py | frenzymadness/aoc | c9018e757bae61a696e675a827aef873995abdd3 | ["WTFPL"] | null | null | null |
from collections import defaultdict, deque
with open("input.txt") as input_file:
lines = input_file.read().splitlines()
g = defaultdict(set)
for line in lines:
n1, n2 = line.split("-")
g[n1].add(n2)
g[n2].add(n1)
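# DFS over the cave graph: big caves (uppercase) may be revisited freely,
# small caves at most once, except that a single small cave (never "start"
# or "end") may be visited twice; `return_here` tracks that allowance.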
def walk(node, path, return_here):
if node == "end":
yield path
for next_node in g[node]:
if next_node not in path or next_node.isupper():
yield from walk(next_node, path + [next_node], return_here)
elif next_node.islower() and return_here and next_node not in ("start", "end"):
yield from walk(next_node, path + [next_node], False)
print(sum(1 for _ in walk("start", ["start"], True)))
| 28.416667 | 87 | 0.631965 |
| true | true |
f73811ec3999042be11f430b5182fc83482be01d | 28,621 | py | Python | fairseq/tasks/online_backtranslation.py | ben15021999/fairseq_rl | 89f3c1123052927f67c008f01f3ffa4383f90150 | ["MIT"] | null | null | null | fairseq/tasks/online_backtranslation.py | ben15021999/fairseq_rl | 89f3c1123052927f67c008f01f3ffa4383f90150 | ["MIT"] | null | null | null | fairseq/tasks/online_backtranslation.py | ben15021999/fairseq_rl | 89f3c1123052927f67c008f01f3ffa4383f90150 | ["MIT"] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import json
import logging
import math
import os
from argparse import Namespace
from collections import OrderedDict, defaultdict
from pathlib import Path
from typing import Dict, Sequence, Tuple
from argparse import ArgumentError
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import fairseq
from fairseq import metrics, options, utils
from fairseq.data import (
FairseqDataset,
LanguagePairDataset,
NoisingDataset,
PrependTokenDataset,
RoundRobinZipDatasets,
TransformEosLangPairDataset,
data_utils,
encoders,
)
from fairseq.sequence_generator_rl import SequenceGenerator
from fairseq.tasks import register_task
from fairseq.tasks.translation import TranslationTask, load_langpair_dataset
logger = logging.getLogger(__name__)
class PiecewiseLinearFn:
"""Piecewise linear function. Can be configured with a string."""
def __init__(self, pieces: Sequence[Tuple[int, float]]):
assert pieces == sorted(
pieces
), f"PiecewiseLinearFn configuration should be sorted, received: {pieces}"
self.pieces = pieces
def __call__(self, x: int) -> float:
for i, (x_a, y_a) in enumerate(self.pieces[:-1]):
x_b, y_b = self.pieces[i + 1]
if x_a <= x <= x_b:
return y_a + (x - x_a) * (y_b - y_a) / (x_b - x_a)
return self.pieces[-1][1]
@staticmethod
def from_string(configuration: str) -> "PiecewiseLinearFn":
"""
Parse the configuration of lambda coefficient (for scheduling).
x = "3" # lambda will be a constant equal to x
x = "0:1,1000:0" # lambda will start from 1 and linearly decrease
# to 0 during the first 1000 iterations
x = "0:0,1000:0,2000:1" # lambda will be equal to 0 for the first 1000
# iterations, then will linearly increase to 1 until iteration 2000
"""
if isinstance(configuration, float):
return PiecewiseLinearFn([(0, configuration)])
try:
parts = configuration.split(",")
if len(parts) == 1:
v = float(configuration)
return PiecewiseLinearFn([(0, v)])
split = [s.split(":") for s in parts]
pieces = [(int(t), float(v)) for t, v in split]
return PiecewiseLinearFn(pieces)
except Exception:
raise ValueError(
f"Invalid PiecewiseLinearFn configuration: {configuration!r}"
)
@staticmethod
def one() -> "PiecewiseLinearFn":
return PiecewiseLinearFn([(0, 1.0)])
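    # A minimal usage sketch (values follow from __call__ above):
    #
    #     fn = PiecewiseLinearFn.from_string("0:0,1000:1")
    #     fn(0)     # -> 0.0
    #     fn(500)   # -> 0.5, linear interpolation between the pieces
    #     fn(2000)  # -> 1.0, clamped to the last piece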
@register_task("online_backtranslation")
class OnlineBackTranslationTask(TranslationTask):
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
# Generic translation args
parser.add_argument('data', help='colon separated path to data directories list, \
will be iterated upon during epochs in round-robin manner; \
however, valid and test data are always in the first directory to \
avoid the need for repeating them in all directories')
parser.add_argument('--mono-langs', metavar='MONO_LANGS',
help='monolingual languages for training')
parser.add_argument('--valid-lang-pairs', default=None, metavar='VALID_LANG_PAIRS',
help='language pairs for validation')
parser.add_argument('--load-alignments', action='store_true',
help='load the binarized alignments')
parser.add_argument('--left-pad-source', default='False', type=str, metavar='BOOL',
help='pad the source on the left')
parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
help='pad the target on the left')
parser.add_argument('--upsample-primary', default=1, type=int,
help='amount to upsample primary dataset')
try:
parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the source sequence')
parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the target sequence')
except ArgumentError:
# this might have already been defined. Once we transition this to hydra it should be fine to add it here.
pass
parser.add_argument('--truncate-source', action='store_true', default=False,
help='truncate source to max-source-positions')
parser.add_argument('--num-batch-buckets', default=0, type=int, metavar='N',
help='if >0, then bucket source and target lengths into N '
'buckets and pad accordingly; this is useful on TPUs '
'to minimize the number of compilations')
# Denoising args
parser.add_argument('--max-word-shuffle-distance', default=3.0, type=float, metavar='N',
help='maximum word shuffle distance for denoising autoencoding data generation')
parser.add_argument('--word-dropout-prob', default=0.1, type=float, metavar='N',
help='word dropout probability for denoising autoencoding data generation')
parser.add_argument('--word-blanking-prob', default=0.2, type=float, metavar='N',
help='word blanking probability for denoising autoencoding data generation')
# Backtranslation args
parser.add_argument('--lambda-bt', default="1.0", type=str, metavar='N',
help='back-translation weight')
parser.add_argument('--lambda-dae', default="1.0", type=str, metavar='N',
help='denoising auto-encoder weight')
# Evaluation args
parser.add_argument('--generate-one-by-one', action='store_true',
help='generate one sentence at a time for backtranslation')
parser.add_argument('--eval-bleu', action='store_true',
help='evaluation with BLEU scores')
parser.add_argument('--eval-bleu-detok', type=str, default="space",
help='detokenize before computing BLEU (e.g., "moses"); '
'required if using --eval-bleu; use "space" to '
'disable detokenization; see fairseq.data.encoders '
'for other options')
parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON',
help='args for building the tokenizer, if needed')
parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False,
help='compute tokenized BLEU instead of sacrebleu')
parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const='@@ ', default=None,
help='remove BPE before computing BLEU')
parser.add_argument('--eval-bleu-args', type=str, metavar='JSON',
                            help='generation args for BLEU scoring, '
                                 'e.g., \'{"beam": 4, "lenpen": 0.6}\'')
parser.add_argument('--eval-bleu-print-samples', action='store_true',
help='print sample generations during validation')
# fmt: on
def __init__(self, args, common_dict, mono_langs, valid_lang_pairs):
super().__init__(args, common_dict, common_dict)
self.common_dict = common_dict
self.mono_langs = mono_langs
self.valid_lang_pairs = valid_lang_pairs
self.SHOW_SAMPLES_INTERVAL = 1000
# Start by showing samples
self._show_samples_ctr = self.SHOW_SAMPLES_INTERVAL
self.SHOW_SAMPLES_NUMBER = 5
self.lambda_bt = PiecewiseLinearFn.from_string(args.lambda_bt)
self.lambda_dae = PiecewiseLinearFn.from_string(args.lambda_dae)
self.args = args
self.data = utils.split_paths(self.args.data)
if len(self.data) == 1:
shards = list(Path(self.data[0]).glob("shard*"))
if len(shards) > 0:
# keep this as strings, since it can also be a manifold path
old_data = self.data
self.data = [str(shard) for shard in shards]
logging.warning(f"Expanded data directory {old_data} to {self.data}")
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
args.left_pad_source = options.eval_bool(args.left_pad_source)
args.left_pad_target = options.eval_bool(args.left_pad_target)
paths = utils.split_paths(args.data)
assert len(paths) > 0
assert args.mono_langs is not None
mono_langs = args.mono_langs.split(",")
valid_lang_pairs = args.valid_lang_pairs.split(",")
# load dictionary
dict_path = os.path.join(paths[0], "dict.txt")
common_dict = cls.load_dictionary(dict_path)
return cls(args, common_dict, mono_langs, valid_lang_pairs)
def load_dataset(self, split, epoch=1, combine=False, **kwargs) -> FairseqDataset:
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
if split == "train":
data_path = self.data[(epoch - 1) % len(self.data)]
dataset = self.load_train_dataset(data_path)
else:
# valid/test should always be the same.
dataset = self.load_translation_dataset(split, self.data[0])
self.datasets[split] = dataset
return dataset
def load_train_dataset(self, data_path: str) -> FairseqDataset:
"""The training dataset is made of backtranslation dataset and denoising dataset."""
data = []
for lang in self.mono_langs:
train_path = os.path.join(data_path, lang, "train")
# TODO: could we do the BT using denoise sample ?
# this would half the data loading work
data.append((f"{lang}-BT", self.load_bt_dataset(train_path, lang)))
data.append(
(f"{lang}-DENOISE", self.load_denoise_dataset(train_path, lang))
)
return RoundRobinZipDatasets(OrderedDict(data))
def _langpair_dataset(
self, src: FairseqDataset, tgt: FairseqDataset
) -> LanguagePairDataset:
return LanguagePairDataset(
src,
src.sizes,
self.dictionary,
tgt=tgt,
tgt_sizes=tgt.sizes,
tgt_dict=self.dictionary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
# TODO: should we shuffle ? we are already sorting batch by sizes so ?
# shuffle=True,
)
def _prepend_lang_bos_to_target(
self, dataset: LanguagePairDataset, lang: str
) -> LanguagePairDataset:
bos = _lang_token_index(self.dictionary, lang)
return TransformEosLangPairDataset(
dataset,
src_eos=self.dictionary.eos(),
new_src_eos=self.dictionary.eos(),
tgt_bos=self.dictionary.eos(),
new_tgt_bos=bos,
)
def load_bt_dataset(self, data_path: str, lang: str) -> FairseqDataset:
"""The BT dataset is generated with (tgt, tgt) pairs.
The actual translation to a (generated_src, tgt) pair
is done on the fly during training.
"""
mono_dataset = data_utils.load_indexed_dataset(
data_path, self.common_dict, self.args.dataset_impl
)
assert mono_dataset is not None, f"No dataset found for {lang}"
mono_dataset_src = PrependTokenDataset(
mono_dataset, _lang_token_index(self.dictionary, lang)
)
mono_dataset_bt = self._langpair_dataset(mono_dataset_src, mono_dataset)
logger.info(
f"mono_lang = {lang} "
f"lang token index = {_lang_token_index(self.dictionary, lang)} "
f"lang token = {_lang_token(lang)}"
)
mono_dataset_bt = self._prepend_lang_bos_to_target(mono_dataset_bt, lang)
return mono_dataset_bt
def load_denoise_dataset(self, data_path: str, lang: str) -> FairseqDataset:
"""Classic denoising dataset"""
dataset = data_utils.load_indexed_dataset(
data_path, self.common_dict, self.args.dataset_impl
)
noisy_dataset = NoisingDataset(
dataset,
self.dictionary,
seed=1,
max_word_shuffle_distance=self.args.max_word_shuffle_distance,
word_dropout_prob=self.args.word_dropout_prob,
word_blanking_prob=self.args.word_blanking_prob,
)
noisy_dataset = PrependTokenDataset(
noisy_dataset, _lang_token_index(self.dictionary, lang)
)
clean_dataset = data_utils.load_indexed_dataset(
data_path, self.common_dict, self.args.dataset_impl
)
denoising_dataset = self._langpair_dataset(noisy_dataset, clean_dataset)
denoising_dataset = self._prepend_lang_bos_to_target(denoising_dataset, lang)
return denoising_dataset
def load_translation_dataset(
self, split: str, data_path: str, combine: bool = False
):
# only judging with one language pair for the moment,
# since ConcatDataset doesn't work as expected
assert len(self.valid_lang_pairs) == 1, "For now..."
valid_lang_pair = self.valid_lang_pairs[0]
src, tgt = valid_lang_pair.split("-")
        # use the same function as TranslationTask
src_tgt_dt = load_langpair_dataset(
data_path,
split,
src,
self.common_dict,
tgt,
self.common_dict,
combine=combine,
dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=self.args.max_source_positions,
max_target_positions=self.args.max_target_positions,
load_alignments=self.args.load_alignments,
truncate_source=self.args.truncate_source,
num_buckets=self.args.num_batch_buckets,
shuffle=(split != "test"),
prepend_bos_src=_lang_token_index(self.dictionary, src),
)
src_tgt_eos_dt = self._prepend_lang_bos_to_target(src_tgt_dt, tgt)
src_tgt_eos_dt.args = self.args
return src_tgt_eos_dt
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
raise NotImplementedError
def build_model(self, args, from_checkpoint=False):
# torch.autograd.set_detect_anomaly(True)
model = super().build_model(args, from_checkpoint)
        add_special_tokens_to_dict_and_model(self.common_dict, model, self.mono_langs)
self.sequence_generators = {}
for mono_lang in self.mono_langs:
self.sequence_generators[mono_lang] = SequenceGenerator(
[model],
tgt_dict=self.dictionary,
beam_size=1,
max_len_a=1.3,
max_len_b=5,
min_len=5,
# keep 1 to be able to prepend bos
max_len=model.max_decoder_positions() - 1,
)
if getattr(args, "eval_bleu", False):
assert getattr(args, "eval_bleu_detok", None) is not None, (
"--eval-bleu-detok is required if using --eval-bleu; "
"try --eval-bleu-detok=moses (or --eval-bleu-detok=space "
"to disable detokenization, e.g., when using sentencepiece)"
)
detok_args = json.loads(getattr(args, "eval_bleu_detok_args", "{}") or "{}")
self.tokenizer = encoders.build_tokenizer(
Namespace(
tokenizer=getattr(args, "eval_bleu_detok", None), **detok_args
)
)
gen_args = json.loads(getattr(args, "eval_bleu_args", "{}") or "{}")
self.bleu_sequence_generator = self.build_generator(
[model], Namespace(**gen_args)
)
return model
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions)
@property
def dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary`."""
return self.common_dict
def display_samples_once_in_a_while(self, smp, mono_lang, other_lang):
self._show_samples_ctr += 1
if self._show_samples_ctr < self.SHOW_SAMPLES_INTERVAL:
return
self._show_samples_ctr = 0
ln = smp["net_input"]["src_tokens"].shape[0]
logger.info(
f"(r:{self.args.distributed_rank}) : "
f"{other_lang} ---> {mono_lang} "
f"({other_lang} was generated by back-translation.) {ln} samples"
)
for i in range(min(ln, self.SHOW_SAMPLES_NUMBER)):
src_tokens = smp["net_input"]["src_tokens"][i]
tgt_tokens = smp["target"][i]
src_str = self.dictionary.string(src_tokens, "sentencepiece")
tgt_str = self.dictionary.string(tgt_tokens, "sentencepiece")
logger.info(
f"\n{i}\t\t[{other_lang} generated] {src_str}\n"
f"\t\t[{mono_lang} original ] {tgt_str}\n"
f"\t\t[ src tokens] {src_tokens}\n"
)
def backtranslate_sample(self, smp, orig_lang, other_lang) -> None:
"""
* WARNING: smp is modified in place.
* At the start of this function, `smp` has the same input and target:
|--------------------------------------------------------|
| smp['net_input']['src_tokens'] | smp['target'] |
| (from data) __en__ hello world | __en__ hello world |
|--------------------------------------------------------|
* We call generator.generate(smp, bos_token = token("ro")),
and copy the result as input
* At the end, `smp` has the translation to other language.
|--------------------------------------------------------|
| smp['net_input']['src_tokens'] | smp['target'] |
| (generated) __ro__ salut lume | __en__ hello world |
|--------------------------------------------------------|
"""
bos_token = _lang_token_index(self.dictionary, other_lang)
generated = self.sequence_generators[orig_lang].generate(
models=[], sample=smp, bos_token=bos_token
)
        max_length = max(gn[0]["tokens"].size(0) for gn in generated)
net_input = smp["net_input"]
n_src_tokens = torch.empty(
            size=(len(generated), max_length + 1), dtype=net_input["src_tokens"].dtype
)
n_src_lengths = torch.empty(
len(generated), dtype=net_input["src_lengths"].dtype
)
for i, gn in enumerate(generated):
tokens = gn[0]["tokens"]
tokens_size = tokens.size(0)
            padding_needed = max_length - tokens_size
tokens = torch.cat([tokens.new([bos_token]), tokens])
tokens = F.pad(tokens, (0, padding_needed), value=self.dictionary.pad())
n_src_tokens[i] = tokens
n_src_lengths[i] = tokens_size + 1
device = net_input["src_tokens"].device
        # Dropping the old tensors before reassigning seems to be important
del net_input["src_tokens"]
del net_input["src_lengths"]
net_input["src_tokens"] = n_src_tokens.to(device)
net_input["src_lengths"] = n_src_lengths.to(device)
def generate(self, smp, model):
model.eval()
orig_lang = (
self.dictionary[smp["net_input"]["src_tokens"][0][0]]
.replace(" ", "")
.replace("_", "")
)
bos_token = smp["net_input"]["prev_output_tokens"][0][0]
with torch.no_grad():
generated = self.sequence_generators[orig_lang].generate(
models=[model], sample=smp, bos_token=bos_token
)
return generated
def get_other_lang(self, lang):
# TODO: allow more complex mapping
if lang != self.mono_langs[0]:
return self.mono_langs[0]
if len(self.mono_langs) == 2:
return self.mono_langs[1]
return self.mono_langs[np.random.randint(1, len(self.mono_langs))]
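    # Example, assuming self.mono_langs == ["en", "ro"]:
    #   get_other_lang("en") -> "ro" and get_other_lang("ro") -> "en";
    #   with three or more languages the partner is sampled at random.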
def train_step(
self, sample, model, criterion, optimizer, update_num, ignore_grad=False
):
model.train()
model.set_num_updates(update_num)
agg_loss, agg_sample_size = 0.0, 0.0
agg_logging_output: Dict[str, float] = defaultdict(float)
dataset_keys = self.datasets["train"].datasets.keys()
weights = {
"BT": self.lambda_bt(update_num),
"DENOISE": self.lambda_dae(update_num),
}
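        # the two loss weights follow the PiecewiseLinearFn schedules parsed
        # from --lambda-bt / --lambda-dae, evaluated at the current update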
log_keys = {"BT": "bt_", "DENOISE": "dae_"}
for dataset_key in dataset_keys:
smp = sample[dataset_key]
mono_lang, task_subtype = dataset_key.split("-")
if weights[task_subtype] == 0:
continue
if task_subtype == "BT":
with torch.autograd.profiler.record_function("backtranslation"):
model.eval()
# TODO: Could we translate to several language at once ?
# this would allow to share encoder_out and maximize GPU usage.
other_lang = self.get_other_lang(mono_lang)
self.backtranslate_sample(smp, mono_lang, other_lang)
self.display_samples_once_in_a_while(smp, mono_lang, other_lang)
model.train()
# Like in FairseqTask.train_step
with torch.autograd.profiler.record_function("forward"):
loss, sample_size, logging_output = criterion(model, smp)
loss *= weights[task_subtype]
if ignore_grad:
loss *= 0
with torch.autograd.profiler.record_function("backward"):
optimizer.backward(loss)
agg_loss += loss.item()
agg_sample_size += sample_size
for k in logging_output:
agg_logging_output[log_keys[task_subtype] + k] += logging_output[k]
agg_logging_output[k] += logging_output[k]
return agg_loss, agg_sample_size, agg_logging_output
def get_bos_token_from_sample(self, sample):
net_input = sample["net_input"]
source_lang_token_id = torch.unique(net_input["src_tokens"][:, 0]).item()
source_lang_token = self.dictionary[source_lang_token_id].replace("_", "")
target_lang_token_id = _lang_token_index(
self.dictionary, self.get_other_lang(source_lang_token)
)
return target_lang_token_id
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
bt_sample_size = sum(x.get("bt_sample_size", 0) for x in logging_outputs)
if bt_sample_size:
bt_loss_sum = sum(x.get("bt_loss", 0) for x in logging_outputs)
bt_loss_sum *= 1 / bt_sample_size / math.log(2)
metrics.log_scalar("bt_loss", bt_loss_sum, bt_sample_size, round=3)
bt_nll_loss_sum = sum(x.get("bt_nll_loss", 0) for x in logging_outputs)
bt_ntokens = sum(x.get("bt_ntokens", 0) for x in logging_outputs)
bt_nll_loss_sum *= 1 / bt_ntokens / math.log(2)
metrics.log_scalar("bt_nll_loss", bt_nll_loss_sum, bt_ntokens, round=3)
metrics.log_derived(
"bt_ppl", lambda meters: utils.get_perplexity(meters["bt_nll_loss"].avg)
)
dae_sample_size = sum(x.get("dae_sample_size", 0) for x in logging_outputs)
if dae_sample_size:
dae_loss_sum = sum(x.get("dae_loss", 0) for x in logging_outputs)
dae_loss_sum *= 1 / dae_sample_size / math.log(2)
metrics.log_scalar("dae_loss", dae_loss_sum, dae_sample_size, round=3)
dae_nll_loss_sum = sum(x.get("dae_nll_loss", 0) for x in logging_outputs)
dae_ntokens = sum(x.get("dae_ntokens", 0) for x in logging_outputs)
dae_nll_loss_sum *= 1 / dae_ntokens / math.log(2)
metrics.log_scalar("dae_nll_loss", dae_nll_loss_sum, dae_ntokens, round=3)
metrics.log_derived(
"dae_ppl",
lambda meters: utils.get_perplexity(meters["dae_nll_loss"].avg),
)
@torch.no_grad()
def extend_embedding(
emb: nn.Module, new_vocab_size: int, copy_from_token_id: int
) -> None:
old_emb_data = emb.weight.data
(old_vocab_size, dim) = old_emb_data.shape
assert new_vocab_size >= old_vocab_size
if new_vocab_size > old_vocab_size:
emb.weight.data = torch.zeros((new_vocab_size, dim))
emb.weight.data[:old_vocab_size, :] = old_emb_data
# initialize new embeddings
emb.weight.data[old_vocab_size:, :] = old_emb_data[copy_from_token_id]
if hasattr(emb, "num_embeddings"):
emb.num_embeddings = new_vocab_size
if hasattr(emb, "out_features"):
emb.out_features = new_vocab_size
if getattr(emb, "bias", None) is None:
return
# Fix the bias.
# Bias shape can be different from the previous vocab size
    # if the weight matrix was shared and already extended but not the bias.
(old_vocab_size,) = emb.bias.shape
assert new_vocab_size >= old_vocab_size
if new_vocab_size > old_vocab_size:
old_bias = emb.bias.data
new_bias = torch.zeros(
(new_vocab_size,), dtype=old_bias.dtype, device=old_bias.device
)
new_bias[:old_vocab_size] = old_bias
emb.bias.data = new_bias
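# A minimal sketch of the effect, with hypothetical sizes: growing a 10-row
# embedding to 12 rows keeps the old rows and fills the two new ones with a
# copy of the row at `copy_from_token_id`:
#
#     emb = nn.Embedding(10, 4)
#     extend_embedding(emb, 12, copy_from_token_id=0)
#     emb.weight.shape  # -> torch.Size([12, 4])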
def add_special_tokens_to_dict_and_model(
dictionary: "fairseq.data.Dictionary",
model: nn.Module,
mono_langs: Sequence[str],
) -> None:
embs = model.encoder.embed_tokens
vocab_size, embedding_dim = embs.weight.shape
# The model may or may not have a '<mask>' embedding yet
assert (
len(dictionary) <= vocab_size <= len(dictionary) + 1
), f"Dictionary len ({len(dictionary)}) doesn't match embs shape ({embs.weight.shape})"
# TODO: we should reuse the pretrained model dict which already has <mask>
dictionary.add_symbol("<mask>")
for lang in mono_langs:
lang_token = _lang_token(lang)
dictionary.add_symbol(lang_token)
logger.info(
f"dictionary: {len(dictionary)} -> {vocab_size} tokens "
f"after adding {len(mono_langs)} lang tokens."
)
if len(dictionary) <= vocab_size:
return
extend_embedding(embs, len(dictionary), dictionary.bos())
dec_embs = model.decoder.embed_tokens
extend_embedding(dec_embs, len(dictionary), dictionary.bos())
lm_head = model.decoder.output_projection
extend_embedding(lm_head, len(dictionary), dictionary.bos())
assert lm_head.weight.shape == (len(dictionary), embedding_dim)
def _lang_token(lang: str) -> str:
return f"__{lang}__"
def _lang_token_index(dictionary, lang: str) -> int:
return dictionary.index(_lang_token(lang))
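# e.g. _lang_token("ro") -> "__ro__"; _lang_token_index then looks that token
# up in the shared dictionary (added earlier by
# add_special_tokens_to_dict_and_model).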
@contextlib.contextmanager
def assert_weights_have_changed(model: nn.Module):
def checksum(model: nn.Module) -> float:
return sum(p.sum().item() for p in model.parameters())
initial_checksum = checksum(model)
yield model
final_checksum = checksum(model)
logger.info(
f"initial_checksum={initial_checksum} -> final_checksum={final_checksum}"
)
assert initial_checksum != final_checksum, "Model hasn't changed !"
| 41.904832 | 118 | 0.608574 |
import contextlib
import json
import logging
import math
import os
from argparse import Namespace
from collections import OrderedDict, defaultdict
from pathlib import Path
from typing import Dict, Sequence, Tuple
from argparse import ArgumentError
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import fairseq
from fairseq import metrics, options, utils
from fairseq.data import (
FairseqDataset,
LanguagePairDataset,
NoisingDataset,
PrependTokenDataset,
RoundRobinZipDatasets,
TransformEosLangPairDataset,
data_utils,
encoders,
)
from fairseq.sequence_generator_rl import SequenceGenerator
from fairseq.tasks import register_task
from fairseq.tasks.translation import TranslationTask, load_langpair_dataset
logger = logging.getLogger(__name__)
class PiecewiseLinearFn:
def __init__(self, pieces: Sequence[Tuple[int, float]]):
assert pieces == sorted(
pieces
), f"PiecewiseLinearFn configuration should be sorted, received: {pieces}"
self.pieces = pieces
def __call__(self, x: int) -> float:
for i, (x_a, y_a) in enumerate(self.pieces[:-1]):
x_b, y_b = self.pieces[i + 1]
if x_a <= x <= x_b:
return y_a + (x - x_a) * (y_b - y_a) / (x_b - x_a)
return self.pieces[-1][1]
@staticmethod
def from_string(configuration: str) -> "PiecewiseLinearFn":
if isinstance(configuration, float):
return PiecewiseLinearFn([(0, configuration)])
try:
parts = configuration.split(",")
if len(parts) == 1:
v = float(configuration)
return PiecewiseLinearFn([(0, v)])
split = [s.split(":") for s in parts]
pieces = [(int(t), float(v)) for t, v in split]
return PiecewiseLinearFn(pieces)
except Exception:
raise ValueError(
f"Invalid PiecewiseLinearFn configuration: {configuration!r}"
)
@staticmethod
def one() -> "PiecewiseLinearFn":
return PiecewiseLinearFn([(0, 1.0)])
@register_task("online_backtranslation")
class OnlineBackTranslationTask(TranslationTask):
@staticmethod
def add_args(parser):
parser.add_argument('data', help='colon separated path to data directories list, \
will be iterated upon during epochs in round-robin manner; \
however, valid and test data are always in the first directory to \
avoid the need for repeating them in all directories')
parser.add_argument('--mono-langs', metavar='MONO_LANGS',
help='monolingual languages for training')
parser.add_argument('--valid-lang-pairs', default=None, metavar='VALID_LANG_PAIRS',
help='language pairs for validation')
parser.add_argument('--load-alignments', action='store_true',
help='load the binarized alignments')
parser.add_argument('--left-pad-source', default='False', type=str, metavar='BOOL',
help='pad the source on the left')
parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
help='pad the target on the left')
parser.add_argument('--upsample-primary', default=1, type=int,
help='amount to upsample primary dataset')
try:
parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the source sequence')
parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the target sequence')
except ArgumentError:
pass
parser.add_argument('--truncate-source', action='store_true', default=False,
help='truncate source to max-source-positions')
parser.add_argument('--num-batch-buckets', default=0, type=int, metavar='N',
help='if >0, then bucket source and target lengths into N '
'buckets and pad accordingly; this is useful on TPUs '
'to minimize the number of compilations')
parser.add_argument('--max-word-shuffle-distance', default=3.0, type=float, metavar='N',
help='maximum word shuffle distance for denoising autoencoding data generation')
parser.add_argument('--word-dropout-prob', default=0.1, type=float, metavar='N',
help='word dropout probability for denoising autoencoding data generation')
parser.add_argument('--word-blanking-prob', default=0.2, type=float, metavar='N',
help='word blanking probability for denoising autoencoding data generation')
parser.add_argument('--lambda-bt', default="1.0", type=str, metavar='N',
help='back-translation weight')
parser.add_argument('--lambda-dae', default="1.0", type=str, metavar='N',
help='denoising auto-encoder weight')
parser.add_argument('--generate-one-by-one', action='store_true',
help='generate one sentence at a time for backtranslation')
parser.add_argument('--eval-bleu', action='store_true',
help='evaluation with BLEU scores')
parser.add_argument('--eval-bleu-detok', type=str, default="space",
help='detokenize before computing BLEU (e.g., "moses"); '
'required if using --eval-bleu; use "space" to '
'disable detokenization; see fairseq.data.encoders '
'for other options')
parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON',
help='args for building the tokenizer, if needed')
parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False,
help='compute tokenized BLEU instead of sacrebleu')
parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const='@@ ', default=None,
help='remove BPE before computing BLEU')
parser.add_argument('--eval-bleu-args', type=str, metavar='JSON',
help='generation args for BLUE scoring, '
'e.g., \'{"beam": 4, "lenpen": 0.6}\'')
parser.add_argument('--eval-bleu-print-samples', action='store_true',
help='print sample generations during validation')
def __init__(self, args, common_dict, mono_langs, valid_lang_pairs):
super().__init__(args, common_dict, common_dict)
self.common_dict = common_dict
self.mono_langs = mono_langs
self.valid_lang_pairs = valid_lang_pairs
self.SHOW_SAMPLES_INTERVAL = 1000
self._show_samples_ctr = self.SHOW_SAMPLES_INTERVAL
self.SHOW_SAMPLES_NUMBER = 5
self.lambda_bt = PiecewiseLinearFn.from_string(args.lambda_bt)
self.lambda_dae = PiecewiseLinearFn.from_string(args.lambda_dae)
self.args = args
self.data = utils.split_paths(self.args.data)
if len(self.data) == 1:
shards = list(Path(self.data[0]).glob("shard*"))
if len(shards) > 0:
old_data = self.data
self.data = [str(shard) for shard in shards]
logging.warning(f"Expanded data directory {old_data} to {self.data}")
@classmethod
def setup_task(cls, args, **kwargs):
args.left_pad_source = options.eval_bool(args.left_pad_source)
args.left_pad_target = options.eval_bool(args.left_pad_target)
paths = utils.split_paths(args.data)
assert len(paths) > 0
assert args.mono_langs is not None
mono_langs = args.mono_langs.split(",")
valid_lang_pairs = args.valid_lang_pairs.split(",")
dict_path = os.path.join(paths[0], "dict.txt")
common_dict = cls.load_dictionary(dict_path)
return cls(args, common_dict, mono_langs, valid_lang_pairs)
def load_dataset(self, split, epoch=1, combine=False, **kwargs) -> FairseqDataset:
if split == "train":
data_path = self.data[(epoch - 1) % len(self.data)]
dataset = self.load_train_dataset(data_path)
else:
dataset = self.load_translation_dataset(split, self.data[0])
self.datasets[split] = dataset
return dataset
def load_train_dataset(self, data_path: str) -> FairseqDataset:
data = []
for lang in self.mono_langs:
train_path = os.path.join(data_path, lang, "train")
data.append((f"{lang}-BT", self.load_bt_dataset(train_path, lang)))
data.append(
(f"{lang}-DENOISE", self.load_denoise_dataset(train_path, lang))
)
return RoundRobinZipDatasets(OrderedDict(data))
def _langpair_dataset(
self, src: FairseqDataset, tgt: FairseqDataset
) -> LanguagePairDataset:
return LanguagePairDataset(
src,
src.sizes,
self.dictionary,
tgt=tgt,
tgt_sizes=tgt.sizes,
tgt_dict=self.dictionary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
)
def _prepend_lang_bos_to_target(
self, dataset: LanguagePairDataset, lang: str
) -> LanguagePairDataset:
bos = _lang_token_index(self.dictionary, lang)
return TransformEosLangPairDataset(
dataset,
src_eos=self.dictionary.eos(),
new_src_eos=self.dictionary.eos(),
tgt_bos=self.dictionary.eos(),
new_tgt_bos=bos,
)
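    # Sketch of the wrapper's effect at collate time: the decoder's first
    # input token becomes the target-language token (new_tgt_bos) instead of
    # </s>, while the source-side eos is passed through unchanged.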
def load_bt_dataset(self, data_path: str, lang: str) -> FairseqDataset:
mono_dataset = data_utils.load_indexed_dataset(
data_path, self.common_dict, self.args.dataset_impl
)
assert mono_dataset is not None, f"No dataset found for {lang}"
mono_dataset_src = PrependTokenDataset(
mono_dataset, _lang_token_index(self.dictionary, lang)
)
mono_dataset_bt = self._langpair_dataset(mono_dataset_src, mono_dataset)
logger.info(
f"mono_lang = {lang} "
f"lang token index = {_lang_token_index(self.dictionary, lang)} "
f"lang token = {_lang_token(lang)}"
)
mono_dataset_bt = self._prepend_lang_bos_to_target(mono_dataset_bt, lang)
return mono_dataset_bt
def load_denoise_dataset(self, data_path: str, lang: str) -> FairseqDataset:
dataset = data_utils.load_indexed_dataset(
data_path, self.common_dict, self.args.dataset_impl
)
noisy_dataset = NoisingDataset(
dataset,
self.dictionary,
seed=1,
max_word_shuffle_distance=self.args.max_word_shuffle_distance,
word_dropout_prob=self.args.word_dropout_prob,
word_blanking_prob=self.args.word_blanking_prob,
)
noisy_dataset = PrependTokenDataset(
noisy_dataset, _lang_token_index(self.dictionary, lang)
)
clean_dataset = data_utils.load_indexed_dataset(
data_path, self.common_dict, self.args.dataset_impl
)
denoising_dataset = self._langpair_dataset(noisy_dataset, clean_dataset)
denoising_dataset = self._prepend_lang_bos_to_target(denoising_dataset, lang)
return denoising_dataset
def load_translation_dataset(
self, split: str, data_path: str, combine: bool = False
):
assert len(self.valid_lang_pairs) == 1, "For now..."
valid_lang_pair = self.valid_lang_pairs[0]
src, tgt = valid_lang_pair.split("-")
        # use the same function as TranslationTask
src_tgt_dt = load_langpair_dataset(
data_path,
split,
src,
self.common_dict,
tgt,
self.common_dict,
combine=combine,
dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=self.args.max_source_positions,
max_target_positions=self.args.max_target_positions,
load_alignments=self.args.load_alignments,
truncate_source=self.args.truncate_source,
num_buckets=self.args.num_batch_buckets,
shuffle=(split != "test"),
prepend_bos_src=_lang_token_index(self.dictionary, src),
)
src_tgt_eos_dt = self._prepend_lang_bos_to_target(src_tgt_dt, tgt)
src_tgt_eos_dt.args = self.args
return src_tgt_eos_dt
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
raise NotImplementedError
def build_model(self, args, from_checkpoint=False):
# torch.autograd.set_detect_anomaly(True)
model = super().build_model(args, from_checkpoint)
        add_special_tokens_to_dict_and_model(self.common_dict, model, self.mono_langs)
self.sequence_generators = {}
for mono_lang in self.mono_langs:
self.sequence_generators[mono_lang] = SequenceGenerator(
[model],
tgt_dict=self.dictionary,
beam_size=1,
max_len_a=1.3,
max_len_b=5,
min_len=5,
# keep 1 to be able to prepend bos
max_len=model.max_decoder_positions() - 1,
)
if getattr(args, "eval_bleu", False):
assert getattr(args, "eval_bleu_detok", None) is not None, (
"--eval-bleu-detok is required if using --eval-bleu; "
"try --eval-bleu-detok=moses (or --eval-bleu-detok=space "
"to disable detokenization, e.g., when using sentencepiece)"
)
detok_args = json.loads(getattr(args, "eval_bleu_detok_args", "{}") or "{}")
self.tokenizer = encoders.build_tokenizer(
Namespace(
tokenizer=getattr(args, "eval_bleu_detok", None), **detok_args
)
)
gen_args = json.loads(getattr(args, "eval_bleu_args", "{}") or "{}")
self.bleu_sequence_generator = self.build_generator(
[model], Namespace(**gen_args)
)
return model
def max_positions(self):
return (self.args.max_source_positions, self.args.max_target_positions)
@property
def dictionary(self):
return self.common_dict
def display_samples_once_in_a_while(self, smp, mono_lang, other_lang):
self._show_samples_ctr += 1
if self._show_samples_ctr < self.SHOW_SAMPLES_INTERVAL:
return
self._show_samples_ctr = 0
ln = smp["net_input"]["src_tokens"].shape[0]
logger.info(
f"(r:{self.args.distributed_rank}) : "
f"{other_lang} ---> {mono_lang} "
f"({other_lang} was generated by back-translation.) {ln} samples"
)
for i in range(min(ln, self.SHOW_SAMPLES_NUMBER)):
src_tokens = smp["net_input"]["src_tokens"][i]
tgt_tokens = smp["target"][i]
src_str = self.dictionary.string(src_tokens, "sentencepiece")
tgt_str = self.dictionary.string(tgt_tokens, "sentencepiece")
logger.info(
f"\n{i}\t\t[{other_lang} generated] {src_str}\n"
f"\t\t[{mono_lang} original ] {tgt_str}\n"
f"\t\t[ src tokens] {src_tokens}\n"
)
def backtranslate_sample(self, smp, orig_lang, other_lang) -> None:
bos_token = _lang_token_index(self.dictionary, other_lang)
generated = self.sequence_generators[orig_lang].generate(
models=[], sample=smp, bos_token=bos_token
)
        max_length = max(gn[0]["tokens"].size(0) for gn in generated)
net_input = smp["net_input"]
n_src_tokens = torch.empty(
            size=(len(generated), max_length + 1), dtype=net_input["src_tokens"].dtype
)
n_src_lengths = torch.empty(
len(generated), dtype=net_input["src_lengths"].dtype
)
for i, gn in enumerate(generated):
tokens = gn[0]["tokens"]
tokens_size = tokens.size(0)
            padding_needed = max_length - tokens_size
tokens = torch.cat([tokens.new([bos_token]), tokens])
tokens = F.pad(tokens, (0, padding_needed), value=self.dictionary.pad())
n_src_tokens[i] = tokens
n_src_lengths[i] = tokens_size + 1
device = net_input["src_tokens"].device
# This seems to be important
del net_input["src_tokens"]
del net_input["src_lengths"]
net_input["src_tokens"] = n_src_tokens.to(device)
net_input["src_lengths"] = n_src_lengths.to(device)
def generate(self, smp, model):
model.eval()
orig_lang = (
self.dictionary[smp["net_input"]["src_tokens"][0][0]]
.replace(" ", "")
.replace("_", "")
)
bos_token = smp["net_input"]["prev_output_tokens"][0][0]
with torch.no_grad():
generated = self.sequence_generators[orig_lang].generate(
models=[model], sample=smp, bos_token=bos_token
)
return generated
def get_other_lang(self, lang):
# TODO: allow more complex mapping
if lang != self.mono_langs[0]:
return self.mono_langs[0]
if len(self.mono_langs) == 2:
return self.mono_langs[1]
return self.mono_langs[np.random.randint(1, len(self.mono_langs))]
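    # Example: with mono_langs == ["en", "de", "fr"], get_other_lang("de")
    # returns "en", while get_other_lang("en") picks "de" or "fr" at random.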
def train_step(
self, sample, model, criterion, optimizer, update_num, ignore_grad=False
):
model.train()
model.set_num_updates(update_num)
agg_loss, agg_sample_size = 0.0, 0.0
agg_logging_output: Dict[str, float] = defaultdict(float)
dataset_keys = self.datasets["train"].datasets.keys()
weights = {
"BT": self.lambda_bt(update_num),
"DENOISE": self.lambda_dae(update_num),
}
log_keys = {"BT": "bt_", "DENOISE": "dae_"}
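        # dataset keys look like "en-BT" / "en-DENOISE" (built in
        # load_train_dataset), so each key splits into (mono_lang, subtype)
        # and the matching lambda weight gates that subtask below.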
for dataset_key in dataset_keys:
smp = sample[dataset_key]
mono_lang, task_subtype = dataset_key.split("-")
if weights[task_subtype] == 0:
continue
if task_subtype == "BT":
with torch.autograd.profiler.record_function("backtranslation"):
model.eval()
                    # TODO: Could we translate to several languages at once?
# this would allow to share encoder_out and maximize GPU usage.
other_lang = self.get_other_lang(mono_lang)
self.backtranslate_sample(smp, mono_lang, other_lang)
self.display_samples_once_in_a_while(smp, mono_lang, other_lang)
model.train()
# Like in FairseqTask.train_step
with torch.autograd.profiler.record_function("forward"):
loss, sample_size, logging_output = criterion(model, smp)
loss *= weights[task_subtype]
if ignore_grad:
loss *= 0
with torch.autograd.profiler.record_function("backward"):
optimizer.backward(loss)
agg_loss += loss.item()
agg_sample_size += sample_size
for k in logging_output:
agg_logging_output[log_keys[task_subtype] + k] += logging_output[k]
agg_logging_output[k] += logging_output[k]
return agg_loss, agg_sample_size, agg_logging_output
def get_bos_token_from_sample(self, sample):
net_input = sample["net_input"]
source_lang_token_id = torch.unique(net_input["src_tokens"][:, 0]).item()
source_lang_token = self.dictionary[source_lang_token_id].replace("_", "")
target_lang_token_id = _lang_token_index(
self.dictionary, self.get_other_lang(source_lang_token)
)
return target_lang_token_id
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
bt_sample_size = sum(x.get("bt_sample_size", 0) for x in logging_outputs)
if bt_sample_size:
bt_loss_sum = sum(x.get("bt_loss", 0) for x in logging_outputs)
bt_loss_sum *= 1 / bt_sample_size / math.log(2)
metrics.log_scalar("bt_loss", bt_loss_sum, bt_sample_size, round=3)
bt_nll_loss_sum = sum(x.get("bt_nll_loss", 0) for x in logging_outputs)
bt_ntokens = sum(x.get("bt_ntokens", 0) for x in logging_outputs)
bt_nll_loss_sum *= 1 / bt_ntokens / math.log(2)
metrics.log_scalar("bt_nll_loss", bt_nll_loss_sum, bt_ntokens, round=3)
metrics.log_derived(
"bt_ppl", lambda meters: utils.get_perplexity(meters["bt_nll_loss"].avg)
)
dae_sample_size = sum(x.get("dae_sample_size", 0) for x in logging_outputs)
if dae_sample_size:
dae_loss_sum = sum(x.get("dae_loss", 0) for x in logging_outputs)
dae_loss_sum *= 1 / dae_sample_size / math.log(2)
metrics.log_scalar("dae_loss", dae_loss_sum, dae_sample_size, round=3)
dae_nll_loss_sum = sum(x.get("dae_nll_loss", 0) for x in logging_outputs)
dae_ntokens = sum(x.get("dae_ntokens", 0) for x in logging_outputs)
dae_nll_loss_sum *= 1 / dae_ntokens / math.log(2)
metrics.log_scalar("dae_nll_loss", dae_nll_loss_sum, dae_ntokens, round=3)
metrics.log_derived(
"dae_ppl",
lambda meters: utils.get_perplexity(meters["dae_nll_loss"].avg),
)
@torch.no_grad()
def extend_embedding(
emb: nn.Module, new_vocab_size: int, copy_from_token_id: int
) -> None:
old_emb_data = emb.weight.data
(old_vocab_size, dim) = old_emb_data.shape
assert new_vocab_size >= old_vocab_size
if new_vocab_size > old_vocab_size:
emb.weight.data = torch.zeros((new_vocab_size, dim))
emb.weight.data[:old_vocab_size, :] = old_emb_data
# initialize new embeddings
emb.weight.data[old_vocab_size:, :] = old_emb_data[copy_from_token_id]
if hasattr(emb, "num_embeddings"):
emb.num_embeddings = new_vocab_size
if hasattr(emb, "out_features"):
emb.out_features = new_vocab_size
if getattr(emb, "bias", None) is None:
return
# Fix the bias.
# Bias shape can be different from the previous vocab size
    # if the weight matrix was shared and already extended but not the bias.
(old_vocab_size,) = emb.bias.shape
assert new_vocab_size >= old_vocab_size
if new_vocab_size > old_vocab_size:
old_bias = emb.bias.data
new_bias = torch.zeros(
(new_vocab_size,), dtype=old_bias.dtype, device=old_bias.device
)
new_bias[:old_vocab_size] = old_bias
emb.bias.data = new_bias
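# Example (hypothetical sizes): growing a tied 32000-row embedding to 32003
# after adding "<mask>" plus two language tokens keeps the old rows and copies
# the bos row into the three new slots, so new tokens start from a sane,
# non-random embedding.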
def add_special_tokens_to_dict_and_model(
dictionary: "fairseq.data.Dictionary",
model: nn.Module,
mono_langs: Sequence[str],
) -> None:
embs = model.encoder.embed_tokens
vocab_size, embedding_dim = embs.weight.shape
# The model may or may not have a '<mask>' embedding yet
assert (
len(dictionary) <= vocab_size <= len(dictionary) + 1
), f"Dictionary len ({len(dictionary)}) doesn't match embs shape ({embs.weight.shape})"
dictionary.add_symbol("<mask>")
for lang in mono_langs:
lang_token = _lang_token(lang)
dictionary.add_symbol(lang_token)
logger.info(
f"dictionary: {len(dictionary)} -> {vocab_size} tokens "
f"after adding {len(mono_langs)} lang tokens."
)
if len(dictionary) <= vocab_size:
return
extend_embedding(embs, len(dictionary), dictionary.bos())
dec_embs = model.decoder.embed_tokens
extend_embedding(dec_embs, len(dictionary), dictionary.bos())
lm_head = model.decoder.output_projection
extend_embedding(lm_head, len(dictionary), dictionary.bos())
assert lm_head.weight.shape == (len(dictionary), embedding_dim)
def _lang_token(lang: str) -> str:
return f"__{lang}__"
def _lang_token_index(dictionary, lang: str) -> int:
return dictionary.index(_lang_token(lang))
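# e.g. _lang_token("en") == "__en__", which _lang_token_index resolves via
# dictionary.index("__en__"); the resulting id depends on the dictionary.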
@contextlib.contextmanager
def assert_weights_have_changed(model: nn.Module):
def checksum(model: nn.Module) -> float:
return sum(p.sum().item() for p in model.parameters())
initial_checksum = checksum(model)
yield model
final_checksum = checksum(model)
logger.info(
f"initial_checksum={initial_checksum} -> final_checksum={final_checksum}"
)
    assert initial_checksum != final_checksum, "Model hasn't changed!"
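# Sketch of the intended use (this helper is not exercised in this file):
#   with assert_weights_have_changed(model):
#       trainer.train_step(sample)  # hypothetical training call
# The checksum is a cheap smoke test that *some* parameter moved, not proof
# that gradients reached every parameter.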
f73812c10b4508b711c1985c5113732da9072f54 | 2,167 | py | Python | actions/deleteCoreV1NamespacedEndpoints.py | blinkops/stackstorm-kubernetes | 3b4a15d42f603f3e700efaf534169e2ec361f5d2 | [
"Apache-2.0"
] | 20 | 2016-12-24T01:35:41.000Z | 2022-03-06T08:32:16.000Z | actions/deleteCoreV1NamespacedEndpoints.py | blinkops/stackstorm-kubernetes | 3b4a15d42f603f3e700efaf534169e2ec361f5d2 | [
"Apache-2.0"
] | 16 | 2017-05-02T19:38:57.000Z | 2021-06-17T08:31:17.000Z | actions/deleteCoreV1NamespacedEndpoints.py | blinkops/stackstorm-kubernetes | 3b4a15d42f603f3e700efaf534169e2ec361f5d2 | [
"Apache-2.0"
] | 18 | 2017-06-20T00:44:12.000Z | 2022-03-30T08:41:42.000Z | import json
from lib.k8s import K8sClient
class deleteCoreV1NamespacedEndpoints(K8sClient):
def run(
self,
body,
name,
namespace,
gracePeriodSeconds=None,
orphanDependents=None,
pretty=None,
config_override=None):
ret = False
args = {}
args['config_override'] = {}
args['params'] = {}
if config_override is not None:
args['config_override'] = config_override
if body is not None:
args['body'] = body
else:
return (False, "body is a required parameter")
if name is not None:
args['name'] = name
else:
return (False, "name is a required parameter")
if namespace is not None:
args['namespace'] = namespace
else:
return (False, "namespace is a required parameter")
if gracePeriodSeconds is not None:
args['params'].update({'gracePeriodSeconds': gracePeriodSeconds})
if orphanDependents is not None:
args['params'].update({'orphanDependents': orphanDependents})
if pretty is not None:
args['params'].update({'pretty': pretty})
if 'body' in args:
args['data'] = args['body']
args.pop('body')
args['headers'] = {'Content-type': u'application/json', 'Accept': u'application/json, application/yaml, application/vnd.kubernetes.protobuf'} # noqa pylint: disable=line-too-long
args['url'] = "api/v1/namespaces/{namespace}/endpoints/{name}".format( # noqa pylint: disable=line-too-long
body=body, name=name, namespace=namespace)
args['method'] = "delete"
self.addArgs(**args)
self.makeRequest()
myresp = {}
myresp['status_code'] = self.resp.status_code
try:
myresp['data'] = json.loads(self.resp.content.rstrip())
except ValueError:
myresp['data'] = self.resp.content
if myresp['status_code'] >= 200 and myresp['status_code'] <= 299:
ret = True
return (ret, myresp)
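# Illustrative use (StackStorm normally instantiates and invokes the action;
# the constructor argument here is hypothetical):
#   action = deleteCoreV1NamespacedEndpoints(config={})
#   ok, resp = action.run(body={}, name="my-endpoints", namespace="default")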
f73813ea2908459d53ac6d7a74bd3c5e1d643145 | 7,550 | py | Python | hexfile.py | risapav/ihex_analyzer | 7162e3cec87260ed3451e43a63374a26e6a91248 | [
"MIT"
] | null | null | null | hexfile.py | risapav/ihex_analyzer | 7162e3cec87260ed3451e43a63374a26e6a91248 | [
"MIT"
] | null | null | null | hexfile.py | risapav/ihex_analyzer | 7162e3cec87260ed3451e43a63374a26e6a91248 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Intel HEX Analyzer
==================
A command line utility for analyzing Intel HEX files.
See the module's `Github homepage <https://github.com/risapav/ihex_analyzer>`_
for details.
"""
# libraries used
import struct
import codecs
# constant definitions
ROWTYPE_DATA = 0x00 # Data container
ROWTYPE_EOF = 0x01 # End of file
ROWTYPE_EXT_SEG_ADDR = 0x02 # Extended Segment Address
ROWTYPE_START_SEG_ADDR = 0x03 # Start Segment Address
ROWTYPE_EXT_LIN_ADDR = 0x04 # Extended Linear Address
ROWTYPE_START_LIN_ADDR = 0x05 # Start Linear Address
# class definitions
class HexFile:
    """
    Class that processes an Intel HEX file.
    """
    def __init__(self, filename):
        # name of the file in Intel HEX format
        self._filename = filename
        # values that do not affect where data is placed in memory
        self._CS = 0
        self._IP = 0
        self._EIP = 0
        # values needed to compute the memory placement
        self._ADDRESS = 0
        self._SBA = 0
        self._LBA = 0
        self._typ = ROWTYPE_DATA
    # run the analysis of the Intel HEX file
def doAnalyze(self):
with open(self._filename, 'r', encoding='utf-8') as fp:
cnt = 1
for line in fp:
line = line.strip()
if not line:
continue
                # every line must start with the ':' character
                if not line.startswith(':'):
                    raise ValueError(
                        "Invalid line start character (%r)" % line[0])
                # ------------------------------------------------------------
                # compute the expected length of the ihex record string
                data = self.byteCnv(line[1:3])
                # 1[:] + 2[LL] + 4[AAAA] + 2[TT] + 2n[DATA] + 2[CC]
                dataend = 1 + 2 + 4 + 2 + 2*data + 2
                # ------------------------------------------------------------
                # the checksum computed over the rest of the line must be 0
                crc = self.calcChecksum(line[1:dataend])
                if crc != 0:
                    raise ValueError(
                        "Record checksum doesn't match on line %d" % cnt)
                # ------------------------------------------------------------
                # the line is now valid, the analysis can start
typ, length, addr, data = self.parseLine(
cnt, line[1:dataend - 2])
self.analyzeLine(typ, length, addr, data)
cnt += 1
    # set the address at which the record's data is placed
    # drlo - data record load offset (Word)
    def setAddress(self, drlo):
        # data record index
        dri = 0
        if self._typ == ROWTYPE_EXT_SEG_ADDR:  # Extended Segment Address
            # the offset wraps modulo 64K per the Intel HEX specification
            self._ADDRESS = self._SBA * 0x10 + (drlo + dri) % 0x10000
        elif self._typ == ROWTYPE_EXT_LIN_ADDR:  # Extended Linear Address
            # the address wraps modulo 4G per the Intel HEX specification
            self._ADDRESS = (self._LBA * 0x10000 + drlo + dri) % 0x100000000
        else:
            self._ADDRESS = drlo + dri
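    # Worked example: after a type-04 record carrying "0800", _LBA == 0x0800,
    # so a data record with offset drlo == 0x0100 is placed at
    # 0x0800 * 0x10000 + 0x0100 == 0x08000100.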
    # convert a text string to a Byte-sized number
    # data - text string, 2 hex characters
    def byteCnv(self, data):
        buffer = codecs.decode(data, "hex")
        return struct.unpack(">B", buffer[0:1])[0]
    # convert a text string to a Word-sized number
    # data - text string, 4 hex characters
    def wordCnv(self, data):
        buffer = codecs.decode(data, "hex")
        return struct.unpack(">H", buffer[0:2])[0]
    # convert a text string to a DWord-sized number
    # data - text string, 8 hex characters
    def dwordCnv(self, data):
        buffer = codecs.decode(data, "hex")
        return struct.unpack(">I", buffer[0:4])[0]
    # text output to stdout
    # typ - record type 0-5
    # length - length of the data part
    # addr - address read from the record (offset)
    # data - text string with the data
    # txt - comment
def txtMessage(self, typ, length, addr, data, txt):
print('{0:0{1}X}'.format(self._ADDRESS, 8),
"typ:", '{0:0{1}X}'.format(typ, 2),
"addr:", '{0:0{1}X}'.format(addr, 4),
"len:", '{0:0{1}X}'.format(length, 2),
"data:", data,
" -> ", txt
)
    # analyze a parsed line
    # typ - record type 0-5
    # length - length of the data part
    # addr - address read from the record (offset)
    # data - text string with the data
def analyzeLine(self, typ, length, addr, data):
if typ == ROWTYPE_DATA: # Data container 0x00
self.setAddress(addr)
self.txtMessage(typ, length, addr, data, " data ")
elif typ == ROWTYPE_EOF: # End of file 0x01
# print("End of file") # Should we check for garbage after this?
self._typ = ROWTYPE_DATA
self.setAddress(addr)
self.txtMessage(typ, length, addr, data, "End of file")
elif typ == ROWTYPE_EXT_SEG_ADDR: # Extended Segment Address 0x02
# SBA + ([DRLO + DRI] MOD 64K)
self._typ = ROWTYPE_DATA
self.setAddress(addr)
self.txtMessage(typ, length, addr, data,
"Extended Segment Address")
self._SBA = self.wordCnv(data)
self._typ = ROWTYPE_EXT_SEG_ADDR
elif typ == ROWTYPE_START_SEG_ADDR: # Start Segment Address 0x03
# CS:IP
self._typ = ROWTYPE_DATA
self.setAddress(addr)
self.txtMessage(typ, length, addr,
data, "Start Segment Address")
            # CS and IP are 16 bits each, i.e. 4 hex characters each
            self._CS = self.wordCnv(data[0:4])
            self._IP = self.wordCnv(data[4:8])
elif typ == ROWTYPE_EXT_LIN_ADDR: # Extended Linear Address 0x04
# (LBA + DRLO + DRI) MOD 4G
self._typ = ROWTYPE_DATA
self.setAddress(addr)
self.txtMessage(typ, length, addr, data,
"Extended Linear Address")
self._LBA = self.wordCnv(data)
self._typ = ROWTYPE_EXT_LIN_ADDR
elif typ == ROWTYPE_START_LIN_ADDR: # Start Linear Address 0x05
# EIP
self._typ = ROWTYPE_DATA
self.txtMessage(typ, length, addr,
data, "Start Linear Address")
            # EIP is 32 bits, i.e. 8 hex characters
            self._EIP = self.dwordCnv(data[0:8])
else: # undefined record
raise ValueError("Invalid type byte")
    # compute the record checksum
    # data - text string with the data
    def calcChecksum(self, data):
        crc = 0
        buffer = codecs.decode(data, "hex")
for byte in buffer:
crc += byte
return crc & 0xFF
    # parse a single line read from the file
    # cnt - number of the line read
    # rawline - text string, one line from the file
    def parseLine(self, cnt, rawline):
        try:
            # length of the data in the record
            length = self.byteCnv(rawline[0:2])
            # placement address
            addr = self.wordCnv(rawline[2:6])
            # record type
            typ = self.byteCnv(rawline[6:8])
            # record data
            data = rawline[8:]
            return (typ, length, addr, data)
        except ValueError:
            raise ValueError("Invalid hex data on line %d" % cnt)
# main function
def main():
hexfile = HexFile('demo/ds30loader.X.production.hex')
hexfile.doAnalyze()
return 0
# program entry point
main()
f7381597c07096508b011a0a810cce6703d381a9 | 7,724 | py | Python | main.py | TheArcher1958/GrubGuardianBot-XP | 8c3381919956a1060632847015ff8dc91f602dab | [
"MIT"
] | null | null | null | main.py | TheArcher1958/GrubGuardianBot-XP | 8c3381919956a1060632847015ff8dc91f602dab | [
"MIT"
] | null | null | null | main.py | TheArcher1958/GrubGuardianBot-XP | 8c3381919956a1060632847015ff8dc91f602dab | [
"MIT"
] | null | null | null | import tkinter as tk
import time
import threading
global autoXP, manualXP, roundTitle, button
from google.cloud import vision
import re
import pyautogui
global autoXPIsOn
autoXPIsOn = False
def getRoundsToPlay():
    pyautogui.screenshot('energyCount.png', region=(x + 286, y + 430, 45, 32))  # screenshot of the current elixir count, using coordinates relative to the game boundaries
    pyautogui.screenshot('energyCost.png', region=(x + 494, y + 380, 36, 24))  # screenshot of the energy cost, using coordinates relative to the game boundaries
energyCount = detect_text("energyCount.png")
energyCost = detect_text("energyCost.png")
return int(energyCount / energyCost)
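    # e.g. 60 energy at a cost of 8 per game -> int(60 / 8) == 7 playable rounds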
def detect_text(path):
"""Detects text in the file."""
import io
client = vision.ImageAnnotatorClient()
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision.types.Image(content=content)
response = client.text_detection(image=image)
texts = response.text_annotations
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
return int(re.search(r'\d+', texts[0].description).group())
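# Note: vision.ImageAnnotatorClient() authenticates via Application Default
# Credentials; in practice that usually means pointing the
# GOOGLE_APPLICATION_CREDENTIALS environment variable at a service-account
# key file before launching the bot, e.g. (illustrative path):
#   export GOOGLE_APPLICATION_CREDENTIALS="/path/to/vision-key.json"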
def playRounds():
time.sleep(0.5)
pyautogui.click(x + 121, y + 189) # click on unicorn way
time.sleep(0.5)
pyautogui.click(x + 500, y + 430) # click play button
time.sleep(0.5)
skipButton = pyautogui.pixel(int(x + 215), int(y + 459))
    while skipButton[0] != 158 or skipButton[1] != 20 or skipButton[2] != 20:  # poll until the pixel is red (all three channels must match) to indicate that the skip button is on screen
time.sleep(0.1)
skipButton = pyautogui.pixel(int(x + 215), int(y + 459))
pyautogui.click(x + 215, y + 459) # click on the skip button
time.sleep(0.5)
pyautogui.click(x + 398, y + 254) # click confirm skip
time.sleep(0.5)
pyautogui.click(x + 278, y + 254) # click to place pet
pyautogui.click()
time.sleep(0.5)
pyautogui.click(x + 241, y + 214) # click on space to place first tower
time.sleep(0.5)
pyautogui.click(x + 322, y + 169) # click to buy the first avalon tower
time.sleep(0.5)
pyautogui.click(x + 241, y + 214) # click on space to select first avalon tower
time.sleep(0.5)
pyautogui.click(x + 236, y + 162) # click to upgrade the first avalon tower
time.sleep(0.5)
pyautogui.click(x + 269, y + 310) # click on space to place second tower
time.sleep(0.5)
pyautogui.click(x + 351, y + 260) # click to buy the second avalon tower
time.sleep(0.5)
pyautogui.click(x + 269, y + 310) # click on space to select second avalon tower
time.sleep(0.5)
pyautogui.click(x + 270, y + 258) # click to upgrade the second avalon tower
time.sleep(0.5)
pyautogui.click(x + 602, y + 439) # click on the GO button
time.sleep(0.5)
pyautogui.click(x + 567, y + 19) # click fast forward
skipButton = pyautogui.pixel(int(x + 586), int(y + 459))
    while skipButton[0] != 105 or skipButton[1] != 202 or skipButton[2] != 10:  # poll until the pixel is green (all three channels must match) to indicate that the next button is on screen
time.sleep(0.1)
skipButton = pyautogui.pixel(int(x + 586), int(y + 459))
pyautogui.click(x + 586, y + 459) # click next button
time.sleep(0.7)
skipButton = pyautogui.pixel(int(x + 179), int(y + 270))
    while skipButton[0] != 13 or skipButton[1] != 116 or skipButton[2] != 183:  # poll until the pixel is blue (all three channels must match) to indicate that the feed pet button is on screen
time.sleep(0.1)
skipButton = pyautogui.pixel(int(x + 179), int(y + 270))
time.sleep(0.5)
pyautogui.click(x + 179, y + 270) # click feed pet button
skipButton = pyautogui.pixel(int(x + 317), int(y + 415))
    while skipButton[0] != 142 or skipButton[1] != 29 or skipButton[2] != 229:  # poll until the pixel is purple (all three channels must match) to indicate that pet snacks are on screen
time.sleep(0.1)
skipButton = pyautogui.pixel(int(x + 317), int(y + 415))
pyautogui.click(x + 112, y + 226) # click on the first pet snack (highest tier)
time.sleep(0.5)
pyautogui.click(x + 317, y + 415) # click on the select button
skipButton = pyautogui.pixel(int(x + 483), int(y + 421))
    while skipButton[0] != 103 or skipButton[1] != 204 or skipButton[2] != 10:  # poll until the pixel is green (all three channels must match) to indicate the play button is on screen
time.sleep(0.1)
skipButton = pyautogui.pixel(int(x + 483), int(y + 421))
pyautogui.click(x + 483, y + 421) # click on the play button
pyautogui.moveTo(x,y)
time.sleep(1)
def startThread(amountOfRuns):
    if not autoXPIsOn:
roundsToPlay = amountOfRuns.get()
        if roundsToPlay.strip():  # require a non-empty, non-whitespace entry
button.config(state=tk.DISABLED)
t = threading.Thread(target=lambda: startGame(amountOfRuns))
t.daemon = True
t.start()
else:
button.config(state=tk.DISABLED)
t = threading.Thread(target=lambda: startGame(amountOfRuns))
t.daemon = True
t.start()
def startGame(amountOfRuns):
global x, y
time.sleep(1)
chromeLocation = pyautogui.locateCenterOnScreen('../../Desktop/GrubXPImages/chromeUnfocused.jpg',
confidence=0.94)
    if chromeLocation is not None:
pyautogui.moveTo(chromeLocation)
pyautogui.click()
time.sleep(1)
findGrubOnScreen = pyautogui.locateOnScreen('../../Desktop/GrubXPImages/grubLevelSelect.jpg',
confidence=0.9)
    if findGrubOnScreen is None:
return
x = findGrubOnScreen[0]
y = findGrubOnScreen[1]
    if autoXPIsOn:
roundsToPlay = getRoundsToPlay()
else:
roundsToPlay = int(amountOfRuns.get())
if roundsToPlay > 0:
for i in range(roundsToPlay):
roundTitle.config(text="Round: " + str(i + 1) + " / " + str(roundsToPlay))
playRounds()
button.config(state=tk.NORMAL)
def switchToAutomatic(entryToChange):
global autoXPIsOn
entryToChange.config(state=tk.DISABLED)
autoXPIsOn = True
def switchToManual(entryToChange):
global autoXPIsOn
entryToChange.config(state=tk.NORMAL)
autoXPIsOn = False
r = tk.Tk()
r.geometry("500x500")
r.config(background='#34b518')
r.title('Grub Guardian Bot')
mainTitle = tk.Label(r, text="Grub Guardian XP Tool", font='Helvetica 18 bold', fg='#0059b3', bg="#34b518")
roundTitle = tk.Label(r, text="Round: 0 / 0", font='Helvetica 14 bold', fg='#fc9d03', bg="#34b518")
autoXP = tk.Radiobutton(r, text="Automatic Mode", value=1, command=lambda: switchToAutomatic(runAmount), bg="#34b518", font='Helvetica 12')
manualXP = tk.Radiobutton(r, text="Manual Mode", value=2, command=lambda: switchToManual(runAmount), bg="#34b518", font='Helvetica 12')
roundTitle.place(x=190, y=80)
mainTitle.place(x=110,y=50)
autoXP.place(x=120, y=150)
manualXP.place(x=270, y=150)
runAmount = tk.Entry(r, width=20)
runAmount.place(x=300, y=227)
runLabel = tk.Label(r, text="# of runs:", font='Helvetica 10', bg="#34b518")
runLabel.place(x=240, y=225)
button = tk.Button(r, text='Start', width=25, command=lambda: startThread(runAmount))
button.place(x=165, y=300)
r.mainloop()
f738164b756c30794a64f10575ebfcbdae1fe688 | 475 | py | Python | Task/migrations/0009_task_help_text.py | DudaEugen/JustTesting | 7b62c7f5d1d918c3fe82bf00aff4009212427a6f | [
"MIT"
] | null | null | null | Task/migrations/0009_task_help_text.py | DudaEugen/JustTesting | 7b62c7f5d1d918c3fe82bf00aff4009212427a6f | [
"MIT"
] | null | null | null | Task/migrations/0009_task_help_text.py | DudaEugen/JustTesting | 7b62c7f5d1d918c3fe82bf00aff4009212427a6f | [
"MIT"
] | null | null | null | # Generated by Django 3.2.4 on 2021-08-24 18:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Task', '0008_auto_20210815_1346'),
]
operations = [
migrations.AddField(
model_name='task',
name='help_text',
field=models.TextField(blank=True, default='', help_text='Введіть текст підказки до завдання', null=True, verbose_name='Підказка'),
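            # the Ukrainian literals above read: help_text "Enter the hint
            # text for the task", verbose_name "Hint"; they are kept verbatim
            # so the migration matches the model definition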
),
]
f7381b2885e3a108c9a106207058fa1c4fe40b04 | 11,775 | py | Python | tests/test_visibility.py | ewaf1/synapse | 77661ce81a799a375317dff9e4c8696da528984c | [
"Apache-2.0"
] | 2 | 2020-04-30T18:38:02.000Z | 2020-07-08T21:38:28.000Z | tests/test_visibility.py | ewaf1/synapse | 77661ce81a799a375317dff9e4c8696da528984c | [
"Apache-2.0"
] | 1 | 2020-02-10T10:03:31.000Z | 2020-02-10T10:03:31.000Z | tests/test_visibility.py | ewaf1/synapse | 77661ce81a799a375317dff9e4c8696da528984c | [
"Apache-2.0"
] | 2 | 2020-03-03T18:34:52.000Z | 2022-03-31T11:06:18.000Z | # -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from mock import Mock
from twisted.internet import defer
from twisted.internet.defer import succeed
from synapse.api.room_versions import RoomVersions
from synapse.events import FrozenEvent
from synapse.visibility import filter_events_for_server
import tests.unittest
from tests.utils import create_room, setup_test_homeserver
logger = logging.getLogger(__name__)
TEST_ROOM_ID = "!TEST:ROOM"
class FilterEventsForServerTestCase(tests.unittest.TestCase):
@defer.inlineCallbacks
def setUp(self):
self.hs = yield setup_test_homeserver(self.addCleanup)
self.event_creation_handler = self.hs.get_event_creation_handler()
self.event_builder_factory = self.hs.get_event_builder_factory()
self.store = self.hs.get_datastore()
self.storage = self.hs.get_storage()
yield create_room(self.hs, TEST_ROOM_ID, "@someone:ROOM")
@defer.inlineCallbacks
def test_filtering(self):
#
# The events to be filtered consist of 10 membership events (it doesn't
# really matter if they are joins or leaves, so let's make them joins).
# One of those membership events is going to be for a user on the
# server we are filtering for (so we can check the filtering is doing
# the right thing).
#
        # Before we do that, we persist some other events to act as state.
self.inject_visibility("@admin:hs", "joined")
for i in range(0, 10):
yield self.inject_room_member("@resident%i:hs" % i)
events_to_filter = []
for i in range(0, 10):
user = "@user%i:%s" % (i, "test_server" if i == 5 else "other_server")
evt = yield self.inject_room_member(user, extra_content={"a": "b"})
events_to_filter.append(evt)
filtered = yield filter_events_for_server(
self.storage, "test_server", events_to_filter
)
# the result should be 5 redacted events, and 5 unredacted events.
for i in range(0, 5):
self.assertEqual(events_to_filter[i].event_id, filtered[i].event_id)
self.assertNotIn("a", filtered[i].content)
for i in range(5, 10):
self.assertEqual(events_to_filter[i].event_id, filtered[i].event_id)
self.assertEqual(filtered[i].content["a"], "b")
@defer.inlineCallbacks
def test_erased_user(self):
# 4 message events, from erased and unerased users, with a membership
# change in the middle of them.
events_to_filter = []
evt = yield self.inject_message("@unerased:local_hs")
events_to_filter.append(evt)
evt = yield self.inject_message("@erased:local_hs")
events_to_filter.append(evt)
evt = yield self.inject_room_member("@joiner:remote_hs")
events_to_filter.append(evt)
evt = yield self.inject_message("@unerased:local_hs")
events_to_filter.append(evt)
evt = yield self.inject_message("@erased:local_hs")
events_to_filter.append(evt)
# the erasey user gets erased
yield self.hs.get_datastore().mark_user_erased("@erased:local_hs")
# ... and the filtering happens.
filtered = yield filter_events_for_server(
self.storage, "test_server", events_to_filter
)
for i in range(0, len(events_to_filter)):
self.assertEqual(
events_to_filter[i].event_id,
filtered[i].event_id,
"Unexpected event at result position %i" % (i,),
)
for i in (0, 3):
self.assertEqual(
events_to_filter[i].content["body"],
filtered[i].content["body"],
"Unexpected event content at result position %i" % (i,),
)
for i in (1, 4):
self.assertNotIn("body", filtered[i].content)
@defer.inlineCallbacks
def inject_visibility(self, user_id, visibility):
content = {"history_visibility": visibility}
builder = self.event_builder_factory.for_room_version(
RoomVersions.V1,
{
"type": "m.room.history_visibility",
"sender": user_id,
"state_key": "",
"room_id": TEST_ROOM_ID,
"content": content,
},
)
event, context = yield self.event_creation_handler.create_new_client_event(
builder
)
yield self.storage.persistence.persist_event(event, context)
return event
@defer.inlineCallbacks
def inject_room_member(self, user_id, membership="join", extra_content={}):
content = {"membership": membership}
content.update(extra_content)
builder = self.event_builder_factory.for_room_version(
RoomVersions.V1,
{
"type": "m.room.member",
"sender": user_id,
"state_key": user_id,
"room_id": TEST_ROOM_ID,
"content": content,
},
)
event, context = yield self.event_creation_handler.create_new_client_event(
builder
)
yield self.storage.persistence.persist_event(event, context)
return event
@defer.inlineCallbacks
def inject_message(self, user_id, content=None):
if content is None:
content = {"body": "testytest", "msgtype": "m.text"}
builder = self.event_builder_factory.for_room_version(
RoomVersions.V1,
{
"type": "m.room.message",
"sender": user_id,
"room_id": TEST_ROOM_ID,
"content": content,
},
)
event, context = yield self.event_creation_handler.create_new_client_event(
builder
)
yield self.storage.persistence.persist_event(event, context)
return event
@defer.inlineCallbacks
def test_large_room(self):
# see what happens when we have a large room with hundreds of thousands
# of membership events
# As above, the events to be filtered consist of 10 membership events,
# where one of them is for a user on the server we are filtering for.
import cProfile
import pstats
import time
# we stub out the store, because building up all that state the normal
# way is very slow.
test_store = _TestStore()
# our initial state is 100000 membership events and one
# history_visibility event.
room_state = []
history_visibility_evt = FrozenEvent(
{
"event_id": "$history_vis",
"type": "m.room.history_visibility",
"sender": "@resident_user_0:test.com",
"state_key": "",
"room_id": TEST_ROOM_ID,
"content": {"history_visibility": "joined"},
}
)
room_state.append(history_visibility_evt)
test_store.add_event(history_visibility_evt)
for i in range(0, 100000):
user = "@resident_user_%i:test.com" % (i,)
evt = FrozenEvent(
{
"event_id": "$res_event_%i" % (i,),
"type": "m.room.member",
"state_key": user,
"sender": user,
"room_id": TEST_ROOM_ID,
"content": {"membership": "join", "extra": "zzz,"},
}
)
room_state.append(evt)
test_store.add_event(evt)
events_to_filter = []
for i in range(0, 10):
user = "@user%i:%s" % (i, "test_server" if i == 5 else "other_server")
evt = FrozenEvent(
{
"event_id": "$evt%i" % (i,),
"type": "m.room.member",
"state_key": user,
"sender": user,
"room_id": TEST_ROOM_ID,
"content": {"membership": "join", "extra": "zzz"},
}
)
events_to_filter.append(evt)
room_state.append(evt)
test_store.add_event(evt)
test_store.set_state_ids_for_event(
evt, {(e.type, e.state_key): e.event_id for e in room_state}
)
pr = cProfile.Profile()
pr.enable()
logger.info("Starting filtering")
start = time.time()
storage = Mock()
storage.main = test_store
storage.state = test_store
filtered = yield filter_events_for_server(
test_store, "test_server", events_to_filter
)
logger.info("Filtering took %f seconds", time.time() - start)
pr.disable()
with open("filter_events_for_server.profile", "w+") as f:
ps = pstats.Stats(pr, stream=f).sort_stats("cumulative")
ps.print_stats()
# the result should be 5 redacted events, and 5 unredacted events.
for i in range(0, 5):
self.assertEqual(events_to_filter[i].event_id, filtered[i].event_id)
self.assertNotIn("extra", filtered[i].content)
for i in range(5, 10):
self.assertEqual(events_to_filter[i].event_id, filtered[i].event_id)
self.assertEqual(filtered[i].content["extra"], "zzz")
test_large_room.skip = "Disabled by default because it's slow"
class _TestStore(object):
"""Implements a few methods of the DataStore, so that we can test
filter_events_for_server
"""
def __init__(self):
# data for get_events: a map from event_id to event
self.events = {}
# data for get_state_ids_for_events mock: a map from event_id to
# a map from (type_state_key) -> event_id for the state at that
# event
self.state_ids_for_events = {}
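        # e.g. {"$evt0": {("m.room.member", "@user0:other_server"): "$evt0",
        #                 ("m.room.history_visibility", ""): "$history_vis"}}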
def add_event(self, event):
self.events[event.event_id] = event
def set_state_ids_for_event(self, event, state):
self.state_ids_for_events[event.event_id] = state
def get_state_ids_for_events(self, events, types):
res = {}
include_memberships = False
for (type, state_key) in types:
if type == "m.room.history_visibility":
continue
if type != "m.room.member" or state_key is not None:
raise RuntimeError(
"Unimplemented: get_state_ids with type (%s, %s)"
% (type, state_key)
)
include_memberships = True
if include_memberships:
for event_id in events:
res[event_id] = self.state_ids_for_events[event_id]
else:
k = ("m.room.history_visibility", "")
for event_id in events:
hve = self.state_ids_for_events[event_id][k]
res[event_id] = {k: hve}
return succeed(res)
def get_events(self, events):
return succeed({event_id: self.events[event_id] for event_id in events})
def are_users_erased(self, users):
return succeed({u: False for u in users})
| 34.530792 | 83 | 0.592527 |
import logging
from mock import Mock
from twisted.internet import defer
from twisted.internet.defer import succeed
from synapse.api.room_versions import RoomVersions
from synapse.events import FrozenEvent
from synapse.visibility import filter_events_for_server
import tests.unittest
from tests.utils import create_room, setup_test_homeserver
logger = logging.getLogger(__name__)
TEST_ROOM_ID = "!TEST:ROOM"
class FilterEventsForServerTestCase(tests.unittest.TestCase):
@defer.inlineCallbacks
def setUp(self):
self.hs = yield setup_test_homeserver(self.addCleanup)
self.event_creation_handler = self.hs.get_event_creation_handler()
self.event_builder_factory = self.hs.get_event_builder_factory()
self.store = self.hs.get_datastore()
self.storage = self.hs.get_storage()
yield create_room(self.hs, TEST_ROOM_ID, "@someone:ROOM")
@defer.inlineCallbacks
def test_filtering(self):
# really matter if they are joins or leaves, so let's make them joins).
self.inject_visibility("@admin:hs", "joined")
for i in range(0, 10):
yield self.inject_room_member("@resident%i:hs" % i)
events_to_filter = []
for i in range(0, 10):
user = "@user%i:%s" % (i, "test_server" if i == 5 else "other_server")
evt = yield self.inject_room_member(user, extra_content={"a": "b"})
events_to_filter.append(evt)
filtered = yield filter_events_for_server(
self.storage, "test_server", events_to_filter
)
for i in range(0, 5):
self.assertEqual(events_to_filter[i].event_id, filtered[i].event_id)
self.assertNotIn("a", filtered[i].content)
for i in range(5, 10):
self.assertEqual(events_to_filter[i].event_id, filtered[i].event_id)
self.assertEqual(filtered[i].content["a"], "b")
@defer.inlineCallbacks
def test_erased_user(self):
events_to_filter = []
evt = yield self.inject_message("@unerased:local_hs")
events_to_filter.append(evt)
evt = yield self.inject_message("@erased:local_hs")
events_to_filter.append(evt)
evt = yield self.inject_room_member("@joiner:remote_hs")
events_to_filter.append(evt)
evt = yield self.inject_message("@unerased:local_hs")
events_to_filter.append(evt)
evt = yield self.inject_message("@erased:local_hs")
events_to_filter.append(evt)
yield self.hs.get_datastore().mark_user_erased("@erased:local_hs")
filtered = yield filter_events_for_server(
self.storage, "test_server", events_to_filter
)
for i in range(0, len(events_to_filter)):
self.assertEqual(
events_to_filter[i].event_id,
filtered[i].event_id,
"Unexpected event at result position %i" % (i,),
)
for i in (0, 3):
self.assertEqual(
events_to_filter[i].content["body"],
filtered[i].content["body"],
"Unexpected event content at result position %i" % (i,),
)
for i in (1, 4):
self.assertNotIn("body", filtered[i].content)
@defer.inlineCallbacks
def inject_visibility(self, user_id, visibility):
content = {"history_visibility": visibility}
builder = self.event_builder_factory.for_room_version(
RoomVersions.V1,
{
"type": "m.room.history_visibility",
"sender": user_id,
"state_key": "",
"room_id": TEST_ROOM_ID,
"content": content,
},
)
event, context = yield self.event_creation_handler.create_new_client_event(
builder
)
yield self.storage.persistence.persist_event(event, context)
return event
@defer.inlineCallbacks
def inject_room_member(self, user_id, membership="join", extra_content={}):
content = {"membership": membership}
content.update(extra_content)
builder = self.event_builder_factory.for_room_version(
RoomVersions.V1,
{
"type": "m.room.member",
"sender": user_id,
"state_key": user_id,
"room_id": TEST_ROOM_ID,
"content": content,
},
)
event, context = yield self.event_creation_handler.create_new_client_event(
builder
)
yield self.storage.persistence.persist_event(event, context)
return event
@defer.inlineCallbacks
def inject_message(self, user_id, content=None):
if content is None:
content = {"body": "testytest", "msgtype": "m.text"}
builder = self.event_builder_factory.for_room_version(
RoomVersions.V1,
{
"type": "m.room.message",
"sender": user_id,
"room_id": TEST_ROOM_ID,
"content": content,
},
)
event, context = yield self.event_creation_handler.create_new_client_event(
builder
)
yield self.storage.persistence.persist_event(event, context)
return event
@defer.inlineCallbacks
def test_large_room(self):
import cProfile
import pstats
import time
test_store = _TestStore()
room_state = []
history_visibility_evt = FrozenEvent(
{
"event_id": "$history_vis",
"type": "m.room.history_visibility",
"sender": "@resident_user_0:test.com",
"state_key": "",
"room_id": TEST_ROOM_ID,
"content": {"history_visibility": "joined"},
}
)
room_state.append(history_visibility_evt)
test_store.add_event(history_visibility_evt)
for i in range(0, 100000):
user = "@resident_user_%i:test.com" % (i,)
evt = FrozenEvent(
{
"event_id": "$res_event_%i" % (i,),
"type": "m.room.member",
"state_key": user,
"sender": user,
"room_id": TEST_ROOM_ID,
"content": {"membership": "join", "extra": "zzz,"},
}
)
room_state.append(evt)
test_store.add_event(evt)
events_to_filter = []
for i in range(0, 10):
user = "@user%i:%s" % (i, "test_server" if i == 5 else "other_server")
evt = FrozenEvent(
{
"event_id": "$evt%i" % (i,),
"type": "m.room.member",
"state_key": user,
"sender": user,
"room_id": TEST_ROOM_ID,
"content": {"membership": "join", "extra": "zzz"},
}
)
events_to_filter.append(evt)
room_state.append(evt)
test_store.add_event(evt)
test_store.set_state_ids_for_event(
evt, {(e.type, e.state_key): e.event_id for e in room_state}
)
pr = cProfile.Profile()
pr.enable()
logger.info("Starting filtering")
start = time.time()
storage = Mock()
storage.main = test_store
storage.state = test_store
filtered = yield filter_events_for_server(
test_store, "test_server", events_to_filter
)
logger.info("Filtering took %f seconds", time.time() - start)
pr.disable()
with open("filter_events_for_server.profile", "w+") as f:
ps = pstats.Stats(pr, stream=f).sort_stats("cumulative")
ps.print_stats()
for i in range(0, 5):
self.assertEqual(events_to_filter[i].event_id, filtered[i].event_id)
self.assertNotIn("extra", filtered[i].content)
for i in range(5, 10):
self.assertEqual(events_to_filter[i].event_id, filtered[i].event_id)
self.assertEqual(filtered[i].content["extra"], "zzz")
test_large_room.skip = "Disabled by default because it's slow"
class _TestStore(object):
def __init__(self):
# data for get_events: a map from event_id to event
self.events = {}
# data for get_state_ids_for_events mock: a map from event_id to
# a map from (type_state_key) -> event_id for the state at that
# event
self.state_ids_for_events = {}
def add_event(self, event):
self.events[event.event_id] = event
def set_state_ids_for_event(self, event, state):
self.state_ids_for_events[event.event_id] = state
def get_state_ids_for_events(self, events, types):
res = {}
include_memberships = False
for (type, state_key) in types:
if type == "m.room.history_visibility":
continue
if type != "m.room.member" or state_key is not None:
raise RuntimeError(
"Unimplemented: get_state_ids with type (%s, %s)"
% (type, state_key)
)
include_memberships = True
if include_memberships:
for event_id in events:
res[event_id] = self.state_ids_for_events[event_id]
else:
k = ("m.room.history_visibility", "")
for event_id in events:
hve = self.state_ids_for_events[event_id][k]
res[event_id] = {k: hve}
return succeed(res)
def get_events(self, events):
return succeed({event_id: self.events[event_id] for event_id in events})
def are_users_erased(self, users):
return succeed({u: False for u in users})
| true | true |
f7381c1f55dfeeeea1f7fd8ac79a706f619f1ec8 | 2,510 | py | Python | product_spider/spiders/molcan_spider.py | Pandaaaa906/product_spider | cc7f865f53fd3ed68f4869be3ba917c8373dfcf2 | [
"MIT"
] | null | null | null | product_spider/spiders/molcan_spider.py | Pandaaaa906/product_spider | cc7f865f53fd3ed68f4869be3ba917c8373dfcf2 | [
"MIT"
] | null | null | null | product_spider/spiders/molcan_spider.py | Pandaaaa906/product_spider | cc7f865f53fd3ed68f4869be3ba917c8373dfcf2 | [
"MIT"
] | null | null | null | import re
from string import ascii_uppercase
from scrapy import Request
from product_spider.items import RawData
from product_spider.utils.spider_mixin import BaseSpider
class MolcanPrdSpider(BaseSpider):
name = 'molcan'
base_url = 'http://molcan.com'
start_urls = map(lambda x: f"http://molcan.com/product_categories/{x}", ascii_uppercase)
pattern_cas = re.compile(r"\d+-\d{2}-\d(?!\d)")
pattern_mw = re.compile(r'\d+\.\d+')
pattern_mf = re.compile(r"(?P<tmf>(?P<mf>(?P<p>[A-Za-z]+\d+)+([A-Z]+[a-z])?)\.?(?P=mf)?)")
custom_settings = {
'CONCURRENT_REQUESTS': 8,
'CONCURRENT_REQUESTS_PER_DOMAIN': 8,
'CONCURRENT_REQUESTS_PER_IP': 8,
}
def parse(self, response):
urls = response.xpath('//ul[@class="categories"]/li/a/@href').extract()
api_names = response.xpath('//ul[@class="categories"]/li/a/text()').extract()
for url, api_name in zip(urls, api_names):
url = url.replace("..", self.base_url)
yield Request(url, headers=self.headers, meta={'api_name': api_name}, callback=self.parent_parse)
def parent_parse(self, response):
detail_urls = response.xpath('//div[@class="product_wrapper"]//a[@class="readmore"]/@href').extract()
for detail_url in detail_urls:
url = detail_url.replace("..", self.base_url)
yield Request(url, headers=self.headers, meta=response.meta, callback=self.detail_parse)
def detail_parse(self, response):
info = " ".join(response.xpath('//div[@id="description"]/*/text()').extract())
l = self.pattern_mf.findall(info)
if l:
mf = "".join(map(lambda x: x[0], l))
else:
mf = ""
relate_img_url = response.xpath('//a[@class="product_image lightbox"]/img/@src').get()
d = {
'brand': "molcan",
'en_name': response.xpath('//p[@class="product_name"]/text()').get().split(' ; ')[0],
'cat_no': response.xpath('//span[@class="productNo"]/text()').get().split('-')[0],
'img_url': relate_img_url and self.base_url + relate_img_url,
'cas': ' '.join(self.pattern_cas.findall(info)),
'mw': ' '.join(self.pattern_mw.findall(info)),
'mf': mf,
'prd_url': response.request.url,
'info1': "".join(response.xpath('//div[@id="description"]/descendant::*/text()').extract()),
'parent': response.meta.get('api_name'),
}
yield RawData(**d)
| 41.147541 | 109 | 0.595219 | true | true |
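A minimal way to exercise the spider above outside the `scrapy crawl` CLI is Scrapy's CrawlerProcess; the sketch below assumes the product_spider package (with its RawData item) is importable, and the settings shown are illustrative only.

from scrapy.crawler import CrawlerProcess

# Assumes product_spider is on PYTHONPATH; MolcanPrdSpider is the class above.
from product_spider.spiders.molcan_spider import MolcanPrdSpider

process = CrawlerProcess(settings={"LOG_LEVEL": "INFO"})
process.crawl(MolcanPrdSpider)
process.start()  # blocks until the crawl finishes and items flow through pipelines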
f7381d635f3c0ce2d25584e3dfa645a0f5a58cc1 | 45 | py | Python | easyfilemanager/__init__.py | RaphaelNanje/easyfilemanager | 29cb6ad90dc28de41478ce7ed768917051f0988a | [
"MIT"
] | null | null | null | easyfilemanager/__init__.py | RaphaelNanje/easyfilemanager | 29cb6ad90dc28de41478ce7ed768917051f0988a | [
"MIT"
] | null | null | null | easyfilemanager/__init__.py | RaphaelNanje/easyfilemanager | 29cb6ad90dc28de41478ce7ed768917051f0988a | [
"MIT"
] | null | null | null | from easyfilemanager.core import FileManager
| 22.5 | 44 | 0.888889 | true | true |
f7381dccc6b45e04f911ab3724229045fc634b1c | 1,985 | py | Python | .mywaflib/waflib/extras/smart_continue.py | tobiasraabe/crypto | 5b40049169cfbf02f4979a55e8abdb77b834b820 | [
"BSD-3-Clause"
] | null | null | null | .mywaflib/waflib/extras/smart_continue.py | tobiasraabe/crypto | 5b40049169cfbf02f4979a55e8abdb77b834b820 | [
"BSD-3-Clause"
] | 1 | 2017-08-31T15:55:24.000Z | 2017-08-31T15:55:24.000Z | .mywaflib/waflib/extras/smart_continue.py | tobiasraabe/crypto | 5b40049169cfbf02f4979a55e8abdb77b834b820 | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python
# Thomas Nagy, 2011
# Try to cancel the tasks that cannot run with the option -k when an error occurs:
# 1 direct file dependencies
# 2 tasks listed in the before/after/ext_in/ext_out attributes
from waflib import Task, Runner
Task.CANCELED = 4
def cancel_next(self, tsk):
if not isinstance(tsk, Task.TaskBase):
return
if tsk.hasrun >= Task.SKIPPED:
# normal execution, no need to do anything here
return
try:
canceled_tasks, canceled_nodes = self.canceled_tasks, self.canceled_nodes
except AttributeError:
canceled_tasks = self.canceled_tasks = set()
canceled_nodes = self.canceled_nodes = set()
try:
canceled_nodes.update(tsk.outputs)
except AttributeError:
pass
try:
canceled_tasks.add(tsk)
except AttributeError:
pass
def get_out(self):
tsk = self.out.get()
if not self.stop:
self.add_more_tasks(tsk)
self.count -= 1
self.dirty = True
self.cancel_next(tsk) # new code
def error_handler(self, tsk):
if not self.bld.keep:
self.stop = True
self.error.append(tsk)
self.cancel_next(tsk) # new code
Runner.Parallel.cancel_next = cancel_next
Runner.Parallel.get_out = get_out
Runner.Parallel.error_handler = error_handler
def get_next_task(self):
tsk = self.get_next_task_smart_continue()
if not tsk:
return tsk
try:
canceled_tasks, canceled_nodes = self.canceled_tasks, self.canceled_nodes
except AttributeError:
pass
else:
# look in the tasks that this one is waiting on
# if one of them was canceled, cancel this one too
for x in tsk.run_after:
if x in canceled_tasks:
tsk.hasrun = Task.CANCELED
self.cancel_next(tsk)
break
else:
# so far so good, now consider the nodes
for x in getattr(tsk, 'inputs', []) + getattr(tsk, 'deps', []):
if x in canceled_nodes:
tsk.hasrun = Task.CANCELED
self.cancel_next(tsk)
break
return tsk
Runner.Parallel.get_next_task_smart_continue = Runner.Parallel.get_next_task
Runner.Parallel.get_next_task = get_next_task
| 24.207317 | 82 | 0.739547 | true | true |
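For context, here is a sketch of how a project would activate the tool above from its wscript so the Runner patches take effect; the tool name and tooldir are assumptions based on the repository path, and the build rule is illustrative. Once loaded, `waf build -k` cancels tasks downstream of a failure instead of attempting them.

# wscript (sketch)
def configure(conf):
    conf.load('smart_continue', tooldir='.mywaflib/waflib/extras')

def build(bld):
    bld.program(source='main.c', target='app')  # any ordinary task generator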
f7381ddea42a851a9d2e20b157d42216f14461b8 | 939 | py | Python | generated-sources/python/mojang-authentication/test/test_profile_id.py | AsyncMC/Mojang-API-Libs | b01bbd2bce44bfa2b9ed705a128cf4ecda077916 | [
"Apache-2.0"
] | null | null | null | generated-sources/python/mojang-authentication/test/test_profile_id.py | AsyncMC/Mojang-API-Libs | b01bbd2bce44bfa2b9ed705a128cf4ecda077916 | [
"Apache-2.0"
] | null | null | null | generated-sources/python/mojang-authentication/test/test_profile_id.py | AsyncMC/Mojang-API-Libs | b01bbd2bce44bfa2b9ed705a128cf4ecda077916 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Mojang Authentication API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 2020-06-05
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import openapi_client
from openapi_client.com.github.asyncmc.mojang.authentication.python.model.profile_id import ProfileId # noqa: E501
from openapi_client.rest import ApiException
class TestProfileId(unittest.TestCase):
"""ProfileId unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testProfileId(self):
"""Test ProfileId"""
# FIXME: construct object with mandatory attributes with example values
# model = openapi_client.models.profile_id.ProfileId() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 23.475 | 124 | 0.713525 | true | true |
f7381de4aebc9051177ffd55accf0b7d97283f70 | 2,547 | py | Python | elementally/tests/test.py | dem1995/elementally | 192990ad53580d62e278def6508c466589f38ecd | [
"X11"
] | null | null | null | elementally/tests/test.py | dem1995/elementally | 192990ad53580d62e278def6508c466589f38ecd | [
"X11"
] | null | null | null | elementally/tests/test.py | dem1995/elementally | 192990ad53580d62e278def6508c466589f38ecd | [
"X11"
] | null | null | null | import elementally as elmy
import unittest
import itertools
pos_array = [1, 2, 3, 4, 5]
pos_array_2 = [5, 4, 3, 2, 1]
neg_array = [-10, -20, -30, -40, -50]
neg_array_2 = [-50, -40, -30, -20, -10]
def odd_generator():
i=1
while(True):
yield i
i+=2
def complex_generator():
i=1
while(True):
yield i
i+=2j
class TestBasicArithmetic(unittest.TestCase):
def test_sum_lists(self):
"""Checks whether two lists sum properly"""
self.assertListEqual(elmy.sum(pos_array, pos_array_2), [6, 6, 6, 6, 6])
self.assertListEqual(elmy.sum(pos_array, neg_array), [-9, -18, -27, -36, -45])
def test_sum_list_with_generator(self):
"""Checks whether a list sums with a generator properly, and returns a generator"""
list_odd_numbers_plus_index = elmy.sum(pos_array, odd_generator())
self.assertListEqual(list_odd_numbers_plus_index, [2, 5, 8, 11, 14])
def test_sum_generator_with_list(self):
"""Checks whether a generator sums with a list properly, and remains a generator"""
augend = odd_generator()
gen_odd_numbers_plus_index = elmy.sum(augend, pos_array)
self.assertEqual(type(augend), type(gen_odd_numbers_plus_index))
slice_of_summed_generator = itertools.islice(gen_odd_numbers_plus_index, 8)
self.assertSequenceEqual(list(slice_of_summed_generator), [2, 5, 8, 11, 14])
def test_sum_generator_with_generator(self):
"""Checks whether a generator sums with a generator properly, and returns a generator"""
augend = odd_generator()
summed = elmy.sum(augend, odd_generator())
self.assertSequenceEqual([2, 6, 10, 14], list(itertools.islice(summed, 4)))
self.assertEqual(type(augend), type(summed))
class TestMultistepOps(unittest.TestCase):
def test_negation_generator(self):
"""Checks whether adding a sequences to its negation yields 0s"""
operand = odd_generator()
negated = elmy.negation(odd_generator())
zeros = elmy.sum(operand, negated)
for i in itertools.islice(zeros, 1000):
self.assertEqual(i, 0)
def test_reciprocal_multiplication(self):
"""Checks whether multiplying a sequence by its reciprocal yields 1s"""
augend = complex_generator()
reciprocal = elmy.product(augend, elmy.reciprocal(complex_generator()))
for i in itertools.islice(reciprocal, 1000):
self.assertAlmostEqual(i, 1, 14)
if __name__ == '__main__':
unittest.main()
| 39.184615 | 96 | 0.669415 | true | true |
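The behaviour those tests rely on can be condensed into a short sketch: each elementwise operation follows the type of its first operand (list in, list out; generator in, generator out). Only names that appear in the test module are used here.

import itertools
import elementally as elmy

def naturals():
    n = 1
    while True:
        yield n
        n += 1

print(elmy.sum([1, 2, 3], [4, 5, 6]))           # list in -> list out: [5, 7, 9]
squares = elmy.product(naturals(), naturals())  # generator in -> generator out
print(list(itertools.islice(squares, 4)))       # [1, 4, 9, 16]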
f7381dee751bc8ce42c7f5d24e881d37f73e6d1c | 2,733 | py | Python | cli/iotexetl/rpc/iotex_rpc.py | blockchain-etl/iotex-etl | bd350c3190acac35d17532eff383e05d08011e24 | [
"MIT"
] | 3 | 2020-07-04T13:53:38.000Z | 2020-07-30T15:07:35.000Z | cli/iotexetl/rpc/iotex_rpc.py | blockchain-etl/iotex-etl | bd350c3190acac35d17532eff383e05d08011e24 | [
"MIT"
] | 13 | 2020-07-16T06:07:33.000Z | 2020-08-20T10:35:10.000Z | cli/iotexetl/rpc/iotex_rpc.py | blockchain-etl/iotex-etl | bd350c3190acac35d17532eff383e05d08011e24 | [
"MIT"
] | 1 | 2021-01-20T10:06:20.000Z | 2021-01-20T10:06:20.000Z | # The MIT License (MIT)
#
# Copyright (c) 2020 Evgeny Medvedev, evge.medvedev@gmail.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from urllib.parse import urlparse
import grpc
from iotexetl.rpc.iotexapi import api_pb2
from iotexetl.rpc.iotexapi import api_pb2_grpc
class IotexRpc:
def __init__(self, provider_uri, timeout=60):
self.timeout = timeout
channel = get_channel_from_uri_string(provider_uri)
self.stub = api_pb2_grpc.APIServiceStub(channel)
def get_raw_blocks(self, start_height, count):
return self.stub.GetRawBlocks(
api_pb2.GetRawBlocksRequest(startHeight=start_height, count=count, withReceipts=True), timeout=self.timeout)
def get_block_metas(self, start_height, count):
return self.stub.GetBlockMetas(api_pb2.GetBlockMetasRequest(
byIndex=api_pb2.GetBlockMetasByIndexRequest(start=start_height, count=count)
), timeout=self.timeout)
def get_transaction_logs(self, block_number):
return self.stub.GetTransactionLogByBlockHeight(
api_pb2.GetTransactionLogByBlockHeightRequest(blockHeight=block_number), timeout=self.timeout)
def get_chain_meta(self):
return self.stub.GetChainMeta(api_pb2.GetChainMetaRequest(), timeout=self.timeout)
def get_channel_from_uri_string(provider_uri):
uri = urlparse(provider_uri)
if uri.scheme == 'grpcs':
credentials = grpc.ssl_channel_credentials()
channel = grpc.secure_channel(uri.netloc, credentials)
elif uri.scheme == 'grpc':
channel = grpc.insecure_channel(uri.netloc)
else:
raise ValueError(f'The uri scheme {uri.scheme} is not recognized. Use grpc:// or grpcs://')
return channel
| 42.046154 | 120 | 0.751921 | true | true |
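A minimal usage sketch for IotexRpc above. The endpoint URL is an assumption (any grpc:// or grpcs:// host:port works), and the response field path follows the iotexapi protobuf, so treat it as illustrative.

from iotexetl.rpc.iotex_rpc import IotexRpc

rpc = IotexRpc('grpcs://api.iotex.one:443', timeout=30)  # assumed public endpoint
meta = rpc.get_chain_meta()
print(meta.chainMeta.height)  # field path per the iotexapi proto (assumption)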
f7381e74083b6e786a470d6307bbc156fb1bba7f | 2,863 | py | Python | tests/modules/generate/test_recipe_generator.py | lexatnet/appimage-builder | 58b8849a837cab6618c3ca0de3ade5f884fc954a | [
"MIT"
] | null | null | null | tests/modules/generate/test_recipe_generator.py | lexatnet/appimage-builder | 58b8849a837cab6618c3ca0de3ade5f884fc954a | [
"MIT"
] | null | null | null | tests/modules/generate/test_recipe_generator.py | lexatnet/appimage-builder | 58b8849a837cab6618c3ca0de3ade5f884fc954a | [
"MIT"
] | null | null | null | # Copyright 2021 Alexis Lopez Zubieta
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
import pathlib
from unittest import TestCase
from appimagebuilder.context import AppInfo
from appimagebuilder.modules.generate import BundleInfo
from appimagebuilder.recipe.schema import RecipeSchema
from tests.modules.generate.fake_path import FakePath
from tests.modules.generate.fake_runtime_analyser import FakeAppRuntimeAnalyser
from appimagebuilder.modules.generate import RecipeGenerator
from tests.modules.generate.fake_bundle_info_gatherer import FakeBundleInfoGatherer
from tests.modules.generate.fake_package_manager_section_generator import (
FakePackageManagerSectionGenerator,
)
class TestRecipeGenerator(TestCase):
def setUp(self) -> None:
self.generator = RecipeGenerator(
package_managers=[
FakePackageManagerSectionGenerator(
"apt",
{
"arch": "amd64",
"sources": [],
"include": ["libc6"],
},
["/missing/file"],
),
FakePackageManagerSectionGenerator(
"files",
{
"include": ["/missing/file"],
},
[],
),
],
bundle_info_gatherer=FakeBundleInfoGatherer(
BundleInfo(
app_dir=pathlib.Path("AppDir"),
app_info=AppInfo(
id="fooview",
name="Foo View",
icon="fooview",
exec="usr/bin/fooview",
exec_args="$@",
),
update_string="update_string",
runtime_arch="amd64",
)
),
runtime_analyser=FakeAppRuntimeAnalyser(
["/lib64/ld-linux-x86-64.so.2", "/missing/file"]
),
)
def test_generate(self):
recipe = self.generator.generate(FakePath("/tmp/AppDir"))
schema = RecipeSchema()
self.assertTrue(schema.v1.validate(recipe))
self.assertIn("apt", recipe["AppDir"])
self.assertIn("files", recipe["AppDir"])
| 39.219178 | 83 | 0.585051 | true | true |
f7381ebe02cd74e34e415cf64782e1b9aab065cc | 6,246 | py | Python | reid/models/resnet_mldg_smm.py | ZhaoChuyang/dgreid | ee1d7af74b796f2f194307ab023e43ecc3d3d525 | [
"MIT"
] | null | null | null | reid/models/resnet_mldg_smm.py | ZhaoChuyang/dgreid | ee1d7af74b796f2f194307ab023e43ecc3d3d525 | [
"MIT"
] | null | null | null | reid/models/resnet_mldg_smm.py | ZhaoChuyang/dgreid | ee1d7af74b796f2f194307ab023e43ecc3d3d525 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn import init
import torchvision
from collections import OrderedDict
from ..models.layers.adain import SMMBlock
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnet50_mldg_smm']
class ResNet(nn.Module):
__factory = {
18: torchvision.models.resnet18,
34: torchvision.models.resnet34,
50: torchvision.models.resnet50,
101: torchvision.models.resnet101,
152: torchvision.models.resnet152,
}
def __init__(self, depth, pretrained=True, cut_at_pooling=False,
num_features=0, norm=False, dropout=0, num_classes=None):
super(ResNet, self).__init__()
self.pretrained = pretrained
self.depth = depth
self.cut_at_pooling = cut_at_pooling
# Construct base (pretrained) resnet
if depth not in ResNet.__factory:
raise KeyError("Unsupported depth:", depth)
resnet = ResNet.__factory[depth](pretrained=pretrained)
resnet.layer4[0].conv2.stride = (1,1)
resnet.layer4[0].downsample[0].stride = (1,1)
# self.base = nn.Sequential(
# resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool,
# resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4)
self.conv = nn.Sequential(OrderedDict([
('conv1', resnet.conv1),
('bn1', resnet.bn1),
('relu', resnet.relu),
('maxpool', resnet.maxpool)]))
self.layer1 = resnet.layer1
self.layer2 = resnet.layer2
self.layer3 = resnet.layer3
self.layer4 = resnet.layer4
self.gap = nn.AdaptiveAvgPool2d(1)
self.smm_block = SMMBlock(1, rand=False, learnable=False)
if not self.cut_at_pooling:
self.num_features = num_features
self.norm = norm
self.dropout = dropout
self.has_embedding = num_features > 0
self.num_classes = num_classes
out_planes = resnet.fc.in_features
# Append new layers
if self.has_embedding:
self.feat = nn.Linear(out_planes, self.num_features)
self.feat_bn = nn.BatchNorm1d(self.num_features)
init.kaiming_normal_(self.feat.weight, mode='fan_out')
init.constant_(self.feat.bias, 0)
else:
# Change the num_features to CNN output channels
self.num_features = out_planes
self.feat_bn = nn.BatchNorm1d(self.num_features)
self.feat_bn.bias.requires_grad_(False)
if self.dropout > 0:
self.drop = nn.Dropout(self.dropout)
self.classifier = nn.Linear(self.num_features, self.num_classes, bias=False)
init.normal_(self.classifier.weight, std=0.001)
init.constant_(self.feat_bn.weight, 1)
init.constant_(self.feat_bn.bias, 0)
if not pretrained:
self.reset_params()
def forward(self, x, meta_train=True, output_prob=False, return_featuremaps=False):
if self.training:
num_domains = len(x)
x = torch.cat(x, dim=0)
x = self.conv(x)
# NOTE: change to 'if self.training and meta_train:'
if meta_train:
mixed_x, _ = self.smm_block(x)
if return_featuremaps:
return [x, mixed_x]
x = mixed_x
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.gap(x)
x = x.view(x.size(0), -1)
if self.cut_at_pooling:
return x
if self.has_embedding:
bn_x = self.feat_bn(self.feat(x))
else:
bn_x = self.feat_bn(x)
if self.training is False and output_prob is False:
bn_x = F.normalize(bn_x)
return bn_x
if self.norm:
            bn_x = F.normalize(bn_x)  # L2-normalize the embedding when norm is enabled
elif self.has_embedding:
bn_x = F.relu(bn_x)
if self.dropout > 0:
bn_x = self.drop(bn_x)
prob = self.classifier(bn_x)
# prob, mixed_prob = torch.chunk(prob, 2, dim=0)
prob = torch.chunk(prob, num_domains, dim=0)
# mixed_prob = torch.chunk(mixed_prob, num_domains, dim=0)
# x, mixed_x = torch.chunk(x, 2, dim=0)
x = torch.chunk(x, num_domains, dim=0)
# mixed_x = torch.chunk(mixed_x, num_domains, dim=0)
return prob, x
def reset_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.001)
if m.bias is not None:
init.constant_(m.bias, 0)
def get_params(self):
for param in self.parameters():
if param.requires_grad:
yield param
# def train(self, mode=True):
# """
# Override the default train() to freeze the BN parameters
# """
# super().train(mode)
# self.freeze_bn()
#
# def freeze_bn(self):
# for m in self.modules():
# if isinstance(m, nn.BatchNorm1d):
# m.eval()
# if isinstance(m, nn.BatchNorm2d):
# m.eval()
def resnet18(**kwargs):
return ResNet(18, **kwargs)
def resnet34(**kwargs):
return ResNet(34, **kwargs)
def resnet50(**kwargs):
return ResNet(50, **kwargs)
def resnet101(**kwargs):
return ResNet(101, **kwargs)
def resnet152(**kwargs):
return ResNet(152, **kwargs)
def resnet50_mde(**kwargs):
return ResNet(50, **kwargs)
def resnet50_mldg_smm(**kwargs):
return ResNet(50, **kwargs)
| 30.028846 | 88 | 0.575568 | true | true |
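A forward-pass sketch for the model above. The domain count, batch shape, and class count are illustrative, and it assumes the reid package (including SMMBlock) is importable; pretrained=False keeps the sketch offline.

import torch

model = resnet50_mldg_smm(pretrained=False, num_features=0, num_classes=751)
model.train()
domains = [torch.randn(4, 3, 256, 128) for _ in range(3)]  # three source domains
probs, feats = model(domains, meta_train=True)
print(len(probs), probs[0].shape, feats[0].shape)  # 3 (4, 751) (4, 2048)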
f7381edb24031a7a55a75176563d048bfb71d4fd | 1,398 | py | Python | google/ads/googleads/v4/enums/types/policy_topic_evidence_destination_mismatch_url_type.py | batardo/google-ads-python | a39748521847e85138fca593f3be2681352ad024 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v4/enums/types/policy_topic_evidence_destination_mismatch_url_type.py | batardo/google-ads-python | a39748521847e85138fca593f3be2681352ad024 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v4/enums/types/policy_topic_evidence_destination_mismatch_url_type.py | batardo/google-ads-python | a39748521847e85138fca593f3be2681352ad024 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v4.enums",
marshal="google.ads.googleads.v4",
manifest={"PolicyTopicEvidenceDestinationMismatchUrlTypeEnum",},
)
class PolicyTopicEvidenceDestinationMismatchUrlTypeEnum(proto.Message):
r"""Container for enum describing possible policy topic evidence
destination mismatch url types.
"""
class PolicyTopicEvidenceDestinationMismatchUrlType(proto.Enum):
r"""The possible policy topic evidence destination mismatch url
types.
"""
UNSPECIFIED = 0
UNKNOWN = 1
DISPLAY_URL = 2
FINAL_URL = 3
FINAL_MOBILE_URL = 4
TRACKING_URL = 5
MOBILE_TRACKING_URL = 6
__all__ = tuple(sorted(__protobuf__.manifest))
| 29.744681 | 74 | 0.716738 | true | true |
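A one-line sketch of reading the generated enum above (proto-plus enums behave like IntEnum):

t = PolicyTopicEvidenceDestinationMismatchUrlTypeEnum.PolicyTopicEvidenceDestinationMismatchUrlType
print(t.FINAL_URL, int(t.FINAL_URL))  # FINAL_URL has value 3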
f7381f56863525771b7576c8adc3d03ab7574454 | 217 | py | Python | testing/freeze/runtests_script.py | tinkerlin/pytest | bed3918cbc800682681a26c163f4cb0868b3a612 | [
"MIT"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | testing/freeze/runtests_script.py | tinkerlin/pytest | bed3918cbc800682681a26c163f4cb0868b3a612 | [
"MIT"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | testing/freeze/runtests_script.py | tinkerlin/pytest | bed3918cbc800682681a26c163f4cb0868b3a612 | [
"MIT"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | # -*- coding: utf-8 -*-
"""
This is the script that is actually frozen into an executable: simply executes
py.test main().
"""
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main())
| 18.083333 | 78 | 0.64977 | true | true |
f7381fe80deed90bccb110c73f72f098589afc20 | 7,039 | py | Python | isi_sdk_8_2_2/isi_sdk_8_2_2/models/cloud_settings_settings_sleep_timeout_cloud_garbage_collection.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_8_2_2/isi_sdk_8_2_2/models/cloud_settings_settings_sleep_timeout_cloud_garbage_collection.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_8_2_2/isi_sdk_8_2_2/models/cloud_settings_settings_sleep_timeout_cloud_garbage_collection.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 9
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CloudSettingsSettingsSleepTimeoutCloudGarbageCollection(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'recovery_with_tasks': 'float',
'recovery_without_tasks': 'float',
'with_tasks': 'float',
'without_tasks': 'float'
}
attribute_map = {
'recovery_with_tasks': 'recovery_with_tasks',
'recovery_without_tasks': 'recovery_without_tasks',
'with_tasks': 'with_tasks',
'without_tasks': 'without_tasks'
}
def __init__(self, recovery_with_tasks=None, recovery_without_tasks=None, with_tasks=None, without_tasks=None): # noqa: E501
"""CloudSettingsSettingsSleepTimeoutCloudGarbageCollection - a model defined in Swagger""" # noqa: E501
self._recovery_with_tasks = None
self._recovery_without_tasks = None
self._with_tasks = None
self._without_tasks = None
self.discriminator = None
if recovery_with_tasks is not None:
self.recovery_with_tasks = recovery_with_tasks
if recovery_without_tasks is not None:
self.recovery_without_tasks = recovery_without_tasks
if with_tasks is not None:
self.with_tasks = with_tasks
if without_tasks is not None:
self.without_tasks = without_tasks
@property
def recovery_with_tasks(self):
"""Gets the recovery_with_tasks of this CloudSettingsSettingsSleepTimeoutCloudGarbageCollection. # noqa: E501
Sleep timeout for a recovery thread with pending tasks # noqa: E501
:return: The recovery_with_tasks of this CloudSettingsSettingsSleepTimeoutCloudGarbageCollection. # noqa: E501
:rtype: float
"""
return self._recovery_with_tasks
@recovery_with_tasks.setter
def recovery_with_tasks(self, recovery_with_tasks):
"""Sets the recovery_with_tasks of this CloudSettingsSettingsSleepTimeoutCloudGarbageCollection.
Sleep timeout for a recovery thread with pending tasks # noqa: E501
:param recovery_with_tasks: The recovery_with_tasks of this CloudSettingsSettingsSleepTimeoutCloudGarbageCollection. # noqa: E501
:type: float
"""
self._recovery_with_tasks = recovery_with_tasks
@property
def recovery_without_tasks(self):
"""Gets the recovery_without_tasks of this CloudSettingsSettingsSleepTimeoutCloudGarbageCollection. # noqa: E501
Sleep timeout for a recovery thread without pending tasks # noqa: E501
:return: The recovery_without_tasks of this CloudSettingsSettingsSleepTimeoutCloudGarbageCollection. # noqa: E501
:rtype: float
"""
return self._recovery_without_tasks
@recovery_without_tasks.setter
def recovery_without_tasks(self, recovery_without_tasks):
"""Sets the recovery_without_tasks of this CloudSettingsSettingsSleepTimeoutCloudGarbageCollection.
Sleep timeout for a recovery thread without pending tasks # noqa: E501
:param recovery_without_tasks: The recovery_without_tasks of this CloudSettingsSettingsSleepTimeoutCloudGarbageCollection. # noqa: E501
:type: float
"""
self._recovery_without_tasks = recovery_without_tasks
@property
def with_tasks(self):
"""Gets the with_tasks of this CloudSettingsSettingsSleepTimeoutCloudGarbageCollection. # noqa: E501
Sleep timeout for a non-recovery thread with pending tasks # noqa: E501
:return: The with_tasks of this CloudSettingsSettingsSleepTimeoutCloudGarbageCollection. # noqa: E501
:rtype: float
"""
return self._with_tasks
@with_tasks.setter
def with_tasks(self, with_tasks):
"""Sets the with_tasks of this CloudSettingsSettingsSleepTimeoutCloudGarbageCollection.
Sleep timeout for a non-recovery thread with pending tasks # noqa: E501
:param with_tasks: The with_tasks of this CloudSettingsSettingsSleepTimeoutCloudGarbageCollection. # noqa: E501
:type: float
"""
self._with_tasks = with_tasks
@property
def without_tasks(self):
"""Gets the without_tasks of this CloudSettingsSettingsSleepTimeoutCloudGarbageCollection. # noqa: E501
Sleep timeout for a non-recovery thread without pending tasks # noqa: E501
:return: The without_tasks of this CloudSettingsSettingsSleepTimeoutCloudGarbageCollection. # noqa: E501
:rtype: float
"""
return self._without_tasks
@without_tasks.setter
def without_tasks(self, without_tasks):
"""Sets the without_tasks of this CloudSettingsSettingsSleepTimeoutCloudGarbageCollection.
Sleep timeout for a non-recovery thread without pending tasks # noqa: E501
:param without_tasks: The without_tasks of this CloudSettingsSettingsSleepTimeoutCloudGarbageCollection. # noqa: E501
:type: float
"""
self._without_tasks = without_tasks
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CloudSettingsSettingsSleepTimeoutCloudGarbageCollection):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 35.371859 | 144 | 0.667851 | true | true |
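A round-trip sketch for the generated model above; the timeout values are arbitrary.

timeouts = CloudSettingsSettingsSleepTimeoutCloudGarbageCollection(
    recovery_with_tasks=0.5,
    recovery_without_tasks=2.0,
    with_tasks=1.0,
    without_tasks=5.0,
)
print(timeouts.to_dict())
# {'recovery_with_tasks': 0.5, 'recovery_without_tasks': 2.0,
#  'with_tasks': 1.0, 'without_tasks': 5.0}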
f73821c5faf6d5fa14c6927bb627ab1b623d7e67 | 11,020 | py | Python | detection/object_detection/obj_3_mxrcnn/WindowObj3MxrcnnInfer.py | THEFASHIONGEEK/Monk_Gui | 7c23cdd3487ae5a5b28b0a3419e4e64022b11e06 | [
"Apache-2.0"
] | 129 | 2020-01-30T22:08:05.000Z | 2022-03-04T06:33:14.000Z | detection/object_detection/obj_3_mxrcnn/WindowObj3MxrcnnInfer.py | netwrkspider/Monk_Gui | 05ce1bbef0199fbd38519220cc71fb6904c59e7c | [
"Apache-2.0"
] | 2 | 2020-04-04T14:57:49.000Z | 2020-06-13T14:13:01.000Z | detection/object_detection/obj_3_mxrcnn/WindowObj3MxrcnnInfer.py | netwrkspider/Monk_Gui | 05ce1bbef0199fbd38519220cc71fb6904c59e7c | [
"Apache-2.0"
] | 46 | 2020-01-31T00:23:21.000Z | 2022-01-31T01:21:51.000Z | import os
import sys
import json
import time
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import pyqtSignal, pyqtSlot
class WindowObj3MxrcnnInfer(QtWidgets.QWidget):
backward_3_mxrcnn = QtCore.pyqtSignal();
def __init__(self):
super().__init__()
self.title = 'Mxrcnn - Infer'
self.left = 10
self.top = 10
self.width = 900
self.height = 690
self.cfg_setup();
self.initUI()
def cfg_setup(self):
if(os.path.isfile("obj_3_mxrcnn_infer.json")):
with open('obj_3_mxrcnn_infer.json') as json_file:
self.system = json.load(json_file)
else:
self.system = {};
self.system["model"] = "resnet50";
self.system["weights"] = "trained_model/model_resnet50-0005.params";
self.system["use_gpu"] = "yes";
self.system["img_file"] = "Monk_Object_Detection/example_notebooks/sample_dataset/kangaroo/test/kg1.jpeg";
self.system["conf_thresh"] = "0.7";
self.system["class_file"] = "Monk_Object_Detection/example_notebooks/sample_dataset/kangaroo/classes.txt"
self.system["img_short_side"] = "600";
self.system["img_long_side"] = "1000";
self.system["mean"] = "123.68, 116.779, 103.939";
self.system["std"] = "1.0, 1.0, 1.0";
with open('obj_3_mxrcnn_infer.json', 'w') as outfile:
json.dump(self.system, outfile)
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height);
# Backward
self.b1 = QPushButton('Back', self)
self.b1.move(700,650)
self.b1.clicked.connect(self.backward)
# Quit
self.bclose = QPushButton('Quit', self)
self.bclose.move(800,650)
self.bclose.clicked.connect(self.close)
self.l1 = QLabel(self);
self.l1.setText("1. Model :");
self.l1.move(20, 20);
self.cb1 = QComboBox(self);
self.models = ["resnet50", "resnet101", "vgg16"];
self.cb1.addItems(self.models);
index = self.cb1.findText(self.system["model"], QtCore.Qt.MatchFixedString)
if index >= 0:
self.cb1.setCurrentIndex(index)
self.cb1.move(120, 20);
self.l2 = QLabel(self);
self.l2.setText("2. Weights File: ");
self.l2.move(20, 70);
self.b2 = QPushButton('Select File', self)
self.b2.move(130, 70)
self.b2.clicked.connect(self.select_model_file);
self.tb2 = QTextEdit(self)
self.tb2.move(20, 100)
self.tb2.resize(300, 80)
self.tb2.setText(self.system["weights"]);
self.tb2.setReadOnly(True)
self.l3 = QLabel(self);
self.l3.setText("3. Use Gpu :");
self.l3.move(20, 210);
self.cb3 = QComboBox(self);
self.use_gpu = ["Yes", "No"];
self.cb3.addItems(self.use_gpu);
index = self.cb3.findText(self.system["use_gpu"], QtCore.Qt.MatchFixedString)
if index >= 0:
self.cb3.setCurrentIndex(index)
self.cb3.move(120, 210);
self.l4 = QLabel(self);
self.l4.setText("4. Image File: ");
self.l4.move(20, 250);
self.b4 = QPushButton('Select File', self)
self.b4.move(130, 250)
self.b4.clicked.connect(self.select_img_file);
self.tb4 = QTextEdit(self)
self.tb4.move(20, 280)
self.tb4.resize(300, 80)
self.tb4.setText(self.system["img_file"]);
self.tb4.setReadOnly(True)
self.l5 = QLabel(self);
self.l5.setText("5. Confidence Threshold:");
self.l5.move(20, 400);
self.e4 = QLineEdit(self)
self.e4.move(200, 400);
self.e4.setText(self.system["conf_thresh"]);
self.e4.resize(130, 25);
        self.l5b = QLabel(self);
        self.l5b.setText("6. Classes File List: ");
        self.l5b.move(20, 440);
        self.b5b = QPushButton('Select File', self)
        self.b5b.move(150, 440)
        self.b5b.clicked.connect(self.select_class_file);
self.tb5 = QTextEdit(self)
self.tb5.move(20, 470)
self.tb5.resize(300, 80)
self.tb5.setText(self.system["class_file"]);
self.tb5.setReadOnly(True)
self.l6= QLabel(self);
self.l6.setText("7. Image short side :");
self.l6.move(430, 20);
self.e6 = QLineEdit(self)
self.e6.move(570, 20);
self.e6.setText(self.system["img_short_side"]);
self.l7 = QLabel(self);
self.l7.setText("8. Image long side :");
self.l7.move(430, 70);
self.e7 = QLineEdit(self)
self.e7.move(570, 70);
self.e7.setText(self.system["img_long_side"]);
self.l8 = QLabel(self);
self.l8.setText("9. Normalization mean :");
self.l8.move(430, 120);
self.e8 = QLineEdit(self)
self.e8.move(600, 120);
self.e8.resize(200, 25)
self.e8.setText(self.system["mean"]);
self.l9 = QLabel(self);
self.l9.setText("10. Normalization std :");
self.l9.move(430, 170);
self.e9 = QLineEdit(self)
self.e9.move(590, 170);
self.e9.setText(self.system["std"]);
self.te1 = QTextBrowser(self);
self.te1.move(450, 200);
self.te1.setFixedSize(400, 100);
self.b5 = QPushButton('Predict', self)
self.b5.move(350, 200)
self.b5.clicked.connect(self.Predict);
self.l10 = QLabel(self)
self.l10.move(420, 310);
self.l10.resize(450, 350)
self.process = QtCore.QProcess(self)
self.process.readyReadStandardOutput.connect(self.stdoutReady)
self.process.readyReadStandardError.connect(self.stderrReady)
self.process.setProcessChannelMode(QtCore.QProcess.MergedChannels)
def select_model_file(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getOpenFileName(self,"QFileDialog.getOpenFileName()", os.getcwd() + "/trained_model/",
"Monk Project Files (*.params);;All Files (*)", options=options)
self.system["weights"] = fileName;
self.tb2.setText(fileName);
self.system["model"] = self.cb1.currentText();
self.system["use_gpu"] = self.cb3.currentText();
self.system["conf_thresh"] = self.e4.text();
self.system["img_short_side"] = self.e6.text();
self.system["img_long_side"] = self.e7.text();
self.system["mean"] = self.e8.text();
self.system["std"] = self.e9.text();
with open('obj_3_mxrcnn_infer.json', 'w') as outfile:
json.dump(self.system, outfile)
def select_img_file(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getOpenFileName(self,"QFileDialog.getOpenFileName()", os.getcwd(),
"All Files (*)",
options=options)
self.system["img_file"] = fileName;
self.tb4.setText(fileName);
self.system["model"] = self.cb1.currentText();
self.system["use_gpu"] = self.cb3.currentText();
self.system["conf_thresh"] = self.e4.text();
self.system["img_short_side"] = self.e6.text();
self.system["img_long_side"] = self.e7.text();
self.system["mean"] = self.e8.text();
self.system["std"] = self.e9.text();
with open('obj_3_mxrcnn_infer.json', 'w') as outfile:
json.dump(self.system, outfile)
def select_class_file(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getOpenFileName(self,"QFileDialog.getOpenFileName()", os.getcwd(),
"Text Files (*.txt);;All Files (*)",
options=options)
self.system["class_file"] = fileName;
self.tb5.setText(fileName);
self.system["model"] = self.cb1.currentText();
self.system["use_gpu"] = self.cb3.currentText();
self.system["conf_thresh"] = self.e4.text();
self.system["img_short_side"] = self.e6.text();
self.system["img_long_side"] = self.e7.text();
self.system["mean"] = self.e8.text();
self.system["std"] = self.e9.text();
with open('obj_3_mxrcnn_infer.json', 'w') as outfile:
json.dump(self.system, outfile)
def Predict(self):
self.system["model"] = self.cb1.currentText();
self.system["use_gpu"] = self.cb3.currentText();
self.system["conf_thresh"] = self.e4.text();
self.system["img_short_side"] = self.e6.text();
self.system["img_long_side"] = self.e7.text();
self.system["mean"] = self.e8.text();
self.system["std"] = self.e9.text();
self.te1.setText("");
with open('obj_3_mxrcnn_infer.json', 'w') as outfile:
json.dump(self.system, outfile)
os.system("cp cfg/detection/object_detection/obj_3_mxrcnn/infer_obj_3_mxrcnn.py .");
os.system("cp cfg/detection/object_detection/obj_3_mxrcnn/infer_obj_3_mxrcnn.sh .");
self.process.start('bash', ['infer_obj_3_mxrcnn.sh'])
self.append("Process PID: " + str(self.process.pid()) + "\n");
def stop(self):
self.process.kill();
self.append("Prediction Stopped\n")
def stdoutReady(self):
text = str(self.process.readAllStandardOutput().data(), encoding='utf-8')
if("Completed" in text):
pixmap = QPixmap('output.png')
pixmap = pixmap.scaledToWidth(400)
pixmap = pixmap.scaledToHeight(300)
self.l10.setPixmap(pixmap)
self.append(text)
def stderrReady(self):
text = str(self.process.readAllStandardError().data(), encoding='utf-8')
self.append(text)
def append(self, text):
cursor = self.te1.textCursor()
self.te1.ensureCursorVisible()
cursor.movePosition(cursor.End)
cursor.insertText(text)
def backward(self):
self.system["model"] = self.cb1.currentText();
self.system["use_gpu"] = self.cb3.currentText();
self.system["conf_thresh"] = self.e4.text();
self.system["img_short_side"] = self.e6.text();
self.system["img_long_side"] = self.e7.text();
self.system["mean"] = self.e8.text();
self.system["std"] = self.e9.text();
with open('obj_3_mxrcnn_infer.json', 'w') as outfile:
json.dump(self.system, outfile)
self.backward_3_mxrcnn.emit();
'''
app = QApplication(sys.argv)
screen = WindowObj3MxrcnnInfer()
screen.show()
sys.exit(app.exec_())
''' | 32.411765 | 121 | 0.582849 | import os
import sys
import json
import time
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import pyqtSignal, pyqtSlot
class WindowObj3MxrcnnInfer(QtWidgets.QWidget):
backward_3_mxrcnn = QtCore.pyqtSignal();
def __init__(self):
super().__init__()
self.title = 'Mxrcnn - Infer'
self.left = 10
self.top = 10
self.width = 900
self.height = 690
self.cfg_setup();
self.initUI()
def cfg_setup(self):
if(os.path.isfile("obj_3_mxrcnn_infer.json")):
with open('obj_3_mxrcnn_infer.json') as json_file:
self.system = json.load(json_file)
else:
self.system = {};
self.system["model"] = "resnet50";
self.system["weights"] = "trained_model/model_resnet50-0005.params";
self.system["use_gpu"] = "yes";
self.system["img_file"] = "Monk_Object_Detection/example_notebooks/sample_dataset/kangaroo/test/kg1.jpeg";
self.system["conf_thresh"] = "0.7";
self.system["class_file"] = "Monk_Object_Detection/example_notebooks/sample_dataset/kangaroo/classes.txt"
self.system["img_short_side"] = "600";
self.system["img_long_side"] = "1000";
self.system["mean"] = "123.68, 116.779, 103.939";
self.system["std"] = "1.0, 1.0, 1.0";
with open('obj_3_mxrcnn_infer.json', 'w') as outfile:
json.dump(self.system, outfile)
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height);
self.b1 = QPushButton('Back', self)
self.b1.move(700,650)
self.b1.clicked.connect(self.backward)
self.bclose = QPushButton('Quit', self)
self.bclose.move(800,650)
self.bclose.clicked.connect(self.close)
self.l1 = QLabel(self);
self.l1.setText("1. Model :");
self.l1.move(20, 20);
self.cb1 = QComboBox(self);
self.models = ["resnet50", "resnet101", "vgg16"];
self.cb1.addItems(self.models);
index = self.cb1.findText(self.system["model"], QtCore.Qt.MatchFixedString)
if index >= 0:
self.cb1.setCurrentIndex(index)
self.cb1.move(120, 20);
self.l2 = QLabel(self);
self.l2.setText("2. Weights File: ");
self.l2.move(20, 70);
self.b2 = QPushButton('Select File', self)
self.b2.move(130, 70)
self.b2.clicked.connect(self.select_model_file);
self.tb2 = QTextEdit(self)
self.tb2.move(20, 100)
self.tb2.resize(300, 80)
self.tb2.setText(self.system["weights"]);
self.tb2.setReadOnly(True)
self.l3 = QLabel(self);
self.l3.setText("3. Use Gpu :");
self.l3.move(20, 210);
self.cb3 = QComboBox(self);
self.use_gpu = ["Yes", "No"];
self.cb3.addItems(self.use_gpu);
index = self.cb3.findText(self.system["use_gpu"], QtCore.Qt.MatchFixedString)
if index >= 0:
self.cb3.setCurrentIndex(index)
self.cb3.move(120, 210);
self.l4 = QLabel(self);
self.l4.setText("4. Image File: ");
self.l4.move(20, 250);
self.b4 = QPushButton('Select File', self)
self.b4.move(130, 250)
self.b4.clicked.connect(self.select_img_file);
self.tb4 = QTextEdit(self)
self.tb4.move(20, 280)
self.tb4.resize(300, 80)
self.tb4.setText(self.system["img_file"]);
self.tb4.setReadOnly(True)
self.l5 = QLabel(self);
self.l5.setText("5. Confidence Threshold:");
self.l5.move(20, 400);
self.e4 = QLineEdit(self)
self.e4.move(200, 400);
self.e4.setText(self.system["conf_thresh"]);
self.e4.resize(130, 25);
self.l5 = QLabel(self);
self.l5.setText("6. Classes File List: ");
self.l5.move(20, 440);
self.b5 = QPushButton('Select File', self)
self.b5.move(150, 440)
self.b5.clicked.connect(self.select_class_file);
self.tb5 = QTextEdit(self)
self.tb5.move(20, 470)
self.tb5.resize(300, 80)
self.tb5.setText(self.system["class_file"]);
self.tb5.setReadOnly(True)
self.l6= QLabel(self);
self.l6.setText("7. Image short side :");
self.l6.move(430, 20);
self.e6 = QLineEdit(self)
self.e6.move(570, 20);
self.e6.setText(self.system["img_short_side"]);
self.l7 = QLabel(self);
self.l7.setText("8. Image long side :");
self.l7.move(430, 70);
self.e7 = QLineEdit(self)
self.e7.move(570, 70);
self.e7.setText(self.system["img_long_side"]);
self.l8 = QLabel(self);
self.l8.setText("9. Normalization mean :");
self.l8.move(430, 120);
self.e8 = QLineEdit(self)
self.e8.move(600, 120);
self.e8.resize(200, 25)
self.e8.setText(self.system["mean"]);
self.l9 = QLabel(self);
self.l9.setText("10. Normalization std :");
self.l9.move(430, 170);
self.e9 = QLineEdit(self)
self.e9.move(590, 170);
self.e9.setText(self.system["std"]);
self.te1 = QTextBrowser(self);
self.te1.move(450, 200);
self.te1.setFixedSize(400, 100);
self.b5 = QPushButton('Predict', self)
self.b5.move(350, 200)
self.b5.clicked.connect(self.Predict);
self.l10 = QLabel(self)
self.l10.move(420, 310);
self.l10.resize(450, 350)
self.process = QtCore.QProcess(self)
self.process.readyReadStandardOutput.connect(self.stdoutReady)
self.process.readyReadStandardError.connect(self.stderrReady)
self.process.setProcessChannelMode(QtCore.QProcess.MergedChannels)
def select_model_file(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getOpenFileName(self,"QFileDialog.getOpenFileName()", os.getcwd() + "/trained_model/",
"Monk Project Files (*.params);;All Files (*)", options=options)
self.system["weights"] = fileName;
self.tb2.setText(fileName);
self.system["model"] = self.cb1.currentText();
self.system["use_gpu"] = self.cb3.currentText();
self.system["conf_thresh"] = self.e4.text();
self.system["img_short_side"] = self.e6.text();
self.system["img_long_side"] = self.e7.text();
self.system["mean"] = self.e8.text();
self.system["std"] = self.e9.text();
with open('obj_3_mxrcnn_infer.json', 'w') as outfile:
json.dump(self.system, outfile)
def select_img_file(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getOpenFileName(self,"QFileDialog.getOpenFileName()", os.getcwd(),
"All Files (*)",
options=options)
self.system["img_file"] = fileName;
self.tb4.setText(fileName);
self.system["model"] = self.cb1.currentText();
self.system["use_gpu"] = self.cb3.currentText();
self.system["conf_thresh"] = self.e4.text();
self.system["img_short_side"] = self.e6.text();
self.system["img_long_side"] = self.e7.text();
self.system["mean"] = self.e8.text();
self.system["std"] = self.e9.text();
with open('obj_3_mxrcnn_infer.json', 'w') as outfile:
json.dump(self.system, outfile)
def select_class_file(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getOpenFileName(self,"QFileDialog.getOpenFileName()", os.getcwd(),
"Text Files (*.txt);;All Files (*)",
options=options)
self.system["class_file"] = fileName;
self.tb5.setText(fileName);
self.system["model"] = self.cb1.currentText();
self.system["use_gpu"] = self.cb3.currentText();
self.system["conf_thresh"] = self.e4.text();
self.system["img_short_side"] = self.e6.text();
self.system["img_long_side"] = self.e7.text();
self.system["mean"] = self.e8.text();
self.system["std"] = self.e9.text();
with open('obj_3_mxrcnn_infer.json', 'w') as outfile:
json.dump(self.system, outfile)
def Predict(self):
self.system["model"] = self.cb1.currentText();
self.system["use_gpu"] = self.cb3.currentText();
self.system["conf_thresh"] = self.e4.text();
self.system["img_short_side"] = self.e6.text();
self.system["img_long_side"] = self.e7.text();
self.system["mean"] = self.e8.text();
self.system["std"] = self.e9.text();
self.te1.setText("");
with open('obj_3_mxrcnn_infer.json', 'w') as outfile:
json.dump(self.system, outfile)
os.system("cp cfg/detection/object_detection/obj_3_mxrcnn/infer_obj_3_mxrcnn.py .");
os.system("cp cfg/detection/object_detection/obj_3_mxrcnn/infer_obj_3_mxrcnn.sh .");
self.process.start('bash', ['infer_obj_3_mxrcnn.sh'])
self.append("Process PID: " + str(self.process.pid()) + "\n");
def stop(self):
self.process.kill();
self.append("Prediction Stopped\n")
def stdoutReady(self):
text = str(self.process.readAllStandardOutput().data(), encoding='utf-8')
if("Completed" in text):
pixmap = QPixmap('output.png')
pixmap = pixmap.scaledToWidth(400)
pixmap = pixmap.scaledToHeight(300)
self.l10.setPixmap(pixmap)
self.append(text)
def stderrReady(self):
text = str(self.process.readAllStandardError().data(), encoding='utf-8')
self.append(text)
def append(self, text):
cursor = self.te1.textCursor()
self.te1.ensureCursorVisible()
cursor.movePosition(cursor.End)
cursor.insertText(text)
def backward(self):
self.system["model"] = self.cb1.currentText();
self.system["use_gpu"] = self.cb3.currentText();
self.system["conf_thresh"] = self.e4.text();
self.system["img_short_side"] = self.e6.text();
self.system["img_long_side"] = self.e7.text();
self.system["mean"] = self.e8.text();
self.system["std"] = self.e9.text();
with open('obj_3_mxrcnn_infer.json', 'w') as outfile:
json.dump(self.system, outfile)
self.backward_3_mxrcnn.emit();
| true | true |
f738226cd8cde499f6eebeac5b3f0e6fee2da507 | 682 | py | Python | django-server/climate_commander/jobs/migrations/0010_auto_20160822_1332.py | jrising/climate-commander | 123cf5a07b87eb1a3bdb44378ee27712b6563ec3 | [
"MIT"
] | null | null | null | django-server/climate_commander/jobs/migrations/0010_auto_20160822_1332.py | jrising/climate-commander | 123cf5a07b87eb1a3bdb44378ee27712b6563ec3 | [
"MIT"
] | 1 | 2016-08-03T21:49:58.000Z | 2016-08-03T21:49:58.000Z | django-server/climate_commander/jobs/migrations/0010_auto_20160822_1332.py | jrising/climate-commander | 123cf5a07b87eb1a3bdb44378ee27712b6563ec3 | [
"MIT"
] | 1 | 2016-07-13T18:19:56.000Z | 2016-07-13T18:19:56.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-08-22 20:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jobs', '0009_auto_20160822_1211'),
]
operations = [
migrations.AddField(
model_name='job',
name='result_file',
field=models.CharField(default='pvals.yml', max_length=200),
preserve_default=False,
),
migrations.AlterField(
model_name='jobrunningonserver',
name='status',
field=models.CharField(max_length=600, null=True),
),
]
| 25.259259 | 72 | 0.601173 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jobs', '0009_auto_20160822_1211'),
]
operations = [
migrations.AddField(
model_name='job',
name='result_file',
field=models.CharField(default='pvals.yml', max_length=200),
preserve_default=False,
),
migrations.AlterField(
model_name='jobrunningonserver',
name='status',
field=models.CharField(max_length=600, null=True),
),
]
| true | true |
f73822950568899fb701e184dd149897a911720a | 2,903 | py | Python | example/metrics/i1_data_knowledge_representation_weak.py | MaastrichtU-IDS/fair-testing | 9e64be68934dd20d0d2845bd9e17fc4d47f0d226 | [
"MIT"
] | 4 | 2022-02-14T12:33:16.000Z | 2022-03-28T11:43:44.000Z | example/metrics/i1_data_knowledge_representation_weak.py | MaastrichtU-IDS/fair-testing | 9e64be68934dd20d0d2845bd9e17fc4d47f0d226 | [
"MIT"
] | null | null | null | example/metrics/i1_data_knowledge_representation_weak.py | MaastrichtU-IDS/fair-testing | 9e64be68934dd20d0d2845bd9e17fc4d47f0d226 | [
"MIT"
] | 1 | 2022-02-15T05:58:55.000Z | 2022-02-15T05:58:55.000Z | import json
import requests
import yaml
from fair_test import FairTest, FairTestEvaluation
class MetricTest(FairTest):
metric_path = 'i1-data-knowledge-representation-weak'
applies_to_principle = 'I1'
title = 'Data uses a formal knowledge representation language (weak)'
description = """Maturity Indicator to test if the data uses a formal language broadly applicable for knowledge representation.
This particular test takes a broad view of what defines a 'knowledge representation language'; in this evaluation, anything that can be represented as structured data will be accepted"""
author = 'https://orcid.org/0000-0002-1501-1082'
metric_version = '0.1.0'
test_test={
'https://w3id.org/ejp-rd/fairdatapoints/wp13/dataset/c5414323-eab1-483f-a883-77951f246972': 1,
'https://doi.org/10.1594/PANGAEA.908011': 0,
}
def evaluate(self, eval: FairTestEvaluation):
g = eval.retrieve_metadata(eval.subject)
if len(g) > 1:
eval.info(f'Successfully found and parsed RDF metadata. It contains {str(len(g))} triples')
subject_uri = eval.extract_metadata_subject(g, eval.data['alternative_uris'])
# Retrieve URI of the data in the RDF metadata
data_res = eval.extract_data_subject(g, subject_uri)
if len(data_res) < 1:
eval.failure("Could not find data URI in the metadata.")
else:
eval.data['data_uri'] = data_res
# Check if structured data can be found at the data URI
for value in data_res:
eval.info(f'Found data URI: {value}. Try retrieving RDF')
data_g = eval.retrieve_metadata(value)
if len(data_g) > 1:
eval.info(f'Successfully retrieved RDF for the data URI: {value}. It contains {str(len(g))} triples')
eval.success(f'Successfully found and parsed RDF data for {value}')
else:
eval.warn(f'No RDF data found for {value}, searching for JSON')
try:
r = requests.get(value, headers={'accept': 'application/json'})
metadata = r.json()
eval.data['metadata_json'] = metadata
eval.success(f'Successfully found and parsed JSON data for {value}')
except:
eval.warn(f'No JSON metadata found for {value}, searching for YAML')
try:
r = requests.get(value, headers={'accept': 'text/yaml'})
metadata = yaml.load(r.text, Loader=yaml.FullLoader)
eval.data['metadata_yaml'] = metadata
eval.success(f'Successfully found and parsed YAML data for {value}')
except:
eval.failure(f'No YAML metadata found for {value}')
return eval.response()
| 47.590164 | 186 | 0.612814 | import json
import requests
import yaml
from fair_test import FairTest, FairTestEvaluation
class MetricTest(FairTest):
metric_path = 'i1-data-knowledge-representation-weak'
applies_to_principle = 'I1'
title = 'Data uses a formal knowledge representation language (weak)'
description = """Maturity Indicator to test if the data uses a formal language broadly applicable for knowledge representation.
This particular test takes a broad view of what defines a 'knowledge representation language'; in this evaluation, anything that can be represented as structured data will be accepted"""
author = 'https://orcid.org/0000-0002-1501-1082'
metric_version = '0.1.0'
test_test={
'https://w3id.org/ejp-rd/fairdatapoints/wp13/dataset/c5414323-eab1-483f-a883-77951f246972': 1,
'https://doi.org/10.1594/PANGAEA.908011': 0,
}
def evaluate(self, eval: FairTestEvaluation):
g = eval.retrieve_metadata(eval.subject)
if len(g) > 1:
eval.info(f'Successfully found and parsed RDF metadata. It contains {str(len(g))} triples')
subject_uri = eval.extract_metadata_subject(g, eval.data['alternative_uris'])
data_res = eval.extract_data_subject(g, subject_uri)
if len(data_res) < 1:
eval.failure("Could not find data URI in the metadata.")
else:
eval.data['data_uri'] = data_res
for value in data_res:
eval.info(f'Found data URI: {value}. Try retrieving RDF')
data_g = eval.retrieve_metadata(value)
if len(data_g) > 1:
eval.info(f'Successfully retrieved RDF for the data URI: {value}. It contains {str(len(g))} triples')
eval.success(f'Successfully found and parsed RDF data for {value}')
else:
eval.warn(f'No RDF data found for {value}, searching for JSON')
try:
r = requests.get(value, headers={'accept': 'application/json'})
metadata = r.json()
eval.data['metadata_json'] = metadata
eval.success(f'Successfully found and parsed JSON data for {value}')
except:
eval.warn(f'No JSON metadata found for {value}, searching for YAML')
try:
r = requests.get(value, headers={'accept': 'text/yaml'})
metadata = yaml.load(r.text, Loader=yaml.FullLoader)
eval.data['metadata_yaml'] = metadata
eval.success(f'Successfully found and parsed YAML data for {value}')
except:
eval.failure(f'No YAML metadata found for {value}')
return eval.response()
| true | true |
f73823d30c95fe8fb6e31fe46c72c32644993be1 | 2,345 | py | Python | tests/configs/realview-simple-timing.py | qianlong4526888/haha | 01baf923693873c11ae072ce4dde3d8f1d7b6239 | [
"BSD-3-Clause"
] | 31 | 2015-11-12T03:12:27.000Z | 2020-12-23T12:36:39.000Z | tests/configs/realview-simple-timing.py | qianlong4526888/haha | 01baf923693873c11ae072ce4dde3d8f1d7b6239 | [
"BSD-3-Clause"
] | 5 | 2015-12-04T08:06:47.000Z | 2020-08-09T21:49:46.000Z | tests/configs/realview-simple-timing.py | qianlong4526888/haha | 01baf923693873c11ae072ce4dde3d8f1d7b6239 | [
"BSD-3-Clause"
] | 21 | 2015-11-05T08:25:45.000Z | 2021-06-19T02:24:50.000Z | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.objects import *
from arm_generic import *
root = LinuxArmFSSystemUniprocessor(mem_mode='timing',
mem_class=DDR3_1600_x64,
cpu_class=TimingSimpleCPU).create_root()
| 53.295455 | 76 | 0.77484 |
from m5.objects import *
from arm_generic import *
root = LinuxArmFSSystemUniprocessor(mem_mode='timing',
mem_class=DDR3_1600_x64,
cpu_class=TimingSimpleCPU).create_root()
| true | true |
f73825f4e7fb012d4690e3a8e84f61ba23b77749 | 14,004 | py | Python | tests/test_response.py | githubztx/httprunner | 625dfab8e95e069df3275ee09dee3004bed60b1b | [
"Apache-2.0"
] | 5 | 2019-05-09T05:55:32.000Z | 2019-07-08T10:24:30.000Z | tests/test_response.py | githubztx/httprunner | 625dfab8e95e069df3275ee09dee3004bed60b1b | [
"Apache-2.0"
] | 1 | 2019-08-07T12:53:35.000Z | 2019-08-07T12:53:35.000Z | tests/test_response.py | githubztx/httprunner | 625dfab8e95e069df3275ee09dee3004bed60b1b | [
"Apache-2.0"
] | 1 | 2019-03-12T03:37:07.000Z | 2019-03-12T03:37:07.000Z | import requests
from httprunner import built_in, exceptions, loader, response
from httprunner.compat import basestring, bytes
from tests.api_server import HTTPBIN_SERVER
from tests.base import ApiServerUnittest
class TestResponse(ApiServerUnittest):
def setUp(self):
self.functions_mapping = loader.load_module_functions(built_in)
def test_parse_response_object_json(self):
url = "http://127.0.0.1:5000/api/users"
resp = requests.get(url)
resp_obj = response.ResponseObject(resp)
self.assertTrue(hasattr(resp_obj, 'status_code'))
self.assertTrue(hasattr(resp_obj, 'headers'))
self.assertTrue(hasattr(resp_obj, 'content'))
self.assertIn('Content-Type', resp_obj.headers)
self.assertIn('Content-Length', resp_obj.headers)
self.assertIn('success', resp_obj.json)
def test_parse_response_object_content(self):
url = "http://127.0.0.1:5000/"
resp = requests.get(url)
resp_obj = response.ResponseObject(resp)
self.assertEqual(bytes, type(resp_obj.content))
def test_extract_response_status_code(self):
resp = requests.get(url="{}/status/200".format(HTTPBIN_SERVER))
resp_obj = response.ResponseObject(resp)
extract_binds_list = [
{"resp_status_code": "status_code"}
]
extract_binds_dict = resp_obj.extract_response(extract_binds_list)
self.assertEqual(
extract_binds_dict["resp_status_code"],
200
)
extract_binds_list = [
{"resp_status_code": "status_code.xx"}
]
with self.assertRaises(exceptions.ParamsError):
resp_obj.extract_response(extract_binds_list)
def test_extract_response_encoding_ok_reason_url(self):
resp = requests.get(url="{}/status/200".format(HTTPBIN_SERVER))
resp_obj = response.ResponseObject(resp)
extract_binds_list = [
{"resp_encoding": "encoding"},
{"resp_ok": "ok"},
{"resp_reason": "reason"},
{"resp_url": "url"}
]
extract_binds_dict = resp_obj.extract_response(extract_binds_list)
self.assertEqual(extract_binds_dict["resp_encoding"], "utf-8")
self.assertEqual(extract_binds_dict["resp_ok"], True)
self.assertEqual(extract_binds_dict["resp_reason"], "OK")
self.assertEqual(extract_binds_dict["resp_url"], "{}/status/200".format(HTTPBIN_SERVER))
extract_binds_list = [{"resp_encoding": "encoding.xx"}]
with self.assertRaises(exceptions.ParamsError):
resp_obj.extract_response(extract_binds_list)
extract_binds_list = [{"resp_ok": "ok.xx"}]
with self.assertRaises(exceptions.ParamsError):
resp_obj.extract_response(extract_binds_list)
extract_binds_list = [{"resp_reason": "reason.xx"}]
with self.assertRaises(exceptions.ParamsError):
resp_obj.extract_response(extract_binds_list)
extract_binds_list = [{"resp_url": "url.xx"}]
with self.assertRaises(exceptions.ParamsError):
resp_obj.extract_response(extract_binds_list)
def test_extract_response_cookies(self):
resp = requests.get(
url="{}/cookies".format(HTTPBIN_SERVER),
headers={
"accept": "application/json"
}
)
resp_obj = response.ResponseObject(resp)
extract_binds_list = [
{"resp_cookies": "cookies"}
]
extract_binds_dict = resp_obj.extract_response(extract_binds_list)
self.assertEqual(
extract_binds_dict["resp_cookies"],
{}
)
extract_binds_list = [
{"resp_cookies": "cookies.xx"}
]
with self.assertRaises(exceptions.ExtractFailure):
resp_obj.extract_response(extract_binds_list)
def test_extract_response_elapsed(self):
resp = requests.post(
url="{}/anything".format(HTTPBIN_SERVER),
json={
'success': False,
"person": {
"name": {
"first_name": "Leo",
"last_name": "Lee",
},
"age": 29,
"cities": ["Guangzhou", "Shenzhen"]
}
}
)
resp_obj = response.ResponseObject(resp)
extract_binds_list = [
{"resp_elapsed": "elapsed"}
]
with self.assertRaises(exceptions.ParamsError):
resp_obj.extract_response(extract_binds_list)
extract_binds_list = [
{"resp_elapsed_microseconds": "elapsed.microseconds"},
{"resp_elapsed_seconds": "elapsed.seconds"},
{"resp_elapsed_days": "elapsed.days"},
{"resp_elapsed_total_seconds": "elapsed.total_seconds"}
]
extract_binds_dict = resp_obj.extract_response(extract_binds_list)
self.assertGreater(extract_binds_dict["resp_elapsed_microseconds"], 1000)
self.assertLess(extract_binds_dict["resp_elapsed_seconds"], 60)
self.assertEqual(extract_binds_dict["resp_elapsed_days"], 0)
self.assertGreater(extract_binds_dict["resp_elapsed_total_seconds"], 0)
extract_binds_list = [
{"resp_elapsed": "elapsed.years"}
]
with self.assertRaises(exceptions.ParamsError):
resp_obj.extract_response(extract_binds_list)
def test_extract_response_headers(self):
resp = requests.get(url="{}/status/200".format(HTTPBIN_SERVER))
resp_obj = response.ResponseObject(resp)
extract_binds_list = [
{"resp_headers": "headers"},
{"resp_headers_content_type": "headers.Content-Type"},
{"resp_headers_content_type_lowercase": "headers.content-type"}
]
extract_binds_dict = resp_obj.extract_response(extract_binds_list)
self.assertIn("Content-Type", extract_binds_dict["resp_headers"])
self.assertIn("text/html", extract_binds_dict["resp_headers_content_type"])
self.assertIn("text/html", extract_binds_dict["resp_headers_content_type_lowercase"])
extract_binds_list = [
{"resp_headers_xxx": "headers.xxx"}
]
with self.assertRaises(exceptions.ExtractFailure):
resp_obj.extract_response(extract_binds_list)
def test_extract_response_body_json(self):
resp = requests.post(
url="{}/anything".format(HTTPBIN_SERVER),
json={
'success': False,
"person": {
"name": {
"first_name": "Leo",
"last_name": "Lee",
},
"age": 29,
"cities": ["Guangzhou", "Shenzhen"]
}
}
)
# resp.json()
# {
# "args": {},
# "data": "{\"success\": false, \"person\": {\"name\": {\"first_name\": \"Leo\", \"last_name\": \"Lee\"}, \"age\": 29, \"cities\": [\"Guangzhou\", \"Shenzhen\"]}}",
# "files": {},
# "form": {},
# "headers": {
# "Accept": "*/*",
# "Accept-Encoding": "gzip, deflate",
# "Connection": "keep-alive",
# "Content-Length": "129",
# "Content-Type": "application/json",
# "Host": HTTPBIN_SERVER,
# "User-Agent": "python-requests/2.18.4"
# },
# "json": {
# "person": {
# "age": 29,
# "cities": [
# "Guangzhou",
# "Shenzhen"
# ],
# "name": {
# "first_name": "Leo",
# "last_name": "Lee"
# }
# },
# "success": false
# },
# "method": "POST",
# "origin": "127.0.0.1",
# "url": "{}/anything".format(HTTPBIN_SERVER)
# }
extract_binds_list = [
{"resp_headers_content_type": "headers.content-type"},
{"resp_content_body_success": "json.json.success"},
{"resp_content_content_success": "content.json.success"},
{"resp_content_text_success": "text.json.success"},
{"resp_content_person_first_name": "content.json.person.name.first_name"},
{"resp_content_cities_1": "content.json.person.cities.1"}
]
resp_obj = response.ResponseObject(resp)
extract_binds_dict = resp_obj.extract_response(extract_binds_list)
self.assertEqual(
extract_binds_dict["resp_headers_content_type"],
"application/json"
)
self.assertEqual(
extract_binds_dict["resp_content_body_success"],
False
)
self.assertEqual(
extract_binds_dict["resp_content_content_success"],
False
)
self.assertEqual(
extract_binds_dict["resp_content_text_success"],
False
)
self.assertEqual(
extract_binds_dict["resp_content_person_first_name"],
"Leo"
)
self.assertEqual(
extract_binds_dict["resp_content_cities_1"],
"Shenzhen"
)
def test_extract_response_body_html(self):
resp = requests.get(url=HTTPBIN_SERVER)
resp_obj = response.ResponseObject(resp)
extract_binds_list = [
{"resp_content": "content"}
]
extract_binds_dict = resp_obj.extract_response(extract_binds_list)
self.assertIsInstance(extract_binds_dict["resp_content"], basestring)
self.assertIn("httpbin.org", extract_binds_dict["resp_content"])
extract_binds_list = [
{"resp_content": "content.xxx"}
]
with self.assertRaises(exceptions.ExtractFailure):
resp_obj.extract_response(extract_binds_list)
def test_extract_response_others(self):
resp = requests.get(url="{}/status/200".format(HTTPBIN_SERVER))
resp_obj = response.ResponseObject(resp)
extract_binds_list = [
{"resp_others_encoding": "encoding"},
{"resp_others_history": "history"}
]
with self.assertRaises(exceptions.ParamsError):
resp_obj.extract_response(extract_binds_list)
def test_extract_response_fail(self):
resp = requests.post(
url="{}/anything".format(HTTPBIN_SERVER),
json={
'success': False,
"person": {
"name": {
"first_name": "Leo",
"last_name": "Lee",
},
"age": 29,
"cities": ["Guangzhou", "Shenzhen"]
}
}
)
extract_binds_list = [
{"resp_content_dict_key_error": "content.not_exist"}
]
resp_obj = response.ResponseObject(resp)
with self.assertRaises(exceptions.ExtractFailure):
resp_obj.extract_response(extract_binds_list)
extract_binds_list = [
{"resp_content_list_index_error": "content.person.cities.3"}
]
resp_obj = response.ResponseObject(resp)
with self.assertRaises(exceptions.ExtractFailure):
resp_obj.extract_response(extract_binds_list)
def test_extract_response_json_string(self):
resp = requests.post(
url="{}/anything".format(HTTPBIN_SERVER),
data="abc"
)
extract_binds_list = [
{"resp_content_body": "content.data"}
]
resp_obj = response.ResponseObject(resp)
extract_binds_dict = resp_obj.extract_response(extract_binds_list)
self.assertEqual(
extract_binds_dict["resp_content_body"],
"abc"
)
def test_extract_text_response(self):
resp = requests.post(
url="{}/anything".format(HTTPBIN_SERVER),
data="LB123abcRB789"
)
extract_binds_list = [
{"resp_content_key1": "LB123(.*)RB789"},
{"resp_content_key2": "LB[\d]*(.*)RB[\d]*"},
{"resp_content_key3": "LB[\d]*(.*)9"}
]
resp_obj = response.ResponseObject(resp)
extract_binds_dict = resp_obj.extract_response(extract_binds_list)
self.assertEqual(
extract_binds_dict["resp_content_key1"],
"abc"
)
self.assertEqual(
extract_binds_dict["resp_content_key2"],
"abc"
)
self.assertEqual(
extract_binds_dict["resp_content_key3"],
"abcRB78"
)
def test_extract_text_response_exception(self):
resp = requests.post(
url="{}/anything".format(HTTPBIN_SERVER),
data="LB123abcRB789"
)
extract_binds_list = [
{"resp_content_key1": "LB123.*RB789"}
]
resp_obj = response.ResponseObject(resp)
with self.assertRaises(exceptions.ParamsError):
resp_obj.extract_response(extract_binds_list)
def test_extract_response_empty(self):
resp = requests.post(
url="{}/anything".format(HTTPBIN_SERVER),
data="abc"
)
extract_binds_list = [
{"resp_content_body": "content.data"}
]
resp_obj = response.ResponseObject(resp)
extract_binds_dict = resp_obj.extract_response(extract_binds_list)
self.assertEqual(
extract_binds_dict["resp_content_body"],
'abc'
)
extract_binds_list = [
{"resp_content_body": "content.data.def"}
]
resp_obj = response.ResponseObject(resp)
with self.assertRaises(exceptions.ExtractFailure):
resp_obj.extract_response(extract_binds_list)
| 35.72449 | 176 | 0.576407 | import requests
from httprunner import built_in, exceptions, loader, response
from httprunner.compat import basestring, bytes
from tests.api_server import HTTPBIN_SERVER
from tests.base import ApiServerUnittest
class TestResponse(ApiServerUnittest):
def setUp(self):
self.functions_mapping = loader.load_module_functions(built_in)
def test_parse_response_object_json(self):
url = "http://127.0.0.1:5000/api/users"
resp = requests.get(url)
resp_obj = response.ResponseObject(resp)
self.assertTrue(hasattr(resp_obj, 'status_code'))
self.assertTrue(hasattr(resp_obj, 'headers'))
self.assertTrue(hasattr(resp_obj, 'content'))
self.assertIn('Content-Type', resp_obj.headers)
self.assertIn('Content-Length', resp_obj.headers)
self.assertIn('success', resp_obj.json)
def test_parse_response_object_content(self):
url = "http://127.0.0.1:5000/"
resp = requests.get(url)
resp_obj = response.ResponseObject(resp)
self.assertEqual(bytes, type(resp_obj.content))
def test_extract_response_status_code(self):
resp = requests.get(url="{}/status/200".format(HTTPBIN_SERVER))
resp_obj = response.ResponseObject(resp)
extract_binds_list = [
{"resp_status_code": "status_code"}
]
extract_binds_dict = resp_obj.extract_response(extract_binds_list)
self.assertEqual(
extract_binds_dict["resp_status_code"],
200
)
extract_binds_list = [
{"resp_status_code": "status_code.xx"}
]
with self.assertRaises(exceptions.ParamsError):
resp_obj.extract_response(extract_binds_list)
def test_extract_response_encoding_ok_reason_url(self):
resp = requests.get(url="{}/status/200".format(HTTPBIN_SERVER))
resp_obj = response.ResponseObject(resp)
extract_binds_list = [
{"resp_encoding": "encoding"},
{"resp_ok": "ok"},
{"resp_reason": "reason"},
{"resp_url": "url"}
]
extract_binds_dict = resp_obj.extract_response(extract_binds_list)
self.assertEqual(extract_binds_dict["resp_encoding"], "utf-8")
self.assertEqual(extract_binds_dict["resp_ok"], True)
self.assertEqual(extract_binds_dict["resp_reason"], "OK")
self.assertEqual(extract_binds_dict["resp_url"], "{}/status/200".format(HTTPBIN_SERVER))
extract_binds_list = [{"resp_encoding": "encoding.xx"}]
with self.assertRaises(exceptions.ParamsError):
resp_obj.extract_response(extract_binds_list)
extract_binds_list = [{"resp_ok": "ok.xx"}]
with self.assertRaises(exceptions.ParamsError):
resp_obj.extract_response(extract_binds_list)
extract_binds_list = [{"resp_reason": "reason.xx"}]
with self.assertRaises(exceptions.ParamsError):
resp_obj.extract_response(extract_binds_list)
extract_binds_list = [{"resp_url": "url.xx"}]
with self.assertRaises(exceptions.ParamsError):
resp_obj.extract_response(extract_binds_list)
def test_extract_response_cookies(self):
resp = requests.get(
url="{}/cookies".format(HTTPBIN_SERVER),
headers={
"accept": "application/json"
}
)
resp_obj = response.ResponseObject(resp)
extract_binds_list = [
{"resp_cookies": "cookies"}
]
extract_binds_dict = resp_obj.extract_response(extract_binds_list)
self.assertEqual(
extract_binds_dict["resp_cookies"],
{}
)
extract_binds_list = [
{"resp_cookies": "cookies.xx"}
]
with self.assertRaises(exceptions.ExtractFailure):
resp_obj.extract_response(extract_binds_list)
def test_extract_response_elapsed(self):
resp = requests.post(
url="{}/anything".format(HTTPBIN_SERVER),
json={
'success': False,
"person": {
"name": {
"first_name": "Leo",
"last_name": "Lee",
},
"age": 29,
"cities": ["Guangzhou", "Shenzhen"]
}
}
)
resp_obj = response.ResponseObject(resp)
extract_binds_list = [
{"resp_elapsed": "elapsed"}
]
with self.assertRaises(exceptions.ParamsError):
resp_obj.extract_response(extract_binds_list)
extract_binds_list = [
{"resp_elapsed_microseconds": "elapsed.microseconds"},
{"resp_elapsed_seconds": "elapsed.seconds"},
{"resp_elapsed_days": "elapsed.days"},
{"resp_elapsed_total_seconds": "elapsed.total_seconds"}
]
extract_binds_dict = resp_obj.extract_response(extract_binds_list)
self.assertGreater(extract_binds_dict["resp_elapsed_microseconds"], 1000)
self.assertLess(extract_binds_dict["resp_elapsed_seconds"], 60)
self.assertEqual(extract_binds_dict["resp_elapsed_days"], 0)
self.assertGreater(extract_binds_dict["resp_elapsed_total_seconds"], 0)
extract_binds_list = [
{"resp_elapsed": "elapsed.years"}
]
with self.assertRaises(exceptions.ParamsError):
resp_obj.extract_response(extract_binds_list)
def test_extract_response_headers(self):
resp = requests.get(url="{}/status/200".format(HTTPBIN_SERVER))
resp_obj = response.ResponseObject(resp)
extract_binds_list = [
{"resp_headers": "headers"},
{"resp_headers_content_type": "headers.Content-Type"},
{"resp_headers_content_type_lowercase": "headers.content-type"}
]
extract_binds_dict = resp_obj.extract_response(extract_binds_list)
self.assertIn("Content-Type", extract_binds_dict["resp_headers"])
self.assertIn("text/html", extract_binds_dict["resp_headers_content_type"])
self.assertIn("text/html", extract_binds_dict["resp_headers_content_type_lowercase"])
extract_binds_list = [
{"resp_headers_xxx": "headers.xxx"}
]
with self.assertRaises(exceptions.ExtractFailure):
resp_obj.extract_response(extract_binds_list)
def test_extract_response_body_json(self):
resp = requests.post(
url="{}/anything".format(HTTPBIN_SERVER),
json={
'success': False,
"person": {
"name": {
"first_name": "Leo",
"last_name": "Lee",
},
"age": 29,
"cities": ["Guangzhou", "Shenzhen"]
}
}
)
extract_binds_list = [
{"resp_headers_content_type": "headers.content-type"},
{"resp_content_body_success": "json.json.success"},
{"resp_content_content_success": "content.json.success"},
{"resp_content_text_success": "text.json.success"},
{"resp_content_person_first_name": "content.json.person.name.first_name"},
{"resp_content_cities_1": "content.json.person.cities.1"}
]
resp_obj = response.ResponseObject(resp)
extract_binds_dict = resp_obj.extract_response(extract_binds_list)
self.assertEqual(
extract_binds_dict["resp_headers_content_type"],
"application/json"
)
self.assertEqual(
extract_binds_dict["resp_content_body_success"],
False
)
self.assertEqual(
extract_binds_dict["resp_content_content_success"],
False
)
self.assertEqual(
extract_binds_dict["resp_content_text_success"],
False
)
self.assertEqual(
extract_binds_dict["resp_content_person_first_name"],
"Leo"
)
self.assertEqual(
extract_binds_dict["resp_content_cities_1"],
"Shenzhen"
)
def test_extract_response_body_html(self):
resp = requests.get(url=HTTPBIN_SERVER)
resp_obj = response.ResponseObject(resp)
extract_binds_list = [
{"resp_content": "content"}
]
extract_binds_dict = resp_obj.extract_response(extract_binds_list)
self.assertIsInstance(extract_binds_dict["resp_content"], basestring)
self.assertIn("httpbin.org", extract_binds_dict["resp_content"])
extract_binds_list = [
{"resp_content": "content.xxx"}
]
with self.assertRaises(exceptions.ExtractFailure):
resp_obj.extract_response(extract_binds_list)
def test_extract_response_others(self):
resp = requests.get(url="{}/status/200".format(HTTPBIN_SERVER))
resp_obj = response.ResponseObject(resp)
extract_binds_list = [
{"resp_others_encoding": "encoding"},
{"resp_others_history": "history"}
]
with self.assertRaises(exceptions.ParamsError):
resp_obj.extract_response(extract_binds_list)
def test_extract_response_fail(self):
resp = requests.post(
url="{}/anything".format(HTTPBIN_SERVER),
json={
'success': False,
"person": {
"name": {
"first_name": "Leo",
"last_name": "Lee",
},
"age": 29,
"cities": ["Guangzhou", "Shenzhen"]
}
}
)
extract_binds_list = [
{"resp_content_dict_key_error": "content.not_exist"}
]
resp_obj = response.ResponseObject(resp)
with self.assertRaises(exceptions.ExtractFailure):
resp_obj.extract_response(extract_binds_list)
extract_binds_list = [
{"resp_content_list_index_error": "content.person.cities.3"}
]
resp_obj = response.ResponseObject(resp)
with self.assertRaises(exceptions.ExtractFailure):
resp_obj.extract_response(extract_binds_list)
def test_extract_response_json_string(self):
resp = requests.post(
url="{}/anything".format(HTTPBIN_SERVER),
data="abc"
)
extract_binds_list = [
{"resp_content_body": "content.data"}
]
resp_obj = response.ResponseObject(resp)
extract_binds_dict = resp_obj.extract_response(extract_binds_list)
self.assertEqual(
extract_binds_dict["resp_content_body"],
"abc"
)
def test_extract_text_response(self):
resp = requests.post(
url="{}/anything".format(HTTPBIN_SERVER),
data="LB123abcRB789"
)
extract_binds_list = [
{"resp_content_key1": "LB123(.*)RB789"},
{"resp_content_key2": "LB[\d]*(.*)RB[\d]*"},
{"resp_content_key3": "LB[\d]*(.*)9"}
]
resp_obj = response.ResponseObject(resp)
extract_binds_dict = resp_obj.extract_response(extract_binds_list)
self.assertEqual(
extract_binds_dict["resp_content_key1"],
"abc"
)
self.assertEqual(
extract_binds_dict["resp_content_key2"],
"abc"
)
self.assertEqual(
extract_binds_dict["resp_content_key3"],
"abcRB78"
)
def test_extract_text_response_exception(self):
resp = requests.post(
url="{}/anything".format(HTTPBIN_SERVER),
data="LB123abcRB789"
)
extract_binds_list = [
{"resp_content_key1": "LB123.*RB789"}
]
resp_obj = response.ResponseObject(resp)
with self.assertRaises(exceptions.ParamsError):
resp_obj.extract_response(extract_binds_list)
def test_extract_response_empty(self):
resp = requests.post(
url="{}/anything".format(HTTPBIN_SERVER),
data="abc"
)
extract_binds_list = [
{"resp_content_body": "content.data"}
]
resp_obj = response.ResponseObject(resp)
extract_binds_dict = resp_obj.extract_response(extract_binds_list)
self.assertEqual(
extract_binds_dict["resp_content_body"],
'abc'
)
extract_binds_list = [
{"resp_content_body": "content.data.def"}
]
resp_obj = response.ResponseObject(resp)
with self.assertRaises(exceptions.ExtractFailure):
resp_obj.extract_response(extract_binds_list)
| true | true |
f738260086ccd3653bc2367e7b8083819a301d9b | 1,807 | py | Python | preprocessing/metadata.py | skincare-deep-learning/Skincare-backend | 80ed6b7a735291848be9248035231fbd55c93990 | [
"Apache-2.0"
] | 1 | 2019-11-27T20:56:27.000Z | 2019-11-27T20:56:27.000Z | preprocessing/metadata.py | skincare-deep-learning/Skincare-backend | 80ed6b7a735291848be9248035231fbd55c93990 | [
"Apache-2.0"
] | 10 | 2021-04-02T19:47:15.000Z | 2022-01-13T01:52:53.000Z | preprocessing/metadata.py | skincare-deep-learning/Skincare-backend | 80ed6b7a735291848be9248035231fbd55c93990 | [
"Apache-2.0"
] | null | null | null | import json
import csv
import pandas as pd
from isic_api import ISICApi
from pandas.io.json import json_normalize
# Initialize the API; no login is necessary for public data
api = ISICApi(username="SkinCare", password="unbdeeplearning")
outputFileName = 'imagedata'
imageList = api.getJson('image?limit=25000&offset=0&sort=name')
print('Fetching metadata for %s images' % len(imageList))
imageDetails = []
i = 0
for image in imageList:
print(' ', image['name'])
# Pull image details
imageDetail = api.getJson('image/%s' % image['_id'])
imageDetails.append(imageDetail)
"""
# Testing Parameters
print("****************************")
print(imageDetails[0]['meta']['clinical']['anatom_site_general'])
print("****************************")
data = json_normalize(imageDetails[0])
print(data.loc[0])
data = json_normalize(imageDetails[0])
print(data.loc[0])
print("========================================================")
print(data.loc[0]['dataset.name'])
"""
# Determine the union of all image metadata fields
metadataFields = set(
field
for imageDetail in imageDetails
for field in imageDetail['meta']['clinical'].keys()
)
metadataFields = ['isic_id'] + sorted(metadataFields)
# print(metadataFields)
outputFilePath = './metadata.csv'
# Write Metadata to a CSV
print('Writing metadata to CSV: %s' % 'metadata.csv')
with open(outputFilePath, 'w') as outputStream:
csvWriter = csv.DictWriter(outputStream, fieldnames=metadataFields)
csvWriter.writeheader() # Columns Names
for imageDetail in imageDetails:
rowDict = imageDetail['meta']['clinical'].copy()
rowDict['isic_id'] = imageDetail['name']
# rowDict['anatom_site_general'] = imageDetail['meta']['clinical']['anatom_site_general'] # Subjective
csvWriter.writerow(rowDict) | 30.627119 | 110 | 0.672939 | import json
import csv
import pandas as pd
from isic_api import ISICApi
from pandas.io.json import json_normalize
api = ISICApi(username="SkinCare", password="unbdeeplearning")
outputFileName = 'imagedata'
imageList = api.getJson('image?limit=25000&offset=0&sort=name')
print('Fetching metadata for %s images' % len(imageList))
imageDetails = []
i = 0
for image in imageList:
print(' ', image['name'])
imageDetail = api.getJson('image/%s' % image['_id'])
imageDetails.append(imageDetail)
metadataFields = set(
field
for imageDetail in imageDetails
for field in imageDetail['meta']['clinical'].keys()
)
metadataFields = ['isic_id'] + sorted(metadataFields)
outputFilePath = './metadata.csv'
print('Writing metadata to CSV: %s' % 'metadata.csv')
with open(outputFilePath, 'w') as outputStream:
csvWriter = csv.DictWriter(outputStream, fieldnames=metadataFields)
csvWriter.writeheader()
for imageDetail in imageDetails:
rowDict = imageDetail['meta']['clinical'].copy()
rowDict['isic_id'] = imageDetail['name']
Writer.writerow(rowDict) | true | true |
f73826463a1ba0ab31fdecabea7ca0c19965ab64 | 815 | py | Python | talentpool/urls.py | klevamane/talentpool | f0de0861f90a3063a19183e8355d635c6a24d353 | [
"MIT"
] | null | null | null | talentpool/urls.py | klevamane/talentpool | f0de0861f90a3063a19183e8355d635c6a24d353 | [
"MIT"
] | null | null | null | talentpool/urls.py | klevamane/talentpool | f0de0861f90a3063a19183e8355d635c6a24d353 | [
"MIT"
] | null | null | null | """talentpool URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('api-auth/', include('rest_framework.urls'))
]
| 35.434783 | 77 | 0.707975 | from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('api-auth/', include('rest_framework.urls'))
]
| true | true |
f73826e2d6c836b4cf6eee7deeabee599aa4244b | 18,034 | py | Python | tests/python/relay/test_op_level1.py | zhanghaohit/incubator-tvm | ee0af843f3c5a3429e888079afb5f30789bd9bee | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null | tests/python/relay/test_op_level1.py | zhanghaohit/incubator-tvm | ee0af843f3c5a3429e888079afb5f30789bd9bee | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null | tests/python/relay/test_op_level1.py | zhanghaohit/incubator-tvm | ee0af843f3c5a3429e888079afb5f30789bd9bee | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
import tvm
import scipy
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import ctx_list
import topi.testing
from tvm.contrib.nvcc import have_fp16
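# Helper: run Relay type inference on an expression and return the typed node.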
def run_infer_type(expr):
mod = relay.Module.from_expr(expr)
mod = transform.InferType()(mod)
entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
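# NumPy reference implementations used to validate Relay op outputs.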
def sigmoid(x):
one = np.ones_like(x)
return one / (one + np.exp(-x))
def relu(x):
x_copy = np.copy(x)
np.maximum(x_copy, 0, x_copy)
return x_copy
def rsqrt(x):
one = np.ones_like(x)
return one / np.sqrt(x)
def test_unary_op():
def check_single_op(opfunc, ref, dtype):
        shape = (10, 4)
tp = relay.TensorType(shape)
x = relay.var("x", tp, dtype=dtype)
y = opfunc(x)
# test printer
assert ("{}(%x)".format(y.op.name)) in y.astext()
# test type inference
yy = run_infer_type(y)
assert yy.checked_type == tp
if ref is not None:
data = np.random.rand(*shape).astype(dtype)
ref_res = ref(data)
func = relay.Function([x], y)
for target, ctx in ctx_list():
                # use the graph executor by default for testing, as we need to
                # create the function explicitly to avoid constant folding.
if dtype == 'float16' and target == 'cuda' and not have_fp16(tvm.gpu(0).compute_version):
continue
intrp = relay.create_executor("graph", ctx=ctx, target=target)
op_res = intrp.evaluate(func)(data)
np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
for opfunc, ref in [(tvm.relay.log, np.log),
(tvm.relay.exp, np.exp),
(tvm.relay.erf, scipy.special.erf),
(tvm.relay.sqrt, np.sqrt),
(tvm.relay.rsqrt, rsqrt),
(tvm.relay.sigmoid, sigmoid),
(tvm.relay.tanh, np.tanh),
(relay.nn.relu, relu),
(tvm.relay.cos, np.cos),
(tvm.relay.sin, np.sin),
(tvm.relay.atan, np.arctan)]:
for dtype in ['float16', 'float32']:
check_single_op(opfunc, ref, dtype)
def test_binary_op():
def inst(vars, sh):
return [vars.get(s, s) for s in sh]
def check_binary_op(opfunc, ref, dtype):
# TODO(@jroesch): this piece of code improperly uses type variables.
n = tvm.var("n")
s1 = (5, n, 5)
s2 = (n, 1)
t1 = relay.TensorType(s1)
t2 = relay.TensorType(s2)
x = relay.var("x", t1, dtype=dtype)
y = relay.var("y", t2, dtype=dtype)
z = opfunc(x, y)
# test printer
assert ("{}(%x, %y)".format(z.op.name)) in z.astext()
zz = run_infer_type(z)
assert zz.checked_type == t1
if ref is not None:
t1 = relay.TensorType((5, 10, 5))
t2 = relay.TensorType((5, 10, 5))
x = relay.var("x", t1, dtype=dtype)
y = relay.var("y", t2, dtype=dtype)
z = opfunc(x, y)
x_data = np.random.rand(5, 10, 5).astype(dtype)
y_data = np.random.rand(5, 10, 5).astype(dtype)
ref_res = ref(x_data, y_data)
func = relay.Function([x, y], z)
for target, ctx in ctx_list():
                # use the graph executor by default for testing, as we need to
                # create the function explicitly to avoid constant folding.
if dtype == 'float16' and target == 'cuda' and not have_fp16(tvm.gpu(0).compute_version):
continue
intrp = relay.create_executor("graph", ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data, y_data)
np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
for opfunc, ref in [(relay.add, np.add),
(relay.subtract, np.subtract),
(relay.multiply, np.multiply),
(relay.divide, np.divide),
(relay.floor_divide, np.floor_divide),
(relay.floor_mod, np.fmod)]:
for dtype in ['float16', 'float32']:
check_binary_op(opfunc, ref, dtype)
def test_expand_dims():
# based on topi test
def verify_expand_dims(dshape, dtype, oshape, axis, num_newaxis):
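        # expand_dims inserts num_newaxis unit axes at position axis;
        # reshaping to oshape gives the NumPy reference.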
x = relay.Var("x", relay.TensorType(dshape, dtype))
func = relay.Function([x], relay.expand_dims(x, axis, num_newaxis))
for target, ctx in ctx_list():
if dtype == 'float16' and target == 'cuda' and not have_fp16(tvm.gpu(0).compute_version):
continue
data = np.random.uniform(size=dshape).astype(dtype)
ref_res = data.reshape(oshape)
intrp = relay.create_executor("graph", ctx=ctx, target=target)
op_res = intrp.evaluate(func)(data)
np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
for dtype in ['float16', 'float32']:
verify_expand_dims((3, 10), dtype, (3, 10, 1, 1), 2, 2)
verify_expand_dims((3, 10), dtype, (1, 3, 10), -3, 1)
def test_bias_add():
for dtype in ['float16', 'float32']:
xshape=(10, 2, 3, 4)
bshape=(2,)
rtol = 1e-2 if dtype == 'float16' else 1e-5
x = relay.var("x", shape=xshape, dtype=dtype)
bias = relay.var("bias", dtype=dtype)
z = relay.nn.bias_add(x, bias)
zz = run_infer_type(z)
assert "axis=" not in zz.astext()
assert zz.args[1].checked_type == relay.TensorType(bshape, dtype)
func = relay.Function([x, bias], z)
x_data = np.random.uniform(size=xshape).astype(dtype)
y_data = np.random.uniform(size=bshape).astype(dtype)
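        # nn.bias_add defaults to axis=1, so the bias broadcasts over (C, 1, 1).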
ref_res = x_data + y_data.reshape((2, 1, 1))
for target, ctx in ctx_list():
if dtype == 'float16' and target == 'cuda' and not have_fp16(tvm.gpu(0).compute_version):
continue
intrp = relay.create_executor("graph", ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data, y_data)
np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=rtol)
def test_expand_dims_infer_type():
for dtype in ['float16', 'float32']:
n, t, d = tvm.size_var("n"), tvm.size_var("t"), 100
x = relay.var("x", shape=(n, t, d), dtype=dtype)
y = relay.expand_dims(x, axis=2)
assert "axis=2" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, t, 1, 100), dtype)
def test_softmax():
for dtype in ['float16', 'float32']:
        # Softmax accuracy for float16 is poor; skip it and test only float32.
        if dtype == 'float16':
            continue
shape = (10, 4)
x = relay.var("x", shape=shape, dtype=dtype)
y = relay.nn.softmax(x, axis=1)
assert "nn.softmax" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(shape, dtype)
func = relay.Function([x], y)
x_data = np.random.uniform(size=shape).astype(dtype)
ref_res = topi.testing.softmax_python(x_data)
for target, ctx in ctx_list():
intrp = relay.create_executor("graph", ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data)
np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
def test_log_softmax():
for dtype in ['float16', 'float32']:
        # Softmax accuracy for float16 is poor; skip it and test only float32.
        if dtype == 'float16':
            continue
shape = (10, 4)
x = relay.var("x", shape=shape, dtype=dtype)
y = relay.nn.log_softmax(x, axis=1)
assert "nn.log_softmax" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(shape, dtype)
func = relay.Function([x], y)
x_data = np.random.uniform(size=shape).astype(dtype)
ref_res = topi.testing.log_softmax_python(x_data)
for target, ctx in ctx_list():
intrp = relay.create_executor("graph", ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data)
np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
def test_concatenate():
for dtype in ['float16', 'float32']:
n, t, d = tvm.size_var("n"), tvm.size_var("t"), 100
x = relay.var("x", shape=(n, t, d))
y = relay.var("y", shape=(n, t, d))
z = relay.concatenate((x, y), axis=-1)
assert "axis=" in z.astext()
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((n, t, 200))
x = relay.exp(x)
z = relay.concatenate((x, y), axis=2)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((n, t, 200))
z = relay.concatenate((x, y), axis=1)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((n, t + t, 100))
        # check shape mismatches (the following case is expected to raise tvm._ffi.base.TVMError)
try:
x = relay.var('p1', shape=(2, 5))
y = relay.var('p2', shape=(2, 3))
c = relay.concatenate([x, y], axis=0)
func = relay.Function([x, y], c)
zz = run_infer_type(func)
except tvm._ffi.base.TVMError:
pass
else:
assert False
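        # Concrete run: concatenate two (10, 5) tensors along axis=1, then add a scalar.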
x = relay.var("x", shape=(10, 5), dtype=dtype)
y = relay.var("y", shape=(10, 5), dtype=dtype)
t = relay.var("z", shape=(), dtype=dtype)
z = relay.concatenate((x, y), axis=1)
z = relay.add(z, t)
# Check result.
func = relay.Function([x, y, t], z)
x_data = np.random.rand(10, 5).astype(dtype)
y_data = np.random.rand(10, 5).astype(dtype)
t_data = np.random.uniform(size=()).astype(dtype)
ref_res = np.concatenate((x_data, y_data), axis=1) + t_data
for target, ctx in ctx_list():
if dtype == 'float16' and target == 'cuda' and not have_fp16(tvm.gpu(0).compute_version):
continue
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(x_data, y_data, t_data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=0.01)
op_res2 = intrp2.evaluate(func)(x_data, y_data, t_data)
tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=0.01)
def test_dropout():
for dtype in ['float16', 'float32']:
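        # dropout leaves the tensor type unchanged; only type inference is exercised here.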
n, t, d = tvm.size_var("n"), tvm.size_var("t"), tvm.size_var("d")
input_ty = relay.TensorType((n, t, d), dtype)
x = relay.var("x", input_ty)
y = relay.nn.dropout(x, rate=0.75)
assert "rate=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == input_ty
def test_batch_norm():
for dtype in ['float16', 'float32']:
# beta and gamma ignored
data = relay.var("data", relay.TensorType((3, 2, 1), dtype))
beta = relay.var("beta", relay.TensorType((2,), dtype))
gamma = relay.var("gamma", relay.TensorType((2,), dtype))
moving_mean = relay.var("moving_mean", relay.TensorType((2,), dtype))
moving_var = relay.var("moving_var", relay.TensorType((2,), dtype))
y = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var,
center=False, scale=False)
yy = run_infer_type(y.astuple())
assert "center=" in yy.astext()
assert yy.checked_type == relay.ty.TupleType(tvm.convert([
relay.TensorType((3, 2, 1), dtype),
relay.TensorType((2,), dtype),
relay.TensorType((2,), dtype)
]))
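        # axis=0: the first dimension (size 3) is the channel axis, so stats have shape (3,).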
beta = relay.var("beta", relay.TensorType((3,), dtype))
gamma = relay.var("gamma", relay.TensorType((3,), dtype))
moving_mean = relay.var("moving_mean", relay.TensorType((3,), dtype))
moving_var = relay.var("moving_var", relay.TensorType((3,), dtype))
y = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var,
axis=0, center=False, scale=False)
yy = run_infer_type(y.astuple())
assert yy.checked_type == relay.ty.TupleType(tvm.convert([
relay.ty.TensorType((3, 2, 1), dtype),
relay.ty.TensorType((3,), dtype),
relay.ty.TensorType((3,), dtype)
]))
# axis=-1
data = relay.var("data", relay.TensorType((1, 2, 3), dtype))
beta = relay.var("beta", relay.TensorType((3,), dtype))
gamma = relay.var("gamma", relay.TensorType((3,), dtype))
moving_mean = relay.var("moving_mean", relay.TensorType((3,), dtype))
moving_var = relay.var("moving_var", relay.TensorType((3,), dtype))
y = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var,
axis=-1, center=False, scale=False)
yy = run_infer_type(y.astuple())
assert yy.checked_type == relay.ty.TupleType(tvm.convert([
relay.ty.TensorType((1, 2, 3), dtype),
relay.ty.TensorType((3,), dtype),
relay.ty.TensorType((3,), dtype)
]))
@pytest.mark.xfail
def test_dense_type_check():
dtype = 'float16'
    n, c, h, w = 2, 2, 2, 2
x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
    # this should fail type inference: mismatch_w (3) does not match the input's last dim (2)
mismatch_w = 3
w = relay.var("w", relay.TensorType((2, mismatch_w), dtype))
y = relay.nn.dense(x, w)
yy = run_infer_type(y)
def test_dense():
for dtype in ['float16', 'float32']:
        # Dense accuracy for float16 is poor, so skip it
        if dtype == 'float16':
            continue
        n, c, h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w")
x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
w = relay.var("w", relay.TensorType((2, w), dtype))
y = relay.nn.dense(x, w, units=2)
assert "units=2" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, 2), dtype)
        n, c, h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), 2
x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
wh, ww = tvm.size_var("wh"), tvm.size_var("ww")
w = relay.var("w", relay.TensorType((ww, wh), dtype))
y = relay.nn.dense(x, w)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, ww), dtype)
        n, c, h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), 2
x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
w = relay.var("w", relay.IncompleteType())
y = relay.nn.dense(x, w, units=2)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, 2), dtype)
x = relay.var("x", shape=(10, 5), dtype=dtype)
w = relay.var("w", shape=(2, 5), dtype=dtype)
z = relay.nn.dense(x, w)
# Check result.
func = relay.Function([x, w], z)
x_data = np.random.rand(10, 5).astype(dtype)
w_data = np.random.rand(2, 5).astype(dtype)
ref_res = np.dot(x_data, w_data.T)
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(x_data, w_data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
op_res2 = intrp2.evaluate(func)(x_data, w_data)
tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
def test_dense_dtype():
data_dtype = 'uint8'
weight_dtype = 'int8'
out_dtype = 'uint8'
    n, c, h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w")
x = relay.var("x", relay.TensorType((n, c, h, w), data_dtype))
w = relay.var("w", relay.TensorType((2, w), weight_dtype))
y = relay.nn.dense(x, w, units=2, out_dtype=out_dtype)
assert "units=2" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, 2), out_dtype)
assert run_infer_type(yy.args[0]).checked_type.dtype == 'uint8'
assert run_infer_type(yy.args[1]).checked_type.dtype == 'int8'
def test_bitserial_dense():
m, k = tvm.size_var("m"), tvm.size_var("k")
x = relay.var("x", relay.TensorType((m, k), "int16"))
w = relay.var("w", relay.TensorType((k, 32), "int16"))
y = relay.nn.bitserial_dense(x, w, units=32)
"units=8" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((m, 32), "int16")
if __name__ == "__main__":
test_concatenate()
test_bias_add()
test_unary_op()
test_binary_op()
test_expand_dims_infer_type()
test_expand_dims()
test_softmax()
test_log_softmax()
test_dropout()
test_batch_norm()
test_dense()
test_bitserial_dense()
test_dense_dtype()
| 41.267735 | 106 | 0.579184 |
| true | true |
f738272452b5bd4a133676f40a2cc49598c160de | 240 | py | Python | experiment_session/admin.py | piotrb5e3/1023alternative-backend | 3a3882a906ae17d9d670d600d68063e4a9ea7102 | [
"MIT"
] | null | null | null | experiment_session/admin.py | piotrb5e3/1023alternative-backend | 3a3882a906ae17d9d670d600d68063e4a9ea7102 | [
"MIT"
] | null | null | null | experiment_session/admin.py | piotrb5e3/1023alternative-backend | 3a3882a906ae17d9d670d600d68063e4a9ea7102 | [
"MIT"
] | null | null | null | from django.contrib import admin
from experiment_session.models import ExperimentSession, Combination, Repeat
# Register your models here.
admin.site.register(ExperimentSession)
admin.site.register(Combination)
admin.site.register(Repeat)
 | 30 | 76 | 0.845833 | true | true |
f7382727993ed6544d16711c4ab933c0141beb52 | 2,382 | py | Python | subprojects/nicta/tem/cluster/sge/simulate_worker.py | sirca/bdkd_datastore | 2fc4f3d7976d326c0c8ae46d72475aaaa1fdf78d | [
"Apache-2.0"
] | 3 | 2016-03-12T03:09:18.000Z | 2017-04-23T12:47:49.000Z | subprojects/nicta/tem/cluster/sge/simulate_worker.py | sirca/bdkd_datastore | 2fc4f3d7976d326c0c8ae46d72475aaaa1fdf78d | [
"Apache-2.0"
] | 3 | 2015-12-03T00:32:55.000Z | 2016-02-03T23:31:07.000Z | subprojects/nicta/tem/cluster/sge/simulate_worker.py | sirca/bdkd_datastore | 2fc4f3d7976d326c0c8ae46d72475aaaa1fdf78d | [
"Apache-2.0"
] | 1 | 2019-04-08T07:43:17.000Z | 2019-04-08T07:43:17.000Z | import tree
import redis
import time
import socket
import os
from simple_queue import redis_queue
import logging
import numpy as np
REDIS_HOST = os.environ["REDIS_HOST"]
REDIS_PORT = 6379
rq = redis_queue(redis_host=REDIS_HOST, redis_port=REDIS_PORT)
# This is the job that simulates a particular forest.
# NOTE: the characteristic contains log(TD) and B4, so it needs to be scaled properly before being passed to the simulator.
# When we get the fitness back, we take its log again, since our model works with log(fitness).
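# For reference, a queued item is a dict shaped like (hypothetical values):
#   {"p1": 0.3, "p2": 1.7, "traits": [...], "virtualIndex": 42}
# The producer that pushes items onto "queue" lives outside this file.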
def wait_for_simulations():
while True:
if rq.q_len("queue") > 0 :
# Get job
i = rq.q_move("queue", "wip")
if not i:
time.sleep(5)
continue
item = rq.dict_get(i)
p1 = item.get("p1")
p2 = item.get("p2")
traits = item.get("traits")
virtualIndex = item.get("virtualIndex")
# Update status
rq.dict_update(i,"status","wip")
rq.dict_update(i,"host",socket.gethostname())
rq.dict_update(i,"pid",str(os.getpid()))
LOGGER.info("{0},simulation:{1},virtualIndex:{2},started".format(socket.gethostname(), i, virtualIndex))
try:
# Working
forest = tree.TreeModel(p1, p2)
forest.evolve(100)
fitness = forest.fitness(traits)
yActual=np.log(fitness)
# Update results
rq.dict_update(i,"yActual",yActual)
rq.dict_update(i,"virtualIndex",virtualIndex)
rq.dict_update(i,"status","done")
LOGGER.info("{0},simulation:{1},virtualIndex:{2},finished,yActual:{3}".format(socket.gethostname(), i, virtualIndex, yActual))
except ValueError:
rq.dict_update(i,"status","fail")
LOGGER.info("{0},simulation:{1},virtualIndex:{2},failed".format(socket.gethostname(), i, virtualIndex))
time.sleep(5)
if __name__ == "__main__":
log_filename = "/home/data/logs/tree.log"
LOGGER = logging.getLogger()
syslog_format = (' %(levelname)s ' + '%(filename)s: %(message)s')
logging.basicConfig(
level=logging.INFO,
filename=log_filename,
format='%(asctime)s.%(msecs)d localhost ' + syslog_format,
datefmt='%Y-%m-%dT%H:%M:%S')
wait_for_simulations()
 | 35.029412 | 142 | 0.59194 | true | true |
f7382923ab66def07ab5da7f88d25d20717257cb | 3,997 | py | Python | venv/Lib/site-packages/folium/plugins/heat_map.py | tarasrumezhak/twitter_map | 65a5c64c38620895e49c48656915c79fe5703549 | [
"MIT"
] | 2 | 2018-12-16T14:52:49.000Z | 2018-12-21T19:47:57.000Z | venv/Lib/site-packages/folium/plugins/heat_map.py | tarasrumezhak/twitter_map | 65a5c64c38620895e49c48656915c79fe5703549 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/folium/plugins/heat_map.py | tarasrumezhak/twitter_map | 65a5c64c38620895e49c48656915c79fe5703549 | [
"MIT"
] | 1 | 2019-12-13T11:01:04.000Z | 2019-12-13T11:01:04.000Z | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
import json
from branca.element import Figure, JavascriptLink
from folium.map import Layer
from folium.utilities import _isnan, _iter_tolist, none_max, none_min
from jinja2 import Template
class HeatMap(Layer):
"""
Create a Heatmap layer
Parameters
----------
data : list of points of the form [lat, lng] or [lat, lng, weight]
The points you want to plot.
You can also provide a numpy.array of shape (n,2) or (n,3).
name : string, default None
The name of the Layer, as it will appear in LayerControls.
min_opacity : default 1.
The minimum opacity the heat will start at.
max_zoom : default 18
Zoom level where the points reach maximum intensity (as intensity
scales with zoom), equals maxZoom of the map by default
max_val : float, default 1.
Maximum point intensity
radius : int, default 25
Radius of each "point" of the heatmap
blur : int, default 15
Amount of blur
gradient : dict, default None
Color gradient config. e.g. {0.4: 'blue', 0.65: 'lime', 1: 'red'}
overlay : bool, default True
Adds the layer as an optional overlay (True) or the base layer (False).
control : bool, default True
Whether the Layer will be included in LayerControls.
show: bool, default True
Whether the layer will be shown on opening (only for overlays).
"""
_template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.heatLayer(
{{this.data}},
{
minOpacity: {{this.min_opacity}},
maxZoom: {{this.max_zoom}},
max: {{this.max_val}},
radius: {{this.radius}},
blur: {{this.blur}},
gradient: {{this.gradient}}
})
.addTo({{this._parent.get_name()}});
{% endmacro %}
""")
def __init__(self, data, name=None, min_opacity=0.5, max_zoom=18,
max_val=1.0, radius=25, blur=15, gradient=None,
overlay=True, control=True, show=True):
super(HeatMap, self).__init__(name=name, overlay=overlay,
control=control, show=show)
data = _iter_tolist(data)
if _isnan(data):
raise ValueError('data cannot contain NaNs, '
'got:\n{!r}'.format(data))
self._name = 'HeatMap'
self.data = [[x for x in line] for line in data]
self.min_opacity = min_opacity
self.max_zoom = max_zoom
self.max_val = max_val
self.radius = radius
self.blur = blur
self.gradient = (json.dumps(gradient, sort_keys=True) if
gradient is not None else 'null')
def render(self, **kwargs):
super(HeatMap, self).render(**kwargs)
figure = self.get_root()
assert isinstance(figure, Figure), ('You cannot render this Element '
'if it is not in a Figure.')
figure.header.add_child(
JavascriptLink('https://leaflet.github.io/Leaflet.heat/dist/leaflet-heat.js'), # noqa
name='leaflet-heat.js')
def _get_self_bounds(self):
"""
Computes the bounds of the object itself (not including it's children)
in the form [[lat_min, lon_min], [lat_max, lon_max]].
"""
bounds = [[None, None], [None, None]]
for point in self.data:
bounds = [
[
none_min(bounds[0][0], point[0]),
none_min(bounds[0][1], point[1]),
],
[
none_max(bounds[1][0], point[0]),
none_max(bounds[1][1], point[1]),
],
]
return bounds
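# A minimal usage sketch (illustrative only; the coordinates, weights and
# output filename below are made-up values, and folium must be installed):
if __name__ == '__main__':
    import folium
    m = folium.Map(location=[46.35, 2.58], zoom_start=5)
    HeatMap(
        data=[[46.1, 2.2, 0.5], [46.8, 3.1, 1.0], [47.2, 2.9, 0.8]],
        radius=20,
        gradient={0.4: 'blue', 0.65: 'lime', 1: 'red'},
    ).add_to(m)
    m.save('heatmap.html')  # writes a self-contained HTML page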
| 36.009009 | 98 | 0.549162 |
| true | true |
f73829b33cb73440f6af7fd6b000866ca52d564b | 133 | py | Python | Python/1011 - Esfera.py | carloshenrique051994/exerciciosUri | 1f73a32b44c79cd7aa47a89f2afb8e9618d27e3b | [
"MIT"
] | null | null | null | Python/1011 - Esfera.py | carloshenrique051994/exerciciosUri | 1f73a32b44c79cd7aa47a89f2afb8e9618d27e3b | [
"MIT"
] | null | null | null | Python/1011 - Esfera.py | carloshenrique051994/exerciciosUri | 1f73a32b44c79cd7aa47a89f2afb8e9618d27e3b | [
"MIT"
] | null | null | null | from math import pow
pi = 3.14159
raio = int(input())
volume = (4.0/3) * pi * (pow(raio, 3))
print('VOLUME = {:.3f}'.format(volume)) | 22.166667 | 39 | 0.609023 | true | true |
f7382a22071c11bec80e3bfebed4fa144f37bddc | 921 | py | Python | jivago/serialization/deserialization/typed_list_deserialization_strategy.py | keotl/jivago | 892dfb0cae773e36245083c3e56f0f8523145523 | [
"MIT"
] | 12 | 2018-03-19T20:57:44.000Z | 2020-01-27T14:11:24.000Z | jivago/serialization/deserialization/typed_list_deserialization_strategy.py | keotl/jivago | 892dfb0cae773e36245083c3e56f0f8523145523 | [
"MIT"
] | 73 | 2018-04-20T22:26:00.000Z | 2021-12-01T14:17:37.000Z | jivago/serialization/deserialization/typed_list_deserialization_strategy.py | keotl/jivago | 892dfb0cae773e36245083c3e56f0f8523145523 | [
"MIT"
] | 1 | 2019-02-28T13:33:45.000Z | 2019-02-28T13:33:45.000Z | from typing import Type, List
from jivago.inject import typing_meta_helper
from jivago.lang.annotations import Override
from jivago.lang.stream import Stream
from jivago.serialization.deserialization_strategy import DeserializationStrategy, T
TYPES_WHICH_DESERIALIZE_TO_LISTS = ('List', 'Iterable', 'Collection')
class TypedListDeserializationStrategy(DeserializationStrategy):
def __init__(self, deserializer: "Deserializer"):
self.deserializer = deserializer
@Override
def can_handle_deserialization(self, declared_type: type) -> bool:
return typing_meta_helper.is_typing_meta_collection(declared_type, TYPES_WHICH_DESERIALIZE_TO_LISTS)
@Override
def deserialize(self, obj: list, declared_type: Type[List[T]]) -> list:
list_content_type = declared_type.__args__[0]
return Stream(obj).map(lambda x: self.deserializer.deserialize(x, list_content_type)).toList()
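# For illustration: with declared_type=List[int], deserialize(["1", 2], ...)
# hands each element to the injected Deserializer with int as the target type;
# the concrete result depends on that Deserializer (defined elsewhere in jivago).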
 | 38.375 | 108 | 0.785016 | true | true |
f7382acf23256bf9b22e413cbbccb484580ff05d | 560 | py | Python | python/samples/ht16k33.py | ramonbrugman/i2cdriver | 6739e5316802e16dfab49abe15f76818c9a37f7c | [
"BSD-3-Clause"
] | 132 | 2019-02-10T19:14:16.000Z | 2022-03-10T05:51:25.000Z | python/samples/ht16k33.py | ramonbrugman/i2cdriver | 6739e5316802e16dfab49abe15f76818c9a37f7c | [
"BSD-3-Clause"
] | 59 | 2019-02-25T23:24:19.000Z | 2022-03-24T15:13:56.000Z | python/samples/ht16k33.py | ramonbrugman/i2cdriver | 6739e5316802e16dfab49abe15f76818c9a37f7c | [
"BSD-3-Clause"
] | 41 | 2019-02-25T23:09:59.000Z | 2022-02-17T09:36:30.000Z | class HT16K33:
def __init__(self, i2, a = 0x70):
self.i2 = i2
self.a = a
self.command(0x21) # Clock on
self.command(0x81) # Display on
self.bright(15)
self.load([0] * 16)
def bright(self, n):
assert 0 <= n < 16
self.command(0xe0 + n)
def command(self, b):
assert(self.i2.start(self.a, 0))
assert(self.i2.write([b]))
self.i2.stop()
def load(self, b128):
self.i2.start(self.a, 0)
self.i2.write([0] + b128)
self.i2.stop()
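# A minimal usage sketch (assumption: an I2CDriver adapter on /dev/ttyUSB0
# and an HT16K33 at the default address 0x70; the frame bytes are made up):
if __name__ == '__main__':
    from i2cdriver import I2CDriver
    i2 = I2CDriver("/dev/ttyUSB0")
    d = HT16K33(i2)
    d.bright(8)               # brightness in the 0-15 range
    d.load([0xff, 0x00] * 8)  # 16 bytes of display RAM: alternate rows lit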
 | 24.347826 | 44 | 0.501786 | true | true |
f7382c606fd7cdc45568f8a9fd6fa00f01b557e7 | 782 | py | Python | scylla/providers/proxy_scraper_provider.py | cities/scylla | db9d7b8f5ca22582ca84028ef4558a64c0d8b137 | [
"Apache-2.0"
] | 1 | 2021-05-16T16:21:20.000Z | 2021-05-16T16:21:20.000Z | scylla/providers/proxy_scraper_provider.py | cities/scylla | db9d7b8f5ca22582ca84028ef4558a64c0d8b137 | [
"Apache-2.0"
] | null | null | null | scylla/providers/proxy_scraper_provider.py | cities/scylla | db9d7b8f5ca22582ca84028ef4558a64c0d8b137 | [
"Apache-2.0"
] | null | null | null | import json
from pyquery import PyQuery
from scylla.database import ProxyIP
from .base_provider import BaseProvider
class ProxyScraperProvider(BaseProvider):
def urls(self) -> [str]:
return ['https://raw.githubusercontent.com/sunny9577/proxy-scraper/master/proxies.json']
def parse(self, document: PyQuery) -> [ProxyIP]:
ip_list: [ProxyIP] = []
text = document.html()
        json_object = json.loads(text)
if not json_object or type(json_object['usproxy']) != list:
return ip_list
for ip_port in json_object['usproxy']:
p = ProxyIP(ip=ip_port['ip'], port=ip_port['port'])
ip_list.append(p)
return ip_list
@staticmethod
def should_render_js() -> bool:
return False
 | 25.225806 | 96 | 0.644501 | true | true |
f7382e096ecf2c1debe236ad272050332b1b2f93 | 953 | py | Python | fastapi_workshop/cli.py | diogoro/fastapi-workshop | 038df4c15b5080f639dd839233dfb6417da35043 | [
"Unlicense"
] | null | null | null | fastapi_workshop/cli.py | diogoro/fastapi-workshop | 038df4c15b5080f639dd839233dfb6417da35043 | [
"Unlicense"
] | null | null | null | fastapi_workshop/cli.py | diogoro/fastapi-workshop | 038df4c15b5080f639dd839233dfb6417da35043 | [
"Unlicense"
] | null | null | null | import typer
import uvicorn
from .app import app
from .config import settings
cli = typer.Typer(name="fastapi_workshop API")
@cli.command()
def run(
port: int = settings.server.port,
host: str = settings.server.host,
log_level: str = settings.server.log_level,
reload: bool = settings.server.reload,
): # pragma: no cover
"""Run the API server."""
uvicorn.run(
"fastapi_workshop.app:app",
host=host,
port=port,
log_level=log_level,
reload=reload,
)
@cli.command()
def shell(): # pragma: no cover
"""Opens an interactive shell with objects auto imported"""
_vars = {
"app": app,
"settings": settings,
}
typer.echo(f"Auto imports: {list(_vars.keys())}")
try:
from IPython import start_ipython
start_ipython(argv=[], user_ns=_vars)
except ImportError:
import code
code.InteractiveConsole(_vars).interact()
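# Example invocations (assumption: the project wires `cli` up as a console
# script or __main__ entry point; flag names follow typer's conventions):
#   fastapi_workshop run --port 8000 --reload
#   fastapi_workshop shell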
 | 22.162791 | 63 | 0.628541 | true | true |
f7382e6c70f01a44d83fc569299113ab6d6ccdeb | 2,578 | py | Python | setup.py | runtime-jupyter-safety/runtime-jupyter-safety | f62a24b5b4f44fed5111c31441bc6a105441e34c | [
"BSD-3-Clause"
] | 96 | 2020-05-18T18:58:44.000Z | 2022-03-19T13:09:07.000Z | setup.py | nbsafety-project/nbsafety | c79d24bad7eec99b1e9e3ca38d005a24c03b6eb4 | [
"BSD-3-Clause"
] | 56 | 2020-06-01T06:45:49.000Z | 2022-03-27T00:06:52.000Z | setup.py | runtime-jupyter-safety/runtime-jupyter-safety | f62a24b5b4f44fed5111c31441bc6a105441e34c | [
"BSD-3-Clause"
] | 4 | 2020-08-25T18:17:02.000Z | 2021-06-02T14:32:12.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from glob import glob
from setuptools import setup, find_packages
import versioneer
pkg_name = 'nbsafety'
def read_file(fname):
with open(fname, 'r', encoding='utf8') as f:
return f.read()
history = read_file('HISTORY.rst')
requirements = read_file('requirements.txt').strip().split()
setup(
name=pkg_name,
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
author='Stephen Macke',
author_email='stephen.macke@gmail.com',
description='Fearless interactivity for Jupyter notebooks.',
long_description=read_file('README.md'),
long_description_content_type='text/markdown',
url='https://github.com/nbsafety-project/nbsafety',
packages=find_packages(exclude=[
'binder',
'docs',
'scratchspace',
'notebooks',
'img',
'test',
'scripts',
'markdown',
'versioneer.py',
'frontend',
'blueprint.json',
]),
include_package_data=True,
data_files=[
# like `jupyter nbextension install --sys-prefix`
("share/jupyter/nbextensions/nbsafety", [
"nbsafety/resources/nbextension/index.js",
"nbsafety/resources/nbextension/index.js.map",
]),
# like `jupyter nbextension enable --sys-prefix`
("etc/jupyter/nbconfig/notebook.d", [
"nbsafety/resources/nbextension/nbsafety.json",
]),
("share/jupyter/labextensions/jupyterlab-nbsafety",
glob("nbsafety/resources/labextension/package.json")
),
("share/jupyter/labextensions/jupyterlab-nbsafety/static",
glob("nbsafety/resources/labextension/static/*")
),
# like `python -m nbsafety.install --sys-prefix`
("share/jupyter/kernels/nbsafety", [
"nbsafety/resources/kernel/kernel.json",
"nbsafety/resources/kernel/logo-32x32.png",
"nbsafety/resources/kernel/logo-64x64.png",
]),
],
install_requires=requirements,
license='BSD-3-Clause',
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
)
# python setup.py sdist bdist_wheel --universal
# twine upload dist/*
| 31.060241 | 66 | 0.619085 |
| true | true |
f7383027edb749b51b535be26537130cadd92b70 | 684 | py | Python | setup.py | rumfox/pygifconvt0001 | 2e36a7eb3cfe52ce9dfa85cf6db5b2c451c67089 | [
"MIT-0"
] | null | null | null | setup.py | rumfox/pygifconvt0001 | 2e36a7eb3cfe52ce9dfa85cf6db5b2c451c67089 | [
"MIT-0"
] | null | null | null | setup.py | rumfox/pygifconvt0001 | 2e36a7eb3cfe52ce9dfa85cf6db5b2c451c67089 | [
"MIT-0"
] | null | null | null | from setuptools import setup, find_packages
setup(
name = 'pygifconvt0001',
version = '1.0.6',
description = 'Test package for distribution',
author = 'rumfox',
author_email = 'maebong@gmail.com',
url = '',
download_url = '',
install_requires = ['pillow'],
include_package_data=True,
packages=find_packages(),
keywords = ['GIFCONVERTER', 'gifconverter'],
python_requires = '>=3',
zip_safe=False,
classifiers = [
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent"
]
) | 31.090909 | 56 | 0.559942 | true | true |
f73831802258da56cfeeefc068913ffda703076a | 54,119 | py | Python | tests/BlazingSQLTest/Runner/runTest.py | msadang/blazingsql | 5fe3e418dbee4a3961998b0e25ec81100a1a1490 | [
"Apache-2.0"
] | null | null | null | tests/BlazingSQLTest/Runner/runTest.py | msadang/blazingsql | 5fe3e418dbee4a3961998b0e25ec81100a1a1490 | [
"Apache-2.0"
] | null | null | null | tests/BlazingSQLTest/Runner/runTest.py | msadang/blazingsql | 5fe3e418dbee4a3961998b0e25ec81100a1a1490 | [
"Apache-2.0"
] | null | null | null | # Cast columns to f64 before converting them to pandas.
# This is a hack; use the assert_equal comparator once nulls are
# fully supported on cudf.sort_values.
import json
import logging
import os
import re
import time
import blazingsql
from blazingsql import DataType
# import git
import numpy as np
import pandas as pd
from BlazingLogging import loggingHandler as lhandler
from Configuration import ExecutionMode
from Configuration import Settings as Settings
from DataBase import createSchema as cs
if ((Settings.execution_mode == ExecutionMode.FULL and
Settings.compare_res == "true") or
Settings.execution_mode == ExecutionMode.GENERATOR):
print(Settings.execution_mode)
print(Settings.compare_res)
from pydrill.client import PyDrill
from pyspark.sql.session import SparkSession
class Result:
def __init__(self, columns, resultSet, resultBlz):
self.columns = columns
self.resultSet = resultSet
self.resultBlz = resultBlz
name = "blzlogging"
HANDLER = lhandler.logging_handler()
class loggerblz:
def __init__(self, query, error, totaltime):
self.query = query
self.error = error
self.totaltime = totaltime
class result:
def __init__(self, res_execution, error):
self.res_execution = res_execution
self.error = error
def logginghelper(name):
# logging.basicConfig(filename='example.txt',level=logging.DEBUG)
logging._defaultFormatter = logging.Formatter()
logger = logging.getLogger(name)
logger.handlers = []
logger.setLevel(logging.DEBUG)
logger.addHandler(HANDLER)
return logger
def loggingClose(name):
HANDLER.log = []
def upcast_to_float(df):
for name in df.columns:
if np.issubdtype(df[name].dtype, np.bool_):
df[name] = df[name].astype(np.float32)
elif np.issubdtype(df[name].dtype, np.integer):
df[name] = df[name].astype(np.float64)
return df
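# e.g. upcast_to_float turns a bool column into float32 (True -> 1.0) and any
# integer column into float64, so results can be compared as floating point.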
def to_pandas_f64_engine(df, expected_types_list):
count = 0
for col in df.columns:
if count >= len(expected_types_list):
break
if expected_types_list[count] != np.dtype(object):
if df.shape[0] > 0:
if not np.issubdtype(df[col].dtype, np.number) and not np.issubdtype(
df[col].dtype, np.datetime64
):
if np.issubdtype(expected_types_list[count], np.bool_):
df[col] = (
df[col].map({"true": 1.0, "false": 0.0}).astype(np.float32)
)
elif np.issubdtype(expected_types_list[count], np.datetime64):
df[col] = df[col].astype(expected_types_list[count])
else:
df[col] = pd.to_numeric(df[col], errors="coerce")
count = count + 1
return df
def get_null_constants(df):
null_values = {}
for col, dtype in df.dtypes.to_dict().items():
if np.issubdtype(dtype, np.datetime64):
null_values[col] = np.datetime64("nat")
elif np.issubdtype(dtype, np.number):
null_values[col] = np.nan
return null_values
def compare_results(pdf1, pdf2, acceptable_difference, use_percentage, engine):
np.warnings.filterwarnings("ignore")
if pdf1.size == 0 and pdf2.size == 0:
return "Success"
msg = ""
if not isinstance(engine, str):
if isinstance(engine, PyDrill):
msg = "PyDrill"
else:
msg = "PySpark"
    elif engine == "drill":
msg = "PyDrill"
else:
msg = "PySpark"
msg = ""
if not isinstance(engine, str):
if isinstance(engine, PyDrill):
msg = "PyDrill"
else:
msg = "PySpark"
elif engine=="drill":
msg = "PyDrill"
else:
msg = "PySpark"
if pdf1.shape[0] == pdf2.shape[0]:
if pdf1.shape[1] == pdf2.shape[1]:
for name in pdf1.columns:
if pdf1[name].dtype == np.object:
pdf1[name] = pdf1[name].astype('string')
for name in pdf2.columns:
if pdf2[name].dtype == np.object:
pdf2[name] = pdf2[name].astype('string')
# Removing indexes, because those are considered when
# comparing with equals()
pdf1.reset_index(drop=True, inplace=True)
pdf2.reset_index(drop=True, inplace=True)
# Make the column labels equal as equals() also compare labels
orig_pdf2_labels = pdf2.columns.to_list()
pdf2.columns = pdf1.columns.to_list()
exac_comp = pdf1.select_dtypes(exclude=np.inexact).equals(
pdf2.select_dtypes(exclude=np.inexact)
)
# Restore labels
pdf2.columns = orig_pdf2_labels
tmp_pdf1 = pdf1.select_dtypes(include=np.inexact)
tmp_pdf2 = pdf2.select_dtypes(include=np.inexact)
if use_percentage:
relative_tolerance = acceptable_difference
absolute_tolerance = 0
else:
relative_tolerance = 0
absolute_tolerance = acceptable_difference
# np.allclose follows this formula:
# absolute(a - b) <= (absolute_tolerance + relative_tolerance * absolute(b))
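            # e.g. with relative_tolerance=0.01 and absolute_tolerance=0,
            # a=100.4 vs b=100.0 passes: |100.4 - 100.0| = 0.4 <= 0.01 * 100.0 = 1.0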
res = np.all(exac_comp) and np.allclose(
tmp_pdf1.values, tmp_pdf2.values, relative_tolerance,
absolute_tolerance, equal_nan=True
)
if res:
return "Success"
else:
return "Fail: Different values"
else:
return (
"Fail: Different number of columns blzSQLresult: "
+ str(pdf1.shape[1])
+ " "
+ msg
+ " result: "
+ str(pdf2.shape[1])
)
else:
return (
"Fail: Different number of rows blzSQLresult: "
+ str(pdf1.shape[0])
+ " "
+ msg
+ " result: "
+ str(pdf2.shape[0])
)
def begins_with(col1, col2, exp):
return col1.startswith(exp) or col2.startswith(exp)
def compare_column_names(pdf1, pdf2):
if len(pdf1.columns) != len(pdf2.columns):
if pdf1.values.size == 0 and pdf2.values.size == 0:
return True
print("Different set of columns")
return False
for blzCol, drillCol in zip(
pdf1.columns.values.tolist(), pdf2.columns.values.tolist()
):
if blzCol != drillCol:
if (
begins_with(drillCol, blzCol, "EXPR") is False
and begins_with(drillCol, blzCol, "count(") is False
):
print("Different columns")
return False
return True
# NOTE kharoly percy william: NEVER CHANGE THE ORDER of these
# lines (the logger logic depends on logging queryType first and then queryId).
# WARNING: DO NOT CHANGE THE CALL ORDER IN THIS FUNCTION!
def get_Branch():
branch = blazingsql.__branch_name__
return branch
def get_CommitHash():
commit = blazingsql.__version__
return commit
def get_QueryId(input_type, test_name, test_id):
query_id = (
str(input_type).upper()
+ "-"
+ str(get_codTest(test_name)).upper()
+ "-"
+ str(test_id)
)
return query_id
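# e.g. get_QueryId("parquet", "TPCH Queries", "01") -> "PARQUET-TPCH-01"
# (illustrative arguments; the middle token comes from get_codTest below)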
def get_resultId(resultComparisson):
result_id = 1
if resultComparisson != "Success":
result_id = 0
return result_id
def get_codTest(test_name):
switcher = {
"Aggregations without group by": "AGGWOGRBY",
"Coalesce": "COALESCE",
"Column Basis": "COLBAS",
"Bindable Alias": "BALIAS",
"Boolean": "BOOL",
"Case": "CASE",
"Cast": "CAST",
"Common Table Expressions": "COMTABLEX",
"Concat": "CONCAT",
"Count Distinct": "COUNTD",
"Count without group by": "COUNTWOGRBY",
"Cross join": "CROSSJOIN",
"Date": "DATE",
"DayOfWeek": "DAYOFWEEK",
"Dir": "DIR",
"File System Google Storage": "FSGS",
"Hdfs FileSystem": "FSHDFS",
"Hive FileSystem": "FSHIVE",
"File System Local": "FSLOCAL",
"File System S3": "FSS3",
"Full outer join": "FOUTJOIN",
"Group by": "GROUPBY",
"Group by without aggregations": "GRBYWOAGG",
"Inner join": "INNERJOIN",
"Left outer join": "LOUTJOIN",
"Like": "LIKE",
"Literal": "LITERAL",
"Nested Queries": "NESTEDQ",
"Non-EquiJoin Queries": "NEQUIJOIN",
"Order by": "ORDERBY",
"Predicates With Nulls": "PREDWNULLS",
"Round": "ROUND",
"Replace": "REPLACE",
"Simple Distribution From Local": "SIMPLEDIST",
"Smiles Test": "SMILES",
"Substring": "SUBSTRING",
"Tables from Pandas": "TBLPANDAS",
"Timestampdiff": "TIMESTAMPD",
"Timestamp": "TIMESTAMP",
"To_timestamp": "TO_TIMESTAMP",
"TPCH Queries": "TPCH",
"Config Options": "TPCH", # we want the same outputs as the tpch test
"Unary ops": "UNARYOPS",
"Unify Tables": "UNIFYTBL",
"Union": "UNION",
"Limit": "LIMIT",
"Where clause": "WHERE",
"Wild Card": "WILDCARD",
"Simple String": "SSTRING",
"String case": "STRINGCASE",
"Message Validation": "MESSAGEVAL"
}
return switcher.get(test_name)
def print_fixed_log(
logger,
test_name,
input_type,
test_id,
sql,
resultComparisson,
error_message,
load_time,
engine_time,
total_time,
):
commitHash = get_CommitHash()
branchName = get_Branch()
# dateNow=datetime.now()
inputType = cs.get_extension(input_type)
logger.info(get_QueryId(inputType, test_name, test_id)) # QueryID
logger.info(Settings.dateNow) # TimeStamp
logger.info(test_name) # TestGroup
logger.info(inputType) # InputType
logger.info(sql) # Query
logger.info(get_resultId(resultComparisson)) # Result
logger.info(error_message) # Error
logger.info(branchName) # PR
logger.info(commitHash) # CommitHash
logger.info(Settings.data["RunSettings"]["nRals"])
logger.info(Settings.data["RunSettings"]["nGPUs"])
logger.info(Settings.data["TestSettings"]["dataDirectory"])
logger.info(test_id)
logger.info(load_time)
logger.info(engine_time)
logger.info(total_time)
def print_query_results(
sql,
queryId,
queryType,
pdf1,
pdf2,
resultgdf,
acceptable_difference,
use_percentage,
print_result,
engine,
input_type,
load_time,
engine_time,
total_time,
):
if print_result:
print("#BLZ:")
print(pdf1)
if not isinstance(engine, str):
if isinstance(engine, PyDrill):
print("#DRILL:")
else:
print("#PYSPARK:")
print(pdf2)
else:
        if engine == "drill":
print("#DRILL:")
else:
print("#PYSPARK:")
data_type = cs.get_extension(input_type)
print(str(queryId) + " Test " + queryType + " - " + data_type)
print("#QUERY:")
print(sql)
print("RESULT:")
error_message = ""
stringResult = ""
compareResults = True
if "compare_results" in Settings.data["RunSettings"]:
compareResults = Settings.data["RunSettings"]["compare_results"]
if compareResults:
columnNamesComparison = compare_column_names(pdf1, pdf2)
if columnNamesComparison is not True:
print("Columns:")
print(pdf1.columns)
print(pdf2.columns)
error_message = "Column names are not the same"
print("ERROR:")
print(error_message)
resultComparisson = compare_results(
pdf1, pdf2, acceptable_difference, use_percentage, engine
)
if resultComparisson != "Success":
error_message = resultComparisson[6:]
print("ERROR:")
print(error_message)
stringResult = resultComparisson
if resultComparisson != "Success" or columnNamesComparison is False:
stringResult = "Fail"
else:
stringResult = "Success"
print(stringResult)
print("TOTAL TIME: ")
print(total_time)
print("CRASHED NODES: ")
# print(resultgdf.n_crashed_nodes)
print("TOTAL NODES: ")
# print(resultgdf.total_nodes)
print("===================================================")
logger = logginghelper(name)
# TODO percy kharoly bindings we need to get the number from internal api
# print_fixed_log(logger, queryType, queryId, sql, stringResult,
# error_message, 1, 1, 2)
print_fixed_log(
logger,
queryType,
input_type,
queryId,
sql,
stringResult,
error_message,
load_time,
engine_time,
total_time,
)
def print_query_results2(sql, queryId, input_type, queryType, error_message, message_validation):
print(queryId)
print("#QUERY:")
print(sql)
print("RESULT:")
result = validate_messages(error_message, message_validation)
print(result)
print("ERROR:")
    if result == "Fail":
        print(error_message)
    else:
        error_message = ""
print("CALCITE TIME: ")
print("-")
print("RAL TIME: ")
print("-")
print("EXECUTION TIME: ")
print("-")
print("===================================================")
logger = logginghelper(name)
print_fixed_log(
logger, queryType, input_type, queryId, sql, result, error_message, None, None, None
)
def print_query_results_performance(sql, queryId, queryType, resultgdf):
print(queryId)
print("#QUERY:")
print(sql)
print("RESULT:")
resultComparisson = "Success"
print("CALCITE TIME: ")
print(resultgdf.calciteTime)
print("RAL TIME: ")
print(resultgdf.ralTime)
print("EXECUTION TIME: ")
print(resultgdf.totalTime)
print("===================================================")
logger = logginghelper(name)
print_fixed_log(
logger,
queryType,
queryId,
sql,
resultComparisson,
" ",
resultgdf.calciteTime,
resultgdf.ralTime,
resultgdf.totalTime,
)
def print_query_results_dist(
sql,
queryId,
queryType,
pdf1,
pdf2,
resultgdf,
acceptable_difference,
use_percentage,
print_result,
):
if print_result:
print("#BLZ:")
print(pdf1)
print("#DRILL:")
print(pdf2)
print(queryId)
print("#QUERY:")
print(sql)
print("RESULT:")
    resultComparisson = compare_results(
        pdf1, pdf2, acceptable_difference, use_percentage, "drill"
    )
error_message = ""
if resultComparisson != "Success":
error_message = resultComparisson[6:]
resultComparisson = "Fail"
print(resultComparisson)
print("ERROR:")
print(error_message)
else:
print(resultComparisson)
print("CALCITE TIME: ")
print(resultgdf.calciteTime)
print("RAL TIME: ")
print(resultgdf.ralTime)
print("EXECUTION TIME: ")
print(resultgdf.totalTime)
print("===================================================")
logger = logginghelper(name)
print_fixed_log(
logger,
queryType,
queryId,
sql,
resultComparisson,
error_message,
None,
None,
None,
)
class Test:
def __init__(self, test_name):
self.test_name = test_name
self.total = 0
self.success = 0
self.fail_ids = []
def save_log(gpu_ci_mode=False):
c = 1
cadena = []
subcadena = []
countPass = 0
countCrash = 0
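    # HANDLER.log is a flat list of records; every 16 consecutive entries
    # form one test row, matching the 16 DataFrame columns built below.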
for x in HANDLER.log:
if c < 17:
subcadena.append(x.msg)
c = c + 1
else:
c = 1
cadena.append(subcadena)
subcadena = []
subcadena.append(x.msg)
c = c + 1
print()
cadena.append(subcadena)
# If it didn't run any test (probably some were skipped)
# then return success
if cadena == [[]]:
return True, []
df = pd.DataFrame(
cadena,
columns=[
"QueryID",
"TimeStamp",
"TestGroup",
"InputType",
"Query",
"Result",
"Error",
"Branch",
"CommitHash",
"nRals",
"nGPUs",
"DataDirectory",
"TestId",
"LoadingTime",
"EngineTotalTime",
"TotalTime",
],
)
total = df.shape[0]
countPass = df[df.Result == 1].count()["Result"]
df1 = df[
[
"QueryID",
"TimeStamp",
"TestGroup",
"InputType",
"Query",
"Result",
"Error",
"Branch",
"CommitHash",
"nRals",
"nGPUs",
"DataDirectory",
"LoadingTime",
"EngineTotalTime",
"TotalTime",
]
].copy()
create_summary_detail(df, gpu_ci_mode)
printSummary(countPass, countCrash, total, gpu_ci_mode)
if not gpu_ci_mode:
saveLogInFile(df1)
saveLog = False
if "saveLog" in Settings.data["RunSettings"]:
saveLog = Settings.data["RunSettings"]["saveLog"]
print("saveLog = " + str(saveLog))
# TODO william kharoly felipe we should try to enable and use
# this function in the future
# result, error_msgs = verify_prev_google_sheet_results(df1)
result, error_msgs = True, []
if result is True and saveLog == "true":
saving_google_sheet_results(df1)
else:
if countPass < total:
result, error_msgs = False, []
else:
result, error_msgs = True, []
loggingClose(name)
return result, error_msgs
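# Illustrative sketch (added for clarity; not part of the original suite):
# print_fixed_log() emits exactly 16 logger.info() calls per test, and the
# `c < 17` loop in save_log() folds that flat stream back into 16-column rows.
# The same chunking, modeled standalone on plain strings:
def _demo_chunk_log_messages(msgs, width=16):
    # split a flat message list into fixed-width rows; assumes len(msgs) is a
    # multiple of `width`, as print_fixed_log() guarantees
    return [msgs[i:i + width] for i in range(0, len(msgs), width)]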
def create_summary_detail(df, no_color):
pdf = df
pdf["Result"] = df["Result"].replace(1, "Success")
pdf["Result"] = df["Result"].replace(0, "Fail")
    # boolean mask selecting the failing tests
filter_fail = pdf["Result"] == "Fail"
# filtering data
pdf2 = pdf.where(filter_fail)
pdf_fail = pdf2.dropna()
if no_color:
green = ""
yellow = ""
# red = ""
endc = ""
else:
green = bcolors.OKGREEN
yellow = bcolors.WARNING
# red = bcolors.FAIL
endc = bcolors.ENDC
# display
print(green + "========================================================")
print("DETAILED SUMMARY TESTS")
print("========================================================" + endc)
pd.set_option("max_rows", 1500)
print(pdf.groupby(["TestGroup", "InputType"])["Result"].value_counts())
print(yellow + "========================================================")
print("FAILED TESTS" + yellow)
print("========================================================" + endc)
# pd.set_option('max_columns', 5)
# pd.set_option('max_colwidth', 1000)
pd.set_option("display.max_columns", None)
pd.set_option("display.width", 2000)
pd.set_option("display.float_format", "{:20,.2f}".format)
pd.set_option("display.max_colwidth", None)
print(
pdf_fail.groupby(["TestGroup", "InputType", "Result"])["TestId"]
.apply(",".join)
.reset_index()
)
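# Sketch of the expected input (hypothetical frame): create_summary_detail()
# takes the log columns produced by save_log(); the detailed summary is a
# value_counts over (TestGroup, InputType) plus a join of the failing TestIds.
#   df = pd.DataFrame({"TestGroup": ["Join", "Join"],
#                      "InputType": ["csv", "csv"],
#                      "Result": [1, 0], "TestId": ["1", "2"]})
#   create_summary_detail(df, no_color=True)   # prints 1 Success / 1 Fail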
# This function uses the Google spreadsheet to compare the current results
# against historic ones.
# Returns a tuple with 2 entries:
# 1st element: False if gpuci should fail, True otherwise
# 2nd element: a list of error messages (in case the 1st element is False)
# Example:
# result, error_msgs = verify_prev_google_sheet_results(log_pdf)
# if result == False:
#     exit the python process and do not move on to the next steps
# TODO william kharoly felipe we should try to enable and use
# this function in the future
def _verify_prev_google_sheet_results(log_pdf):
import gspread
from oauth2client.service_account import ServiceAccountCredentials
def get_the_data_from_sheet():
# Use creds to create a client to interact with the Google Drive API
scope = [
"https://www.googleapis.com/auth/drive",
"https://spreadsheets.google.com/feeds",
]
# Using credentials from BlazingSQL
# os.getcwd() #Settings.data['TestSettings']['workspaceDirectory']
# # #/home/kharoly/blazingsql/blazingdb-testing/BlazingSQLTest
# current_dir = "/home/ubuntu/.conda/envs/e2e"
log_info = Settings.data["RunSettings"]["logInfo"]
if log_info == "":
print(
"""####### ======= >>>>>>> WARNING this test run will not
be compared against old results from Google Docs. Define
the env var BLAZINGSQL_E2E_LOG_INFO"""
)
return None
log_info = json.loads(log_info)
creds_blazing = ServiceAccountCredentials.from_json_keyfile_dict(
log_info, scope
)
client_blazing = gspread.authorize(creds_blazing)
        # Find the workbook by name and open a worksheet
work_sheet = "BSQL Log Results"
if "worksheet" in Settings.data["RunSettings"]:
work_sheet = Settings.data["RunSettings"]["worksheet"]
sheet_blazing = client_blazing.open("BSQL End-to-End Tests").worksheet(
work_sheet
)
        # Read the historic log results back from the Blazing sheet
ret = pd.DataFrame(sheet_blazing.get_all_records())
        # NOTE percy kharo william we need to patch these columns
        # before converting to parquet
ret["LoadingTime"] = ret["LoadingTime"].astype(str)
ret["EngineTotalTime"] = ret["EngineTotalTime"].astype(str)
ret["TotalTime"] = ret["TotalTime"].astype(str)
return ret
dir_log = Settings.data["TestSettings"]["logDirectory"]
gspreadCacheHint = Settings.data["RunSettings"]["gspreadCacheHint"]
gspread_e2e_cache_path = dir_log + "/e2e-gspread-cache.parquet"
gspread_df = None
if gspreadCacheHint == "false":
gspread_df = get_the_data_from_sheet()
if gspread_df is not None:
# Always save a cache (so when gspreadCacheHint
# is false will refresh the cache)
gspread_df.to_parquet(gspread_e2e_cache_path)
elif gspreadCacheHint == "true":
if os.path.isfile(gspread_e2e_cache_path):
gspread_df = pd.read_parquet(gspread_e2e_cache_path)
else:
gspread_df = get_the_data_from_sheet()
if gspread_df is not None:
gspread_df.to_parquet(gspread_e2e_cache_path)
if gspread_df is None:
error_msg = """ERROR: This test run could not be compared
against old results from Google Docs"""
return False, [error_msg]
log_pdf_copy = log_pdf.copy()
prev_nrals = gspread_df["nRALS"][0]
curr_nrals = Settings.data["RunSettings"]["nRals"]
# Assume prev_nrals == curr_nrals
last_e2e_run_id = gspread_df["Timestamp"][0]
# NOTE If prev_nrals != curr_nrals we need to search the first
# Timestamp (a.k.a ID) for the current nRals target
if prev_nrals != curr_nrals:
gspread_df_uniques = gspread_df.drop_duplicates()
gspread_df_uniques_target_nrals = gspread_df_uniques.loc[
gspread_df_uniques["nRALS"] == curr_nrals
]
last_e2e_run_id = gspread_df_uniques_target_nrals.iloc[
0, 1
] # select the first Timestamp from the unique values
print(
"####### ======= >>>>>>> E2E INFO: We will compare the"
+ " current run against the ID (Timestamp): "
+ last_e2e_run_id
)
last_e2e_run_df = gspread_df.loc[gspread_df["Timestamp"] == last_e2e_run_id]
# NOTE percy kharo william we need to rename some columns to use our dfs
log_pdf_copy = log_pdf_copy.rename(
columns={
"TestGroup": "Test Group",
"InputType": "Input Type",
"nRals": "nRALS",
"DataDirectory": "data_dir",
}
)
# NOTE For debugging
# log_pdf_copy['TimeStamp'] = log_pdf_copy['TimeStamp'].astype(str)
# log_pdf_copy.to_parquet('/home/percy/workspace/logtest/ultimo.parquet',
# compression='GZIP')
# log_pdf_copy = pd.read_parquet('/home/user/last_run_log_df.parquet')
error_msgs = []
prev_summary = last_e2e_run_df.groupby("Test Group").count()
curr_summary = log_pdf_copy.groupby("Test Group").count()
prev_test_groups = prev_summary.index.tolist()
curr_test_groups = curr_summary.index.tolist()
has_less_test_groups = len(prev_test_groups) > len(curr_test_groups)
    # Check if someone deleted some tests
    # (there are more test groups in the sheet)
if has_less_test_groups:
list_difference = [
item for item in prev_test_groups if item not in curr_test_groups
]
        error_msg = (
            "ERROR: current e2e run has fewer test groups than"
            + " the previous run, delta is %s" % list_difference
        )
error_msgs.append(error_msg)
# Just check the common test groups
if has_less_test_groups:
test_groups = curr_test_groups
else:
test_groups = prev_test_groups
for test_group in test_groups:
prev_test_group_df = last_e2e_run_df.loc[
last_e2e_run_df["Test Group"] == test_group
]
prev_input_types = (
prev_test_group_df.groupby("Input Type").count().index.tolist()
)
curr_test_group_df = log_pdf_copy.loc[log_pdf_copy["Test Group"] == test_group]
cur_input_typ = curr_test_group_df.groupby("Input Type").count().index.tolist()
has_less_input_types = len(prev_input_types) > len(cur_input_typ)
if has_less_input_types is True:
list_difference = [
item for item in prev_input_types if item not in cur_input_typ
]
error_msg = """ERROR: current test group %s has less
input types cases, delta is %s""" % (
test_group,
list_difference,
)
error_msgs.append(error_msg)
for input_type in prev_input_types:
prev_tests_df = prev_test_group_df.loc[
prev_test_group_df["Input Type"] == input_type
]
            prev_tests_df = prev_tests_df.sort_values(by=["QueryID"])
curr_tests_df = curr_test_group_df.loc[
curr_test_group_df["Input Type"] == input_type
]
            curr_tests_df = curr_tests_df.sort_values(by=["QueryID"])
            # We need to make a copy since we are going to drop some rows
prev_tests_df = prev_tests_df.copy()
curr_tests_df = curr_tests_df.copy()
# NOTE for debugging
# print("============================================PREV!")
# print(prev_tests_df.head())
# print(len(prev_tests_df))
# print("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxCURR!")
# print(curr_tests_df.head())
# print(len(curr_tests_df))
# Check if current run has less tests than previous run
len_prev_tests_df = len(prev_tests_df)
len_curr_tests_df = len(curr_tests_df)
has_less_tests = len_prev_tests_df > len_curr_tests_df
# NOTE for debugging
# print("====== PREV TESTS ======")
# print(prev_tests_df)
# print("====== CURR TESTS ======")
# print(curr_tests_df)
if has_less_tests:
prev_tests = prev_tests_df["QueryID"].tolist()
curr_tests = curr_tests_df["QueryID"].tolist()
list_difference = [
item for item in prev_tests if item not in curr_tests
]
error_msg = """ERROR: The test group %s has less tests than
previous run for input type %s, delta is %s""" % (
test_group,
input_type,
list_difference,
)
error_msgs.append(error_msg)
n = len_prev_tests_df - len_curr_tests_df
prev_tests_df.drop(prev_tests_df.tail(n).index, inplace=True)
elif len_prev_tests_df < len_curr_tests_df:
n = len_curr_tests_df - len_prev_tests_df
curr_tests_df.drop(curr_tests_df.tail(n).index, inplace=True)
prev_tests_results = prev_tests_df["Result"].to_list()
curr_tests_results = curr_tests_df["Result"].to_list()
for i in range(0, len(prev_tests_results)):
prev_test_result = prev_tests_results[i]
curr_test_result = curr_tests_results[i]
if prev_test_result == 1 and curr_test_result == 0:
error_msg = """ERROR: Test %d for %s (%s) is now failing
but before was ok!""" % (
i + 1,
test_group,
input_type,
)
error_msgs.append(error_msg)
succs = len(error_msgs) == 0
return succs, error_msgs
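# Minimal model (hypothetical lists) of the regression rule implemented above:
# a test regresses when it passed (1) in the previous run and fails (0) now;
# tests that only exist in the current run are ignored.
def _demo_regressed_test_ids(prev_results, curr_results):
    # returns 1-based indices, matching the "Test %d" numbering used above
    return [i + 1 for i, (p, c) in enumerate(zip(prev_results, curr_results))
            if p == 1 and c == 0]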
def saving_google_sheet_results(log_pdf):
import gspread
from oauth2client.service_account import ServiceAccountCredentials
log_info = Settings.data["RunSettings"]["logInfo"]
if log_info == "":
print(
"""####### ======= >>>>>>> WARNING this test run will
not save its results into the Google spreadsheet."""
)
return
# Create an empty list
log_list = []
# Iterate over each row
for index, rows in log_pdf.iterrows():
        # Build the list for the current row (appended to the sheet below)
current_list = [
rows.QueryID,
str(rows.TimeStamp),
str(rows.TestGroup),
rows.InputType,
rows.Query,
rows.Result,
rows.Error,
rows.Branch,
str(rows.CommitHash),
rows.nRals,
rows.nGPUs,
rows.DataDirectory,
rows.LoadingTime,
rows.EngineTotalTime,
rows.TotalTime,
]
# append the list to the final list
log_list.append(current_list)
# Use creds to create a client to interact with the Google Drive API
scope = [
"https://www.googleapis.com/auth/drive",
"https://spreadsheets.google.com/feeds",
]
# === 1. BlazingSQL =====
# Using credentials from BlazingSQL
# os.getcwd() #Settings.data['TestSettings']['workspaceDirectory']
# # #/home/kharoly/blazingsql/blazingdb-testing/BlazingSQLTest
current_dir = "/home/ubuntu/.conda/envs/e2e"
print(current_dir)
log_info = json.loads(log_info)
creds_blazing = ServiceAccountCredentials.from_json_keyfile_dict(log_info, scope)
client_blazing = gspread.authorize(creds_blazing)
    # Find the workbook by name and open a worksheet
work_sheet = "BSQL Log Results"
if "worksheet" in Settings.data["RunSettings"]:
work_sheet = Settings.data["RunSettings"]["worksheet"]
blaz_googlesheat = client_blazing.open("BSQL End-to-End Tests")
sheet_blazing = blaz_googlesheat.worksheet(work_sheet)
# Writing log results into Blazing sheet
total_queries = len(log_list)
for i in range(0, total_queries):
sheet_blazing.append_row(log_list[i])
time.sleep(1)
print("\nTable was uptdated into Blazing Google SpreadSheet")
def saveLogInFile(df):
dir_log = Settings.data["TestSettings"]["logDirectory"]
filepath = getFileName(dir_log)
df.to_excel(filepath, index=False)
def validate_messages(error_message, message_validation):
error_message = error_message.replace('\n', ' ').replace('\r', ' ')
message_validation = message_validation.replace('\n', ' ').replace('\r', ' ')
error_message = error_message.replace(' ', '')
message_validation = message_validation.replace(' ', '')
if error_message == message_validation:
result = "Success"
else:
result = "Fail"
return result
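# Illustrative usage (hypothetical messages): validate_messages() strips all
# whitespace and line breaks before comparing, so a wrapped engine error still
# matches its single-line expected form.
def _demo_validate_messages():
    assert validate_messages("Table not\nfound", "Table  not found") == "Success"
    assert validate_messages("Table not found", "Column not found") == "Fail"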
class bcolors:
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
def on_jenkins():
# NOTE For more env vars see
# https://wiki.jenkins.io/display/JENKINS/Building+a+software+project
jenkins_job = os.environ.get("JOB_NAME")
if jenkins_job is not None:
return True
return False
def print_tests(tests, onlyFails=False):
print(
"""************************************************************
*******************"""
)
tab = " "
failedPrefix = ""
if onlyFails:
failedPrefix = "FAILED"
# TODO percy check None
for extension in tests:
if onlyFails:
if extension == "parquet":
print(
"!!!!!!!!!!!!!!!! "
+ failedPrefix
+ " "
+ extension
+ " TESTS !!!!!!!!!!!!"
)
else:
print(
"!!!!!!!!!!!!!!!! "
+ failedPrefix
+ " "
+ extension
+ " TESTS !!!!!!!!!!!!!!!!"
)
else:
if extension == "parquet":
print("################ " + extension + " TESTS ############")
else:
print("############## " + extension + " TESTS ##############")
testNames = tests.get(extension)
for testName in testNames:
test = testNames.get(testName)
total = test.get("total")
countPass = test.get("countPass")
countCrash = test.get("countCrash")
failIds = test.get("failIds")
showTest = False
if onlyFails:
if len(failIds) > 0:
showTest = True
print(tab + "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")
else:
showTest = True
print(tab + "++++++++++++++++++++++++++++++++")
if showTest:
green = bcolors.OKGREEN
yellow = bcolors.WARNING
red = bcolors.FAIL
endc = bcolors.ENDC
# don't use colors since jenkins doesn't support ansi chars
if on_jenkins():
green = ""
yellow = ""
red = ""
endc = ""
print(
tab
+ "SUMMARY for "
+ failedPrefix
+ " test suite: "
+ testName
+ " - "
+ extension
)
if not onlyFails:
pass_green = green
pass_endc = endc
if (
countPass != total
): # if no full pass then don't use green colors here
pass_green = ""
pass_endc = ""
print(
pass_green
+ tab
+ "PASSED: "
+ str(countPass)
+ "/"
+ str(total)
+ pass_endc
)
fails = total - countPass - countCrash
yellow_fail = yellow
yellow_endc = endc
if fails == 0:
yellow_fail = ""
yellow_endc = ""
print(
yellow_fail
+ tab
+ "FAILED: "
+ str(fails)
+ "/"
+ str(total)
+ " "
+ str(failIds)
+ yellow_endc
)
red_crash = red
red_endc = endc
# if no crashes then don't use red colors here
if countCrash == 0:
red_crash = ""
red_endc = ""
print(
red_crash
+ tab
+ "CRASH: "
+ str(countCrash)
+ "/"
+ str(total)
+ red_endc
)
if not onlyFails:
print(tab + "TOTAL: " + str(total))
def printSummary(countPass, countCrash, total, no_color):
if no_color:
green = ""
yellow = ""
red = ""
endc = ""
else:
green = bcolors.OKGREEN
yellow = bcolors.WARNING
red = bcolors.FAIL
endc = bcolors.ENDC
# Second: print the global summary (totals from all the tests)
fails = total - countPass - countCrash
print(
"""**********************************************************
*********************"""
)
print("TOTAL SUMMARY for test suite: ")
print(green + "PASSED: " + str(countPass) + "/" + str(total) + endc)
print(yellow + "FAILED: " + str(fails) + "/" + str(total) + endc)
print(red + "CRASH: " + str(countCrash) + "/" + str(total) + endc)
print("TOTAL: " + str(total))
def getFileName(dir_log):
    fecha = time.strftime("%Y%m%d")  # date stamp (was mistakenly a second time stamp)
    hora = time.strftime("%H%M%S")  # 24-hour time stamp (was ambiguous 12-hour %I)
    return dir_log + "LogTest" + fecha + hora + ".xlsx"
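# Example (hypothetical clock): with the date/time formats above, a run on
# 2021-06-01 at 09:30:05 produces "<dir_log>LogTest20210601093005.xlsx".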
# ===========================================================================
tableNames = [
"customer",
"orders",
"supplier",
"lineitem",
"part",
"partsupp",
"nation",
"region",
"perf",
"acq",
"names",
"bool_orders",
"web_site",
"web_sales",
"web_returns",
"web_page",
"web_clickstreams",
"warehouse",
"time_dim",
"store_sales",
"store_returns",
"store",
"ship_mode",
"reason",
"promotion",
"product_reviews",
"item_marketprices",
"item",
"inventory",
"income_band",
"household_demographics",
"date_dim",
"customer_demographics",
"customer_address",
"customer",
"split",
"docked",
"smiles",
"dcoids",
]
def get_table_occurrences(query):
res = []
for name in tableNames:
if query.find(name) != -1:
res.append(name)
return res
def replace_all(text, dic):
    for i, j in dic.items():
        # re-insert the delimiter the pattern consumes (space, comma or end of
        # string), otherwise a comma right after the table name would be lost
        text = re.sub(r"\s%s(\s|$|,)" % i, j + r"\1", text)
    return text
def get_blazingsql_query(db_name, query):
new_query = query
for table_name in get_table_occurrences(query):
new_query = replace_all(
new_query,
{table_name: " %(table)s " % {"table": db_name + "." + table_name}},
)
return new_query
def get_drill_query(query):
new_query = query
for table_name in get_table_occurrences(query):
new_query = replace_all(
new_query, {table_name: " dfs.tmp.`%(table)s` " % {"table": table_name}}
)
return new_query
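# Illustrative sketch (hypothetical query): the rewrite helpers qualify bare
# table names per engine; only whitespace- or comma-delimited occurrences are
# rewritten, so substrings inside longer identifiers are left alone.
def _demo_query_rewrite():
    q = "select n_name from nation where n_regionkey = 0"
    print(get_blazingsql_query("main", q))  # ... from  main.nation  where ...
    print(get_drill_query(q))               # ... from  dfs.tmp.`nation`  where ...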
# ================================================================================================================
def run_query_drill(drill, query_str):
timeout = 400
query_result = drill.query(query_str, timeout)
df = query_result.to_dataframe()
if df.size == 0:
return Result(query_result.columns, df, None)
df = df[query_result.columns]
result = Result(query_result.columns, df, None)
return result
def run_query_spark(spark, query_str):
query_result = spark.sql(query_str)
df = query_result.toPandas()
if df.size == 0:
return Result(query_result.columns, df, None)
df = df[query_result.columns]
result = Result(query_result.columns, df, None)
return result
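# Hedged usage sketch (hypothetical session): both engine runners normalize
# their output into the shared Result wrapper, reordering the frame to the
# engine's reported column order before any comparison.
#   spark = SparkSession.builder.appName("e2e-demo").getOrCreate()
#   res = run_query_spark(spark, "select 1 as one")
#   print(res.resultSet)  # pandas DataFrame with the single column "one"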
def save_results_arrow(filename, pdf2):
# save results
import pyarrow as pa
table = pa.Table.from_pandas(pdf2)
# schema = pa.Schema.from_pandas(pdf2)
with open(filename, "bw") as f:
writer = pa.RecordBatchFileWriter(f, table.schema)
writer.write(table)
writer.close()
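# Companion sketch: a file written by save_results_arrow() can be read back
# with pyarrow's IPC file reader (assumed API of recent pyarrow releases).
def _demo_read_results_arrow(filename):
    import pyarrow as pa
    reader = pa.ipc.open_file(filename)
    return reader.read_pandas()  # back to a pandas DataFrame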
def save_results_parquet(filename, pdf2):
pdf2.to_parquet(filename, compression="GZIP")
def run_query(
bc,
engine,
query,
queryId,
queryType,
worder,
orderBy,
acceptable_difference,
use_percentage,
input_type,
**kwargs
):
print(query)
query_spark = kwargs.get("query_spark", query)
algebra = kwargs.get("algebra", "")
nRals = Settings.data["RunSettings"]["nRals"]
print_result = kwargs.get("print_result")
if print_result is None:
print_result = False
message_validation = kwargs.get("message_validation", "")
if message_validation is None:
message_validation = False
data_type = cs.get_extension(input_type)
if Settings.execution_mode != "Generator":
print(
"\n=============== New query: "
+ str(queryId)
+ " - "
+ data_type
+ " ================="
)
load_time = 0
engine_time = 0
total_time = 0
nested_query = kwargs.get("nested_query", False)
error_message = ""
if not nested_query:
# if int(nRals) == 1: # Single Node
query_blz = query # get_blazingsql_query('main', query)
if algebra == "":
            start_time = time.time()
            result_gdf = None  # remains None if bc.sql raises below
            try:
                result_gdf = bc.sql(query_blz)
            except Exception as e:
                error_message = str(e)
if not message_validation:
end_time = time.time()
total_time = (end_time - start_time) * 1000
# SUM(CASE WHEN info = 'evaluate_split_query load_data' THEN
# duration ELSE 0 END) AS load_time,
# MAX(load_time) AS load_time,
# log_result = bc.log(
# """SELECT
# MAX(end_time) as end_time, query_id,
# MAX(total_time) AS total_time
# FROM (
# SELECT
# query_id, node_id,
# SUM(CASE WHEN info = 'Query Execution Done' THEN
# duration ELSE 0 END) AS total_time,
# MAX(log_time) AS end_time
# FROM
# bsql_logs
# WHERE
# info = 'evaluate_split_query load_data'
# OR info = 'Query Execution Done'
# GROUP BY
# node_id, query_id
# )
# GROUP BY
# query_id
# ORDER BY
# end_time DESC limit 1"""
# )
# if int(nRals) == 1: # Single Node
# n_log = log_result
# else: # Simple Distribution
# n_log = log_result.compute()
load_time = 0 # n_log['load_time'][0]
engine_time = 0 #n_log["total_time"][0]
else:
result_gdf = bc.sql(query_blz, algebra=algebra)
else: # for nested queries as column basis test
result_gdf = kwargs.get("blz_result", [])
str_code_test = str(get_codTest(queryType)).upper()
filename = str_code_test + "-" + str(queryId) + ".parquet"
result_dir = Settings.data["TestSettings"]["fileResultsDirectory"]
file_results_dir = str(result_dir)
    if message_validation != "":
print_query_results2(
query,
queryId,
input_type,
queryType,
error_message,
message_validation
)
elif not isinstance(engine, str):
if isinstance(engine, PyDrill):
# Drill
query_drill = get_drill_query(query)
result_drill_gd = run_query_drill(engine, query_drill)
if result_gdf is not None:
if result_gdf.columns is not None:
# FOR DASK CUDF
import dask_cudf
if type(result_gdf) is dask_cudf.core.DataFrame:
result_gdf = result_gdf.compute()
expected_dtypes = result_gdf.dtypes.to_list()
pdf1 = (
upcast_to_float(result_gdf)
.fillna(get_null_constants(result_gdf))
.to_pandas()
)
pdf2 = to_pandas_f64_engine(
result_drill_gd.resultSet, expected_dtypes
)
pdf2 = upcast_to_float(pdf2).fillna(get_null_constants(pdf2))
formatResults(pdf1, pdf2, worder, orderBy)
if Settings.execution_mode == ExecutionMode.GENERATOR:
file_res_drill_dir = (
file_results_dir + "/" + "drill" + "/" + filename
)
if not os.path.exists(file_res_drill_dir):
save_results_parquet(file_res_drill_dir, pdf2)
print("Drill: " + filename + " generated.")
else:
print_query_results(
query,
queryId,
queryType,
pdf1,
pdf2,
result_gdf,
acceptable_difference,
use_percentage,
print_result,
engine,
input_type,
load_time,
engine_time,
total_time,
)
                else:
                    print_query_results2(
                        query, queryId, input_type, queryType,
                        result_gdf.error_message, message_validation
                    )
elif isinstance(engine, SparkSession):
# Spark
result_spark_df = run_query_spark(engine, query_spark)
if result_gdf is not None:
if result_gdf.columns is not None:
import dask_cudf
if type(result_gdf) is dask_cudf.core.DataFrame:
result_gdf = result_gdf.compute()
expected_dtypes = result_gdf.dtypes.to_list()
pdf1 = (
upcast_to_float(result_gdf)
.fillna(get_null_constants(result_gdf))
.to_pandas()
)
pdf2 = to_pandas_f64_engine(
result_spark_df.resultSet, expected_dtypes
)
pdf2 = upcast_to_float(pdf2).fillna(get_null_constants(pdf2))
formatResults(pdf1, pdf2, worder, orderBy)
                    if Settings.execution_mode == ExecutionMode.GENERATOR:
                        file_res_spark_dir = (
                            file_results_dir + "/" + "spark" + "/" + filename
                        )
                        if not os.path.exists(file_res_spark_dir):
                            save_results_parquet(file_res_spark_dir, pdf2)
                            print("Spark: " + filename + " generated.")
else:
print_query_results(
query_spark,
queryId,
queryType,
pdf1,
pdf2,
result_gdf,
acceptable_difference,
use_percentage,
print_result,
engine,
input_type,
load_time,
engine_time,
total_time,
)
                else:
                    print_query_results2(
                        query_spark, queryId, input_type, queryType,
                        result_gdf.error_message, message_validation
                    )
else: # GPUCI
compareResults = True
if "compare_results" in Settings.data["RunSettings"]:
compareResults = Settings.data["RunSettings"]["compare_results"]
if compareResults == "true":
resultFile = file_results_dir + "/" + str(engine) + "/" + filename
pdf2 = get_results(resultFile)
if result_gdf is not None:
if result_gdf.columns is not None:
# FOR DASK CUDF
import dask_cudf
if type(result_gdf) is dask_cudf.core.DataFrame:
result_gdf = result_gdf.compute()
expected_dtypes = result_gdf.dtypes.to_list()
pdf1 = (
upcast_to_float(result_gdf)
.fillna(get_null_constants(result_gdf))
.to_pandas()
)
format_pdf(pdf1, worder, orderBy)
print(pdf2)
print_query_results(
query,
queryId,
queryType,
pdf1,
pdf2,
result_gdf,
acceptable_difference,
use_percentage,
print_result,
engine,
input_type,
load_time,
engine_time,
total_time,
)
                else:
                    print_query_results2(
                        query, queryId, input_type, queryType,
                        result_gdf.error_message, message_validation
                    )
else:
if result_gdf is not None:
if result_gdf.columns is not None:
# FOR DASK CUDF
import dask_cudf
if type(result_gdf) is dask_cudf.core.DataFrame:
result_gdf = result_gdf.compute()
expected_dtypes = result_gdf.dtypes.to_list()
pdf1 = (
upcast_to_float(result_gdf)
.fillna(get_null_constants(result_gdf))
.to_pandas()
)
pdf2 = pd.DataFrame()
formatResults(pdf1, pdf2, worder, orderBy)
print_query_results(
query,
queryId,
queryType,
pdf1,
pdf2,
result_gdf,
acceptable_difference,
use_percentage,
print_result,
engine,
input_type,
load_time,
engine_time,
total_time,
)
                else:
                    print_query_results2(
                        query, queryId, input_type, queryType,
                        result_gdf.error_message, message_validation
                    )
def run_query_log(
bc,
query,
queryId,
queryType,
**kwargs
):
result_gdf = None
error_message = ""
message_validation = ""
try:
result_gdf = bc.log(query)
except Exception as e:
        error_message = str(e)
if result_gdf is not None:
if result_gdf.columns is not None:
# FOR DASK CUDF
import dask_cudf
if type(result_gdf) is dask_cudf.core.DataFrame:
result_gdf = result_gdf.compute()
print_query_results2(
query, queryId, DataType.CUDF, queryType, error_message, message_validation
)
else:
print_query_results2(
query, queryId, DataType.CUDF, queryType, error_message, message_validation
)
def run_query_performance(
bc,
drill,
query,
queryId,
queryType,
worder,
orderBy,
acceptable_difference,
use_percentage,
**kwargs
):
# Blazing
query_blz = query # get_blazingsql_query('main', query)
result_gdf = bc.sql(query_blz).get()
if result_gdf.error_message == "":
print_query_results_performance(query, queryId, queryType, result_gdf)
else:
        print_query_results2(
            query, queryId, None, queryType,  # input_type is unknown here
            result_gdf.error_message, ""
        )
def formatResults(pdf1, pdf2, worder, orderBy):
if worder == 1 and pdf1.size != 0 and pdf2.size != 0:
if len(pdf1.columns) == len(pdf2.columns):
pdf1.sort_values(
[orderBy] if orderBy else pdf1.columns.to_list(), inplace=True
)
pdf2.sort_values(
[orderBy] if orderBy else pdf2.columns.to_list(), inplace=True
)
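# Illustrative sketch (hypothetical frames): with worder == 1 and no orderBy
# column, both frames are sorted in place by all of their columns, which makes
# the later value comparison order-insensitive.
def _demo_format_results():
    a = pd.DataFrame({"x": [2, 1]})
    b = pd.DataFrame({"x": [1, 2]})
    formatResults(a, b, 1, None)
    assert a["x"].to_list() == b["x"].to_list() == [1, 2]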
def format_pdf(pdf, worder, orderBy):
if worder == 1 and pdf.size != 0:
pdf.sort_values([orderBy] if orderBy else pdf.columns.to_list(), inplace=True)
def get_results(result_file):
df = pd.read_parquet(result_file)
return df
| 30.819476 | 114 | 0.527319 |
import json
import logging
import os
import re
import time
import blazingsql
from blazingsql import DataType
import numpy as np
import pandas as pd
from BlazingLogging import loggingHandler as lhandler
from Configuration import ExecutionMode
from Configuration import Settings as Settings
from DataBase import createSchema as cs
if ((Settings.execution_mode == ExecutionMode.FULL and
Settings.compare_res == "true") or
Settings.execution_mode == ExecutionMode.GENERATOR):
print(Settings.execution_mode)
print(Settings.compare_res)
from pydrill.client import PyDrill
from pyspark.sql.session import SparkSession
class Result:
def __init__(self, columns, resultSet, resultBlz):
self.columns = columns
self.resultSet = resultSet
self.resultBlz = resultBlz
name = "blzlogging"
HANDLER = lhandler.logging_handler()
class loggerblz:
def __init__(self, query, error, totaltime):
self.query = query
self.error = error
self.totaltime = totaltime
class result:
def __init__(self, res_execution, error):
self.res_execution = res_execution
self.error = error
def logginghelper(name):
logging._defaultFormatter = logging.Formatter()
logger = logging.getLogger(name)
logger.handlers = []
logger.setLevel(logging.DEBUG)
logger.addHandler(HANDLER)
return logger
def loggingClose(name):
HANDLER.log = []
def upcast_to_float(df):
for name in df.columns:
if np.issubdtype(df[name].dtype, np.bool_):
df[name] = df[name].astype(np.float32)
elif np.issubdtype(df[name].dtype, np.integer):
df[name] = df[name].astype(np.float64)
return df
def to_pandas_f64_engine(df, expected_types_list):
count = 0
for col in df.columns:
if count >= len(expected_types_list):
break
if expected_types_list[count] != np.dtype(object):
if df.shape[0] > 0:
if not np.issubdtype(df[col].dtype, np.number) and not np.issubdtype(
df[col].dtype, np.datetime64
):
if np.issubdtype(expected_types_list[count], np.bool_):
df[col] = (
df[col].map({"true": 1.0, "false": 0.0}).astype(np.float32)
)
elif np.issubdtype(expected_types_list[count], np.datetime64):
df[col] = df[col].astype(expected_types_list[count])
else:
df[col] = pd.to_numeric(df[col], errors="coerce")
count = count + 1
return df
def get_null_constants(df):
null_values = {}
for col, dtype in df.dtypes.to_dict().items():
if np.issubdtype(dtype, np.datetime64):
null_values[col] = np.datetime64("nat")
elif np.issubdtype(dtype, np.number):
null_values[col] = np.nan
return null_values
def compare_results(pdf1, pdf2, acceptable_difference, use_percentage, engine):
np.warnings.filterwarnings("ignore")
if pdf1.size == 0 and pdf2.size == 0:
return "Success"
msg = ""
if not isinstance(engine, str):
if isinstance(engine, PyDrill):
msg = "PyDrill"
else:
msg = "PySpark"
elif engine=="drill":
msg = "PyDrill"
else:
msg = "PySpark"
msg = ""
if not isinstance(engine, str):
if isinstance(engine, PyDrill):
msg = "PyDrill"
else:
msg = "PySpark"
elif engine=="drill":
msg = "PyDrill"
else:
msg = "PySpark"
if pdf1.shape[0] == pdf2.shape[0]:
if pdf1.shape[1] == pdf2.shape[1]:
for name in pdf1.columns:
if pdf1[name].dtype == np.object:
pdf1[name] = pdf1[name].astype('string')
for name in pdf2.columns:
if pdf2[name].dtype == np.object:
pdf2[name] = pdf2[name].astype('string')
pdf1.reset_index(drop=True, inplace=True)
pdf2.reset_index(drop=True, inplace=True)
orig_pdf2_labels = pdf2.columns.to_list()
pdf2.columns = pdf1.columns.to_list()
exac_comp = pdf1.select_dtypes(exclude=np.inexact).equals(
pdf2.select_dtypes(exclude=np.inexact)
)
pdf2.columns = orig_pdf2_labels
tmp_pdf1 = pdf1.select_dtypes(include=np.inexact)
tmp_pdf2 = pdf2.select_dtypes(include=np.inexact)
if use_percentage:
relative_tolerance = acceptable_difference
absolute_tolerance = 0
else:
relative_tolerance = 0
absolute_tolerance = acceptable_difference
res = np.all(exac_comp) and np.allclose(
tmp_pdf1.values, tmp_pdf2.values, relative_tolerance,
absolute_tolerance, equal_nan=True
)
if res:
return "Success"
else:
return "Fail: Different values"
else:
return (
"Fail: Different number of columns blzSQLresult: "
+ str(pdf1.shape[1])
+ " "
+ msg
+ " result: "
+ str(pdf2.shape[1])
)
else:
return (
"Fail: Different number of rows blzSQLresult: "
+ str(pdf1.shape[0])
+ " "
+ msg
+ " result: "
+ str(pdf2.shape[0])
)
def begins_with(col1, col2, exp):
return col1.startswith(exp) or col2.startswith(exp)
def compare_column_names(pdf1, pdf2):
if len(pdf1.columns) != len(pdf2.columns):
if pdf1.values.size == 0 and pdf2.values.size == 0:
return True
print("Different set of columns")
return False
for blzCol, drillCol in zip(
pdf1.columns.values.tolist(), pdf2.columns.values.tolist()
):
if blzCol != drillCol:
if (
begins_with(drillCol, blzCol, "EXPR") is False
and begins_with(drillCol, blzCol, "count(") is False
):
print("Different columns")
return False
return True
def get_Branch():
branch = blazingsql.__branch_name__
return branch
def get_CommitHash():
commit = blazingsql.__version__
return commit
def get_QueryId(input_type, test_name, test_id):
query_id = (
str(input_type).upper()
+ "-"
+ str(get_codTest(test_name)).upper()
+ "-"
+ str(test_id)
)
return query_id
def get_resultId(resultComparisson):
result_id = 1
if resultComparisson != "Success":
result_id = 0
return result_id
def get_codTest(test_name):
switcher = {
"Aggregations without group by": "AGGWOGRBY",
"Coalesce": "COALESCE",
"Column Basis": "COLBAS",
"Bindable Alias": "BALIAS",
"Boolean": "BOOL",
"Case": "CASE",
"Cast": "CAST",
"Common Table Expressions": "COMTABLEX",
"Concat": "CONCAT",
"Count Distinct": "COUNTD",
"Count without group by": "COUNTWOGRBY",
"Cross join": "CROSSJOIN",
"Date": "DATE",
"DayOfWeek": "DAYOFWEEK",
"Dir": "DIR",
"File System Google Storage": "FSGS",
"Hdfs FileSystem": "FSHDFS",
"Hive FileSystem": "FSHIVE",
"File System Local": "FSLOCAL",
"File System S3": "FSS3",
"Full outer join": "FOUTJOIN",
"Group by": "GROUPBY",
"Group by without aggregations": "GRBYWOAGG",
"Inner join": "INNERJOIN",
"Left outer join": "LOUTJOIN",
"Like": "LIKE",
"Literal": "LITERAL",
"Nested Queries": "NESTEDQ",
"Non-EquiJoin Queries": "NEQUIJOIN",
"Order by": "ORDERBY",
"Predicates With Nulls": "PREDWNULLS",
"Round": "ROUND",
"Replace": "REPLACE",
"Simple Distribution From Local": "SIMPLEDIST",
"Smiles Test": "SMILES",
"Substring": "SUBSTRING",
"Tables from Pandas": "TBLPANDAS",
"Timestampdiff": "TIMESTAMPD",
"Timestamp": "TIMESTAMP",
"To_timestamp": "TO_TIMESTAMP",
"TPCH Queries": "TPCH",
"Config Options": "TPCH",
"Unary ops": "UNARYOPS",
"Unify Tables": "UNIFYTBL",
"Union": "UNION",
"Limit": "LIMIT",
"Where clause": "WHERE",
"Wild Card": "WILDCARD",
"Simple String": "SSTRING",
"String case": "STRINGCASE",
"Message Validation": "MESSAGEVAL"
}
return switcher.get(test_name)
def print_fixed_log(
logger,
test_name,
input_type,
test_id,
sql,
resultComparisson,
error_message,
load_time,
engine_time,
total_time,
):
commitHash = get_CommitHash()
branchName = get_Branch()
inputType = cs.get_extension(input_type)
logger.info(get_QueryId(inputType, test_name, test_id))
logger.info(Settings.dateNow)
logger.info(test_name)
logger.info(inputType)
logger.info(sql)
logger.info(get_resultId(resultComparisson))
logger.info(error_message)
logger.info(branchName)
logger.info(commitHash)
logger.info(Settings.data["RunSettings"]["nRals"])
logger.info(Settings.data["RunSettings"]["nGPUs"])
logger.info(Settings.data["TestSettings"]["dataDirectory"])
logger.info(test_id)
logger.info(load_time)
logger.info(engine_time)
logger.info(total_time)
def print_query_results(
sql,
queryId,
queryType,
pdf1,
pdf2,
resultgdf,
acceptable_difference,
use_percentage,
print_result,
engine,
input_type,
load_time,
engine_time,
total_time,
):
if print_result:
print("#BLZ:")
print(pdf1)
if not isinstance(engine, str):
if isinstance(engine, PyDrill):
print("#DRILL:")
else:
print("#PYSPARK:")
print(pdf2)
else:
if engine=="drill":
print("#DRILL:")
else:
print("#PYSPARK:")
data_type = cs.get_extension(input_type)
print(str(queryId) + " Test " + queryType + " - " + data_type)
print("#QUERY:")
print(sql)
print("RESULT:")
error_message = ""
stringResult = ""
compareResults = True
if "compare_results" in Settings.data["RunSettings"]:
compareResults = Settings.data["RunSettings"]["compare_results"]
if compareResults:
columnNamesComparison = compare_column_names(pdf1, pdf2)
if columnNamesComparison is not True:
print("Columns:")
print(pdf1.columns)
print(pdf2.columns)
error_message = "Column names are not the same"
print("ERROR:")
print(error_message)
resultComparisson = compare_results(
pdf1, pdf2, acceptable_difference, use_percentage, engine
)
if resultComparisson != "Success":
error_message = resultComparisson[6:]
print("ERROR:")
print(error_message)
stringResult = resultComparisson
if resultComparisson != "Success" or columnNamesComparison is False:
stringResult = "Fail"
else:
stringResult = "Success"
print(stringResult)
print("TOTAL TIME: ")
print(total_time)
print("CRASHED NODES: ")
print("TOTAL NODES: ")
print("===================================================")
logger = logginghelper(name)
print_fixed_log(
logger,
queryType,
input_type,
queryId,
sql,
stringResult,
error_message,
load_time,
engine_time,
total_time,
)
def print_query_results2(sql, queryId, input_type, queryType, error_message, message_validation):
print(queryId)
print("#QUERY:")
print(sql)
print("RESULT:")
result = validate_messages(error_message, message_validation)
print(result)
print("ERROR:")
if result=="Fail":
print(error_message)
else:
error_message=""
print("CALCITE TIME: ")
print("-")
print("RAL TIME: ")
print("-")
print("EXECUTION TIME: ")
print("-")
print("===================================================")
logger = logginghelper(name)
print_fixed_log(
logger, queryType, input_type, queryId, sql, result, error_message, None, None, None
)
def print_query_results_performance(sql, queryId, queryType, resultgdf):
print(queryId)
print("#QUERY:")
print(sql)
print("RESULT:")
resultComparisson = "Success"
print("CALCITE TIME: ")
print(resultgdf.calciteTime)
print("RAL TIME: ")
print(resultgdf.ralTime)
print("EXECUTION TIME: ")
print(resultgdf.totalTime)
print("===================================================")
logger = logginghelper(name)
print_fixed_log(
logger,
queryType,
queryId,
sql,
resultComparisson,
" ",
resultgdf.calciteTime,
resultgdf.ralTime,
resultgdf.totalTime,
)
def print_query_results_dist(
sql,
queryId,
queryType,
pdf1,
pdf2,
resultgdf,
acceptable_difference,
use_percentage,
print_result,
):
if print_result:
print("#BLZ:")
print(pdf1)
print("#DRILL:")
print(pdf2)
print(queryId)
print("#QUERY:")
print(sql)
print("RESULT:")
resultComparisson = compare_results(
pdf1.values, pdf2.values, acceptable_difference, use_percentage
)
error_message = ""
if resultComparisson != "Success":
error_message = resultComparisson[6:]
resultComparisson = "Fail"
print(resultComparisson)
print("ERROR:")
print(error_message)
else:
print(resultComparisson)
print("CALCITE TIME: ")
print(resultgdf.calciteTime)
print("RAL TIME: ")
print(resultgdf.ralTime)
print("EXECUTION TIME: ")
print(resultgdf.totalTime)
print("===================================================")
logger = logginghelper(name)
print_fixed_log(
logger,
queryType,
queryId,
sql,
resultComparisson,
error_message,
None,
None,
None,
)
class Test:
def __init__(self, test_name):
self.test_name = test_name
self.total = 0
self.success = 0
self.fail_ids = []
def save_log(gpu_ci_mode=False):
c = 1
cadena = []
subcadena = []
countPass = 0
countCrash = 0
for x in HANDLER.log:
if c < 17:
subcadena.append(x.msg)
c = c + 1
else:
c = 1
cadena.append(subcadena)
subcadena = []
subcadena.append(x.msg)
c = c + 1
print()
cadena.append(subcadena)
# then return success
if cadena == [[]]:
return True, []
df = pd.DataFrame(
cadena,
columns=[
"QueryID",
"TimeStamp",
"TestGroup",
"InputType",
"Query",
"Result",
"Error",
"Branch",
"CommitHash",
"nRals",
"nGPUs",
"DataDirectory",
"TestId",
"LoadingTime",
"EngineTotalTime",
"TotalTime",
],
)
total = df.shape[0]
countPass = df[df.Result == 1].count()["Result"]
df1 = df[
[
"QueryID",
"TimeStamp",
"TestGroup",
"InputType",
"Query",
"Result",
"Error",
"Branch",
"CommitHash",
"nRals",
"nGPUs",
"DataDirectory",
"LoadingTime",
"EngineTotalTime",
"TotalTime",
]
].copy()
create_summary_detail(df, gpu_ci_mode)
printSummary(countPass, countCrash, total, gpu_ci_mode)
if not gpu_ci_mode:
saveLogInFile(df1)
saveLog = False
if "saveLog" in Settings.data["RunSettings"]:
saveLog = Settings.data["RunSettings"]["saveLog"]
print("saveLog = " + str(saveLog))
# TODO william kharoly felipe we should try to enable and use
# this function in the future
# result, error_msgs = verify_prev_google_sheet_results(df1)
result, error_msgs = True, []
if result is True and saveLog == "true":
saving_google_sheet_results(df1)
else:
if countPass < total:
result, error_msgs = False, []
else:
result, error_msgs = True, []
loggingClose(name)
return result, error_msgs
def create_summary_detail(df, no_color):
pdf = df
pdf["Result"] = df["Result"].replace(1, "Success")
pdf["Result"] = df["Result"].replace(0, "Fail")
# making boolean series for a team name
filter_fail = pdf["Result"] == "Fail"
# filtering data
pdf2 = pdf.where(filter_fail)
pdf_fail = pdf2.dropna()
if no_color:
green = ""
yellow = ""
# red = ""
endc = ""
else:
green = bcolors.OKGREEN
yellow = bcolors.WARNING
# red = bcolors.FAIL
endc = bcolors.ENDC
# display
print(green + "========================================================")
print("DETAILED SUMMARY TESTS")
print("========================================================" + endc)
pd.set_option("max_rows", 1500)
print(pdf.groupby(["TestGroup", "InputType"])["Result"].value_counts())
print(yellow + "========================================================")
print("FAILED TESTS" + yellow)
print("========================================================" + endc)
# pd.set_option('max_columns', 5)
# pd.set_option('max_colwidth', 1000)
pd.set_option("display.max_columns", None)
pd.set_option("display.width", 2000)
pd.set_option("display.float_format", "{:20,.2f}".format)
pd.set_option("display.max_colwidth", None)
print(
pdf_fail.groupby(["TestGroup", "InputType", "Result"])["TestId"]
.apply(",".join)
.reset_index()
)
# This function use the google spreadsheet to compare the current results
# against historic ones
# Returns a tuple with 2 entries:
# 1st element: False in case gpuci should be fail, True otherwise
# 2nd element: A list of error messages (in case 1st element is False)
# Example:
# result, error_msgs = verify_prev_google_sheet_results(log_pdf)
# if result == False:
# exits the python process and do not move to next steps
# TODO william kharoly felipe we should try to enable and use
# this function in the future
def _verify_prev_google_sheet_results(log_pdf):
import gspread
from oauth2client.service_account import ServiceAccountCredentials
def get_the_data_from_sheet():
# Use creds to create a client to interact with the Google Drive API
scope = [
"https://www.googleapis.com/auth/drive",
"https://spreadsheets.google.com/feeds",
]
# Using credentials from BlazingSQL
# os.getcwd() #Settings.data['TestSettings']['workspaceDirectory']
# # #/home/kharoly/blazingsql/blazingdb-testing/BlazingSQLTest
# current_dir = "/home/ubuntu/.conda/envs/e2e"
log_info = Settings.data["RunSettings"]["logInfo"]
if log_info == "":
print(
"""####### ======= >>>>>>> WARNING this test run will not
be compared against old results from Google Docs. Define
the env var BLAZINGSQL_E2E_LOG_INFO"""
)
return None
log_info = json.loads(log_info)
creds_blazing = ServiceAccountCredentials.from_json_keyfile_dict(
log_info, scope
)
client_blazing = gspread.authorize(creds_blazing)
# Find a Locally workbook by name and open a sheet
work_sheet = "BSQL Log Results"
if "worksheet" in Settings.data["RunSettings"]:
work_sheet = Settings.data["RunSettings"]["worksheet"]
sheet_blazing = client_blazing.open("BSQL End-to-End Tests").worksheet(
work_sheet
)
# Writing log results into Blazing sheet
ret = pd.DataFrame(sheet_blazing.get_all_records())
# NOTE percy kharo william we need to patch these columns
# before convert to parquet
ret["LoadingTime"] = ret["LoadingTime"].astype(str)
ret["EngineTotalTime"] = ret["EngineTotalTime"].astype(str)
ret["TotalTime"] = ret["TotalTime"].astype(str)
return ret
dir_log = Settings.data["TestSettings"]["logDirectory"]
gspreadCacheHint = Settings.data["RunSettings"]["gspreadCacheHint"]
gspread_e2e_cache_path = dir_log + "/e2e-gspread-cache.parquet"
gspread_df = None
if gspreadCacheHint == "false":
gspread_df = get_the_data_from_sheet()
if gspread_df is not None:
# Always save a cache (so when gspreadCacheHint
# is false will refresh the cache)
gspread_df.to_parquet(gspread_e2e_cache_path)
elif gspreadCacheHint == "true":
if os.path.isfile(gspread_e2e_cache_path):
gspread_df = pd.read_parquet(gspread_e2e_cache_path)
else:
gspread_df = get_the_data_from_sheet()
if gspread_df is not None:
gspread_df.to_parquet(gspread_e2e_cache_path)
if gspread_df is None:
error_msg = """ERROR: This test run could not be compared
against old results from Google Docs"""
return False, [error_msg]
log_pdf_copy = log_pdf.copy()
prev_nrals = gspread_df["nRALS"][0]
curr_nrals = Settings.data["RunSettings"]["nRals"]
# Assume prev_nrals == curr_nrals
last_e2e_run_id = gspread_df["Timestamp"][0]
# NOTE If prev_nrals != curr_nrals we need to search the first
# Timestamp (a.k.a ID) for the current nRals target
if prev_nrals != curr_nrals:
gspread_df_uniques = gspread_df.drop_duplicates()
gspread_df_uniques_target_nrals = gspread_df_uniques.loc[
gspread_df_uniques["nRALS"] == curr_nrals
]
last_e2e_run_id = gspread_df_uniques_target_nrals.iloc[
0, 1
] # select the first Timestamp from the unique values
print(
"####### ======= >>>>>>> E2E INFO: We will compare the"
+ " current run against the ID (Timestamp): "
+ last_e2e_run_id
)
last_e2e_run_df = gspread_df.loc[gspread_df["Timestamp"] == last_e2e_run_id]
# NOTE percy kharo william we need to rename some columns to use our dfs
log_pdf_copy = log_pdf_copy.rename(
columns={
"TestGroup": "Test Group",
"InputType": "Input Type",
"nRals": "nRALS",
"DataDirectory": "data_dir",
}
)
# NOTE For debugging
# log_pdf_copy['TimeStamp'] = log_pdf_copy['TimeStamp'].astype(str)
# log_pdf_copy.to_parquet('/home/percy/workspace/logtest/ultimo.parquet',
# compression='GZIP')
# log_pdf_copy = pd.read_parquet('/home/user/last_run_log_df.parquet')
error_msgs = []
prev_summary = last_e2e_run_df.groupby("Test Group").count()
curr_summary = log_pdf_copy.groupby("Test Group").count()
prev_test_groups = prev_summary.index.tolist()
curr_test_groups = curr_summary.index.tolist()
has_less_test_groups = len(prev_test_groups) > len(curr_test_groups)
# Check if someone deleted some tests
# (there more test groups in the sheet)
if has_less_test_groups:
list_difference = [
item for item in prev_test_groups if item not in curr_test_groups
]
error_msg = (
"ERROR: current e2e has less test groups than"
+ " previous run, delta is %s" % list_difference
)
error_msgs.append(error_msg)
# Just check the common test groups
if has_less_test_groups:
test_groups = curr_test_groups
else:
test_groups = prev_test_groups
for test_group in test_groups:
prev_test_group_df = last_e2e_run_df.loc[
last_e2e_run_df["Test Group"] == test_group
]
prev_input_types = (
prev_test_group_df.groupby("Input Type").count().index.tolist()
)
curr_test_group_df = log_pdf_copy.loc[log_pdf_copy["Test Group"] == test_group]
cur_input_typ = curr_test_group_df.groupby("Input Type").count().index.tolist()
has_less_input_types = len(prev_input_types) > len(cur_input_typ)
if has_less_input_types is True:
list_difference = [
item for item in prev_input_types if item not in cur_input_typ
]
error_msg = """ERROR: current test group %s has less
input types cases, delta is %s""" % (
test_group,
list_difference,
)
error_msgs.append(error_msg)
for input_type in prev_input_types:
prev_tests_df = prev_test_group_df.loc[
prev_test_group_df["Input Type"] == input_type
]
prev_tests_df.sort_values(by=["QueryID"])
curr_tests_df = curr_test_group_df.loc[
curr_test_group_df["Input Type"] == input_type
]
curr_tests_df.sort_values(by=["QueryID"])
# We need to make a copy since we are going to drop some row
prev_tests_df = prev_tests_df.copy()
curr_tests_df = curr_tests_df.copy()
# NOTE for debugging
# print("============================================PREV!")
# print(prev_tests_df.head())
# print(len(prev_tests_df))
# print("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxCURR!")
# print(curr_tests_df.head())
# print(len(curr_tests_df))
# Check if current run has less tests than previous run
len_prev_tests_df = len(prev_tests_df)
len_curr_tests_df = len(curr_tests_df)
has_less_tests = len_prev_tests_df > len_curr_tests_df
# NOTE for debugging
# print("====== PREV TESTS ======")
# print(prev_tests_df)
# print("====== CURR TESTS ======")
# print(curr_tests_df)
if has_less_tests:
prev_tests = prev_tests_df["QueryID"].tolist()
curr_tests = curr_tests_df["QueryID"].tolist()
list_difference = [
item for item in prev_tests if item not in curr_tests
]
error_msg = """ERROR: The test group %s has less tests than
previous run for input type %s, delta is %s""" % (
test_group,
input_type,
list_difference,
)
error_msgs.append(error_msg)
n = len_prev_tests_df - len_curr_tests_df
prev_tests_df.drop(prev_tests_df.tail(n).index, inplace=True)
elif len_prev_tests_df < len_curr_tests_df:
n = len_curr_tests_df - len_prev_tests_df
curr_tests_df.drop(curr_tests_df.tail(n).index, inplace=True)
prev_tests_results = prev_tests_df["Result"].to_list()
curr_tests_results = curr_tests_df["Result"].to_list()
for i in range(0, len(prev_tests_results)):
prev_test_result = prev_tests_results[i]
curr_test_result = curr_tests_results[i]
if prev_test_result == 1 and curr_test_result == 0:
error_msg = """ERROR: Test %d for %s (%s) is now failing
but before was ok!""" % (
i + 1,
test_group,
input_type,
)
error_msgs.append(error_msg)
succs = len(error_msgs) == 0
return succs, error_msgs
def saving_google_sheet_results(log_pdf):
import gspread
from oauth2client.service_account import ServiceAccountCredentials
log_info = Settings.data["RunSettings"]["logInfo"]
if log_info == "":
print(
"""####### ======= >>>>>>> WARNING this test run will
not save its results into the Google spreadsheet."""
)
return
# Create an empty list
log_list = []
# Iterate over each row
for index, rows in log_pdf.iterrows():
# Create a list for the current row (ADDS)
current_list = [
rows.QueryID,
str(rows.TimeStamp),
str(rows.TestGroup),
rows.InputType,
rows.Query,
rows.Result,
rows.Error,
rows.Branch,
str(rows.CommitHash),
rows.nRals,
rows.nGPUs,
rows.DataDirectory,
rows.LoadingTime,
rows.EngineTotalTime,
rows.TotalTime,
]
# append the list to the final list
log_list.append(current_list)
# Use creds to create a client to interact with the Google Drive API
scope = [
"https://www.googleapis.com/auth/drive",
"https://spreadsheets.google.com/feeds",
]
# === 1. BlazingSQL =====
# Using credentials from BlazingSQL
# os.getcwd() #Settings.data['TestSettings']['workspaceDirectory']
# # #/home/kharoly/blazingsql/blazingdb-testing/BlazingSQLTest
current_dir = "/home/ubuntu/.conda/envs/e2e"
print(current_dir)
log_info = json.loads(log_info)
creds_blazing = ServiceAccountCredentials.from_json_keyfile_dict(log_info, scope)
client_blazing = gspread.authorize(creds_blazing)
# Find a Locally workbook by name and open a sheet
work_sheet = "BSQL Log Results"
if "worksheet" in Settings.data["RunSettings"]:
work_sheet = Settings.data["RunSettings"]["worksheet"]
blaz_googlesheat = client_blazing.open("BSQL End-to-End Tests")
sheet_blazing = blaz_googlesheat.worksheet(work_sheet)
# Writing log results into Blazing sheet
total_queries = len(log_list)
for i in range(0, total_queries):
sheet_blazing.append_row(log_list[i])
time.sleep(1)
print("\nTable was uptdated into Blazing Google SpreadSheet")
def saveLogInFile(df):
dir_log = Settings.data["TestSettings"]["logDirectory"]
filepath = getFileName(dir_log)
df.to_excel(filepath, index=False)
def validate_messages(error_message, message_validation):
error_message = error_message.replace('\n', ' ').replace('\r', ' ')
message_validation = message_validation.replace('\n', ' ').replace('\r', ' ')
error_message = error_message.replace(' ', '')
message_validation = message_validation.replace(' ', '')
if error_message == message_validation:
result = "Success"
else:
result = "Fail"
return result
class bcolors:
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
def on_jenkins():
# NOTE For more env vars see
# https://wiki.jenkins.io/display/JENKINS/Building+a+software+project
jenkins_job = os.environ.get("JOB_NAME")
if jenkins_job is not None:
return True
return False
def print_tests(tests, onlyFails=False):
print(
"""************************************************************
*******************"""
)
tab = " "
failedPrefix = ""
if onlyFails:
failedPrefix = "FAILED"
# TODO percy check None
for extension in tests:
if onlyFails:
if extension == "parquet":
print(
"!!!!!!!!!!!!!!!! "
+ failedPrefix
+ " "
+ extension
+ " TESTS !!!!!!!!!!!!"
)
else:
print(
"!!!!!!!!!!!!!!!! "
+ failedPrefix
+ " "
+ extension
+ " TESTS !!!!!!!!!!!!!!!!"
)
else:
if extension == "parquet":
print("################ " + extension + " TESTS ############")
else:
print("############## " + extension + " TESTS ##############")
testNames = tests.get(extension)
for testName in testNames:
test = testNames.get(testName)
total = test.get("total")
countPass = test.get("countPass")
countCrash = test.get("countCrash")
failIds = test.get("failIds")
showTest = False
if onlyFails:
if len(failIds) > 0:
showTest = True
print(tab + "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")
else:
showTest = True
print(tab + "++++++++++++++++++++++++++++++++")
if showTest:
green = bcolors.OKGREEN
yellow = bcolors.WARNING
red = bcolors.FAIL
endc = bcolors.ENDC
# don't use colors since jenkins doesn't support ansi chars
if on_jenkins():
green = ""
yellow = ""
red = ""
endc = ""
print(
tab
+ "SUMMARY for "
+ failedPrefix
+ " test suite: "
+ testName
+ " - "
+ extension
)
if not onlyFails:
pass_green = green
pass_endc = endc
if (
countPass != total
): # if no full pass then don't use green colors here
pass_green = ""
pass_endc = ""
print(
pass_green
+ tab
+ "PASSED: "
+ str(countPass)
+ "/"
+ str(total)
+ pass_endc
)
fails = total - countPass - countCrash
yellow_fail = yellow
yellow_endc = endc
if fails == 0:
yellow_fail = ""
yellow_endc = ""
print(
yellow_fail
+ tab
+ "FAILED: "
+ str(fails)
+ "/"
+ str(total)
+ " "
+ str(failIds)
+ yellow_endc
)
red_crash = red
red_endc = endc
if countCrash == 0:
red_crash = ""
red_endc = ""
print(
red_crash
+ tab
+ "CRASH: "
+ str(countCrash)
+ "/"
+ str(total)
+ red_endc
)
if not onlyFails:
print(tab + "TOTAL: " + str(total))
def printSummary(countPass, countCrash, total, no_color):
if no_color:
green = ""
yellow = ""
red = ""
endc = ""
else:
green = bcolors.OKGREEN
yellow = bcolors.WARNING
red = bcolors.FAIL
endc = bcolors.ENDC
# Second: print the global summary (totals from all the tests)
fails = total - countPass - countCrash
print(
"""**********************************************************
*********************"""
)
print("TOTAL SUMMARY for test suite: ")
print(green + "PASSED: " + str(countPass) + "/" + str(total) + endc)
print(yellow + "FAILED: " + str(fails) + "/" + str(total) + endc)
print(red + "CRASH: " + str(countCrash) + "/" + str(total) + endc)
print("TOTAL: " + str(total))
def getFileName(dir_log):
fecha = time.strftime("%H%M%S")
hora = time.strftime("%I%M%S")
return dir_log + "LogTest" + fecha + hora + ".xlsx" #
# ===========================================================================
tableNames = [
"customer",
"orders",
"supplier",
"lineitem",
"part",
"partsupp",
"nation",
"region",
"perf",
"acq",
"names",
"bool_orders",
"web_site",
"web_sales",
"web_returns",
"web_page",
"web_clickstreams",
"warehouse",
"time_dim",
"store_sales",
"store_returns",
"store",
"ship_mode",
"reason",
"promotion",
"product_reviews",
"item_marketprices",
"item",
"inventory",
"income_band",
"household_demographics",
"date_dim",
"customer_demographics",
"customer_address",
"customer",
"split",
"docked",
"smiles",
"dcoids",
]
def get_table_occurrences(query):
res = []
for name in tableNames:
if query.find(name) != -1:
res.append(name)
return res
def replace_all(text, dic):
for i, j in dic.items():
text = re.sub(r"\s%s(\s|$|\,)" % i, j, text)
return text
def get_blazingsql_query(db_name, query):
new_query = query
for table_name in get_table_occurrences(query):
new_query = replace_all(
new_query,
{table_name: " %(table)s " % {"table": db_name + "." + table_name}},
)
return new_query
def get_drill_query(query):
new_query = query
for table_name in get_table_occurrences(query):
new_query = replace_all(
new_query, {table_name: " dfs.tmp.`%(table)s` " % {"table": table_name}}
)
return new_query
# ================================================================================================================
def run_query_drill(drill, query_str):
timeout = 400
query_result = drill.query(query_str, timeout)
df = query_result.to_dataframe()
if df.size == 0:
return Result(query_result.columns, df, None)
df = df[query_result.columns]
result = Result(query_result.columns, df, None)
return result
def run_query_spark(spark, query_str):
query_result = spark.sql(query_str)
df = query_result.toPandas()
if df.size == 0:
return Result(query_result.columns, df, None)
df = df[query_result.columns]
result = Result(query_result.columns, df, None)
return result
def save_results_arrow(filename, pdf2):
# save results
import pyarrow as pa
table = pa.Table.from_pandas(pdf2)
# schema = pa.Schema.from_pandas(pdf2)
with open(filename, "bw") as f:
writer = pa.RecordBatchFileWriter(f, table.schema)
writer.write(table)
writer.close()
def save_results_parquet(filename, pdf2):
pdf2.to_parquet(filename, compression="GZIP")
def run_query(
bc,
engine,
query,
queryId,
queryType,
worder,
orderBy,
acceptable_difference,
use_percentage,
input_type,
**kwargs
):
print(query)
query_spark = kwargs.get("query_spark", query)
algebra = kwargs.get("algebra", "")
nRals = Settings.data["RunSettings"]["nRals"]
print_result = kwargs.get("print_result")
if print_result is None:
print_result = False
message_validation = kwargs.get("message_validation", "")
if message_validation is None:
message_validation = False
data_type = cs.get_extension(input_type)
if Settings.execution_mode != "Generator":
print(
"\n=============== New query: "
+ str(queryId)
+ " - "
+ data_type
+ " ================="
)
load_time = 0
engine_time = 0
total_time = 0
nested_query = kwargs.get("nested_query", False)
error_message = ""
if not nested_query:
# if int(nRals) == 1: # Single Node
query_blz = query # get_blazingsql_query('main', query)
if algebra == "":
start_time = time.time()
try:
result_gdf = bc.sql(query_blz)
except Exception as e:
error_message=str(e)
if not message_validation:
end_time = time.time()
total_time = (end_time - start_time) * 1000
# SUM(CASE WHEN info = 'evaluate_split_query load_data' THEN
# duration ELSE 0 END) AS load_time,
# MAX(load_time) AS load_time,
# log_result = bc.log(
# """SELECT
# MAX(end_time) as end_time, query_id,
# MAX(total_time) AS total_time
# FROM (
# SELECT
# query_id, node_id,
# SUM(CASE WHEN info = 'Query Execution Done' THEN
# duration ELSE 0 END) AS total_time,
# MAX(log_time) AS end_time
# FROM
# bsql_logs
# WHERE
# info = 'evaluate_split_query load_data'
# OR info = 'Query Execution Done'
# GROUP BY
# node_id, query_id
# )
# GROUP BY
# query_id
# ORDER BY
# end_time DESC limit 1"""
# )
# if int(nRals) == 1: # Single Node
# n_log = log_result
# else: # Simple Distribution
# n_log = log_result.compute()
load_time = 0 # n_log['load_time'][0]
engine_time = 0 #n_log["total_time"][0]
else:
result_gdf = bc.sql(query_blz, algebra=algebra)
else: # for nested queries as column basis test
result_gdf = kwargs.get("blz_result", [])
str_code_test = str(get_codTest(queryType)).upper()
filename = str_code_test + "-" + str(queryId) + ".parquet"
result_dir = Settings.data["TestSettings"]["fileResultsDirectory"]
file_results_dir = str(result_dir)
    if message_validation != "":
print_query_results2(
query,
queryId,
input_type,
queryType,
error_message,
message_validation
)
elif not isinstance(engine, str):
if isinstance(engine, PyDrill):
# Drill
query_drill = get_drill_query(query)
result_drill_gd = run_query_drill(engine, query_drill)
if result_gdf is not None:
if result_gdf.columns is not None:
# FOR DASK CUDF
import dask_cudf
if type(result_gdf) is dask_cudf.core.DataFrame:
result_gdf = result_gdf.compute()
expected_dtypes = result_gdf.dtypes.to_list()
pdf1 = (
upcast_to_float(result_gdf)
.fillna(get_null_constants(result_gdf))
.to_pandas()
)
pdf2 = to_pandas_f64_engine(
result_drill_gd.resultSet, expected_dtypes
)
pdf2 = upcast_to_float(pdf2).fillna(get_null_constants(pdf2))
formatResults(pdf1, pdf2, worder, orderBy)
if Settings.execution_mode == ExecutionMode.GENERATOR:
file_res_drill_dir = (
file_results_dir + "/" + "drill" + "/" + filename
)
if not os.path.exists(file_res_drill_dir):
save_results_parquet(file_res_drill_dir, pdf2)
print("Drill: " + filename + " generated.")
else:
print_query_results(
query,
queryId,
queryType,
pdf1,
pdf2,
result_gdf,
acceptable_difference,
use_percentage,
print_result,
engine,
input_type,
load_time,
engine_time,
total_time,
)
else:
print_query_results2(
query, queryId, queryType, result_gdf.error_message
)
elif isinstance(engine, SparkSession):
# Spark
result_spark_df = run_query_spark(engine, query_spark)
if result_gdf is not None:
if result_gdf.columns is not None:
import dask_cudf
if type(result_gdf) is dask_cudf.core.DataFrame:
result_gdf = result_gdf.compute()
expected_dtypes = result_gdf.dtypes.to_list()
pdf1 = (
upcast_to_float(result_gdf)
.fillna(get_null_constants(result_gdf))
.to_pandas()
)
pdf2 = to_pandas_f64_engine(
result_spark_df.resultSet, expected_dtypes
)
pdf2 = upcast_to_float(pdf2).fillna(get_null_constants(pdf2))
formatResults(pdf1, pdf2, worder, orderBy)
if Settings.execution_mode == ExecutionMode.GENERATOR:
file_res_drill_dir = (
file_results_dir + "/" + "spark" + "/" + filename
)
if not os.path.exists(file_res_drill_dir):
save_results_parquet(file_res_drill_dir, pdf2)
print("Spark: " + filename + " generated.")
else:
print_query_results(
query_spark,
queryId,
queryType,
pdf1,
pdf2,
result_gdf,
acceptable_difference,
use_percentage,
print_result,
engine,
input_type,
load_time,
engine_time,
total_time,
)
else:
print_query_results2(
query_spark, queryId, queryType, result_gdf.error_message
)
else: # GPUCI
compareResults = True
if "compare_results" in Settings.data["RunSettings"]:
compareResults = Settings.data["RunSettings"]["compare_results"]
if compareResults == "true":
resultFile = file_results_dir + "/" + str(engine) + "/" + filename
pdf2 = get_results(resultFile)
if result_gdf is not None:
if result_gdf.columns is not None:
# FOR DASK CUDF
import dask_cudf
if type(result_gdf) is dask_cudf.core.DataFrame:
result_gdf = result_gdf.compute()
expected_dtypes = result_gdf.dtypes.to_list()
pdf1 = (
upcast_to_float(result_gdf)
.fillna(get_null_constants(result_gdf))
.to_pandas()
)
format_pdf(pdf1, worder, orderBy)
print(pdf2)
print_query_results(
query,
queryId,
queryType,
pdf1,
pdf2,
result_gdf,
acceptable_difference,
use_percentage,
print_result,
engine,
input_type,
load_time,
engine_time,
total_time,
)
else:
print_query_results2(
query, queryId, queryType, result_gdf.error_message
)
else:
if result_gdf is not None:
if result_gdf.columns is not None:
# FOR DASK CUDF
import dask_cudf
if type(result_gdf) is dask_cudf.core.DataFrame:
result_gdf = result_gdf.compute()
expected_dtypes = result_gdf.dtypes.to_list()
pdf1 = (
upcast_to_float(result_gdf)
.fillna(get_null_constants(result_gdf))
.to_pandas()
)
pdf2 = pd.DataFrame()
formatResults(pdf1, pdf2, worder, orderBy)
print_query_results(
query,
queryId,
queryType,
pdf1,
pdf2,
result_gdf,
acceptable_difference,
use_percentage,
print_result,
engine,
input_type,
load_time,
engine_time,
total_time,
)
else:
print_query_results2(
query, queryId, queryType, result_gdf.error_message
)
def run_query_log(
bc,
query,
queryId,
queryType,
**kwargs
):
result_gdf = None
error_message = ""
message_validation = ""
try:
result_gdf = bc.log(query)
except Exception as e:
        error_message = str(e)
if result_gdf is not None:
if result_gdf.columns is not None:
# FOR DASK CUDF
import dask_cudf
if type(result_gdf) is dask_cudf.core.DataFrame:
result_gdf = result_gdf.compute()
print_query_results2(
query, queryId, DataType.CUDF, queryType, error_message, message_validation
)
else:
print_query_results2(
query, queryId, DataType.CUDF, queryType, error_message, message_validation
)
def run_query_performance(
bc,
drill,
query,
queryId,
queryType,
worder,
orderBy,
acceptable_difference,
use_percentage,
**kwargs
):
# Blazing
query_blz = query # get_blazingsql_query('main', query)
result_gdf = bc.sql(query_blz).get()
if result_gdf.error_message == "":
print_query_results_performance(query, queryId, queryType, result_gdf)
else:
print_query_results2(query, queryId, queryType, result_gdf.error_message)
def formatResults(pdf1, pdf2, worder, orderBy):
if worder == 1 and pdf1.size != 0 and pdf2.size != 0:
if len(pdf1.columns) == len(pdf2.columns):
pdf1.sort_values(
[orderBy] if orderBy else pdf1.columns.to_list(), inplace=True
)
pdf2.sort_values(
[orderBy] if orderBy else pdf2.columns.to_list(), inplace=True
)
def format_pdf(pdf, worder, orderBy):
if worder == 1 and pdf.size != 0:
pdf.sort_values([orderBy] if orderBy else pdf.columns.to_list(), inplace=True)
def get_results(result_file):
df = pd.read_parquet(result_file)
return df
| true | true |
f7383191c1c509e7cac90e323a122138fc4d0520 | 1,656 | py | Python | QRCode/main.py | liantian-cn/Deprecated-GAE | d163127e1cb2a54c02a50c23fecf02b9de9e4bb8 | [
"Unlicense"
] | null | null | null | QRCode/main.py | liantian-cn/Deprecated-GAE | d163127e1cb2a54c02a50c23fecf02b9de9e4bb8 | [
"Unlicense"
] | null | null | null | QRCode/main.py | liantian-cn/Deprecated-GAE | d163127e1cb2a54c02a50c23fecf02b9de9e4bb8 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# __author__ = 'Liantian'
# __email__ = "liantian.me+code@gmail.com"
from io import BytesIO
import qrcode
from flask import Flask, render_template, send_file, request
from qrcode.exceptions import DataOverflowError
ecl_map = {
'L': qrcode.constants.ERROR_CORRECT_L,
    'M': qrcode.constants.ERROR_CORRECT_M,
'Q': qrcode.constants.ERROR_CORRECT_Q,
'H': qrcode.constants.ERROR_CORRECT_H,
}
app = Flask(__name__)
@app.errorhandler(404)
def page_not_found(e):
return "Error : 404 - Page Not Found", 404
@app.route('/', methods=['GET'])
def index():
return render_template("index.html")
@app.route('/api', methods=['GET', 'POST'])
def api():
data = request.values.get('data', "parameter 'data' is empty\n")
    try:
        size = int(request.values.get('size', 4))
    except (TypeError, ValueError):
        size = 4
    if size < 1 or size > 100:
        size = 4
ecl = request.values.get('ecl', "L")
if ecl not in ['L', 'M', 'Q', 'H']:
ecl = 'M'
qr = qrcode.QRCode(error_correction=ecl_map[ecl], box_size=size, border=1)
qr.add_data(data)
try:
qr.make()
except DataOverflowError:
return "Error, Data Too Long", 400
img = qr.make_image()
img_io = BytesIO()
img.save(img_io, 'PNG')
img_io.seek(0)
return send_file(img_io, mimetype='image/png')
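# Illustrative request (parameter values are examples):
#   GET /api?data=hello&size=6&ecl=Q  ->  returns the QR code as a PNG image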
if __name__ == '__main__':
# This is used when running locally only. When deploying to Google App
# Engine, a webserver process such as Gunicorn will serve the app. This
# can be configured by adding an `entrypoint` to app.yaml.
app.run(host='127.0.0.1', port=8080, debug=True)
| 26.285714 | 78 | 0.655193 |
from io import BytesIO
import qrcode
from flask import Flask, render_template, send_file, request
from qrcode.exceptions import DataOverflowError
ecl_map = {
'L': qrcode.constants.ERROR_CORRECT_L,
    'M': qrcode.constants.ERROR_CORRECT_M,
'Q': qrcode.constants.ERROR_CORRECT_Q,
'H': qrcode.constants.ERROR_CORRECT_H,
}
app = Flask(__name__)
@app.errorhandler(404)
def page_not_found(e):
return "Error : 404 - Page Not Found", 404
@app.route('/', methods=['GET'])
def index():
return render_template("index.html")
@app.route('/api', methods=['GET', 'POST'])
def api():
data = request.values.get('data', "parameter 'data' is empty\n")
    try:
        size = int(request.values.get('size', 4))
    except (TypeError, ValueError):
        size = 4
    if size < 1 or size > 100:
        size = 4
ecl = request.values.get('ecl', "L")
if ecl not in ['L', 'M', 'Q', 'H']:
ecl = 'M'
qr = qrcode.QRCode(error_correction=ecl_map[ecl], box_size=size, border=1)
qr.add_data(data)
try:
qr.make()
except DataOverflowError:
return "Error, Data Too Long", 400
img = qr.make_image()
img_io = BytesIO()
img.save(img_io, 'PNG')
img_io.seek(0)
return send_file(img_io, mimetype='image/png')
if __name__ == '__main__':
app.run(host='127.0.0.1', port=8080, debug=True)
| true | true |
f73832705f638951aeb1deb345c42726a5f4f1d1 | 4,791 | py | Python | examples/basic_operations/get_artifact_metadata.py | Insutanto/google-ads-python | f63e318ca39f2ecc6546fba69994456815727578 | [
"Apache-2.0"
] | null | null | null | examples/basic_operations/get_artifact_metadata.py | Insutanto/google-ads-python | f63e318ca39f2ecc6546fba69994456815727578 | [
"Apache-2.0"
] | null | null | null | examples/basic_operations/get_artifact_metadata.py | Insutanto/google-ads-python | f63e318ca39f2ecc6546fba69994456815727578 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example illustrates how to retrieve artifact metadata.
The metadata retrieved can provide additional context about the artifact,
such as whether it is selectable, filterable, or sortable. The artifact can be
either a resource (such as customer, or campaign) or a field (such as
metrics.impressions, campaign.id). It also shows the data type and artifacts
that are selectable with the artifact.
"""
import argparse
import sys
import google.ads.google_ads.client
_DEFAULT_PAGE_SIZE = 1000
def _is_or_is_not(bool_value):
"""Produces display text for whether metadata is applicable to artifact.
Args:
bool_value: a BoolValue instance.
Returns:
        A str with value "is" if bool_value is True, else "isn't".
"""
return 'is' if bool_value.value else 'isn\'t'
def main(client, artifact_name, page_size):
gaf_service = client.get_service('GoogleAdsFieldService', version='v4')
# Searches for an artifact with the specified name.
query = ('SELECT name, category, selectable, filterable, sortable, '
'selectable_with, data_type, is_repeated '
'WHERE name = \'%s\'') % artifact_name
response = gaf_service.search_google_ads_fields(
query=query, page_size=page_size)
# Iterates over all rows and prints out the metadata of the returned
# artifacts.
try:
for google_ads_field in response:
# Note that the category and data type printed below are enum
# values. For example, a value of 2 will be returned when the
# category is "RESOURCE".
#
# A mapping of enum names to values can be found in
# GoogleAdsFieldCategoryEnum for the category and
# GoogleAdsFieldDataTypeEnum for the data type.
selectable = _is_or_is_not(google_ads_field.selectable)
filterable = _is_or_is_not(google_ads_field.filterable)
sortable = _is_or_is_not(google_ads_field.sortable)
is_repeated = _is_or_is_not(google_ads_field.is_repeated)
print('An artifact named "%s" with category %d and data type %d %s '
'selectable, %s filterable, %s sortable, and %s repeated.'
% (google_ads_field.name.value, google_ads_field.category,
google_ads_field.data_type, selectable, filterable,
sortable, is_repeated))
if len(google_ads_field.selectable_with) > 0:
selectable_artifacts = [
wrapped_selectable_artifact.value
for wrapped_selectable_artifact
in google_ads_field.selectable_with]
print('')
print('The artifact can be selected with the following '
'artifacts:')
for artifact in selectable_artifacts:
print(artifact)
except google.ads.google_ads.errors.GoogleAdsException as ex:
print('Request with ID "%s" failed with status "%s" and includes the '
'following errors:' % (ex.request_id, ex.error.code().name))
for error in ex.failure.errors:
print('\tError with message "%s".' % error.message)
if error.location:
for field_path_element in error.location.field_path_elements:
print('\t\tOn field: %s' % field_path_element.field_name)
sys.exit(1)
if __name__ == '__main__':
# GoogleAdsClient will read the google-ads.yaml configuration file in the
# home directory if none is specified.
google_ads_client = (google.ads.google_ads.client.GoogleAdsClient
.load_from_storage())
parser = argparse.ArgumentParser(
description='Lists metadata for the specified artifact.')
# The following argument(s) should be provided to run the example.
parser.add_argument('-a', '--artifact_name', type=str,
required=True,
help='The name of the artifact for which we are '
'retrieving metadata.')
args = parser.parse_args()
main(google_ads_client, args.artifact_name, _DEFAULT_PAGE_SIZE)
| 41.301724 | 80 | 0.661449 |
import argparse
import sys
import google.ads.google_ads.client
_DEFAULT_PAGE_SIZE = 1000
def _is_or_is_not(bool_value):
return 'is' if bool_value.value else 'isn\'t'
def main(client, artifact_name, page_size):
gaf_service = client.get_service('GoogleAdsFieldService', version='v4')
# Searches for an artifact with the specified name.
query = ('SELECT name, category, selectable, filterable, sortable, '
'selectable_with, data_type, is_repeated '
'WHERE name = \'%s\'') % artifact_name
response = gaf_service.search_google_ads_fields(
query=query, page_size=page_size)
# Iterates over all rows and prints out the metadata of the returned
# artifacts.
try:
for google_ads_field in response:
# Note that the category and data type printed below are enum
# values. For example, a value of 2 will be returned when the
# category is "RESOURCE".
#
# A mapping of enum names to values can be found in
# GoogleAdsFieldCategoryEnum for the category and
# GoogleAdsFieldDataTypeEnum for the data type.
selectable = _is_or_is_not(google_ads_field.selectable)
filterable = _is_or_is_not(google_ads_field.filterable)
sortable = _is_or_is_not(google_ads_field.sortable)
is_repeated = _is_or_is_not(google_ads_field.is_repeated)
print('An artifact named "%s" with category %d and data type %d %s '
'selectable, %s filterable, %s sortable, and %s repeated.'
% (google_ads_field.name.value, google_ads_field.category,
google_ads_field.data_type, selectable, filterable,
sortable, is_repeated))
if len(google_ads_field.selectable_with) > 0:
selectable_artifacts = [
wrapped_selectable_artifact.value
for wrapped_selectable_artifact
in google_ads_field.selectable_with]
print('')
print('The artifact can be selected with the following '
'artifacts:')
for artifact in selectable_artifacts:
print(artifact)
except google.ads.google_ads.errors.GoogleAdsException as ex:
print('Request with ID "%s" failed with status "%s" and includes the '
'following errors:' % (ex.request_id, ex.error.code().name))
for error in ex.failure.errors:
print('\tError with message "%s".' % error.message)
if error.location:
for field_path_element in error.location.field_path_elements:
print('\t\tOn field: %s' % field_path_element.field_name)
sys.exit(1)
if __name__ == '__main__':
# GoogleAdsClient will read the google-ads.yaml configuration file in the
# home directory if none is specified.
google_ads_client = (google.ads.google_ads.client.GoogleAdsClient
.load_from_storage())
parser = argparse.ArgumentParser(
description='Lists metadata for the specified artifact.')
# The following argument(s) should be provided to run the example.
parser.add_argument('-a', '--artifact_name', type=str,
required=True,
help='The name of the artifact for which we are '
'retrieving metadata.')
args = parser.parse_args()
main(google_ads_client, args.artifact_name, _DEFAULT_PAGE_SIZE)
| true | true |
f738337de7bb54688239fe20bffe674325ba96f6 | 15,346 | py | Python | python_modules/dagster/dagster/core/system_config/objects.py | camvogel/dagster | b4df94bf34906e7f81c973a7fdad5429ae3697ba | [
"Apache-2.0"
] | 1 | 2021-01-31T19:16:29.000Z | 2021-01-31T19:16:29.000Z | python_modules/dagster/dagster/core/system_config/objects.py | camvogel/dagster | b4df94bf34906e7f81c973a7fdad5429ae3697ba | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster/core/system_config/objects.py | camvogel/dagster | b4df94bf34906e7f81c973a7fdad5429ae3697ba | [
"Apache-2.0"
] | 1 | 2021-12-08T18:13:19.000Z | 2021-12-08T18:13:19.000Z | """System-provided config objects and constructors."""
from typing import AbstractSet, Any, Dict, List, NamedTuple, Optional, Type, Union, cast
from dagster import check
from dagster.core.definitions.configurable import ConfigurableDefinition
from dagster.core.definitions.executor_definition import (
ExecutorDefinition,
execute_in_process_executor,
)
from dagster.core.definitions.pipeline_definition import PipelineDefinition
from dagster.core.definitions.resource_definition import ResourceDefinition
from dagster.core.errors import DagsterInvalidConfigError
from dagster.utils import ensure_single_item
class SolidConfig(
NamedTuple(
"_SolidConfig",
[
("config", Any),
("inputs", Dict[str, Any]),
("outputs", "OutputsConfig"),
],
)
):
def __new__(cls, config, inputs, outputs):
return super(SolidConfig, cls).__new__(
cls,
config,
check.opt_dict_param(inputs, "inputs", key_type=str),
check.inst_param(outputs, "outputs", OutputsConfig),
)
@staticmethod
def from_dict(config):
check.dict_param(config, "config", key_type=str)
return SolidConfig(
config=config.get("config"),
inputs=config.get("inputs") or {},
outputs=OutputsConfig(config.get("outputs")),
)
class OutputsConfig(NamedTuple):
"""
    Outputs are configured as a dict if any of the outputs has an output manager with an
    output_config_schema, and as a list otherwise.
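    Illustrative shapes (the output names here are hypothetical):
        {"result": {...}}    # dict form: per-output manager config, keyed by output name
        [{"result": {...}}]  # list form: type materializer specs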
"""
config: Union[Dict, List]
@property
def output_names(self) -> AbstractSet[str]:
if isinstance(self.config, list):
return {key for entry in self.config for key in entry.keys()}
elif isinstance(self.config, dict):
return self.config.keys()
else:
            return set()
@property
def type_materializer_specs(self) -> list:
if isinstance(self.config, list):
return self.config
else:
return []
def get_output_manager_config(self, output_name) -> Any:
if isinstance(self.config, dict):
return self.config.get(output_name)
else:
return None
class ResourceConfig(NamedTuple):
config: Any
@staticmethod
def from_dict(config):
check.dict_param(config, "config", key_type=str)
return ResourceConfig(config=config.get("config"))
class ResolvedRunConfig(
NamedTuple(
"_ResolvedRunConfig",
[
("solids", Dict[str, SolidConfig]),
("execution", "ExecutionConfig"),
("resources", Dict[str, ResourceConfig]),
("loggers", Dict[str, dict]),
("original_config_dict", Any),
("mode", str),
("inputs", Dict[str, Any]),
],
)
):
def __new__(
cls,
solids=None,
execution=None,
resources=None,
loggers=None,
original_config_dict=None,
mode=None,
inputs=None,
):
check.opt_inst_param(execution, "execution", ExecutionConfig)
check.opt_dict_param(original_config_dict, "original_config_dict")
check.opt_dict_param(resources, "resources", key_type=str)
check.opt_str_param(mode, "mode")
check.opt_dict_param(inputs, "inputs", key_type=str)
if execution is None:
execution = ExecutionConfig(None, None)
return super(ResolvedRunConfig, cls).__new__(
cls,
solids=check.opt_dict_param(solids, "solids", key_type=str, value_type=SolidConfig),
execution=execution,
resources=resources,
loggers=check.opt_dict_param(loggers, "loggers", key_type=str, value_type=dict),
original_config_dict=original_config_dict,
mode=mode,
inputs=inputs,
)
@staticmethod
def build(
pipeline_def: PipelineDefinition,
run_config: Optional[Dict[str, Any]] = None,
mode: Optional[str] = None,
) -> "ResolvedRunConfig":
"""This method validates a given run config against the pipeline config schema. If
        successful, we instantiate a ResolvedRunConfig object.
        In case the run_config is invalid, this method raises a DagsterInvalidConfigError.
"""
from dagster.config.validate import process_config
from .composite_descent import composite_descent
check.inst_param(pipeline_def, "pipeline_def", PipelineDefinition)
run_config = check.opt_dict_param(run_config, "run_config")
check.opt_str_param(mode, "mode")
mode = mode or pipeline_def.get_default_mode_name()
run_config_schema = pipeline_def.get_run_config_schema(mode)
if run_config_schema.config_mapping:
# add user code boundary
run_config = run_config_schema.config_mapping.resolve_from_unvalidated_config(
run_config
)
config_evr = process_config(run_config_schema.run_config_schema_type, run_config)
if not config_evr.success:
raise DagsterInvalidConfigError(
f"Error in config for {pipeline_def.target_type}".format(pipeline_def.name),
config_evr.errors,
run_config,
)
config_value = config_evr.value
mode_def = pipeline_def.get_mode_definition(mode)
# If using the `execute_in_process` executor, we ignore the execution config value, since it
# may be pointing to the executor for the job rather than the `execute_in_process` executor.
if (
len(mode_def.executor_defs) == 1
and mode_def.executor_defs[0] # pylint: disable=comparison-with-callable
== execute_in_process_executor
):
config_mapped_execution_configs: Optional[Dict[str, Any]] = {}
else:
if pipeline_def.is_job:
executor_config = config_value.get("execution", {})
config_mapped_execution_configs = config_map_executor(
executor_config, mode_def.executor_defs[0]
)
else:
config_mapped_execution_configs = config_map_objects(
config_value,
mode_def.executor_defs,
"execution",
ExecutorDefinition,
"executor",
)
resource_defs = pipeline_def.get_required_resource_defs_for_mode(mode)
resource_configs = config_value.get("resources", {})
config_mapped_resource_configs = config_map_resources(resource_defs, resource_configs)
config_mapped_logger_configs = config_map_loggers(pipeline_def, config_value, mode)
node_key = "ops" if pipeline_def.is_job else "solids"
solid_config_dict = composite_descent(
pipeline_def, config_value.get(node_key, {}), mode_def.resource_defs
)
input_configs = config_value.get("inputs", {})
return ResolvedRunConfig(
solids=solid_config_dict,
execution=ExecutionConfig.from_dict(config_mapped_execution_configs),
loggers=config_mapped_logger_configs,
original_config_dict=run_config,
resources=config_mapped_resource_configs,
mode=mode,
inputs=input_configs,
)
def to_dict(self) -> Dict[str, Any]:
env_dict = {}
solid_configs = {}
for solid_name, solid_config in self.solids.items():
solid_configs[solid_name] = {
"config": solid_config.config,
"inputs": solid_config.inputs,
"outputs": solid_config.outputs.config,
}
env_dict["solids"] = solid_configs
env_dict["execution"] = (
{self.execution.execution_engine_name: self.execution.execution_engine_config}
if self.execution.execution_engine_name
else {}
)
env_dict["resources"] = {
resource_name: {"config": resource_config.config}
for resource_name, resource_config in self.resources.items()
}
env_dict["loggers"] = self.loggers
return env_dict
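# Illustrative usage (the pipeline and mode names are hypothetical):
#   resolved = ResolvedRunConfig.build(pipeline_def, run_config={"solids": {...}}, mode="default")
#   resolved.to_dict()  # round-trips the validated config back to a plain dict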
def config_map_executor(
executor_config: Dict[str, Any],
executor_def: ExecutorDefinition,
) -> Dict[str, Any]:
executor_config_evr = executor_def.apply_config_mapping(executor_config)
if not executor_config_evr.success:
raise DagsterInvalidConfigError(
f"Invalid configuration provided for executor '{executor_def.name}'",
executor_config_evr.errors,
executor_config,
)
return {executor_def.name: executor_config_evr.value}
def config_map_resources(
resource_defs: Dict[str, ResourceDefinition],
resource_configs: Dict[str, Any],
) -> Dict[str, ResourceConfig]:
"""This function executes the config mappings for resources with respect to ConfigurableDefinition.
It iterates over resource_defs and looks up the corresponding config because resources need to
be mapped regardless of whether they receive config from run_config."""
config_mapped_resource_configs = {}
for resource_key, resource_def in resource_defs.items():
resource_config = resource_configs.get(resource_key, {})
resource_config_evr = resource_def.apply_config_mapping(resource_config)
if not resource_config_evr.success:
raise DagsterInvalidConfigError(
"Error in config for resource {}".format(resource_key),
resource_config_evr.errors,
resource_config,
)
else:
config_mapped_resource_configs[resource_key] = ResourceConfig.from_dict(
resource_config_evr.value
)
return config_mapped_resource_configs
def config_map_loggers(
pipeline_def: PipelineDefinition,
config_value: Dict[str, Any],
mode: str,
) -> Dict[str, Any]:
"""This function executes the config mappings for loggers with respect to ConfigurableDefinition.
It uses the `loggers` key on the run_config to determine which loggers will be initialized (and
thus which ones need config mapping) and then iterates over each, looking up the corresponding
LoggerDefinition in `mode_def.loggers`.
The following are the cases of run_config and loggers on mode_def that could emerge
Run Config Loggers on Mode Def Behavior Which Loggers Need Config Mapping?
------------------------------------- -------------------- -------------------------------------------------------------- -------------------------------------
{} or {'loggers': <dict or None>} [] default system loggers with default config all loggers on run config (empty set)
{} or {'loggers': <dict or None>} [custom_logger, ...] default system loggers with default config all loggers on run config (empty set)
{'loggers': {'custom_logger': <dict or None>}} [custom_logger, ...] use only the loggers listed in run_config all loggers on run config
{'loggers': {'console': <dict or None>}} [] use only the loggers listed in run_config (with default defs) all loggers on run config
The behavior of `run_config.loggers` as a source of truth for logger selection comes from:
python_modules/dagster/dagster/core/execution/context_creation_pipeline.py#create_log_manager
See that codepath for more info on how the behavior in the above table is implemented. The logic
in that function is tightly coupled to this one and changes in either path should be confirmed
in the other.
"""
mode_def = pipeline_def.get_mode_definition(mode)
logger_configs = config_value.get("loggers", {})
config_mapped_logger_configs = {}
for logger_key, logger_config in logger_configs.items():
logger_def = mode_def.loggers.get(logger_key)
if logger_def is None:
check.failed(f"No logger found for key {logger_key}")
logger_config_evr = logger_def.apply_config_mapping(logger_config)
if not logger_config_evr.success:
raise DagsterInvalidConfigError(
"Error in config for logger {}".format(logger_key),
logger_config_evr.errors,
logger_config,
)
else:
config_mapped_logger_configs[logger_key] = logger_config_evr.value
return config_mapped_logger_configs
def config_map_objects(
config_value: Any,
defs: List[ExecutorDefinition],
keyed_by: str,
def_type: Type,
name_of_def_type: str,
) -> Optional[Dict[str, Any]]:
"""This function executes the config mappings for executors definitions with respect to
ConfigurableDefinition. It calls the ensure_single_item macro on the incoming config and then
applies config mapping to the result and the first executor_def with the same name on
the mode_def."""
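    # e.g. (hypothetical executor name and config):
    #   config_value = {"execution": {"multiprocess": {"config": {"max_concurrent": 4}}}}
    #   -> returns {"multiprocess": <config-mapped executor config>}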
config = config_value.get(keyed_by)
check.opt_dict_param(config, "config", key_type=str)
if not config:
return None
obj_name, obj_config = ensure_single_item(config)
obj_def = next(
(defi for defi in defs if defi.name == obj_name), None
) # obj_defs are stored in a list and we want to find the def matching name
check.inst(
obj_def,
def_type,
(
"Could not find a {def_type} definition on the selected mode that matches the "
'{def_type} "{obj_name}" given in run config'
).format(def_type=def_type, obj_name=obj_name),
)
obj_def = cast(ConfigurableDefinition, obj_def)
obj_config_evr = obj_def.apply_config_mapping(obj_config)
if not obj_config_evr.success:
raise DagsterInvalidConfigError(
'Invalid configuration provided for {} "{}"'.format(name_of_def_type, obj_name),
obj_config_evr.errors,
obj_config,
)
return {obj_name: obj_config_evr.value}
class ExecutionConfig(
NamedTuple(
"_ExecutionConfig",
[
("execution_engine_name", Optional[str]),
("execution_engine_config", Dict[str, Any]),
],
)
):
def __new__(cls, execution_engine_name, execution_engine_config):
return super(ExecutionConfig, cls).__new__(
cls,
execution_engine_name=check.opt_str_param(
execution_engine_name,
"execution_engine_name", # "in_process"
),
execution_engine_config=check.opt_dict_param(
execution_engine_config, "execution_engine_config", key_type=str
),
)
@staticmethod
def from_dict(config=None):
check.opt_dict_param(config, "config", key_type=str)
if config:
execution_engine_name, execution_engine_config = ensure_single_item(config)
return ExecutionConfig(execution_engine_name, execution_engine_config.get("config"))
return ExecutionConfig(None, None)
| 38.079404 | 180 | 0.637821 | from typing import AbstractSet, Any, Dict, List, NamedTuple, Optional, Type, Union, cast
from dagster import check
from dagster.core.definitions.configurable import ConfigurableDefinition
from dagster.core.definitions.executor_definition import (
ExecutorDefinition,
execute_in_process_executor,
)
from dagster.core.definitions.pipeline_definition import PipelineDefinition
from dagster.core.definitions.resource_definition import ResourceDefinition
from dagster.core.errors import DagsterInvalidConfigError
from dagster.utils import ensure_single_item
class SolidConfig(
NamedTuple(
"_SolidConfig",
[
("config", Any),
("inputs", Dict[str, Any]),
("outputs", "OutputsConfig"),
],
)
):
def __new__(cls, config, inputs, outputs):
return super(SolidConfig, cls).__new__(
cls,
config,
check.opt_dict_param(inputs, "inputs", key_type=str),
check.inst_param(outputs, "outputs", OutputsConfig),
)
@staticmethod
def from_dict(config):
check.dict_param(config, "config", key_type=str)
return SolidConfig(
config=config.get("config"),
inputs=config.get("inputs") or {},
outputs=OutputsConfig(config.get("outputs")),
)
class OutputsConfig(NamedTuple):
config: Union[Dict, List]
@property
def output_names(self) -> AbstractSet[str]:
if isinstance(self.config, list):
return {key for entry in self.config for key in entry.keys()}
elif isinstance(self.config, dict):
return self.config.keys()
else:
            return set()
@property
def type_materializer_specs(self) -> list:
if isinstance(self.config, list):
return self.config
else:
return []
def get_output_manager_config(self, output_name) -> Any:
if isinstance(self.config, dict):
return self.config.get(output_name)
else:
return None
class ResourceConfig(NamedTuple):
config: Any
@staticmethod
def from_dict(config):
check.dict_param(config, "config", key_type=str)
return ResourceConfig(config=config.get("config"))
class ResolvedRunConfig(
NamedTuple(
"_ResolvedRunConfig",
[
("solids", Dict[str, SolidConfig]),
("execution", "ExecutionConfig"),
("resources", Dict[str, ResourceConfig]),
("loggers", Dict[str, dict]),
("original_config_dict", Any),
("mode", str),
("inputs", Dict[str, Any]),
],
)
):
def __new__(
cls,
solids=None,
execution=None,
resources=None,
loggers=None,
original_config_dict=None,
mode=None,
inputs=None,
):
check.opt_inst_param(execution, "execution", ExecutionConfig)
check.opt_dict_param(original_config_dict, "original_config_dict")
check.opt_dict_param(resources, "resources", key_type=str)
check.opt_str_param(mode, "mode")
check.opt_dict_param(inputs, "inputs", key_type=str)
if execution is None:
execution = ExecutionConfig(None, None)
return super(ResolvedRunConfig, cls).__new__(
cls,
solids=check.opt_dict_param(solids, "solids", key_type=str, value_type=SolidConfig),
execution=execution,
resources=resources,
loggers=check.opt_dict_param(loggers, "loggers", key_type=str, value_type=dict),
original_config_dict=original_config_dict,
mode=mode,
inputs=inputs,
)
@staticmethod
def build(
pipeline_def: PipelineDefinition,
run_config: Optional[Dict[str, Any]] = None,
mode: Optional[str] = None,
) -> "ResolvedRunConfig":
from dagster.config.validate import process_config
from .composite_descent import composite_descent
check.inst_param(pipeline_def, "pipeline_def", PipelineDefinition)
run_config = check.opt_dict_param(run_config, "run_config")
check.opt_str_param(mode, "mode")
mode = mode or pipeline_def.get_default_mode_name()
run_config_schema = pipeline_def.get_run_config_schema(mode)
if run_config_schema.config_mapping:
run_config = run_config_schema.config_mapping.resolve_from_unvalidated_config(
run_config
)
config_evr = process_config(run_config_schema.run_config_schema_type, run_config)
if not config_evr.success:
raise DagsterInvalidConfigError(
f"Error in config for {pipeline_def.target_type}".format(pipeline_def.name),
config_evr.errors,
run_config,
)
config_value = config_evr.value
mode_def = pipeline_def.get_mode_definition(mode)
if (
len(mode_def.executor_defs) == 1
and mode_def.executor_defs[0]
== execute_in_process_executor
):
config_mapped_execution_configs: Optional[Dict[str, Any]] = {}
else:
if pipeline_def.is_job:
executor_config = config_value.get("execution", {})
config_mapped_execution_configs = config_map_executor(
executor_config, mode_def.executor_defs[0]
)
else:
config_mapped_execution_configs = config_map_objects(
config_value,
mode_def.executor_defs,
"execution",
ExecutorDefinition,
"executor",
)
resource_defs = pipeline_def.get_required_resource_defs_for_mode(mode)
resource_configs = config_value.get("resources", {})
config_mapped_resource_configs = config_map_resources(resource_defs, resource_configs)
config_mapped_logger_configs = config_map_loggers(pipeline_def, config_value, mode)
node_key = "ops" if pipeline_def.is_job else "solids"
solid_config_dict = composite_descent(
pipeline_def, config_value.get(node_key, {}), mode_def.resource_defs
)
input_configs = config_value.get("inputs", {})
return ResolvedRunConfig(
solids=solid_config_dict,
execution=ExecutionConfig.from_dict(config_mapped_execution_configs),
loggers=config_mapped_logger_configs,
original_config_dict=run_config,
resources=config_mapped_resource_configs,
mode=mode,
inputs=input_configs,
)
def to_dict(self) -> Dict[str, Any]:
env_dict = {}
solid_configs = {}
for solid_name, solid_config in self.solids.items():
solid_configs[solid_name] = {
"config": solid_config.config,
"inputs": solid_config.inputs,
"outputs": solid_config.outputs.config,
}
env_dict["solids"] = solid_configs
env_dict["execution"] = (
{self.execution.execution_engine_name: self.execution.execution_engine_config}
if self.execution.execution_engine_name
else {}
)
env_dict["resources"] = {
resource_name: {"config": resource_config.config}
for resource_name, resource_config in self.resources.items()
}
env_dict["loggers"] = self.loggers
return env_dict
def config_map_executor(
executor_config: Dict[str, Any],
executor_def: ExecutorDefinition,
) -> Dict[str, Any]:
executor_config_evr = executor_def.apply_config_mapping(executor_config)
if not executor_config_evr.success:
raise DagsterInvalidConfigError(
f"Invalid configuration provided for executor '{executor_def.name}'",
executor_config_evr.errors,
executor_config,
)
return {executor_def.name: executor_config_evr.value}
def config_map_resources(
resource_defs: Dict[str, ResourceDefinition],
resource_configs: Dict[str, Any],
) -> Dict[str, ResourceConfig]:
config_mapped_resource_configs = {}
for resource_key, resource_def in resource_defs.items():
resource_config = resource_configs.get(resource_key, {})
resource_config_evr = resource_def.apply_config_mapping(resource_config)
if not resource_config_evr.success:
raise DagsterInvalidConfigError(
"Error in config for resource {}".format(resource_key),
resource_config_evr.errors,
resource_config,
)
else:
config_mapped_resource_configs[resource_key] = ResourceConfig.from_dict(
resource_config_evr.value
)
return config_mapped_resource_configs
def config_map_loggers(
pipeline_def: PipelineDefinition,
config_value: Dict[str, Any],
mode: str,
) -> Dict[str, Any]:
mode_def = pipeline_def.get_mode_definition(mode)
logger_configs = config_value.get("loggers", {})
config_mapped_logger_configs = {}
for logger_key, logger_config in logger_configs.items():
logger_def = mode_def.loggers.get(logger_key)
if logger_def is None:
check.failed(f"No logger found for key {logger_key}")
logger_config_evr = logger_def.apply_config_mapping(logger_config)
if not logger_config_evr.success:
raise DagsterInvalidConfigError(
"Error in config for logger {}".format(logger_key),
logger_config_evr.errors,
logger_config,
)
else:
config_mapped_logger_configs[logger_key] = logger_config_evr.value
return config_mapped_logger_configs
def config_map_objects(
config_value: Any,
defs: List[ExecutorDefinition],
keyed_by: str,
def_type: Type,
name_of_def_type: str,
) -> Optional[Dict[str, Any]]:
config = config_value.get(keyed_by)
check.opt_dict_param(config, "config", key_type=str)
if not config:
return None
obj_name, obj_config = ensure_single_item(config)
obj_def = next(
(defi for defi in defs if defi.name == obj_name), None
)
check.inst(
obj_def,
def_type,
(
"Could not find a {def_type} definition on the selected mode that matches the "
'{def_type} "{obj_name}" given in run config'
).format(def_type=def_type, obj_name=obj_name),
)
obj_def = cast(ConfigurableDefinition, obj_def)
obj_config_evr = obj_def.apply_config_mapping(obj_config)
if not obj_config_evr.success:
raise DagsterInvalidConfigError(
'Invalid configuration provided for {} "{}"'.format(name_of_def_type, obj_name),
obj_config_evr.errors,
obj_config,
)
return {obj_name: obj_config_evr.value}
class ExecutionConfig(
NamedTuple(
"_ExecutionConfig",
[
("execution_engine_name", Optional[str]),
("execution_engine_config", Dict[str, Any]),
],
)
):
def __new__(cls, execution_engine_name, execution_engine_config):
return super(ExecutionConfig, cls).__new__(
cls,
execution_engine_name=check.opt_str_param(
execution_engine_name,
"execution_engine_name",
),
execution_engine_config=check.opt_dict_param(
execution_engine_config, "execution_engine_config", key_type=str
),
)
@staticmethod
def from_dict(config=None):
check.opt_dict_param(config, "config", key_type=str)
if config:
execution_engine_name, execution_engine_config = ensure_single_item(config)
return ExecutionConfig(execution_engine_name, execution_engine_config.get("config"))
return ExecutionConfig(None, None)
| true | true |
f7383409d60e884774a52ce5ee85d23d3de82415 | 784 | py | Python | examples/example_web_app/example_web_app/routes.py | aalhour/cookiecutter-aiohttp-sqlalchemy | adc495653246d7471a26c66cdbefb25c6302f4fa | [
"MIT"
] | 46 | 2018-09-30T00:05:43.000Z | 2022-02-08T05:10:13.000Z | examples/example_web_app/example_web_app/routes.py | aalhour/cookiecutter-aiohttp-sqlalchemy | adc495653246d7471a26c66cdbefb25c6302f4fa | [
"MIT"
] | 9 | 2018-10-02T09:01:15.000Z | 2020-05-27T08:17:28.000Z | examples/example_web_app/example_web_app/routes.py | aalhour/cookiecutter-aiohttp-sqlalchemy | adc495653246d7471a26c66cdbefb25c6302f4fa | [
"MIT"
] | 7 | 2018-10-02T05:30:41.000Z | 2021-02-17T09:19:06.000Z | """
Routes module.
Responsible for providing the means to register the application routes.
"""
from example_web_app.controllers.health_api import HealthApiController
from example_web_app.controllers.example_api import ExampleApiController
def setup_routes(app):
###
# Register the HelloWorld API handlers
#
health_api = HealthApiController()
example_api = ExampleApiController()
###
# API v1.0 ROUTES
#
# Add your public v1.0 API routes here
#
app.router.add_get('/api/v1.0/examples', example_api.get)
app.router.add_get('/api/v1.0/examples/{id}', example_api.get_by_id)
###
# INTERNAL API ROUTES
#
# Add your internal/administrative API routes here
#
app.router.add_get('/api/-/health', health_api.get)
| 23.058824 | 72 | 0.700255 |
from example_web_app.controllers.health_api import HealthApiController
from example_web_app.controllers.example_api import ExampleApiController
def setup_routes(app):
health_api = HealthApiController()
example_api = ExampleApiController()
app.router.add_get('/api/v1.0/examples', example_api.get)
app.router.add_get('/api/v1.0/examples/{id}', example_api.get_by_id)
app.router.add_get('/api/-/health', health_api.get)
| true | true |
f73834a22c84d04a1dccfb6b1fe202f392ec82a7 | 5,521 | py | Python | Moller-Plesset/MP3.py | andyj10224/psi4numpy | cbef6ddcb32ccfbf773befea6dc4aaae2b428776 | [
"BSD-3-Clause"
] | 214 | 2017-03-01T08:04:48.000Z | 2022-03-23T08:52:04.000Z | Moller-Plesset/MP3.py | andyj10224/psi4numpy | cbef6ddcb32ccfbf773befea6dc4aaae2b428776 | [
"BSD-3-Clause"
] | 100 | 2017-03-03T13:20:20.000Z | 2022-03-05T18:20:27.000Z | Moller-Plesset/MP3.py | andyj10224/psi4numpy | cbef6ddcb32ccfbf773befea6dc4aaae2b428776 | [
"BSD-3-Clause"
] | 150 | 2017-02-17T19:44:47.000Z | 2022-03-22T05:52:43.000Z | """
Reference implementation for the correlation energy of MP3 with an RHF reference.
References:
- Equations from [Szabo:1996]
"""
__authors__ = "Daniel G. A. Smith"
__credits__ = ["Daniel G. A. Smith", "Dominic A. Sirianni"]
__copyright__ = "(c) 2014-2018, The Psi4NumPy Developers"
__license__ = "BSD-3-Clause"
__date__ = "2017-05-23"
import time
import numpy as np
np.set_printoptions(precision=5, linewidth=200, suppress=True)
import psi4
# Memory for Psi4 in GB
psi4.set_memory('2 GB')
psi4.core.set_output_file('output.dat', False)
# Memory for numpy in GB
numpy_memory = 2
mol = psi4.geometry("""
O
H 1 1.1
H 1 1.1 2 104
symmetry c1
""")
psi4.set_options({'basis': 'aug-cc-pvdz',
'scf_type': 'pk',
'guess': 'core',
'mp2_type': 'conv',
'mp_type': 'conv',
'freeze_core': 'false',
'e_convergence': 1e-8,
'd_convergence': 1e-8})
# First compute RHF energy using Psi4
scf_e, wfn = psi4.energy('SCF', return_wfn=True)
# Coefficient Matrix
C = np.array(wfn.Ca())
# Double occupied orbitals
ndocc = wfn.doccpi()[0]
# Number of molecular orbitals
nmo = wfn.nmo()
# SCF energy
SCF_E = wfn.energy()
# Orbital energies
eps = wfn.epsilon_a()
eps = np.array([eps.get(x) for x in range(C.shape[0])])
# Compute size of ERI tensor in GB
ERI_Size = (nmo**4)*8.0 / 1E9
print("Size of the ERI tensor will be %4.2f GB." % ERI_Size)
memory_footprint = ERI_Size*2.5
if memory_footprint > numpy_memory:
    psi4.core.clean()
raise Exception("Estimated memory utilization (%4.2f GB) exceeds numpy_memory limit of %4.2f GB." % (memory_footprint, numpy_memory))
# Integral generation from Psi4's MintsHelper
t = time.time()
mints = psi4.core.MintsHelper(wfn.basisset())
I = np.array(mints.ao_eri())
I = I.reshape(nmo, nmo, nmo, nmo)
print('\nTotal time taken for ERI integrals: %.3f seconds.' % (time.time()-t))
t=time.time()
# Complete the AOpqrs -> MOiajb step
MO = np.einsum('rJ,pqrs->pqJs', C, I)
MO = np.einsum('pI,pqJs->IqJs', C, MO)
MO = np.einsum('sB,IqJs->IqJB', C, MO)
MO = np.einsum('qA,IqJB->IAJB', C, MO)
# (pq|rs) -> <ps|rq>
MO = MO.swapaxes(1, 2)
print('\nTotal time taken for integral transformation: %.f seconds' % (time.time()-t))
print('Shape of MO integrals %s \n' % str(MO.shape))
# Build the energy-denominator tensor: epsilon[a,b,r,s] = 1 / (e_a + e_b - e_r - e_s)
eocc = eps[:ndocc]
evirt = eps[ndocc:]
epsilon = 1/(eocc.reshape(-1, 1, 1, 1) + eocc.reshape(-1, 1, 1) - evirt.reshape(-1, 1) - evirt)
# Build o and v slices
o = slice(0, ndocc)
v = slice(ndocc, MO.shape[0])
### MP2 correlation energy
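# Spin-adapted closed-shell expression (cf. [Szabo:1996], Ch. 6):
#   E_MP2 = sum_{a,b,r,s} <ab|rs> * (2 <rs|ab> - <rs|ba>) / (e_a + e_b - e_r - e_s)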
MP2corr_E = 2 * np.einsum('abrs,rsab,abrs', MO[o, o, v, v], MO[v, v, o, o], epsilon)
MP2corr_E -= np.einsum('abrs,rsba,abrs', MO[o, o, v, v], MO[v, v, o, o], epsilon)
MP2total_E = SCF_E + MP2corr_E
print('MP2 correlation energy: %16.8f' % MP2corr_E)
print('MP2 total energy: %16.8f' % MP2total_E)
psi4.compare_values(psi4.energy('MP2'), MP2total_E, 6, 'MP2 Energy')
print('\n Starting MP3 energy...')
t = time.time()
# MP3 Correlation energy
# Prefactors taken from terms in unnumbered expression for spatial-orbital MP3
# energy on [Szabo:1996] pp. (bottom) 367 - (top) 368. Individual equations taken
# from [Szabo:1996] Tbl. 6.2 pp. 364-365
# Equation 1: 3rd order diagram 1
MP3corr_E = 2.0 * np.einsum('abru,ruts,tsab,abru,abts', MO[o, o, v, v], MO[v, v, v, v], MO[v, v, o, o], epsilon, epsilon)
# Equation 2: 3rd order diagram 2
MP3corr_E += 2.0 * np.einsum('adrs,cbad,rscb,adrs,cbrs', MO[o, o, v, v], MO[o, o, o, o], MO[v, v, o, o], epsilon, epsilon)
# Equation 3: 3rd order diagram 3
MP3corr_E += -4.0 * np.einsum('acrt,rbsc,stab,acrt,abst', MO[o, o, v, v], MO[v, o, v, o], MO[v, v, o, o], epsilon, epsilon)
# Equation 4: 3rd order diagram 4
MP3corr_E += -4.0 * np.einsum('bcrt,rasb,stac,bcrt,acst', MO[o, o, v, v], MO[v, o, v, o], MO[v, v, o, o], epsilon, epsilon)
# Equation 5: 3rd order diagram 5
MP3corr_E += 8.0 * np.einsum('acrt,btsc,rsab,acrt,abrs', MO[o, o, v, v], MO[o, v, v, o], MO[v, v, o, o], epsilon, epsilon)
# Equation 6: 3rd order diagram 6
MP3corr_E += 2.0 * np.einsum('cbrt,atsc,rsab,cbrt,abrs', MO[o, o, v, v], MO[o, v, v, o], MO[v, v, o, o], epsilon, epsilon)
# Equation 7: 3rd order diagram 7
MP3corr_E += -1.0 * np.einsum('acrs,dbac,srdb,acrs,dbrs', MO[o, o, v, v], MO[o, o, o, o], MO[v, v, o, o], epsilon, epsilon)
# Equation 8: 3rd order diagram 8
MP3corr_E += -1.0 * np.einsum('abrt,trus,usab,abtr,abus', MO[o, o, v, v], MO[v, v, v, v], MO[v, v, o, o], epsilon, epsilon)
# Equation 9: 3rd order diagram 9
MP3corr_E += 2.0 * np.einsum('bcrt,arbs,tsac,cbrt,acst', MO[o, o, v, v], MO[o, v, o, v], MO[v, v, o, o], epsilon, epsilon)
# Equation 10: 3rd order diagram 10
MP3corr_E += 2.0 * np.einsum('cbrt,rasb,stac,cbrt,acst', MO[o, o, v, v], MO[v, o, v, o], MO[v, v, o, o], epsilon, epsilon)
# Equation 11: 3rd order diagram 11
MP3corr_E += -4.0 * np.einsum('abrs,scat,rtbc,abrs,cbrt', MO[o, o, v, v], MO[v, o, o, v], MO[v, v, o, o], epsilon, epsilon)
# Equation 12: 3rd order diagram 12
MP3corr_E += -4.0 * np.einsum('bcrt,atsc,rsab,bctr,abrs', MO[o, o, v, v], MO[o, v, v, o], MO[v, v, o, o], epsilon, epsilon)
print('...took %.3f seconds to compute MP3 correlation energy.\n' % (time.time()-t))
print('Third order energy: %16.8f' % MP3corr_E)
MP3corr_E += MP2corr_E
MP3total_E = SCF_E + MP3corr_E
print('MP3 correlation energy: %16.8f' % MP3corr_E)
print('MP3 total energy: %16.8f' % MP3total_E)
psi4.compare_values(psi4.energy('MP3'), MP3total_E, 6, 'MP3 Energy')
| 36.806667 | 137 | 0.641369 |
__authors__ = "Daniel G. A. Smith"
__credits__ = ["Daniel G. A. Smith", "Dominic A. Sirianni"]
__copyright__ = "(c) 2014-2018, The Psi4NumPy Developers"
__license__ = "BSD-3-Clause"
__date__ = "2017-05-23"
import time
import numpy as np
np.set_printoptions(precision=5, linewidth=200, suppress=True)
import psi4
psi4.set_memory('2 GB')
psi4.core.set_output_file('output.dat', False)
numpy_memory = 2
mol = psi4.geometry("""
O
H 1 1.1
H 1 1.1 2 104
symmetry c1
""")
psi4.set_options({'basis': 'aug-cc-pvdz',
'scf_type': 'pk',
'guess': 'core',
'mp2_type': 'conv',
'mp_type': 'conv',
'freeze_core': 'false',
'e_convergence': 1e-8,
'd_convergence': 1e-8})
scf_e, wfn = psi4.energy('SCF', return_wfn=True)
C = np.array(wfn.Ca())
ndocc = wfn.doccpi()[0]
nmo = wfn.nmo()
SCF_E = wfn.energy()
eps = wfn.epsilon_a()
eps = np.array([eps.get(x) for x in range(C.shape[0])])
ERI_Size = (nmo**4)*8.0 / 1E9
print("Size of the ERI tensor will be %4.2f GB." % ERI_Size)
memory_footprint = ERI_Size*2.5
if memory_footprint > numpy_memory:
    psi4.core.clean()
raise Exception("Estimated memory utilization (%4.2f GB) exceeds numpy_memory limit of %4.2f GB." % (memory_footprint, numpy_memory))
t = time.time()
mints = psi4.core.MintsHelper(wfn.basisset())
I = np.array(mints.ao_eri())
I = I.reshape(nmo, nmo, nmo, nmo)
print('\nTotal time taken for ERI integrals: %.3f seconds.' % (time.time()-t))
t=time.time()
# Complete the AOpqrs -> MOiajb step
MO = np.einsum('rJ,pqrs->pqJs', C, I)
MO = np.einsum('pI,pqJs->IqJs', C, MO)
MO = np.einsum('sB,IqJs->IqJB', C, MO)
MO = np.einsum('qA,IqJB->IAJB', C, MO)
# (pq|rs) -> <ps|rq>
MO = MO.swapaxes(1, 2)
print('\nTotal time taken for integral transformation: %.f seconds' % (time.time()-t))
print('Shape of MO integrals %s \n' % str(MO.shape))
# Build the energy-denominator tensor: epsilon[a,b,r,s] = 1 / (e_a + e_b - e_r - e_s)
eocc = eps[:ndocc]
evirt = eps[ndocc:]
epsilon = 1/(eocc.reshape(-1, 1, 1, 1) + eocc.reshape(-1, 1, 1) - evirt.reshape(-1, 1) - evirt)
# Build o and v slices
o = slice(0, ndocc)
v = slice(ndocc, MO.shape[0])
### MP2 correlation energy
MP2corr_E = 2 * np.einsum('abrs,rsab,abrs', MO[o, o, v, v], MO[v, v, o, o], epsilon)
MP2corr_E -= np.einsum('abrs,rsba,abrs', MO[o, o, v, v], MO[v, v, o, o], epsilon)
MP2total_E = SCF_E + MP2corr_E
print('MP2 correlation energy: %16.8f' % MP2corr_E)
print('MP2 total energy: %16.8f' % MP2total_E)
psi4.compare_values(psi4.energy('MP2'), MP2total_E, 6, 'MP2 Energy')
print('\n Starting MP3 energy...')
t = time.time()
# MP3 Correlation energy
# Prefactors taken from terms in unnumbered expression for spatial-orbital MP3
# energy on [Szabo:1996] pp. (bottom) 367 - (top) 368. Individual equations taken
# from [Szabo:1996] Tbl. 6.2 pp. 364-365
# Equation 1: 3rd order diagram 1
MP3corr_E = 2.0 * np.einsum('abru,ruts,tsab,abru,abts', MO[o, o, v, v], MO[v, v, v, v], MO[v, v, o, o], epsilon, epsilon)
# Equation 2: 3rd order diagram 2
MP3corr_E += 2.0 * np.einsum('adrs,cbad,rscb,adrs,cbrs', MO[o, o, v, v], MO[o, o, o, o], MO[v, v, o, o], epsilon, epsilon)
# Equation 3: 3rd order diagram 3
MP3corr_E += -4.0 * np.einsum('acrt,rbsc,stab,acrt,abst', MO[o, o, v, v], MO[v, o, v, o], MO[v, v, o, o], epsilon, epsilon)
# Equation 4: 3rd order diagram 4
MP3corr_E += -4.0 * np.einsum('bcrt,rasb,stac,bcrt,acst', MO[o, o, v, v], MO[v, o, v, o], MO[v, v, o, o], epsilon, epsilon)
# Equation 5: 3rd order diagram 5
MP3corr_E += 8.0 * np.einsum('acrt,btsc,rsab,acrt,abrs', MO[o, o, v, v], MO[o, v, v, o], MO[v, v, o, o], epsilon, epsilon)
# Equation 6: 3rd order diagram 6
MP3corr_E += 2.0 * np.einsum('cbrt,atsc,rsab,cbrt,abrs', MO[o, o, v, v], MO[o, v, v, o], MO[v, v, o, o], epsilon, epsilon)
# Equation 7: 3rd order diagram 7
MP3corr_E += -1.0 * np.einsum('acrs,dbac,srdb,acrs,dbrs', MO[o, o, v, v], MO[o, o, o, o], MO[v, v, o, o], epsilon, epsilon)
# Equation 8: 3rd order diagram 8
MP3corr_E += -1.0 * np.einsum('abrt,trus,usab,abtr,abus', MO[o, o, v, v], MO[v, v, v, v], MO[v, v, o, o], epsilon, epsilon)
# Equation 9: 3rd order diagram 9
MP3corr_E += 2.0 * np.einsum('bcrt,arbs,tsac,cbrt,acst', MO[o, o, v, v], MO[o, v, o, v], MO[v, v, o, o], epsilon, epsilon)
# Equation 10: 3rd order diagram 10
MP3corr_E += 2.0 * np.einsum('cbrt,rasb,stac,cbrt,acst', MO[o, o, v, v], MO[v, o, v, o], MO[v, v, o, o], epsilon, epsilon)
# Equation 11: 3rd order diagram 11
MP3corr_E += -4.0 * np.einsum('abrs,scat,rtbc,abrs,cbrt', MO[o, o, v, v], MO[v, o, o, v], MO[v, v, o, o], epsilon, epsilon)
# Equation 12: 3rd order diagram 12
MP3corr_E += -4.0 * np.einsum('bcrt,atsc,rsab,bctr,abrs', MO[o, o, v, v], MO[o, v, v, o], MO[v, v, o, o], epsilon, epsilon)
print('...took %.3f seconds to compute MP3 correlation energy.\n' % (time.time()-t))
print('Third order energy: %16.8f' % MP3corr_E)
MP3corr_E += MP2corr_E
MP3total_E = SCF_E + MP3corr_E
print('MP3 correlation energy: %16.8f' % MP3corr_E)
print('MP3 total energy: %16.8f' % MP3total_E)
psi4.compare_values(psi4.energy('MP3'), MP3total_E, 6, 'MP3 Energy')
| true | true |
f7383531ef1ee1486a551d10b914dcca31357feb | 447 | py | Python | src/anonymous_permissions/compat.py | saxix/django-anonymoususer-permissions | 6b65145c16915f502385de0251fe3541e4b89134 | [
"MIT"
] | 1 | 2020-09-06T01:04:00.000Z | 2020-09-06T01:04:00.000Z | src/anonymous_permissions/compat.py | saxix/django-anonymoususer-permissions | 6b65145c16915f502385de0251fe3541e4b89134 | [
"MIT"
] | 7 | 2020-06-02T07:07:28.000Z | 2020-09-13T07:29:38.000Z | src/anonymous_permissions/compat.py | saxix/django-anonymoususer-permissions | 6b65145c16915f502385de0251fe3541e4b89134 | [
"MIT"
] | 1 | 2020-05-25T04:14:53.000Z | 2020-05-25T04:14:53.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import django
import six
DJANGO3 = django.VERSION[0] == 3
DJANGO2 = django.VERSION[0] == 2
#
# if DJANGO2 or DJANGO3:
# def is_anonymous(user):
# return user.is_anonymous
#
# else:
# def is_anonymous(user):
# return user.is_anonymous()
if six.PY2:
from django.utils.lru_cache import lru_cache
else:
from functools import lru_cache
| 20.318182 | 56 | 0.691275 |
from __future__ import absolute_import, unicode_literals
import django
import six
DJANGO3 = django.VERSION[0] == 3
DJANGO2 = django.VERSION[0] == 2
if six.PY2:
from django.utils.lru_cache import lru_cache
else:
from functools import lru_cache
| true | true |
f7383544b957f63a0149ac94251a0aea8fbc4cbc | 81 | py | Python | obdlive/obd/apps.py | hoke-t/OBDLive | 524fb53fad5924b8371d2fce8d7a482bd8112362 | [
"MIT"
] | 8 | 2018-12-15T16:41:21.000Z | 2021-10-03T21:19:11.000Z | obdlive/obd/apps.py | hoke-t/OBDLive | 524fb53fad5924b8371d2fce8d7a482bd8112362 | [
"MIT"
] | null | null | null | obdlive/obd/apps.py | hoke-t/OBDLive | 524fb53fad5924b8371d2fce8d7a482bd8112362 | [
"MIT"
] | 1 | 2020-07-27T18:15:58.000Z | 2020-07-27T18:15:58.000Z | from django.apps import AppConfig
class ObdConfig(AppConfig):
name = 'obd'
| 13.5 | 33 | 0.728395 | from django.apps import AppConfig
class ObdConfig(AppConfig):
name = 'obd'
| true | true |
f7383592b79628058c7079a34a47b0cfb771440a | 20,835 | py | Python | bert_ner.py | KoconJan/BERT-NER-CLI | 6f1323bf6294bc05ee3ee9a58e5b932a68bb85c0 | [
"MIT"
] | 2 | 2019-05-09T17:08:01.000Z | 2019-06-05T14:54:00.000Z | bert_ner.py | KoconJan/BERT-NER-CLI | 6f1323bf6294bc05ee3ee9a58e5b932a68bb85c0 | [
"MIT"
] | null | null | null | bert_ner.py | KoconJan/BERT-NER-CLI | 6f1323bf6294bc05ee3ee9a58e5b932a68bb85c0 | [
"MIT"
] | null | null | null | #! usr/bin/env python3
# -*- coding:utf-8 -*-
"""
Copyright 2018 The Google AI Language Team Authors.
BASED ON Google_BERT.
@Author:zhoukaiyin
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from bert import modeling
from bert import optimization
from bert import tokenization
import tensorflow as tf
from sklearn.metrics import f1_score, precision_score, recall_score
from tensorflow.python.ops import math_ops
import tf_metrics
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
"data_dir", './drive/My Drive/ai/NERdata',
"The input datadir.",
)
flags.DEFINE_string(
"bert_config_file", './drive/My Drive/ai/checkpoint/bert_config.json',
"The config json file corresponding to the pre-trained BERT model."
)
flags.DEFINE_string(
"task_name", 'NER', "The name of the task to train."
)
flags.DEFINE_string(
"output_dir", './drive/My Drive/ai/output/result_dir/',
"The output directory where the model checkpoints will be written."
)
flags.DEFINE_string(
"tpu_name", 'gcp_tpu',
"Use Google Cloud Colaborator TPU to train"
)
## Other parameters
flags.DEFINE_string(
"init_checkpoint", './drive/My Drive/ai/checkpoint/bert_model.ckpt',
"Initial checkpoint (usually from a pre-trained BERT model)."
)
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text."
)
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization."
)
flags.DEFINE_bool(
"do_train", True,
"Whether to run training."
)
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0, "Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_string("vocab_file", './drive/My Drive/ai/checkpoint/vocab.txt',
"The vocabulary file that the BERT model was trained on.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text = text
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_ids):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_ids = label_ids
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_data(cls, input_file):
"""Reads a BIO data."""
with open(input_file) as f:
lines = []
words = []
labels = []
for line in f:
contends = line.strip()
word = line.strip().split(' ')[0]
label = line.strip().split(' ')[-1]
if contends.startswith("-DOCSTART-"):
words.append('')
continue
                # Guard against indexing an empty list if the file starts with a blank line.
                if len(contends) == 0 and len(words) > 0 and words[-1] == '.':
l = ' '.join([label for label in labels if len(label) > 0])
w = ' '.join([word for word in words if len(word) > 0])
lines.append([l, w])
words = []
labels = []
continue
words.append(word)
labels.append(label)
return lines
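    # --- Illustrative sketch (editor's addition, not part of the original
    # script): _read_data expects CoNLL-style "token label" lines such as
    #
    #     EU B-ORG
    #     rejects O
    #     German B-MISC
    #     . O
    #
    # where a blank line after "." closes the sentence.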
class NerProcessor(DataProcessor):
def get_train_examples(self, data_dir):
return self._create_example(
self._read_data(os.path.join(data_dir, "train.txt")), "train"
)
def get_dev_examples(self, data_dir):
return self._create_example(
self._read_data(os.path.join(data_dir, "dev.txt")), "dev"
)
def get_labels(self):
return ["B-MISC", "I-MISC", "O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", "X"]
def _create_example(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text = tokenization.convert_to_unicode(line[1])
label = tokenization.convert_to_unicode(line[0])
examples.append(InputExample(guid=guid, text=text, label=label))
return examples
def convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer):
label_map = {}
for (i, label) in enumerate(label_list, 1):
label_map[label] = i
textlist = example.text.split(' ')
labellist = example.label.split(' ')
tokens = []
labels = []
for i, word in enumerate(textlist):
token = tokenizer.tokenize(word)
tokens.extend(token)
label_1 = labellist[i]
for m in range(len(token)):
if m == 0:
labels.append(label_1)
else:
labels.append("X")
# tokens = tokenizer.tokenize(example.text)
if len(tokens) >= max_seq_length - 1:
tokens = tokens[0:(max_seq_length - 2)]
labels = labels[0:(max_seq_length - 2)]
ntokens = []
segment_ids = []
label_ids = []
ntokens.append("[CLS]")
segment_ids.append(0)
label_ids.append(0)
for i, token in enumerate(tokens):
ntokens.append(token)
segment_ids.append(0)
label_ids.append(label_map[labels[i]])
ntokens.append("[SEP]")
segment_ids.append(0)
label_ids.append(0)
input_ids = tokenizer.convert_tokens_to_ids(ntokens)
input_mask = [1] * len(input_ids)
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
label_ids.append(0)
# print(len(input_ids))
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
if ex_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info("guid: %s" % (example.guid))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
tf.logging.info("label_ids: %s" % " ".join([str(x) for x in label_ids]))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_ids=label_ids
)
return feature
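# --- Illustrative sketch (editor's addition, not part of the original script).
# It mirrors the alignment rule in convert_single_example: the first WordPiece
# of each word keeps the word's label and later pieces get "X". The piece
# splits below are hypothetical stand-ins for real tokenizer output.
def _demo_wordpiece_label_alignment():
    labels = ["B-PER", "I-PER", "O"]
    pieces = [["Jim"], ["Hen", "##son"], ["puppet", "##eer"]]  # assumed splits
    out_tokens, out_labels = [], []
    for word_pieces, label in zip(pieces, labels):
        for m, piece in enumerate(word_pieces):
            out_tokens.append(piece)
            out_labels.append(label if m == 0 else "X")
    # -> (['Jim', 'Hen', '##son', 'puppet', '##eer'],
    #     ['B-PER', 'I-PER', 'X', 'O', 'X'])
    return out_tokens, out_labels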
def filed_based_convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer, output_file
):
writer = tf.python_io.TFRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
if ex_index % 5000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature(feature.label_ids)
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder):
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([seq_length], tf.int64),
}
def _decode_record(record, name_to_features):
example = tf.parse_single_example(record, name_to_features)
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
batch_size = params["batch_size"]
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder
))
return d
return input_fn
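# --- Illustrative sketch (editor's addition): how the builder above is wired
# up. The file name and batch size are hypothetical; TPUEstimator passes
# params={"batch_size": ...} when it invokes input_fn.
def _demo_input_fn_usage():
    input_fn = file_based_input_fn_builder(
        input_file="train.tf_record",
        seq_length=FLAGS.max_seq_length,
        is_training=True,
        drop_remainder=True)
    return input_fn({"batch_size": 32})  # a batched tf.data.Dataset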
def create_model(bert_config, is_training, input_ids, input_mask,
segment_ids, labels, num_labels, use_one_hot_embeddings):
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings
)
output_layer = model.get_sequence_output()
hidden_size = output_layer.shape[-1].value
output_weight = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02)
)
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer()
)
with tf.variable_scope("loss"):
if is_training:
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
output_layer = tf.reshape(output_layer, [-1, hidden_size])
logits = tf.matmul(output_layer, output_weight, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
        # Reshape back to [batch, seq_len, num_labels]; use num_labels rather
        # than a hard-coded 11 so the head tracks the label set (10 tags + pad).
        logits = tf.reshape(logits, [-1, FLAGS.max_seq_length, num_labels])
log_probs = tf.nn.log_softmax(logits, axis=-1)
# labels = tf.cast(labels,dtype=tf.float32)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_sum(per_example_loss)
return (loss, per_example_loss, logits)
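# --- Editor's note (sketch): the summed loss above also counts padded
# positions (label id 0). A common refinement -- an assumption, not what this
# script does -- is to weight per-token losses by input_mask inside create_model:
#
#     mask = tf.cast(input_mask, tf.float32)
#     loss = tf.reduce_sum(per_example_loss * mask) / tf.reduce_sum(mask)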
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
def model_fn(features, labels, mode, params):
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(total_loss, per_example_loss, logits) = create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = tf.trainable_variables()
        scaffold_fn = None
        # Default so the variable-logging loop below works without a checkpoint.
        initialized_variable_names = {}
        if init_checkpoint:
(assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars,init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits):
                predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
                # Macro-average over the eight entity tags only (ids 1-2 and 4-9);
                # id 0 is padding, 3 is "O", and 10 is the subword tag "X".
                precision = tf_metrics.precision(label_ids, predictions, 11, [1, 2, 4, 5, 6, 7, 8, 9], average="macro")
                recall = tf_metrics.recall(label_ids, predictions, 11, [1, 2, 4, 5, 6, 7, 8, 9], average="macro")
                f = tf_metrics.f1(label_ids, predictions, 11, [1, 2, 4, 5, 6, 7, 8, 9], average="macro")
loss = tf.metrics.mean(per_example_loss)
return {
"eval_precision":precision,
"eval_recall":recall,
"eval_f": f,
"eval_loss": loss,
}
eval_metrics = (metric_fn, [per_example_loss, label_ids, logits])
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode))
return output_spec
return model_fn
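# --- Editor's note (sketch): model_fn above handles TRAIN and EVAL only. If
# inference were needed, a PREDICT branch could look like this (an assumption,
# untested, not part of the original script):
#
#     elif mode == tf.estimator.ModeKeys.PREDICT:
#         predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
#         output_spec = tf.contrib.tpu.TPUEstimatorSpec(
#             mode=mode, predictions={"label_ids": predictions},
#             scaffold_fn=scaffold_fn)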
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
processors = {
"ner": NerProcessor
}
if not FLAGS.do_train and not FLAGS.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver('grpc://' + os.environ['COLAB_TPU_ADDR'])
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
num_train_steps = int(
len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
model_fn = model_fn_builder(
bert_config=bert_config,
num_labels=len(label_list)+1,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size)
if FLAGS.do_train:
train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
filed_based_convert_examples_to_features(
train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_examples))
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=train_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
if FLAGS.do_eval:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
filed_based_convert_examples_to_features(
eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Num examples = %d", len(eval_examples))
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
eval_steps = None
if FLAGS.use_tpu:
eval_steps = int(len(eval_examples) / FLAGS.eval_batch_size)
eval_drop_remainder = True if FLAGS.use_tpu else False
eval_input_fn = file_based_input_fn_builder(
input_file=eval_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder)
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
tf.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
tf.app.run()
| 39.016854 | 126 | 0.624958 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from bert import modeling
from bert import optimization
from bert import tokenization
import tensorflow as tf
from sklearn.metrics import f1_score,precision_score,recall_score
from tensorflow.python.ops import math_ops
import tf_metrics
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
"data_dir", './drive/My Drive/ai/NERdata',
"The input datadir.",
)
flags.DEFINE_string(
"bert_config_file", './drive/My Drive/ai/checkpoint/bert_config.json',
"The config json file corresponding to the pre-trained BERT model."
)
flags.DEFINE_string(
"task_name", 'NER', "The name of the task to train."
)
flags.DEFINE_string(
"output_dir", './drive/My Drive/ai/output/result_dir/',
"The output directory where the model checkpoints will be written."
)
flags.DEFINE_string(
"tpu_name", 'gcp_tpu',
"Use Google Cloud Colaborator TPU to train"
)
flags.DEFINE_string(
"init_checkpoint", './drive/My Drive/ai/checkpoint/bert_model.ckpt',
"Initial checkpoint (usually from a pre-trained BERT model)."
)
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text."
)
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization."
)
flags.DEFINE_bool(
"do_train", True,
"Whether to run training."
)
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0, "Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_string("vocab_file", './drive/My Drive/ai/checkpoint/vocab.txt',
"The vocabulary file that the BERT model was trained on.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
class InputExample(object):
def __init__(self, guid, text, label=None):
self.guid = guid
self.text = text
self.label = label
class InputFeatures(object):
def __init__(self, input_ids, input_mask, segment_ids, label_ids):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_ids = label_ids
class DataProcessor(object):
def get_train_examples(self, data_dir):
raise NotImplementedError()
def get_dev_examples(self, data_dir):
raise NotImplementedError()
def get_labels(self):
raise NotImplementedError()
@classmethod
def _read_data(cls, input_file):
with open(input_file) as f:
lines = []
words = []
labels = []
for line in f:
contends = line.strip()
word = line.strip().split(' ')[0]
label = line.strip().split(' ')[-1]
if contends.startswith("-DOCSTART-"):
words.append('')
continue
                if len(contends) == 0 and len(words) > 0 and words[-1] == '.':
l = ' '.join([label for label in labels if len(label) > 0])
w = ' '.join([word for word in words if len(word) > 0])
lines.append([l, w])
words = []
labels = []
continue
words.append(word)
labels.append(label)
return lines
class NerProcessor(DataProcessor):
def get_train_examples(self, data_dir):
return self._create_example(
self._read_data(os.path.join(data_dir, "train.txt")), "train"
)
def get_dev_examples(self, data_dir):
return self._create_example(
self._read_data(os.path.join(data_dir, "dev.txt")), "dev"
)
def get_labels(self):
return ["B-MISC", "I-MISC", "O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", "X"]
def _create_example(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text = tokenization.convert_to_unicode(line[1])
label = tokenization.convert_to_unicode(line[0])
examples.append(InputExample(guid=guid, text=text, label=label))
return examples
def convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer):
label_map = {}
for (i, label) in enumerate(label_list, 1):
label_map[label] = i
textlist = example.text.split(' ')
labellist = example.label.split(' ')
tokens = []
labels = []
for i, word in enumerate(textlist):
token = tokenizer.tokenize(word)
tokens.extend(token)
label_1 = labellist[i]
for m in range(len(token)):
if m == 0:
labels.append(label_1)
else:
labels.append("X")
if len(tokens) >= max_seq_length - 1:
tokens = tokens[0:(max_seq_length - 2)]
labels = labels[0:(max_seq_length - 2)]
ntokens = []
segment_ids = []
label_ids = []
ntokens.append("[CLS]")
segment_ids.append(0)
label_ids.append(0)
for i, token in enumerate(tokens):
ntokens.append(token)
segment_ids.append(0)
label_ids.append(label_map[labels[i]])
ntokens.append("[SEP]")
segment_ids.append(0)
label_ids.append(0)
input_ids = tokenizer.convert_tokens_to_ids(ntokens)
input_mask = [1] * len(input_ids)
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
label_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
if ex_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info("guid: %s" % (example.guid))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
tf.logging.info("label_ids: %s" % " ".join([str(x) for x in label_ids]))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_ids=label_ids
)
return feature
def filed_based_convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer, output_file
):
writer = tf.python_io.TFRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
if ex_index % 5000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature(feature.label_ids)
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder):
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([seq_length], tf.int64),
}
def _decode_record(record, name_to_features):
example = tf.parse_single_example(record, name_to_features)
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
batch_size = params["batch_size"]
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder
))
return d
return input_fn
def create_model(bert_config, is_training, input_ids, input_mask,
segment_ids, labels, num_labels, use_one_hot_embeddings):
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings
)
output_layer = model.get_sequence_output()
hidden_size = output_layer.shape[-1].value
output_weight = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02)
)
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer()
)
with tf.variable_scope("loss"):
if is_training:
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
output_layer = tf.reshape(output_layer, [-1, hidden_size])
logits = tf.matmul(output_layer, output_weight, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
        logits = tf.reshape(logits, [-1, FLAGS.max_seq_length, num_labels])
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_sum(per_example_loss)
return (loss, per_example_loss, logits)
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
def model_fn(features, labels, mode, params):
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(total_loss, per_example_loss, logits) = create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = tf.trainable_variables()
        scaffold_fn = None
        initialized_variable_names = {}
        if init_checkpoint:
(assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars,init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
precision = tf_metrics.precision(label_ids,predictions,11,[1,2,4,5,6,7,8,9],average="macro")
recall = tf_metrics.recall(label_ids,predictions,11,[1,2,4,5,6,7,8,9],average="macro")
f = tf_metrics.f1(label_ids,predictions,11,[1,2,4,5,6,7,8,9],average="macro")
loss = tf.metrics.mean(per_example_loss)
return {
"eval_precision":precision,
"eval_recall":recall,
"eval_f": f,
"eval_loss": loss,
}
eval_metrics = (metric_fn, [per_example_loss, label_ids, logits])
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode))
return output_spec
return model_fn
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
processors = {
"ner": NerProcessor
}
if not FLAGS.do_train and not FLAGS.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver('grpc://' + os.environ['COLAB_TPU_ADDR'])
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
num_train_steps = int(
len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
model_fn = model_fn_builder(
bert_config=bert_config,
num_labels=len(label_list)+1,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size)
if FLAGS.do_train:
train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
filed_based_convert_examples_to_features(
train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_examples))
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=train_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
if FLAGS.do_eval:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
filed_based_convert_examples_to_features(
eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Num examples = %d", len(eval_examples))
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
eval_steps = None
if FLAGS.use_tpu:
eval_steps = int(len(eval_examples) / FLAGS.eval_batch_size)
eval_drop_remainder = True if FLAGS.use_tpu else False
eval_input_fn = file_based_input_fn_builder(
input_file=eval_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder)
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
tf.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
tf.app.run()
| true | true |
f73835ce779579d5cebafadd3e4c77418d84f3a6 | 26,957 | py | Python | talent/google/cloud/talent_v4beta1/gapic/application_service_client.py | beittatt/cloud-python | cdb4cc4f3c568ff32acf35c34910d23f2d3800a0 | [
"Apache-2.0"
] | 2 | 2021-11-26T07:08:43.000Z | 2022-03-07T20:20:04.000Z | talent/google/cloud/talent_v4beta1/gapic/application_service_client.py | beittatt/cloud-python | cdb4cc4f3c568ff32acf35c34910d23f2d3800a0 | [
"Apache-2.0"
] | null | null | null | talent/google/cloud/talent_v4beta1/gapic/application_service_client.py | beittatt/cloud-python | cdb4cc4f3c568ff32acf35c34910d23f2d3800a0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.talent.v4beta1 ApplicationService API."""
import functools
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.client_options
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
import google.api_core.page_iterator
import google.api_core.path_template
import grpc
from google.cloud.talent_v4beta1.gapic import application_service_client_config
from google.cloud.talent_v4beta1.gapic import enums
from google.cloud.talent_v4beta1.gapic.transports import (
application_service_grpc_transport,
)
from google.cloud.talent_v4beta1.proto import application_pb2
from google.cloud.talent_v4beta1.proto import application_service_pb2
from google.cloud.talent_v4beta1.proto import application_service_pb2_grpc
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-talent").version
class ApplicationServiceClient(object):
"""
A service that handles application management, including CRUD and
enumeration.
"""
SERVICE_ADDRESS = "jobs.googleapis.com:443"
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = "google.cloud.talent.v4beta1.ApplicationService"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ApplicationServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
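    # --- Illustrative sketch (editor's addition): constructing the client from
    # a key file; the path below is hypothetical.
    #
    #     client = ApplicationServiceClient.from_service_account_file(
    #         "/path/to/service-account.json")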
@classmethod
def application_path(cls, project, tenant, profile, application):
"""Return a fully-qualified application string."""
return google.api_core.path_template.expand(
"projects/{project}/tenants/{tenant}/profiles/{profile}/applications/{application}",
project=project,
tenant=tenant,
profile=profile,
application=application,
)
@classmethod
def profile_path(cls, project, tenant, profile):
"""Return a fully-qualified profile string."""
return google.api_core.path_template.expand(
"projects/{project}/tenants/{tenant}/profiles/{profile}",
project=project,
tenant=tenant,
profile=profile,
)
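    # Editor's note (sketch): profile_path("my-proj", "my-tenant", "p-1")
    # expands to "projects/my-proj/tenants/my-tenant/profiles/p-1".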
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
client_options=None,
):
"""Constructor.
Args:
transport (Union[~.ApplicationServiceGrpcTransport,
Callable[[~.Credentials, type], ~.ApplicationServiceGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
client_options (Union[dict, google.api_core.client_options.ClientOptions]):
Client options used to set user options on the client. API Endpoint
should be set through client_options.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = application_service_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
api_endpoint = self.SERVICE_ADDRESS
if client_options:
if type(client_options) == dict:
client_options = google.api_core.client_options.from_dict(
client_options
)
if client_options.api_endpoint:
api_endpoint = client_options.api_endpoint
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=application_service_grpc_transport.ApplicationServiceGrpcTransport,
address=api_endpoint,
)
else:
if credentials:
raise ValueError(
"Received both a transport instance and "
"credentials; these are mutually exclusive."
)
self.transport = transport
else:
self.transport = application_service_grpc_transport.ApplicationServiceGrpcTransport(
address=api_endpoint, channel=channel, credentials=credentials
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config["interfaces"][self._INTERFACE_NAME]
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def create_application(
self,
parent,
application,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates a new application entity.
Example:
>>> from google.cloud import talent_v4beta1
>>>
>>> client = talent_v4beta1.ApplicationServiceClient()
>>>
>>> parent = client.profile_path('[PROJECT]', '[TENANT]', '[PROFILE]')
>>>
>>> # TODO: Initialize `application`:
>>> application = {}
>>>
>>> response = client.create_application(parent, application)
Args:
parent (str): Required. Resource name of the profile under which the application is
created.
The format is
"projects/{project\_id}/tenants/{tenant\_id}/profiles/{profile\_id}",
for example,
"projects/test-project/tenants/test-tenant/profiles/test-profile".
application (Union[dict, ~google.cloud.talent_v4beta1.types.Application]): Required. The application to be created.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.talent_v4beta1.types.Application`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.talent_v4beta1.types.Application` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "create_application" not in self._inner_api_calls:
self._inner_api_calls[
"create_application"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_application,
default_retry=self._method_configs["CreateApplication"].retry,
default_timeout=self._method_configs["CreateApplication"].timeout,
client_info=self._client_info,
)
request = application_service_pb2.CreateApplicationRequest(
parent=parent, application=application
)
if metadata is None:
metadata = []
metadata = list(metadata)
        # Attach the resource name as an x-goog-request-params routing header so
        # the backend can route the call by resource; the try/except mirrors the
        # generated pattern used when the field may be unset on the request.
        try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["create_application"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def get_application(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Retrieves specified application.
Example:
>>> from google.cloud import talent_v4beta1
>>>
>>> client = talent_v4beta1.ApplicationServiceClient()
>>>
>>> name = client.application_path('[PROJECT]', '[TENANT]', '[PROFILE]', '[APPLICATION]')
>>>
>>> response = client.get_application(name)
Args:
name (str): Required. The resource name of the application to be retrieved.
The format is
"projects/{project\_id}/tenants/{tenant\_id}/profiles/{profile\_id}/applications/{application\_id}",
for example,
"projects/test-project/tenants/test-tenant/profiles/test-profile/applications/test-application".
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.talent_v4beta1.types.Application` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "get_application" not in self._inner_api_calls:
self._inner_api_calls[
"get_application"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_application,
default_retry=self._method_configs["GetApplication"].retry,
default_timeout=self._method_configs["GetApplication"].timeout,
client_info=self._client_info,
)
request = application_service_pb2.GetApplicationRequest(name=name)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["get_application"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def update_application(
self,
application,
update_mask=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Updates specified application.
Example:
>>> from google.cloud import talent_v4beta1
>>>
>>> client = talent_v4beta1.ApplicationServiceClient()
>>>
>>> # TODO: Initialize `application`:
>>> application = {}
>>>
>>> response = client.update_application(application)
Args:
application (Union[dict, ~google.cloud.talent_v4beta1.types.Application]): Required. The application resource to replace the current resource in the
system.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.talent_v4beta1.types.Application`
update_mask (Union[dict, ~google.cloud.talent_v4beta1.types.FieldMask]): Optional but strongly recommended for the best service experience.
If ``update_mask`` is provided, only the specified fields in
``application`` are updated. Otherwise all the fields are updated.
A field mask to specify the application fields to be updated. Only top
level fields of ``Application`` are supported.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.talent_v4beta1.types.FieldMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.talent_v4beta1.types.Application` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "update_application" not in self._inner_api_calls:
self._inner_api_calls[
"update_application"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_application,
default_retry=self._method_configs["UpdateApplication"].retry,
default_timeout=self._method_configs["UpdateApplication"].timeout,
client_info=self._client_info,
)
request = application_service_pb2.UpdateApplicationRequest(
application=application, update_mask=update_mask
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("application.name", application.name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["update_application"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def delete_application(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Deletes specified application.
Example:
>>> from google.cloud import talent_v4beta1
>>>
>>> client = talent_v4beta1.ApplicationServiceClient()
>>>
>>> name = client.application_path('[PROJECT]', '[TENANT]', '[PROFILE]', '[APPLICATION]')
>>>
>>> client.delete_application(name)
Args:
name (str): Required. The resource name of the application to be deleted.
The format is
"projects/{project\_id}/tenants/{tenant\_id}/profiles/{profile\_id}/applications/{application\_id}",
for example,
"projects/test-project/tenants/test-tenant/profiles/test-profile/applications/test-application".
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "delete_application" not in self._inner_api_calls:
self._inner_api_calls[
"delete_application"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_application,
default_retry=self._method_configs["DeleteApplication"].retry,
default_timeout=self._method_configs["DeleteApplication"].timeout,
client_info=self._client_info,
)
request = application_service_pb2.DeleteApplicationRequest(name=name)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
self._inner_api_calls["delete_application"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def list_applications(
self,
parent,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Lists all applications associated with the profile.
Example:
>>> from google.cloud import talent_v4beta1
>>>
>>> client = talent_v4beta1.ApplicationServiceClient()
>>>
>>> parent = client.profile_path('[PROJECT]', '[TENANT]', '[PROFILE]')
>>>
>>> # Iterate over all results
>>> for element in client.list_applications(parent):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_applications(parent).pages:
... for element in page:
... # process element
... pass
Args:
parent (str): Required. Resource name of the profile under which the application is
created.
The format is
"projects/{project\_id}/tenants/{tenant\_id}/profiles/{profile\_id}",
for example,
"projects/test-project/tenants/test-tenant/profiles/test-profile".
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.api_core.page_iterator.PageIterator` instance.
An iterable of :class:`~google.cloud.talent_v4beta1.types.Application` instances.
You can also iterate over the pages of the response
using its `pages` property.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_applications" not in self._inner_api_calls:
self._inner_api_calls[
"list_applications"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_applications,
default_retry=self._method_configs["ListApplications"].retry,
default_timeout=self._method_configs["ListApplications"].timeout,
client_info=self._client_info,
)
request = application_service_pb2.ListApplicationsRequest(
parent=parent, page_size=page_size
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls["list_applications"],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field="applications",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator
| 42.054602 | 160 | 0.615128 |
import functools
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.client_options
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
import google.api_core.page_iterator
import google.api_core.path_template
import grpc
from google.cloud.talent_v4beta1.gapic import application_service_client_config
from google.cloud.talent_v4beta1.gapic import enums
from google.cloud.talent_v4beta1.gapic.transports import (
application_service_grpc_transport,
)
from google.cloud.talent_v4beta1.proto import application_pb2
from google.cloud.talent_v4beta1.proto import application_service_pb2
from google.cloud.talent_v4beta1.proto import application_service_pb2_grpc
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-talent").version
class ApplicationServiceClient(object):
SERVICE_ADDRESS = "jobs.googleapis.com:443"
_INTERFACE_NAME = "google.cloud.talent.v4beta1.ApplicationService"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def application_path(cls, project, tenant, profile, application):
return google.api_core.path_template.expand(
"projects/{project}/tenants/{tenant}/profiles/{profile}/applications/{application}",
project=project,
tenant=tenant,
profile=profile,
application=application,
)
@classmethod
def profile_path(cls, project, tenant, profile):
return google.api_core.path_template.expand(
"projects/{project}/tenants/{tenant}/profiles/{profile}",
project=project,
tenant=tenant,
profile=profile,
)
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
client_options=None,
):
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = application_service_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
api_endpoint = self.SERVICE_ADDRESS
if client_options:
if type(client_options) == dict:
client_options = google.api_core.client_options.from_dict(
client_options
)
if client_options.api_endpoint:
api_endpoint = client_options.api_endpoint
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=application_service_grpc_transport.ApplicationServiceGrpcTransport,
address=api_endpoint,
)
else:
if credentials:
raise ValueError(
"Received both a transport instance and "
"credentials; these are mutually exclusive."
)
self.transport = transport
else:
self.transport = application_service_grpc_transport.ApplicationServiceGrpcTransport(
address=api_endpoint, channel=channel, credentials=credentials
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config["interfaces"][self._INTERFACE_NAME]
)
self._inner_api_calls = {}
def create_application(
self,
parent,
application,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
if "create_application" not in self._inner_api_calls:
self._inner_api_calls[
"create_application"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_application,
default_retry=self._method_configs["CreateApplication"].retry,
default_timeout=self._method_configs["CreateApplication"].timeout,
client_info=self._client_info,
)
request = application_service_pb2.CreateApplicationRequest(
parent=parent, application=application
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["create_application"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def get_application(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
if "get_application" not in self._inner_api_calls:
self._inner_api_calls[
"get_application"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_application,
default_retry=self._method_configs["GetApplication"].retry,
default_timeout=self._method_configs["GetApplication"].timeout,
client_info=self._client_info,
)
request = application_service_pb2.GetApplicationRequest(name=name)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["get_application"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def update_application(
self,
application,
update_mask=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
if "update_application" not in self._inner_api_calls:
self._inner_api_calls[
"update_application"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_application,
default_retry=self._method_configs["UpdateApplication"].retry,
default_timeout=self._method_configs["UpdateApplication"].timeout,
client_info=self._client_info,
)
request = application_service_pb2.UpdateApplicationRequest(
application=application, update_mask=update_mask
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("application.name", application.name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["update_application"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def delete_application(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
if "delete_application" not in self._inner_api_calls:
self._inner_api_calls[
"delete_application"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_application,
default_retry=self._method_configs["DeleteApplication"].retry,
default_timeout=self._method_configs["DeleteApplication"].timeout,
client_info=self._client_info,
)
request = application_service_pb2.DeleteApplicationRequest(name=name)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
self._inner_api_calls["delete_application"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def list_applications(
self,
parent,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
if "list_applications" not in self._inner_api_calls:
self._inner_api_calls[
"list_applications"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_applications,
default_retry=self._method_configs["ListApplications"].retry,
default_timeout=self._method_configs["ListApplications"].timeout,
client_info=self._client_info,
)
request = application_service_pb2.ListApplicationsRequest(
parent=parent, page_size=page_size
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls["list_applications"],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field="applications",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator
| true | true |
f73836227faf3a570627751dcdcb1c7e15c8cb3a | 3,203 | py | Python | bluesky/simulators.py | NSLS-II/bluesky | b7d666e65cf4ef556fb46b744c33264c8e3f7507 | ["BSD-3-Clause"] | 43 | 2015-08-04T20:13:41.000Z | 2019-04-12T17:21:36.000Z | bluesky/simulators.py | NSLS-II/bluesky | b7d666e65cf4ef556fb46b744c33264c8e3f7507 | ["BSD-3-Clause"] | 966 | 2015-07-29T16:43:21.000Z | 2019-05-09T21:02:28.000Z | bluesky/simulators.py | NSLS-II/bluesky | b7d666e65cf4ef556fb46b744c33264c8e3f7507 | ["BSD-3-Clause"] | 40 | 2015-07-29T16:42:41.000Z | 2019-02-07T02:30:34.000Z |
from warnings import warn
from bluesky.utils import maybe_await
from bluesky.preprocessors import print_summary_wrapper
from bluesky.run_engine import call_in_bluesky_event_loop, in_bluesky_event_loop
from .protocols import Checkable
def plot_raster_path(plan, x_motor, y_motor, ax=None, probe_size=None, lw=2):
"""Plot the raster path for this plan
Parameters
----------
plan : iterable
Must yield `Msg` objects and not be a co-routine
x_motor, y_motor : str
Names of the x and y motors
ax : matplotlib.axes.Axes
        The axes to plot to; if None, make a new figure + axes
probe_size : float, optional
If not None, use as radius of probe (in same units as motor positions)
lw : float, optional
Width of lines drawn between points
"""
import matplotlib.pyplot as plt
from matplotlib import collections as mcollections
from matplotlib import patches as mpatches
if ax is None:
ax = plt.subplots()[1]
ax.set_aspect('equal')
cur_x = cur_y = None
traj = []
for msg in plan:
cmd = msg.command
if cmd == 'set':
if msg.obj.name == x_motor:
cur_x = msg.args[0]
if msg.obj.name == y_motor:
cur_y = msg.args[0]
elif cmd == 'save':
traj.append((cur_x, cur_y))
x, y = zip(*traj)
path, = ax.plot(x, y, marker='', linestyle='-', lw=lw)
ax.set_xlabel(x_motor)
ax.set_ylabel(y_motor)
if probe_size is None:
read_points = ax.scatter(x, y, marker='o', lw=lw)
else:
circles = [mpatches.Circle((_x, _y), probe_size,
facecolor='black', alpha=0.5)
for _x, _y in traj]
read_points = mcollections.PatchCollection(circles,
match_original=True)
ax.add_collection(read_points)
return {'path': path, 'events': read_points}
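# Hedged usage sketch (not from the original file): assuming a raster plan and
# two motors whose .name attributes are "x" and "y",
#
#     artists = plot_raster_path(my_plan, "x", "y", probe_size=0.05)
#
# draws the trajectory and returns the matplotlib artists; note the plan
# iterator is consumed by the loop above, so pass a fresh copy if you still
# need the plan afterwards.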
def summarize_plan(plan):
"""Print summary of plan
Prints a minimal version of the plan, showing only moves and
where events are created.
Parameters
----------
plan : iterable
Must yield `Msg` objects
"""
for msg in print_summary_wrapper(plan):
...
print_summary = summarize_plan # back-compat
def check_limits(plan):
"""Run check_limits_async in the RE"""
if in_bluesky_event_loop():
raise RuntimeError("Can't call check_limits() from within RE, use await check_limits_async() instead")
call_in_bluesky_event_loop(check_limits_async(plan))
async def check_limits_async(plan):
"""
Check that a plan will not move devices outside of their limits.
Parameters
----------
plan : iterable
Must yield `Msg` objects
"""
ignore = []
for msg in plan:
obj = msg.obj
if msg.command == 'set' and obj not in ignore:
if isinstance(obj, Checkable):
await maybe_await(obj.check_value(msg.args[0]))
else:
warn(f"{obj.name} has no check_value() method"
f" to check if {msg.args[0]} is within its limits.")
ignore.append(obj)
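# Hedged usage sketch (not part of the original module): with a plan in hand,
#
#     check_limits(scan([det], motor, -1, 1, 10))
#
# consumes the plan and lets each Checkable device's check_value() raise
# before any hardware is moved; `scan`, `det` and `motor` here are
# illustrative names only.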
| 29.657407 | 110 | 0.606931 |
from warnings import warn
from bluesky.utils import maybe_await
from bluesky.preprocessors import print_summary_wrapper
from bluesky.run_engine import call_in_bluesky_event_loop, in_bluesky_event_loop
from .protocols import Checkable
def plot_raster_path(plan, x_motor, y_motor, ax=None, probe_size=None, lw=2):
import matplotlib.pyplot as plt
from matplotlib import collections as mcollections
from matplotlib import patches as mpatches
if ax is None:
ax = plt.subplots()[1]
ax.set_aspect('equal')
cur_x = cur_y = None
traj = []
for msg in plan:
cmd = msg.command
if cmd == 'set':
if msg.obj.name == x_motor:
cur_x = msg.args[0]
if msg.obj.name == y_motor:
cur_y = msg.args[0]
elif cmd == 'save':
traj.append((cur_x, cur_y))
x, y = zip(*traj)
path, = ax.plot(x, y, marker='', linestyle='-', lw=lw)
ax.set_xlabel(x_motor)
ax.set_ylabel(y_motor)
if probe_size is None:
read_points = ax.scatter(x, y, marker='o', lw=lw)
else:
circles = [mpatches.Circle((_x, _y), probe_size,
facecolor='black', alpha=0.5)
for _x, _y in traj]
read_points = mcollections.PatchCollection(circles,
match_original=True)
ax.add_collection(read_points)
return {'path': path, 'events': read_points}
def summarize_plan(plan):
for msg in print_summary_wrapper(plan):
...
print_summary = summarize_plan
def check_limits(plan):
if in_bluesky_event_loop():
raise RuntimeError("Can't call check_limits() from within RE, use await check_limits_async() instead")
call_in_bluesky_event_loop(check_limits_async(plan))
async def check_limits_async(plan):
ignore = []
for msg in plan:
obj = msg.obj
if msg.command == 'set' and obj not in ignore:
if isinstance(obj, Checkable):
await maybe_await(obj.check_value(msg.args[0]))
else:
warn(f"{obj.name} has no check_value() method"
f" to check if {msg.args[0]} is within its limits.")
ignore.append(obj)
| true | true |
f73837a0a17face1e98197f145c27afd20d5eafd | 376 | py | Python | Day-014/03-write_csv-2.py | arvimal/100DaysofCode-Python | 01e59f45b4dc06a3be9e9900456a6bd439752911 | ["MIT"] | 1 | 2020-06-15T05:59:01.000Z | 2020-06-15T05:59:01.000Z | Day-014/03-write_csv-2.py | arvimal/100DaysofCode-Python | 01e59f45b4dc06a3be9e9900456a6bd439752911 | ["MIT"] | null | null | null | Day-014/03-write_csv-2.py | arvimal/100DaysofCode-Python | 01e59f45b4dc06a3be9e9900456a6bd439752911 | ["MIT"] | 7 | 2020-01-24T23:03:58.000Z | 2021-05-31T01:00:27.000Z |
#!/usr/bin/env python3
# Writing csv files
import csv
# Write a csv file with three rows and four columns
with open("output.csv", "w") as data_file:
output_writer = csv.writer(data_file)
output_writer.writerow(["Hello, World!", "How", "are", "you?"])
output_writer.writerow(["This", "is", "Sparta", "bitch!"])
output_writer.writerow(["1", "2", "3", "4"])
| 25.066667 | 67 | 0.646277 |
import csv
with open("output.csv", "w") as data_file:
output_writer = csv.writer(data_file)
output_writer.writerow(["Hello, World!", "How", "are", "you?"])
output_writer.writerow(["This", "is", "Sparta", "bitch!"])
output_writer.writerow(["1", "2", "3", "4"])
| true | true |
f73837fee4a6cc4b87eda81b2a7d2ec9b95c0c9c | 691 | py | Python | 2015/day/4/solution.py | iangregson/advent-of-code | e2a2dde30dcaed027a5ba78f9270f8a1976577f1 | ["MIT"] | null | null | null | 2015/day/4/solution.py | iangregson/advent-of-code | e2a2dde30dcaed027a5ba78f9270f8a1976577f1 | ["MIT"] | null | null | null | 2015/day/4/solution.py | iangregson/advent-of-code | e2a2dde30dcaed027a5ba78f9270f8a1976577f1 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
import os
import hashlib
dir_path = os.path.dirname(os.path.realpath(__file__))
file = open(dir_path + "/input.txt", "r")
input_txt = file.read().strip()
# print(input_txt)
# input_txt = "abcdef"
# input_txt = "pqrstuv"
def try_suffix(suffix, starts_with):
s = input_txt + str(suffix)
m = hashlib.md5()
m.update(s.encode())
result_hex = m.hexdigest()
return result_hex.startswith(starts_with)
suffix = 0
while True:
if try_suffix(suffix, "00000"):
break
suffix += 1
print("Part 1 answer:", suffix)
suffix = 0
while True:
if try_suffix(suffix, "000000"):
break
suffix += 1
print("Part 2 answer:", suffix)
| 17.275 | 54 | 0.65123 |
import os
import hashlib
dir_path = os.path.dirname(os.path.realpath(__file__))
file = open(dir_path + "/input.txt", "r")
input_txt = file.read().strip()
def try_suffix(suffix, starts_with):
s = input_txt + str(suffix)
m = hashlib.md5()
m.update(s.encode())
result_hex = m.hexdigest()
return result_hex.startswith(starts_with)
suffix = 0
while True:
if try_suffix(suffix, "00000"):
break
suffix += 1
print("Part 1 answer:", suffix)
suffix = 0
while True:
if try_suffix(suffix, "000000"):
break
suffix += 1
print("Part 2 answer:", suffix)
| true | true |
f7383942e2f8f1658c8162d89c34615d99456aa7 | 497 | py | Python | 91.py | celioroberto06/cursopythonexercicios | 0a3f1b59395720760216b8e98767deb55e26f0d8 | ["MIT"] | null | null | null | 91.py | celioroberto06/cursopythonexercicios | 0a3f1b59395720760216b8e98767deb55e26f0d8 | ["MIT"] | null | null | null | 91.py | celioroberto06/cursopythonexercicios | 0a3f1b59395720760216b8e98767deb55e26f0d8 | ["MIT"] | null | null | null |
from random import randint
from operator import itemgetter
rankin = {}
jogadores = {'Jogador-1':randint(1, 6), 'Jogador-2':randint(1, 6),
'Jogador-3':randint(1, 6), 'Jogador-4':randint(1, 6)}
print('VALORES SORTEADOS')
for i, v in jogadores.items():
print(f'{i} tirou {v} no dado')
print('='*29)
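# Translation note (added commentary): the Portuguese strings mean
# 'VALORES SORTEADOS' = "DRAWN VALUES", 'tirou ... no dado' = "rolled ... on
# the die", 'RANKING DOS JOGADORES' = "PLAYER RANKING", 'lugar' = "place",
# 'com' = "with".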
rankin = sorted(jogadores.items(), key=itemgetter(1), reverse=True)
print(' ==RANKING DOS JOGADORES==')
for i, v in enumerate(rankin):
    print(f' {i+1}º lugar: {v[0]} com {v[1]}')
| 35.5 | 67 | 0.643863 |
from random import randint
from operator import itemgetter
rankin = {}
jogadores = {'Jogador-1':randint(1, 6), 'Jogador-2':randint(1, 6),
'Jogador-3':randint(1, 6), 'Jogador-4':randint(1, 6)}
print('VALORES SORTEADOS')
for i, v in jogadores.items():
print(f'{i} tirou {v} no dado')
print('='*29)
rankin = sorted(jogadores.items(), key=itemgetter(1), reverse=True)
print(' ==RANKING DOS JOGADORES==')
for i, v in enumerate(rankin):
    print(f' {i+1}º lugar: {v[0]} com {v[1]}')
| true | true |
f73839ebce6f8d749f7e86c9380c3350213d6360 | 19,983 | py | Python | evolved5g/swagger_client/api/location_frontend_api.py | EVOLVED-5G/SDK-CLI | 0f289c7b21c14c3e349164d21cc78d9b6af0a237 | ["Apache-2.0"] | 3 | 2021-10-19T14:37:14.000Z | 2021-11-01T10:43:33.000Z | evolved5g/swagger_client/api/location_frontend_api.py | skolome/evolved5g_cli | b202a878befe22b8dda66ee05610408777f4f006 | ["Apache-2.0"] | 14 | 2021-11-02T10:30:56.000Z | 2022-03-10T11:30:59.000Z | evolved5g/swagger_client/api/location_frontend_api.py | skolome/evolved5g_cli | b202a878befe22b8dda66ee05610408777f4f006 | ["Apache-2.0"] | 1 | 2021-11-16T16:20:31.000Z | 2021-11-16T16:20:31.000Z |
# coding: utf-8
"""
NEF_Emulator
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from evolved5g.swagger_client.api_client import ApiClient
class LocationFrontendApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_path_api_v1_frontend_location_post(self, body, **kwargs): # noqa: E501
"""Create Path # noqa: E501
Create new path. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_path_api_v1_frontend_location_post(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param PathCreate body: (required)
:return: Path
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_path_api_v1_frontend_location_post_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.create_path_api_v1_frontend_location_post_with_http_info(body, **kwargs) # noqa: E501
return data
def create_path_api_v1_frontend_location_post_with_http_info(self, body, **kwargs): # noqa: E501
"""Create Path # noqa: E501
Create new path. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_path_api_v1_frontend_location_post_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param PathCreate body: (required)
:return: Path
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_path_api_v1_frontend_location_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_path_api_v1_frontend_location_post`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['OAuth2PasswordBearer'] # noqa: E501
return self.api_client.call_api(
'/api/v1/frontend/location/', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Path', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_path_api_v1_frontend_location_id_delete(self, id, **kwargs): # noqa: E501
"""Delete Path # noqa: E501
Delete an path. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_path_api_v1_frontend_location_id_delete(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:return: Path
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_path_api_v1_frontend_location_id_delete_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.delete_path_api_v1_frontend_location_id_delete_with_http_info(id, **kwargs) # noqa: E501
return data
def delete_path_api_v1_frontend_location_id_delete_with_http_info(self, id, **kwargs): # noqa: E501
"""Delete Path # noqa: E501
Delete an path. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_path_api_v1_frontend_location_id_delete_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:return: Path
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_path_api_v1_frontend_location_id_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `delete_path_api_v1_frontend_location_id_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['OAuth2PasswordBearer'] # noqa: E501
return self.api_client.call_api(
'/api/v1/frontend/location/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Path', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_path_api_v1_frontend_location_id_get(self, id, **kwargs): # noqa: E501
"""Read Path # noqa: E501
Get path by ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_path_api_v1_frontend_location_id_get(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:return: Path
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_path_api_v1_frontend_location_id_get_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.read_path_api_v1_frontend_location_id_get_with_http_info(id, **kwargs) # noqa: E501
return data
def read_path_api_v1_frontend_location_id_get_with_http_info(self, id, **kwargs): # noqa: E501
"""Read Path # noqa: E501
Get path by ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_path_api_v1_frontend_location_id_get_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:return: Path
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_path_api_v1_frontend_location_id_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `read_path_api_v1_frontend_location_id_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['OAuth2PasswordBearer'] # noqa: E501
return self.api_client.call_api(
'/api/v1/frontend/location/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Path', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_paths_api_v1_frontend_location_get(self, **kwargs): # noqa: E501
"""Read Paths # noqa: E501
Retrieve paths. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_paths_api_v1_frontend_location_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int skip:
:param int limit:
:return: list[Path]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_paths_api_v1_frontend_location_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.read_paths_api_v1_frontend_location_get_with_http_info(**kwargs) # noqa: E501
return data
def read_paths_api_v1_frontend_location_get_with_http_info(self, **kwargs): # noqa: E501
"""Read Paths # noqa: E501
Retrieve paths. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_paths_api_v1_frontend_location_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int skip:
:param int limit:
:return: list[Path]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['skip', 'limit'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_paths_api_v1_frontend_location_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'skip' in params:
query_params.append(('skip', params['skip'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['OAuth2PasswordBearer'] # noqa: E501
return self.api_client.call_api(
'/api/v1/frontend/location/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Path]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_path_api_v1_frontend_location_id_put(self, body, id, **kwargs): # noqa: E501
"""Update Path # noqa: E501
Update an path. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_path_api_v1_frontend_location_id_put(body, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param PathUpdate body: (required)
:param int id: (required)
:return: Path
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_path_api_v1_frontend_location_id_put_with_http_info(body, id, **kwargs) # noqa: E501
else:
(data) = self.update_path_api_v1_frontend_location_id_put_with_http_info(body, id, **kwargs) # noqa: E501
return data
def update_path_api_v1_frontend_location_id_put_with_http_info(self, body, id, **kwargs): # noqa: E501
"""Update Path # noqa: E501
Update an path. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_path_api_v1_frontend_location_id_put_with_http_info(body, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param PathUpdate body: (required)
:param int id: (required)
:return: Path
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_path_api_v1_frontend_location_id_put" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `update_path_api_v1_frontend_location_id_put`") # noqa: E501
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `update_path_api_v1_frontend_location_id_put`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['OAuth2PasswordBearer'] # noqa: E501
return self.api_client.call_api(
'/api/v1/frontend/location/{id}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Path', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
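# Hedged usage sketch (illustrative, not part of the generated client; the
# configuration wiring is an assumption):
#
#     api = LocationFrontendApi()  # or LocationFrontendApi(ApiClient(config))
#     path = api.read_path_api_v1_frontend_location_id_get(42)
#     thread = api.read_paths_api_v1_frontend_location_get(async_req=True)
#     paths = thread.get()  # async_req=True returns the request thread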
| 38.062857 | 143 | 0.61172 |
from __future__ import absolute_import
import re
import six
from evolved5g.swagger_client.api_client import ApiClient
class LocationFrontendApi(object):
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_path_api_v1_frontend_location_post(self, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_path_api_v1_frontend_location_post_with_http_info(body, **kwargs)
else:
(data) = self.create_path_api_v1_frontend_location_post_with_http_info(body, **kwargs)
return data
def create_path_api_v1_frontend_location_post_with_http_info(self, body, **kwargs):
all_params = ['body']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_path_api_v1_frontend_location_post" % key
)
params[key] = val
del params['kwargs']
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_path_api_v1_frontend_location_post`")
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = ['OAuth2PasswordBearer']
return self.api_client.call_api(
'/api/v1/frontend/location/', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Path',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_path_api_v1_frontend_location_id_delete(self, id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_path_api_v1_frontend_location_id_delete_with_http_info(id, **kwargs)
else:
(data) = self.delete_path_api_v1_frontend_location_id_delete_with_http_info(id, **kwargs)
return data
def delete_path_api_v1_frontend_location_id_delete_with_http_info(self, id, **kwargs):
all_params = ['id']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_path_api_v1_frontend_location_id_delete" % key
)
params[key] = val
del params['kwargs']
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `delete_path_api_v1_frontend_location_id_delete`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
auth_settings = ['OAuth2PasswordBearer']
return self.api_client.call_api(
'/api/v1/frontend/location/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Path',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_path_api_v1_frontend_location_id_get(self, id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_path_api_v1_frontend_location_id_get_with_http_info(id, **kwargs)
else:
(data) = self.read_path_api_v1_frontend_location_id_get_with_http_info(id, **kwargs)
return data
def read_path_api_v1_frontend_location_id_get_with_http_info(self, id, **kwargs):
all_params = ['id']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_path_api_v1_frontend_location_id_get" % key
)
params[key] = val
del params['kwargs']
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `read_path_api_v1_frontend_location_id_get`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
auth_settings = ['OAuth2PasswordBearer']
return self.api_client.call_api(
'/api/v1/frontend/location/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Path',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_paths_api_v1_frontend_location_get(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_paths_api_v1_frontend_location_get_with_http_info(**kwargs)
else:
(data) = self.read_paths_api_v1_frontend_location_get_with_http_info(**kwargs)
return data
def read_paths_api_v1_frontend_location_get_with_http_info(self, **kwargs):
all_params = ['skip', 'limit']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_paths_api_v1_frontend_location_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'skip' in params:
query_params.append(('skip', params['skip']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
auth_settings = ['OAuth2PasswordBearer']
return self.api_client.call_api(
'/api/v1/frontend/location/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Path]',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_path_api_v1_frontend_location_id_put(self, body, id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_path_api_v1_frontend_location_id_put_with_http_info(body, id, **kwargs)
else:
(data) = self.update_path_api_v1_frontend_location_id_put_with_http_info(body, id, **kwargs)
return data
def update_path_api_v1_frontend_location_id_put_with_http_info(self, body, id, **kwargs):
all_params = ['body', 'id']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_path_api_v1_frontend_location_id_put" % key
)
params[key] = val
del params['kwargs']
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `update_path_api_v1_frontend_location_id_put`")
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `update_path_api_v1_frontend_location_id_put`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = ['OAuth2PasswordBearer']
return self.api_client.call_api(
'/api/v1/frontend/location/{id}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Path',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| true | true |
f7383a29b87cde534b137d873c7548b15d64f2b6 | 2,456 | py | Python | ma_planning/ma_policy.py | bglick13/multi-agent-emergence-environments | e02d66f0734d95470d15a4508ff369a75fa093a4 | ["MIT"] | null | null | null | ma_planning/ma_policy.py | bglick13/multi-agent-emergence-environments | e02d66f0734d95470d15a4508ff369a75fa093a4 | ["MIT"] | null | null | null | ma_planning/ma_policy.py | bglick13/multi-agent-emergence-environments | e02d66f0734d95470d15a4508ff369a75fa093a4 | ["MIT"] | null | null | null |
import numpy as np
from collections import deque
from typing import Union
from torch import nn, FloatTensor, LongTensor
from torch.functional import F
from torch.optim import Adam
from torch.nn import CrossEntropyLoss
from mae_envs.envs import DraftState
from mcts import SearchNode, SearchProblem
class SwarmAgent():
def __init__(self, model, env):
self.model = model
self.env = env
self.macro_action = None
def set_action(self, action):
self.macro_action = action
def act(self):
return self.macro_action
class CaptainAgent():
def __init__(self, model, env, agents):
self.model = model
self.best_model = model
self.env = env
self.agents = agents
self.solver = None
def simulate(self):
leaf = self.solver.rollout()
value = self.evaluate_leaf(leaf)
self.solver.backup(leaf, value)
return leaf
def get_action(self, obs, num_reads=100, action=-1, random=False):
if self.solver is None:
self.root = SearchNode(obs, action)
self.solver = SearchProblem(self.root)
else:
self.root = SearchNode(obs, action, self.root)
self.solver.root = self.root
leafs = []
for _ in range(num_reads):
leafs.append(self.simulate())
action, value, values = self.root.best_child()
        successor, _, _, _ = self.env.step(action)
nn_probs, nn_value = self.get_preds(successor)
p = F.softmax(FloatTensor(values), -1).numpy()
if random:
action = np.random.choice(range(len(values)), p=p)
else:
top5 = values.argsort()[-5:]
_p = F.softmax(FloatTensor(values[top5]), -1).numpy()
action = np.random.choice(top5, p=_p)
return action, values, p, nn_value, leafs
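    # Hedged driver sketch (not in the original file): a typical caller would do
    #     captain = CaptainAgent(model, env, agents)
    #     action, values, p, nn_value, leafs = captain.get_action(obs, num_reads=100)
    # where each of the num_reads simulate() calls performs one MCTS
    # rollout -> evaluate_leaf -> backup pass before best_child() is queried.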
def get_preds(self, obs):
        s_in = FloatTensor(obs)  # FloatTensor is imported above; bare `torch` is not
        s_in.requires_grad = False
        encoded_s = self.model.forward(s_in)
        probs = self.model.get_next_action_output(encoded_s)  # n_agents x 3 x 11
        probs = F.softmax(FloatTensor(probs), dim=2).detach().cpu().numpy()
        value = F.softmax(self.model.get_value_output(encoded_s), dim=-1).detach().cpu().numpy()
return probs, value
def evaluate_leaf(self, leaf):
probs, value = self.get_preds(leaf)
if not leaf.is_terminal:
leaf.expand(probs)
        return value
| 31.088608 | 88 | 0.623371 |
import numpy as np
from collections import deque
from typing import Union
from torch import nn, FloatTensor, LongTensor
from torch.functional import F
from torch.optim import Adam
from torch.nn import CrossEntropyLoss
from mae_envs.envs import DraftState
from mcts import SearchNode, SearchProblem
class SwarmAgent():
def __init__(self, model, env):
self.model = model
self.env = env
self.macro_action = None
def set_action(self, action):
self.macro_action = action
def act(self):
return self.macro_action
class CaptainAgent():
def __init__(self, model, env, agents):
self.model = model
self.best_model = model
self.env = env
self.agents = agents
self.solver = None
def simulate(self):
leaf = self.solver.rollout()
value = self.evaluate_leaf(leaf)
self.solver.backup(leaf, value)
return leaf
def get_action(self, obs, num_reads=100, action=-1, random=False):
if self.solver is None:
self.root = SearchNode(obs, action)
self.solver = SearchProblem(self.root)
else:
self.root = SearchNode(obs, action, self.root)
self.solver.root = self.root
leafs = []
for _ in range(num_reads):
leafs.append(self.simulate())
action, value, values = self.root.best_child()
        successor, _, _, _ = self.env.step(action)
nn_probs, nn_value = self.get_preds(successor)
p = F.softmax(FloatTensor(values), -1).numpy()
if random:
action = np.random.choice(range(len(values)), p=p)
else:
top5 = values.argsort()[-5:]
_p = F.softmax(FloatTensor(values[top5]), -1).numpy()
action = np.random.choice(top5, p=_p)
return action, values, p, nn_value, leafs
def get_preds(self, obs):
        s_in = FloatTensor(obs)
        s_in.requires_grad = False
        encoded_s = self.model.forward(s_in)
        probs = self.model.get_next_action_output(encoded_s)
        probs = F.softmax(FloatTensor(probs), dim=2).detach().cpu().numpy()
        value = F.softmax(self.model.get_value_output(encoded_s), dim=-1).detach().cpu().numpy()
return probs, value
def evaluate_leaf(self, leaf):
probs, value = self.get_preds(leaf)
if not leaf.is_terminal:
leaf.expand(probs)
        return value
| true | true |
f7383ab9b975240de2a393b69ae805fd2259bda7 | 6,614 | py | Python | bench/kc705.py | ombhilare999/litedram | a3aa4907f11f654dc2df58e13903ec99fad69b6f | ["OLDAP-2.6", "OLDAP-2.3", "OLDAP-2.7"] | null | null | null | bench/kc705.py | ombhilare999/litedram | a3aa4907f11f654dc2df58e13903ec99fad69b6f | ["OLDAP-2.6", "OLDAP-2.3", "OLDAP-2.7"] | null | null | null | bench/kc705.py | ombhilare999/litedram | a3aa4907f11f654dc2df58e13903ec99fad69b6f | ["OLDAP-2.6", "OLDAP-2.3", "OLDAP-2.7"] | 1 | 2021-07-02T08:14:00.000Z | 2021-07-02T08:14:00.000Z |
#!/usr/bin/env python3
#
# This file is part of LiteDRAM.
#
# Copyright (c) 2020 Florent Kermarrec <florent@enjoy-digital.fr>
# SPDX-License-Identifier: BSD-2-Clause
import os
import argparse
from migen import *
from litex_boards.platforms import kc705
from litex.soc.cores.clock import *
from litex.soc.interconnect.csr import *
from litex.soc.integration.soc_core import *
from litex.soc.integration.builder import *
from litedram.phy import s7ddrphy
from litedram.modules import MT8JTF12864
from liteeth.phy import LiteEthPHY
# CRG ----------------------------------------------------------------------------------------------
class _CRG(Module, AutoCSR):
def __init__(self, platform, sys_clk_freq):
self.rst = Signal()
self.clock_domains.cd_sys_pll = ClockDomain()
self.clock_domains.cd_sys = ClockDomain()
self.clock_domains.cd_sys4x = ClockDomain(reset_less=True)
self.clock_domains.cd_clk200 = ClockDomain()
self.clock_domains.cd_uart = ClockDomain()
# # #
# Main PLL.
self.submodules.main_pll = main_pll = S7PLL(speedgrade=-2)
self.comb += main_pll.reset.eq(platform.request("cpu_reset"))
main_pll.register_clkin(platform.request("clk200"), 200e6)
main_pll.create_clkout(self.cd_sys_pll, sys_clk_freq)
main_pll.create_clkout(self.cd_clk200, 200e6)
main_pll.create_clkout(self.cd_uart, 100e6)
main_pll.expose_drp()
self.submodules.idelayctrl = S7IDELAYCTRL(self.cd_clk200)
# DRAM PLL.
self.submodules.pll = pll = S7PLL(speedgrade=-2)
self.comb += pll.reset.eq(~main_pll.locked | self.rst)
pll.register_clkin(self.cd_sys_pll.clk, sys_clk_freq)
pll.create_clkout(self.cd_sys, sys_clk_freq)
pll.create_clkout(self.cd_sys4x, 4*sys_clk_freq)
# Sys Clk Counter.
self.sys_clk_counter = CSRStatus(32)
self.sync += self.sys_clk_counter.status.eq(self.sys_clk_counter.status + 1)
# Bench SoC ----------------------------------------------------------------------------------------
class BenchSoC(SoCCore):
def __init__(self, uart="crossover", sys_clk_freq=int(125e6), with_bist=False, with_analyzer=False):
platform = kc705.Platform()
# SoCCore ----------------------------------------------------------------------------------
SoCCore.__init__(self, platform, clk_freq=sys_clk_freq,
ident = "LiteDRAM bench on KC705",
ident_version = True,
integrated_rom_size = 0x10000,
integrated_rom_mode = "rw",
uart_name = uart)
# CRG --------------------------------------------------------------------------------------
self.submodules.crg = _CRG(platform, sys_clk_freq)
# DDR3 SDRAM -------------------------------------------------------------------------------
self.submodules.ddrphy = s7ddrphy.K7DDRPHY(platform.request("ddram"),
memtype = "DDR3",
nphases = 4,
sys_clk_freq = sys_clk_freq)
self.add_sdram("sdram",
phy = self.ddrphy,
module = MT8JTF12864(sys_clk_freq, "1:4"),
origin = self.mem_map["main_ram"],
with_bist = with_bist)
# UARTBone ---------------------------------------------------------------------------------
if uart != "serial":
self.add_uartbone(name="serial", clk_freq=100e6, baudrate=115200, cd="uart")
# Etherbone --------------------------------------------------------------------------------
self.submodules.ethphy = LiteEthPHY(
clock_pads = self.platform.request("eth_clocks"),
pads = self.platform.request("eth"),
clk_freq = self.clk_freq)
self.add_etherbone(phy=self.ethphy)
# Analyzer ---------------------------------------------------------------------------------
if with_analyzer:
from litescope import LiteScopeAnalyzer
analyzer_signals = [self.ddrphy.dfi]
self.submodules.analyzer = LiteScopeAnalyzer(analyzer_signals,
depth = 256,
clock_domain = "sys",
csr_csv = "analyzer.csv")
# Leds -------------------------------------------------------------------------------------
from litex.soc.cores.led import LedChaser
self.submodules.leds = LedChaser(
pads = platform.request_all("user_led"),
sys_clk_freq = sys_clk_freq)
# Main ---------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="LiteDRAM Bench on KC705")
parser.add_argument("--uart", default="crossover", help="Selected UART: crossover (default) or serial")
parser.add_argument("--build", action="store_true", help="Build bitstream")
parser.add_argument("--with-bist", action="store_true", help="Add BIST Generator/Checker")
parser.add_argument("--with-analyzer", action="store_true", help="Add Analyzer")
parser.add_argument("--load", action="store_true", help="Load bitstream")
parser.add_argument("--load-bios", action="store_true", help="Load BIOS")
parser.add_argument("--sys-clk-freq", default=None, help="Set sys_clk_freq")
parser.add_argument("--test", action="store_true", help="Run Full Bench")
args = parser.parse_args()
soc = BenchSoC(uart=args.uart, with_bist=args.with_bist, with_analyzer=args.with_analyzer)
builder = Builder(soc, output_dir="build/kc705", csr_csv="csr.csv")
builder.build(run=args.build)
if args.load:
prog = soc.platform.create_programmer()
prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".bit"))
if args.load_bios:
from common import load_bios
load_bios("build/kc705/software/bios/bios.bin")
if args.sys_clk_freq is not None:
from common import us_set_sys_clk
us_set_sys_clk(clk_freq=float(args.sys_clk_freq), vco_freq=soc.crg.main_pll.compute_config()["vco"])
if args.test:
from common import s7_bench_test
s7_bench_test(
freq_min = 60e6,
freq_max = 180e6,
freq_step = 1e6,
vco_freq = soc.crg.main_pll.compute_config()["vco"],
bios_filename = "build/kc705/software/bios/bios.bin")
if __name__ == "__main__":
main()
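# Hedged usage notes (added commentary, using only the flags defined above):
#   ./kc705.py --build              # build the bitstream
#   ./kc705.py --load --load-bios   # program the board, then load the BIOS
#   ./kc705.py --test               # run the full frequency-sweep bench test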
| 42.670968 | 116 | 0.55322 |
import os
import argparse
from migen import *
from litex_boards.platforms import kc705
from litex.soc.cores.clock import *
from litex.soc.interconnect.csr import *
from litex.soc.integration.soc_core import *
from litex.soc.integration.builder import *
from litedram.phy import s7ddrphy
from litedram.modules import MT8JTF12864
from liteeth.phy import LiteEthPHY
class _CRG(Module, AutoCSR):
def __init__(self, platform, sys_clk_freq):
self.rst = Signal()
self.clock_domains.cd_sys_pll = ClockDomain()
self.clock_domains.cd_sys = ClockDomain()
self.clock_domains.cd_sys4x = ClockDomain(reset_less=True)
self.clock_domains.cd_clk200 = ClockDomain()
self.clock_domains.cd_uart = ClockDomain()
self.submodules.main_pll = main_pll = S7PLL(speedgrade=-2)
self.comb += main_pll.reset.eq(platform.request("cpu_reset"))
main_pll.register_clkin(platform.request("clk200"), 200e6)
main_pll.create_clkout(self.cd_sys_pll, sys_clk_freq)
main_pll.create_clkout(self.cd_clk200, 200e6)
main_pll.create_clkout(self.cd_uart, 100e6)
main_pll.expose_drp()
self.submodules.idelayctrl = S7IDELAYCTRL(self.cd_clk200)
self.submodules.pll = pll = S7PLL(speedgrade=-2)
self.comb += pll.reset.eq(~main_pll.locked | self.rst)
pll.register_clkin(self.cd_sys_pll.clk, sys_clk_freq)
pll.create_clkout(self.cd_sys, sys_clk_freq)
pll.create_clkout(self.cd_sys4x, 4*sys_clk_freq)
self.sys_clk_counter = CSRStatus(32)
self.sync += self.sys_clk_counter.status.eq(self.sys_clk_counter.status + 1)
class BenchSoC(SoCCore):
def __init__(self, uart="crossover", sys_clk_freq=int(125e6), with_bist=False, with_analyzer=False):
platform = kc705.Platform()
SoCCore.__init__(self, platform, clk_freq=sys_clk_freq,
ident = "LiteDRAM bench on KC705",
ident_version = True,
integrated_rom_size = 0x10000,
integrated_rom_mode = "rw",
uart_name = uart)
self.submodules.crg = _CRG(platform, sys_clk_freq)
self.submodules.ddrphy = s7ddrphy.K7DDRPHY(platform.request("ddram"),
memtype = "DDR3",
nphases = 4,
sys_clk_freq = sys_clk_freq)
self.add_sdram("sdram",
phy = self.ddrphy,
module = MT8JTF12864(sys_clk_freq, "1:4"),
origin = self.mem_map["main_ram"],
with_bist = with_bist)
if uart != "serial":
self.add_uartbone(name="serial", clk_freq=100e6, baudrate=115200, cd="uart")
self.submodules.ethphy = LiteEthPHY(
clock_pads = self.platform.request("eth_clocks"),
pads = self.platform.request("eth"),
clk_freq = self.clk_freq)
self.add_etherbone(phy=self.ethphy)
if with_analyzer:
from litescope import LiteScopeAnalyzer
analyzer_signals = [self.ddrphy.dfi]
self.submodules.analyzer = LiteScopeAnalyzer(analyzer_signals,
depth = 256,
clock_domain = "sys",
csr_csv = "analyzer.csv")
from litex.soc.cores.led import LedChaser
self.submodules.leds = LedChaser(
pads = platform.request_all("user_led"),
sys_clk_freq = sys_clk_freq)
def main():
parser = argparse.ArgumentParser(description="LiteDRAM Bench on KC705")
parser.add_argument("--uart", default="crossover", help="Selected UART: crossover (default) or serial")
parser.add_argument("--build", action="store_true", help="Build bitstream")
parser.add_argument("--with-bist", action="store_true", help="Add BIST Generator/Checker")
parser.add_argument("--with-analyzer", action="store_true", help="Add Analyzer")
parser.add_argument("--load", action="store_true", help="Load bitstream")
parser.add_argument("--load-bios", action="store_true", help="Load BIOS")
parser.add_argument("--sys-clk-freq", default=None, help="Set sys_clk_freq")
parser.add_argument("--test", action="store_true", help="Run Full Bench")
args = parser.parse_args()
soc = BenchSoC(uart=args.uart, with_bist=args.with_bist, with_analyzer=args.with_analyzer)
builder = Builder(soc, output_dir="build/kc705", csr_csv="csr.csv")
builder.build(run=args.build)
if args.load:
prog = soc.platform.create_programmer()
prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".bit"))
if args.load_bios:
from common import load_bios
load_bios("build/kc705/software/bios/bios.bin")
if args.sys_clk_freq is not None:
from common import us_set_sys_clk
us_set_sys_clk(clk_freq=float(args.sys_clk_freq), vco_freq=soc.crg.main_pll.compute_config()["vco"])
if args.test:
from common import s7_bench_test
s7_bench_test(
freq_min = 60e6,
freq_max = 180e6,
freq_step = 1e6,
vco_freq = soc.crg.main_pll.compute_config()["vco"],
bios_filename = "build/kc705/software/bios/bios.bin")
if __name__ == "__main__":
main()
| true | true |
f7383ae66d18454a8fbf4d58087004cce133dc5f | 2,402 | py | Python | azure/mgmt/rdbms/postgresql/models/server_update_parameters.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | ["BSD-3-Clause", "BSD-2-Clause", "MIT"] | 2 | 2020-07-29T14:22:17.000Z | 2020-11-06T18:47:40.000Z | azure/mgmt/rdbms/postgresql/models/server_update_parameters.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | ["BSD-3-Clause", "BSD-2-Clause", "MIT"] | 1 | 2016-08-01T07:37:04.000Z | 2016-08-01T07:37:04.000Z | azure/mgmt/rdbms/postgresql/models/server_update_parameters.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | ["BSD-3-Clause", "BSD-2-Clause", "MIT"] | 1 | 2020-12-12T21:04:41.000Z | 2020-12-12T21:04:41.000Z |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ServerUpdateParameters(Model):
"""Parameters allowd to update for a server.
:param sku: The SKU (pricing tier) of the server.
:type sku: :class:`Sku <azure.mgmt.rdbms.postgresql.models.Sku>`
:param storage_mb: The max storage allowed for a server.
:type storage_mb: long
:param administrator_login_password: The password of the administrator
login.
:type administrator_login_password: str
:param version: The version of a server. Possible values include: '9.5',
'9.6'
:type version: str or :class:`ServerVersion
<azure.mgmt.rdbms.postgresql.models.ServerVersion>`
:param ssl_enforcement: Enable ssl enforcement or not when connect to
server. Possible values include: 'Enabled', 'Disabled'
:type ssl_enforcement: str or :class:`SslEnforcementEnum
<azure.mgmt.rdbms.postgresql.models.SslEnforcementEnum>`
:param tags: Application-specific metadata in the form of key-value pairs.
:type tags: dict
"""
_validation = {
'storage_mb': {'minimum': 1024},
}
_attribute_map = {
'sku': {'key': 'sku', 'type': 'Sku'},
'storage_mb': {'key': 'properties.storageMB', 'type': 'long'},
'administrator_login_password': {'key': 'properties.administratorLoginPassword', 'type': 'str'},
'version': {'key': 'properties.version', 'type': 'str'},
'ssl_enforcement': {'key': 'properties.sslEnforcement', 'type': 'SslEnforcementEnum'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, sku=None, storage_mb=None, administrator_login_password=None, version=None, ssl_enforcement=None, tags=None):
self.sku = sku
self.storage_mb = storage_mb
self.administrator_login_password = administrator_login_password
self.version = version
self.ssl_enforcement = ssl_enforcement
self.tags = tags
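# Hedged usage sketch (not part of the generated SDK file; values below are
# placeholders):
#
#     params = ServerUpdateParameters(
#         storage_mb=51200,           # must be >= 1024 per _validation
#         version="9.6",
#         ssl_enforcement="Enabled",
#         tags={"env": "test"},
#     )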
| 42.140351 | 132 | 0.645712 |
from msrest.serialization import Model
class ServerUpdateParameters(Model):
_validation = {
'storage_mb': {'minimum': 1024},
}
_attribute_map = {
'sku': {'key': 'sku', 'type': 'Sku'},
'storage_mb': {'key': 'properties.storageMB', 'type': 'long'},
'administrator_login_password': {'key': 'properties.administratorLoginPassword', 'type': 'str'},
'version': {'key': 'properties.version', 'type': 'str'},
'ssl_enforcement': {'key': 'properties.sslEnforcement', 'type': 'SslEnforcementEnum'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, sku=None, storage_mb=None, administrator_login_password=None, version=None, ssl_enforcement=None, tags=None):
self.sku = sku
self.storage_mb = storage_mb
self.administrator_login_password = administrator_login_password
self.version = version
self.ssl_enforcement = ssl_enforcement
self.tags = tags
| true | true |
f7383b0b95c5a9a952caf5d6a36db6be0a7e3b15 | 391 | py | Python | aether/forum/migrations/0004_auto_20180808_0216.py | katajakasa/aetherguild4 | a7e294f0cff11e2508751f1013e6648fdc56bb94 | [
"MIT"
] | null | null | null | aether/forum/migrations/0004_auto_20180808_0216.py | katajakasa/aetherguild4 | a7e294f0cff11e2508751f1013e6648fdc56bb94 | [
"MIT"
] | 1 | 2021-06-10T17:36:11.000Z | 2021-06-10T17:36:11.000Z | aether/forum/migrations/0004_auto_20180808_0216.py | katajakasa/aetherguild4 | a7e294f0cff11e2508751f1013e6648fdc56bb94 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.8 on 2018-08-07 23:16
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('forum', '0003_bbcodeimage'),
]
operations = [
migrations.AlterModelOptions(
name='bbcodeimage',
options={'verbose_name': 'BBCode Image', 'verbose_name_plural': 'BBCode Images'},
),
]
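

# Usage sketch: this auto-generated migration is applied like any other
# Django migration (a standard manage.py layout is assumed), e.g.
#
#     python manage.py migrate forum 0004
#
# Django resolves the '0004' prefix to 0004_auto_20180808_0216 when it is
# unambiguous within the app.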
| 21.722222 | 93 | 0.616368 |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('forum', '0003_bbcodeimage'),
]
operations = [
migrations.AlterModelOptions(
name='bbcodeimage',
options={'verbose_name': 'BBCode Image', 'verbose_name_plural': 'BBCode Images'},
),
]
| true | true |
f7383c2941583619db388ad0c0a583e6a4322957 | 257 | py | Python | hubspot/discovery/crm/extensions/cards/discovery.py | fakepop/hubspot-api-python | f04103a09f93f5c26c99991b25fa76801074f3d3 | [
"Apache-2.0"
] | 117 | 2020-04-06T08:22:53.000Z | 2022-03-18T03:41:29.000Z | hubspot/discovery/crm/extensions/cards/discovery.py | fakepop/hubspot-api-python | f04103a09f93f5c26c99991b25fa76801074f3d3 | [
"Apache-2.0"
] | 62 | 2020-04-06T16:21:06.000Z | 2022-03-17T16:50:44.000Z | hubspot/discovery/crm/extensions/cards/discovery.py | fakepop/hubspot-api-python | f04103a09f93f5c26c99991b25fa76801074f3d3 | [
"Apache-2.0"
] | 45 | 2020-04-06T16:13:52.000Z | 2022-03-30T21:33:17.000Z | import hubspot.crm.extensions.cards as api_client
from ....discovery_base import DiscoveryBase
class Discovery(DiscoveryBase):
@property
def cards_api(self) -> api_client.CardsApi:
return self._configure_api_client(api_client, "CardsApi")
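

# Usage sketch (hedged): Discovery objects are normally reached through the
# package's top-level client rather than constructed directly, so the
# CardsApi configured by the property above would typically be accessed as
# (the token is a placeholder):
#
#     from hubspot import HubSpot
#     client = HubSpot(access_token='<token>')
#     cards_api = client.crm.extensions.cards.cards_api
#
# The attribute chain mirrors this file's package path.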
| 28.555556 | 65 | 0.770428 |
import hubspot.crm.extensions.cards as api_client
from ....discovery_base import DiscoveryBase
class Discovery(DiscoveryBase):
@property
def cards_api(self) -> api_client.CardsApi:
return self._configure_api_client(api_client, "CardsApi")
| true | true |
f7383d39681bc7280d96b6ef5734328a92e0b0e1 | 10,254 | py | Python | deploy.py | GeorgianaElena/mybinder.org-deploy | 8d0065710281d72e065658ac6d4414e420f4a2db | [
"BSD-3-Clause"
] | null | null | null | deploy.py | GeorgianaElena/mybinder.org-deploy | 8d0065710281d72e065658ac6d4414e420f4a2db | [
"BSD-3-Clause"
] | null | null | null | deploy.py | GeorgianaElena/mybinder.org-deploy | 8d0065710281d72e065658ac6d4414e420f4a2db | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import argparse
import json
import os
import subprocess
import re
import sys
import yaml
# Color codes for colored output!
BOLD = subprocess.check_output(['tput', 'bold']).decode()
GREEN = subprocess.check_output(['tput', 'setaf', '2']).decode()
NC = subprocess.check_output(['tput', 'sgr0']).decode()
HERE = os.path.dirname(__file__)
ABSOLUTE_HERE = os.path.dirname(os.path.realpath(__file__))
# Get helm version environment variable
HELM_VERSION = os.getenv("HELM_VERSION", None)
if HELM_VERSION is None:
raise Exception("HELM_VERSION environment variable must be set")
def setup_auth_turing(cluster):
"""
    Set up authentication with the Turing k8s cluster on Azure.
"""
# Read in auth info
azure_file = os.path.join(ABSOLUTE_HERE, "secrets", "turing-auth-key-prod.json")
with open(azure_file, "r") as stream:
azure = json.load(stream)
# Login in to Azure
login_cmd = [
"az", "login", "--service-principal",
"--username", azure["sp-app-id"],
"--password", azure["sp-app-key"],
"--tenant", azure["tenant-id"]
]
subprocess.check_output(login_cmd)
# Set kubeconfig
creds_cmd = [
"az", "aks", "get-credentials",
"--name", cluster,
"--resource-group", "binder-prod"
]
stdout = subprocess.check_output(creds_cmd)
print(stdout.decode('utf-8'))
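

# Expected shape of secrets/turing-auth-key-prod.json, inferred from the keys
# read above (values are placeholders):
#
#     {"sp-app-id": "<appId>", "sp-app-key": "<password>", "tenant-id": "<tenant>"}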
def setup_auth_ovh(release, cluster):
"""
Set up authentication with 'binder-ovh' K8S from the ovh-kubeconfig.yml
"""
print(f'Setup the OVH authentication for namespace {release}')
ovh_kubeconfig = os.path.join(ABSOLUTE_HERE, 'secrets', 'ovh-kubeconfig.yml')
os.environ['KUBECONFIG'] = ovh_kubeconfig
print(f'Current KUBECONFIG=\'{ovh_kubeconfig}\'')
stdout = subprocess.check_output([
'kubectl',
'config',
'use-context',
cluster
])
print(stdout.decode('utf8'))
def setup_ovh_ingress_link(release):
"""
Setup the Ingress link ovh.mybinder.org -> binder.mybinder.ovh
"""
ovh_ingress_path = os.path.join(ABSOLUTE_HERE, 'config', 'ovh', 'ovh_mybinder_org_ingress.yaml')
stdout = subprocess.check_output([
'kubectl',
'apply',
'-f',
ovh_ingress_path,
'-n',
release
])
print(stdout.decode('utf8'))
def setup_auth_gcloud(release, cluster):
"""
Set up GCloud + Kubectl authentication for talking to a given cluster
"""
# Authenticate to GoogleCloud using a service account
subprocess.check_output([
"gcloud", "auth", "activate-service-account",
f"--key-file=secrets/gke-auth-key-{release}.json"
])
# Use gcloud to populate ~/.kube/config, which kubectl / helm can use
subprocess.check_call([
"gcloud", "container", "clusters", "get-credentials",
cluster, "--zone=us-central1-a", f"--project=binder-{release}"
])
def setup_helm(release):
"""ensure helm is up to date"""
# First check the helm client and server versions
client_helm_cmd = ["helm", "version", "-c", "--short"]
    client_version = subprocess.check_output(
        client_helm_cmd
    ).decode('utf-8').split(":")[1].split("+")[0].strip()
server_helm_cmd = ["helm", "version", "-s", "--short"]
    server_version = subprocess.check_output(
        server_helm_cmd
    ).decode('utf-8').split(":")[1].split("+")[0].strip()
print(BOLD + GREEN +
f"Client version: {client_version}, Server version: {server_version}" +
NC,
flush=True
)
# Now check if the version of helm matches that which travis is expecting
if client_version != HELM_VERSION:
# The local helm version is not what was expected - user needs to change the installation
raise Exception(
f"You are not running helm {HELM_VERSION} which is the version our continuous deployment system uses.\n" +
"Please change your installation and try again.\n"
)
elif (client_version == HELM_VERSION) and (client_version != server_version):
# The correct local version of helm is installed, but the server side
# has previously accidentally been upgraded. Perform a force-upgrade
# to bring the server side back to matching version
print(f"Upgrading helm from {server_version} to {HELM_VERSION}")
subprocess.check_call(['helm', 'init', '--upgrade', '--force-upgrade'])
elif (client_version == HELM_VERSION) and (client_version == server_version):
# All is good! Perform normal helm init command.
# We use the --client-only flag so that the Tiller installation is not affected.
subprocess.check_call(['helm', 'init', '--client-only'])
else:
# This is a catch-all exception. Hopefully this doesn't execute!
raise Exception("Please check your helm installation.")
deployment = json.loads(subprocess.check_output([
'kubectl',
'--namespace=kube-system',
'get',
'deployment',
'tiller-deploy',
'-o', 'json',
]).decode('utf8'))
# patch tiller nodeSelector
# helm init can set this with `--node-selectors`,
# but it cannot be applied after upgrade
# https://github.com/helm/helm/issues/4063
with open(os.path.join(HERE, 'config', release + '.yaml')) as f:
config = yaml.safe_load(f)
node_selector = config.get('coreNodeSelector', None)
current_node_selector = deployment['spec']['template']['spec'].get('nodeSelector')
if current_node_selector != node_selector:
patch = {'path': '/spec/template/spec/nodeSelector'}
if not node_selector:
patch['op'] = 'remove'
if not current_node_selector:
patch['op'] = 'add'
patch['value'] = node_selector
else:
patch['op'] = 'replace'
patch['value'] = node_selector
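        # Illustrative payload for the kubectl call below (the selector value
        # is an assumption, not taken from this repo's config), e.g.:
        #   [{"op": "replace",
        #     "path": "/spec/template/spec/nodeSelector",
        #     "value": {"mybinder.org/pool-type": "core"}}]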
subprocess.check_call([
'kubectl',
'patch',
'--namespace',
'kube-system',
'deployment',
'tiller-deploy',
'--type=json',
'-p',
json.dumps([patch]),
])
# wait for tiller to come up
subprocess.check_call([
'kubectl', 'rollout', 'status',
'--namespace', 'kube-system',
'--watch', 'deployment', 'tiller-deploy',
])
def deploy(release):
"""Deploy jupyterhub"""
print(BOLD + GREEN + f"Updating network-bans for {release}" + NC, flush=True)
if release == 'turing':
subprocess.check_call([
"python3",
"secrets/ban.py",
release,
])
else:
subprocess.check_call([
"python3",
"secrets/ban.py",
])
print(BOLD + GREEN + f"Starting helm upgrade for {release}" + NC, flush=True)
helm = [
'helm', 'upgrade', '--install',
'--namespace', release,
release,
'mybinder',
'--force',
'--wait',
'--timeout', '600',
'-f', os.path.join('config', release + '.yaml'),
'-f', os.path.join('secrets', 'config', 'common.yaml'),
'-f', os.path.join('secrets', 'config', release + '.yaml'),
]
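    # Equivalent shell invocation for release 'prod' (illustrative):
    #   helm upgrade --install --namespace prod prod mybinder --force --wait \
    #     --timeout 600 -f config/prod.yaml \
    #     -f secrets/config/common.yaml -f secrets/config/prod.yaml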
subprocess.check_call(helm)
print(BOLD + GREEN + f"SUCCESS: Helm upgrade for {release} completed" + NC, flush=True)
# Explicitly wait for all deployments and daemonsets to be fully rolled out
print(BOLD + GREEN + f"Waiting for all deployments and daemonsets in {release} to be ready" + NC, flush=True)
deployments = subprocess.check_output([
'kubectl',
'--namespace', release,
'get', 'deployments',
'-o', 'name'
]).decode().strip().split('\n')
daemonsets = subprocess.check_output([
'kubectl',
'--namespace', release,
'get', 'daemonsets',
'-o', 'name'
]).decode().strip().split('\n')
for d in deployments + daemonsets:
subprocess.check_call([
'kubectl', 'rollout', 'status',
'--namespace', release,
'--watch', d
])
def main():
# Get current working directory
cwd = os.getcwd()
# parse command line args
argparser = argparse.ArgumentParser()
argparser.add_argument(
'release',
help="Release to deploy",
choices=['staging', 'prod', 'ovh', 'turing']
)
argparser.add_argument(
'cluster',
help='Cluster to do the deployment in'
)
argparser.add_argument(
'--local',
action='store_true',
help="If the script is running locally, skip auth and helm steps."
)
args = argparser.parse_args()
# Check if the local flag is set
if not args.local:
# Check if the script is being run on travis
if not (cwd.startswith('/home/travis')):
# Catch the case where the script is running locally but the --local flag
# has not been set. Check that the user is sure that they want to do this!
print(
"You do not seem to be running on Travis but have not set the --local flag."
)
# Use regex to match user input
            regex_no = re.compile("^[nN][oO]$")
            regex_yes = re.compile("^[yY][eE][sS]$")
response = input("Are you sure you want to execute this script? [yes/no]: ")
if regex_no.match(response):
# User isn't sure - exit script
print("Exiting script.")
sys.exit()
elif regex_yes.match(response):
# User is sure - proceed
pass
else:
# User wrote something that wasn't "yes" or "no"
raise ValueError(
"Unrecognised input. Expecting either yes or no."
)
# script is running on travis, proceed with auth and helm setup
if args.cluster == 'binder-ovh':
setup_auth_ovh(args.release, args.cluster)
elif args.cluster == 'turing':
setup_auth_turing(args.cluster)
else:
setup_auth_gcloud(args.release, args.cluster)
setup_helm(args.release)
deploy(args.release)
if __name__ == '__main__':
main()
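

# Usage sketch (cluster names are illustrative; each release's real cluster
# name comes from this repo's config and secrets):
#
#     python deploy.py staging staging         # on Travis / CI
#     python deploy.py prod prod-a --local     # locally, skipping auth/helm setup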
| 32.971061 | 118 | 0.594597 |
import argparse
import json
import os
import subprocess
import re
import sys
import yaml
BOLD = subprocess.check_output(['tput', 'bold']).decode()
GREEN = subprocess.check_output(['tput', 'setaf', '2']).decode()
NC = subprocess.check_output(['tput', 'sgr0']).decode()
HERE = os.path.dirname(__file__)
ABSOLUTE_HERE = os.path.dirname(os.path.realpath(__file__))
HELM_VERSION = os.getenv("HELM_VERSION", None)
if HELM_VERSION is None:
raise Exception("HELM_VERSION environment variable must be set")
def setup_auth_turing(cluster):
azure_file = os.path.join(ABSOLUTE_HERE, "secrets", "turing-auth-key-prod.json")
with open(azure_file, "r") as stream:
azure = json.load(stream)
login_cmd = [
"az", "login", "--service-principal",
"--username", azure["sp-app-id"],
"--password", azure["sp-app-key"],
"--tenant", azure["tenant-id"]
]
subprocess.check_output(login_cmd)
creds_cmd = [
"az", "aks", "get-credentials",
"--name", cluster,
"--resource-group", "binder-prod"
]
stdout = subprocess.check_output(creds_cmd)
print(stdout.decode('utf-8'))
def setup_auth_ovh(release, cluster):
print(f'Setup the OVH authentication for namespace {release}')
ovh_kubeconfig = os.path.join(ABSOLUTE_HERE, 'secrets', 'ovh-kubeconfig.yml')
os.environ['KUBECONFIG'] = ovh_kubeconfig
print(f'Current KUBECONFIG=\'{ovh_kubeconfig}\'')
stdout = subprocess.check_output([
'kubectl',
'config',
'use-context',
cluster
])
print(stdout.decode('utf8'))
def setup_ovh_ingress_link(release):
ovh_ingress_path = os.path.join(ABSOLUTE_HERE, 'config', 'ovh', 'ovh_mybinder_org_ingress.yaml')
stdout = subprocess.check_output([
'kubectl',
'apply',
'-f',
ovh_ingress_path,
'-n',
release
])
print(stdout.decode('utf8'))
def setup_auth_gcloud(release, cluster):
subprocess.check_output([
"gcloud", "auth", "activate-service-account",
f"--key-file=secrets/gke-auth-key-{release}.json"
])
subprocess.check_call([
"gcloud", "container", "clusters", "get-credentials",
cluster, "--zone=us-central1-a", f"--project=binder-{release}"
])
def setup_helm(release):
client_helm_cmd = ["helm", "version", "-c", "--short"]
    client_version = subprocess.check_output(
        client_helm_cmd
    ).decode('utf-8').split(":")[1].split("+")[0].strip()
server_helm_cmd = ["helm", "version", "-s", "--short"]
    server_version = subprocess.check_output(
        server_helm_cmd
    ).decode('utf-8').split(":")[1].split("+")[0].strip()
print(BOLD + GREEN +
f"Client version: {client_version}, Server version: {server_version}" +
NC,
flush=True
)
if client_version != HELM_VERSION:
raise Exception(
f"You are not running helm {HELM_VERSION} which is the version our continuous deployment system uses.\n" +
"Please change your installation and try again.\n"
)
elif (client_version == HELM_VERSION) and (client_version != server_version):
print(f"Upgrading helm from {server_version} to {HELM_VERSION}")
subprocess.check_call(['helm', 'init', '--upgrade', '--force-upgrade'])
elif (client_version == HELM_VERSION) and (client_version == server_version):
subprocess.check_call(['helm', 'init', '--client-only'])
else:
raise Exception("Please check your helm installation.")
deployment = json.loads(subprocess.check_output([
'kubectl',
'--namespace=kube-system',
'get',
'deployment',
'tiller-deploy',
'-o', 'json',
]).decode('utf8'))
# patch tiller nodeSelector
# helm init can set this with `--node-selectors`,
# but it cannot be applied after upgrade
# https://github.com/helm/helm/issues/4063
with open(os.path.join(HERE, 'config', release + '.yaml')) as f:
config = yaml.safe_load(f)
node_selector = config.get('coreNodeSelector', None)
current_node_selector = deployment['spec']['template']['spec'].get('nodeSelector')
if current_node_selector != node_selector:
patch = {'path': '/spec/template/spec/nodeSelector'}
if not node_selector:
patch['op'] = 'remove'
if not current_node_selector:
patch['op'] = 'add'
patch['value'] = node_selector
else:
patch['op'] = 'replace'
patch['value'] = node_selector
subprocess.check_call([
'kubectl',
'patch',
'--namespace',
'kube-system',
'deployment',
'tiller-deploy',
'--type=json',
'-p',
json.dumps([patch]),
])
# wait for tiller to come up
subprocess.check_call([
'kubectl', 'rollout', 'status',
'--namespace', 'kube-system',
'--watch', 'deployment', 'tiller-deploy',
])
def deploy(release):
print(BOLD + GREEN + f"Updating network-bans for {release}" + NC, flush=True)
if release == 'turing':
subprocess.check_call([
"python3",
"secrets/ban.py",
release,
])
else:
subprocess.check_call([
"python3",
"secrets/ban.py",
])
print(BOLD + GREEN + f"Starting helm upgrade for {release}" + NC, flush=True)
helm = [
'helm', 'upgrade', '--install',
'--namespace', release,
release,
'mybinder',
'--force',
'--wait',
'--timeout', '600',
'-f', os.path.join('config', release + '.yaml'),
'-f', os.path.join('secrets', 'config', 'common.yaml'),
'-f', os.path.join('secrets', 'config', release + '.yaml'),
]
subprocess.check_call(helm)
print(BOLD + GREEN + f"SUCCESS: Helm upgrade for {release} completed" + NC, flush=True)
# Explicitly wait for all deployments and daemonsets to be fully rolled out
print(BOLD + GREEN + f"Waiting for all deployments and daemonsets in {release} to be ready" + NC, flush=True)
deployments = subprocess.check_output([
'kubectl',
'--namespace', release,
'get', 'deployments',
'-o', 'name'
]).decode().strip().split('\n')
daemonsets = subprocess.check_output([
'kubectl',
'--namespace', release,
'get', 'daemonsets',
'-o', 'name'
]).decode().strip().split('\n')
for d in deployments + daemonsets:
subprocess.check_call([
'kubectl', 'rollout', 'status',
'--namespace', release,
'--watch', d
])
def main():
# Get current working directory
cwd = os.getcwd()
# parse command line args
argparser = argparse.ArgumentParser()
argparser.add_argument(
'release',
help="Release to deploy",
choices=['staging', 'prod', 'ovh', 'turing']
)
argparser.add_argument(
'cluster',
help='Cluster to do the deployment in'
)
argparser.add_argument(
'--local',
action='store_true',
help="If the script is running locally, skip auth and helm steps."
)
args = argparser.parse_args()
# Check if the local flag is set
if not args.local:
# Check if the script is being run on travis
if not (cwd.startswith('/home/travis')):
# Catch the case where the script is running locally but the --local flag
# has not been set. Check that the user is sure that they want to do this!
print(
"You do not seem to be running on Travis but have not set the --local flag."
)
# Use regex to match user input
            regex_no = re.compile("^[nN][oO]$")
            regex_yes = re.compile("^[yY][eE][sS]$")
response = input("Are you sure you want to execute this script? [yes/no]: ")
if regex_no.match(response):
# User isn't sure - exit script
print("Exiting script.")
sys.exit()
elif regex_yes.match(response):
pass
else:
raise ValueError(
"Unrecognised input. Expecting either yes or no."
)
# script is running on travis, proceed with auth and helm setup
if args.cluster == 'binder-ovh':
setup_auth_ovh(args.release, args.cluster)
elif args.cluster == 'turing':
setup_auth_turing(args.cluster)
else:
setup_auth_gcloud(args.release, args.cluster)
setup_helm(args.release)
deploy(args.release)
if __name__ == '__main__':
main()
| true | true |