hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7f95f9615b05c360f3b746d960b369e9ed3a45d | 1,570 | py | Python | hddm/models/hddm_truncated.py | hddm-devs/hddm | fe119ec9834d56c78f8ee3cb59a616c8a5d082bf | [
"BSD-2-Clause-FreeBSD"
] | 155 | 2015-01-10T22:44:55.000Z | 2022-03-27T00:39:33.000Z | hddm/models/hddm_truncated.py | hddm-devs/hddm | fe119ec9834d56c78f8ee3cb59a616c8a5d082bf | [
"BSD-2-Clause-FreeBSD"
] | 38 | 2015-09-26T11:48:56.000Z | 2022-02-18T04:53:51.000Z | hddm/models/hddm_truncated.py | hddm-devs/hddm | fe119ec9834d56c78f8ee3cb59a616c8a5d082bf | [
"BSD-2-Clause-FreeBSD"
] | 97 | 2015-01-21T09:03:41.000Z | 2022-03-26T13:12:15.000Z | from collections import OrderedDict
from hddm.models import HDDMBase
class HDDMTruncated(HDDMBase):
def _create_stochastic_knodes(self, include):
knodes = OrderedDict()
if "a" in include:
knodes.update(
self._create_family_trunc_normal("a", lower=1e-3, upper=1e3, value=1)
)
if "v" in include:
knodes.update(self._create_family_normal("v", value=0))
if "t" in include:
knodes.update(
self._create_family_trunc_normal("t", lower=1e-3, upper=1e3, value=0.01)
)
if "sv" in include:
# TW: Use kabuki.utils.HalfCauchy, S=10, value=1 instead?
knodes.update(
self._create_family_trunc_normal("sv", lower=0, upper=1e3, value=1)
)
# knodes.update(self._create_family_exp('sv', value=1))
if "sz" in include:
knodes.update(
self._create_family_trunc_normal("sz", lower=0, upper=1, value=0.1)
)
if "st" in include:
knodes.update(
self._create_family_trunc_normal("st", lower=0, upper=1e3, value=0.01)
)
if "z" in include:
knodes.update(
self._create_family_trunc_normal("z", lower=0, upper=1, value=0.5)
)
if "p_outlier" in include:
knodes.update(
self._create_family_trunc_normal(
"p_outlier", lower=0, upper=1, value=0.05
)
)
return knodes
| 34.888889 | 88 | 0.543949 | from collections import OrderedDict
from hddm.models import HDDMBase
class HDDMTruncated(HDDMBase):
def _create_stochastic_knodes(self, include):
knodes = OrderedDict()
if "a" in include:
knodes.update(
self._create_family_trunc_normal("a", lower=1e-3, upper=1e3, value=1)
)
if "v" in include:
knodes.update(self._create_family_normal("v", value=0))
if "t" in include:
knodes.update(
self._create_family_trunc_normal("t", lower=1e-3, upper=1e3, value=0.01)
)
if "sv" in include:
knodes.update(
self._create_family_trunc_normal("sv", lower=0, upper=1e3, value=1)
)
if "sz" in include:
knodes.update(
self._create_family_trunc_normal("sz", lower=0, upper=1, value=0.1)
)
if "st" in include:
knodes.update(
self._create_family_trunc_normal("st", lower=0, upper=1e3, value=0.01)
)
if "z" in include:
knodes.update(
self._create_family_trunc_normal("z", lower=0, upper=1, value=0.5)
)
if "p_outlier" in include:
knodes.update(
self._create_family_trunc_normal(
"p_outlier", lower=0, upper=1, value=0.05
)
)
return knodes
| true | true |
f7f9600f84f07b15d9cbc2fb0ec3ee0ce608c06f | 922 | py | Python | bots/iam_group_delete_inline_policy.py | chkp-stuartgreen/cloud-bots | 9ee36270675cb4efc1022bfec2013a7dce7ae8ca | [
"BSD-3-Clause"
] | 68 | 2019-09-26T10:25:35.000Z | 2022-03-27T01:32:46.000Z | bots/iam_group_delete_inline_policy.py | chkp-stuartgreen/cloud-bots | 9ee36270675cb4efc1022bfec2013a7dce7ae8ca | [
"BSD-3-Clause"
] | 18 | 2020-02-17T16:30:47.000Z | 2021-09-26T14:14:54.000Z | bots/iam_group_delete_inline_policy.py | chkp-stuartgreen/cloud-bots | 9ee36270675cb4efc1022bfec2013a7dce7ae8ca | [
"BSD-3-Clause"
] | 42 | 2019-10-03T19:31:42.000Z | 2022-03-21T19:29:12.000Z | """
## iam_group_delete_inline_group
What it does: Deletes a inline policy attached to iam group
Usage: AUTO: iam_group_delete_inline_group
Limitations: none
"""
import boto3
from botocore.exceptions import ClientError
def run_action(session, rule, entity, params):
iam_resource = session.resource('iam')
iam_group = iam_resource.Group(entity['name'])
# take the names of the policies that we need to delete
inline_policies = [policy['name'] for policy in entity['inlinePolicies']]
try:
text_output = ""
# iterates over the policies and delete the inline policies
for policy in iam_group.policies.all():
if policy.name in inline_policies:
policy.delete()
text_output += f"Deleted inline policy '{policy.name}'\n"
except ClientError as e:
text_output = "Unexpected error: %s \n" % e
return text_output | 31.793103 | 77 | 0.67679 |
import boto3
from botocore.exceptions import ClientError
def run_action(session, rule, entity, params):
iam_resource = session.resource('iam')
iam_group = iam_resource.Group(entity['name'])
inline_policies = [policy['name'] for policy in entity['inlinePolicies']]
try:
text_output = ""
for policy in iam_group.policies.all():
if policy.name in inline_policies:
policy.delete()
text_output += f"Deleted inline policy '{policy.name}'\n"
except ClientError as e:
text_output = "Unexpected error: %s \n" % e
return text_output | true | true |
f7f96074843d793b73779707a2acd560569b5b85 | 775 | py | Python | conftest.py | DataFinnovation/Arelle | d4bf45f56fc9249f75ab22e6217dbe55f0510841 | [
"Apache-2.0"
] | 292 | 2015-01-27T03:31:51.000Z | 2022-03-26T07:00:05.000Z | conftest.py | DataFinnovation/Arelle | d4bf45f56fc9249f75ab22e6217dbe55f0510841 | [
"Apache-2.0"
] | 94 | 2015-04-18T23:03:00.000Z | 2022-03-28T17:24:55.000Z | conftest.py | DataFinnovation/Arelle | d4bf45f56fc9249f75ab22e6217dbe55f0510841 | [
"Apache-2.0"
] | 200 | 2015-01-13T03:55:47.000Z | 2022-03-29T12:38:56.000Z | '''
Created on May 14,2012
Use this module to start Arelle in py.test modes
@author: Mark V Systems Limited
(c) Copyright 2012 Mark V Systems Limited, All rights reserved.
This module supports the conformance tests to validate that Arelle is
working properly. See arelle_test.py.
'''
import os
def pytest_addoption(parser):
tests_default = os.path.join(os.path.dirname(os.path.abspath(__file__)),
os.path.join('arelle',
os.path.join('config',
'arelle_test.ini')))
parser.addoption('--tests', default=tests_default,
help='.ini file to load test suites from (default is arelle/confi/arelle_test.ini)')
| 32.291667 | 105 | 0.597419 |
import os
def pytest_addoption(parser):
tests_default = os.path.join(os.path.dirname(os.path.abspath(__file__)),
os.path.join('arelle',
os.path.join('config',
'arelle_test.ini')))
parser.addoption('--tests', default=tests_default,
help='.ini file to load test suites from (default is arelle/confi/arelle_test.ini)')
| true | true |
f7f96088b00b62928af84ce163584586cc34a11e | 5,026 | py | Python | mordl/feat_tagger_model.py | fostroll/mordl | 992d724f43709483901dd55d1f9aa80791dbccb2 | [
"BSD-3-Clause"
] | 5 | 2020-07-27T05:38:17.000Z | 2021-12-30T09:44:07.000Z | mordl/feat_tagger_model.py | fostroll/mordl | 992d724f43709483901dd55d1f9aa80791dbccb2 | [
"BSD-3-Clause"
] | null | null | null | mordl/feat_tagger_model.py | fostroll/mordl | 992d724f43709483901dd55d1f9aa80791dbccb2 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# MorDL project: NE tagger model
#
# Copyright (C) 2020-present by Sergei Ternovykh, Anastasiya Nikiforova
# License: BSD, see LICENSE for details
"""
Provides FEAT tagger model inherited from `mordl.BaseTaggerModel`.
"""
from collections.abc import Iterable
from junky import get_func_params
from mordl.base_tagger_model import BaseTaggerModel
from mordl.defaults import CONFIG_ATTR
class FeatTaggerModel(BaseTaggerModel):
"""
The class for prediction the content of a key-value type field. Separated
implementation (predict only one particular field at a time).
Args:
**num_labels** (`int`): the number of target labels. Don't forget to add
`1` for padding.
**labels_pad_idx** (`int`; default=-100): the index of padding element in
the label vocabulary. You can specify here the real index of the padding
intent, but we recommend to keep it as is (with default fake index)
because in practice, learning on padding increasing the resulting model
performance. If you stil want to experiment, along with specifying the
real padding index, you may try not to add `1` to **num_labels** for the
padding intent. The model then put random labels as tags for the padding
part of the input, but they are ignored during the loss computation.
**vec_emb_dim** (`int`; default is `None`): the incoming word-level
embedding vector space dimensionality.
**alphabet_size** (`int`): the length of character vocabulary for the
internal character-level embedding layer. Relevant if either
**rnn_emb_dim** or **cnn_emb_dim** is not `None`.
**char_pad_idx** (`int`; default is `0`): the index of the padding element
in the character vocabulary of the internal character-level embedding
layer. Relevant if either **rnn_emb_dim** or **cnn_emb_dim** is not `None`.
**rnn_emb_dim** (`int`; default is `None`): the internal character RNN
(LSTM) embedding dimensionality. If `None`, the layer is skipped.
**cnn_emb_dim** (`int`; default is `200`): the internal character CNN
embedding dimensionality. If `None`, the layer is skipped.
**cnn_kernels** (`list([int])`; default is `[1, 2, 3, 4, 5, 6]`): CNN
kernel sizes of the internal CNN embedding layer. Relevant if
**cnn_emb_dim** is not `None`.
**upos_emb_dim** (`int`; default is `200`): the auxiliary UPOS label
embedding dimensionality.
**upos_num** (`int`): the length of UPOS vocabulary.
**upos_pad_idx** (`int`; default is `0`): the index of padding element
in the UPOS vocabulary.
**emb_bn** (`bool`; default is `True`): whether batch normalization layer
should be applied after the embedding concatenation.
**emb_do** (`float`; default is `.2`): the dropout rate after the
embedding concatenation.
**final_emb_dim** (`int`; default is `512`): the output dimesionality of
the linear transformation applying to concatenated embeddings.
**pre_bn** (`bool`; default is `True`): whether batch normalization layer
should be applied before the main part of the algorithm.
**pre_do** (`float`; default is `.5`): the dropout rate before the main
part of the algorithm.
**lstm_layers** (`int`; default is `1`): the number of Bidirectional LSTM
layers. If `None`, they are not created.
**lstm_do** (`float`; default is `0`): the dropout between LSTM layers.
Only relevant, if `lstm_layers` > `1`.
**tran_layers** (`int`; default is `None`): the number of Transformer
Encoder layers. If `None`, they are not created.
**tran_heads** (`int`; default is `8`): the number of attention heads of
Transformer Encoder layers. Only relevant, if `tran_layers` > `1`.
**post_bn** (`bool`; default is `True`): whether batch normalization layer
should be applied after the main part of the algorithm.
**post_do** (`float`; default is `.4`): the dropout rate after the main
part of the algorithm.
"""
def __init__(self, num_labels, labels_pad_idx=-100, vec_emb_dim=None,
alphabet_size=0, char_pad_idx=0, rnn_emb_dim=None,
cnn_emb_dim=200, cnn_kernels=[1, 2, 3, 4, 5, 6],
upos_emb_dim=200, upos_num=0, upos_pad_idx=0,
emb_bn=True, emb_do=.2,
final_emb_dim=512, pre_bn=True, pre_do=.5,
lstm_layers=1, lstm_do=0, tran_layers=None, tran_heads=8,
post_bn=True, post_do=.4):
if isinstance(cnn_kernels, Iterable):
cnn_kernels = list(cnn_kernels)
args, kwargs = get_func_params(FeatTaggerModel.__init__, locals())
kwargs_ = {x: y for x, y in kwargs.items() if x not in [
'upos_emb_dim', 'upos_num', 'upos_pad_idx'
]}
if upos_emb_dim:
kwargs_['tag_emb_params'] = {
'dim': upos_emb_dim, 'num': upos_num, 'pad_idx': upos_pad_idx
}
super().__init__(*args, **kwargs_)
setattr(self, CONFIG_ATTR, (args, kwargs))
| 43.327586 | 79 | 0.672503 |
from collections.abc import Iterable
from junky import get_func_params
from mordl.base_tagger_model import BaseTaggerModel
from mordl.defaults import CONFIG_ATTR
class FeatTaggerModel(BaseTaggerModel):
def __init__(self, num_labels, labels_pad_idx=-100, vec_emb_dim=None,
alphabet_size=0, char_pad_idx=0, rnn_emb_dim=None,
cnn_emb_dim=200, cnn_kernels=[1, 2, 3, 4, 5, 6],
upos_emb_dim=200, upos_num=0, upos_pad_idx=0,
emb_bn=True, emb_do=.2,
final_emb_dim=512, pre_bn=True, pre_do=.5,
lstm_layers=1, lstm_do=0, tran_layers=None, tran_heads=8,
post_bn=True, post_do=.4):
if isinstance(cnn_kernels, Iterable):
cnn_kernels = list(cnn_kernels)
args, kwargs = get_func_params(FeatTaggerModel.__init__, locals())
kwargs_ = {x: y for x, y in kwargs.items() if x not in [
'upos_emb_dim', 'upos_num', 'upos_pad_idx'
]}
if upos_emb_dim:
kwargs_['tag_emb_params'] = {
'dim': upos_emb_dim, 'num': upos_num, 'pad_idx': upos_pad_idx
}
super().__init__(*args, **kwargs_)
setattr(self, CONFIG_ATTR, (args, kwargs))
| true | true |
f7f960c892da708be89832a6e7d3ee2cfb58ff29 | 5,775 | py | Python | docs/source/conf.py | 5amessi/camel_tools | 4349f8ccda2cc5d017d3bd5683847cf92bbb1229 | [
"MIT"
] | 211 | 2019-03-26T02:56:16.000Z | 2022-03-26T10:39:47.000Z | docs/source/conf.py | 5amessi/camel_tools | 4349f8ccda2cc5d017d3bd5683847cf92bbb1229 | [
"MIT"
] | 51 | 2019-09-11T09:41:02.000Z | 2022-03-30T06:15:28.000Z | docs/source/conf.py | 5amessi/camel_tools | 4349f8ccda2cc5d017d3bd5683847cf92bbb1229 | [
"MIT"
] | 49 | 2018-12-27T06:58:05.000Z | 2022-03-12T18:21:50.000Z | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Extension Imports -------------------------------------------------------
from recommonmark.parser import CommonMarkParser
# -- Project information -----------------------------------------------------
project = 'camel_tools'
copyright = '2018-2021, New York University Abu Dhabi'
author = 'Ossama W. Obeid'
# The short X.Y version
version = '1.2'
# The full version, including alpha/beta/rc tags
release = '1.2.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.napoleon',
'sphinx.ext.autodoc',
'sphinx.ext.githubpages',
'sphinx.ext.intersphinx'
]
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None)
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# Source parsers
source_parsers = {
'.md': CommonMarkParser,
}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'camel_tools_doc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'camel_tools.tex', 'CAMeL Tools Documentation',
'Ossama W. Obeid', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'camel_tools', 'camel_tools Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'camel_tools', 'camel_tools Documentation',
author, 'camel_tools',
'A suite of Arabic natural language processing tools developed by the '
'CAMeL Lab at New York University Abu Dhabi.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
autodoc_mock_imports = [
'kenlm'
]
| 29.020101 | 79 | 0.645714 |
from recommonmark.parser import CommonMarkParser
project = 'camel_tools'
copyright = '2018-2021, New York University Abu Dhabi'
author = 'Ossama W. Obeid'
version = '1.2'
release = '1.2.0'
extensions = [
'sphinx.ext.napoleon',
'sphinx.ext.autodoc',
'sphinx.ext.githubpages',
'sphinx.ext.intersphinx'
]
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None)
}
templates_path = ['_templates']
source_parsers = {
'.md': CommonMarkParser,
}
source_suffix = ['.rst', '.md']
master_doc = 'index'
language = None
exclude_patterns = []
pygments_style = None
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'camel_tools_doc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'camel_tools.tex', 'CAMeL Tools Documentation',
'Ossama W. Obeid', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'camel_tools', 'camel_tools Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'camel_tools', 'camel_tools Documentation',
author, 'camel_tools',
'A suite of Arabic natural language processing tools developed by the '
'CAMeL Lab at New York University Abu Dhabi.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
autodoc_mock_imports = [
'kenlm'
]
| true | true |
f7f961931e4e2947a74e29289b0e354d789d7bdc | 5,338 | py | Python | generate.py | BookML/stylegan2-ada-pytorch | d4b2afe9c27e3c305b721bc886d2cb5229458eba | [
"BSD-Source-Code"
] | 1,054 | 2020-06-19T01:32:35.000Z | 2022-03-28T20:01:49.000Z | generate.py | BookML/stylegan2-ada-pytorch | d4b2afe9c27e3c305b721bc886d2cb5229458eba | [
"BSD-Source-Code"
] | 88 | 2020-06-20T01:57:42.000Z | 2022-03-31T23:45:04.000Z | generate.py | BookML/stylegan2-ada-pytorch | d4b2afe9c27e3c305b721bc886d2cb5229458eba | [
"BSD-Source-Code"
] | 155 | 2020-06-19T02:45:43.000Z | 2022-03-29T19:53:44.000Z | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Generate images using pretrained network pickle."""
import os
import re
from typing import List, Optional
import click
import dnnlib
import numpy as np
import PIL.Image
import torch
import legacy
#----------------------------------------------------------------------------
def num_range(s: str) -> List[int]:
'''Accept either a comma separated list of numbers 'a,b,c' or a range 'a-c' and return as a list of ints.'''
range_re = re.compile(r'^(\d+)-(\d+)$')
m = range_re.match(s)
if m:
return list(range(int(m.group(1)), int(m.group(2))+1))
vals = s.split(',')
return [int(x) for x in vals]
#----------------------------------------------------------------------------
@click.command()
@click.pass_context
@click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
@click.option('--seeds', type=num_range, help='List of random seeds')
@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True)
@click.option('--class', 'class_idx', type=int, help='Class label (unconditional if not specified)')
@click.option('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True)
@click.option('--projected-w', help='Projection result file', type=str, metavar='FILE')
@click.option('--outdir', help='Where to save the output images', type=str, required=True, metavar='DIR')
def generate_images(
ctx: click.Context,
network_pkl: str,
seeds: Optional[List[int]],
truncation_psi: float,
noise_mode: str,
outdir: str,
class_idx: Optional[int],
projected_w: Optional[str]
):
"""Generate images using pretrained network pickle.
Examples:
\b
# Generate curated MetFaces images without truncation (Fig.10 left)
python generate.py --outdir=out --trunc=1 --seeds=85,265,297,849 \\
--network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metfaces.pkl
\b
# Generate uncurated MetFaces images with truncation (Fig.12 upper left)
python generate.py --outdir=out --trunc=0.7 --seeds=600-605 \\
--network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metfaces.pkl
\b
# Generate class conditional CIFAR-10 images (Fig.17 left, Car)
python generate.py --outdir=out --seeds=0-35 --class=1 \\
--network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/cifar10.pkl
\b
# Render an image from projected W
python generate.py --outdir=out --projected_w=projected_w.npz \\
--network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metfaces.pkl
"""
print('Loading networks from "%s"...' % network_pkl)
device = torch.device('cuda')
with dnnlib.util.open_url(network_pkl) as f:
G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore
os.makedirs(outdir, exist_ok=True)
# Synthesize the result of a W projection.
if projected_w is not None:
if seeds is not None:
print ('warn: --seeds is ignored when using --projected-w')
print(f'Generating images from projected W "{projected_w}"')
ws = np.load(projected_w)['w']
ws = torch.tensor(ws, device=device) # pylint: disable=not-callable
assert ws.shape[1:] == (G.num_ws, G.w_dim)
for idx, w in enumerate(ws):
img = G.synthesis(w.unsqueeze(0), noise_mode=noise_mode)
img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
img = PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(f'{outdir}/proj{idx:02d}.png')
return
if seeds is None:
ctx.fail('--seeds option is required when not using --projected-w')
# Labels.
label = torch.zeros([1, G.c_dim], device=device)
if G.c_dim != 0:
if class_idx is None:
ctx.fail('Must specify class label with --class when using a conditional network')
label[:, class_idx] = 1
else:
if class_idx is not None:
print ('warn: --class=lbl ignored when running on an unconditional network')
# Generate images.
for seed_idx, seed in enumerate(seeds):
print('Generating image for seed %d (%d/%d) ...' % (seed, seed_idx, len(seeds)))
z = torch.from_numpy(np.random.RandomState(seed).randn(1, G.z_dim)).to(device)
img = G(z, label, truncation_psi=truncation_psi, noise_mode=noise_mode)
img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(f'{outdir}/seed{seed:04d}.png')
#----------------------------------------------------------------------------
if __name__ == "__main__":
generate_images() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
| 41.061538 | 132 | 0.630948 |
import os
import re
from typing import List, Optional
import click
import dnnlib
import numpy as np
import PIL.Image
import torch
import legacy
def num_range(s: str) -> List[int]:
range_re = re.compile(r'^(\d+)-(\d+)$')
m = range_re.match(s)
if m:
return list(range(int(m.group(1)), int(m.group(2))+1))
vals = s.split(',')
return [int(x) for x in vals]
@click.command()
@click.pass_context
@click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
@click.option('--seeds', type=num_range, help='List of random seeds')
@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True)
@click.option('--class', 'class_idx', type=int, help='Class label (unconditional if not specified)')
@click.option('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True)
@click.option('--projected-w', help='Projection result file', type=str, metavar='FILE')
@click.option('--outdir', help='Where to save the output images', type=str, required=True, metavar='DIR')
def generate_images(
ctx: click.Context,
network_pkl: str,
seeds: Optional[List[int]],
truncation_psi: float,
noise_mode: str,
outdir: str,
class_idx: Optional[int],
projected_w: Optional[str]
):
print('Loading networks from "%s"...' % network_pkl)
device = torch.device('cuda')
with dnnlib.util.open_url(network_pkl) as f:
G = legacy.load_network_pkl(f)['G_ema'].to(device)
os.makedirs(outdir, exist_ok=True)
if projected_w is not None:
if seeds is not None:
print ('warn: --seeds is ignored when using --projected-w')
print(f'Generating images from projected W "{projected_w}"')
ws = np.load(projected_w)['w']
ws = torch.tensor(ws, device=device)
assert ws.shape[1:] == (G.num_ws, G.w_dim)
for idx, w in enumerate(ws):
img = G.synthesis(w.unsqueeze(0), noise_mode=noise_mode)
img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
img = PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(f'{outdir}/proj{idx:02d}.png')
return
if seeds is None:
ctx.fail('--seeds option is required when not using --projected-w')
label = torch.zeros([1, G.c_dim], device=device)
if G.c_dim != 0:
if class_idx is None:
ctx.fail('Must specify class label with --class when using a conditional network')
label[:, class_idx] = 1
else:
if class_idx is not None:
print ('warn: --class=lbl ignored when running on an unconditional network')
for seed_idx, seed in enumerate(seeds):
print('Generating image for seed %d (%d/%d) ...' % (seed, seed_idx, len(seeds)))
z = torch.from_numpy(np.random.RandomState(seed).randn(1, G.z_dim)).to(device)
img = G(z, label, truncation_psi=truncation_psi, noise_mode=noise_mode)
img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(f'{outdir}/seed{seed:04d}.png')
if __name__ == "__main__":
generate_images()
| true | true |
f7f962fa41091aafdd1ac460ba5ee9435454c33e | 7,832 | py | Python | contrib/bitrpc/bitrpc.py | bitfawkes/eGold | 2190beebd747a8ba4e34d39a7e945c72899f7666 | [
"MIT"
] | null | null | null | contrib/bitrpc/bitrpc.py | bitfawkes/eGold | 2190beebd747a8ba4e34d39a7e945c72899f7666 | [
"MIT"
] | null | null | null | contrib/bitrpc/bitrpc.py | bitfawkes/eGold | 2190beebd747a8ba4e34d39a7e945c72899f7666 | [
"MIT"
] | null | null | null | from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a eGold address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a eGold address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| 24.098462 | 79 | 0.668029 | from jsonrpc import ServiceProxy
import sys
import string
rpcuser = ""
rpcpass = ""
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a eGold address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a eGold address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| false | true |
f7f9630dcfbb72ebf97d11826e56c1f315b8b6ab | 16,981 | py | Python | src/dispatch/plugins/dispatch_slack/events.py | KATO-Hiro/dispatch | d41f42c0ecf47c5c9012fc6d22046a8124bb7596 | [
"Apache-2.0"
] | null | null | null | src/dispatch/plugins/dispatch_slack/events.py | KATO-Hiro/dispatch | d41f42c0ecf47c5c9012fc6d22046a8124bb7596 | [
"Apache-2.0"
] | null | null | null | src/dispatch/plugins/dispatch_slack/events.py | KATO-Hiro/dispatch | d41f42c0ecf47c5c9012fc6d22046a8124bb7596 | [
"Apache-2.0"
] | null | null | null | import pytz
import logging
import datetime
from typing import List
from pydantic import BaseModel
from sqlalchemy import func
from sqlalchemy.sql.functions import user
from dispatch.nlp import build_phrase_matcher, build_term_vocab, extract_terms_from_text
from dispatch.conversation import service as conversation_service
from dispatch.event import service as event_service
from dispatch.incident import flows as incident_flows
from dispatch.incident import service as incident_service
from dispatch.plugins.dispatch_slack.config import (
SlackConversationConfiguration,
)
from dispatch.tag import service as tag_service
from dispatch.individual import service as individual_service
from dispatch.participant import service as participant_service
from dispatch.participant_role.models import ParticipantRoleType
from dispatch.plugins.dispatch_slack import service as dispatch_slack_service
from dispatch.plugin import service as plugin_service
from dispatch.monitor import service as monitor_service
from dispatch.conversation.enums import ConversationButtonActions
from dispatch.tag.models import Tag
from .decorators import slack_background_task, get_organization_scope_from_channel_id
from .service import get_user_email
from .models import MonitorButton
log = logging.getLogger(__name__)
class EventBodyItem(BaseModel):
    """Body item of the Slack event."""

    # All fields are optional: Slack only includes them for some event types.
    type: str = None  # item type, e.g. "message"
    channel: str = None  # channel id the item lives in
    ts: str = None  # Slack timestamp of the item (string, e.g. "1610000000.000100")
class EventBody(BaseModel):
    """Body of the Slack event."""

    # Every field except ``type`` is optional; which ones are populated
    # depends on the concrete Slack event being delivered.
    channel: str = None
    channel_id: str = None
    channel_type: str = None
    deleted_ts: str = None
    event_ts: str = None
    thread_ts: str = None  # present only when the message is a thread reply
    file_id: str = None
    hidden: bool = None
    inviter: str = None  # user id of the inviter (member_joined_channel events)
    item: EventBodyItem = None  # nested item payload (e.g. for reaction_added)
    item_user: str = None
    reaction: str = None  # emoji name for reaction_added events
    subtype: str = None  # e.g. "channel_join", "group_join"
    team: str = None
    text: str = None  # raw message text
    type: str  # event type, used for handler dispatch
    user: str = None
    user_id: str = None
class EventEnvelope(BaseModel):
    """Envelope of the Slack event."""

    api_app_id: str = None
    authed_users: List[str] = []
    challenge: str = None  # set only for Slack's URL-verification handshake
    enterprise_id: str = None
    event: EventBody = None  # the actual event payload
    event_id: str = None
    event_time: int = None
    team_id: str = None
    token: str = None
    type: str  # envelope type, e.g. "event_callback" or "url_verification"
def get_channel_id_from_event(event: "EventEnvelope"):
    """Returns the channel id from the Slack event.

    Slack places the channel id in different fields depending on the event
    type, so each candidate location is probed in order. Returns an empty
    string when no channel id can be determined.
    """
    if event.event.channel_id:
        return event.event.channel_id
    if event.event.channel:
        return event.event.channel
    # ``item`` defaults to None; guard before dereferencing, otherwise events
    # without an item payload raise AttributeError instead of returning "".
    if event.event.item and event.event.item.channel:
        return event.event.item.channel
    return ""
def event_functions(event: "EventEnvelope"):
    """Routes a Slack event type to the list of handlers that process it."""
    handlers = {
        "member_joined_channel": [member_joined_channel],
        "member_left_channel": [member_left_channel],
        "message": [after_hours, ban_threads_warning, message_tagging, message_monitor],
        "message.groups": [],
        "message.im": [],
        "reaction_added": [handle_reaction_added_event],
    }
    # unknown event types simply get no handlers
    return handlers.get(event.event.type, [])
async def handle_slack_event(*, config, client, event, background_tasks):
    """Handles slack event message."""
    user_id = event.event.user
    channel_id = get_channel_id_from_event(event)

    if not (user_id and channel_id):
        return {"ok": ""}

    db_session = get_organization_scope_from_channel_id(channel_id=channel_id)
    if not db_session:
        log.info(
            f"Unable to determine organization associated with channel id. ChannelId: {channel_id}"
        )
        return {"ok": ""}

    conversation = conversation_service.get_by_channel_id_ignoring_channel_type(
        db_session=db_session, channel_id=channel_id
    )
    if conversation and dispatch_slack_service.is_user(config, user_id):
        # resolve the Slack user id to an email address
        user_email = await dispatch_slack_service.get_user_email_async(client, user_id)

        # bump the participant's activity counter for this incident
        participant = participant_service.get_by_incident_id_and_email(
            db_session=db_session, incident_id=conversation.incident_id, email=user_email
        )
        participant.activity = (participant.activity or 0) + 1
        db_session.commit()

        # run the matching event handlers outside the request/response cycle
        for handler in event_functions(event):
            background_tasks.add_task(
                handler,
                config=config,
                user_id=user_id,
                user_email=user_email,
                channel_id=channel_id,
                incident_id=conversation.incident_id,
                event=event,
            )

    return {"ok": ""}
@slack_background_task
def handle_reaction_added_event(
    config: SlackConversationConfiguration,
    user_id: str,
    user_email: str,
    channel_id: str,
    incident_id: int,
    event: EventEnvelope = None,
    db_session=None,
    slack_client=None,
):
    """Handles an event where a reaction is added to a message."""
    if event.event.reaction != config.timeline_event_reaction:
        return

    conversation_id = event.event.item.channel
    message_ts = event.event.item.ts
    message_ts_utc = datetime.datetime.utcfromtimestamp(float(message_ts))

    # fetch exactly the message the reaction was attached to
    response = dispatch_slack_service.list_conversation_messages(
        slack_client, conversation_id, latest=message_ts, limit=1, inclusive=1
    )
    message = response["messages"][0]
    message_text = message["text"]
    message_sender_id = message["user"]

    incident = incident_service.get(db_session=db_session, incident_id=incident_id)

    # resolve the message sender to a Dispatch individual
    message_sender_email = get_user_email(client=slack_client, user_id=message_sender_id)
    individual = individual_service.get_by_email_and_project(
        db_session=db_session, email=message_sender_email, project_id=incident.project.id
    )

    # record the quoted message on the incident timeline
    event_service.log(
        db_session=db_session,
        source="Slack Plugin - Conversation Management",
        description=f'"{message_text}," said {individual.name}',
        incident_id=incident_id,
        individual_id=individual.id,
        started_at=message_ts_utc,
    )
def is_business_hours(commander_tz: str):
    """Determines if it's currently office hours where the incident commander is located."""
    local_now = datetime.datetime.now(pytz.timezone(commander_tz))
    on_weekday = local_now.weekday() < 5  # Monday (0) through Friday (4)
    return on_weekday and 9 <= local_now.hour < 17
@slack_background_task
def after_hours(
    config: SlackConversationConfiguration,
    user_id: str,
    user_email: str,
    channel_id: str,
    incident_id: int,
    event: EventEnvelope = None,
    db_session=None,
    slack_client=None,
):
    """Notifies the user that this incident is current in after hours mode."""
    # channel/group join notifications are system messages; ignore them
    if event.event.subtype in ["channel_join", "group_join"]:
        return

    incident = incident_service.get(db_session=db_session, incident_id=incident_id)

    # look up the commander's timezone via their Slack profile
    commander_info = dispatch_slack_service.get_user_info_by_email(
        slack_client, email=incident.commander.individual.email
    )
    commander_tz = commander_info["tz"]

    if is_business_hours(commander_tz):
        return

    notice = (
        f"Responses may be delayed. The current incident priority is *{incident.incident_priority.name}*"
        f" and your message was sent outside of the Incident Commander's working hours (Weekdays, 9am-5pm, {commander_tz} timezone)."
    )
    blocks = [{"type": "section", "text": {"type": "mrkdwn", "text": notice}}]

    participant = participant_service.get_by_incident_id_and_email(
        db_session=db_session, incident_id=incident_id, email=user_email
    )
    # only send the ephemeral warning once per participant
    if not participant.after_hours_notification:
        dispatch_slack_service.send_ephemeral_message(
            slack_client, channel_id, user_id, "", blocks=blocks
        )
        participant.after_hours_notification = True
        db_session.add(participant)
        db_session.commit()
@slack_background_task
def member_joined_channel(
    config: SlackConversationConfiguration,
    user_id: str,
    user_email: str,
    channel_id: str,
    incident_id: int,
    event: EventEnvelope,
    db_session=None,
    slack_client=None,
):
    """Handles the member_joined_channel slack event."""
    participant = incident_flows.incident_add_or_reactivate_participant_flow(
        user_email=user_email, incident_id=incident_id, db_session=db_session
    )

    if event.event.inviter:
        # record who added the participant and why
        if dispatch_slack_service.is_user(config, event.event.inviter):
            inviter_email = get_user_email(client=slack_client, user_id=event.event.inviter)
            added_by_participant = participant_service.get_by_incident_id_and_email(
                db_session=db_session, incident_id=incident_id, email=inviter_email
            )
            participant.added_by = added_by_participant
            participant.added_reason = event.event.text
        else:
            # the inviter is not a real user (e.g. an app); attribute the add
            # to the incident commander as a sensible default
            added_by_participant = participant_service.get_by_incident_id_and_role(
                db_session=db_session,
                incident_id=incident_id,
                role=ParticipantRoleType.incident_commander,
            )
            participant.added_by = added_by_participant
            participant.added_reason = (
                f"Participant added by {added_by_participant.individual.name}"
            )

        db_session.add(participant)
        db_session.commit()
@slack_background_task
def member_left_channel(
    config: SlackConversationConfiguration,
    user_id: str,
    user_email: str,
    channel_id: str,
    incident_id: int,
    event: EventEnvelope,
    db_session=None,
    slack_client=None,
):
    """Handles the member_left_channel Slack event."""
    # leaving the incident channel deactivates the participant
    incident_flows.incident_remove_participant_flow(
        user_email, incident_id, db_session=db_session
    )
@slack_background_task
def ban_threads_warning(
    config: SlackConversationConfiguration,
    user_id: str,
    user_email: str,
    channel_id: str,
    incident_id: int,
    event: EventEnvelope = None,
    db_session=None,
    slack_client=None,
):
    """Sends the user an ephemeral message if they use threads."""
    if not config.ban_threads:
        return
    if not event.event.thread_ts:
        return

    # We would prefer to check `subtype == "message_replied"`, but the Events
    # API currently omits that subtype (see
    # https://api.slack.com/events/message/message_replied), so a present
    # thread_ts is the reliable signal that this message is a thread reply.
    message = "Please refrain from using threads in incident related channels. Threads make it harder for incident participants to maintain context."
    dispatch_slack_service.send_ephemeral_message(
        slack_client,
        channel_id,
        user_id,
        message,
        thread_ts=event.event.thread_ts,
    )
@slack_background_task
def message_tagging(
    config: SlackConversationConfiguration,
    user_id: str,
    user_email: str,
    channel_id: str,
    incident_id: int,
    event: EventEnvelope = None,
    db_session=None,
    slack_client=None,
):
    """Looks for incident tags in incident messages."""
    text = event.event.text
    incident = incident_service.get(db_session=db_session, incident_id=incident_id)

    # build a phrase matcher over the project's discoverable tag names
    tags = tag_service.get_all(db_session=db_session, project_id=incident.project.id).all()
    tag_strings = [t.name.lower() for t in tags if t.discoverable]
    matcher = build_phrase_matcher("dispatch-tag", build_term_vocab(tag_strings))

    extracted_tags = list(set(extract_terms_from_text(text, matcher)))

    # case-insensitive lookup of the matched tag rows
    matched_tags = (
        db_session.query(Tag)
        .filter(func.upper(Tag.name).in_([func.upper(t) for t in extracted_tags]))
        .all()
    )

    incident.tags.extend(matched_tags)
    db_session.commit()
@slack_background_task
def message_monitor(
    config: SlackConversationConfiguration,
    user_id: str,
    user_email: str,
    channel_id: str,
    incident_id: int,
    event: EventEnvelope = None,
    db_session=None,
    slack_client=None,
):
    """Looks strings that are available for monitoring (usually links)."""
    text = event.event.text
    incident = incident_service.get(db_session=db_session, incident_id=incident_id)

    plugins = plugin_service.get_active_instances(
        db_session=db_session, project_id=incident.project.id, plugin_type="monitor"
    )

    for plugin in plugins:
        for matcher in plugin.instance.get_matchers():
            for match in matcher.finditer(text):
                match_data = match.groupdict()

                # an existing monitor row means the user already made a choice
                # (monitor or ignore) for this link; stay silent
                if monitor_service.get_by_weblink(
                    db_session=db_session, weblink=match_data["weblink"]
                ):
                    continue

                current_status = plugin.instance.get_match_status(match_data)
                if not current_status:
                    continue

                status_text = "".join(
                    f"*{k.title()}*:\n{v.title()}\n" for k, v in current_status.items()
                )

                # build the Monitor / Ignore action buttons
                elements = []
                for action_type, label, style in (
                    ("monitor", "Monitor", "primary"),
                    ("ignore", "Ignore", "danger"),
                ):
                    button = MonitorButton(
                        incident_id=incident.id,
                        plugin_instance_id=plugin.id,
                        organization=incident.project.organization.slug,
                        weblink=match_data["weblink"],
                        action_type=action_type,
                    )
                    elements.append(
                        {
                            "type": "button",
                            "text": {"type": "plain_text", "emoji": True, "text": label},
                            "style": style,
                            "value": button.json(),
                        }
                    )

                blocks = [
                    {
                        "type": "section",
                        "text": {
                            "type": "mrkdwn",
                            "text": f"Hi! Dispatch is able to help track the status of: \n {match_data['weblink']} \n\n Would you like for changes in it's status to be propagated to this incident channel?",
                        },
                    },
                    {
                        "type": "section",
                        "text": {"type": "mrkdwn", "text": status_text},
                    },
                    {
                        "type": "actions",
                        "block_id": f"{ConversationButtonActions.monitor_link}",
                        "elements": elements,
                    },
                ]

                dispatch_slack_service.send_ephemeral_message(
                    slack_client, channel_id, user_id, "", blocks=blocks
                )
| 35.599581 | 210 | 0.611271 | import pytz
import logging
import datetime
from typing import List
from pydantic import BaseModel
from sqlalchemy import func
from sqlalchemy.sql.functions import user
from dispatch.nlp import build_phrase_matcher, build_term_vocab, extract_terms_from_text
from dispatch.conversation import service as conversation_service
from dispatch.event import service as event_service
from dispatch.incident import flows as incident_flows
from dispatch.incident import service as incident_service
from dispatch.plugins.dispatch_slack.config import (
SlackConversationConfiguration,
)
from dispatch.tag import service as tag_service
from dispatch.individual import service as individual_service
from dispatch.participant import service as participant_service
from dispatch.participant_role.models import ParticipantRoleType
from dispatch.plugins.dispatch_slack import service as dispatch_slack_service
from dispatch.plugin import service as plugin_service
from dispatch.monitor import service as monitor_service
from dispatch.conversation.enums import ConversationButtonActions
from dispatch.tag.models import Tag
from .decorators import slack_background_task, get_organization_scope_from_channel_id
from .service import get_user_email
from .models import MonitorButton
log = logging.getLogger(__name__)
class EventBodyItem(BaseModel):
type: str = None
channel: str = None
ts: str = None
class EventBody(BaseModel):
channel: str = None
channel_id: str = None
channel_type: str = None
deleted_ts: str = None
event_ts: str = None
thread_ts: str = None
file_id: str = None
hidden: bool = None
inviter: str = None
item: EventBodyItem = None
item_user: str = None
reaction: str = None
subtype: str = None
team: str = None
text: str = None
type: str
user: str = None
user_id: str = None
class EventEnvelope(BaseModel):
api_app_id: str = None
authed_users: List[str] = []
challenge: str = None
enterprise_id: str = None
event: EventBody = None
event_id: str = None
event_time: int = None
team_id: str = None
token: str = None
type: str
def get_channel_id_from_event(event: EventEnvelope):
channel_id = ""
if event.event.channel_id:
return event.event.channel_id
if event.event.channel:
return event.event.channel
if event.event.item.channel:
return event.event.item.channel
return channel_id
def event_functions(event: EventEnvelope):
event_mappings = {
"member_joined_channel": [member_joined_channel],
"member_left_channel": [member_left_channel],
"message": [after_hours, ban_threads_warning, message_tagging, message_monitor],
"message.groups": [],
"message.im": [],
"reaction_added": [handle_reaction_added_event],
}
return event_mappings.get(event.event.type, [])
async def handle_slack_event(*, config, client, event, background_tasks):
user_id = event.event.user
channel_id = get_channel_id_from_event(event)
if user_id and channel_id:
db_session = get_organization_scope_from_channel_id(channel_id=channel_id)
if not db_session:
log.info(
f"Unable to determine organization associated with channel id. ChannelId: {channel_id}"
)
return {"ok": ""}
conversation = conversation_service.get_by_channel_id_ignoring_channel_type(
db_session=db_session, channel_id=channel_id
)
if conversation and dispatch_slack_service.is_user(config, user_id):
user_email = await dispatch_slack_service.get_user_email_async(client, user_id)
# increment activity for user
participant = participant_service.get_by_incident_id_and_email(
db_session=db_session, incident_id=conversation.incident_id, email=user_email
)
if participant.activity:
participant.activity += 1
else:
participant.activity = 1
db_session.commit()
# Dispatch event functions to be executed in the background
for f in event_functions(event):
background_tasks.add_task(
f,
config=config,
user_id=user_id,
user_email=user_email,
channel_id=channel_id,
incident_id=conversation.incident_id,
event=event,
)
return {"ok": ""}
@slack_background_task
def handle_reaction_added_event(
    config: SlackConversationConfiguration,
    user_id: str,
    user_email: str,
    channel_id: str,
    incident_id: int,
    event: EventEnvelope = None,
    db_session=None,
    slack_client=None,
):
    """Record a message as an incident timeline event.

    Triggered when any reaction is added; only acts when the reaction
    matches the conversation's configured ``timeline_event_reaction``.
    The reacted-to message is fetched back from Slack and logged against
    the incident, attributed to the original message author.
    """
    reaction = event.event.reaction
    if reaction == config.timeline_event_reaction:
        conversation_id = event.event.item.channel
        message_ts = event.event.item.ts
        # Slack timestamps are epoch seconds encoded as strings.
        message_ts_utc = datetime.datetime.utcfromtimestamp(float(message_ts))
        # we fetch the message information
        response = dispatch_slack_service.list_conversation_messages(
            slack_client, conversation_id, latest=message_ts, limit=1, inclusive=1
        )
        message_text = response["messages"][0]["text"]
        message_sender_id = response["messages"][0]["user"]
        # we fetch the incident
        incident = incident_service.get(db_session=db_session, incident_id=incident_id)
        # we fetch the individual who sent the message
        message_sender_email = get_user_email(client=slack_client, user_id=message_sender_id)
        individual = individual_service.get_by_email_and_project(
            db_session=db_session, email=message_sender_email, project_id=incident.project.id
        )
        # we log the event
        event_service.log(
            db_session=db_session,
            source="Slack Plugin - Conversation Management",
            description=f'"{message_text}," said {individual.name}',
            incident_id=incident_id,
            individual_id=individual.id,
            started_at=message_ts_utc,
        )
def is_business_hours(commander_tz: str):
    """Return True when it is currently a weekday between 09:00 and 17:00
    in the given timezone (the incident commander's working hours).
    """
    local_now = datetime.datetime.now(pytz.timezone(commander_tz))
    if local_now.weekday() in (5, 6):  # Saturday or Sunday
        return False
    return 9 <= local_now.hour < 17
@slack_background_task
def after_hours(
    config: SlackConversationConfiguration,
    user_id: str,
    user_email: str,
    channel_id: str,
    incident_id: int,
    event: EventEnvelope = None,
    db_session=None,
    slack_client=None,
):
    """Warn a message author, once, when they post outside the incident
    commander's business hours (weekdays 9am-5pm in the commander's tz).

    The warning is an ephemeral message; ``participant.after_hours_notification``
    tracks that it has been sent so the user is not nagged repeatedly.
    """
    # we ignore user channel and group join messages
    if event.event.subtype in ["channel_join", "group_join"]:
        return
    incident = incident_service.get(db_session=db_session, incident_id=incident_id)
    # get their timezone from slack
    commander_info = dispatch_slack_service.get_user_info_by_email(
        slack_client, email=incident.commander.individual.email
    )
    commander_tz = commander_info["tz"]
    if not is_business_hours(commander_tz):
        # send ephemeral message
        blocks = [
            {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": (
                        (
                            f"Responses may be delayed. The current incident priority is *{incident.incident_priority.name}*"
                            f" and your message was sent outside of the Incident Commander's working hours (Weekdays, 9am-5pm, {commander_tz} timezone)."
                        )
                    ),
                },
            }
        ]
        participant = participant_service.get_by_incident_id_and_email(
            db_session=db_session, incident_id=incident_id, email=user_email
        )
        # Only notify each participant once per incident.
        if not participant.after_hours_notification:
            dispatch_slack_service.send_ephemeral_message(
                slack_client, channel_id, user_id, "", blocks=blocks
            )
            participant.after_hours_notification = True
            db_session.add(participant)
            db_session.commit()
@slack_background_task
def member_joined_channel(
    config: SlackConversationConfiguration,
    user_id: str,
    user_email: str,
    channel_id: str,
    incident_id: int,
    event: EventEnvelope,
    db_session=None,
    slack_client=None,
):
    """Add (or reactivate) the joining user as an incident participant and
    record who invited them and why.
    """
    participant = incident_flows.incident_add_or_reactivate_participant_flow(
        user_email=user_email, incident_id=incident_id, db_session=db_session
    )
    if event.event.inviter:
        # Inviter may be a bot/app rather than a real user.
        if not dispatch_slack_service.is_user(config, event.event.inviter):
            # we default to the incident commander when we don't know how the user was added
            added_by_participant = participant_service.get_by_incident_id_and_role(
                db_session=db_session,
                incident_id=incident_id,
                role=ParticipantRoleType.incident_commander,
            )
            participant.added_by = added_by_participant
            participant.added_reason = (
                f"Participant added by {added_by_participant.individual.name}"
            )
        else:
            inviter_email = get_user_email(client=slack_client, user_id=event.event.inviter)
            added_by_participant = participant_service.get_by_incident_id_and_email(
                db_session=db_session, incident_id=incident_id, email=inviter_email
            )
            participant.added_by = added_by_participant
            # Use the invite message itself as the reason.
            participant.added_reason = event.event.text
    db_session.add(participant)
    db_session.commit()
@slack_background_task
def member_left_channel(
    config: SlackConversationConfiguration,
    user_id: str,
    user_email: str,
    channel_id: str,
    incident_id: int,
    event: EventEnvelope,
    db_session=None,
    slack_client=None,
):
    """Run the participant-removal flow for a user who left the incident channel."""
    incident_flows.incident_remove_participant_flow(user_email, incident_id, db_session=db_session)
@slack_background_task
def ban_threads_warning(
    config: SlackConversationConfiguration,
    user_id: str,
    user_email: str,
    channel_id: str,
    incident_id: int,
    event: EventEnvelope = None,
    db_session=None,
    slack_client=None,
):
    """Nudge the author of a threaded reply not to use threads in incident
    channels. Only active when the conversation config sets ``ban_threads``.
    """
    if not config.ban_threads:
        return
    thread_ts = event.event.thread_ts
    if not thread_ts:
        # Top-level message, not a threaded reply -- nothing to warn about.
        return
    dispatch_slack_service.send_ephemeral_message(
        slack_client,
        channel_id,
        user_id,
        "Please refrain from using threads in incident related channels. Threads make it harder for incident participants to maintain context.",
        thread_ts=thread_ts,
    )
@slack_background_task
def message_tagging(
    config: SlackConversationConfiguration,
    user_id: str,
    user_email: str,
    channel_id: str,
    incident_id: int,
    event: EventEnvelope = None,
    db_session=None,
    slack_client=None,
):
    """Scan a channel message for discoverable tag names and attach any
    matches to the incident.

    Matching is done with a phrase matcher built from the project's
    discoverable tags; comparison against stored tags is case-insensitive.
    """
    text = event.event.text
    incident = incident_service.get(db_session=db_session, incident_id=incident_id)
    tags = tag_service.get_all(db_session=db_session, project_id=incident.project.id).all()
    tag_strings = [t.name.lower() for t in tags if t.discoverable]
    phrases = build_term_vocab(tag_strings)
    matcher = build_phrase_matcher("dispatch-tag", phrases)
    # set() dedupes repeated mentions of the same tag within one message.
    extracted_tags = list(set(extract_terms_from_text(text, matcher)))
    matched_tags = (
        db_session.query(Tag)
        .filter(func.upper(Tag.name).in_([func.upper(t) for t in extracted_tags]))
        .all()
    )
    incident.tags.extend(matched_tags)
    db_session.commit()
@slack_background_task
def message_monitor(
    config: SlackConversationConfiguration,
    user_id: str,
    user_email: str,
    channel_id: str,
    incident_id: int,
    event: EventEnvelope = None,
    db_session=None,
    slack_client=None,
):
    """Offer to track links mentioned in an incident-channel message.

    For every active "monitor" plugin, run the plugin's regex matchers over
    the message text. For each matched weblink that is not already being
    monitored, fetch its current status and send the author an ephemeral
    prompt with Monitor / Ignore buttons.
    """
    text = event.event.text
    incident = incident_service.get(db_session=db_session, incident_id=incident_id)
    plugins = plugin_service.get_active_instances(
        db_session=db_session, project_id=incident.project.id, plugin_type="monitor"
    )
    for p in plugins:
        for matcher in p.instance.get_matchers():
            for match in matcher.finditer(text):
                match_data = match.groupdict()
                monitor = monitor_service.get_by_weblink(
                    db_session=db_session, weblink=match_data["weblink"]
                )
                # Already monitored (or previously ignored) -- don't prompt again.
                if monitor:
                    continue
                current_status = p.instance.get_match_status(match_data)
                if current_status:
                    # Render the status dict as a mrkdwn key/value list.
                    status_text = ""
                    for k, v in current_status.items():
                        status_text += f"*{k.title()}*:\n{v.title()}\n"
                    monitor_button = MonitorButton(
                        incident_id=incident.id,
                        plugin_instance_id=p.id,
                        organization=incident.project.organization.slug,
                        weblink=match_data["weblink"],
                        action_type="monitor",
                    )
                    ignore_button = MonitorButton(
                        incident_id=incident.id,
                        plugin_instance_id=p.id,
                        organization=incident.project.organization.slug,
                        weblink=match_data["weblink"],
                        action_type="ignore",
                    )
                    blocks = [
                        {
                            "type": "section",
                            "text": {
                                "type": "mrkdwn",
                                "text": f"Hi! Dispatch is able to help track the status of: \n {match_data['weblink']} \n\n Would you like for changes in it's status to be propagated to this incident channel?",
                            },
                        },
                        {
                            "type": "section",
                            "text": {
                                "type": "mrkdwn",
                                "text": status_text,
                            },
                        },
                        {
                            "type": "actions",
                            "block_id": f"{ConversationButtonActions.monitor_link}",
                            "elements": [
                                {
                                    "type": "button",
                                    "text": {
                                        "type": "plain_text",
                                        "emoji": True,
                                        "text": "Monitor",
                                    },
                                    "style": "primary",
                                    "value": monitor_button.json(),
                                },
                                {
                                    "type": "button",
                                    "text": {"type": "plain_text", "emoji": True, "text": "Ignore"},
                                    "style": "danger",
                                    "value": ignore_button.json(),
                                },
                            ],
                        },
                    ]
                    # Ephemeral: only the message author sees the prompt.
                    dispatch_slack_service.send_ephemeral_message(
                        slack_client, channel_id, user_id, "", blocks=blocks
                    )
| true | true |
f7f96352842d13a838c3183d27862860ba1a89c1 | 568 | py | Python | bdc_collection_builder/blueprint.py | rodolfolotte/bdc-collection-builder | 62583f6c25bca79e7e1b5503bc6308298838c877 | [
"MIT"
] | null | null | null | bdc_collection_builder/blueprint.py | rodolfolotte/bdc-collection-builder | 62583f6c25bca79e7e1b5503bc6308298838c877 | [
"MIT"
] | null | null | null | bdc_collection_builder/blueprint.py | rodolfolotte/bdc-collection-builder | 62583f6c25bca79e7e1b5503bc6308298838c877 | [
"MIT"
] | null | null | null | #
# This file is part of Brazil Data Cube Collection Builder.
# Copyright (C) 2019-2020 INPE.
#
# Brazil Data Cube Collection Builder is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
#
"""Define Brazil Data Cube Scripts Blueprint strategy."""
from flask import Blueprint
from flask_restplus import Api
from .collections.controller import api as radcor_ns
# All collection-builder endpoints are exposed under the /api prefix.
bp = Blueprint('bdc_collection_builder', __name__, url_prefix='/api')
# doc=False turns off the flask-restplus generated Swagger UI page.
api = Api(bp, doc=False)
api.add_namespace(radcor_ns)
| 27.047619 | 96 | 0.771127 |
from flask import Blueprint
from flask_restplus import Api
from .collections.controller import api as radcor_ns
bp = Blueprint('bdc_collection_builder', __name__, url_prefix='/api')
api = Api(bp, doc=False)
api.add_namespace(radcor_ns)
| true | true |
f7f968fe5bc66e4b54b01da42710d70c4059ce43 | 522 | py | Python | things/urls.py | kylehoac/DjangoX_Lab | ec4f5c3e595c7d91aeeddfdcb8cad40243d91758 | [
"MIT"
] | null | null | null | things/urls.py | kylehoac/DjangoX_Lab | ec4f5c3e595c7d91aeeddfdcb8cad40243d91758 | [
"MIT"
] | null | null | null | things/urls.py | kylehoac/DjangoX_Lab | ec4f5c3e595c7d91aeeddfdcb8cad40243d91758 | [
"MIT"
] | null | null | null | from django.urls import path
from .views import (
ThingListView,
ThingDetailView,
ThingCreateView,
ThingUpdateView,
ThingDeleteView
)
# CRUD routes for the Thing views; detail/update/delete address a record
# by its integer primary key.
urlpatterns = [
    path('', ThingListView.as_view(), name='thing_list'),
    path('<int:pk>/',ThingDetailView.as_view(), name='thing_detail'),
    path('create/',ThingCreateView.as_view(), name='thing_create'),
    path('<int:pk>/update/',ThingUpdateView.as_view(), name='thing_update'),
    path('<int:pk>/delete/',ThingDeleteView.as_view(), name='thing_delete'),
] | 32.625 | 76 | 0.693487 | from django.urls import path
from .views import (
ThingListView,
ThingDetailView,
ThingCreateView,
ThingUpdateView,
ThingDeleteView
)
urlpatterns = [
path('', ThingListView.as_view(), name='thing_list'),
path('<int:pk>/',ThingDetailView.as_view(), name='thing_detail'),
path('create/',ThingCreateView.as_view(), name='thing_create'),
path('<int:pk>/update/',ThingUpdateView.as_view(), name='thing_update'),
path('<int:pk>/delete/',ThingDeleteView.as_view(), name='thing_delete'),
] | true | true |
f7f969c00bb76b4b3919c34f0a9081ad3cbee407 | 5,080 | py | Python | tests/resources/serializers/test_csl_serializer.py | utnapischtim/invenio-rdm-records | 436bd2f4f6a75713db87ebf19d9cb962176d3899 | [
"MIT"
] | 10 | 2020-01-17T10:13:09.000Z | 2022-03-17T10:14:41.000Z | tests/resources/serializers/test_csl_serializer.py | utnapischtim/invenio-rdm-records | 436bd2f4f6a75713db87ebf19d9cb962176d3899 | [
"MIT"
] | 570 | 2019-08-15T16:35:25.000Z | 2022-03-31T13:46:17.000Z | tests/resources/serializers/test_csl_serializer.py | utnapischtim/invenio-rdm-records | 436bd2f4f6a75713db87ebf19d9cb962176d3899 | [
"MIT"
] | 57 | 2019-09-04T09:25:29.000Z | 2022-03-30T19:32:55.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) 2021 CERN.
#
# Invenio-RDM-Records is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Resources serializers tests."""
from copy import deepcopy
from citeproc_styles import get_style_filepath
from invenio_rdm_records.proxies import current_rdm_records
from invenio_rdm_records.records.api import RDMRecord
from invenio_rdm_records.resources.serializers import CSLJSONSerializer, \
StringCitationSerializer
from invenio_rdm_records.resources.serializers.csl import get_citation_string
from invenio_rdm_records.resources.serializers.csl.schema import CSLJSONSchema
def test_csl_json_serializer(running_app, full_record):
    """Test JSON CSL Serializer."""
    # if the record is created this field will be present
    full_record["id"] = "12345-abcde"
    expected_data = {
        "publisher": "InvenioRDM",
        "DOI": "10.5281/inveniordm.1234",
        "language": "dan",
        "title": "InvenioRDM",
        "issued": {"date-parts": [["2018"], ["2020", "09"]]},
        "abstract": "A description with HTML tags",
        "author": [
            {
                "family": "Nielsen",
                "given": "Lars Holm",
            }
        ],
        "note": "Funding by European Commission ROR 1234.",
        "version": "v1.0",
        "type": "graphic",
        "id": "12345-abcde",
    }
    serializer = CSLJSONSerializer()
    serialized_record = serializer.dump_one(full_record)
    assert serialized_record == expected_data
    # An unparsable publication date must drop the "issued" field instead of
    # failing. The original test built these fixtures but never re-serialized
    # them, so the branch was effectively dead.
    rec_wrong_date = deepcopy(full_record)
    rec_wrong_date["metadata"]["publication_date"] = "wrong"
    expected = deepcopy(expected_data)
    del expected["issued"]  # missing when the date cannot be parsed
    serialized_wrong_date = serializer.dump_one(rec_wrong_date)
    assert serialized_wrong_date == expected
def test_citation_string_serializer_records_list(
    running_app,
    client,
    es_clear,
    minimal_record,
    superuser_identity,
):
    """Test Citation String Serializer for a list of records."""
    service = current_rdm_records.records_service
    default_style = StringCitationSerializer._default_style
    default_locale = StringCitationSerializer._default_locale
    headers = {"Accept": "text/x-bibliography"}
    expected_data = []
    # Publish three records and precompute the citation string each one
    # should yield under the default style/locale.
    for _ in range(3):
        draft = service.create(superuser_identity, minimal_record)
        record = service.publish(superuser_identity, draft.id)
        expected_record_data = get_citation_string(
            CSLJSONSchema().dump(record),
            record.id,
            locale=default_locale,
            style=get_style_filepath(default_style),
        )
        expected_data.append(expected_record_data)
    # Make the freshly published records visible to the search endpoint.
    RDMRecord.index.refresh()
    response = client.get("/records", headers=headers)
    response_data = response.get_data(as_text=True)
    assert response.status_code == 200
    assert response.headers["content-type"] == "text/plain"
    # Each record's citation must appear somewhere in the plain-text listing.
    for citation in expected_data:
        assert citation in response_data
def test_citation_string_serializer_record(
    running_app,
    client,
    es_clear,
    minimal_record,
    superuser_identity,
):
    """Test Citation String Serializer for single records."""
    service = current_rdm_records.records_service
    draft = service.create(superuser_identity, minimal_record)
    record = service.publish(superuser_identity, draft.id)
    _id = record.id
    _url = f"/records/{_id}"
    headers = {"Accept": "text/x-bibliography"}
    default_style = StringCitationSerializer._default_style
    default_locale = StringCitationSerializer._default_locale
    # Tuples of (request url, style expected to be applied,
    # locale expected to be applied, expected HTTP status).
    test_cases = [
        (
            f"{_url}?style=3d-printing-in-medicine&locale=es-ES",
            "3d-printing-in-medicine",
            "es-ES",
            200,
        ),
        (f"{_url}?locale=es-ES", default_style, "es-ES", 200),
        (
            f"{_url}?style=3d-printing-in-medicine",
            "3d-printing-in-medicine",
            default_locale,
            200,
        ),
        (f"{_url}", default_style, default_locale, 200),
        # An unknown style is an error...
        (f"{_url}?style=Unknown_style", "Unknown_style", default_locale, 400),
        # ...but an unknown locale silently falls back to the default.
        (f"{_url}?locale=Unknown_locale", default_style, default_locale, 200),
    ]
    for url, expected_style, expected_locale, expected_status in test_cases:
        response = client.get(url, headers=headers)
        assert response.status_code == expected_status
        body = response.get_data(as_text=True)
        if expected_status == 200:
            assert response.headers["content-type"] == "text/plain"
            expected_data = get_citation_string(
                CSLJSONSchema().dump(record),
                _id,
                locale=expected_locale,
                style=get_style_filepath(expected_style),
            )
            assert expected_data == body
        elif expected_status == 400:
            # in case of error, the response is JSON
            assert response.headers["content-type"] == "application/json"
            # (was a pointless f-string with no placeholders)
            assert "Citation string style not found." in body
| 33.421053 | 78 | 0.658465 |
from copy import deepcopy
from citeproc_styles import get_style_filepath
from invenio_rdm_records.proxies import current_rdm_records
from invenio_rdm_records.records.api import RDMRecord
from invenio_rdm_records.resources.serializers import CSLJSONSerializer, \
StringCitationSerializer
from invenio_rdm_records.resources.serializers.csl import get_citation_string
from invenio_rdm_records.resources.serializers.csl.schema import CSLJSONSchema
def test_csl_json_serializer(running_app, full_record):
full_record["id"] = "12345-abcde"
expected_data = {
"publisher": "InvenioRDM",
"DOI": "10.5281/inveniordm.1234",
"language": "dan",
"title": "InvenioRDM",
"issued": {"date-parts": [["2018"], ["2020", "09"]]},
"abstract": "A description with HTML tags",
"author": [
{
"family": "Nielsen",
"given": "Lars Holm",
}
],
"note": "Funding by European Commission ROR 1234.",
"version": "v1.0",
"type": "graphic",
"id": "12345-abcde",
}
serializer = CSLJSONSerializer()
serialized_record = serializer.dump_one(full_record)
assert serialized_record == expected_data
rec_wrong_date = deepcopy(full_record)
rec_wrong_date["metadata"]["publication_date"] = "wrong"
expected = deepcopy(expected_data)
del expected["issued"]
assert serialized_record == expected_data
def test_citation_string_serializer_records_list(
running_app,
client,
es_clear,
minimal_record,
superuser_identity,
):
service = current_rdm_records.records_service
default_style = StringCitationSerializer._default_style
default_locale = StringCitationSerializer._default_locale
headers = {"Accept": "text/x-bibliography"}
expected_data = []
for _ in range(3):
draft = service.create(superuser_identity, minimal_record)
record = service.publish(superuser_identity, draft.id)
expected_record_data = get_citation_string(
CSLJSONSchema().dump(record),
record.id,
locale=default_locale,
style=get_style_filepath(default_style),
)
expected_data.append(expected_record_data)
RDMRecord.index.refresh()
response = client.get("/records", headers=headers)
response_data = response.get_data(as_text=True)
assert response.status_code == 200
assert response.headers["content-type"] == "text/plain"
for citation in expected_data:
assert citation in response_data
def test_citation_string_serializer_record(
running_app,
client,
es_clear,
minimal_record,
superuser_identity,
):
service = current_rdm_records.records_service
draft = service.create(superuser_identity, minimal_record)
record = service.publish(superuser_identity, draft.id)
_id = record.id
_url = f"/records/{_id}"
headers = {"Accept": "text/x-bibliography"}
default_style = StringCitationSerializer._default_style
default_locale = StringCitationSerializer._default_locale
test_cases = [
(
f"{_url}?style=3d-printing-in-medicine&locale=es-ES",
"3d-printing-in-medicine",
"es-ES",
200,
),
(f"{_url}?locale=es-ES", default_style, "es-ES", 200),
(
f"{_url}?style=3d-printing-in-medicine",
"3d-printing-in-medicine",
default_locale,
200,
),
(f"{_url}", default_style, default_locale, 200),
(f"{_url}?style=Unknown_style", "Unknown_style", default_locale, 400),
(f"{_url}?locale=Unknown_locale", default_style, default_locale, 200),
]
for url, expected_style, expected_locale, expected_status in test_cases:
response = client.get(url, headers=headers)
assert response.status_code == expected_status
body = response.get_data(as_text=True)
if expected_status == 200:
assert response.headers["content-type"] == "text/plain"
expected_data = get_citation_string(
CSLJSONSchema().dump(record),
_id,
locale=expected_locale,
style=get_style_filepath(expected_style),
)
assert expected_data == body
elif expected_status == 400:
assert response.headers["content-type"] == "application/json"
assert f"Citation string style not found." in body
| true | true |
f7f969c1a6ac200c2d56dd055a94745f011c80dd | 1,479 | py | Python | andrew/generategenomes.py | ajclaros/rl_legged_walker | 26d0e124ef38045943449c2772b966571117683b | [
"MIT"
] | null | null | null | andrew/generategenomes.py | ajclaros/rl_legged_walker | 26d0e124ef38045943449c2772b966571117683b | [
"MIT"
] | null | null | null | andrew/generategenomes.py | ajclaros/rl_legged_walker | 26d0e124ef38045943449c2772b966571117683b | [
"MIT"
] | null | null | null | import numpy as np
import os
from fitnessFunction import fitnessFunction
import pandas as pd

# Archive produced by a previous evolutionary run; row 3 holds the
# best-performing genome from which all perturbations are derived.
# (The original script had this assignment commented out, which made
# the np.load below raise NameError.)
filename = "two_neuron-step1.npy"
genome_arr = np.load(filename)
best = genome_arr[3]

select_genome = 7
scaling_path = f"./genomes/scalinggenome-{select_genome}.npy"

# Fixed random direction used to degrade fitness away from `best`.
# Reuse a previously saved direction when available so runs are
# repeatable; otherwise draw a fresh one and persist it.
if os.path.exists(scaling_path):
    lower_fitness = np.load(scaling_path)
else:
    lower_fitness = -np.random.uniform(0.0, 0.4, size=best.shape)
    np.save(scaling_path, lower_fitness)

# Target fitness levels we still need a perturbed genome for; a candidate
# is accepted when its fitness lands within +/-0.02 of a target.
points = [0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
for i in range(10000):
    # Step away from `best` along the perturbation direction in 1e-4 increments.
    candidate = best + lower_fitness * (i * 10 ** -4)
    fit = fitnessFunction(candidate)
    print(fit)
    if not (i % 100):
        print(i, end=' ', flush=False)
    for point in points:
        if point - 0.02 < fit < point + 0.02:
            print(f"index{i}")
            print("saving fit of {} to {}".format(fit, point))
            np.save(f"./perturbations/{point}/p-{select_genome}-{i}.npy", candidate)
            points.remove(point)
            print(f"points left:{points}")
            break
    if not points:
        # Every target level has been matched; nothing left to search for.
        break
| 32.866667 | 92 | 0.617309 | import numpy as np
import os
from fitnessFunction import fitnessFunction
import pandas as pd
genome_arr = np.load(filename)
best = genome_arr[3]
select_genome = 7
for j in range(1):
print(j)
points = [0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
for i in range(10000):
save_fitness = best+lower_fitness*(i*10**-4)
fit = fitnessFunction(save_fitness)
print(fit)
point_delete = []
if not (i % 100):
print(i, end=' ', flush=False)
for point in points:
if fit<point+0.02 and fit> point-0.02:
print(f"index{i}")
print("saving fit of {} to {}".format(fit, point))
point_delete.append(points.index(point))
break
for ind in point_delete:
points.pop(ind)
print(f"points left:{points}")
| true | true |
f7f969d3f63f785ff953f69cad230024a7cf5eae | 243 | py | Python | PyEvolv/assets/font.py | peerlator/PyEvolv | 7f5644e2ea22257f34547c9b050bc4cdd4f3bdec | [
"MIT"
] | 1 | 2018-08-02T19:42:35.000Z | 2018-08-02T19:42:35.000Z | PyEvolv/assets/font.py | peerlator/PyEvolv | 7f5644e2ea22257f34547c9b050bc4cdd4f3bdec | [
"MIT"
] | 1 | 2018-08-02T19:41:58.000Z | 2018-08-05T17:53:17.000Z | PyEvolv/assets/font.py | peerlator/PyEvolv | 7f5644e2ea22257f34547c9b050bc4cdd4f3bdec | [
"MIT"
] | null | null | null | import pygame
import PyEvolv
import os
# Initialize pygame's font subsystem once at import time.
pygame.font.init()
# TTF file bundled inside the installed PyEvolv package.
path = os.path.join(PyEvolv.__path__[0], 'assets', 'Arial.ttf')
# Shared default font instance (size 20).
FONT = pygame.font.Font(path, 20)
def get_font(size:int) -> pygame.font.Font:
    """Return the bundled Arial font loaded at the requested size."""
    return pygame.font.Font(path, size)
| 20.25 | 63 | 0.720165 | import pygame
import PyEvolv
import os
pygame.font.init()
path = os.path.join(PyEvolv.__path__[0], 'assets', 'Arial.ttf')
FONT = pygame.font.Font(path, 20)
def get_font(size:int) -> pygame.font.Font:
return pygame.font.Font(path, size)
| true | true |
f7f96bd1f2cdb6be9177a9e421651ecb01b38e5c | 1,529 | py | Python | engine/config_storage.py | hive-engine/distribution-engine-smt | 9ed4fddbae17342700a4ba0f24c667c8b853c793 | [
"MIT"
] | null | null | null | engine/config_storage.py | hive-engine/distribution-engine-smt | 9ed4fddbae17342700a4ba0f24c667c8b853c793 | [
"MIT"
] | null | null | null | engine/config_storage.py | hive-engine/distribution-engine-smt | 9ed4fddbae17342700a4ba0f24c667c8b853c793 | [
"MIT"
] | null | null | null | # This Python file uses the following encoding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import bytes
from builtins import object
from beemgraphenebase.py23 import py23_bytes, bytes_types
from sqlalchemy.dialects.postgresql import insert as pg_insert
import shutil
import time
import os
import json
import sqlite3
from appdirs import user_data_dir
from datetime import datetime, timedelta
from beem.utils import formatTimeString, addTzInfo
import logging
from binascii import hexlify
import random
import hashlib
import dataset
from sqlalchemy import and_
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
# NOTE(review): adding a handler at import time is unusual for a library
# module; kept for backward compatibility.
log.addHandler(logging.StreamHandler())
timeformat = "%Y%m%d-%H%M%S"
# Row ids inside the `configuration` table: one row per chain.
HIVED = 1
ENGINE_SIDECHAIN = 2
class ConfigurationDB(object):
    """Storage for per-chain configuration rows.

    A single `configuration` table holds one row for the Hive blockchain
    state (id == HIVED) and one for the engine side-chain state
    (id == ENGINE_SIDECHAIN). (The previous docstring, "trx storage
    class", described a different module.)
    """
    __tablename__ = 'configuration'
    def __init__(self, db):
        """Keep a handle on the dataset database object."""
        self.db = db
    def _find(self, row_id):
        """Return the configuration row with the given id, or None."""
        table = self.db[self.__tablename__]
        return table.find_one(id=row_id)
    def _upsert(self, data, row_id):
        """Insert or update *data* as the row with the given id.

        Note: intentionally mutates the caller's dict by setting
        data["id"], matching the historical behavior.
        """
        data["id"] = row_id
        table = self.db[self.__tablename__]
        table.upsert(data, ['id'])
    def get(self):
        """Return the Hive configuration row (or None)."""
        return self._find(HIVED)
    def get_engine(self):
        """Return the engine side-chain configuration row (or None)."""
        return self._find(ENGINE_SIDECHAIN)
    def upsert(self, data):
        """Insert/update the Hive configuration row."""
        self._upsert(data, HIVED)
    def upsert_engine(self, data):
        """Insert/update the engine side-chain configuration row."""
        self._upsert(data, ENGINE_SIDECHAIN)
| 25.915254 | 62 | 0.734467 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import bytes
from builtins import object
from beemgraphenebase.py23 import py23_bytes, bytes_types
from sqlalchemy.dialects.postgresql import insert as pg_insert
import shutil
import time
import os
import json
import sqlite3
from appdirs import user_data_dir
from datetime import datetime, timedelta
from beem.utils import formatTimeString, addTzInfo
import logging
from binascii import hexlify
import random
import hashlib
import dataset
from sqlalchemy import and_
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
log.addHandler(logging.StreamHandler())
timeformat = "%Y%m%d-%H%M%S"
HIVED = 1
ENGINE_SIDECHAIN = 2
class ConfigurationDB(object):
__tablename__ = 'configuration'
def __init__(self, db):
self.db = db
def get(self):
table = self.db[self.__tablename__]
return table.find_one(id=HIVED)
def get_engine(self):
table = self.db[self.__tablename__]
return table.find_one(id=ENGINE_SIDECHAIN)
def upsert(self, data):
data["id"]= HIVED
table = self.db[self.__tablename__]
table.upsert(data, ['id'])
def upsert_engine(self, data):
data["id"]= ENGINE_SIDECHAIN
table = self.db[self.__tablename__]
table.upsert(data, ['id'])
| true | true |
f7f96c62879766d126c79a1e1f6e5517705c2a98 | 1,628 | py | Python | backend/main.py | le-kag/Fast-API-backend | 1693c293ddf4a7060a9a1604f7d189c6a81ff48d | [
"MIT"
] | null | null | null | backend/main.py | le-kag/Fast-API-backend | 1693c293ddf4a7060a9a1604f7d189c6a81ff48d | [
"MIT"
] | null | null | null | backend/main.py | le-kag/Fast-API-backend | 1693c293ddf4a7060a9a1604f7d189c6a81ff48d | [
"MIT"
] | null | null | null | from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from model import Todo
app = FastAPI()
from database import (
fetch_one_todo,
fetch_all_todos,
create_todo,
update_todo,
remove_todo
)
origins = ['https://localhost:3000']
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials = True,
allow_methods=["*"],
allow_headers=["*"],
)
@app.get("/")
async def read_root():
return {"Hello": "World"}
@app.get("/api/todo")
async def get_todo():
response = await fetch_all_todos()
return response
@app.get("/api/todo/{title}", response_model=Todo)
async def get_todo_by_title(title):
response = await fetch_one_todo(title)
if response:
return response
raise HTTPException(404, f"There is no todo with the title {title}")
@app.post("/api/todo/", response_model=Todo)
async def post_todo(todo: Todo):
response = await create_todo(todo.dict())
if response:
return response
raise HTTPException(400, "Something went wrong")
@app.put("/api/todo/{title}/", response_model=Todo)
async def put_todo(title: str, desc: str):
response = await update_todo(title, desc)
if response:
return response
raise HTTPException(404, f"There is no todo with the title {title}")
@app.delete("/api/todo/{title}")
async def delete_todo(title):
response = await remove_todo(title)
if response:
return "Successfully deleted todo"
raise HTTPException(404, f"There is no todo with the title {title}") | 26.688525 | 73 | 0.670147 | from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from model import Todo
app = FastAPI()
from database import (
fetch_one_todo,
fetch_all_todos,
create_todo,
update_todo,
remove_todo
)
origins = ['https://localhost:3000']
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials = True,
allow_methods=["*"],
allow_headers=["*"],
)
@app.get("/")
async def read_root():
return {"Hello": "World"}
@app.get("/api/todo")
async def get_todo():
response = await fetch_all_todos()
return response
@app.get("/api/todo/{title}", response_model=Todo)
async def get_todo_by_title(title):
response = await fetch_one_todo(title)
if response:
return response
raise HTTPException(404, f"There is no todo with the title {title}")
@app.post("/api/todo/", response_model=Todo)
async def post_todo(todo: Todo):
response = await create_todo(todo.dict())
if response:
return response
raise HTTPException(400, "Something went wrong")
@app.put("/api/todo/{title}/", response_model=Todo)
async def put_todo(title: str, desc: str):
response = await update_todo(title, desc)
if response:
return response
raise HTTPException(404, f"There is no todo with the title {title}")
@app.delete("/api/todo/{title}")
async def delete_todo(title):
response = await remove_todo(title)
if response:
return "Successfully deleted todo"
raise HTTPException(404, f"There is no todo with the title {title}") | true | true |
f7f96c7d03d0113e3a094765f6a9a9e0330e097d | 14,456 | py | Python | openpype/hosts/standalonepublisher/plugins/publish/extract_harmony_zip.py | dangerstudios/OpenPype | 10ddcc4699137888616eec57cd7fac9648189714 | [
"MIT"
] | null | null | null | openpype/hosts/standalonepublisher/plugins/publish/extract_harmony_zip.py | dangerstudios/OpenPype | 10ddcc4699137888616eec57cd7fac9648189714 | [
"MIT"
] | null | null | null | openpype/hosts/standalonepublisher/plugins/publish/extract_harmony_zip.py | dangerstudios/OpenPype | 10ddcc4699137888616eec57cd7fac9648189714 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Extract Harmony scene from zip file."""
import glob
import os
import shutil
import six
import sys
import tempfile
import zipfile
import pyblish.api
from avalon import api, io
import openpype.api
class ExtractHarmonyZip(openpype.api.Extractor):
"""Extract Harmony zip."""
# Pyblish settings
label = "Extract Harmony zip"
order = pyblish.api.ExtractorOrder + 0.02
hosts = ["standalonepublisher"]
families = ["scene"]
# Properties
session = None
task_types = None
task_statuses = None
assetversion_statuses = None
# Presets
create_workfile = True
default_task = "harmonyIngest"
default_task_type = "Ingest"
default_task_status = "Ingested"
assetversion_status = "Ingested"
def process(self, instance):
    """Plugin entry point: produce a zipped Harmony scene representation.

    Resolves the ftrack asset/project, ensures the ingest task exists,
    computes the next version number, stages the submitted scene
    (directory, ``.xstage`` or pre-zipped archive), re-zips it as a
    single ``zip`` representation and optionally extracts a workfile.

    Args:
        instance (pyblish.api.Instance): Publish instance carrying one
            representation that points at the submitted scene.
    """
    context = instance.context
    self.session = context.data["ftrackSession"]
    asset_doc = context.data["assetEntity"]
    subset_name = instance.data["subset"]
    instance_name = instance.data["name"]
    family = instance.data["family"]
    task = context.data["anatomyData"]["task"] or self.default_task
    project_entity = instance.context.data["projectEntity"]
    ftrack_id = asset_doc["data"]["ftrackId"]
    repres = instance.data["representations"]
    submitted_staging_dir = repres[0]["stagingDir"]
    submitted_files = repres[0]["files"]

    # Get all the ftrack entities needed
    # Asset Entity
    query = 'AssetBuild where id is "{}"'.format(ftrack_id)
    asset_entity = self.session.query(query).first()

    # Project Entity
    query = 'Project where full_name is "{}"'.format(
        project_entity["name"]
    )
    project_entity = self.session.query(query).one()

    # Get Task types and Statuses for creation if needed.
    # BUGFIX: the helpers are private ("_"-prefixed) — the previous code
    # called undefined public names (``get_all_task_statuses`` etc.),
    # which raised AttributeError at runtime.
    self.task_types = self._get_all_task_types(project_entity)
    self.task_statuses = self._get_all_task_statuses(project_entity)

    # Get Statuses of AssetVersions
    self.assetversion_statuses = self._get_all_assetversion_statuses(
        project_entity
    )

    # Setup the status that we want for the AssetVersion
    if self.assetversion_status:
        instance.data["assetversion_status"] = self.assetversion_status

    # Create the default_task if it does not exist
    if task == self.default_task:
        existing_tasks = []
        entity_children = asset_entity.get('children', [])
        for child in entity_children:
            if child.entity_type.lower() == 'task':
                existing_tasks.append(child['name'].lower())

        if task.lower() in existing_tasks:
            print("Task {} already exists".format(task))
        else:
            # BUGFIX: was ``self.create_task`` — the method is defined
            # as ``_create_task``.
            self._create_task(
                name=task,
                task_type=self.default_task_type,
                task_status=self.default_task_status,
                parent=asset_entity,
            )

    # Find latest version
    latest_version = self._find_last_version(subset_name, asset_doc)
    version_number = 1
    if latest_version is not None:
        version_number += latest_version

    self.log.info(
        "Next version of instance \"{}\" will be {}".format(
            instance_name, version_number
        )
    )

    # update instance info
    instance.data["task"] = task
    instance.data["version_name"] = "{}_{}".format(subset_name, task)
    instance.data["family"] = family
    instance.data["subset"] = subset_name
    instance.data["version"] = version_number
    instance.data["latestVersion"] = latest_version
    instance.data["anatomyData"].update({
        "subset": subset_name,
        "family": family,
        "version": version_number
    })

    # Copy `families` and check if `family` is not in current families
    families = instance.data.get("families") or list()
    if families:
        families = list(set(families))
    instance.data["families"] = families

    # Prepare staging dir for new instance and zip + sanitize scene name
    staging_dir = tempfile.mkdtemp(prefix="pyblish_tmp_")

    # Handle if the representation is a .zip and not an .xstage
    pre_staged = False
    if submitted_files.endswith(".zip"):
        submitted_zip_file = os.path.join(
            submitted_staging_dir, submitted_files
        ).replace("\\", "/")
        pre_staged = self.sanitize_prezipped_project(
            instance, submitted_zip_file, staging_dir
        )

    # Get the file to work with (the sanitize step above may have
    # rewritten the representation's stagingDir/files).
    source_dir = str(repres[0]["stagingDir"])
    source_file = str(repres[0]["files"])
    staging_scene_dir = os.path.join(staging_dir, "scene")
    staging_scene = os.path.join(staging_scene_dir, source_file)

    # If the file is an .xstage / directory, we must stage it
    if not pre_staged:
        shutil.copytree(source_dir, staging_scene_dir)

    # Rename this latest file as 'scene.xstage'. The latest scene was
    # determined in the collector (or in sanitize_prezipped_project for
    # zip submissions).
    os.rename(
        staging_scene, os.path.join(staging_scene_dir, "scene.xstage")
    )

    # Required to set the current directory where the zip will end up
    os.chdir(staging_dir)

    # Create the zip file
    zip_filepath = shutil.make_archive(
        os.path.basename(source_dir), "zip", staging_scene_dir
    )
    zip_filename = os.path.basename(zip_filepath)
    self.log.info("Zip file: {}".format(zip_filepath))

    # Setup representation
    new_repre = {
        "name": "zip",
        "ext": "zip",
        "files": zip_filename,
        "stagingDir": staging_dir
    }
    self.log.debug(
        "Creating new representation: {}".format(new_repre)
    )
    instance.data["representations"] = [new_repre]
    self.log.debug(
        "Completed prep of zipped Harmony scene: {}".format(zip_filepath)
    )

    # If this extractor is setup to also extract a workfile...
    if self.create_workfile:
        workfile_path = self.extract_workfile(instance, staging_scene)
        self.log.debug("Extracted Workfile to: {}".format(workfile_path))
def extract_workfile(self, instance, staging_scene):
    """Extract a valid workfile for this corresponding publish.

    Args:
        instance (pyblish.api.Instance): Instance data.
        staging_scene (str): Path of the staged scene.

    Returns:
        str: Path to the zipped workfile copied into the work directory.
    """
    # Since the staging scene was renamed to "scene.xstage" for publish,
    # point at that renamed file inside the temp staging dir.
    staging_scene = os.path.join(
        os.path.dirname(staging_scene), "scene.xstage"
    )

    # Setup the data needed to form a valid work path filename
    anatomy = openpype.api.Anatomy()
    project_entity = instance.context.data["projectEntity"]
    data = {
        "root": api.registered_root(),
        "project": {
            "name": project_entity["name"],
            "code": project_entity["data"].get("code", '')
        },
        "asset": instance.data["asset"],
        "hierarchy": openpype.api.get_hierarchy(instance.data["asset"]),
        "family": instance.data["family"],
        "task": instance.data.get("task"),
        "subset": instance.data["subset"],
        "version": 1,
        "ext": "zip",
    }

    # Get a valid work filename first with version 1
    file_template = anatomy.templates["work"]["file"]
    anatomy_filled = anatomy.format(data)
    work_path = anatomy_filled["work"]["path"]

    # Get the final work filename with the proper version
    data["version"] = api.last_workfile_with_version(
        os.path.dirname(work_path), file_template, data, [".zip"]
    )[1]
    # BUGFIX: re-format the anatomy with the resolved version. The
    # previous code re-read the stale version-1 fill, so the workfile
    # always targeted version 1 and could overwrite an existing file.
    anatomy_filled = anatomy.format(data)
    work_path = anatomy_filled["work"]["path"]

    base_name = os.path.splitext(os.path.basename(work_path))[0]
    staging_work_path = os.path.join(
        os.path.dirname(staging_scene), base_name + ".xstage"
    )

    # Rename this latest file after the workfile path filename
    os.rename(staging_scene, staging_work_path)

    # Required to set the current directory where the zip will end up
    os.chdir(os.path.dirname(os.path.dirname(staging_scene)))

    # Create the zip file
    zip_filepath = shutil.make_archive(
        base_name, "zip", os.path.dirname(staging_scene)
    )

    self.log.info(staging_scene)
    self.log.info(work_path)
    self.log.info(staging_work_path)
    self.log.info(os.path.dirname(os.path.dirname(staging_scene)))
    self.log.info(base_name)
    self.log.info(zip_filepath)

    # Create the work path on disk if it does not exist
    os.makedirs(os.path.dirname(work_path), exist_ok=True)
    shutil.copy(zip_filepath, work_path)

    return work_path
def sanitize_prezipped_project(self, instance, zip_filepath, staging_dir):
    """Stage a pre-zipped project, handling a nested project folder.

    If the archive root holds a folder instead of the project files, the
    nested project is extracted and moved into ``staging_dir``/scene.
    The instance's first representation is re-pointed at the newest
    ``.xstage`` inside the staged scene folder.

    Args:
        instance (pyblish.api.Instance): Instance data.
        zip_filepath (str): Path to zip.
        staging_dir (str): Path to staging directory.

    Returns:
        bool: Always True — the scene has been staged.
    """
    # Read the archive listing with a context manager so the handle is
    # closed (the previous code leaked it) and without shadowing the
    # ``zip`` builtin.
    with zipfile.ZipFile(zip_filepath) as zip_archive:
        zip_contents = zip_archive.namelist()

    # Determine if any xstage file is in root of zip
    project_in_root = [
        pth for pth in zip_contents
        if "/" not in pth and pth.endswith(".xstage")
    ]
    staging_scene_dir = os.path.join(staging_dir, "scene")

    if not project_in_root:
        # The project is nested, so we must extract and move it
        staging_tmp_dir = os.path.join(staging_dir, "tmp")
        with zipfile.ZipFile(zip_filepath, "r") as zip_ref:
            zip_ref.extractall(staging_tmp_dir)
        nested_project_folder = os.path.join(
            staging_tmp_dir, zip_contents[0]
        )
        shutil.copytree(nested_project_folder, staging_scene_dir)
    else:
        # The project is not nested, so we just extract to scene folder
        with zipfile.ZipFile(zip_filepath, "r") as zip_ref:
            zip_ref.extractall(staging_scene_dir)

    # Re-point the representation at the most recently created .xstage
    latest_file = max(
        glob.iglob(staging_scene_dir + "/*.xstage"),
        key=os.path.getctime
    ).replace("\\", "/")
    instance.data["representations"][0]["stagingDir"] = staging_scene_dir
    instance.data["representations"][0]["files"] = os.path.basename(
        latest_file
    )

    # We have staged the scene already so return True
    return True
def _find_last_version(self, subset_name, asset_doc):
    """Return the latest published version number of the subset, or None."""
    subset_doc = io.find_one({
        "type": "subset",
        "name": subset_name,
        "parent": asset_doc["_id"],
    })
    if subset_doc is None:
        self.log.debug("Subset entity does not exist yet.")
        return None

    version_doc = io.find_one(
        {"type": "version", "parent": subset_doc["_id"]},
        sort=[("name", -1)],
    )
    if not version_doc:
        return None
    return int(version_doc["name"])
def _get_all_task_types(self, project):
"""Get all task types."""
tasks = {}
proj_template = project['project_schema']
temp_task_types = proj_template['_task_type_schema']['types']
for type in temp_task_types:
if type['name'] not in tasks:
tasks[type['name']] = type
return tasks
def _get_all_task_statuses(self, project):
"""Get all statuses of tasks."""
statuses = {}
proj_template = project['project_schema']
temp_task_statuses = proj_template.get_statuses("Task")
for status in temp_task_statuses:
if status['name'] not in statuses:
statuses[status['name']] = status
return statuses
def _get_all_assetversion_statuses(self, project):
"""Get statuses of all asset versions."""
statuses = {}
proj_template = project['project_schema']
temp_task_statuses = proj_template.get_statuses("AssetVersion")
for status in temp_task_statuses:
if status['name'] not in statuses:
statuses[status['name']] = status
return statuses
def _create_task(self, name, task_type, parent, task_status):
    """Create and commit an ftrack Task under *parent*.

    Args:
        name (str): Name of the task to create.
        task_type (str): Key into ``self.task_types`` (built in process).
        parent: ftrack entity the task is parented under.
        task_status (str): Key into ``self.task_statuses``.

    Returns:
        The newly created ftrack Task entity.

    Raises:
        Whatever ``session.commit()`` raised — re-raised with its
        original traceback after the session is rolled back.
    """
    task_data = {
        'name': name,
        'parent': parent,
    }
    self.log.info(task_type)
    # Resolve type/status entities from the lookups built in ``process``.
    task_data['type'] = self.task_types[task_type]
    task_data['status'] = self.task_statuses[task_status]
    self.log.info(task_data)
    task = self.session.create('Task', task_data)
    try:
        self.session.commit()
    except Exception:
        # Roll back the session, then re-raise with the original
        # traceback (six.reraise keeps py2/py3 compatibility).
        tp, value, tb = sys.exc_info()
        self.session.rollback()
        six.reraise(tp, value, tb)
    return task
| 35.693827 | 78 | 0.571251 |
import glob
import os
import shutil
import six
import sys
import tempfile
import zipfile
import pyblish.api
from avalon import api, io
import openpype.api
class ExtractHarmonyZip(openpype.api.Extractor):
label = "Extract Harmony zip"
order = pyblish.api.ExtractorOrder + 0.02
hosts = ["standalonepublisher"]
families = ["scene"]
session = None
task_types = None
task_statuses = None
assetversion_statuses = None
create_workfile = True
default_task = "harmonyIngest"
default_task_type = "Ingest"
default_task_status = "Ingested"
assetversion_status = "Ingested"
def process(self, instance):
context = instance.context
self.session = context.data["ftrackSession"]
asset_doc = context.data["assetEntity"]
subset_name = instance.data["subset"]
instance_name = instance.data["name"]
family = instance.data["family"]
task = context.data["anatomyData"]["task"] or self.default_task
project_entity = instance.context.data["projectEntity"]
ftrack_id = asset_doc["data"]["ftrackId"]
repres = instance.data["representations"]
submitted_staging_dir = repres[0]["stagingDir"]
submitted_files = repres[0]["files"]
query = 'AssetBuild where id is "{}"'.format(ftrack_id)
asset_entity = self.session.query(query).first()
query = 'Project where full_name is "{}"'.format(
project_entity["name"]
)
project_entity = self.session.query(query).one()
self.task_types = self._get_all_task_types(project_entity)
self.task_statuses = self.get_all_task_statuses(project_entity)
self.assetversion_statuses = self.get_all_assetversion_statuses(
project_entity
)
if self.assetversion_status:
instance.data["assetversion_status"] = self.assetversion_status
if task == self.default_task:
existing_tasks = []
entity_children = asset_entity.get('children', [])
for child in entity_children:
if child.entity_type.lower() == 'task':
existing_tasks.append(child['name'].lower())
if task.lower() in existing_tasks:
print("Task {} already exists".format(task))
else:
self.create_task(
name=task,
task_type=self.default_task_type,
task_status=self.default_task_status,
parent=asset_entity,
)
latest_version = self._find_last_version(subset_name, asset_doc)
version_number = 1
if latest_version is not None:
version_number += latest_version
self.log.info(
"Next version of instance \"{}\" will be {}".format(
instance_name, version_number
)
)
instance.data["task"] = task
instance.data["version_name"] = "{}_{}".format(subset_name, task)
instance.data["family"] = family
instance.data["subset"] = subset_name
instance.data["version"] = version_number
instance.data["latestVersion"] = latest_version
instance.data["anatomyData"].update({
"subset": subset_name,
"family": family,
"version": version_number
})
families = instance.data.get("families") or list()
if families:
families = list(set(families))
instance.data["families"] = families
staging_dir = tempfile.mkdtemp(prefix="pyblish_tmp_")
pre_staged = False
if submitted_files.endswith(".zip"):
submitted_zip_file = os.path.join(submitted_staging_dir,
submitted_files
).replace("\\", "/")
pre_staged = self.sanitize_prezipped_project(instance,
submitted_zip_file,
staging_dir)
source_dir = str(repres[0]["stagingDir"])
source_file = str(repres[0]["files"])
staging_scene_dir = os.path.join(staging_dir, "scene")
staging_scene = os.path.join(staging_scene_dir, source_file)
if not pre_staged:
shutil.copytree(source_dir, staging_scene_dir)
os.rename(staging_scene,
os.path.join(staging_scene_dir, "scene.xstage")
)
os.chdir(staging_dir)
zip_filepath = shutil.make_archive(os.path.basename(source_dir),
"zip",
staging_scene_dir
)
zip_filename = os.path.basename(zip_filepath)
self.log.info("Zip file: {}".format(zip_filepath))
new_repre = {
"name": "zip",
"ext": "zip",
"files": zip_filename,
"stagingDir": staging_dir
}
self.log.debug(
"Creating new representation: {}".format(new_repre)
)
instance.data["representations"] = [new_repre]
self.log.debug("Completed prep of zipped Harmony scene: {}"
.format(zip_filepath)
)
if self.create_workfile:
workfile_path = self.extract_workfile(instance,
staging_scene
)
self.log.debug("Extracted Workfile to: {}".format(workfile_path))
def extract_workfile(self, instance, staging_scene):
staging_scene = os.path.join(os.path.dirname(staging_scene),
"scene.xstage")
anatomy = openpype.api.Anatomy()
project_entity = instance.context.data["projectEntity"]
data = {
"root": api.registered_root(),
"project": {
"name": project_entity["name"],
"code": project_entity["data"].get("code", '')
},
"asset": instance.data["asset"],
"hierarchy": openpype.api.get_hierarchy(instance.data["asset"]),
"family": instance.data["family"],
"task": instance.data.get("task"),
"subset": instance.data["subset"],
"version": 1,
"ext": "zip",
}
file_template = anatomy.templates["work"]["file"]
anatomy_filled = anatomy.format(data)
work_path = anatomy_filled["work"]["path"]
data["version"] = api.last_workfile_with_version(
os.path.dirname(work_path), file_template, data, [".zip"]
)[1]
work_path = anatomy_filled["work"]["path"]
base_name = os.path.splitext(os.path.basename(work_path))[0]
staging_work_path = os.path.join(os.path.dirname(staging_scene),
base_name + ".xstage"
)
os.rename(staging_scene, staging_work_path)
os.chdir(os.path.dirname(os.path.dirname(staging_scene)))
zip_filepath = shutil.make_archive(base_name,
"zip",
os.path.dirname(staging_scene)
)
self.log.info(staging_scene)
self.log.info(work_path)
self.log.info(staging_work_path)
self.log.info(os.path.dirname(os.path.dirname(staging_scene)))
self.log.info(base_name)
self.log.info(zip_filepath)
os.makedirs(os.path.dirname(work_path), exist_ok=True)
shutil.copy(zip_filepath, work_path)
return work_path
def sanitize_prezipped_project(
self, instance, zip_filepath, staging_dir):
zip = zipfile.ZipFile(zip_filepath)
zip_contents = zipfile.ZipFile.namelist(zip)
project_in_root = [pth for pth in zip_contents
if "/" not in pth and pth.endswith(".xstage")]
staging_scene_dir = os.path.join(staging_dir, "scene")
if not project_in_root:
staging_tmp_dir = os.path.join(staging_dir, "tmp")
with zipfile.ZipFile(zip_filepath, "r") as zip_ref:
zip_ref.extractall(staging_tmp_dir)
nested_project_folder = os.path.join(staging_tmp_dir,
zip_contents[0]
)
shutil.copytree(nested_project_folder, staging_scene_dir)
else:
with zipfile.ZipFile(zip_filepath, "r") as zip_ref:
zip_ref.extractall(staging_scene_dir)
latest_file = max(glob.iglob(staging_scene_dir + "/*.xstage"),
key=os.path.getctime).replace("\\", "/")
instance.data["representations"][0]["stagingDir"] = staging_scene_dir
instance.data["representations"][0]["files"] = os.path.basename(
latest_file)
return True
def _find_last_version(self, subset_name, asset_doc):
subset_doc = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset_doc["_id"]
})
if subset_doc is None:
self.log.debug("Subset entity does not exist yet.")
else:
version_doc = io.find_one(
{
"type": "version",
"parent": subset_doc["_id"]
},
sort=[("name", -1)]
)
if version_doc:
return int(version_doc["name"])
return None
def _get_all_task_types(self, project):
tasks = {}
proj_template = project['project_schema']
temp_task_types = proj_template['_task_type_schema']['types']
for type in temp_task_types:
if type['name'] not in tasks:
tasks[type['name']] = type
return tasks
def _get_all_task_statuses(self, project):
statuses = {}
proj_template = project['project_schema']
temp_task_statuses = proj_template.get_statuses("Task")
for status in temp_task_statuses:
if status['name'] not in statuses:
statuses[status['name']] = status
return statuses
def _get_all_assetversion_statuses(self, project):
statuses = {}
proj_template = project['project_schema']
temp_task_statuses = proj_template.get_statuses("AssetVersion")
for status in temp_task_statuses:
if status['name'] not in statuses:
statuses[status['name']] = status
return statuses
def _create_task(self, name, task_type, parent, task_status):
task_data = {
'name': name,
'parent': parent,
}
self.log.info(task_type)
task_data['type'] = self.task_types[task_type]
task_data['status'] = self.task_statuses[task_status]
self.log.info(task_data)
task = self.session.create('Task', task_data)
try:
self.session.commit()
except Exception:
tp, value, tb = sys.exc_info()
self.session.rollback()
six.reraise(tp, value, tb)
return task
| true | true |
f7f96c9ff5efcfd0c01cd37a8ca34ec81c088922 | 10,447 | py | Python | Lib/robofab/test/testSupport.py | Vectro-Type-Foundry/robofab | cd65d78292d24358c98dce53d283314cdc85878e | [
"BSD-3-Clause"
] | 61 | 2015-01-17T10:15:45.000Z | 2018-12-02T13:53:02.000Z | Lib/robofab/test/testSupport.py | Vectro-Type-Foundry/robofab | cd65d78292d24358c98dce53d283314cdc85878e | [
"BSD-3-Clause"
] | 37 | 2015-01-05T23:44:56.000Z | 2018-03-16T19:05:28.000Z | Lib/robofab/test/testSupport.py | Vectro-Type-Foundry/robofab | cd65d78292d24358c98dce53d283314cdc85878e | [
"BSD-3-Clause"
] | 25 | 2015-01-08T19:49:36.000Z | 2018-10-29T00:36:46.000Z | """Miscellaneous helpers for our test suite."""
import sys
import os
import types
import unittest
def getDemoFontPath():
    """Return the path to Data/DemoFont.ufo/."""
    import robofab
    package_dir = os.path.dirname(robofab.__file__)
    root = os.path.dirname(os.path.dirname(package_dir))
    return os.path.join(root, "Data", "DemoFont.ufo")
def getDemoFontGlyphSetPath():
    """Return the path to Data/DemoFont.ufo/glyphs/."""
    demo_font_path = getDemoFontPath()
    return os.path.join(demo_font_path, "glyphs")
def _gatherTestCasesFromCallerByMagic():
    # UGLY magic: fetch TestCase subclasses from the globals of our
    # caller's caller.
    # NOTE: the frame depth of 2 is load-bearing — frame 0 is this
    # function, frame 1 is runTests(), frame 2 is the module that
    # invoked runTests(). Do not add intermediate call layers.
    frame = sys._getframe(2)
    return _gatherTestCasesFromDict(frame.f_globals)
def _gatherTestCasesFromDict(d):
testCases = []
for ob in d.values():
if isinstance(ob, type) and issubclass(ob, unittest.TestCase):
testCases.append(ob)
return testCases
def runTests(testCases=None, verbosity=1):
"""Run a series of tests."""
if testCases is None:
testCases = _gatherTestCasesFromCallerByMagic()
loader = unittest.TestLoader()
suites = []
for testCase in testCases:
suites.append(loader.loadTestsFromTestCase(testCase))
testRunner = unittest.TextTestRunner(verbosity=verbosity)
testSuite = unittest.TestSuite(suites)
testRunner.run(testSuite)
# font info values used by several tests
# ``fontInfoVersion1`` is a complete UFO format-version-1 font.info
# attribute set; every attribute carries a recognizable placeholder value.
fontInfoVersion1 = {
    "familyName"   : "Some Font (Family Name)",
    "styleName"    : "Regular (Style Name)",
    "fullName"     : "Some Font-Regular (Postscript Full Name)",
    "fontName"     : "SomeFont-Regular (Postscript Font Name)",
    "menuName"     : "Some Font Regular (Style Map Family Name)",
    "fontStyle"    : 64,
    "note"         : "A note.",
    "versionMajor" : 1,
    "versionMinor" : 0,
    "year"         : 2008,
    "copyright"    : "Copyright Some Foundry.",
    "notice"       : "Some Font by Some Designer for Some Foundry.",
    "trademark"    : "Trademark Some Foundry",
    "license"      : "License info for Some Foundry.",
    "licenseURL"   : "http://somefoundry.com/license",
    "createdBy"    : "Some Foundry",
    "designer"     : "Some Designer",
    "designerURL"  : "http://somedesigner.com",
    "vendorURL"    : "http://somefoundry.com",
    "unitsPerEm"   : 1000,
    "ascender"     : 750,
    "descender"    : -250,
    "capHeight"    : 750,
    "xHeight"      : 500,
    "defaultWidth" : 400,
    "slantAngle"   : -12.5,
    "italicAngle"  : -12.5,
    "widthName"    : "Medium (normal)",
    "weightName"   : "Medium",
    "weightValue"  : 500,
    "fondName"     : "SomeFont Regular (FOND Name)",
    "otFamilyName" : "Some Font (Preferred Family Name)",
    "otStyleName"  : "Regular (Preferred Subfamily Name)",
    "otMacName"    : "Some Font Regular (Compatible Full Name)",
    "msCharSet"    : 0,
    "fondID"       : 15000,
    "uniqueID"     : 4000000,
    "ttVendor"     : "SOME",
    "ttUniqueID"   : "OpenType name Table Unique ID",
    "ttVersion"    : "OpenType name Table Version",
}

# ``fontInfoVersion2`` is the equivalent UFO format-version-2 set.
fontInfoVersion2 = {
    "familyName" : "Some Font (Family Name)",
    "styleName" : "Regular (Style Name)",
    "styleMapFamilyName" : "Some Font Regular (Style Map Family Name)",
    "styleMapStyleName" : "regular",
    "versionMajor" : 1,
    "versionMinor" : 0,
    "year" : 2008,
    "copyright" : "Copyright Some Foundry.",
    "trademark" : "Trademark Some Foundry",
    "unitsPerEm" : 1000,
    "descender" : -250,
    "xHeight" : 500,
    "capHeight" : 750,
    "ascender" : 750,
    "italicAngle" : -12.5,
    "note" : "A note.",
    "openTypeHeadCreated" : "2000/01/01 00:00:00",
    "openTypeHeadLowestRecPPEM" : 10,
    "openTypeHeadFlags" : [0, 1],
    "openTypeHheaAscender" : 750,
    "openTypeHheaDescender" : -250,
    "openTypeHheaLineGap" : 200,
    "openTypeHheaCaretSlopeRise" : 1,
    "openTypeHheaCaretSlopeRun" : 0,
    "openTypeHheaCaretOffset" : 0,
    "openTypeNameDesigner" : "Some Designer",
    "openTypeNameDesignerURL" : "http://somedesigner.com",
    "openTypeNameManufacturer" : "Some Foundry",
    "openTypeNameManufacturerURL" : "http://somefoundry.com",
    "openTypeNameLicense" : "License info for Some Foundry.",
    "openTypeNameLicenseURL" : "http://somefoundry.com/license",
    "openTypeNameVersion" : "OpenType name Table Version",
    "openTypeNameUniqueID" : "OpenType name Table Unique ID",
    "openTypeNameDescription" : "Some Font by Some Designer for Some Foundry.",
    "openTypeNamePreferredFamilyName" : "Some Font (Preferred Family Name)",
    "openTypeNamePreferredSubfamilyName" : "Regular (Preferred Subfamily Name)",
    "openTypeNameCompatibleFullName" : "Some Font Regular (Compatible Full Name)",
    "openTypeNameSampleText" : "Sample Text for Some Font.",
    "openTypeNameWWSFamilyName" : "Some Font (WWS Family Name)",
    "openTypeNameWWSSubfamilyName" : "Regular (WWS Subfamily Name)",
    "openTypeOS2WidthClass" : 5,
    "openTypeOS2WeightClass" : 500,
    "openTypeOS2Selection" : [3],
    "openTypeOS2VendorID" : "SOME",
    "openTypeOS2Panose" : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
    "openTypeOS2FamilyClass" : [1, 1],
    "openTypeOS2UnicodeRanges" : [0, 1],
    "openTypeOS2CodePageRanges" : [0, 1],
    "openTypeOS2TypoAscender" : 750,
    "openTypeOS2TypoDescender" : -250,
    "openTypeOS2TypoLineGap" : 200,
    "openTypeOS2WinAscent" : 750,
    "openTypeOS2WinDescent" : -250,
    "openTypeOS2Type" : [],
    "openTypeOS2SubscriptXSize" : 200,
    "openTypeOS2SubscriptYSize" : 400,
    "openTypeOS2SubscriptXOffset" : 0,
    "openTypeOS2SubscriptYOffset" : -100,
    "openTypeOS2SuperscriptXSize" : 200,
    "openTypeOS2SuperscriptYSize" : 400,
    "openTypeOS2SuperscriptXOffset" : 0,
    "openTypeOS2SuperscriptYOffset" : 200,
    "openTypeOS2StrikeoutSize" : 20,
    "openTypeOS2StrikeoutPosition" : 300,
    "openTypeVheaVertTypoAscender" : 750,
    "openTypeVheaVertTypoDescender" : -250,
    "openTypeVheaVertTypoLineGap" : 200,
    "openTypeVheaCaretSlopeRise" : 0,
    "openTypeVheaCaretSlopeRun" : 1,
    "openTypeVheaCaretOffset" : 0,
    "postscriptFontName" : "SomeFont-Regular (Postscript Font Name)",
    "postscriptFullName" : "Some Font-Regular (Postscript Full Name)",
    "postscriptSlantAngle" : -12.5,
    "postscriptUniqueID" : 4000000,
    "postscriptUnderlineThickness" : 20,
    "postscriptUnderlinePosition" : -200,
    "postscriptIsFixedPitch" : False,
    "postscriptBlueValues" : [500, 510],
    "postscriptOtherBlues" : [-250, -260],
    "postscriptFamilyBlues" : [500, 510],
    "postscriptFamilyOtherBlues" : [-250, -260],
    "postscriptStemSnapH" : [100, 120],
    "postscriptStemSnapV" : [80, 90],
    "postscriptBlueFuzz" : 1,
    "postscriptBlueShift" : 7,
    "postscriptBlueScale" : 0.039625,
    "postscriptForceBold" : True,
    "postscriptDefaultWidthX" : 400,
    "postscriptNominalWidthX" : 400,
    "postscriptWeightName" : "Medium",
    "postscriptDefaultCharacter" : ".notdef",
    "postscriptWindowsCharacterSet" : 1,
    "macintoshFONDFamilyID" : 15000,
    "macintoshFONDName" : "SomeFont Regular (FOND Name)",
}
# Expected result of converting ``fontInfoVersion1`` to format version 2.
expectedFontInfo1To2Conversion = {
    "familyName" : "Some Font (Family Name)",
    "styleMapFamilyName" : "Some Font Regular (Style Map Family Name)",
    "styleMapStyleName" : "regular",
    "styleName" : "Regular (Style Name)",
    "unitsPerEm" : 1000,
    "ascender" : 750,
    "capHeight" : 750,
    "xHeight" : 500,
    "descender" : -250,
    "italicAngle" : -12.5,
    "versionMajor" : 1,
    "versionMinor" : 0,
    "year" : 2008,
    "copyright" : "Copyright Some Foundry.",
    "trademark" : "Trademark Some Foundry",
    "note" : "A note.",
    "macintoshFONDFamilyID" : 15000,
    "macintoshFONDName" : "SomeFont Regular (FOND Name)",
    # "openTypeNameCompatibleFullName" was listed twice with the same
    # value; the duplicate has been dropped (a dict literal silently
    # keeps only the last occurrence anyway, so the value is unchanged).
    "openTypeNameCompatibleFullName" : "Some Font Regular (Compatible Full Name)",
    "openTypeNameDescription" : "Some Font by Some Designer for Some Foundry.",
    "openTypeNameDesigner" : "Some Designer",
    "openTypeNameDesignerURL" : "http://somedesigner.com",
    "openTypeNameLicense" : "License info for Some Foundry.",
    "openTypeNameLicenseURL" : "http://somefoundry.com/license",
    "openTypeNameManufacturer" : "Some Foundry",
    "openTypeNameManufacturerURL" : "http://somefoundry.com",
    "openTypeNamePreferredFamilyName" : "Some Font (Preferred Family Name)",
    "openTypeNamePreferredSubfamilyName" : "Regular (Preferred Subfamily Name)",
    "openTypeNameUniqueID" : "OpenType name Table Unique ID",
    "openTypeNameVersion" : "OpenType name Table Version",
    "openTypeOS2VendorID" : "SOME",
    "openTypeOS2WeightClass" : 500,
    "openTypeOS2WidthClass" : 5,
    "postscriptDefaultWidthX" : 400,
    "postscriptFontName" : "SomeFont-Regular (Postscript Font Name)",
    "postscriptFullName" : "Some Font-Regular (Postscript Full Name)",
    "postscriptSlantAngle" : -12.5,
    "postscriptUniqueID" : 4000000,
    "postscriptWeightName" : "Medium",
    "postscriptWindowsCharacterSet" : 1
}
# Expected result of converting ``fontInfoVersion2`` to format version 1.
expectedFontInfo2To1Conversion = {
    "familyName" : "Some Font (Family Name)",
    "menuName" : "Some Font Regular (Style Map Family Name)",
    "fontStyle" : 64,
    "styleName" : "Regular (Style Name)",
    "unitsPerEm" : 1000,
    "ascender" : 750,
    "capHeight" : 750,
    "xHeight" : 500,
    "descender" : -250,
    "italicAngle" : -12.5,
    "versionMajor" : 1,
    "versionMinor" : 0,
    "copyright" : "Copyright Some Foundry.",
    "trademark" : "Trademark Some Foundry",
    "note" : "A note.",
    "fondID" : 15000,
    "fondName" : "SomeFont Regular (FOND Name)",
    # "fullName" was listed twice with different values; the later
    # (Postscript Full Name) entry silently won in the dict literal, so
    # the shadowed "Compatible Full Name" entry has been removed — the
    # compatible full name is already carried by "otMacName" below.
    "fullName" : "Some Font-Regular (Postscript Full Name)",
    "notice" : "Some Font by Some Designer for Some Foundry.",
    "designer" : "Some Designer",
    "designerURL" : "http://somedesigner.com",
    "license" : "License info for Some Foundry.",
    "licenseURL" : "http://somefoundry.com/license",
    "createdBy" : "Some Foundry",
    "vendorURL" : "http://somefoundry.com",
    "otFamilyName" : "Some Font (Preferred Family Name)",
    "otStyleName" : "Regular (Preferred Subfamily Name)",
    "otMacName" : "Some Font Regular (Compatible Full Name)",
    "ttUniqueID" : "OpenType name Table Unique ID",
    "ttVersion" : "OpenType name Table Version",
    "ttVendor" : "SOME",
    "weightValue" : 500,
    "widthName" : "Medium (normal)",
    "defaultWidth" : 400,
    "fontName" : "SomeFont-Regular (Postscript Font Name)",
    "slantAngle" : -12.5,
    "uniqueID" : 4000000,
    "weightName" : "Medium",
    "msCharSet" : 0,
    "year" : 2008
}
| 37.444444 | 80 | 0.667273 |
import sys
import os
import types
import unittest
def getDemoFontPath():
import robofab
root = os.path.dirname(os.path.dirname(os.path.dirname(robofab.__file__)))
return os.path.join(root, "Data", "DemoFont.ufo")
def getDemoFontGlyphSetPath():
return os.path.join(getDemoFontPath(), "glyphs")
def _gatherTestCasesFromCallerByMagic():
frame = sys._getframe(2)
return _gatherTestCasesFromDict(frame.f_globals)
def _gatherTestCasesFromDict(d):
testCases = []
for ob in d.values():
if isinstance(ob, type) and issubclass(ob, unittest.TestCase):
testCases.append(ob)
return testCases
def runTests(testCases=None, verbosity=1):
    """Run the given TestCase classes with a text runner.

    When testCases is None, the caller's module is scanned for
    unittest.TestCase subclasses.
    """
    if testCases is None:
        testCases = _gatherTestCasesFromCallerByMagic()
    loader = unittest.TestLoader()
    allSuites = [loader.loadTestsFromTestCase(case) for case in testCases]
    runner = unittest.TextTestRunner(verbosity=verbosity)
    runner.run(unittest.TestSuite(allSuites))
# font info values used by several tests
# UFO format version 1 (RoboFab-era) font info attribute names and sample values.
fontInfoVersion1 = {
    "familyName" : "Some Font (Family Name)",
    "styleName" : "Regular (Style Name)",
    "fullName" : "Some Font-Regular (Postscript Full Name)",
    "fontName" : "SomeFont-Regular (Postscript Font Name)",
    "menuName" : "Some Font Regular (Style Map Family Name)",
    "fontStyle" : 64,
    "note" : "A note.",
    "versionMajor" : 1,
    "versionMinor" : 0,
    "year" : 2008,
    "copyright" : "Copyright Some Foundry.",
    "notice" : "Some Font by Some Designer for Some Foundry.",
    "trademark" : "Trademark Some Foundry",
    "license" : "License info for Some Foundry.",
    "licenseURL" : "http://somefoundry.com/license",
    "createdBy" : "Some Foundry",
    "designer" : "Some Designer",
    "designerURL" : "http://somedesigner.com",
    "vendorURL" : "http://somefoundry.com",
    "unitsPerEm" : 1000,
    "ascender" : 750,
    "descender" : -250,
    "capHeight" : 750,
    "xHeight" : 500,
    "defaultWidth" : 400,
    "slantAngle" : -12.5,
    "italicAngle" : -12.5,
    "widthName" : "Medium (normal)",
    "weightName" : "Medium",
    "weightValue" : 500,
    "fondName" : "SomeFont Regular (FOND Name)",
    "otFamilyName" : "Some Font (Preferred Family Name)",
    "otStyleName" : "Regular (Preferred Subfamily Name)",
    "otMacName" : "Some Font Regular (Compatible Full Name)",
    "msCharSet" : 0,
    "fondID" : 15000,
    "uniqueID" : 4000000,
    "ttVendor" : "SOME",
    "ttUniqueID" : "OpenType name Table Unique ID",
    "ttVersion" : "OpenType name Table Version",
}
# UFO format version 2 font info attribute names and sample values
# (openType*, postscript*, macintosh* namespaced keys).
fontInfoVersion2 = {
    "familyName" : "Some Font (Family Name)",
    "styleName" : "Regular (Style Name)",
    "styleMapFamilyName" : "Some Font Regular (Style Map Family Name)",
    "styleMapStyleName" : "regular",
    "versionMajor" : 1,
    "versionMinor" : 0,
    "year" : 2008,
    "copyright" : "Copyright Some Foundry.",
    "trademark" : "Trademark Some Foundry",
    "unitsPerEm" : 1000,
    "descender" : -250,
    "xHeight" : 500,
    "capHeight" : 750,
    "ascender" : 750,
    "italicAngle" : -12.5,
    "note" : "A note.",
    "openTypeHeadCreated" : "2000/01/01 00:00:00",
    "openTypeHeadLowestRecPPEM" : 10,
    "openTypeHeadFlags" : [0, 1],
    "openTypeHheaAscender" : 750,
    "openTypeHheaDescender" : -250,
    "openTypeHheaLineGap" : 200,
    "openTypeHheaCaretSlopeRise" : 1,
    "openTypeHheaCaretSlopeRun" : 0,
    "openTypeHheaCaretOffset" : 0,
    "openTypeNameDesigner" : "Some Designer",
    "openTypeNameDesignerURL" : "http://somedesigner.com",
    "openTypeNameManufacturer" : "Some Foundry",
    "openTypeNameManufacturerURL" : "http://somefoundry.com",
    "openTypeNameLicense" : "License info for Some Foundry.",
    "openTypeNameLicenseURL" : "http://somefoundry.com/license",
    "openTypeNameVersion" : "OpenType name Table Version",
    "openTypeNameUniqueID" : "OpenType name Table Unique ID",
    "openTypeNameDescription" : "Some Font by Some Designer for Some Foundry.",
    "openTypeNamePreferredFamilyName" : "Some Font (Preferred Family Name)",
    "openTypeNamePreferredSubfamilyName" : "Regular (Preferred Subfamily Name)",
    "openTypeNameCompatibleFullName" : "Some Font Regular (Compatible Full Name)",
    "openTypeNameSampleText" : "Sample Text for Some Font.",
    "openTypeNameWWSFamilyName" : "Some Font (WWS Family Name)",
    "openTypeNameWWSSubfamilyName" : "Regular (WWS Subfamily Name)",
    "openTypeOS2WidthClass" : 5,
    "openTypeOS2WeightClass" : 500,
    "openTypeOS2Selection" : [3],
    "openTypeOS2VendorID" : "SOME",
    "openTypeOS2Panose" : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
    "openTypeOS2FamilyClass" : [1, 1],
    "openTypeOS2UnicodeRanges" : [0, 1],
    "openTypeOS2CodePageRanges" : [0, 1],
    "openTypeOS2TypoAscender" : 750,
    "openTypeOS2TypoDescender" : -250,
    "openTypeOS2TypoLineGap" : 200,
    "openTypeOS2WinAscent" : 750,
    "openTypeOS2WinDescent" : -250,
    "openTypeOS2Type" : [],
    "openTypeOS2SubscriptXSize" : 200,
    "openTypeOS2SubscriptYSize" : 400,
    "openTypeOS2SubscriptXOffset" : 0,
    "openTypeOS2SubscriptYOffset" : -100,
    "openTypeOS2SuperscriptXSize" : 200,
    "openTypeOS2SuperscriptYSize" : 400,
    "openTypeOS2SuperscriptXOffset" : 0,
    "openTypeOS2SuperscriptYOffset" : 200,
    "openTypeOS2StrikeoutSize" : 20,
    "openTypeOS2StrikeoutPosition" : 300,
    "openTypeVheaVertTypoAscender" : 750,
    "openTypeVheaVertTypoDescender" : -250,
    "openTypeVheaVertTypoLineGap" : 200,
    "openTypeVheaCaretSlopeRise" : 0,
    "openTypeVheaCaretSlopeRun" : 1,
    "openTypeVheaCaretOffset" : 0,
    "postscriptFontName" : "SomeFont-Regular (Postscript Font Name)",
    "postscriptFullName" : "Some Font-Regular (Postscript Full Name)",
    "postscriptSlantAngle" : -12.5,
    "postscriptUniqueID" : 4000000,
    "postscriptUnderlineThickness" : 20,
    "postscriptUnderlinePosition" : -200,
    "postscriptIsFixedPitch" : False,
    "postscriptBlueValues" : [500, 510],
    "postscriptOtherBlues" : [-250, -260],
    "postscriptFamilyBlues" : [500, 510],
    "postscriptFamilyOtherBlues" : [-250, -260],
    "postscriptStemSnapH" : [100, 120],
    "postscriptStemSnapV" : [80, 90],
    "postscriptBlueFuzz" : 1,
    "postscriptBlueShift" : 7,
    "postscriptBlueScale" : 0.039625,
    "postscriptForceBold" : True,
    "postscriptDefaultWidthX" : 400,
    "postscriptNominalWidthX" : 400,
    "postscriptWeightName" : "Medium",
    "postscriptDefaultCharacter" : ".notdef",
    "postscriptWindowsCharacterSet" : 1,
    "macintoshFONDFamilyID" : 15000,
    "macintoshFONDName" : "SomeFont Regular (FOND Name)",
}
# Expected result of converting fontInfoVersion1 to version-2 attribute names.
# Fix: the original literal listed "openTypeNameCompatibleFullName" twice with
# the same value; the second occurrence silently overrode the first. The
# redundant duplicate is removed — the resulting dict (values and insertion
# order) is unchanged.
expectedFontInfo1To2Conversion = {
    "familyName" : "Some Font (Family Name)",
    "styleMapFamilyName" : "Some Font Regular (Style Map Family Name)",
    "styleMapStyleName" : "regular",
    "styleName" : "Regular (Style Name)",
    "unitsPerEm" : 1000,
    "ascender" : 750,
    "capHeight" : 750,
    "xHeight" : 500,
    "descender" : -250,
    "italicAngle" : -12.5,
    "versionMajor" : 1,
    "versionMinor" : 0,
    "year" : 2008,
    "copyright" : "Copyright Some Foundry.",
    "trademark" : "Trademark Some Foundry",
    "note" : "A note.",
    "macintoshFONDFamilyID" : 15000,
    "macintoshFONDName" : "SomeFont Regular (FOND Name)",
    "openTypeNameCompatibleFullName" : "Some Font Regular (Compatible Full Name)",
    "openTypeNameDescription" : "Some Font by Some Designer for Some Foundry.",
    "openTypeNameDesigner" : "Some Designer",
    "openTypeNameDesignerURL" : "http://somedesigner.com",
    "openTypeNameLicense" : "License info for Some Foundry.",
    "openTypeNameLicenseURL" : "http://somefoundry.com/license",
    "openTypeNameManufacturer" : "Some Foundry",
    "openTypeNameManufacturerURL" : "http://somefoundry.com",
    "openTypeNamePreferredFamilyName" : "Some Font (Preferred Family Name)",
    "openTypeNamePreferredSubfamilyName": "Regular (Preferred Subfamily Name)",
    "openTypeNameUniqueID" : "OpenType name Table Unique ID",
    "openTypeNameVersion" : "OpenType name Table Version",
    "openTypeOS2VendorID" : "SOME",
    "openTypeOS2WeightClass" : 500,
    "openTypeOS2WidthClass" : 5,
    "postscriptDefaultWidthX" : 400,
    "postscriptFontName" : "SomeFont-Regular (Postscript Font Name)",
    "postscriptFullName" : "Some Font-Regular (Postscript Full Name)",
    "postscriptSlantAngle" : -12.5,
    "postscriptUniqueID" : 4000000,
    "postscriptWeightName" : "Medium",
    "postscriptWindowsCharacterSet" : 1
}
# Expected result of converting fontInfoVersion2 back to version-1 attribute names.
# NOTE: "fullName" appears twice in this literal with different values; in a
# dict literal the later entry wins, so at runtime "fullName" maps to the
# Postscript full name, not the compatible full name.
expectedFontInfo2To1Conversion = {
    "familyName" : "Some Font (Family Name)",
    "menuName" : "Some Font Regular (Style Map Family Name)",
    "fontStyle" : 64,
    "styleName" : "Regular (Style Name)",
    "unitsPerEm" : 1000,
    "ascender" : 750,
    "capHeight" : 750,
    "xHeight" : 500,
    "descender" : -250,
    "italicAngle" : -12.5,
    "versionMajor" : 1,
    "versionMinor" : 0,
    "copyright" : "Copyright Some Foundry.",
    "trademark" : "Trademark Some Foundry",
    "note" : "A note.",
    "fondID" : 15000,
    "fondName" : "SomeFont Regular (FOND Name)",
    "fullName" : "Some Font Regular (Compatible Full Name)",  # duplicate key: overridden below
    "notice" : "Some Font by Some Designer for Some Foundry.",
    "designer" : "Some Designer",
    "designerURL" : "http://somedesigner.com",
    "license" : "License info for Some Foundry.",
    "licenseURL" : "http://somefoundry.com/license",
    "createdBy" : "Some Foundry",
    "vendorURL" : "http://somefoundry.com",
    "otFamilyName" : "Some Font (Preferred Family Name)",
    "otStyleName" : "Regular (Preferred Subfamily Name)",
    "otMacName" : "Some Font Regular (Compatible Full Name)",
    "ttUniqueID" : "OpenType name Table Unique ID",
    "ttVersion" : "OpenType name Table Version",
    "ttVendor" : "SOME",
    "weightValue" : 500,
    "widthName" : "Medium (normal)",
    "defaultWidth" : 400,
    "fontName" : "SomeFont-Regular (Postscript Font Name)",
    "fullName" : "Some Font-Regular (Postscript Full Name)",  # wins over the earlier "fullName"
    "slantAngle" : -12.5,
    "uniqueID" : 4000000,
    "weightName" : "Medium",
    "msCharSet" : 0,
    "year" : 2008
}
| true | true |
f7f96cad2bd048ed76edc8ed9412ff9b6f5b5f7a | 618 | py | Python | train_restore_use_models/preprocess_data.py | MECLabTUDA/QA_med_data | 72897cb2d8e520dde6b88318c23bca32eb9210d7 | [
"MIT"
] | null | null | null | train_restore_use_models/preprocess_data.py | MECLabTUDA/QA_med_data | 72897cb2d8e520dde6b88318c23bca32eb9210d7 | [
"MIT"
] | null | null | null | train_restore_use_models/preprocess_data.py | MECLabTUDA/QA_med_data | 72897cb2d8e520dde6b88318c23bca32eb9210d7 | [
"MIT"
] | null | null | null | # Import needed libraries
from mp.data.datasets.dataset_JIP_cnn import JIPDataset
def preprocess_data(config):
    r"""Load the original data from the workflow and preprocess it, saving
    the result into the preprocessed workflow."""
    dataset = JIPDataset(
        img_size=config['input_shape'],
        num_intensities=config['num_intensities'],
        data_type=config['data_type'],
        augmentation=config['augmentation'],
        gpu=True,
        cuda=config['device'],
        msg_bot=config['msg_bot'],
        nr_images=config['nr_images'],
        restore=config['restore'],
    )
    return dataset.preprocess()
| 56.181818 | 127 | 0.703883 |
from mp.data.datasets.dataset_JIP_cnn import JIPDataset
def preprocess_data(config):
    """Load the original data from the workflow and preprocess it via a
    JIPDataset, saving the result into the preprocessed workflow."""
    # config keys: input_shape, num_intensities, data_type, augmentation,
    # device, msg_bot, nr_images, restore.
    JIP = JIPDataset(img_size=config['input_shape'], num_intensities=config['num_intensities'], data_type=config['data_type'],\
                     augmentation=config['augmentation'], gpu=True, cuda=config['device'], msg_bot=config['msg_bot'],\
                     nr_images=config['nr_images'], restore=config['restore'])
    return JIP.preprocess()
| true | true |
f7f96ce53153034f0c909c1981ee2b1921cf1720 | 733 | py | Python | netforce_hr/netforce_hr/migrations/improve_payroll.py | nfco/netforce | 35252eecd0a6633ab9d82162e9e3ff57d4da029a | [
"MIT"
] | 27 | 2015-09-30T23:53:30.000Z | 2021-06-07T04:56:25.000Z | netforce_hr/netforce_hr/migrations/improve_payroll.py | nfco/netforce | 35252eecd0a6633ab9d82162e9e3ff57d4da029a | [
"MIT"
] | 191 | 2015-10-08T11:46:30.000Z | 2019-11-14T02:24:36.000Z | netforce_hr/netforce_hr/migrations/improve_payroll.py | nfco/netforce | 35252eecd0a6633ab9d82162e9e3ff57d4da029a | [
"MIT"
] | 32 | 2015-10-01T03:59:43.000Z | 2022-01-13T07:31:05.000Z | from netforce.model import get_model
from netforce import migration
from netforce.access import set_active_user, get_active_user
class Migration(migration.Migration):
    _name="improve.payroll"
    _version="2.10.0"

    def migrate(self):
        """Backfill a 'draft' state on payslips and payruns that have none."""
        # Run as the admin user (id 1), restoring the caller's user afterwards.
        prev_user = get_active_user()
        set_active_user(1)
        # Same record order as before: all payslips first, then all payruns.
        for model_name in ("hr.payslip", "hr.payrun"):
            for record in get_model(model_name).search_browse([]):
                if not record.state:
                    record.write({'state': 'draft'})
        set_active_user(prev_user)

Migration.register()
| 28.192308 | 65 | 0.582538 | from netforce.model import get_model
from netforce import migration
from netforce.access import set_active_user, get_active_user
class Migration(migration.Migration):
    # Migration id/version used by the netforce migration registry.
    _name="improve.payroll"
    _version="2.10.0"
    def migrate(self):
        # Run as the admin user (id 1), restoring the original user afterwards.
        user_id=get_active_user()
        set_active_user(1)
        # Backfill a 'draft' state on payslips that have none.
        for payslip in get_model("hr.payslip").search_browse([]):
            if not payslip.state:
                payslip.write({
                    'state': 'draft',
                })
        # Same backfill for payruns.
        for payrun in get_model("hr.payrun").search_browse([]):
            if not payrun.state:
                payrun.write({
                    'state': 'draft',
                })
        set_active_user(user_id)
Migration.register()
| true | true |
f7f96edf1cbfedce8446f049553d598fd1d6ecfa | 951 | py | Python | examples/advanced/ray_example/ray_compose_example.py | packerliu/hydra | 3e1debd53eb15229950d170086e563d27cb54a04 | [
"MIT"
] | 1 | 2020-07-13T09:06:16.000Z | 2020-07-13T09:06:16.000Z | examples/advanced/ray_example/ray_compose_example.py | packerliu/hydra | 3e1debd53eb15229950d170086e563d27cb54a04 | [
"MIT"
] | 6 | 2021-03-01T21:23:23.000Z | 2022-02-27T09:15:03.000Z | examples/advanced/ray_example/ray_compose_example.py | packerliu/hydra | 3e1debd53eb15229950d170086e563d27cb54a04 | [
"MIT"
] | 1 | 2021-04-22T08:47:44.000Z | 2021-04-22T08:47:44.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import time
from typing import List, Tuple
import ray
from omegaconf import DictConfig
import hydra
from hydra.experimental import compose
@ray.remote # type: ignore
def train(overrides: List[str], cfg: DictConfig) -> Tuple[List[str], float]:
    # Remote ray task: print the composed config, simulate training work,
    # and return the overrides that produced it together with a dummy score.
    print(cfg.pretty())
    time.sleep(5)
    return overrides, 0.9
@hydra.main(config_path="conf", config_name="config")
def main(cfg: DictConfig) -> None:
    """Fan out one remote training task per (model, dataset) pair and print scores."""
    ray.init(**cfg.ray.init)
    pending = []
    for model_name in ["alexnet", "resnet"]:
        for dataset_name in ["cifar10", "imagenet"]:
            task_overrides = [f"dataset={dataset_name}", f"model={model_name}"]
            task_cfg = compose(overrides=task_overrides)
            pending.append(train.remote(task_overrides, task_cfg))
    # Block until all tasks finish, then report each result.
    for task_overrides, score in ray.get(pending):
        print(f"Result from {task_overrides} : {score}")


if __name__ == "__main__":
    main()
| 25.702703 | 76 | 0.654048 |
import time
from typing import List, Tuple
import ray
from omegaconf import DictConfig
import hydra
from hydra.experimental import compose
@ray.remote
def train(overrides: List[str], cfg: DictConfig) -> Tuple[List[str], float]:
    # Remote ray task: print the composed config, simulate training work,
    # and return the overrides together with a dummy score.
    print(cfg.pretty())
    time.sleep(5)
    return overrides, 0.9
@hydra.main(config_path="conf", config_name="config")
def main(cfg: DictConfig) -> None:
    # Launch one remote training task per (model, dataset) pair, then
    # gather and print the scores.
    ray.init(**cfg.ray.init)
    results = []
    for model in ["alexnet", "resnet"]:
        for dataset in ["cifar10", "imagenet"]:
            overrides = [f"dataset={dataset}", f"model={model}"]
            run_cfg = compose(overrides=overrides)
            ret = train.remote(overrides, run_cfg)
            results.append(ret)
    # ray.get blocks until every task has finished.
    for overrides, score in ray.get(results):
        print(f"Result from {overrides} : {score}")
if __name__ == "__main__":
    main()
| true | true |
f7f96f6a4891149c611ba417bcbaabbc514043af | 2,644 | py | Python | packages/syft/src/syft/core/node/common/action/greenlets_switch.py | jackbandy/PySyft | 0e20e90abab6a7a7ca672d6eedfa1e7f83c4981b | [
"Apache-2.0"
] | null | null | null | packages/syft/src/syft/core/node/common/action/greenlets_switch.py | jackbandy/PySyft | 0e20e90abab6a7a7ca672d6eedfa1e7f83c4981b | [
"Apache-2.0"
] | null | null | null | packages/syft/src/syft/core/node/common/action/greenlets_switch.py | jackbandy/PySyft | 0e20e90abab6a7a7ca672d6eedfa1e7f83c4981b | [
"Apache-2.0"
] | null | null | null | # stdlib
from typing import Tuple
# third party
import gevent
# relative
from .....logger import critical
from ....common.uid import UID
from ....store.storeable_object import StorableObject
from ...abstract.node import AbstractNode
def retrieve_object(
    node: AbstractNode, id_at_location: UID, path: str, proxy_only: bool = False
) -> StorableObject:
    """Poll the node's store until the object at ``id_at_location`` appears.

    Yields to other greenlets between polls; the celery worker's hard time
    limit is what ultimately bounds the wait. Logs every 1500 attempts.
    """
    attempt = 0
    while True:
        found = node.store.get_or_none(key=id_at_location, proxy_only=proxy_only)
        if found is not None:
            return found
        if attempt % 1500 == 0:
            critical(
                f"execute_action on {path} failed due to missing object"
                + f" at: {id_at_location}"
            )
        # Implicit context switch between greenlets.
        gevent.sleep(0)
        attempt += 1
def beaver_retrieve_object(
    node: AbstractNode, id_at_location: UID, nr_parties: int
) -> StorableObject:
    """Poll the beaver cache until shares from all ``nr_parties`` parties are
    present for ``id_at_location``, yielding to other greenlets between polls.

    The celery worker's hard time limit bounds the wait; progress failures are
    logged every 1500 attempts.
    """
    # relative
    from .beaver_action import BEAVER_CACHE

    ctr = 0
    while True:
        store_obj = BEAVER_CACHE.get(id_at_location, None)  # type: ignore
        if store_obj is not None and len(store_obj.data) == nr_parties:
            return store_obj
        if ctr % 1500 == 0:
            critical(
                f"Beaver Retrieval failed for {nr_parties} parties due to missing object"
                + f" at: {id_at_location} values: {store_obj}"
            )
        # Implicit context switch between greenlets.
        gevent.sleep(0)
        ctr += 1
def crypto_store_retrieve_object(  # type: ignore
    op_str: str,
    **kwargs,
) -> Tuple:
    """Fetch SMPC primitives for ``op_str`` from the crypto store, yielding to
    other greenlets until they become available. Logs every 1500 attempts."""
    # relative
    from ....smpc.store.exceptions import EmptyPrimitiveStore
    from ....tensor.smpc.share_tensor import ShareTensor

    crypto_store = ShareTensor.crypto_store
    tries = 0
    while True:
        try:
            primitives = crypto_store.get_primitives_from_store(
                op_str, **kwargs  # type: ignore
            )
        except EmptyPrimitiveStore:
            if tries % 1500 == 0:
                critical(
                    f"Crypto Store Retrieval failed for parties due to missing object: {EmptyPrimitiveStore}"
                )
            # Implicit context switch between greenlets.
            gevent.sleep(0)
            tries += 1
        else:
            return tuple(primitives)
| 31.47619 | 109 | 0.607035 |
from typing import Tuple
import gevent
from .....logger import critical
from ....common.uid import UID
from ....store.storeable_object import StorableObject
from ...abstract.node import AbstractNode
def retrieve_object(
    node: AbstractNode, id_at_location: UID, path: str, proxy_only: bool = False
) -> StorableObject:
    """Busy-poll the node's store until the object at ``id_at_location``
    appears, yielding to other greenlets between polls."""
    # The celery worker's hard time limit bounds this otherwise-infinite loop.
    ctr = 0
    while True:
        store_obj = node.store.get_or_none(key=id_at_location, proxy_only=proxy_only)
        if store_obj is None:
            # Log only every 1500th failed attempt to avoid flooding.
            if ctr % 1500 == 0:
                critical(
                    f"execute_action on {path} failed due to missing object"
                    + f" at: {id_at_location}"
                )
            # sleep(0) is an explicit greenlet yield, not a real delay.
            gevent.sleep(0)
            ctr += 1
        else:
            return store_obj
def beaver_retrieve_object(
    node: AbstractNode, id_at_location: UID, nr_parties: int
) -> StorableObject:
    """Busy-poll the beaver cache until shares from all ``nr_parties`` parties
    are present for ``id_at_location``, yielding to other greenlets between polls."""
    from .beaver_action import BEAVER_CACHE
    # The celery worker's hard time limit bounds this otherwise-infinite loop.
    ctr = 0
    while True:
        store_obj = BEAVER_CACHE.get(id_at_location, None)
        # Keep waiting until the cached entry holds one share per party.
        if store_obj is None or len(store_obj.data) != nr_parties:
            # Log only every 1500th failed attempt to avoid flooding.
            if ctr % 1500 == 0:
                critical(
                    f"Beaver Retrieval failed for {nr_parties} parties due to missing object"
                    + f" at: {id_at_location} values: {store_obj}"
                )
            # sleep(0) is an explicit greenlet yield, not a real delay.
            gevent.sleep(0)
            ctr += 1
        else:
            return store_obj
def crypto_store_retrieve_object(
    op_str: str,
    **kwargs,
) -> Tuple:
    """Busy-poll the SMPC crypto store for primitives matching ``op_str``,
    yielding to other greenlets until they become available."""
    from ....smpc.store.exceptions import EmptyPrimitiveStore
    from ....tensor.smpc.share_tensor import ShareTensor
    crypto_store = ShareTensor.crypto_store
    # The celery worker's hard time limit bounds this otherwise-infinite loop.
    ctr = 0
    while True:
        try:
            store_values = crypto_store.get_primitives_from_store(
                op_str, **kwargs
            )
            return tuple(store_values)
        except EmptyPrimitiveStore:
            # Log only every 1500th failed attempt to avoid flooding.
            if ctr % 1500 == 0:
                critical(
                    f"Crypto Store Retrieval failed for parties due to missing object: {EmptyPrimitiveStore}"
                )
            # sleep(0) is an explicit greenlet yield, not a real delay.
            gevent.sleep(0)
            ctr += 1
| true | true |
f7f96f874be80cf5419958b278dd1ee46fd52ba6 | 13,181 | py | Python | dogechia/rpc/rpc_server.py | hagbardcelene/doge-chia | 72bdf0a7b20a579fe4645f0cb132955e181e1c44 | [
"Apache-2.0"
] | 27 | 2021-07-06T16:33:50.000Z | 2022-02-19T21:11:25.000Z | dogechia/rpc/rpc_server.py | hagbardcelene/doge-chia | 72bdf0a7b20a579fe4645f0cb132955e181e1c44 | [
"Apache-2.0"
] | 15 | 2021-07-07T02:32:59.000Z | 2021-10-15T21:19:51.000Z | dogechia/rpc/rpc_server.py | hagbardcelene/doge-chia | 72bdf0a7b20a579fe4645f0cb132955e181e1c44 | [
"Apache-2.0"
] | 12 | 2021-07-08T15:36:20.000Z | 2022-03-15T08:34:01.000Z | import asyncio
import json
import logging
import traceback
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional
import aiohttp
from dogechia.server.outbound_message import NodeType
from dogechia.server.server import ssl_context_for_server
from dogechia.types.peer_info import PeerInfo
from dogechia.util.byte_types import hexstr_to_bytes
from dogechia.util.ints import uint16
from dogechia.util.json_util import dict_to_json_str, obj_to_response
from dogechia.util.ws_message import create_payload, create_payload_dict, format_response, pong
log = logging.getLogger(__name__)
class RpcServer:
    """
    Implementation of RPC server.

    Exposes the service's RPC API over HTTP routes and maintains a websocket
    connection to the local daemon for UI push messages.
    """

    def __init__(self, rpc_api: Any, service_name: str, stop_cb: Callable, root_path, net_config):
        # rpc_api: service-specific API object whose methods back RPC commands.
        # stop_cb: callback used by stop_node to shut the whole service down.
        self.rpc_api = rpc_api
        self.stop_cb: Callable = stop_cb
        self.log = log
        self.shut_down = False
        # Websocket to the daemon; None until connect_to_daemon establishes it.
        self.websocket: Optional[aiohttp.ClientWebSocketResponse] = None
        self.service_name = service_name
        self.root_path = root_path
        self.net_config = net_config
        # TLS material: private daemon cert/key plus the private CA pair.
        self.crt_path = root_path / net_config["daemon_ssl"]["private_crt"]
        self.key_path = root_path / net_config["daemon_ssl"]["private_key"]
        self.ca_cert_path = root_path / net_config["private_ssl_ca"]["crt"]
        self.ca_key_path = root_path / net_config["private_ssl_ca"]["key"]
        self.ssl_context = ssl_context_for_server(self.ca_cert_path, self.ca_key_path, self.crt_path, self.key_path)

    async def stop(self):
        # Flag shutdown so connect_to_daemon's retry loop exits, then close
        # the daemon websocket if one is open.
        self.shut_down = True
        if self.websocket is not None:
            await self.websocket.close()

    async def _state_changed(self, *args):
        # Forward state-change payloads from the API to the daemon websocket.
        if self.websocket is None:
            return None
        payloads: List[Dict] = await self.rpc_api._state_changed(*args)
        change = args[0]
        # Connection-related changes additionally broadcast the current
        # connection list for the wallet UI.
        if change == "add_connection" or change == "close_connection" or change == "peer_changed_peak":
            data = await self.get_connections({})
            if data is not None:
                payload = create_payload_dict(
                    "get_connections",
                    data,
                    self.service_name,
                    "wallet_ui",
                )
                payloads.append(payload)
        for payload in payloads:
            if "success" not in payload["data"]:
                payload["data"]["success"] = True
            try:
                await self.websocket.send_str(dict_to_json_str(payload))
            except Exception:
                # Best effort: a failed push must not crash the service.
                tb = traceback.format_exc()
                self.log.warning(f"Sending data failed. Exception {tb}.")

    def state_changed(self, *args):
        # Synchronous entry point: schedule the async broadcast and return.
        if self.websocket is None:
            return None
        asyncio.create_task(self._state_changed(*args))

    def _wrap_http_handler(self, f) -> Callable:
        # Adapt a dict-in/dict-out RPC handler into an aiohttp request handler,
        # converting exceptions into {"success": False, "error": ...} responses.
        async def inner(request) -> aiohttp.web.Response:
            request_data = await request.json()
            try:
                res_object = await f(request_data)
                if res_object is None:
                    res_object = {}
                if "success" not in res_object:
                    res_object["success"] = True
            except Exception as e:
                tb = traceback.format_exc()
                self.log.warning(f"Error while handling message: {tb}")
                if len(e.args) > 0:
                    res_object = {"success": False, "error": f"{e.args[0]}"}
                else:
                    res_object = {"success": False, "error": f"{e}"}

            return obj_to_response(res_object)

        return inner

    async def get_connections(self, request: Dict) -> Dict:
        """Return info dicts for all current peer connections; full nodes also
        include each peer's known peak (height/weight/hash) when available."""
        if self.rpc_api.service.server is None:
            raise ValueError("Global connections is not set")
        if self.rpc_api.service.server._local_type is NodeType.FULL_NODE:
            # TODO add peaks for peers
            connections = self.rpc_api.service.server.get_connections()
            con_info = []
            if self.rpc_api.service.sync_store is not None:
                peak_store = self.rpc_api.service.sync_store.peer_to_peak
            else:
                peak_store = None
            for con in connections:
                if peak_store is not None and con.peer_node_id in peak_store:
                    peak_hash, peak_height, peak_weight = peak_store[con.peer_node_id]
                else:
                    peak_height = None
                    peak_hash = None
                    peak_weight = None
                con_dict = {
                    "type": con.connection_type,
                    "local_port": con.local_port,
                    "peer_host": con.peer_host,
                    "peer_port": con.peer_port,
                    "peer_server_port": con.peer_server_port,
                    "node_id": con.peer_node_id,
                    "creation_time": con.creation_time,
                    "bytes_read": con.bytes_read,
                    "bytes_written": con.bytes_written,
                    "last_message_time": con.last_message_time,
                    "peak_height": peak_height,
                    "peak_weight": peak_weight,
                    "peak_hash": peak_hash,
                }
                con_info.append(con_dict)
        else:
            # Non-full-node services report connections without peak info.
            connections = self.rpc_api.service.server.get_connections()
            con_info = [
                {
                    "type": con.connection_type,
                    "local_port": con.local_port,
                    "peer_host": con.peer_host,
                    "peer_port": con.peer_port,
                    "peer_server_port": con.peer_server_port,
                    "node_id": con.peer_node_id,
                    "creation_time": con.creation_time,
                    "bytes_read": con.bytes_read,
                    "bytes_written": con.bytes_written,
                    "last_message_time": con.last_message_time,
                }
                for con in connections
            ]
        return {"connections": con_info}

    async def open_connection(self, request: Dict):
        """Open an outbound connection to the peer at request["host"]:["port"]."""
        host = request["host"]
        port = request["port"]
        target_node: PeerInfo = PeerInfo(host, uint16(int(port)))
        on_connect = None
        if hasattr(self.rpc_api.service, "on_connect"):
            on_connect = self.rpc_api.service.on_connect
        if getattr(self.rpc_api.service, "server", None) is None or not (
            await self.rpc_api.service.server.start_client(target_node, on_connect)
        ):
            raise ValueError("Start client failed, or server is not set")
        return {}

    async def close_connection(self, request: Dict):
        """Close every connection whose peer node id matches request["node_id"]."""
        node_id = hexstr_to_bytes(request["node_id"])
        if self.rpc_api.service.server is None:
            raise aiohttp.web.HTTPInternalServerError()
        connections_to_close = [c for c in self.rpc_api.service.server.get_connections() if c.peer_node_id == node_id]
        if len(connections_to_close) == 0:
            raise ValueError(f"Connection with node_id {node_id.hex()} does not exist")
        for connection in connections_to_close:
            await connection.close()
        return {}

    async def stop_node(self, request):
        """
        Shuts down the node.
        """
        if self.stop_cb is not None:
            self.stop_cb()
        return {}

    async def ws_api(self, message):
        """
        This function gets called when new message is received via websocket.
        """
        command = message["command"]
        # Acks carry no command to execute.
        if message["ack"]:
            return None

        data = None
        if "data" in message:
            data = message["data"]
        if command == "ping":
            return pong()

        # Dispatch order: server built-ins first, then the service's RPC API.
        f = getattr(self, command, None)
        if f is not None:
            return await f(data)
        f = getattr(self.rpc_api, command, None)
        if f is not None:
            return await f(data)
        raise ValueError(f"unknown_command {command}")

    async def safe_handle(self, websocket, payload):
        # Parse and dispatch one websocket message, replying with either the
        # handler's response or a {"success": False, ...} error envelope.
        message = None
        try:
            message = json.loads(payload)
            self.log.debug(f"Rpc call <- {message['command']}")
            response = await self.ws_api(message)

            # Only respond if we return something from api call
            if response is not None:
                log.debug(f"Rpc response -> {message['command']}")
                # Set success to true automatically (unless it's already set)
                if "success" not in response:
                    response["success"] = True
                await websocket.send_str(format_response(message, response))

        except Exception as e:
            tb = traceback.format_exc()
            self.log.warning(f"Error while handling message: {tb}")
            if message is not None:
                error = e.args[0] if e.args else e
                res = {"success": False, "error": f"{error}"}
                await websocket.send_str(format_response(message, res))

    async def connection(self, ws):
        # Register this service with the daemon, then pump incoming websocket
        # frames until the connection closes or errors.
        data = {"service": self.service_name}
        payload = create_payload("register_service", data, self.service_name, "daemon")
        await ws.send_str(payload)

        while True:
            msg = await ws.receive()
            if msg.type == aiohttp.WSMsgType.TEXT:
                message = msg.data.strip()
                # self.log.info(f"received message: {message}")
                await self.safe_handle(ws, message)
            elif msg.type == aiohttp.WSMsgType.BINARY:
                self.log.debug("Received binary data")
            elif msg.type == aiohttp.WSMsgType.PING:
                self.log.debug("Ping received")
                await ws.pong()
            elif msg.type == aiohttp.WSMsgType.PONG:
                self.log.debug("Pong received")
            else:
                # CLOSE / ERROR / CLOSED all terminate the receive loop.
                if msg.type == aiohttp.WSMsgType.CLOSE:
                    self.log.debug("Closing RPC websocket")
                    await ws.close()
                elif msg.type == aiohttp.WSMsgType.ERROR:
                    self.log.error("Error during receive %s" % ws.exception())
                elif msg.type == aiohttp.WSMsgType.CLOSED:
                    pass

                break

        await ws.close()

    async def connect_to_daemon(self, self_hostname: str, daemon_port: uint16):
        # Keep (re)connecting to the daemon websocket every 2 seconds until
        # stop() sets shut_down.
        while True:
            try:
                if self.shut_down:
                    break
                async with aiohttp.ClientSession() as session:
                    async with session.ws_connect(
                        f"wss://{self_hostname}:{daemon_port}",
                        autoclose=True,
                        autoping=True,
                        heartbeat=60,
                        ssl_context=self.ssl_context,
                        max_msg_size=100 * 1024 * 1024,
                    ) as ws:
                        self.websocket = ws
                        await self.connection(ws)
                self.websocket = None
            except aiohttp.ClientConnectorError:
                self.log.warning(f"Cannot connect to daemon at ws://{self_hostname}:{daemon_port}")
            except Exception as e:
                tb = traceback.format_exc()
                self.log.warning(f"Exception: {tb} {type(e)}")
            await asyncio.sleep(2)
async def start_rpc_server(
    rpc_api: Any,
    self_hostname: str,
    daemon_port: uint16,
    rpc_port: uint16,
    stop_cb: Callable,
    root_path: Path,
    net_config,
    connect_to_daemon=True,
):
    """
    Starts an HTTP server with the following RPC methods, to be used by local clients to
    query the node.

    Returns an async ``cleanup`` callable that stops the server, tears down
    the aiohttp runner, and (if enabled) waits for the daemon connection task.
    """
    app = aiohttp.web.Application()
    rpc_server = RpcServer(rpc_api, rpc_api.service_name, stop_cb, root_path, net_config)
    # Let the service push state changes through the RPC server's websocket.
    rpc_server.rpc_api.service._set_state_changed_callback(rpc_server.state_changed)
    # Service-specific routes, plus the generic connection/stop endpoints.
    http_routes: Dict[str, Callable] = rpc_api.get_routes()

    routes = [aiohttp.web.post(route, rpc_server._wrap_http_handler(func)) for (route, func) in http_routes.items()]
    routes += [
        aiohttp.web.post(
            "/get_connections",
            rpc_server._wrap_http_handler(rpc_server.get_connections),
        ),
        aiohttp.web.post(
            "/open_connection",
            rpc_server._wrap_http_handler(rpc_server.open_connection),
        ),
        aiohttp.web.post(
            "/close_connection",
            rpc_server._wrap_http_handler(rpc_server.close_connection),
        ),
        aiohttp.web.post("/stop_node", rpc_server._wrap_http_handler(rpc_server.stop_node)),
    ]

    app.add_routes(routes)
    if connect_to_daemon:
        daemon_connection = asyncio.create_task(rpc_server.connect_to_daemon(self_hostname, daemon_port))
    runner = aiohttp.web.AppRunner(app, access_log=None)
    await runner.setup()

    # Serve RPC over TLS using the server's private certificate context.
    site = aiohttp.web.TCPSite(runner, self_hostname, int(rpc_port), ssl_context=rpc_server.ssl_context)
    await site.start()

    async def cleanup():
        # Stop the websocket loop first so the daemon task can finish.
        await rpc_server.stop()
        await runner.cleanup()
        if connect_to_daemon:
            await daemon_connection

    return cleanup
| 39.11276 | 118 | 0.578257 | import asyncio
import json
import logging
import traceback
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional
import aiohttp
from dogechia.server.outbound_message import NodeType
from dogechia.server.server import ssl_context_for_server
from dogechia.types.peer_info import PeerInfo
from dogechia.util.byte_types import hexstr_to_bytes
from dogechia.util.ints import uint16
from dogechia.util.json_util import dict_to_json_str, obj_to_response
from dogechia.util.ws_message import create_payload, create_payload_dict, format_response, pong
log = logging.getLogger(__name__)
class RpcServer:
def __init__(self, rpc_api: Any, service_name: str, stop_cb: Callable, root_path, net_config):
self.rpc_api = rpc_api
self.stop_cb: Callable = stop_cb
self.log = log
self.shut_down = False
self.websocket: Optional[aiohttp.ClientWebSocketResponse] = None
self.service_name = service_name
self.root_path = root_path
self.net_config = net_config
self.crt_path = root_path / net_config["daemon_ssl"]["private_crt"]
self.key_path = root_path / net_config["daemon_ssl"]["private_key"]
self.ca_cert_path = root_path / net_config["private_ssl_ca"]["crt"]
self.ca_key_path = root_path / net_config["private_ssl_ca"]["key"]
self.ssl_context = ssl_context_for_server(self.ca_cert_path, self.ca_key_path, self.crt_path, self.key_path)
async def stop(self):
self.shut_down = True
if self.websocket is not None:
await self.websocket.close()
async def _state_changed(self, *args):
if self.websocket is None:
return None
payloads: List[Dict] = await self.rpc_api._state_changed(*args)
change = args[0]
if change == "add_connection" or change == "close_connection" or change == "peer_changed_peak":
data = await self.get_connections({})
if data is not None:
payload = create_payload_dict(
"get_connections",
data,
self.service_name,
"wallet_ui",
)
payloads.append(payload)
for payload in payloads:
if "success" not in payload["data"]:
payload["data"]["success"] = True
try:
await self.websocket.send_str(dict_to_json_str(payload))
except Exception:
tb = traceback.format_exc()
self.log.warning(f"Sending data failed. Exception {tb}.")
def state_changed(self, *args):
if self.websocket is None:
return None
asyncio.create_task(self._state_changed(*args))
def _wrap_http_handler(self, f) -> Callable:
async def inner(request) -> aiohttp.web.Response:
request_data = await request.json()
try:
res_object = await f(request_data)
if res_object is None:
res_object = {}
if "success" not in res_object:
res_object["success"] = True
except Exception as e:
tb = traceback.format_exc()
self.log.warning(f"Error while handling message: {tb}")
if len(e.args) > 0:
res_object = {"success": False, "error": f"{e.args[0]}"}
else:
res_object = {"success": False, "error": f"{e}"}
return obj_to_response(res_object)
return inner
async def get_connections(self, request: Dict) -> Dict:
if self.rpc_api.service.server is None:
raise ValueError("Global connections is not set")
if self.rpc_api.service.server._local_type is NodeType.FULL_NODE:
connections = self.rpc_api.service.server.get_connections()
con_info = []
if self.rpc_api.service.sync_store is not None:
peak_store = self.rpc_api.service.sync_store.peer_to_peak
else:
peak_store = None
for con in connections:
if peak_store is not None and con.peer_node_id in peak_store:
peak_hash, peak_height, peak_weight = peak_store[con.peer_node_id]
else:
peak_height = None
peak_hash = None
peak_weight = None
con_dict = {
"type": con.connection_type,
"local_port": con.local_port,
"peer_host": con.peer_host,
"peer_port": con.peer_port,
"peer_server_port": con.peer_server_port,
"node_id": con.peer_node_id,
"creation_time": con.creation_time,
"bytes_read": con.bytes_read,
"bytes_written": con.bytes_written,
"last_message_time": con.last_message_time,
"peak_height": peak_height,
"peak_weight": peak_weight,
"peak_hash": peak_hash,
}
con_info.append(con_dict)
else:
connections = self.rpc_api.service.server.get_connections()
con_info = [
{
"type": con.connection_type,
"local_port": con.local_port,
"peer_host": con.peer_host,
"peer_port": con.peer_port,
"peer_server_port": con.peer_server_port,
"node_id": con.peer_node_id,
"creation_time": con.creation_time,
"bytes_read": con.bytes_read,
"bytes_written": con.bytes_written,
"last_message_time": con.last_message_time,
}
for con in connections
]
return {"connections": con_info}
async def open_connection(self, request: Dict):
host = request["host"]
port = request["port"]
target_node: PeerInfo = PeerInfo(host, uint16(int(port)))
on_connect = None
if hasattr(self.rpc_api.service, "on_connect"):
on_connect = self.rpc_api.service.on_connect
if getattr(self.rpc_api.service, "server", None) is None or not (
await self.rpc_api.service.server.start_client(target_node, on_connect)
):
raise ValueError("Start client failed, or server is not set")
return {}
async def close_connection(self, request: Dict):
node_id = hexstr_to_bytes(request["node_id"])
if self.rpc_api.service.server is None:
raise aiohttp.web.HTTPInternalServerError()
connections_to_close = [c for c in self.rpc_api.service.server.get_connections() if c.peer_node_id == node_id]
if len(connections_to_close) == 0:
raise ValueError(f"Connection with node_id {node_id.hex()} does not exist")
for connection in connections_to_close:
await connection.close()
return {}
async def stop_node(self, request):
if self.stop_cb is not None:
self.stop_cb()
return {}
async def ws_api(self, message):
command = message["command"]
if message["ack"]:
return None
data = None
if "data" in message:
data = message["data"]
if command == "ping":
return pong()
f = getattr(self, command, None)
if f is not None:
return await f(data)
f = getattr(self.rpc_api, command, None)
if f is not None:
return await f(data)
raise ValueError(f"unknown_command {command}")
async def safe_handle(self, websocket, payload):
message = None
try:
message = json.loads(payload)
self.log.debug(f"Rpc call <- {message['command']}")
response = await self.ws_api(message)
if response is not None:
log.debug(f"Rpc response -> {message['command']}")
if "success" not in response:
response["success"] = True
await websocket.send_str(format_response(message, response))
except Exception as e:
tb = traceback.format_exc()
self.log.warning(f"Error while handling message: {tb}")
if message is not None:
error = e.args[0] if e.args else e
res = {"success": False, "error": f"{error}"}
await websocket.send_str(format_response(message, res))
    async def connection(self, ws):
        """Register this service with the daemon over *ws*, then service
        incoming messages until the socket closes or errors out."""
        data = {"service": self.service_name}
        payload = create_payload("register_service", data, self.service_name, "daemon")
        await ws.send_str(payload)
        while True:
            msg = await ws.receive()
            if msg.type == aiohttp.WSMsgType.TEXT:
                message = msg.data.strip()
                # safe_handle logs/answers errors itself, so the loop survives
                # malformed messages.
                await self.safe_handle(ws, message)
            elif msg.type == aiohttp.WSMsgType.BINARY:
                self.log.debug("Received binary data")
            elif msg.type == aiohttp.WSMsgType.PING:
                self.log.debug("Ping received")
                await ws.pong()
            elif msg.type == aiohttp.WSMsgType.PONG:
                self.log.debug("Pong received")
            else:
                # Any other frame type (CLOSE/ERROR/CLOSED/...) ends the loop.
                if msg.type == aiohttp.WSMsgType.CLOSE:
                    self.log.debug("Closing RPC websocket")
                    await ws.close()
                elif msg.type == aiohttp.WSMsgType.ERROR:
                    self.log.error("Error during receive %s" % ws.exception())
                elif msg.type == aiohttp.WSMsgType.CLOSED:
                    pass
                break
        await ws.close()
async def connect_to_daemon(self, self_hostname: str, daemon_port: uint16):
while True:
try:
if self.shut_down:
break
async with aiohttp.ClientSession() as session:
async with session.ws_connect(
f"wss://{self_hostname}:{daemon_port}",
autoclose=True,
autoping=True,
heartbeat=60,
ssl_context=self.ssl_context,
max_msg_size=100 * 1024 * 1024,
) as ws:
self.websocket = ws
await self.connection(ws)
self.websocket = None
except aiohttp.ClientConnectorError:
self.log.warning(f"Cannot connect to daemon at ws://{self_hostname}:{daemon_port}")
except Exception as e:
tb = traceback.format_exc()
self.log.warning(f"Exception: {tb} {type(e)}")
await asyncio.sleep(2)
async def start_rpc_server(
    rpc_api: Any,
    self_hostname: str,
    daemon_port: uint16,
    rpc_port: uint16,
    stop_cb: Callable,
    root_path: Path,
    net_config,
    connect_to_daemon=True,
):
    """Start an HTTPS RPC server for *rpc_api* and, optionally, a websocket
    client connected to the local daemon.

    Returns an async ``cleanup`` callable that stops the server, tears down
    the aiohttp runner, and waits for the daemon connection task.
    """
    rpc_server = RpcServer(rpc_api, rpc_api.service_name, stop_cb, root_path, net_config)
    rpc_server.rpc_api.service._set_state_changed_callback(rpc_server.state_changed)

    # One POST route per API endpoint, plus the generic connection-management
    # endpoints every service shares.
    http_routes: Dict[str, Callable] = rpc_api.get_routes()
    routes = [aiohttp.web.post(route, rpc_server._wrap_http_handler(func)) for route, func in http_routes.items()]
    for extra_route, extra_func in (
        ("/get_connections", rpc_server.get_connections),
        ("/open_connection", rpc_server.open_connection),
        ("/close_connection", rpc_server.close_connection),
        ("/stop_node", rpc_server.stop_node),
    ):
        routes.append(aiohttp.web.post(extra_route, rpc_server._wrap_http_handler(extra_func)))

    app = aiohttp.web.Application()
    app.add_routes(routes)

    daemon_connection = None
    if connect_to_daemon:
        daemon_connection = asyncio.create_task(rpc_server.connect_to_daemon(self_hostname, daemon_port))

    runner = aiohttp.web.AppRunner(app, access_log=None)
    await runner.setup()
    site = aiohttp.web.TCPSite(runner, self_hostname, int(rpc_port), ssl_context=rpc_server.ssl_context)
    await site.start()

    async def cleanup():
        await rpc_server.stop()
        await runner.cleanup()
        if daemon_connection is not None:
            await daemon_connection

    return cleanup
| true | true |
f7f96f983bda46c5b75f7d737b58070da9dd7d64 | 1,469 | py | Python | app/config.py | chrislaskey/kingdom | be82551824adadfc0c70e08b188eb45adae974c1 | [
"MIT"
] | 6 | 2015-02-17T23:40:42.000Z | 2021-11-04T17:22:57.000Z | app/config.py | chrislaskey/tree-tracker | 23597af0fe3c58cd57622cb01b303ed7743dc0e1 | [
"MIT"
] | null | null | null | app/config.py | chrislaskey/tree-tracker | 23597af0fe3c58cd57622cb01b303ed7743dc0e1 | [
"MIT"
] | 1 | 2020-04-20T05:50:02.000Z | 2020-04-20T05:50:02.000Z | from os import path
# Absolute path of the directory containing this config module; used to
# anchor relative file paths below.
basedir = path.abspath(path.dirname(__file__))
# Flask
# See: http://flask.pocoo.org/docs/config/
# NOTE(review): DEBUG is enabled and SECRET_KEY is committed in source — both
# must be overridden (e.g. via environment) before any production deployment.
DEBUG = True
TESTING = False
SECRET_KEY = 'Academicam omnem quaestionem duabus formis a Cicerone esse expositam olim cognitum est'
# Flask-SQLAlchemy
# See: http://pythonhosted.org/Flask-SQLAlchemy/config.html
# SQLite database file lives one directory above this module.
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + path.join(basedir, '../app.db')
# Flask-Mail
# See: http://pythonhosted.org/flask-mail/
# Defaults target a local unauthenticated SMTP server; mail debugging and
# suppression track the DEBUG/TESTING flags above.
MAIL_SERVER = 'localhost'
MAIL_PORT = 25
MAIL_USE_TLS = False
MAIL_USE_SSL = False
MAIL_DEBUG = DEBUG
MAIL_USERNAME = None
MAIL_PASSWORD = None
MAIL_DEFAULT_SENDER = None
MAIL_MAX_EMAILS = None
MAIL_SUPPRESS_SEND = TESTING
# Flask-Security
# See: http://pythonhosted.org/Flask-Security/configuration.html
# NOTE(review): SECURITY_PASSWORD_SALT is also hard-coded; rotate per
# deployment.
SECURITY_PASSWORD_HASH = 'bcrypt'
SECURITY_PASSWORD_SALT = 'SnK2na02nlnmsWsna01nvmac1SNAncKS9acn3m11aZmQEiAk9314jXma'
SECURITY_EMAIL_SENDER = 'no-reply@localhost'
SECURITY_LOGIN_URL = '/login'
SECURITY_LOGOUT_URL = '/logout'
SECURITY_TRACKABLE = True
SECURITY_RECOVERABLE = True
SECURITY_RESET_URL = '/login/reset-password'
SECURITY_CHANGEABLE = True
SECURITY_CHANGE_URL = '/login/change-password'
SECURITY_REGISTERABLE = True
SECURITY_REGISTER_URL = '/signup'
SECURITY_CONFIRMABLE = True
SECURITY_CONFIRM_URL = '/signup/confirm'
SECURITY_CONFIRM_EMAIL_WITHIN = '7 days'
# Skip email confirmation in development only (tracks DEBUG).
SECURITY_LOGIN_WITHOUT_CONFIRMATION = DEBUG
# Flask-WTF
# See: https://flask-wtf.readthedocs.org/en/latest/
CSRF_ENABLED = True
| 28.25 | 101 | 0.79646 | from os import path
basedir = path.abspath(path.dirname(__file__))
DEBUG = True
TESTING = False
SECRET_KEY = 'Academicam omnem quaestionem duabus formis a Cicerone esse expositam olim cognitum est'
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + path.join(basedir, '../app.db')
MAIL_SERVER = 'localhost'
MAIL_PORT = 25
MAIL_USE_TLS = False
MAIL_USE_SSL = False
MAIL_DEBUG = DEBUG
MAIL_USERNAME = None
MAIL_PASSWORD = None
MAIL_DEFAULT_SENDER = None
MAIL_MAX_EMAILS = None
MAIL_SUPPRESS_SEND = TESTING
SECURITY_PASSWORD_HASH = 'bcrypt'
SECURITY_PASSWORD_SALT = 'SnK2na02nlnmsWsna01nvmac1SNAncKS9acn3m11aZmQEiAk9314jXma'
SECURITY_EMAIL_SENDER = 'no-reply@localhost'
SECURITY_LOGIN_URL = '/login'
SECURITY_LOGOUT_URL = '/logout'
SECURITY_TRACKABLE = True
SECURITY_RECOVERABLE = True
SECURITY_RESET_URL = '/login/reset-password'
SECURITY_CHANGEABLE = True
SECURITY_CHANGE_URL = '/login/change-password'
SECURITY_REGISTERABLE = True
SECURITY_REGISTER_URL = '/signup'
SECURITY_CONFIRMABLE = True
SECURITY_CONFIRM_URL = '/signup/confirm'
SECURITY_CONFIRM_EMAIL_WITHIN = '7 days'
SECURITY_LOGIN_WITHOUT_CONFIRMATION = DEBUG
CSRF_ENABLED = True
| true | true |
f7f96fad38a39cdc2329b349296bcc7034330c2b | 14,272 | py | Python | libcloud/test/dns/test_liquidweb.py | atsaki/libcloud | ae85479e835494e196e2f6e79aae9a475603d8ac | [
"Apache-2.0"
] | 3 | 2016-06-03T03:40:18.000Z | 2018-09-24T05:28:47.000Z | libcloud/test/dns/test_liquidweb.py | atsaki/libcloud | ae85479e835494e196e2f6e79aae9a475603d8ac | [
"Apache-2.0"
] | 1 | 2015-10-26T21:29:56.000Z | 2015-10-27T17:29:20.000Z | libcloud/test/dns/test_liquidweb.py | atsaki/libcloud | ae85479e835494e196e2f6e79aae9a475603d8ac | [
"Apache-2.0"
] | 2 | 2018-09-24T05:28:42.000Z | 2020-12-31T05:11:04.000Z | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.utils.py3 import httplib
from libcloud.dns.drivers.liquidweb import LiquidWebDNSDriver
from libcloud.test import MockHttp
from libcloud.test.file_fixtures import DNSFileFixtures
from libcloud.test.secrets import DNS_PARAMS_LIQUIDWEB
from libcloud.dns.types import ZoneDoesNotExistError, ZoneAlreadyExistsError
from libcloud.dns.types import RecordDoesNotExistError
from libcloud.dns.types import RecordType
from libcloud.dns.base import Zone, Record
class LiquidWebTests(unittest.TestCase):
    """Unit tests for the LiquidWeb DNS driver, backed by canned HTTP fixtures.

    ``LiquidWebMockHttp.type`` selects which fixture each scenario replays.
    """

    def setUp(self):
        LiquidWebMockHttp.type = None
        LiquidWebDNSDriver.connectionCls.conn_classes = (
            None, LiquidWebMockHttp)
        self.driver = LiquidWebDNSDriver(*DNS_PARAMS_LIQUIDWEB)
        self.test_zone = Zone(id='11', type='master', ttl=None,
                              domain='example.com', extra={},
                              driver=self.driver)
        # Fix: pass the driver instance, matching the Zone above (previously
        # the TestCase itself was passed as the record's driver).
        self.test_record = Record(id='13', type=RecordType.A,
                                  name='example.com', zone=self.test_zone,
                                  data='127.0.0.1', driver=self.driver,
                                  extra={})

    def assertHasKeys(self, dictionary, keys):
        for key in keys:
            self.assertTrue(key in dictionary, 'key "%s" not in dictionary' %
                            (key))

    def test_list_zones_empty(self):
        LiquidWebMockHttp.type = 'EMPTY_ZONES_LIST'
        zones = self.driver.list_zones()
        self.assertEqual(zones, [])

    def test_list_zones_success(self):
        zones = self.driver.list_zones()
        self.assertEqual(len(zones), 3)
        zone = zones[0]
        self.assertEqual(zone.id, '378451')
        self.assertEqual(zone.domain, 'blogtest.com')
        self.assertEqual(zone.type, 'NATIVE')
        self.assertEqual(zone.driver, self.driver)
        self.assertEqual(zone.ttl, None)
        second_zone = zones[1]
        self.assertEqual(second_zone.id, '378449')
        self.assertEqual(second_zone.domain, 'oltjanotest.com')
        self.assertEqual(second_zone.type, 'NATIVE')
        self.assertEqual(second_zone.driver, self.driver)
        self.assertEqual(second_zone.ttl, None)
        third_zone = zones[2]
        self.assertEqual(third_zone.id, '378450')
        self.assertEqual(third_zone.domain, 'pythontest.com')
        self.assertEqual(third_zone.type, 'NATIVE')
        self.assertEqual(third_zone.driver, self.driver)
        self.assertEqual(third_zone.ttl, None)

    def test_get_zone_zone_does_not_exist(self):
        LiquidWebMockHttp.type = 'ZONE_DOES_NOT_EXIST'
        try:
            self.driver.get_zone(zone_id='13')
        except ZoneDoesNotExistError:
            e = sys.exc_info()[1]
            self.assertEqual(e.zone_id, '13')
        else:
            self.fail('Exception was not thrown')

    def test_get_zone_success(self):
        LiquidWebMockHttp.type = 'GET_ZONE_SUCCESS'
        zone = self.driver.get_zone(zone_id='13')
        self.assertEqual(zone.id, '13')
        self.assertEqual(zone.domain, 'blogtest.com')
        self.assertEqual(zone.type, 'NATIVE')
        self.assertEqual(zone.ttl, None)
        self.assertEqual(zone.driver, self.driver)

    def test_delete_zone_success(self):
        LiquidWebMockHttp.type = 'DELETE_ZONE_SUCCESS'
        zone = self.test_zone
        status = self.driver.delete_zone(zone=zone)
        self.assertEqual(status, True)

    def test_delete_zone_zone_does_not_exist(self):
        LiquidWebMockHttp.type = 'DELETE_ZONE_ZONE_DOES_NOT_EXIST'
        zone = self.test_zone
        try:
            self.driver.delete_zone(zone=zone)
        except ZoneDoesNotExistError:
            e = sys.exc_info()[1]
            self.assertEqual(e.zone_id, '11')
        else:
            self.fail('Exception was not thrown')

    def test_create_zone_success(self):
        LiquidWebMockHttp.type = 'CREATE_ZONE_SUCCESS'
        zone = self.driver.create_zone(domain='test.com')
        self.assertEqual(zone.id, '13')
        self.assertEqual(zone.domain, 'test.com')
        self.assertEqual(zone.type, 'NATIVE')
        self.assertEqual(zone.ttl, None)
        self.assertEqual(zone.driver, self.driver)

    def test_create_zone_zone_zone_already_exists(self):
        LiquidWebMockHttp.type = 'CREATE_ZONE_ZONE_ALREADY_EXISTS'
        try:
            self.driver.create_zone(domain='test.com')
        except ZoneAlreadyExistsError:
            e = sys.exc_info()[1]
            self.assertEqual(e.zone_id, 'test.com')
        else:
            self.fail('Exception was not thrown')

    def test_list_records_empty(self):
        LiquidWebMockHttp.type = 'EMPTY_RECORDS_LIST'
        zone = self.test_zone
        records = self.driver.list_records(zone=zone)
        self.assertEqual(records, [])

    def test_list_records_success(self):
        LiquidWebMockHttp.type = 'LIST_RECORDS_SUCCESS'
        zone = self.test_zone
        records = self.driver.list_records(zone=zone)
        self.assertEqual(len(records), 3)
        record = records[0]
        self.assertEqual(record.id, '13')
        self.assertEqual(record.type, 'A')
        self.assertEqual(record.name, 'nerd.domain.com')
        self.assertEqual(record.data, '127.0.0.1')
        self.assertEqual(record.zone, self.test_zone)
        self.assertEqual(record.zone.id, '11')
        second_record = records[1]
        self.assertEqual(second_record.id, '11')
        self.assertEqual(second_record.type, 'A')
        self.assertEqual(second_record.name, 'thisboy.domain.com')
        self.assertEqual(second_record.data, '127.0.0.1')
        self.assertEqual(second_record.zone, self.test_zone)
        third_record = records[2]
        self.assertEqual(third_record.id, '10')
        self.assertEqual(third_record.type, 'A')
        self.assertEqual(third_record.name, 'visitor.domain.com')
        self.assertEqual(third_record.data, '127.0.0.1')
        self.assertEqual(third_record.zone, self.test_zone)

    def test_get_record_record_does_not_exist(self):
        LiquidWebMockHttp.type = 'GET_RECORD_RECORD_DOES_NOT_EXIST'
        try:
            self.driver.get_record(zone_id='13', record_id='13')
        except RecordDoesNotExistError:
            e = sys.exc_info()[1]
            self.assertEqual(e.record_id, '13')
        else:
            self.fail('Exception was not thrown')

    def test_get_record_success(self):
        LiquidWebMockHttp.type = 'GET_RECORD_SUCCESS'
        record = self.driver.get_record(zone_id='13', record_id='13')
        self.assertEqual(record.id, '13')
        self.assertEqual(record.type, 'A')
        self.assertEqual(record.name, 'nerd.domain.com')
        self.assertEqual(record.data, '127.0.0.1')

    def test_update_record_success(self):
        LiquidWebMockHttp.type = 'GET_RECORD_SUCCESS'
        record = self.driver.get_record(zone_id='13', record_id='13')
        self.assertEqual(record.id, '13')
        self.assertEqual(record.type, 'A')
        self.assertEqual(record.name, 'nerd.domain.com')
        self.assertEqual(record.data, '127.0.0.1')
        self.assertEqual(record.extra.get('ttl'), 300)
        LiquidWebMockHttp.type = ''
        record1 = self.driver.update_record(record=record, name=record.name,
                                            type=record.type,
                                            data=record.data,
                                            extra={'ttl': 5600})
        self.assertEqual(record1.id, '13')
        self.assertEqual(record1.type, 'A')
        self.assertEqual(record1.name, 'nerd.domain.com')
        self.assertEqual(record1.data, '127.0.0.1')
        self.assertEqual(record1.extra.get('ttl'), 5600)

    def test_delete_record_success(self):
        LiquidWebMockHttp.type = 'DELETE_RECORD_SUCCESS'
        record = self.test_record
        status = self.driver.delete_record(record=record)
        self.assertEqual(status, True)

    def test_delete_record_RECORD_DOES_NOT_EXIST_ERROR(self):
        LiquidWebMockHttp.type = 'DELETE_RECORD_RECORD_DOES_NOT_EXIST'
        record = self.test_record
        try:
            self.driver.delete_record(record=record)
        except RecordDoesNotExistError:
            e = sys.exc_info()[1]
            self.assertEqual(e.record_id, '13')
        else:
            self.fail('Exception was not thrown')

    def test_create_record_success(self):
        # TODO: not implemented (create-record fixture is empty).
        pass

    def test_record_already_exists_error(self):
        # TODO: not implemented (duplicate-record scenario not wired up).
        pass
class LiquidWebMockHttp(MockHttp):
    """Canned HTTP responses for the LiquidWeb API.

    MockHttp maps each request URL to a method name (path separators become
    underscores) suffixed with the active ``LiquidWebMockHttp.type`` set by
    the individual tests; every handler replays a JSON fixture with HTTP 200.
    """
    fixtures = DNSFileFixtures('liquidweb')

    def _v1_Network_DNS_Zone_list(self, method, url, body, headers):
        body = self.fixtures.load('zones_list.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_Network_DNS_Zone_list_EMPTY_ZONES_LIST(self, method, url, body,
                                                   headers):
        body = self.fixtures.load('empty_zones_list.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_Network_DNS_Zone_details_ZONE_DOES_NOT_EXIST(self, method, url,
                                                         body, headers):
        body = self.fixtures.load('zone_does_not_exist.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_Network_DNS_Zone_details_GET_ZONE_SUCCESS(self, method, url,
                                                      body, headers):
        body = self.fixtures.load('get_zone.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_Network_DNS_Zone_delete_DELETE_ZONE_SUCCESS(self, method, url,
                                                        body, headers):
        body = self.fixtures.load('delete_zone_success.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_Network_DNS_Zone_delete_DELETE_ZONE_ZONE_DOES_NOT_EXIST(
            self, method, url, body, headers):
        body = self.fixtures.load('zone_does_not_exist.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_Network_DNS_Zone_create_CREATE_ZONE_SUCCESS(self, method, url,
                                                        body, headers):
        body = self.fixtures.load('create_zone_success.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_Network_DNS_Zone_create_CREATE_ZONE_ZONE_ALREADY_EXISTS(
            self, method, url, body, headers):
        body = self.fixtures.load('duplicate_record.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_Network_DNS_Record_list_EMPTY_RECORDS_LIST(self, method, url, body,
                                                       headers):
        body = self.fixtures.load('empty_records_list.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_Network_DNS_Record_list_LIST_RECORDS_SUCCESS(self, method, url,
                                                         body, headers):
        body = self.fixtures.load('records_list.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_Network_DNS_Record_details_GET_RECORD_RECORD_DOES_NOT_EXIST(
            self, method, url, body, headers):
        body = self.fixtures.load('record_does_not_exist.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_Network_DNS_Zone_details_GET_RECORD_RECORD_DOES_NOT_EXIST(
            self, method, url, body, headers):
        body = self.fixtures.load('get_zone.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_Network_DNS_Zone_details_GET_RECORD_SUCCESS(self, method, url,
                                                        body, headers):
        body = self.fixtures.load('get_zone.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_Network_DNS_Record_details_GET_RECORD_SUCCESS(self, method, url,
                                                          body, headers):
        body = self.fixtures.load('get_record.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_Network_DNS_Record_delete_DELETE_RECORD_SUCCESS(self, method, url,
                                                            body, headers):
        body = self.fixtures.load('delete_record.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_Network_DNS_Record_delete_DELETE_RECORD_RECORD_DOES_NOT_EXIST(
            self, method, url, body, headers):
        body = self.fixtures.load('record_does_not_exist.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    # NOTE(review): the two handlers below load an empty fixture name; they
    # back the not-yet-implemented create-record tests.
    def _v1_Network_DNS_Record_create_CREATE_RECORD_SUCCESS(self, method, url,
                                                            body, headers):
        body = self.fixtures.load('')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_Network_DNS_Record_ALREADY_EXISTS_ERROR(self, method, url, body,
                                                    headers):
        body = self.fixtures.load('')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _v1_Network_DNS_Record_update(self, method, url, body, headers):
        body = self.fixtures.load('update_record.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
# Allow running this test module directly; sys.exit propagates unittest's
# exit status to the shell.
if __name__ == '__main__':
    sys.exit(unittest.main())
| 42.350148 | 79 | 0.639364 |
import sys
import unittest
from libcloud.utils.py3 import httplib
from libcloud.dns.drivers.liquidweb import LiquidWebDNSDriver
from libcloud.test import MockHttp
from libcloud.test.file_fixtures import DNSFileFixtures
from libcloud.test.secrets import DNS_PARAMS_LIQUIDWEB
from libcloud.dns.types import ZoneDoesNotExistError, ZoneAlreadyExistsError
from libcloud.dns.types import RecordDoesNotExistError
from libcloud.dns.types import RecordType
from libcloud.dns.base import Zone, Record
class LiquidWebTests(unittest.TestCase):
def setUp(self):
LiquidWebMockHttp.type = None
LiquidWebDNSDriver.connectionCls.conn_classes = (
None, LiquidWebMockHttp)
self.driver = LiquidWebDNSDriver(*DNS_PARAMS_LIQUIDWEB)
self.test_zone = Zone(id='11', type='master', ttl=None,
domain='example.com', extra={},
driver=self.driver)
self.test_record = Record(id='13', type=RecordType.A,
name='example.com', zone=self.test_zone,
data='127.0.0.1', driver=self, extra={})
def assertHasKeys(self, dictionary, keys):
for key in keys:
self.assertTrue(key in dictionary, 'key "%s" not in dictionary' %
(key))
def test_list_zones_empty(self):
LiquidWebMockHttp.type = 'EMPTY_ZONES_LIST'
zones = self.driver.list_zones()
self.assertEqual(zones, [])
def test_list_zones_success(self):
zones = self.driver.list_zones()
self.assertEqual(len(zones), 3)
zone = zones[0]
self.assertEqual(zone.id, '378451')
self.assertEqual(zone.domain, 'blogtest.com')
self.assertEqual(zone.type, 'NATIVE')
self.assertEqual(zone.driver, self.driver)
self.assertEqual(zone.ttl, None)
second_zone = zones[1]
self.assertEqual(second_zone.id, '378449')
self.assertEqual(second_zone.domain, 'oltjanotest.com')
self.assertEqual(second_zone.type, 'NATIVE')
self.assertEqual(second_zone.driver, self.driver)
self.assertEqual(second_zone.ttl, None)
third_zone = zones[2]
self.assertEqual(third_zone.id, '378450')
self.assertEqual(third_zone.domain, 'pythontest.com')
self.assertEqual(third_zone.type, 'NATIVE')
self.assertEqual(third_zone.driver, self.driver)
self.assertEqual(third_zone.ttl, None)
def test_get_zone_zone_does_not_exist(self):
LiquidWebMockHttp.type = 'ZONE_DOES_NOT_EXIST'
try:
self.driver.get_zone(zone_id='13')
except ZoneDoesNotExistError:
e = sys.exc_info()[1]
self.assertEqual(e.zone_id, '13')
else:
self.fail('Exception was not thrown')
def test_get_zone_success(self):
LiquidWebMockHttp.type = 'GET_ZONE_SUCCESS'
zone = self.driver.get_zone(zone_id='13')
self.assertEqual(zone.id, '13')
self.assertEqual(zone.domain, 'blogtest.com')
self.assertEqual(zone.type, 'NATIVE')
self.assertEqual(zone.ttl, None)
self.assertEqual(zone.driver, self.driver)
def test_delete_zone_success(self):
LiquidWebMockHttp.type = 'DELETE_ZONE_SUCCESS'
zone = self.test_zone
status = self.driver.delete_zone(zone=zone)
self.assertEqual(status, True)
def test_delete_zone_zone_does_not_exist(self):
LiquidWebMockHttp.type = 'DELETE_ZONE_ZONE_DOES_NOT_EXIST'
zone = self.test_zone
try:
self.driver.delete_zone(zone=zone)
except ZoneDoesNotExistError:
e = sys.exc_info()[1]
self.assertEqual(e.zone_id, '11')
else:
self.fail('Exception was not thrown')
def test_create_zone_success(self):
LiquidWebMockHttp.type = 'CREATE_ZONE_SUCCESS'
zone = self.driver.create_zone(domain='test.com')
self.assertEqual(zone.id, '13')
self.assertEqual(zone.domain, 'test.com')
self.assertEqual(zone.type, 'NATIVE')
self.assertEqual(zone.ttl, None)
self.assertEqual(zone.driver, self.driver)
def test_create_zone_zone_zone_already_exists(self):
LiquidWebMockHttp.type = 'CREATE_ZONE_ZONE_ALREADY_EXISTS'
try:
self.driver.create_zone(domain='test.com')
except ZoneAlreadyExistsError:
e = sys.exc_info()[1]
self.assertEqual(e.zone_id, 'test.com')
else:
self.fail('Exception was not thrown')
def test_list_records_empty(self):
LiquidWebMockHttp.type = 'EMPTY_RECORDS_LIST'
zone = self.test_zone
records = self.driver.list_records(zone=zone)
self.assertEqual(records, [])
def test_list_records_success(self):
LiquidWebMockHttp.type = 'LIST_RECORDS_SUCCESS'
zone = self.test_zone
records = self.driver.list_records(zone=zone)
self.assertEqual(len(records), 3)
record = records[0]
self.assertEqual(record.id, '13')
self.assertEqual(record.type, 'A')
self.assertEqual(record.name, 'nerd.domain.com')
self.assertEqual(record.data, '127.0.0.1')
self.assertEqual(record.zone, self.test_zone)
self.assertEqual(record.zone.id, '11')
second_record = records[1]
self.assertEqual(second_record.id, '11')
self.assertEqual(second_record.type, 'A')
self.assertEqual(second_record.name, 'thisboy.domain.com')
self.assertEqual(second_record.data, '127.0.0.1')
self.assertEqual(second_record.zone, self.test_zone)
third_record = records[2]
self.assertEqual(third_record.id, '10')
self.assertEqual(third_record.type, 'A')
self.assertEqual(third_record.name, 'visitor.domain.com')
self.assertEqual(third_record.data, '127.0.0.1')
self.assertEqual(third_record.zone, self.test_zone)
def test_get_record_record_does_not_exist(self):
LiquidWebMockHttp.type = 'GET_RECORD_RECORD_DOES_NOT_EXIST'
try:
self.driver.get_record(zone_id='13', record_id='13')
except RecordDoesNotExistError:
e = sys.exc_info()[1]
self.assertEqual(e.record_id, '13')
else:
self.fail('Exception was not thrown')
def test_get_record_success(self):
LiquidWebMockHttp.type = 'GET_RECORD_SUCCESS'
record = self.driver.get_record(zone_id='13', record_id='13')
self.assertEqual(record.id, '13')
self.assertEqual(record.type, 'A')
self.assertEqual(record.name, 'nerd.domain.com')
self.assertEqual(record.data, '127.0.0.1')
def test_update_record_success(self):
LiquidWebMockHttp.type = 'GET_RECORD_SUCCESS'
record = self.driver.get_record(zone_id='13', record_id='13')
self.assertEqual(record.id, '13')
self.assertEqual(record.type, 'A')
self.assertEqual(record.name, 'nerd.domain.com')
self.assertEqual(record.data, '127.0.0.1')
self.assertEqual(record.extra.get('ttl'), 300)
LiquidWebMockHttp.type = ''
record1 = self.driver.update_record(record=record, name=record.name,
type=record.type,
data=record.data,
extra={'ttl': 5600})
self.assertEqual(record1.id, '13')
self.assertEqual(record1.type, 'A')
self.assertEqual(record1.name, 'nerd.domain.com')
self.assertEqual(record1.data, '127.0.0.1')
self.assertEqual(record1.extra.get('ttl'), 5600)
def test_delete_record_success(self):
LiquidWebMockHttp.type = 'DELETE_RECORD_SUCCESS'
record = self.test_record
status = self.driver.delete_record(record=record)
self.assertEqual(status, True)
def test_delete_record_RECORD_DOES_NOT_EXIST_ERROR(self):
LiquidWebMockHttp.type = 'DELETE_RECORD_RECORD_DOES_NOT_EXIST'
record = self.test_record
try:
self.driver.delete_record(record=record)
except RecordDoesNotExistError:
e = sys.exc_info()[1]
self.assertEqual(e.record_id, '13')
else:
self.fail('Exception was not thrown')
def test_create_record_success(self):
pass
def test_record_already_exists_error(self):
pass
class LiquidWebMockHttp(MockHttp):
fixtures = DNSFileFixtures('liquidweb')
def _v1_Network_DNS_Zone_list(self, method, url, body, headers):
body = self.fixtures.load('zones_list.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_Network_DNS_Zone_list_EMPTY_ZONES_LIST(self, method, url, body,
headers):
body = self.fixtures.load('empty_zones_list.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_Network_DNS_Zone_details_ZONE_DOES_NOT_EXIST(self, method, url,
body, headers):
body = self.fixtures.load('zone_does_not_exist.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_Network_DNS_Zone_details_GET_ZONE_SUCCESS(self, method, url,
body, headers):
body = self.fixtures.load('get_zone.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_Network_DNS_Zone_delete_DELETE_ZONE_SUCCESS(self, method, url,
body, headers):
body = self.fixtures.load('delete_zone_success.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_Network_DNS_Zone_delete_DELETE_ZONE_ZONE_DOES_NOT_EXIST(
self, method, url, body, headers):
body = self.fixtures.load('zone_does_not_exist.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_Network_DNS_Zone_create_CREATE_ZONE_SUCCESS(self, method, url,
body, headers):
body = self.fixtures.load('create_zone_success.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_Network_DNS_Zone_create_CREATE_ZONE_ZONE_ALREADY_EXISTS(
self, method, url, body, headers):
body = self.fixtures.load('duplicate_record.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_Network_DNS_Record_list_EMPTY_RECORDS_LIST(self, method, url, body,
headers):
body = self.fixtures.load('empty_records_list.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_Network_DNS_Record_list_LIST_RECORDS_SUCCESS(self, method, url,
body, headers):
body = self.fixtures.load('records_list.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_Network_DNS_Record_details_GET_RECORD_RECORD_DOES_NOT_EXIST(
self, method, url, body, headers):
body = self.fixtures.load('record_does_not_exist.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_Network_DNS_Zone_details_GET_RECORD_RECORD_DOES_NOT_EXIST(
self, method, url, body, headers):
body = self.fixtures.load('get_zone.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_Network_DNS_Zone_details_GET_RECORD_SUCCESS(self, method, url,
body, headers):
body = self.fixtures.load('get_zone.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_Network_DNS_Record_details_GET_RECORD_SUCCESS(self, method, url,
body, headers):
body = self.fixtures.load('get_record.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_Network_DNS_Record_delete_DELETE_RECORD_SUCCESS(self, method, url,
body, headers):
body = self.fixtures.load('delete_record.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_Network_DNS_Record_delete_DELETE_RECORD_RECORD_DOES_NOT_EXIST(
self, method, url, body, headers):
body = self.fixtures.load('record_does_not_exist.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_Network_DNS_Record_create_CREATE_RECORD_SUCCESS(self, method, url,
body, headers):
body = self.fixtures.load('')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_Network_DNS_Record_ALREADY_EXISTS_ERROR(self, method, url, body,
headers):
body = self.fixtures.load('')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _v1_Network_DNS_Record_update(self, method, url, body, headers):
body = self.fixtures.load('update_record.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
# Allow running this test module directly; exit status reflects test results.
if __name__ == '__main__':
    sys.exit(unittest.main())
| true | true |
f7f96fbf03212b6ead2f3ae676c9fb9b8ef0fcbe | 1,059 | py | Python | platform/ext/target/nxp/lpcxpresso55s69/scripts/flash_bl2.py | urutva/trusted-firmware-m | e7f1c4c42b3322c8d4077060b433c87b508283b9 | [
"BSD-3-Clause"
] | null | null | null | platform/ext/target/nxp/lpcxpresso55s69/scripts/flash_bl2.py | urutva/trusted-firmware-m | e7f1c4c42b3322c8d4077060b433c87b508283b9 | [
"BSD-3-Clause"
] | null | null | null | platform/ext/target/nxp/lpcxpresso55s69/scripts/flash_bl2.py | urutva/trusted-firmware-m | e7f1c4c42b3322c8d4077060b433c87b508283b9 | [
"BSD-3-Clause"
] | 1 | 2021-04-21T20:53:02.000Z | 2021-04-21T20:53:02.000Z | # Copyright (c) 2020, Linaro. All rights reserved.
# Copyright (c) 2020, Arm Limited. All rights reserved.
# Copyright (c) 2021, NXP Semiconductors. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
import os
import platform

# Build a J-Link commander script and use it to flash BL2 plus the signed
# secure/non-secure images onto an LPC55S69.
# NOTE(review): assumes the script is launched from this scripts/ directory;
# the relative chdir below fails otherwise.
os.chdir('../../../../../../build')

# Flash with JLinkExe
FILE = "flash.jlink"

# Remove any stale commander script first.  BUGFIX: the original passed the
# literal string "FILE" to the shell ('del /f FILE' / 'rm -rf FILE') instead
# of the variable's value, so the old script was never deleted and the echo
# appends below kept growing it on every run.
if os.path.isfile(FILE):
    if platform.system() == 'Windows':
        os.system('del /f ' + FILE)
    else:
        os.system('rm -rf ' + FILE)

# Append the J-Link commands: reset, erase, load images, reset, run, quit.
os.system('echo r >> ' + FILE)
os.system('echo erase >> ' + FILE)
os.system('echo loadfile bin/bl2.hex >> ' + FILE)
os.system('echo loadfile bin/tfm_s_signed.bin 0x8000 >> ' + FILE)
os.system('echo loadfile bin/tfm_ns_signed.bin 0x30000 >> ' + FILE)
os.system('echo r >> ' + FILE)
os.system('echo go >> ' + FILE)
os.system('echo exit >> ' + FILE)

# The commander binary is named differently per platform.
if platform.system() == 'Windows':
    os.system('JLink -device lpc55s69 -if swd -speed 2000 -autoconnect 1 -commanderscript ' + FILE)
else:
    os.system('JLinkExe -device lpc55s69 -if swd -speed 2000 -autoconnect 1 -commanderscript ' + FILE)
| 31.147059 | 101 | 0.658168 |
import os
import platform

# Build a J-Link commander script and flash BL2 + signed S/NS images.
# NOTE(review): assumes launch from the scripts/ directory (relative chdir).
os.chdir('../../../../../../build')

FILE = "flash.jlink"

# Remove any stale commander script.  BUGFIX: the original passed the literal
# string "FILE" to the shell instead of the variable's value, so the old
# script was never deleted and the appends below kept growing it.
if os.path.isfile(FILE):
    if platform.system() == 'Windows':
        os.system('del /f ' + FILE)
    else:
        os.system('rm -rf ' + FILE)

os.system('echo r >> ' + FILE)
os.system('echo erase >> ' + FILE)
os.system('echo loadfile bin/bl2.hex >> ' + FILE)
os.system('echo loadfile bin/tfm_s_signed.bin 0x8000 >> ' + FILE)
os.system('echo loadfile bin/tfm_ns_signed.bin 0x30000 >> ' + FILE)
os.system('echo r >> ' + FILE)
os.system('echo go >> ' + FILE)
os.system('echo exit >> ' + FILE)

if platform.system() == 'Windows':
    os.system('JLink -device lpc55s69 -if swd -speed 2000 -autoconnect 1 -commanderscript ' + FILE)
else:
    os.system('JLinkExe -device lpc55s69 -if swd -speed 2000 -autoconnect 1 -commanderscript ' + FILE)
| true | true |
f7f970bd9d2b7f847e30117422df856dca92f41f | 5,861 | py | Python | causal_bald/application/workflows/active_learning.py | anndvision/causal-bald | 4e58ef0afa4ba7c4e12342e6052b39a93c95c680 | [
"Apache-2.0"
] | 8 | 2021-10-30T17:25:28.000Z | 2022-03-25T16:02:07.000Z | causal_bald/application/workflows/active_learning.py | anndvision/causal-bald | 4e58ef0afa4ba7c4e12342e6052b39a93c95c680 | [
"Apache-2.0"
] | 3 | 2021-12-03T12:00:07.000Z | 2021-12-11T11:36:23.000Z | causal_bald/application/workflows/active_learning.py | anndvision/causal-bald | 4e58ef0afa4ba7c4e12342e6052b39a93c95c680 | [
"Apache-2.0"
] | 5 | 2021-10-30T13:19:09.000Z | 2022-02-06T18:31:05.000Z | import json
import numpy as np
import scipy.stats
from copy import deepcopy
from causal_bald.library import models
from causal_bald.library import datasets
from causal_bald.library import acquisitions
from causal_bald.application.workflows import utils
def active_learner(model_name, config, experiment_dir, trial):
# Set dataset seeds
dataset_name = config.get("dataset_name")
config["ds_train"]["seed"] = trial
config["ds_valid"]["seed"] = trial + 1 if dataset_name == "synthetic" else trial
config["ds_test"]["seed"] = trial + 2 if dataset_name == "synthetic" else trial
# Get datasets
ds_active = datasets.ActiveLearningDataset(
datasets.DATASETS.get(dataset_name)(**config.get("ds_train"))
)
ds_valid = datasets.DATASETS.get(dataset_name)(**config.get("ds_valid"))
# Set the trial dir
experiment_dir = utils.DIRECTORIES[model_name](
base_dir=experiment_dir, config=config
)
trial_dir = experiment_dir / f"trial-{trial:03d}"
trial_dir.mkdir(parents=True, exist_ok=True)
# Write config for downstream use
config_path = trial_dir / "config.json"
with config_path.open(mode="w") as cp:
json.dump(config, cp)
# Get the acquisition function
acquisition_function = acquisitions.FUNCTIONS.get(
config.get("acquisition_function")
)
# Train pi model if needed by acquisition
pt = get_propensities(trial_dir=trial_dir, config=config)
# Do active learning loop
step_size = config.get("step_size")
warm_start_size = config.get("warm_start_size")
max_acquisitions = config.get("max_acquisitions")
temperature = config.get("temperature")
use_gumbel = config.get("use_gumbel")
for i in range(max_acquisitions):
batch_size = warm_start_size if i == 0 else step_size
acquisition_dir = trial_dir / f"acquisition-{i:03d}"
acquired_path = acquisition_dir / "aquired.json"
if not acquired_path.exists():
if i == 0:
scores = acquisitions.random(
mu_0=None, mu_1=None, t=ds_active.dataset.t, pt=pt, temperature=None
)[ds_active.pool_dataset.indices]
else:
# Predict pool set
mu_0, mu_1 = utils.PREDICT_FUNCTIONS[model_name](
dataset=ds_active.dataset,
job_dir=trial_dir / f"acquisition-{i-1:03d}",
config=config,
)
# Get acquisition scores
scores = (
acquisition_function(
mu_0=mu_0,
mu_1=mu_1,
t=ds_active.dataset.t,
pt=pt,
temperature=temperature if temperature > 0.0 else 1.0,
)
)[ds_active.pool_dataset.indices]
if temperature > 0.0:
if use_gumbel:
p = scores + scipy.stats.gumbel_r.rvs(
loc=0, scale=1, size=len(scores), random_state=None,
)
idx = np.argpartition(p, -batch_size)[-batch_size:]
else:
scores = np.exp(scores)
p = scores / scores.sum()
idx = np.random.choice(
range(len(p)), replace=False, p=p, size=batch_size,
)
else:
idx = np.argsort(scores)[-batch_size:]
ds_active.acquire(idx)
# Train model
utils.TRAIN_FUNCTIONS[model_name](
ds_train=ds_active.training_dataset,
ds_valid=ds_valid,
job_dir=acquisition_dir,
config=config,
dim_input=ds_active.dataset.dim_input,
)
# Save acuired points
with acquired_path.open(mode="w") as ap:
json.dump(
{"aquired_indices": [int(a) for a in ds_active.acquired_indices]},
ap,
)
def get_propensities(trial_dir, config):
    """Train (or reload) a propensity model and return predicted propensities.

    Only the 'pi' and 'mu-pi' acquisition functions need treatment
    propensities; for any other acquisition this returns None.  The model is
    trained once per trial under ``trial_dir/pi`` and reused (via its
    ``best_checkpoint.pt``) on subsequent calls.

    :param trial_dir: trial output directory (checkpoint goes to trial_dir/pi)
    :param config: experiment configuration dict
    :return: 1-D array of predicted propensities for the training set, or None
    """
    dataset_name = config.get("dataset_name")
    # network hyper-parameters (only used when a pi model is actually needed)
    dim_hidden = config.get("dim_hidden")
    depth = config.get("depth")
    negative_slope = config.get("negative_slope")
    dropout_rate = config.get("dropout_rate")
    spectral_norm = config.get("spectral_norm")
    learning_rate = config.get("learning_rate")
    batch_size = config.get("batch_size")
    epochs = config.get("epochs")
    if config.get("acquisition_function") in ["pi", "mu-pi"]:
        # build a 'pi'-mode copy of the training dataset (targets = treatment)
        config_pi_train = deepcopy(config.get("ds_train"))
        config_pi_train["mode"] = "pi"
        ds_pi_train = datasets.DATASETS.get(dataset_name)(**config_pi_train)
        pi_dir = trial_dir / "pi"
        pi_model = models.NeuralNetwork(
            job_dir=pi_dir,
            architecture="resnet",
            dim_input=ds_pi_train.dim_input,
            dim_hidden=dim_hidden,
            dim_output=1,
            depth=depth,
            negative_slope=negative_slope,
            batch_norm=False,
            spectral_norm=spectral_norm,
            dropout_rate=dropout_rate,
            # weight decay scaled by dataset size (MC-dropout style prior)
            weight_decay=(0.5 * (1 - config.get("dropout_rate"))) / len(ds_pi_train),
            learning_rate=learning_rate,
            batch_size=batch_size,
            epochs=epochs,
            patience=10,
            num_workers=0,
            seed=config.get("seed"),
        )
        # only (re)train when no checkpoint exists yet for this trial
        if not (pi_dir / "best_checkpoint.pt").exists():
            config_pi_valid = deepcopy(config.get("ds_valid"))
            config_pi_valid["mode"] = "pi"
            ds_pi_valid = datasets.DATASETS.get(dataset_name)(**config_pi_valid)
            pi_model.fit(ds_pi_train, ds_pi_valid)
        pi_model.load()
        return pi_model.predict_mean(ds_pi_train).ravel()
    else:
        return None
| 39.870748 | 88 | 0.586077 | import json
import numpy as np
import scipy.stats
from copy import deepcopy
from causal_bald.library import models
from causal_bald.library import datasets
from causal_bald.library import acquisitions
from causal_bald.application.workflows import utils
def active_learner(model_name, config, experiment_dir, trial):
dataset_name = config.get("dataset_name")
config["ds_train"]["seed"] = trial
config["ds_valid"]["seed"] = trial + 1 if dataset_name == "synthetic" else trial
config["ds_test"]["seed"] = trial + 2 if dataset_name == "synthetic" else trial
ds_active = datasets.ActiveLearningDataset(
datasets.DATASETS.get(dataset_name)(**config.get("ds_train"))
)
ds_valid = datasets.DATASETS.get(dataset_name)(**config.get("ds_valid"))
experiment_dir = utils.DIRECTORIES[model_name](
base_dir=experiment_dir, config=config
)
trial_dir = experiment_dir / f"trial-{trial:03d}"
trial_dir.mkdir(parents=True, exist_ok=True)
config_path = trial_dir / "config.json"
with config_path.open(mode="w") as cp:
json.dump(config, cp)
acquisition_function = acquisitions.FUNCTIONS.get(
config.get("acquisition_function")
)
pt = get_propensities(trial_dir=trial_dir, config=config)
step_size = config.get("step_size")
warm_start_size = config.get("warm_start_size")
max_acquisitions = config.get("max_acquisitions")
temperature = config.get("temperature")
use_gumbel = config.get("use_gumbel")
for i in range(max_acquisitions):
batch_size = warm_start_size if i == 0 else step_size
acquisition_dir = trial_dir / f"acquisition-{i:03d}"
acquired_path = acquisition_dir / "aquired.json"
if not acquired_path.exists():
if i == 0:
scores = acquisitions.random(
mu_0=None, mu_1=None, t=ds_active.dataset.t, pt=pt, temperature=None
)[ds_active.pool_dataset.indices]
else:
mu_0, mu_1 = utils.PREDICT_FUNCTIONS[model_name](
dataset=ds_active.dataset,
job_dir=trial_dir / f"acquisition-{i-1:03d}",
config=config,
)
scores = (
acquisition_function(
mu_0=mu_0,
mu_1=mu_1,
t=ds_active.dataset.t,
pt=pt,
temperature=temperature if temperature > 0.0 else 1.0,
)
)[ds_active.pool_dataset.indices]
if temperature > 0.0:
if use_gumbel:
p = scores + scipy.stats.gumbel_r.rvs(
loc=0, scale=1, size=len(scores), random_state=None,
)
idx = np.argpartition(p, -batch_size)[-batch_size:]
else:
scores = np.exp(scores)
p = scores / scores.sum()
idx = np.random.choice(
range(len(p)), replace=False, p=p, size=batch_size,
)
else:
idx = np.argsort(scores)[-batch_size:]
ds_active.acquire(idx)
utils.TRAIN_FUNCTIONS[model_name](
ds_train=ds_active.training_dataset,
ds_valid=ds_valid,
job_dir=acquisition_dir,
config=config,
dim_input=ds_active.dataset.dim_input,
)
with acquired_path.open(mode="w") as ap:
json.dump(
{"aquired_indices": [int(a) for a in ds_active.acquired_indices]},
ap,
)
def get_propensities(trial_dir, config):
dataset_name = config.get("dataset_name")
dim_hidden = config.get("dim_hidden")
depth = config.get("depth")
negative_slope = config.get("negative_slope")
dropout_rate = config.get("dropout_rate")
spectral_norm = config.get("spectral_norm")
learning_rate = config.get("learning_rate")
batch_size = config.get("batch_size")
epochs = config.get("epochs")
if config.get("acquisition_function") in ["pi", "mu-pi"]:
config_pi_train = deepcopy(config.get("ds_train"))
config_pi_train["mode"] = "pi"
ds_pi_train = datasets.DATASETS.get(dataset_name)(**config_pi_train)
pi_dir = trial_dir / "pi"
pi_model = models.NeuralNetwork(
job_dir=pi_dir,
architecture="resnet",
dim_input=ds_pi_train.dim_input,
dim_hidden=dim_hidden,
dim_output=1,
depth=depth,
negative_slope=negative_slope,
batch_norm=False,
spectral_norm=spectral_norm,
dropout_rate=dropout_rate,
weight_decay=(0.5 * (1 - config.get("dropout_rate"))) / len(ds_pi_train),
learning_rate=learning_rate,
batch_size=batch_size,
epochs=epochs,
patience=10,
num_workers=0,
seed=config.get("seed"),
)
if not (pi_dir / "best_checkpoint.pt").exists():
config_pi_valid = deepcopy(config.get("ds_valid"))
config_pi_valid["mode"] = "pi"
ds_pi_valid = datasets.DATASETS.get(dataset_name)(**config_pi_valid)
pi_model.fit(ds_pi_train, ds_pi_valid)
pi_model.load()
return pi_model.predict_mean(ds_pi_train).ravel()
else:
return None
| true | true |
f7f97112714120a52a4be60858c4a6a2b38f22c3 | 2,901 | py | Python | models/resnext_block.py | khiemledev/Basic_CNNs_TensorFlow2 | be2c90f2a63ae13b7586a1e4114c2bc42a825c83 | [
"MIT"
] | 497 | 2019-10-14T03:57:31.000Z | 2022-03-30T08:49:51.000Z | models/resnext_block.py | khiemledev/Basic_CNNs_TensorFlow2 | be2c90f2a63ae13b7586a1e4114c2bc42a825c83 | [
"MIT"
] | 33 | 2019-10-23T02:42:27.000Z | 2022-03-07T01:56:13.000Z | models/resnext_block.py | khiemledev/Basic_CNNs_TensorFlow2 | be2c90f2a63ae13b7586a1e4114c2bc42a825c83 | [
"MIT"
] | 190 | 2019-10-31T14:38:28.000Z | 2022-03-30T07:42:36.000Z | import tensorflow as tf
from models.group_convolution import get_group_conv
class ResNeXt_BottleNeck(tf.keras.layers.Layer):
def __init__(self, filters, strides, groups):
super(ResNeXt_BottleNeck, self).__init__()
self.conv1 = tf.keras.layers.Conv2D(filters=filters,
kernel_size=(1, 1),
strides=1,
padding="same")
self.bn1 = tf.keras.layers.BatchNormalization()
# self.group_conv = tf.keras.layers.Conv2D(filters=filters,
# kernel_size=(3, 3),
# strides=strides,
# padding="same",
# groups=groups)
self.group_conv = get_group_conv(in_channels=filters,
out_channels=filters,
kernel_size=(3, 3),
strides=strides,
padding="same",
groups=groups)
self.bn2 = tf.keras.layers.BatchNormalization()
self.conv2 = tf.keras.layers.Conv2D(filters=2 * filters,
kernel_size=(1, 1),
strides=1,
padding="same")
self.bn3 = tf.keras.layers.BatchNormalization()
self.shortcut_conv = tf.keras.layers.Conv2D(filters=2 * filters,
kernel_size=(1, 1),
strides=strides,
padding="same")
self.shortcut_bn = tf.keras.layers.BatchNormalization()
def call(self, inputs, training=None, **kwargs):
x = self.conv1(inputs)
x = self.bn1(x, training=training)
x = tf.nn.relu(x)
x = self.group_conv(x)
x = self.bn2(x, training=training)
x = tf.nn.relu(x)
x = self.conv2(x)
x = self.bn3(x, training=training)
shortcut = self.shortcut_conv(inputs)
shortcut = self.shortcut_bn(shortcut, training=training)
output = tf.nn.relu(tf.keras.layers.add([x, shortcut]))
return output
def build_ResNeXt_block(filters, strides, groups, repeat_num):
    """Stack `repeat_num` ResNeXt bottlenecks; only the first one applies
    the given stride (the rest use stride 1)."""
    block = tf.keras.Sequential()
    for index in range(repeat_num):
        block.add(ResNeXt_BottleNeck(filters=filters,
                                     strides=strides if index == 0 else 1,
                                     groups=groups))
    return block
from models.group_convolution import get_group_conv
class ResNeXt_BottleNeck(tf.keras.layers.Layer):
def __init__(self, filters, strides, groups):
super(ResNeXt_BottleNeck, self).__init__()
self.conv1 = tf.keras.layers.Conv2D(filters=filters,
kernel_size=(1, 1),
strides=1,
padding="same")
self.bn1 = tf.keras.layers.BatchNormalization()
self.group_conv = get_group_conv(in_channels=filters,
out_channels=filters,
kernel_size=(3, 3),
strides=strides,
padding="same",
groups=groups)
self.bn2 = tf.keras.layers.BatchNormalization()
self.conv2 = tf.keras.layers.Conv2D(filters=2 * filters,
kernel_size=(1, 1),
strides=1,
padding="same")
self.bn3 = tf.keras.layers.BatchNormalization()
self.shortcut_conv = tf.keras.layers.Conv2D(filters=2 * filters,
kernel_size=(1, 1),
strides=strides,
padding="same")
self.shortcut_bn = tf.keras.layers.BatchNormalization()
def call(self, inputs, training=None, **kwargs):
x = self.conv1(inputs)
x = self.bn1(x, training=training)
x = tf.nn.relu(x)
x = self.group_conv(x)
x = self.bn2(x, training=training)
x = tf.nn.relu(x)
x = self.conv2(x)
x = self.bn3(x, training=training)
shortcut = self.shortcut_conv(inputs)
shortcut = self.shortcut_bn(shortcut, training=training)
output = tf.nn.relu(tf.keras.layers.add([x, shortcut]))
return output
def build_ResNeXt_block(filters, strides, groups, repeat_num):
block = tf.keras.Sequential()
block.add(ResNeXt_BottleNeck(filters=filters,
strides=strides,
groups=groups))
for _ in range(1, repeat_num):
block.add(ResNeXt_BottleNeck(filters=filters,
strides=1,
groups=groups))
return block | true | true |
f7f97151ef25bf25f7f3247b2596badf1e172fa4 | 4,147 | py | Python | activitysim/abm/models/util/trip.py | SEMCOG/SEMCOG_ActSim | cc18cce84b2e4b5f380f58c7919953d2cd03ee73 | [
"BSD-3-Clause"
] | null | null | null | activitysim/abm/models/util/trip.py | SEMCOG/SEMCOG_ActSim | cc18cce84b2e4b5f380f58c7919953d2cd03ee73 | [
"BSD-3-Clause"
] | 1 | 2021-06-30T23:39:37.000Z | 2021-06-30T23:39:37.000Z | activitysim/abm/models/util/trip.py | SEMCOG/SEMCOG_ActSim | cc18cce84b2e4b5f380f58c7919953d2cd03ee73 | [
"BSD-3-Clause"
] | null | null | null | # ActivitySim
# See full license in LICENSE.txt.
import logging
import numpy as np
from activitysim.core.util import assign_in_place
logger = logging.getLogger(__name__)
def failed_trip_cohorts(trips, failed):
    """Return a boolean Series marking every trip that belongs to a tour leg
    (same tour_id and same direction) containing at least one failed trip.

    Parameters
    ----------
    trips : pandas.DataFrame
        must have `tour_id` and boolean `outbound` columns
    failed : pandas.Series
        boolean failure mask aligned with `trips`
    """
    # tours with a failure in each direction
    tours_with_failed_outbound = trips.tour_id[failed & trips.outbound]
    tours_with_failed_inbound = trips.tour_id[failed & ~trips.outbound]

    # trips sharing a leg with a failed trip, per direction
    in_bad_outbound_leg = trips.outbound & trips.tour_id.isin(tours_with_failed_outbound)
    in_bad_inbound_leg = ~trips.outbound & trips.tour_id.isin(tours_with_failed_inbound)

    return in_bad_outbound_leg | in_bad_inbound_leg
def flag_failed_trip_leg_mates(trips_df, col_name):
    """
    Set boolean column `col_name` True, in place, for every trip that shares
    a tour leg (same tour_id and direction) with a failed trip but did not
    itself fail.

    Parameters
    ----------
    trips_df : pandas.DataFrame
        trips table with `tour_id`, boolean `outbound` and `failed` columns
    col_name : str
        name of the flag column to set (expected to already exist)
    """
    # leg mates = cohort members minus the failed trips themselves
    failed_trip_leg_mates = failed_trip_cohorts(trips_df, trips_df.failed) & ~trips_df.failed
    trips_df.loc[failed_trip_leg_mates, col_name] = True
    # (removed the commented-out per-direction implementation, which
    # duplicated failed_trip_cohorts)
def cleanup_failed_trips(trips):
    """
    Drop failed trips and clean up fields in their leg mates:

    trip_num    assign new ordinal trip num after failed trips are dropped
    trip_count  assign new count of trips in leg, sans failed trips
    first       update first flag as we may have dropped first trip (last trip can't fail)
    next_trip_id  assign id of next trip in leg after failed trips are dropped

    Returns a new DataFrame without the failed rows and without the `failed`
    column (the column is always dropped, even when nothing failed).
    """
    if trips.failed.any():
        logger.warning("cleanup_failed_trips dropping %s failed trips" % trips.failed.sum())
        # mark the surviving leg mates of failed trips for patching
        trips['patch'] = False
        flag_failed_trip_leg_mates(trips, 'patch')
        # drop the original failures
        trips = trips[~trips.failed]
        # increasing trip_id order
        patch_trips = trips[trips.patch].sort_index()
        # recompute fields dependent on trip_num sequence
        grouped = patch_trips.groupby(['tour_id', 'outbound'])
        patch_trips['trip_num'] = grouped.cumcount() + 1
        patch_trips['trip_count'] = patch_trips['trip_num'] + grouped.cumcount(ascending=False)
        # write the recomputed columns back into the main table in place
        assign_in_place(trips, patch_trips[['trip_num', 'trip_count']])
        del trips['patch']
    del trips['failed']
    return trips
def generate_alternative_sizes(max_duration, max_trips):
    """
    Build a lookup array of departure-pattern counts based on the number of
    trips in the leg and the duration available to the leg.

    Row k (0-based) gives, for each duration d in 0..max_duration, the number
    of ways k+1 trips can be scheduled within d time units.

    :param max_duration: largest duration to tabulate (columns 0..max_duration)
    :param max_trips: maximum number of trips in a leg (rows)
    :return: int ndarray of shape (max_trips, max_duration + 1)
    """
    def np_shift(xs, n, fill_zero=True):
        # Shift a 1-D array right (n > 0) or left (n < 0), padding with NaN.
        # When fill_zero is set, the NaN padding becomes 0 and the result is
        # cast to int.
        if n == 0:
            # BUGFIX: the original `xs[:-n]` with n == 0 sliced the whole
            # array away; shifting by zero is the identity
            shifted = xs.astype(float)
        elif n > 0:
            shifted = np.concatenate((np.full(n, np.nan), xs[:-n]))
        else:
            shifted = np.concatenate((xs[-n:], np.full(-n, np.nan)))
        # BUGFIX: was np.nan_to_num(shifted, np.nan), which passed NaN as the
        # boolean `copy` argument; also `np.int` was removed in numpy >= 1.24,
        # so use the plain builtin int.
        return np.nan_to_num(shifted).astype(int) if fill_zero else shifted

    levels = np.empty([max_trips, max_duration + max_trips])
    levels[0] = np.arange(1, max_duration + max_trips + 1)
    # each level is a shifted cumulative sum of the previous one
    for level in np.arange(1, max_trips):
        levels[level] = np_shift(np.cumsum(np_shift(levels[level - 1], 1)), -1, fill_zero=False)
    # trailing columns hold NaN padding; they are sliced off before the cast
    return levels[:, :max_duration + 1].astype(int)
def get_time_windows(residual, level):
    """
    Enumerate every allocation of `residual` time units across `level` slots.

    :param residual: total units to distribute
    :param level: number of slots
    :return: 1-D array of 0..residual when level == 1, otherwise a
        (level, n) array whose columns are the feasible allocations
        (column sums are <= residual)
    """
    columns = []
    for first_width in np.arange(residual + 1):
        if level <= 1:
            # base case: a single slot may take any width up to the residual
            return np.arange(residual + 1)
        # recurse on the remaining slots, prepend this slot's width
        tail = get_time_windows(residual - first_width, level - 1)
        last_axis = len(tail.shape) - 1
        head = np.repeat(first_width, tail.shape[last_axis])
        columns.append(np.vstack([head, tail]))
    return np.concatenate(columns, axis=1)
| 33.176 | 98 | 0.667712 |
import logging
import numpy as np
from activitysim.core.util import assign_in_place
logger = logging.getLogger(__name__)
def failed_trip_cohorts(trips, failed):
bad_outbound_trips = \
trips.outbound & (trips.tour_id.isin(trips.tour_id[failed & trips.outbound]))
bad_inbound_trips = \
~trips.outbound & (trips.tour_id.isin(trips.tour_id[failed & ~trips.outbound]))
bad_trips = bad_outbound_trips | bad_inbound_trips
return bad_trips
def flag_failed_trip_leg_mates(trips_df, col_name):
failed_trip_leg_mates = failed_trip_cohorts(trips_df, trips_df.failed) & ~trips_df.failed
trips_df.loc[failed_trip_leg_mates, col_name] = True
ips['patch'] = False
flag_failed_trip_leg_mates(trips, 'patch')
trips = trips[~trips.failed]
patch_trips = trips[trips.patch].sort_index()
grouped = patch_trips.groupby(['tour_id', 'outbound'])
patch_trips['trip_num'] = grouped.cumcount() + 1
patch_trips['trip_count'] = patch_trips['trip_num'] + grouped.cumcount(ascending=False)
assign_in_place(trips, patch_trips[['trip_num', 'trip_count']])
del trips['patch']
del trips['failed']
return trips
def generate_alternative_sizes(max_duration, max_trips):
def np_shift(xs, n, fill_zero=True):
if n >= 0:
shift_array = np.concatenate((np.full(n, np.nan), xs[:-n]))
else:
shift_array = np.concatenate((xs[-n:], np.full(-n, np.nan)))
return np.nan_to_num(shift_array, np.nan).astype(np.int) if fill_zero else shift_array
levels = np.empty([max_trips, max_duration + max_trips])
levels[0] = np.arange(1, max_duration + max_trips + 1)
for level in np.arange(1, max_trips):
levels[level] = np_shift(np.cumsum(np_shift(levels[level - 1], 1)), -1, fill_zero=False)
return levels[:, :max_duration+1].astype(int)
def get_time_windows(residual, level):
ranges = []
for a in np.arange(residual + 1):
if level > 1:
windows = get_time_windows(residual - a, level - 1)
width_dim = len(windows.shape) - 1
ranges.append(np.vstack([np.repeat(a, windows.shape[width_dim]), windows]))
else:
return np.arange(residual + 1)
return np.concatenate(ranges, axis=1)
| true | true |
f7f97213cbe2905bf394290f9e0800fa04ba6522 | 15,701 | py | Python | sphinx/ext/graphviz.py | eqvinox/sphinx | cf5afec11753b73faecdc6670e2c28d16e410034 | [
"BSD-2-Clause"
] | null | null | null | sphinx/ext/graphviz.py | eqvinox/sphinx | cf5afec11753b73faecdc6670e2c28d16e410034 | [
"BSD-2-Clause"
] | null | null | null | sphinx/ext/graphviz.py | eqvinox/sphinx | cf5afec11753b73faecdc6670e2c28d16e410034 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
sphinx.ext.graphviz
~~~~~~~~~~~~~~~~~~~
Allow graphviz-formatted graphs to be included in Sphinx-generated
documents inline.
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import posixpath
import re
from hashlib import sha1
from os import path
from subprocess import Popen, PIPE
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.statemachine import ViewList
from six import text_type
import sphinx
from sphinx.errors import SphinxError
from sphinx.locale import _, __
from sphinx.util import logging
from sphinx.util.docutils import SphinxDirective
from sphinx.util.fileutil import copy_asset_file
from sphinx.util.i18n import search_image_for_language
from sphinx.util.osutil import ensuredir, ENOENT, EPIPE, EINVAL
if False:
# For type annotation
from docutils.parsers.rst import Directive # NOQA
from typing import Any, Dict, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
logger = logging.getLogger(__name__)
class GraphvizError(SphinxError):
    """Raised when dot input is invalid or the dot executable fails."""
    category = 'Graphviz error'
class ClickableMapDefinition:
    """A manipulator for clickable map file of graphviz."""
    maptag_re = re.compile('<map id="(.*?)"')
    href_re = re.compile('href=".*?"')

    def __init__(self, filename, content, dot=''):
        # type: (unicode, unicode, unicode) -> None
        self.id = None  # type: unicode
        self.filename = filename
        self.content = content.splitlines()
        self.clickable = []  # type: List[unicode]
        self.parse(dot=dot)

    def parse(self, dot=None):
        # type: (unicode) -> None
        """Extract the map id and collect the lines with clickable areas."""
        header = self.maptag_re.match(self.content[0])  # type: ignore
        if header is None:
            raise GraphvizError('Invalid clickable map file found: %s' % self.filename)

        self.id = header.group(1)
        if self.id == '%3':
            # graphviz emits a bogus map id when the graph has no name
            # (https://gitlab.com/graphviz/graphviz/issues/1327), so derive a
            # deterministic id from the dot source instead
            digest = sha1(dot.encode('utf-8')).hexdigest()
            self.id = 'grapviz%s' % digest[-10:]
            self.content[0] = self.content[0].replace('%3', self.id)

        for line in self.content:
            if self.href_re.search(line):  # type: ignore
                self.clickable.append(line)

    def generate_clickable_map(self):
        # type: () -> unicode
        """Return the <map> markup when any clickable area exists, else ''."""
        if not self.clickable:
            return ''
        return '\n'.join([self.content[0]] + self.clickable + [self.content[-1]])
class graphviz(nodes.General, nodes.Inline, nodes.Element):
    """Docutils node carrying raw dot code ('code') and render options
    ('options'); rendered by the per-writer visitor functions below."""
    pass
def figure_wrapper(directive, node, caption):
    # type: (Directive, nodes.Node, unicode) -> nodes.figure
    """Wrap *node* in a ``figure`` node carrying the parsed *caption*."""
    figure_node = nodes.figure('', node)
    # the figure takes over the alignment of the wrapped node
    if 'align' in node:
        figure_node['align'] = node.attributes.pop('align')
    # run the caption text through the rST parser so inline markup works
    parsed = nodes.Element()
    directive.state.nested_parse(ViewList([caption], source=''),
                                 directive.content_offset, parsed)
    caption_node = nodes.caption(parsed[0].rawsource, '',
                                 *parsed[0].children)
    caption_node.source = parsed[0].source
    caption_node.line = parsed[0].line
    figure_node += caption_node
    return figure_node
def align_spec(argument):
    # type: (Any) -> bool
    """Directive option validator: one of 'left', 'center' or 'right'."""
    return directives.choice(argument, ('left', 'center', 'right'))
class Graphviz(SphinxDirective):
    """
    Directive to insert arbitrary dot markup.

    The dot source is given either inline as directive content or via a
    single filename argument (resolved relative to the current document);
    supplying both is rejected with a warning.
    """
    has_content = True
    required_arguments = 0
    optional_arguments = 1
    final_argument_whitespace = False
    option_spec = {
        'alt': directives.unchanged,
        'align': align_spec,
        'caption': directives.unchanged,
        'graphviz_dot': directives.unchanged,
        'name': directives.unchanged,
    }

    def run(self):
        # type: () -> List[nodes.Node]
        if self.arguments:
            document = self.state.document
            if self.content:
                return [document.reporter.warning(
                    __('Graphviz directive cannot have both content and '
                       'a filename argument'), line=self.lineno)]
            # prefer a language-specific figure file when one exists
            argument = search_image_for_language(self.arguments[0], self.env)
            rel_filename, filename = self.env.relfn2path(argument)
            self.env.note_dependency(rel_filename)
            try:
                with open(filename, 'r', encoding='utf-8') as fp:  # type: ignore
                    dotcode = fp.read()
            except (IOError, OSError):
                return [document.reporter.warning(
                    __('External Graphviz file %r not found or reading '
                       'it failed') % filename, line=self.lineno)]
        else:
            dotcode = '\n'.join(self.content)
            if not dotcode.strip():
                return [self.state_machine.reporter.warning(
                    __('Ignoring "graphviz" directive without content.'),
                    line=self.lineno)]
        node = graphviz()
        node['code'] = dotcode
        node['options'] = {'docname': self.env.docname}
        if 'graphviz_dot' in self.options:
            node['options']['graphviz_dot'] = self.options['graphviz_dot']
        if 'alt' in self.options:
            node['alt'] = self.options['alt']
        if 'align' in self.options:
            node['align'] = self.options['align']
        caption = self.options.get('caption')
        if caption:
            node = figure_wrapper(self, node, caption)
        # BUGFIX: register the :name: option unconditionally.  Previously
        # add_name() was only called inside the caption branch, so a graph
        # without a caption silently lost its name; GraphvizSimple already
        # applies it unconditionally.
        self.add_name(node)
        return [node]
class GraphvizSimple(SphinxDirective):
    """
    Directive to insert arbitrary dot markup.

    Shared implementation for the ``graph`` and ``digraph`` directives: the
    directive name and its single argument become the dot header, and the
    content becomes the body (e.g. ``digraph name { ... }``).
    """
    has_content = True
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = {
        'alt': directives.unchanged,
        'align': align_spec,
        'caption': directives.unchanged,
        'graphviz_dot': directives.unchanged,
        'name': directives.unchanged,
    }
    def run(self):
        # type: () -> List[nodes.Node]
        node = graphviz()
        # self.name is the directive name ('graph'/'digraph')
        node['code'] = '%s %s {\n%s\n}\n' % \
                       (self.name, self.arguments[0], '\n'.join(self.content))
        node['options'] = {
            'docname': path.splitext(self.state.document.current_source)[0],
        }
        if 'graphviz_dot' in self.options:
            node['options']['graphviz_dot'] = self.options['graphviz_dot']
        if 'alt' in self.options:
            node['alt'] = self.options['alt']
        if 'align' in self.options:
            node['align'] = self.options['align']
        caption = self.options.get('caption')
        if caption:
            node = figure_wrapper(self, node, caption)
        self.add_name(node)
        return [node]
def render_dot(self, code, options, format, prefix='graphviz'):
    # type: (nodes.NodeVisitor, unicode, Dict, unicode, unicode) -> Tuple[unicode, unicode]
    """Render graphviz code into a PNG or PDF output file.

    Returns (relative filename, output filename); returns (None, None) when
    the dot executable is unavailable (a warning is emitted once per
    executable).  Raises GraphvizError when dot fails or produces no output.
    """
    graphviz_dot = options.get('graphviz_dot', self.builder.config.graphviz_dot)
    # the output filename is a content hash, so unchanged graphs are reused
    hashkey = (code + str(options) + str(graphviz_dot) +
               str(self.builder.config.graphviz_dot_args)).encode('utf-8')
    fname = '%s-%s.%s' % (prefix, sha1(hashkey).hexdigest(), format)
    relfn = posixpath.join(self.builder.imgpath, fname)
    outfn = path.join(self.builder.outdir, self.builder.imagedir, fname)
    if path.isfile(outfn):
        # already rendered in a previous (or this) build
        return relfn, outfn
    if (hasattr(self.builder, '_graphviz_warned_dot') and
       self.builder._graphviz_warned_dot.get(graphviz_dot)):
        # dot was already found to be missing; don't retry or re-warn
        return None, None
    ensuredir(path.dirname(outfn))
    # graphviz expects UTF-8 by default
    if isinstance(code, text_type):
        code = code.encode('utf-8')
    dot_args = [graphviz_dot]
    dot_args.extend(self.builder.config.graphviz_dot_args)
    dot_args.extend(['-T' + format, '-o' + outfn])
    docname = options.get('docname', 'index')
    # run dot from the document's directory so relative paths in the
    # dot source resolve as authors expect
    cwd = path.dirname(path.join(self.builder.srcdir, docname))
    if format == 'png':
        # additionally emit the clickable image map next to the PNG
        dot_args.extend(['-Tcmapx', '-o%s.map' % outfn])
    try:
        p = Popen(dot_args, stdout=PIPE, stdin=PIPE, stderr=PIPE, cwd=cwd)
    except OSError as err:
        if err.errno != ENOENT:   # No such file or directory
            raise
        logger.warning(__('dot command %r cannot be run (needed for graphviz '
                          'output), check the graphviz_dot setting'), graphviz_dot)
        # remember the missing executable so the warning appears only once
        if not hasattr(self.builder, '_graphviz_warned_dot'):
            self.builder._graphviz_warned_dot = {}
        self.builder._graphviz_warned_dot[graphviz_dot] = True
        return None, None
    try:
        # Graphviz may close standard input when an error occurs,
        # resulting in a broken pipe on communicate()
        stdout, stderr = p.communicate(code)
    except (OSError, IOError) as err:
        if err.errno not in (EPIPE, EINVAL):
            raise
        # in this case, read the standard output and standard error streams
        # directly, to get the error message(s)
        stdout, stderr = p.stdout.read(), p.stderr.read()
        p.wait()
    if p.returncode != 0:
        raise GraphvizError(__('dot exited with error:\n[stderr]\n%s\n'
                               '[stdout]\n%s') % (stderr, stdout))
    if not path.isfile(outfn):
        raise GraphvizError(__('dot did not produce an output file:\n[stderr]\n%s\n'
                               '[stdout]\n%s') % (stderr, stdout))
    return relfn, outfn
def render_dot_html(self, node, code, options, prefix='graphviz',
imgcls=None, alt=None):
# type: (nodes.NodeVisitor, graphviz, unicode, Dict, unicode, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
format = self.builder.config.graphviz_output_format
try:
if format not in ('png', 'svg'):
raise GraphvizError(__("graphviz_output_format must be one of 'png', "
"'svg', but is %r") % format)
fname, outfn = render_dot(self, code, options, format, prefix)
except GraphvizError as exc:
logger.warning(__('dot code %r: %s'), code, text_type(exc))
raise nodes.SkipNode
if imgcls:
imgcls += " graphviz"
else:
imgcls = "graphviz"
if fname is None:
self.body.append(self.encode(code))
else:
if alt is None:
alt = node.get('alt', self.encode(code).strip())
if 'align' in node:
self.body.append('<div align="%s" class="align-%s">' %
(node['align'], node['align']))
if format == 'svg':
self.body.append('<div class="graphviz">')
self.body.append('<object data="%s" type="image/svg+xml" class="%s">\n' %
(fname, imgcls))
self.body.append('<p class="warning">%s</p>' % alt)
self.body.append('</object></div>\n')
else:
with open(outfn + '.map', 'r', encoding='utf-8') as mapfile: # type: ignore
imgmap = ClickableMapDefinition(outfn + '.map', mapfile.read(), dot=code)
if imgmap.clickable:
# has a map
self.body.append('<div class="graphviz">')
self.body.append('<img src="%s" alt="%s" usemap="#%s" class="%s" />' %
(fname, alt, imgmap.id, imgcls))
self.body.append('</div>\n')
self.body.append(imgmap.generate_clickable_map())
else:
# nothing in image map
self.body.append('<div class="graphviz">')
self.body.append('<img src="%s" alt="%s" class="%s" />' %
(fname, alt, imgcls))
self.body.append('</div>\n')
if 'align' in node:
self.body.append('</div>\n')
raise nodes.SkipNode
def html_visit_graphviz(self, node):
    # type: (nodes.NodeVisitor, graphviz) -> None
    """HTML visitor: delegate rendering of the graphviz node."""
    dot_source = node['code']
    render_options = node['options']
    render_dot_html(self, node, dot_source, render_options)
def render_dot_latex(self, node, code, options, prefix='graphviz'):
    # type: (nodes.NodeVisitor, graphviz, unicode, Dict, unicode) -> None
    """Render *code* with dot as PDF and emit the LaTeX to include it.

    Block-level graphs get alignment wrappers built from ``\\hspace`` /
    ``\\hfill``; inline graphs are included verbatim.  Always raises
    ``nodes.SkipNode``.
    """
    try:
        fname, outfn = render_dot(self, code, options, 'pdf', prefix)
    except GraphvizError as exc:
        logger.warning(__('dot code %r: %s'), code, text_type(exc))
        raise nodes.SkipNode
    is_inline = self.is_inline(node)
    if not is_inline:
        # pre/post carry the LaTeX alignment wrapper around the image.
        pre = ''
        post = ''
        if 'align' in node:
            if node['align'] == 'left':
                pre = '{'
                post = r'\hspace*{\fill}}'
            elif node['align'] == 'right':
                pre = r'{\hspace*{\fill}'
                post = '}'
            elif node['align'] == 'center':
                pre = r'{\hfill'
                post = r'\hspace*{\fill}}'
        self.body.append('\n%s' % pre)
    self.body.append(r'\sphinxincludegraphics[]{%s}' % fname)
    if not is_inline:
        self.body.append('%s\n' % post)
    raise nodes.SkipNode
def latex_visit_graphviz(self, node):
    # type: (nodes.NodeVisitor, graphviz) -> None
    """LaTeX visitor: delegate rendering of the graphviz node."""
    dot_source = node['code']
    render_options = node['options']
    render_dot_latex(self, node, dot_source, render_options)
def render_dot_texinfo(self, node, code, options, prefix='graphviz'):
    # type: (nodes.NodeVisitor, graphviz, unicode, Dict, unicode) -> None
    """Render *code* with dot as PNG and emit a Texinfo @image directive.

    The ``.png`` suffix is stripped (``fname[:-4]``) because @image takes
    the basename.  Always raises ``nodes.SkipNode``.
    """
    try:
        fname, outfn = render_dot(self, code, options, 'png', prefix)
    except GraphvizError as exc:
        logger.warning(__('dot code %r: %s'), code, text_type(exc))
        raise nodes.SkipNode
    if fname is not None:
        self.body.append('@image{%s,,,[graphviz],png}\n' % fname[:-4])
    raise nodes.SkipNode
def texinfo_visit_graphviz(self, node):
    # type: (nodes.NodeVisitor, graphviz) -> None
    """Texinfo visitor: delegate rendering of the graphviz node."""
    dot_source = node['code']
    render_options = node['options']
    render_dot_texinfo(self, node, dot_source, render_options)
def text_visit_graphviz(self, node):
    # type: (nodes.NodeVisitor, graphviz) -> None
    """Text builder: replace the graph with a bracketed placeholder."""
    if 'alt' in node.attributes:
        placeholder = _('[graph: %s]') % node['alt']
    else:
        placeholder = _('[graph]')
    self.add_text(placeholder)
    raise nodes.SkipNode
def man_visit_graphviz(self, node):
    # type: (nodes.NodeVisitor, graphviz) -> None
    """Man-page builder: emit a textual placeholder for the graph."""
    has_alt = 'alt' in node.attributes
    placeholder = _('[graph: %s]') % node['alt'] if has_alt else _('[graph]')
    self.body.append(placeholder)
    raise nodes.SkipNode
def on_build_finished(app, exc):
    # type: (Sphinx, Exception) -> None
    """After a successful build, copy graphviz.css into the output's _static."""
    if exc is not None:
        return
    source = path.join(sphinx.package_dir, 'templates', 'graphviz', 'graphviz.css')
    destination = path.join(app.outdir, '_static')
    copy_asset_file(source, destination)
def setup(app):
    # type: (Sphinx) -> Dict[unicode, Any]
    """Sphinx extension entry point: register node, directives and config."""
    app.add_node(graphviz,
                 html=(html_visit_graphviz, None),
                 latex=(latex_visit_graphviz, None),
                 texinfo=(texinfo_visit_graphviz, None),
                 text=(text_visit_graphviz, None),
                 man=(man_visit_graphviz, None))
    app.add_directive('graphviz', Graphviz)
    app.add_directive('graph', GraphvizSimple)
    app.add_directive('digraph', GraphvizSimple)
    # Rebuild HTML when any of these config values change.
    app.add_config_value('graphviz_dot', 'dot', 'html')
    app.add_config_value('graphviz_dot_args', [], 'html')
    app.add_config_value('graphviz_output_format', 'png', 'html')
    app.add_css_file('graphviz.css')
    app.connect('build-finished', on_build_finished)
    return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
| 36.094253 | 118 | 0.593274 |
import posixpath
import re
from hashlib import sha1
from os import path
from subprocess import Popen, PIPE
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.statemachine import ViewList
from six import text_type
import sphinx
from sphinx.errors import SphinxError
from sphinx.locale import _, __
from sphinx.util import logging
from sphinx.util.docutils import SphinxDirective
from sphinx.util.fileutil import copy_asset_file
from sphinx.util.i18n import search_image_for_language
from sphinx.util.osutil import ensuredir, ENOENT, EPIPE, EINVAL
if False:
from docutils.parsers.rst import Directive
from typing import Any, Dict, List, Tuple
from sphinx.application import Sphinx
logger = logging.getLogger(__name__)
class GraphvizError(SphinxError):
    """Raised when dot invocation fails or its output is unusable."""
    category = 'Graphviz error'
class ClickableMapDefinition:
    """Parse a dot-generated cmapx file and collect its clickable areas.

    ``id`` holds the map id (regenerated from a hash of the dot source when
    dot emitted its ``%3`` placeholder id); ``clickable`` collects the lines
    carrying an ``href`` attribute.
    """
    maptag_re = re.compile('<map id="(.*?)"')
    href_re = re.compile('href=".*?"')

    def __init__(self, filename, content, dot=''):
        self.id = None
        self.filename = filename
        self.content = content.splitlines()
        self.clickable = []
        self.parse(dot=dot)

    def parse(self, dot=None):
        """Extract the map id and the clickable lines; raise on bad input."""
        matched = self.maptag_re.match(self.content[0])
        if not matched:
            raise GraphvizError('Invalid clickable map file found: %s' % self.filename)
        self.id = matched.group(1)
        if self.id == '%3':
            # graphviz could not determine a map name: derive a stable id
            # from the dot source instead.
            hashed = sha1(dot.encode('utf-8')).hexdigest()
            # BUG FIX: the id prefix was misspelled 'grapviz'.
            self.id = 'graphviz%s' % hashed[-10:]
            self.content[0] = self.content[0].replace('%3', self.id)
        for line in self.content:
            if self.href_re.search(line):
                self.clickable.append(line)

    def generate_clickable_map(self):
        """Return the <map> element restricted to clickable areas, or ''."""
        if self.clickable:
            return '\n'.join([self.content[0]] + self.clickable + [self.content[-1]])
        else:
            return ''
class graphviz(nodes.General, nodes.Inline, nodes.Element):
    """Docutils node carrying graphviz source ('code') and render 'options'."""
    pass
def figure_wrapper(directive, node, caption):
    """Wrap *node* in a docutils figure with *caption* parsed as reST.

    The 'align' attribute, if present, is moved from the inner node to the
    figure so alignment applies to the whole figure.
    """
    figure_node = nodes.figure('', node)
    if 'align' in node:
        figure_node['align'] = node.attributes.pop('align')
    # Parse the caption text so inline markup (e.g. emphasis) works.
    parsed = nodes.Element()
    directive.state.nested_parse(ViewList([caption], source=''),
                                 directive.content_offset, parsed)
    caption_node = nodes.caption(parsed[0].rawsource, '',
                                 *parsed[0].children)
    caption_node.source = parsed[0].source
    caption_node.line = parsed[0].line
    figure_node += caption_node
    return figure_node
def align_spec(argument):
    """Directive option validator: restrict ``:align:`` to left/center/right."""
    allowed = ('left', 'center', 'right')
    return directives.choice(argument, allowed)
class Graphviz(SphinxDirective):
    """The ``.. graphviz::`` directive.

    Takes the dot source either as directive content or from an external
    file given as the (single, optional) argument — never both.
    """
    has_content = True
    required_arguments = 0
    optional_arguments = 1
    final_argument_whitespace = False
    option_spec = {
        'alt': directives.unchanged,
        'align': align_spec,
        'caption': directives.unchanged,
        'graphviz_dot': directives.unchanged,
        'name': directives.unchanged,
    }
    def run(self):
        """Build and return the graphviz node (possibly wrapped in a figure)."""
        if self.arguments:
            document = self.state.document
            if self.content:
                # Content and filename are mutually exclusive.
                return [document.reporter.warning(
                    __('Graphviz directive cannot have both content and '
                       'a filename argument'), line=self.lineno)]
            argument = search_image_for_language(self.arguments[0], self.env)
            rel_filename, filename = self.env.relfn2path(argument)
            # Rebuild the document when the external dot file changes.
            self.env.note_dependency(rel_filename)
            try:
                with open(filename, 'r', encoding='utf-8') as fp:
                    dotcode = fp.read()
            except (IOError, OSError):
                return [document.reporter.warning(
                    __('External Graphviz file %r not found or reading '
                       'it failed') % filename, line=self.lineno)]
        else:
            dotcode = '\n'.join(self.content)
            if not dotcode.strip():
                return [self.state_machine.reporter.warning(
                    __('Ignoring "graphviz" directive without content.'),
                    line=self.lineno)]
        node = graphviz()
        node['code'] = dotcode
        node['options'] = {'docname': self.env.docname}
        if 'graphviz_dot' in self.options:
            node['options']['graphviz_dot'] = self.options['graphviz_dot']
        if 'alt' in self.options:
            node['alt'] = self.options['alt']
        if 'align' in self.options:
            node['align'] = self.options['align']
        caption = self.options.get('caption')
        if caption:
            node = figure_wrapper(self, node, caption)
            self.add_name(node)
        return [node]
class GraphvizSimple(SphinxDirective):
    """The ``.. graph::`` / ``.. digraph::`` shorthand directives.

    The directive name and its single argument become the dot header
    (``graph NAME { ... }``); the content becomes the body.
    """
    has_content = True
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = {
        'alt': directives.unchanged,
        'align': align_spec,
        'caption': directives.unchanged,
        'graphviz_dot': directives.unchanged,
        'name': directives.unchanged,
    }
    def run(self):
        """Build and return the graphviz node (possibly wrapped in a figure)."""
        node = graphviz()
        node['code'] = '%s %s {\n%s\n}\n' % \
                       (self.name, self.arguments[0], '\n'.join(self.content))
        node['options'] = {
            'docname': path.splitext(self.state.document.current_source)[0],
        }
        if 'graphviz_dot' in self.options:
            node['options']['graphviz_dot'] = self.options['graphviz_dot']
        if 'alt' in self.options:
            node['alt'] = self.options['alt']
        if 'align' in self.options:
            node['align'] = self.options['align']
        caption = self.options.get('caption')
        if caption:
            node = figure_wrapper(self, node, caption)
            self.add_name(node)
        return [node]
def render_dot(self, code, options, format, prefix='graphviz'):
    """Run *code* through dot and return (relative filename, output path).

    Output files are cached under a content hash; a previously rendered
    graph is not re-rendered.  Returns ``(None, None)`` when the dot
    executable is unavailable (warned once per executable).
    """
    graphviz_dot = options.get('graphviz_dot', self.builder.config.graphviz_dot)
    # Hash everything that influences the output so the cache is correct.
    hashkey = (code + str(options) + str(graphviz_dot) +
               str(self.builder.config.graphviz_dot_args)).encode('utf-8')
    fname = '%s-%s.%s' % (prefix, sha1(hashkey).hexdigest(), format)
    relfn = posixpath.join(self.builder.imgpath, fname)
    outfn = path.join(self.builder.outdir, self.builder.imagedir, fname)
    if path.isfile(outfn):
        return relfn, outfn
    if (hasattr(self.builder, '_graphviz_warned_dot') and
            self.builder._graphviz_warned_dot.get(graphviz_dot)):
        # Already warned that this dot executable is missing; stay quiet.
        return None, None
    ensuredir(path.dirname(outfn))
    if isinstance(code, text_type):
        code = code.encode('utf-8')
    dot_args = [graphviz_dot]
    dot_args.extend(self.builder.config.graphviz_dot_args)
    dot_args.extend(['-T' + format, '-o' + outfn])
    docname = options.get('docname', 'index')
    cwd = path.dirname(path.join(self.builder.srcdir, docname))
    if format == 'png':
        # Also emit the clickable image map next to the PNG.
        dot_args.extend(['-Tcmapx', '-o%s.map' % outfn])
    try:
        p = Popen(dot_args, stdout=PIPE, stdin=PIPE, stderr=PIPE, cwd=cwd)
    except OSError as err:
        if err.errno != ENOENT:
            raise
        logger.warning(__('dot command %r cannot be run (needed for graphviz '
                          'output), check the graphviz_dot setting'), graphviz_dot)
        if not hasattr(self.builder, '_graphviz_warned_dot'):
            self.builder._graphviz_warned_dot = {}
        self.builder._graphviz_warned_dot[graphviz_dot] = True
        return None, None
    try:
        stdout, stderr = p.communicate(code)
    except (OSError, IOError) as err:
        if err.errno not in (EPIPE, EINVAL):
            raise
        # dot closed its stdin early; drain whatever it produced.
        stdout, stderr = p.stdout.read(), p.stderr.read()
        p.wait()
    if p.returncode != 0:
        raise GraphvizError(__('dot exited with error:\n[stderr]\n%s\n'
                               '[stdout]\n%s') % (stderr, stdout))
    if not path.isfile(outfn):
        raise GraphvizError(__('dot did not produce an output file:\n[stderr]\n%s\n'
                               '[stdout]\n%s') % (stderr, stdout))
    return relfn, outfn
def render_dot_html(self, node, code, options, prefix='graphviz',
                    imgcls=None, alt=None):
    """Render *code* with dot and append HTML markup for it to ``self.body``.

    SVG output is embedded through an ``<object>`` element; PNG output
    through an ``<img>`` element, optionally with a clickable image map.
    Always raises ``nodes.SkipNode``.
    """
    # BUG FIX: this line read ``ormat = ...``, leaving ``format`` bound to
    # the builtin function, so the 'png'/'svg' membership check always
    # failed and every graph was skipped with a warning.
    format = self.builder.config.graphviz_output_format
    try:
        if format not in ('png', 'svg'):
            raise GraphvizError(__("graphviz_output_format must be one of 'png', "
                                   "'svg', but is %r") % format)
        fname, outfn = render_dot(self, code, options, format, prefix)
    except GraphvizError as exc:
        logger.warning(__('dot code %r: %s'), code, text_type(exc))
        raise nodes.SkipNode
    if imgcls:
        imgcls += " graphviz"
    else:
        imgcls = "graphviz"
    if fname is None:
        # dot was unavailable: fall back to showing the escaped source.
        self.body.append(self.encode(code))
    else:
        if alt is None:
            alt = node.get('alt', self.encode(code).strip())
        if 'align' in node:
            self.body.append('<div align="%s" class="align-%s">' %
                             (node['align'], node['align']))
        if format == 'svg':
            self.body.append('<div class="graphviz">')
            self.body.append('<object data="%s" type="image/svg+xml" class="%s">\n' %
                             (fname, imgcls))
            self.body.append('<p class="warning">%s</p>' % alt)
            self.body.append('</object></div>\n')
        else:
            with open(outfn + '.map', 'r', encoding='utf-8') as mapfile:
                imgmap = ClickableMapDefinition(outfn + '.map', mapfile.read(), dot=code)
            if imgmap.clickable:
                # The generated map has clickable areas: reference it.
                self.body.append('<div class="graphviz">')
                self.body.append('<img src="%s" alt="%s" usemap="#%s" class="%s" />' %
                                 (fname, alt, imgmap.id, imgcls))
                self.body.append('</div>\n')
                self.body.append(imgmap.generate_clickable_map())
            else:
                self.body.append('<div class="graphviz">')
                self.body.append('<img src="%s" alt="%s" class="%s" />' %
                                 (fname, alt, imgcls))
                self.body.append('</div>\n')
        if 'align' in node:
            self.body.append('</div>\n')
    raise nodes.SkipNode
def html_visit_graphviz(self, node):
    """HTML visitor for graphviz nodes."""
    source, opts = node['code'], node['options']
    render_dot_html(self, node, source, opts)
def render_dot_latex(self, node, code, options, prefix='graphviz'):
    """Render *code* with dot as PDF and emit the LaTeX to include it.

    Block-level graphs get alignment wrappers; inline graphs are included
    verbatim.  Always raises ``nodes.SkipNode``.
    """
    try:
        fname, outfn = render_dot(self, code, options, 'pdf', prefix)
    except GraphvizError as exc:
        logger.warning(__('dot code %r: %s'), code, text_type(exc))
        raise nodes.SkipNode
    is_inline = self.is_inline(node)
    if not is_inline:
        # pre/post carry the LaTeX alignment wrapper around the image.
        pre = ''
        post = ''
        if 'align' in node:
            if node['align'] == 'left':
                pre = '{'
                post = r'\hspace*{\fill}}'
            elif node['align'] == 'right':
                pre = r'{\hspace*{\fill}'
                post = '}'
            elif node['align'] == 'center':
                pre = r'{\hfill'
                post = r'\hspace*{\fill}}'
        self.body.append('\n%s' % pre)
    self.body.append(r'\sphinxincludegraphics[]{%s}' % fname)
    if not is_inline:
        self.body.append('%s\n' % post)
    raise nodes.SkipNode
def latex_visit_graphviz(self, node):
    """LaTeX visitor for graphviz nodes."""
    source, opts = node['code'], node['options']
    render_dot_latex(self, node, source, opts)
def render_dot_texinfo(self, node, code, options, prefix='graphviz'):
    """Render *code* with dot as PNG and emit a Texinfo @image directive.

    The ``.png`` suffix is stripped (``fname[:-4]``) because @image takes
    the basename.  Always raises ``nodes.SkipNode``.
    """
    try:
        fname, outfn = render_dot(self, code, options, 'png', prefix)
    except GraphvizError as exc:
        logger.warning(__('dot code %r: %s'), code, text_type(exc))
        raise nodes.SkipNode
    if fname is not None:
        self.body.append('@image{%s,,,[graphviz],png}\n' % fname[:-4])
    raise nodes.SkipNode
def texinfo_visit_graphviz(self, node):
    """Texinfo visitor for graphviz nodes."""
    source, opts = node['code'], node['options']
    render_dot_texinfo(self, node, source, opts)
def text_visit_graphviz(self, node):
    """Text builder: replace the graph with a bracketed placeholder."""
    label = _('[graph: %s]') % node['alt'] if 'alt' in node.attributes else _('[graph]')
    self.add_text(label)
    raise nodes.SkipNode
def man_visit_graphviz(self, node):
    """Man-page builder: emit a textual placeholder for the graph."""
    if 'alt' not in node.attributes:
        self.body.append(_('[graph]'))
    else:
        self.body.append(_('[graph: %s]') % node['alt'])
    raise nodes.SkipNode
def on_build_finished(app, exc):
    """After a successful build, ship graphviz.css into the _static dir."""
    if exc is not None:
        return
    css = path.join(sphinx.package_dir, 'templates', 'graphviz', 'graphviz.css')
    copy_asset_file(css, path.join(app.outdir, '_static'))
def setup(app):
    """Sphinx extension entry point: register node, directives and config."""
    app.add_node(graphviz,
                 html=(html_visit_graphviz, None),
                 latex=(latex_visit_graphviz, None),
                 texinfo=(texinfo_visit_graphviz, None),
                 text=(text_visit_graphviz, None),
                 man=(man_visit_graphviz, None))
    app.add_directive('graphviz', Graphviz)
    app.add_directive('graph', GraphvizSimple)
    app.add_directive('digraph', GraphvizSimple)
    # Rebuild HTML when any of these config values change.
    app.add_config_value('graphviz_dot', 'dot', 'html')
    app.add_config_value('graphviz_dot_args', [], 'html')
    app.add_config_value('graphviz_output_format', 'png', 'html')
    app.add_css_file('graphviz.css')
    app.connect('build-finished', on_build_finished)
    return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
| true | true |
f7f9732e25e8eb7f546f3c8c9cb822a0610bd658 | 706 | py | Python | MDRSREID/Trainer/evaluation_creation/PGFA_Evaluation/extract_part_label.py | nickhuang1996/HJL-re-id | 107b25f31c961f360f69560cfddd78dfc0da3291 | [
"MIT"
] | 43 | 2020-09-20T09:40:04.000Z | 2022-03-29T11:25:22.000Z | MDRSREID/Trainer/evaluation_creation/PGFA_Evaluation/extract_part_label.py | nickhuang1996/HJL-re-id | 107b25f31c961f360f69560cfddd78dfc0da3291 | [
"MIT"
] | 19 | 2020-10-05T05:35:38.000Z | 2021-12-10T03:17:31.000Z | MDRSREID/Trainer/evaluation_creation/PGFA_Evaluation/extract_part_label.py | nickhuang1996/HJL-re-id | 107b25f31c961f360f69560cfddd78dfc0da3291 | [
"MIT"
] | 18 | 2020-10-01T14:41:53.000Z | 2021-09-02T06:57:57.000Z | from MDRSREID.utils.data_utils.evaluations.PGFA.part_label import part_label_generate
import torch
def extract_part_label(item, cfg):
    """Build a batch of part-visibility labels from pose files.

    For each image in the batch, calls ``part_label_generate`` and stacks
    the per-image labels into one float tensor of shape
    (batch, cfg.model.num_parts).

    :param item: batch dict; reads 'test_pose_path' (pose file paths) and
        'height' (per-image heights; assumed 0-d tensors — TODO confirm).
    :param cfg: config object providing ``cfg.model.num_parts``.
    :return: FloatTensor of shape (len(imgnames), num_parts).
    """
    imgnames, imgheights = item['test_pose_path'], item['height']
    num_parts = cfg.model.num_parts
    # torch.zeros is the modern equivalent of FloatTensor(...).zero_()
    part_label_batch = torch.zeros(len(imgnames), num_parts)
    # enumerate replaces the original hand-maintained index counter
    for i, (imgname, height) in enumerate(zip(imgnames, imgheights)):
        part_label = part_label_generate(imgname, num_parts, height.item())
        part_label_batch[i] = torch.from_numpy(part_label)
    return part_label_batch
| 39.222222 | 85 | 0.715297 | from MDRSREID.utils.data_utils.evaluations.PGFA.part_label import part_label_generate
import torch
def extract_part_label(item, cfg):
    """Build a batch of part-visibility labels from pose files.

    :param item: batch dict; reads 'test_pose_path' and 'height'
        (heights are assumed to be 0-d tensors, hence ``.item()`` —
        TODO confirm against the caller).
    :param cfg: config object providing ``cfg.model.num_parts``.
    :return: FloatTensor of shape (N, cfg.model.num_parts).
    """
    imgnames, imgheights = item['test_pose_path'], item['height']
    N = len(imgnames)
    part_label_batch = torch.FloatTensor(N, cfg.model.num_parts).zero_()
    i = 0
    for imgname, height in zip(imgnames, imgheights):
        # One label vector per image, produced by the project helper.
        part_label = part_label_generate(imgname, cfg.model.num_parts, height.item())
        part_label = torch.from_numpy(part_label)
        part_label_batch[i] = part_label
        i += 1
    return part_label_batch
| true | true |
f7f97410d7a2f375c897d4fee26504933d2350d8 | 2,028 | py | Python | src/leetcode_2009_minimum_number_of_operations_to_make_array_continuous.py | sungho-joo/leetcode2github | ce7730ef40f6051df23681dd3c0e1e657abba620 | [
"MIT"
] | null | null | null | src/leetcode_2009_minimum_number_of_operations_to_make_array_continuous.py | sungho-joo/leetcode2github | ce7730ef40f6051df23681dd3c0e1e657abba620 | [
"MIT"
] | null | null | null | src/leetcode_2009_minimum_number_of_operations_to_make_array_continuous.py | sungho-joo/leetcode2github | ce7730ef40f6051df23681dd3c0e1e657abba620 | [
"MIT"
] | null | null | null | # @l2g 2009 python3
# [2009] Minimum Number of Operations to Make Array Continuous
# Difficulty: Hard
# https://leetcode.com/problems/minimum-number-of-operations-to-make-array-continuous
#
# You are given an integer array nums.In one operation,
# you can replace any element in nums with any integer.
# nums is considered continuous if both of the following conditions are fulfilled:
#
# All elements in nums are unique.
# The difference between the maximum element and the minimum element in nums equals nums.length - 1.
#
# For example, nums = [4, 2, 5, 3] is continuous, but nums = [1, 2, 3, 5, 6] is not continuous.
# Return the minimum number of operations to make nums continuous.
#
# Example 1:
#
# Input: nums = [4,2,5,3]
# Output: 0
# Explanation: nums is already continuous.
#
# Example 2:
#
# Input: nums = [1,2,3,5,6]
# Output: 1
# Explanation: One possible solution is to change the last element to 4.
# The resulting array is [1,2,3,5,4], which is continuous.
#
# Example 3:
#
# Input: nums = [1,10,100,1000]
# Output: 3
# Explanation: One possible solution is to:
# - Change the second element to 2.
# - Change the third element to 3.
# - Change the fourth element to 4.
# The resulting array is [1,2,3,4], which is continuous.
#
#
# Constraints:
#
# 1 <= nums.length <= 10^5
# 1 <= nums[i] <= 10^9
#
#
import bisect
from typing import List
class Solution:
    """LeetCode 2009: fewest replacements to make ``nums`` continuous."""

    def minOperations(self, nums: List[int]) -> int:
        """Return the minimum number of element replacements needed.

        For each distinct value taken as the window start, count how many
        distinct values already lie in [start, start + n - 1]; the answer
        is n minus the best such count.
        """
        n = len(nums)
        uniques = sorted(set(nums))
        uniques.append(float("inf"))  # sentinel so bisect never runs off the end
        best_window = -1
        for start in range(len(uniques) - 1):
            window_end = uniques[start] + n - 1
            # bisect_right on the unique sorted list equals the original
            # bisect_left-then-increment-on-equality dance.
            stop = bisect.bisect_right(uniques, window_end)
            best_window = max(best_window, stop - start)
            if stop == n:
                break
        return n - best_window
if __name__ == "__main__":
    # Run this problem's unit tests when the module is executed directly.
    import os
    import pytest
    pytest.main([os.path.join("tests", "test_2009.py")])
| 27.780822 | 100 | 0.65286 |
import bisect
from typing import List
class Solution:
    """LeetCode 2009: fewest replacements to make ``nums`` continuous."""

    def minOperations(self, nums: List[int]) -> int:
        """Return the minimum number of element replacements needed.

        Slides a window of width len(nums) over the sorted distinct
        values; the values already inside the window can be kept,
        everything else must be replaced.
        """
        num_set = set(nums)
        # Infinity sentinel keeps bisect results inside the list bounds.
        sorted_key = sorted(num_set) + [float("inf")]
        max_occupied = -1
        for i in range(len(num_set)):
            range_end = sorted_key[i] + len(nums) - 1
            ind = bisect.bisect_left(sorted_key, range_end)
            if sorted_key[ind] == range_end:
                ind += 1
            # ind - i distinct values lie inside the current window.
            max_occupied = max(max_occupied, ind - i)
            if ind == len(nums):
                break
        return len(nums) - max_occupied
if __name__ == "__main__":
    # Run this problem's unit tests when the module is executed directly.
    import os
    import pytest
    pytest.main([os.path.join("tests", "test_2009.py")])
| true | true |
f7f9742f0a387bb74dbdfff0fc48a7450a5e8809 | 2,228 | py | Python | clinicaml/pipelines/t1_freesurfer_longitudinal/t1_freesurfer_longitudinal_correction_cli.py | HorlavaNastassya/clinica | 65424423e319f981f0b20ebd6bb82060aab271c2 | [
"MIT"
] | null | null | null | clinicaml/pipelines/t1_freesurfer_longitudinal/t1_freesurfer_longitudinal_correction_cli.py | HorlavaNastassya/clinica | 65424423e319f981f0b20ebd6bb82060aab271c2 | [
"MIT"
] | null | null | null | clinicaml/pipelines/t1_freesurfer_longitudinal/t1_freesurfer_longitudinal_correction_cli.py | HorlavaNastassya/clinica | 65424423e319f981f0b20ebd6bb82060aab271c2 | [
"MIT"
] | null | null | null | # coding: utf8
import clinicaml.engine as ce
class T1FreeSurferLongitudinalCorrectionCLI(ce.CmdParser):
    """CLI wrapper for the t1-freesurfer-longitudinal-correction pipeline."""

    def define_name(self):
        """Define the sub-command name to run this pipeline."""
        self._name = "t1-freesurfer-longitudinal-correction"
    def define_description(self):
        """Define a description of this pipeline."""
        self._description = (
            "Longitudinal pre-processing correction of T1w images with FreeSurfer:\n"
            "http://clinica.run/doc/Pipelines/T1_FreeSurfer_Longitudinal/"
        )
    def define_options(self):
        """Register command-line arguments for this sub-command."""
        from clinicaml.engine.cmdparser import PIPELINE_CATEGORIES
        # Clinica compulsory arguments (e.g. BIDS, CAPS, group_label)
        clinica_comp = self._args.add_argument_group(
            PIPELINE_CATEGORIES["CLINICA_COMPULSORY"]
        )
        clinica_comp.add_argument("caps_directory", help="Path to the CAPS directory.")
        # Clinica standard arguments (e.g. --n_procs)
        self.add_clinica_standard_arguments(add_overwrite_flag=True)
    def run_command(self, args):
        """Run the pipeline with defined args."""
        from networkx import Graph
        from clinicaml.utils.ux import print_crash_files_and_exit, print_end_pipeline
        from .t1_freesurfer_longitudinal_correction_pipeline import (
            T1FreeSurferLongitudinalCorrection,
        )
        pipeline = T1FreeSurferLongitudinalCorrection(
            caps_directory=self.absolute_path(args.caps_directory),
            tsv_file=self.absolute_path(args.subjects_sessions_tsv),
            base_dir=self.absolute_path(args.working_directory),
            name="t1-freesurfer-longitudinal-correction",
            overwrite_caps=args.overwrite_outputs,
        )
        if args.n_procs:
            exec_pipeline = pipeline.run(
                plugin="MultiProc", plugin_args={"n_procs": args.n_procs}
            )
        else:
            exec_pipeline = pipeline.run()
        # pipeline.run returns a networkx Graph on success; anything else
        # is treated as a failed run with crash files to report.
        if isinstance(exec_pipeline, Graph):
            print_end_pipeline(
                self.name, pipeline.base_dir, pipeline.base_dir_was_specified
            )
        else:
            print_crash_files_and_exit(args.logname, pipeline.base_dir)
| 36.52459 | 87 | 0.667415 |
import clinicaml.engine as ce
class T1FreeSurferLongitudinalCorrectionCLI(ce.CmdParser):
    """CLI wrapper for the t1-freesurfer-longitudinal-correction pipeline."""

    def define_name(self):
        """Define the sub-command name to run this pipeline."""
        self._name = "t1-freesurfer-longitudinal-correction"
    def define_description(self):
        """Define a description of this pipeline."""
        self._description = (
            "Longitudinal pre-processing correction of T1w images with FreeSurfer:\n"
            "http://clinica.run/doc/Pipelines/T1_FreeSurfer_Longitudinal/"
        )
    def define_options(self):
        """Register command-line arguments for this sub-command."""
        from clinicaml.engine.cmdparser import PIPELINE_CATEGORIES
        clinica_comp = self._args.add_argument_group(
            PIPELINE_CATEGORIES["CLINICA_COMPULSORY"]
        )
        clinica_comp.add_argument("caps_directory", help="Path to the CAPS directory.")
        self.add_clinica_standard_arguments(add_overwrite_flag=True)
    def run_command(self, args):
        """Run the pipeline with the parsed command-line args."""
        from networkx import Graph
        from clinicaml.utils.ux import print_crash_files_and_exit, print_end_pipeline
        from .t1_freesurfer_longitudinal_correction_pipeline import (
            T1FreeSurferLongitudinalCorrection,
        )
        pipeline = T1FreeSurferLongitudinalCorrection(
            caps_directory=self.absolute_path(args.caps_directory),
            tsv_file=self.absolute_path(args.subjects_sessions_tsv),
            base_dir=self.absolute_path(args.working_directory),
            name="t1-freesurfer-longitudinal-correction",
            overwrite_caps=args.overwrite_outputs,
        )
        if args.n_procs:
            exec_pipeline = pipeline.run(
                plugin="MultiProc", plugin_args={"n_procs": args.n_procs}
            )
        else:
            exec_pipeline = pipeline.run()
        # pipeline.run returns a networkx Graph on success; anything else
        # is treated as a failed run with crash files to report.
        if isinstance(exec_pipeline, Graph):
            print_end_pipeline(
                self.name, pipeline.base_dir, pipeline.base_dir_was_specified
            )
        else:
            print_crash_files_and_exit(args.logname, pipeline.base_dir)
| true | true |
f7f974932af9192b7b0309274e49c97198fbcc6c | 5,138 | py | Python | recipes/Python/580696_DBF_reader_writer__selective_fields/recipe-580696.py | tdiprima/code | 61a74f5f93da087d27c70b2efe779ac6bd2a3b4f | [
"MIT"
] | 2,023 | 2017-07-29T09:34:46.000Z | 2022-03-24T08:00:45.000Z | recipes/Python/580696_DBF_reader_writer__selective_fields/recipe-580696.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 32 | 2017-09-02T17:20:08.000Z | 2022-02-11T17:49:37.000Z | recipes/Python/580696_DBF_reader_writer__selective_fields/recipe-580696.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 780 | 2017-07-28T19:23:28.000Z | 2022-03-25T20:39:41.000Z | import struct, datetime, decimal, itertools
from collections import namedtuple
# Per-field metadata: dbf name/type/size/decimals plus the struct format
# string ('fmt'), its byte size ('fmtsiz'), whether the caller asked for
# the field ('keep'), and the field's byte offset in a record ('seekme').
FI = namedtuple('FieldInfo', ('name', 'typ', 'size', 'deci',
                              'fmt', 'fmtsiz', 'keep', 'seekme'))
def dbfreader(f, names, nullreplace=None):
    """Returns an iterator over records in a Xbase DBF file.
    The first row returned contains the field names. The second row
    contains field specs: (type, size, decimal places). Subsequent rows
    contain the data records. If a record is marked as deleted, it is
    skipped.
    names is the field names to extract. The value of nullreplace is
    used with data of type 'N' as a replacement for '\0'.
    File should be opened for binary reads.
    """
    # NOTE: Python 2 code (xrange, itertools.izip, bytes-as-str I/O).
    # See DBF format spec at:
    # http://www.pgts.com.au/download/public/xbase.htm#DBF_STRUCT
    numrec, lenheader = struct.unpack('<xxxxLH22x', f.read(32))
    numfields = (lenheader - 33) // 32
    fields = [FI('DeletionFlag', 'C', 1, 0,
                 '1s', struct.calcsize('1s'), True, 0)]  # discarded in main loop
    for fieldno in xrange(numfields):
        name, typ, size, deci = struct.unpack('<11sc4xBB14x', f.read(32))
        name = name.replace('\0', '')  # eliminate NULs from string
        fmt = str(size) + 's'
        prev = fields[fieldno]
        # seekme accumulates each field's byte offset within a record.
        fi = FI(name, typ, size, deci, fmt, struct.calcsize(fmt), name in names,
                prev.seekme + prev.size)
        fields.append(fi)
    selfields = [field for field in fields if field.keep]
    yield [field.name for field in selfields[1:]]
    yield [tuple(field[1:4]) for field in selfields[1:]]
    terminator = f.read(1)
    assert terminator == '\r'
    for i in xrange(numrec):
        refaddr = f.tell()
        record = []
        for field in selfields:
            # Seek to each selected field so unselected bytes are skipped.
            f.seek(refaddr + field.seekme)
            record.append(struct.unpack(field.fmt, f.read(field.fmtsiz))[0])
        if record[0] != ' ':
            continue  # deleted record
        result = []
        for sf, value in itertools.izip(selfields, record):
            if sf.name == 'DeletionFlag':
                continue
            if sf.typ == "N":
                value = value.replace('\0', '').lstrip()
                if value == '':
                    value = nullreplace
                elif sf.deci:
                    value = decimal.Decimal(value)
                else:
                    value = int(value)
            elif sf.typ == 'D':
                y, m, d = int(value[:4]), int(value[4:6]), int(value[6:8])
                value = datetime.date(y, m, d)
            elif sf.typ == 'L':
                value = (value in 'YyTt' and 'T') or (value in 'NnFf' and 'F') or '?'
            elif sf.typ == 'F':
                value = float(value)
            result.append(value)
        # Position the file at the start of the next record.
        f.seek(refaddr + fields[-1].seekme + fields[-1].fmtsiz)
        yield result
def dbfwriter(f, fieldnames, fieldspecs, records, nullreplace=None):
    """Write *records* to *f* as a binary Xbase DBF file (returns None).
    File f should be open for writing in a binary mode.
    Fieldnames should be no longer than ten characters and not include \x00.
    Fieldspecs are in the form (type, size, deci) where
        type is one of:
            C for ascii character data
            M for ascii character memo data (real memo fields not supported)
            D for datetime objects
            N for ints or decimal objects
            L for logical values 'T', 'F', or '?'
        size is the field width
        deci is the number of decimal places in the provided decimal object
    Records can be an iterable over the records (sequences of field values).
    The value of nullreplace is compared with values of type N and, if
    equal, replaced with '\0' in the output.
    """
    # NOTE: Python 2 code (itertools.izip, bytes-as-str I/O).
    # header info
    ver = 3
    now = datetime.datetime.now()
    yr, mon, day = now.year-1900, now.month, now.day
    numrec = len(records)
    numfields = len(fieldspecs)
    lenheader = numfields * 32 + 33
    lenrecord = sum(field[1] for field in fieldspecs) + 1
    hdr = struct.pack('<BBBBLHH20x', ver, yr, mon, day, numrec, lenheader, lenrecord)
    f.write(hdr)
    # field specs
    for name, (typ, size, deci) in itertools.izip(fieldnames, fieldspecs):
        name = name.ljust(11, '\x00')
        fld = struct.pack('<11sc4xBB14x', name, typ, size, deci)
        f.write(fld)
    # terminator
    f.write('\r')
    # records
    for record in records:
        f.write(' ')  # deletion flag
        for (typ, size, deci), value in itertools.izip(fieldspecs, record):
            if typ == "N":
                if value != nullreplace:
                    value = str(value).rjust(size, ' ')
                else:
                    value = '\0'.rjust(size, ' ')
            elif typ == 'D':
                value = value.strftime('%Y%m%d')
            elif typ == 'L':
                value = str(value)[0].upper()
            else:
                value = str(value)[:size].ljust(size, ' ')
            assert len(value) == size
            f.write(value)
    # End of file
    f.write('\x1A')
| 36.964029 | 85 | 0.557999 | import struct, datetime, decimal, itertools
from collections import namedtuple
FI = namedtuple('FieldInfo', ('name', 'typ', 'size', 'deci',
'fmt', 'fmtsiz', 'keep', 'seekme'))
def dbfreader(f, names, nullreplace=None):
    """Yield records from an Xbase DBF file opened for binary reads.

    First yields the selected field names, then their (type, size, deci)
    specs, then one list of converted values per non-deleted record.
    *names* selects which fields to extract; *nullreplace* substitutes
    for empty 'N' (numeric) fields.

    NOTE: Python 2 code (xrange, itertools.izip, bytes-as-str I/O).
    """
    # BUG FIX: the record count was bound to ``c`` while the record loop
    # below iterated ``xrange(numrec)``, raising NameError at runtime.
    numrec, lenheader = struct.unpack('<xxxxLH22x', f.read(32))
    numfields = (lenheader - 33) // 32
    # Synthetic leading field for the deletion flag; dropped from output.
    fields = [FI('DeletionFlag', 'C', 1, 0,
                 '1s', struct.calcsize('1s'), True, 0)]
    for fieldno in xrange(numfields):
        name, typ, size, deci = struct.unpack('<11sc4xBB14x', f.read(32))
        name = name.replace('\0', '')
        fmt = str(size) + 's'
        prev = fields[fieldno]
        # seekme accumulates each field's byte offset within a record.
        fi = FI(name, typ, size, deci, fmt, struct.calcsize(fmt), name in names,
                prev.seekme + prev.size)
        fields.append(fi)
    selfields = [field for field in fields if field.keep]
    yield [field.name for field in selfields[1:]]
    yield [tuple(field[1:4]) for field in selfields[1:]]
    terminator = f.read(1)
    assert terminator == '\r'
    for i in xrange(numrec):
        refaddr = f.tell()
        record = []
        for field in selfields:
            # Seek to each selected field so unselected bytes are skipped.
            f.seek(refaddr + field.seekme)
            record.append(struct.unpack(field.fmt, f.read(field.fmtsiz))[0])
        if record[0] != ' ':
            continue  # deleted record
        result = []
        for sf, value in itertools.izip(selfields, record):
            if sf.name == 'DeletionFlag':
                continue
            if sf.typ == "N":
                value = value.replace('\0', '').lstrip()
                if value == '':
                    value = nullreplace
                elif sf.deci:
                    value = decimal.Decimal(value)
                else:
                    value = int(value)
            elif sf.typ == 'D':
                y, m, d = int(value[:4]), int(value[4:6]), int(value[6:8])
                value = datetime.date(y, m, d)
            elif sf.typ == 'L':
                value = (value in 'YyTt' and 'T') or (value in 'NnFf' and 'F') or '?'
            elif sf.typ == 'F':
                value = float(value)
            result.append(value)
        # Position the file at the start of the next record.
        f.seek(refaddr + fields[-1].seekme + fields[-1].fmtsiz)
        yield result
def dbfwriter(f, fieldnames, fieldspecs, records, nullreplace=None):
    """Write *records* to *f* as a binary Xbase DBF file (returns None).

    *fieldspecs* are (type, size, deci) tuples; type is one of
    C/M/D/N/L.  Values of type 'N' equal to *nullreplace* are written
    as '\\0'-padded blanks.

    NOTE: Python 2 code (itertools.izip, bytes-as-str I/O).
    """
    # header info
    ver = 3
    now = datetime.datetime.now()
    yr, mon, day = now.year-1900, now.month, now.day
    numrec = len(records)
    numfields = len(fieldspecs)
    lenheader = numfields * 32 + 33
    lenrecord = sum(field[1] for field in fieldspecs) + 1
    hdr = struct.pack('<BBBBLHH20x', ver, yr, mon, day, numrec, lenheader, lenrecord)
    f.write(hdr)
    # field descriptor array
    for name, (typ, size, deci) in itertools.izip(fieldnames, fieldspecs):
        name = name.ljust(11, '\x00')
        fld = struct.pack('<11sc4xBB14x', name, typ, size, deci)
        f.write(fld)
    # header terminator
    f.write('\r')
    # records, each prefixed with a ' ' (not-deleted) flag byte
    for record in records:
        f.write(' ')
        for (typ, size, deci), value in itertools.izip(fieldspecs, record):
            if typ == "N":
                if value != nullreplace:
                    value = str(value).rjust(size, ' ')
                else:
                    value = '\0'.rjust(size, ' ')
            elif typ == 'D':
                value = value.strftime('%Y%m%d')
            elif typ == 'L':
                value = str(value)[0].upper()
            else:
                value = str(value)[:size].ljust(size, ' ')
            assert len(value) == size
            f.write(value)
    # end-of-file marker
    f.write('\x1A')
| true | true |
f7f975abfa2c6f8e6d00282620a5619b2ea07ce0 | 1,100 | py | Python | Fenetres_Infinies/app.py | Origin4/PythonChallenges | 49f88c9e7341b9cd9ae4432ddd5117ed7d217b7e | [
"MIT"
] | 6 | 2019-11-13T17:49:26.000Z | 2021-11-15T09:44:44.000Z | Fenetres_Infinies/app.py | Origin4/PythonChallenges | 49f88c9e7341b9cd9ae4432ddd5117ed7d217b7e | [
"MIT"
] | null | null | null | Fenetres_Infinies/app.py | Origin4/PythonChallenges | 49f88c9e7341b9cd9ae4432ddd5117ed7d217b7e | [
"MIT"
] | 7 | 2019-11-22T16:08:14.000Z | 2021-08-24T15:02:19.000Z | from PySide2 import QtWidgets # pip install PySide2
fenetres = [] # liste qui contient toutes les fenêtres ouvertes
def mettreAJourNombreFenetre():
    """Refresh the window-count label shown in every open window."""
    texte = f'Nombre de fenêtres : {len(fenetres)}'
    for fen in fenetres:
        fen.resultatNombreFenetre.setText(texte)
def creerFenetre():
    """Open a new window and refresh the counter in every window."""
    nouvelle = Fenetre()
    fenetres.append(nouvelle)
    mettreAJourNombreFenetre()
class Fenetre(QtWidgets.QWidget):
    """One application window: a button that spawns more windows and a
    line edit showing the current window count."""
    def __init__(self):
        super(Fenetre, self).__init__()
        self.setWindowTitle('Application')
        self.resize(280, 50)
        layout = QtWidgets.QVBoxLayout(self)
        button = QtWidgets.QPushButton('Cliquez sur le bouton')
        self.resultatNombreFenetre = QtWidgets.QLineEdit('Nombre de fenêtres : 0')
        layout.addWidget(button)
        layout.addWidget(self.resultatNombreFenetre)
        # Each click opens another window.
        button.clicked.connect(creerFenetre)
        self.show()
    def closeEvent(self, event):
        """Remove this window from the registry and update the counters."""
        del fenetres[fenetres.index(self)]
        mettreAJourNombreFenetre()
        event.accept()
app = QtWidgets.QApplication([])  # one Qt application instance per process
creerFenetre()  # open the initial window
app.exec_() | 31.428571 | 89 | 0.699091 | from PySide2 import QtWidgets
fenetres = []
def mettreAJourNombreFenetre():
    """Refresh the window-count label in all open windows."""
    compte = len(fenetres)
    for fenetre_ouverte in fenetres:
        fenetre_ouverte.resultatNombreFenetre.setText(f'Nombre de fenêtres : {compte}')
def creerFenetre():
    """Open a new window, register it, and refresh every window's counter."""
    nouvelle_fenetre = Fenetre()
    fenetres.append(nouvelle_fenetre)
    mettreAJourNombreFenetre()
class Fenetre(QtWidgets.QWidget):
    """Application window.

    Each window shows a button that opens yet another window, plus a line
    edit displaying how many windows are currently open.
    """

    def __init__(self):
        super(Fenetre, self).__init__()
        self.setWindowTitle('Application')
        self.resize(280, 50)
        layout = QtWidgets.QVBoxLayout(self)
        button = QtWidgets.QPushButton('Cliquez sur le bouton')
        # Read-mostly display of the current window count.
        self.resultatNombreFenetre = QtWidgets.QLineEdit('Nombre de fenêtres : 0')
        layout.addWidget(button)
        layout.addWidget(self.resultatNombreFenetre)
        button.clicked.connect(creerFenetre)
        self.show()

    def closeEvent(self, event):
        """Qt close hook: unregister this window and refresh the counters."""
        # list.remove() is the idiomatic single-pass form of
        # `del fenetres[fenetres.index(self)]` (same semantics, one scan).
        fenetres.remove(self)
        mettreAJourNombreFenetre()
        event.accept()
app = QtWidgets.QApplication([])
creerFenetre()
app.exec_() | true | true |
f7f9771f579cf993bef9434348f065621ffd4f7e | 2,747 | py | Python | pages/process.py | rsmecking/Predicting_women_shoe_prices | 41190d37c264664cbec46c64ff2df5e488c6018b | [
"MIT"
] | null | null | null | pages/process.py | rsmecking/Predicting_women_shoe_prices | 41190d37c264664cbec46c64ff2df5e488c6018b | [
"MIT"
] | null | null | null | pages/process.py | rsmecking/Predicting_women_shoe_prices | 41190d37c264664cbec46c64ff2df5e488c6018b | [
"MIT"
] | null | null | null | # Imports from 3rd party libraries
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
# Imports from this application
from app import app
# 1 column layout
# https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout
# Single full-width column holding the write-up for the "Process" page.
# Typos in the user-facing markdown fixed (e.g. "XGBoots" -> "XGBoost",
# "iderations" -> "iterations", "would but" -> "would be").
column1 = dbc.Col(
    [
        dcc.Markdown(
            """

            #### Process
            Much of the [data](https://www.kaggle.com/datafiniti/womens-shoes-prices#7210_1.csv) was not very usable for
            prediction purposes. I needed to do a lot of feature engineering to get any predictability
            for the target, ‘price’. Nearly every feature used for the project I extracted from the ‘name’ column because
            most of the descriptive words were contained within the name. Example being 4” heeled pumps. Pumps imply that there
            is a heel to the shoe, so I created a column ‘has_heel’ that searched for keywords like pump and heel. I originally
            had in the range of 500 columns but realized having that many inputs for an app would be cumbersome.
            ##### Target
            Below are all the prices (the target) of all the shoes graphed. We can see that most shoes are in the $50 range.
            My mean shoe price was $65 but the median was around $52. The graph in orange is the prices logged. This allows
            the more extreme prices to be evenly "weighted" or more defined like it is shown in the blue graph. Logarithmic
            scaling shrinks or compresses the more common outcomes, while expanding the lesser. You’re not able to see prices
            but it allows for better representing the values, specifically when shown visually.
             
            ##### Machine Learning Models
            I ran the data through linear, random forest, and XGBoost regression ML pipelines. Every model was able to beat the
            baseline, inferring I was able to train the models correctly. Below is a graph showing a validation curve.
            This is used to find the best parameters to obtain a good fitting model through iterations. Linear regression is being
            deprecated (removed) from the process so I had it run with root mean squared error or RMSE. As shown in the graph after
            a certain amount of attempts it will start to level out meaning it has optimized the learning potential.
            
            """
        ),
    ],
)

layout = dbc.Row([column1])
| 49.053571 | 132 | 0.67601 |
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from app import app
# Single full-width column holding the write-up for the "Process" page.
# Typos in the user-facing markdown fixed (e.g. "XGBoots" -> "XGBoost",
# "iderations" -> "iterations", "would but" -> "would be").
column1 = dbc.Col(
    [
        dcc.Markdown(
            """
            #### Process
            Much of the [data](https://www.kaggle.com/datafiniti/womens-shoes-prices#7210_1.csv) was not very usable for
            prediction purposes. I needed to do a lot of feature engineering to get any predictability
            for the target, ‘price’. Nearly every feature used for the project I extracted from the ‘name’ column because
            most of the descriptive words were contained within the name. Example being 4” heeled pumps. Pumps imply that there
            is a heel to the shoe, so I created a column ‘has_heel’ that searched for keywords like pump and heel. I originally
            had in the range of 500 columns but realized having that many inputs for an app would be cumbersome.
            ##### Target
            Below are all the prices (the target) of all the shoes graphed. We can see that most shoes are in the $50 range.
            My mean shoe price was $65 but the median was around $52. The graph in orange is the prices logged. This allows
            the more extreme prices to be evenly "weighted" or more defined like it is shown in the blue graph. Logarithmic
            scaling shrinks or compresses the more common outcomes, while expanding the lesser. You’re not able to see prices
            but it allows for better representing the values, specifically when shown visually.
             
            ##### Machine Learning Models
            I ran the data through linear, random forest, and XGBoost regression ML pipelines. Every model was able to beat the
            baseline, inferring I was able to train the models correctly. Below is a graph showing a validation curve.
            This is used to find the best parameters to obtain a good fitting model through iterations. Linear regression is being
            deprecated (removed) from the process so I had it run with root mean squared error or RMSE. As shown in the graph after
            a certain amount of attempts it will start to level out meaning it has optimized the learning potential.
            
            """
        ),
    ],
)

layout = dbc.Row([column1])
| true | true |
f7f97739bd2ab8edd48d60f1478b7773aadd9be1 | 9,349 | py | Python | opgc/core/repository_service.py | DirtyBoyz/opgc_backend | 3fc7160d9d637378a358d4fc161fae495a18f0bd | [
"MIT"
] | 9 | 2020-08-19T02:49:03.000Z | 2022-03-04T10:50:18.000Z | opgc/core/repository_service.py | DirtyBoyz/opgc_backend | 3fc7160d9d637378a358d4fc161fae495a18f0bd | [
"MIT"
] | 7 | 2021-07-20T15:13:07.000Z | 2022-03-08T13:14:43.000Z | opgc/core/repository_service.py | DirtyBoyz/opgc_backend | 3fc7160d9d637378a358d4fc161fae495a18f0bd | [
"MIT"
] | 2 | 2021-08-13T08:05:34.000Z | 2022-02-28T05:51:06.000Z | import asyncio
import json
from typing import Optional
import aiohttp
import requests
from django.conf import settings
from apps.githubs.models import GithubUser, Repository, Language, UserLanguage
from core.github_dto import RepositoryDto
from utils.exceptions import manage_api_call_fail, REASON_FORBIDDEN
PER_PAGE = 50
class RepositoryService:
    """Synchronises a GithubUser's repositories with the GitHub REST API.

    Collects per-repository contribution counts, stargazer totals and
    per-language byte counts, then persists them through the Django ORM.
    """

    def __init__(self, github_user: GithubUser):
        self.github_user = github_user
        self.total_contribution = 0
        self.total_stargazers_count = 0
        self.repositories = []  # repositories to update
        self.new_repository_list = []  # Repository rows to be bulk-created
        self.update_language_dict = {}  # language type -> byte count to update

    @staticmethod
    def create_dto(repository_data: dict) -> RepositoryDto:
        """Wrap a raw repository payload from the GitHub API in a RepositoryDto."""
        return RepositoryDto(**repository_data)

    def update_repositories(self) -> bool:
        """
        Update every repository recorded for the user, creating new rows and
        deleting rows whose repositories no longer exist on GitHub.
        """
        # Fetch all repositories currently stored for the user.
        user_repositories = list(Repository.objects.filter(github_user=self.github_user))
        # loop = asyncio.get_event_loop()
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        loop.run_until_complete(self.update_repository_futures(self.repositories, user_repositories))
        if self.new_repository_list:
            Repository.objects.bulk_create(self.new_repository_list)
        # Anything still left in user_repositories was deleted on GitHub,
        # so delete those rows from the DB as well.
        repo_ids = []
        for repo in user_repositories:
            repo_ids.append(repo.id)
        if repo_ids:
            Repository.objects.filter(id__in=repo_ids).delete()
        return True

    def create_repository(self, repository: RepositoryDto) -> (int, Optional[Repository]):
        """Build a Repository row for *repository* when the user owns or
        contributes to it.

        Returns (contribution_count, Repository-or-None); forks return (0, None).
        """
        contribution = 0
        languages = ''
        is_contributor = False
        new_repository = None
        if repository.fork is True:
            return 0, None
        # Check whether the user is a contributor to the repository.
        # The logic below has to run in order to look up contributions and languages.
        # Only a "too many contributors" 403 error makes contributions unverifiable.
        params = {'per_page': PER_PAGE, 'page': 1}
        for i in range(0, (self.github_user.public_repos // PER_PAGE) + 1):
            params['page'] = i + 1
            res = requests.get(repository.contributors_url, headers=settings.GITHUB_API_HEADER)
            if res.status_code != 200:
                fail_type = manage_api_call_fail(self.github_user, res.status_code)
                if fail_type == REASON_FORBIDDEN:
                    break
            else:
                try:
                    contributors = json.loads(res.content)
                except json.JSONDecodeError:
                    return contribution, new_repository
                for contributor in contributors:
                    # Type is 'User' and the contributor is this user
                    # (GitHub usernames are case-insensitive, so compare lower-cased).
                    if contributor.get('type') == 'User' and \
                            contributor.get('login').lower() == self.github_user.username.lower():
                        contribution = contributor.get('contributions', 0)
                        if contribution > 0:
                            languages = self.record_language(repository.languages_url)
                        is_contributor = True
                        break
                if is_contributor:
                    break
        if is_contributor or repository.owner.lower() == self.github_user.username.lower():
            # The user is a contributor to, or the owner of, the repository.
            new_repository = Repository(
                github_user=self.github_user,
                name=repository.name,
                full_name=repository.full_name,
                owner=repository.owner,
                contribution=contribution,
                stargazers_count=repository.stargazers_count,
                rep_language=repository.language if repository.language else '',
                languages=languages
            )
            self.total_stargazers_count += repository.stargazers_count
        return contribution, new_repository

    def record_language(self, languages_url: str) -> str:
        """
        Look up the languages used in a repository and accumulate their byte
        counts in self.update_language_dict.
        - count: number of bytes of code written in the given language.
        Returns the language names as a JSON-encoded list ('' on failure).
        """
        res = requests.get(languages_url, headers=settings.GITHUB_API_HEADER)
        if res.status_code != 200:
            manage_api_call_fail(self.github_user, res.status_code)
        try:
            languages_data = json.loads(res.content)
        except json.JSONDecodeError:
            return ''
        if languages_data:
            for _type, count in languages_data.items():
                if not self.update_language_dict.get(_type):
                    self.update_language_dict[_type] = count
                else:
                    self.update_language_dict[_type] += count
            return json.dumps(list(languages_data.keys()))
        return ''

    def update_or_create_language(self):
        """
        Create any newly-seen Language rows and update the user's per-language
        usage counts (byte totals) collected in self.update_language_dict.
        """
        # Create Language rows that did not yet exist in the DB.
        new_language_list = []
        exists_languages = set(Language.objects.filter(
            type__in=self.update_language_dict.keys()).values_list('type', flat=True)
        )
        new_languages = set(self.update_language_dict.keys()) - exists_languages
        for language in new_languages:
            new_language_list.append(Language(type=language))
        if new_language_list:
            Language.objects.bulk_create(new_language_list)
        # Update the UserLanguage rows that already existed.
        new_user_languages = []
        user_language_qs = UserLanguage.objects.prefetch_related('language').filter(
            github_user_id=self.github_user.id, language__type__in=self.update_language_dict.keys()
        )
        for user_language in user_language_qs:
            if user_language.language.type in self.update_language_dict.keys():
                count = self.update_language_dict.pop(user_language.language.type)
                if user_language.number != count:
                    user_language.number = count
                    user_language.save(update_fields=['number'])
        # Create the new UserLanguage rows.
        languages = Language.objects.filter(type__in=self.update_language_dict.keys())
        for language in languages:
            new_user_languages.append(
                UserLanguage(
                    github_user_id=self.github_user.id,
                    language_id=language.id,
                    number=self.update_language_dict.pop(language.type)
                )
            )
        if new_user_languages:
            UserLanguage.objects.bulk_create(new_user_languages)

    async def update_repository(self, repository: RepositoryDto, user_repositories: list):  # coroutine
        """Refresh one stored repository, or create it if not stored yet."""
        is_exist_repo = False
        for idx, user_repo in enumerate(user_repositories):
            if user_repo.full_name == repository.full_name and user_repo.owner == repository.owner:
                is_exist_repo = True
                # NOTE(review): popping while enumerating shifts later indices;
                # this looks safe only because at most one entry matches — confirm.
                user_repositories.pop(idx)
                update_fields = []
                contribution = 0
                # Check whether the user is a contributor to the repository.
                async with aiohttp.ClientSession() as session:
                    async with session.get(repository.contributors_url, headers=settings.GITHUB_API_HEADER) as res:
                        response_data = await res.text()
                        response_status = res.status
                if response_status == 200:
                    for contributor in json.loads(response_data):
                        # Type is 'User' and the contributor is this user
                        # (GitHub usernames are case-insensitive, so compare lower-cased).
                        if contributor.get('type') == 'User' and \
                                contributor.get('login').lower() == self.github_user.username.lower():
                            contribution = contributor.get('contributions')
                            # languages number update
                            self.record_language(repository.languages_url)
                            # repository update
                            if user_repo.contribution != contribution:
                                user_repo.contribution = contribution
                                update_fields.append('contribution')
                            # repository update
                            if user_repo.stargazers_count != repository.stargazers_count:
                                user_repo.stargazers_count = repository.stargazers_count
                                update_fields.append('stargazers_count')
                            if update_fields:
                                user_repo.save(update_fields=update_fields)
                            self.total_stargazers_count += repository.stargazers_count
                            self.total_contribution += contribution
                            break
        # A repository we have not stored before.
        if not is_exist_repo:
            _contribution, new_repository = self.create_repository(repository)
            if new_repository:
                self.new_repository_list.append(new_repository)
                self.total_contribution += _contribution

    async def update_repository_futures(self, repositories, user_repositories: list):
        """Fan out update_repository over *repositories* and await them all."""
        futures = [
            asyncio.ensure_future(self.update_repository(repository, user_repositories)) for repository in repositories
        ]
        await asyncio.gather(*futures)
| 38.473251 | 119 | 0.6083 | import asyncio
import json
from typing import Optional
import aiohttp
import requests
from django.conf import settings
from apps.githubs.models import GithubUser, Repository, Language, UserLanguage
from core.github_dto import RepositoryDto
from utils.exceptions import manage_api_call_fail, REASON_FORBIDDEN
PER_PAGE = 50
class RepositoryService:
    """Synchronises a GithubUser's repositories with the GitHub REST API.

    Collects per-repository contribution counts, stargazer totals and
    per-language byte counts, then persists them through the Django ORM.
    """

    def __init__(self, github_user: GithubUser):
        self.github_user = github_user
        self.total_contribution = 0
        self.total_stargazers_count = 0
        self.repositories = []  # repositories to update
        self.new_repository_list = []  # Repository rows to be bulk-created
        self.update_language_dict = {}  # language type -> byte count to update

    @staticmethod
    def create_dto(repository_data: dict) -> RepositoryDto:
        """Wrap a raw repository payload from the GitHub API in a RepositoryDto."""
        return RepositoryDto(**repository_data)

    def update_repositories(self) -> bool:
        """
        Update every repository recorded for the user, creating new rows and
        deleting rows whose repositories no longer exist on GitHub.
        """
        # Fetch all repositories currently stored for the user.
        user_repositories = list(Repository.objects.filter(github_user=self.github_user))
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        loop.run_until_complete(self.update_repository_futures(self.repositories, user_repositories))
        if self.new_repository_list:
            Repository.objects.bulk_create(self.new_repository_list)
        # Anything still left in user_repositories was deleted on GitHub,
        # so delete those rows from the DB as well.
        repo_ids = []
        for repo in user_repositories:
            repo_ids.append(repo.id)
        if repo_ids:
            Repository.objects.filter(id__in=repo_ids).delete()
        return True

    def create_repository(self, repository: RepositoryDto) -> (int, Optional[Repository]):
        """Build a Repository row for *repository* when the user owns or
        contributes to it.

        Returns (contribution_count, Repository-or-None); forks return (0, None).
        """
        contribution = 0
        languages = ''
        is_contributor = False
        new_repository = None
        if repository.fork is True:
            return 0, None
        # Page through the contributors endpoint looking for this user.
        params = {'per_page': PER_PAGE, 'page': 1}
        for i in range(0, (self.github_user.public_repos // PER_PAGE) + 1):
            params['page'] = i + 1
            res = requests.get(repository.contributors_url, headers=settings.GITHUB_API_HEADER)
            if res.status_code != 200:
                fail_type = manage_api_call_fail(self.github_user, res.status_code)
                if fail_type == REASON_FORBIDDEN:
                    break
            else:
                try:
                    contributors = json.loads(res.content)
                except json.JSONDecodeError:
                    return contribution, new_repository
                for contributor in contributors:
                    # GitHub usernames are case-insensitive, so compare lower-cased.
                    if contributor.get('type') == 'User' and \
                            contributor.get('login').lower() == self.github_user.username.lower():
                        contribution = contributor.get('contributions', 0)
                        if contribution > 0:
                            languages = self.record_language(repository.languages_url)
                        is_contributor = True
                        break
                if is_contributor:
                    break
        if is_contributor or repository.owner.lower() == self.github_user.username.lower():
            # The user is a contributor to, or the owner of, the repository.
            new_repository = Repository(
                github_user=self.github_user,
                name=repository.name,
                full_name=repository.full_name,
                owner=repository.owner,
                contribution=contribution,
                stargazers_count=repository.stargazers_count,
                rep_language=repository.language if repository.language else '',
                languages=languages
            )
            self.total_stargazers_count += repository.stargazers_count
        return contribution, new_repository

    def record_language(self, languages_url: str) -> str:
        """Accumulate per-language byte counts for the repository into
        self.update_language_dict.

        Returns the language names as a JSON-encoded list ('' on failure).
        """
        res = requests.get(languages_url, headers=settings.GITHUB_API_HEADER)
        if res.status_code != 200:
            manage_api_call_fail(self.github_user, res.status_code)
        try:
            languages_data = json.loads(res.content)
        except json.JSONDecodeError:
            return ''
        if languages_data:
            for _type, count in languages_data.items():
                if not self.update_language_dict.get(_type):
                    self.update_language_dict[_type] = count
                else:
                    self.update_language_dict[_type] += count
            return json.dumps(list(languages_data.keys()))
        return ''

    def update_or_create_language(self):
        """Create any newly-seen Language rows and update the user's
        per-language usage counts from self.update_language_dict."""
        # Create Language rows that did not yet exist in the DB.
        new_language_list = []
        exists_languages = set(Language.objects.filter(
            type__in=self.update_language_dict.keys()).values_list('type', flat=True)
        )
        new_languages = set(self.update_language_dict.keys()) - exists_languages
        for language in new_languages:
            new_language_list.append(Language(type=language))
        if new_language_list:
            Language.objects.bulk_create(new_language_list)
        # Update the UserLanguage rows that already existed.
        new_user_languages = []
        user_language_qs = UserLanguage.objects.prefetch_related('language').filter(
            github_user_id=self.github_user.id, language__type__in=self.update_language_dict.keys()
        )
        for user_language in user_language_qs:
            if user_language.language.type in self.update_language_dict.keys():
                count = self.update_language_dict.pop(user_language.language.type)
                if user_language.number != count:
                    user_language.number = count
                    user_language.save(update_fields=['number'])
        # Create the new UserLanguage rows.
        languages = Language.objects.filter(type__in=self.update_language_dict.keys())
        for language in languages:
            new_user_languages.append(
                UserLanguage(
                    github_user_id=self.github_user.id,
                    language_id=language.id,
                    number=self.update_language_dict.pop(language.type)
                )
            )
        if new_user_languages:
            UserLanguage.objects.bulk_create(new_user_languages)

    async def update_repository(self, repository: RepositoryDto, user_repositories: list):
        """Coroutine: refresh one stored repository, or create it if new."""
        is_exist_repo = False
        for idx, user_repo in enumerate(user_repositories):
            if user_repo.full_name == repository.full_name and user_repo.owner == repository.owner:
                is_exist_repo = True
                # NOTE(review): popping while enumerating shifts later indices;
                # this looks safe only because at most one entry matches — confirm.
                user_repositories.pop(idx)
                update_fields = []
                contribution = 0
                # Check whether the user is a contributor to the repository.
                async with aiohttp.ClientSession() as session:
                    async with session.get(repository.contributors_url, headers=settings.GITHUB_API_HEADER) as res:
                        response_data = await res.text()
                        response_status = res.status
                if response_status == 200:
                    for contributor in json.loads(response_data):
                        # GitHub usernames are case-insensitive, so compare lower-cased.
                        if contributor.get('type') == 'User' and \
                                contributor.get('login').lower() == self.github_user.username.lower():
                            contribution = contributor.get('contributions')
                            self.record_language(repository.languages_url)
                            if user_repo.contribution != contribution:
                                user_repo.contribution = contribution
                                update_fields.append('contribution')
                            if user_repo.stargazers_count != repository.stargazers_count:
                                user_repo.stargazers_count = repository.stargazers_count
                                update_fields.append('stargazers_count')
                            if update_fields:
                                user_repo.save(update_fields=update_fields)
                            self.total_stargazers_count += repository.stargazers_count
                            self.total_contribution += contribution
                            break
        # A repository we have not stored before.
        if not is_exist_repo:
            _contribution, new_repository = self.create_repository(repository)
            if new_repository:
                self.new_repository_list.append(new_repository)
                self.total_contribution += _contribution

    async def update_repository_futures(self, repositories, user_repositories: list):
        """Fan out update_repository over *repositories* and await them all."""
        futures = [
            asyncio.ensure_future(self.update_repository(repository, user_repositories)) for repository in repositories
        ]
        await asyncio.gather(*futures)
f7f9795116bbb7e1b5db590159da85ef14de3507 | 11,880 | py | Python | mapping.py | adamkoziol/mixedReferenceMapping | 5c3d04172a85246f237d59cec498b51ca11f196a | [
"MIT"
] | null | null | null | mapping.py | adamkoziol/mixedReferenceMapping | 5c3d04172a85246f237d59cec498b51ca11f196a | [
"MIT"
] | null | null | null | mapping.py | adamkoziol/mixedReferenceMapping | 5c3d04172a85246f237d59cec498b51ca11f196a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from threading import Thread
from Bio.Sequencing.Applications import *
import SPAdesPipeline.OLCspades.metadataprinter as metadataprinter
from SPAdesPipeline.OLCspades.bowtie import *
from geneSipprV2.objectOriented.createObject import *
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__author__ = 'adamkoziol'
class Mapper(object):
def mapping(self):
# Create the object
printtime('Moving files and creating objects', self.starttime)
self.runmetadata = ObjectCreation(self)
metadataprinter.MetadataPrinter(self)
printtime('Performing reference mapping', self.starttime)
for i in range(len(self.runmetadata.samples)):
# Send the threads to
threads = Thread(target=self.map, args=())
# Set the daemon to True - something to do with thread management
threads.setDaemon(True)
# Start the threading
threads.start()
# In order to keep from running too many cpu intensive processes concurrently (multi-threaded applications
# being run in a multi-threaded fashion), decrease the number of threads used in the applications to a minimum
# of four depending on how many samples are being processed
numthreads = self.cpus / len(self.runmetadata.samples)
numthreads = numthreads if numthreads >= 4 else 4
for sample in self.runmetadata.samples:
# Create the analysis type attribute
setattr(sample, self.analysistype, GenObject())
# Set the path/name for the bam and sorted bam files to be created
sample[self.analysistype].bam = '{}/{}.bam'.format(sample.general.outputdirectory, sample.name)
sample[self.analysistype].sortedbam = '{}/{}_sorted.bam'.format(sample.general.outputdirectory, sample.name)
# Set the output name depending on whether the bam files are to be sorted or not
output = sample[self.analysistype].bam if not self.sort else sample[self.analysistype].sortedbam
# Remove the file extension of the bait file for use in the indexing command
sample[self.analysistype].referencenoext = self.referencefile.split('.')[0]
# Use bowtie2 wrapper to create index the target file
bowtie2build = Bowtie2BuildCommandLine(reference=self.referencefile,
bt2=sample[self.analysistype].referencenoext)
# If the indexing option is specified, include the sort command
samsort = SamtoolsSortCommandline(input_bam=output,
o=True,
out_prefix="-")
# Create a list of programs to which data are piped as part of the reference mapping
samtools = [
# Use samtools wrapper to set up the samtools view
SamtoolsViewCommandline(b=True,
S=True,
input_file="-")
]
if self.sort:
samtools.append(samsort)
# Add custom parameters to a dictionary to be used in the bowtie2 alignment wrapper
if len(sample.general.trimmedcorrectedfastqfiles) == 1:
indict = {'-U': sample.general.trimmedcorrectedfastqfiles[0],
'--threads': numthreads
}
else:
indict = {'m1': sample.general.trimmedcorrectedfastqfiles[0],
'm2': sample.general.trimmedcorrectedfastqfiles[1],
'--threads': numthreads
}
# Create the bowtie2 reference mapping command
bowtie2align = Bowtie2CommandLine(bt2=sample[self.analysistype].referencenoext,
threads=numthreads,
samtools=samtools,
**indict)
# Add the commands (as strings) to the metadata
sample[self.analysistype].bowtie2align = str(bowtie2align)
sample[self.analysistype].bowtie2build = str(bowtie2build)
# Get variables ready for the bam moving step
sample[self.analysistype].bampath = os.path.join(self.path, 'bamfiles')
sample[self.analysistype].bamcollection = \
'{}/{}'.format(sample[self.analysistype].bampath, os.path.basename(sample[self.analysistype].bam))
# Add the commands to the queue. Note that the commands would usually be set as attributes of the sample
# but there was an issue with their serialization when printing out the metadata
if not os.path.isfile(sample[self.analysistype].referencenoext + '.1.bt2'):
stdoutbowtieindex, stderrbowtieindex = map(StringIO,
bowtie2build(cwd=self.referencepath))
# Write any error to a log file
if stderrbowtieindex:
# Write the standard error to log, bowtie2 puts alignment summary here
with open(os.path.join(self.referencepath,
'{}_bowtie_index.log'.format(self.analysistype)), 'ab+') as log:
log.writelines(logstr(bowtie2build, stderrbowtieindex.getvalue(),
stdoutbowtieindex.getvalue()))
# Close the stdout and stderr streams
stdoutbowtieindex.close()
stderrbowtieindex.close()
# Populate the queue
self.mapqueue.put((sample, bowtie2build, bowtie2align, output))
self.mapqueue.join()
metadataprinter.MetadataPrinter(self)
def map(self):
import shutil
while True:
# Get the necessary values from the queue
sample, bowtie2build, bowtie2align, output = self.mapqueue.get()
# Only run the functions if the sorted bam files and the indexed bait file do not exist
if not os.path.isfile(output) and not os.path. \
isfile(sample[self.analysistype].bamcollection):
# Set stdout to a stringIO stream
stdout, stderr = map(StringIO, bowtie2align(cwd=sample.general.outputdirectory))
if stderr:
# Write the standard error to log, bowtie2 puts alignment summary here
with open(os.path.join(sample.general.outputdirectory,
'{}_bowtie_samtools.log'.format(self.analysistype)), 'ab+') as log:
log.writelines(logstr(bowtie2align, stderr.getvalue(), stdout.getvalue()))
stdout.close()
stderr.close()
if self.sort:
# Perform indexing
# Only index if the index file doesn't exist
if not os.path.isfile(output + '.bai'):
samtoolsindex = SamtoolsIndexCommandline(input=output)
# Set stdout to a stringIO stream
stdout, stderr = map(StringIO, samtoolsindex(cwd=sample.general.outputdirectory))
if stderr:
# Write the standard error to log, bowtie2 puts alignment summary here
with open(os.path.join(sample.general.outputdirectory,
'{}_bowtie_index.log'.format(self.analysistype)), 'ab+') as log:
log.writelines(logstr(samtoolsindex, stderr.getvalue(), stdout.getvalue()))
stdout.close()
stderr.close()
else:
make_path(sample[self.analysistype].bampath)
# Move the bam file to a common location
if not os.path.isfile(sample[self.analysistype].bamcollection):
shutil.move(sample[self.analysistype].bam, sample[self.analysistype].bamcollection)
self.mapqueue.task_done()
def __init__(self, args, pipelinecommit, startingtime, scriptpath):
from Queue import Queue
import multiprocessing
from glob import glob
# Initialise variables
self.commit = str(pipelinecommit)
self.starttime = startingtime
self.homepath = scriptpath
# Define variables based on supplied arguments
self.args = args
self.path = os.path.join(args.path, '')
assert os.path.isdir(self.path), u'Supplied path is not a valid directory {0!r:s}'.format(self.path)
self.sequencepath = os.path.join(args.sequencepath, '')
assert os.path.isdir(self.sequencepath), u'Sequence folder is not a valid directory {0!r:s}' \
.format(self.sequencepath)
self.referencepath = os.path.join(args.referencepath, '')
assert os.path.isdir(self.sequencepath), u'Reference folder is not a valid directory {0!r:s}' \
.format(self.referencepath)
try:
self.referencefile = glob('{}*.fa*'.format(self.referencepath))[0]
except IndexError:
print 'Cannot find a .fa/.fas/.fasta reference file in the supplied reference path: {}' \
.format(self.referencepath)
quit()
# Use the argument for the number of threads to use, or default to the number of cpus in the system
self.cpus = int(args.threads if args.threads else multiprocessing.cpu_count())
# Determine whether BAM files need to be sorted and indexed
self.sort = args.index
self.runmetadata = MetadataObject()
self.mapqueue = Queue(maxsize=self.cpus)
self.analysistype = 'referencemapping'
# Run the analyses
self.mapping()
if __name__ == '__main__':
    import subprocess
    import time
    import os
    # Argument parser for user-inputted values, and a nifty help menu
    from argparse import ArgumentParser
    # Get the current commit of the pipeline from git.
    # Extract the path of the current script from the full path + file name
    homepath = os.path.split(os.path.abspath(__file__))[0]
    # Find the commit of the script by running a command to change to the directory containing the script and run
    # a git command to return the short version of the commit hash
    commit = subprocess.Popen('cd {} && git rev-parse --short HEAD'.format(homepath),
                              shell=True, stdout=subprocess.PIPE).communicate()[0].rstrip()
    # Parser for arguments
    parser = ArgumentParser(description='Perform modelling of parameters for GeneSipping')
    parser.add_argument('path',
                        help='Specify input directory')
    parser.add_argument('-s', '--sequencepath',
                        required=True,
                        help='Path of .fastq(.gz) files to process.')
    parser.add_argument('-r', '--referencepath',
                        required=True,
                        help='Path to folder containing a single fasta file to be used as the reference genome')
    parser.add_argument('-t', '--threads',
                        help='Number of threads. Default is the number of cores in the system')
    parser.add_argument('-i', '--index',
                        action='store_true',
                        help='Sort and index the bam files')
    # Get the arguments into an object
    arguments = parser.parse_args()
    # Define the start time
    start = time.time()
    # Run the script
    Mapper(arguments, commit, start, homepath)
    # Print a bold, green exit statement (Python 2 print statement)
    print '\033[92m' + '\033[1m' + "\nElapsed Time: %0.2f seconds" % (time.time() - start) + '\033[0m'
| 55.255814 | 120 | 0.599074 |
from threading import Thread
from Bio.Sequencing.Applications import *
import SPAdesPipeline.OLCspades.metadataprinter as metadataprinter
from SPAdesPipeline.OLCspades.bowtie import *
from geneSipprV2.objectOriented.createObject import *
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__author__ = 'adamkoziol'
class Mapper(object):
def mapping(self):
printtime('Moving files and creating objects', self.starttime)
self.runmetadata = ObjectCreation(self)
metadataprinter.MetadataPrinter(self)
printtime('Performing reference mapping', self.starttime)
for i in range(len(self.runmetadata.samples)):
threads = Thread(target=self.map, args=())
threads.setDaemon(True)
threads.start()
numthreads = self.cpus / len(self.runmetadata.samples)
numthreads = numthreads if numthreads >= 4 else 4
for sample in self.runmetadata.samples:
setattr(sample, self.analysistype, GenObject())
sample[self.analysistype].bam = '{}/{}.bam'.format(sample.general.outputdirectory, sample.name)
sample[self.analysistype].sortedbam = '{}/{}_sorted.bam'.format(sample.general.outputdirectory, sample.name)
output = sample[self.analysistype].bam if not self.sort else sample[self.analysistype].sortedbam
sample[self.analysistype].referencenoext = self.referencefile.split('.')[0]
bowtie2build = Bowtie2BuildCommandLine(reference=self.referencefile,
bt2=sample[self.analysistype].referencenoext)
samsort = SamtoolsSortCommandline(input_bam=output,
o=True,
out_prefix="-")
samtools = [
SamtoolsViewCommandline(b=True,
S=True,
input_file="-")
]
if self.sort:
samtools.append(samsort)
if len(sample.general.trimmedcorrectedfastqfiles) == 1:
indict = {'-U': sample.general.trimmedcorrectedfastqfiles[0],
'--threads': numthreads
}
else:
indict = {'m1': sample.general.trimmedcorrectedfastqfiles[0],
'm2': sample.general.trimmedcorrectedfastqfiles[1],
'--threads': numthreads
}
bowtie2align = Bowtie2CommandLine(bt2=sample[self.analysistype].referencenoext,
threads=numthreads,
samtools=samtools,
**indict)
sample[self.analysistype].bowtie2align = str(bowtie2align)
sample[self.analysistype].bowtie2build = str(bowtie2build)
sample[self.analysistype].bampath = os.path.join(self.path, 'bamfiles')
sample[self.analysistype].bamcollection = \
'{}/{}'.format(sample[self.analysistype].bampath, os.path.basename(sample[self.analysistype].bam))
if not os.path.isfile(sample[self.analysistype].referencenoext + '.1.bt2'):
stdoutbowtieindex, stderrbowtieindex = map(StringIO,
bowtie2build(cwd=self.referencepath))
if stderrbowtieindex:
with open(os.path.join(self.referencepath,
'{}_bowtie_index.log'.format(self.analysistype)), 'ab+') as log:
log.writelines(logstr(bowtie2build, stderrbowtieindex.getvalue(),
stdoutbowtieindex.getvalue()))
stdoutbowtieindex.close()
stderrbowtieindex.close()
self.mapqueue.put((sample, bowtie2build, bowtie2align, output))
self.mapqueue.join()
metadataprinter.MetadataPrinter(self)
    def map(self):
        """Daemon worker: consume (sample, build, align, output) tuples from
        self.mapqueue, run the bowtie2 alignment, then either index the
        sorted BAM or move the unsorted BAM to the shared collection folder.

        Runs forever; each processed tuple is acknowledged with task_done()
        so mapqueue.join() in the producer can unblock.
        """
        import shutil
        while True:
            sample, bowtie2build, bowtie2align, output = self.mapqueue.get()
            # Skip alignment when either the per-sample BAM or its copy in
            # the shared collection folder already exists (resume support).
            if not os.path.isfile(output) and not os.path. \
                    isfile(sample[self.analysistype].bamcollection):
                # NOTE: this calls the builtin map(), not this method.
                stdout, stderr = map(StringIO, bowtie2align(cwd=sample.general.outputdirectory))
                if stderr:
                    # bowtie2 writes its alignment summary to stderr; keep it
                    with open(os.path.join(sample.general.outputdirectory,
                                           '{}_bowtie_samtools.log'.format(self.analysistype)), 'ab+') as log:
                        log.writelines(logstr(bowtie2align, stderr.getvalue(), stdout.getvalue()))
                stdout.close()
                stderr.close()
            if self.sort:
                # Sorted pipeline: create a .bai index next to the BAM.
                if not os.path.isfile(output + '.bai'):
                    samtoolsindex = SamtoolsIndexCommandline(input=output)
                    # Set stdout to a stringIO stream
                    stdout, stderr = map(StringIO, samtoolsindex(cwd=sample.general.outputdirectory))
                    if stderr:
                        # Write the standard error to log, bowtie2 puts alignment summary here
                        with open(os.path.join(sample.general.outputdirectory,
                                               '{}_bowtie_index.log'.format(self.analysistype)), 'ab+') as log:
                            log.writelines(logstr(samtoolsindex, stderr.getvalue(), stdout.getvalue()))
                    stdout.close()
                    stderr.close()
            else:
                # Unsorted pipeline: gather BAMs into one common directory.
                make_path(sample[self.analysistype].bampath)
                # Move the bam file to a common location
                if not os.path.isfile(sample[self.analysistype].bamcollection):
                    shutil.move(sample[self.analysistype].bam, sample[self.analysistype].bamcollection)
            self.mapqueue.task_done()
def __init__(self, args, pipelinecommit, startingtime, scriptpath):
from Queue import Queue
import multiprocessing
from glob import glob
# Initialise variables
self.commit = str(pipelinecommit)
self.starttime = startingtime
self.homepath = scriptpath
# Define variables based on supplied arguments
self.args = args
self.path = os.path.join(args.path, '')
assert os.path.isdir(self.path), u'Supplied path is not a valid directory {0!r:s}'.format(self.path)
self.sequencepath = os.path.join(args.sequencepath, '')
assert os.path.isdir(self.sequencepath), u'Sequence folder is not a valid directory {0!r:s}' \
.format(self.sequencepath)
self.referencepath = os.path.join(args.referencepath, '')
assert os.path.isdir(self.sequencepath), u'Reference folder is not a valid directory {0!r:s}' \
.format(self.referencepath)
try:
self.referencefile = glob('{}*.fa*'.format(self.referencepath))[0]
except IndexError:
print 'Cannot find a .fa/.fas/.fasta reference file in the supplied reference path: {}' \
.format(self.referencepath)
quit()
# Use the argument for the number of threads to use, or default to the number of cpus in the system
self.cpus = int(args.threads if args.threads else multiprocessing.cpu_count())
# Determine whether BAM files need to be sorted and indexed
self.sort = args.index
self.runmetadata = MetadataObject()
self.mapqueue = Queue(maxsize=self.cpus)
self.analysistype = 'referencemapping'
# Run the analyses
self.mapping()
if __name__ == '__main__':
    # NOTE: this script is Python 2 only (print statements, Queue module).
    import subprocess
    import time
    import os
    # Argument parser for user-inputted values, and a nifty help menu
    from argparse import ArgumentParser
    # Get the current commit of the pipeline from git
    # Extract the path of the current script from the full path + file name
    homepath = os.path.split(os.path.abspath(__file__))[0]
    # Find the commit of the script by running a command to change to the directory containing the script and run
    # a git command to return the short version of the commit hash
    commit = subprocess.Popen('cd {} && git rev-parse --short HEAD'.format(homepath),
                              shell=True, stdout=subprocess.PIPE).communicate()[0].rstrip()
    # Parser for arguments
    parser = ArgumentParser(description='Perform modelling of parameters for GeneSipping')
    parser.add_argument('path',
                        help='Specify input directory')
    parser.add_argument('-s', '--sequencepath',
                        required=True,
                        help='Path of .fastq(.gz) files to process.')
    parser.add_argument('-r', '--referencepath',
                        required=True,
                        help='Path to folder containing a single fasta file to be used as the reference genome')
    parser.add_argument('-t', '--threads',
                        help='Number of threads. Default is the number of cores in the system')
    parser.add_argument('-i', '--index',
                        action='store_true',
                        help='Sort and index the bam files')
    # Get the arguments into an object
    arguments = parser.parse_args()
    # Define the start time
    start = time.time()
    # Run the script
    Mapper(arguments, commit, start, homepath)
    # Print a bold, green exit statement
    print '\033[92m' + '\033[1m' + "\nElapsed Time: %0.2f seconds" % (time.time() - start) + '\033[0m'
| false | true |
f7f979a5902f70cdcbf2377197c39d30c8952c5d | 3,968 | py | Python | src/lib/detectors/ddd.py | tanaydw/CenterNet | 91c2ccd2c8a063db8c8ec101adfd4c6830cd47eb | [
"MIT"
] | null | null | null | src/lib/detectors/ddd.py | tanaydw/CenterNet | 91c2ccd2c8a063db8c8ec101adfd4c6830cd47eb | [
"MIT"
] | null | null | null | src/lib/detectors/ddd.py | tanaydw/CenterNet | 91c2ccd2c8a063db8c8ec101adfd4c6830cd47eb | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
from progress.bar import Bar
import time
import torch
from models.decode import ddd_decode
from models.utils import flip_tensor
from utils.image import get_affine_transform
from utils.post_process import ddd_post_process
from utils.debugger import Debugger
from utils.ddd_utils import compute_box_3d, project_to_image, alpha2rot_y
from utils.ddd_utils import draw_box_3d, unproject_2d_to_3d
from .base_detector import BaseDetector
class DddDetector(BaseDetector):
    """Monocular 3D detection head (CenterNet 'ddd' task).

    Decodes heatmap/rotation/depth/dimension network outputs into 3D boxes
    and post-processes them using a camera calibration matrix.
    """

    def __init__(self, opt):
        super(DddDetector, self).__init__(opt)
        # Default 3x4 KITTI-style projection matrix; used whenever the caller
        # does not pass a per-image calibration to pre_process().
        self.calib = np.array([[707.0493, 0, 604.0814, 45.75831],
                               [0, 707.0493, 180.5066, -0.3454157],
                               [0, 0, 1., 0.004981016]], dtype=np.float32)

    def pre_process(self, image, scale, calib=None):
        """Warp and normalise *image* to the network input resolution.

        Returns (images, meta): a 1xCxHxW float tensor and a dict carrying
        the affine centre/scale, output resolution and calibration matrix
        needed by post_process().
        """
        height, width = image.shape[0:2]
        inp_height, inp_width = self.opt.input_h, self.opt.input_w
        c = np.array([width / 2, height / 2], dtype=np.float32)
        if self.opt.keep_res:
            s = np.array([inp_width, inp_height], dtype=np.int32)
        else:
            s = np.array([width, height], dtype=np.int32)

        trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height])
        # The affine warp performs the resize, so no separate cv2.resize is
        # required (removed a leftover commented-out resize).
        inp_image = cv2.warpAffine(
            image, trans_input, (inp_width, inp_height),
            flags=cv2.INTER_LINEAR)
        inp_image = (inp_image.astype(np.float32) / 255.)
        inp_image = (inp_image - self.mean) / self.std
        images = inp_image.transpose(2, 0, 1)[np.newaxis, ...]
        calib = np.array(calib, dtype=np.float32) if calib is not None \
            else self.calib
        images = torch.from_numpy(images)
        meta = {'c': c, 's': s,
                'out_height': inp_height // self.opt.down_ratio,
                'out_width': inp_width // self.opt.down_ratio,
                'calib': calib}
        return images, meta

    def process(self, images, return_time=False):
        """Run the network and decode raw heads into detections."""
        with torch.no_grad():
            torch.cuda.synchronize()
            output = self.model(images)[-1]
            output['hm'] = output['hm'].sigmoid_()
            # Invert the sigmoid-compressed depth back to metric depth.
            output['dep'] = 1. / (output['dep'].sigmoid() + 1e-6) - 1.
            wh = output['wh'] if self.opt.reg_bbox else None
            reg = output['reg'] if self.opt.reg_offset else None
            torch.cuda.synchronize()
            forward_time = time.time()
            dets = ddd_decode(output['hm'], output['rot'], output['dep'],
                              output['dim'], wh=wh, reg=reg, K=self.opt.K)
        if return_time:
            return output, dets, forward_time
        else:
            return output, dets

    def post_process(self, dets, meta, scale=1):
        """Map decoded detections back to the original image frame."""
        dets = dets.detach().cpu().numpy()
        detections = ddd_post_process(
            dets.copy(), [meta['c']], [meta['s']], [meta['calib']], self.opt)
        # Remember the calibration for show_results().
        self.this_calib = meta['calib']
        return detections[0]

    def merge_outputs(self, detections):
        """Threshold per-class detections by score (single-scale pipeline)."""
        results = detections[0]
        for j in range(1, self.num_classes + 1):
            # Bug fix: was `len(results[j] > 0)`, which applied len() to the
            # elementwise comparison instead of the detection array.
            if len(results[j]) > 0:
                keep_inds = (results[j][:, -1] > self.opt.peak_thresh)
                results[j] = results[j][keep_inds]
        return results

    def debug(self, debugger, images, dets, output, scale=1):
        """Visualise the predicted heatmap and raw centre detections."""
        dets = dets.detach().cpu().numpy()
        img = images[0].detach().cpu().numpy().transpose(1, 2, 0)
        # Undo normalisation for display.
        img = ((img * self.std + self.mean) * 255).astype(np.uint8)
        pred = debugger.gen_colormap(output['hm'][0].detach().cpu().numpy())
        debugger.add_blend_img(img, pred, 'pred_hm')
        debugger.add_ct_detection(
            img, dets[0], show_box=self.opt.reg_bbox,
            center_thresh=self.opt.vis_thresh, img_id='det_pred')

    def show_results(self, debugger, image, results):
        """Draw projected 3D boxes and a bird's-eye view of the results."""
        debugger.add_3d_detection(
            image, results, self.this_calib,
            center_thresh=self.opt.vis_thresh, img_id='add_pred')
        debugger.add_bird_view(
            results, center_thresh=self.opt.vis_thresh, img_id='bird_pred')
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
from progress.bar import Bar
import time
import torch
from models.decode import ddd_decode
from models.utils import flip_tensor
from utils.image import get_affine_transform
from utils.post_process import ddd_post_process
from utils.debugger import Debugger
from utils.ddd_utils import compute_box_3d, project_to_image, alpha2rot_y
from utils.ddd_utils import draw_box_3d, unproject_2d_to_3d
from .base_detector import BaseDetector
class DddDetector(BaseDetector):
    """Monocular 3D detection head (CenterNet 'ddd' task)."""

    def __init__(self, opt):
        super(DddDetector, self).__init__(opt)
        # Default 3x4 KITTI-style camera projection matrix; used when the
        # caller passes no per-image calibration to pre_process().
        self.calib = np.array([[707.0493, 0, 604.0814, 45.75831],
                               [0, 707.0493, 180.5066, -0.3454157],
                               [0, 0, 1., 0.004981016]], dtype=np.float32)

    def pre_process(self, image, scale, calib=None):
        """Warp/normalise *image* to network input size; return (tensor, meta)."""
        height, width = image.shape[0:2]
        inp_height, inp_width = self.opt.input_h, self.opt.input_w
        c = np.array([width / 2, height / 2], dtype=np.float32)
        if self.opt.keep_res:
            s = np.array([inp_width, inp_height], dtype=np.int32)
        else:
            s = np.array([width, height], dtype=np.int32)

        trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height])
        resized_image = image  # the affine warp below performs the resize
        inp_image = cv2.warpAffine(
            resized_image, trans_input, (inp_width, inp_height),
            flags=cv2.INTER_LINEAR)
        inp_image = (inp_image.astype(np.float32) / 255.)
        inp_image = (inp_image - self.mean) / self.std
        images = inp_image.transpose(2, 0, 1)[np.newaxis, ...]
        calib = np.array(calib, dtype=np.float32) if calib is not None \
            else self.calib
        images = torch.from_numpy(images)
        meta = {'c': c, 's': s,
                'out_height': inp_height // self.opt.down_ratio,
                'out_width': inp_width // self.opt.down_ratio,
                'calib': calib}
        return images, meta

    def process(self, images, return_time=False):
        """Run the network and decode raw heads into detections."""
        with torch.no_grad():
            torch.cuda.synchronize()
            output = self.model(images)[-1]
            output['hm'] = output['hm'].sigmoid_()
            # Invert sigmoid-compressed depth back to metric depth.
            output['dep'] = 1. / (output['dep'].sigmoid() + 1e-6) - 1.
            wh = output['wh'] if self.opt.reg_bbox else None
            reg = output['reg'] if self.opt.reg_offset else None
            torch.cuda.synchronize()
            forward_time = time.time()
            dets = ddd_decode(output['hm'], output['rot'], output['dep'],
                              output['dim'], wh=wh, reg=reg, K=self.opt.K)
        if return_time:
            return output, dets, forward_time
        else:
            return output, dets

    def post_process(self, dets, meta, scale=1):
        """Map decoded detections back to the original image frame."""
        dets = dets.detach().cpu().numpy()
        detections = ddd_post_process(
            dets.copy(), [meta['c']], [meta['s']], [meta['calib']], self.opt)
        self.this_calib = meta['calib']  # remembered for show_results()
        return detections[0]

    def merge_outputs(self, detections):
        """Threshold per-class detections by score (single-scale pipeline)."""
        results = detections[0]
        for j in range(1, self.num_classes + 1):
            # NOTE(review): `len(results[j] > 0)` lens the elementwise
            # comparison; presumably `len(results[j]) > 0` was intended.
            if len(results[j] > 0):
                keep_inds = (results[j][:, -1] > self.opt.peak_thresh)
                results[j] = results[j][keep_inds]
        return results

    def debug(self, debugger, images, dets, output, scale=1):
        """Visualise the predicted heatmap and raw centre detections."""
        dets = dets.detach().cpu().numpy()
        img = images[0].detach().cpu().numpy().transpose(1, 2, 0)
        # Undo normalisation for display.
        img = ((img * self.std + self.mean) * 255).astype(np.uint8)
        pred = debugger.gen_colormap(output['hm'][0].detach().cpu().numpy())
        debugger.add_blend_img(img, pred, 'pred_hm')
        debugger.add_ct_detection(
            img, dets[0], show_box=self.opt.reg_bbox,
            center_thresh=self.opt.vis_thresh, img_id='det_pred')

    def show_results(self, debugger, image, results):
        """Draw projected 3D boxes and a bird's-eye view of the results."""
        debugger.add_3d_detection(
            image, results, self.this_calib,
            center_thresh=self.opt.vis_thresh, img_id='add_pred')
        debugger.add_bird_view(
            results, center_thresh=self.opt.vis_thresh, img_id='bird_pred')
f7f979bd9ca137771d56153f1cf48fdee6e1509e | 622 | py | Python | tests/models.py | ThibaultVigier/django-auth-ldap | 5c3fbdc9058bdaffb40127dd77d0bf692ecec379 | [
"BSD-2-Clause"
] | 1 | 2020-10-20T03:35:08.000Z | 2020-10-20T03:35:08.000Z | tests/models.py | ThibaultVigier/django-auth-ldap | 5c3fbdc9058bdaffb40127dd77d0bf692ecec379 | [
"BSD-2-Clause"
] | null | null | null | tests/models.py | ThibaultVigier/django-auth-ldap | 5c3fbdc9058bdaffb40127dd77d0bf692ecec379 | [
"BSD-2-Clause"
] | null | null | null | from __future__ import unicode_literals
from django.contrib.auth.models import AbstractBaseUser
from django.db import models
class TestUser(AbstractBaseUser):
    """Minimal custom user model for the django-auth-ldap test suite."""

    # Login name; acts as the username (see USERNAME_FIELD below).
    identifier = models.CharField(max_length=40, unique=True, db_index=True)
    # Numeric id, presumably mirroring the LDAP uidNumber attribute.
    uid_number = models.IntegerField()

    USERNAME_FIELD = 'identifier'

    def get_full_name(self):
        """Return the identifier; no separate display name is stored."""
        return self.identifier

    def get_short_name(self):
        """Return the identifier (same as the full name)."""
        return self.identifier

    def get_first_name(self):
        """Return a fixed first name."""
        return 'Alice'

    def set_first_name(self, value):
        # Always raises: presumably lets tests verify that writes to this
        # read-only property are reported rather than silently applied.
        raise Exception('Oops...')

    # Read-only property backed by the getter/setter pair above.
    first_name = property(get_first_name, set_first_name)
| 23.923077 | 76 | 0.726688 | from __future__ import unicode_literals
from django.contrib.auth.models import AbstractBaseUser
from django.db import models
class TestUser(AbstractBaseUser):
    """Minimal custom user model for the django-auth-ldap test suite."""

    # Login name; acts as the username (see USERNAME_FIELD below).
    identifier = models.CharField(max_length=40, unique=True, db_index=True)
    # Numeric id, presumably mirroring the LDAP uidNumber attribute.
    uid_number = models.IntegerField()

    USERNAME_FIELD = 'identifier'

    def get_full_name(self):
        """Return the identifier; no separate display name is stored."""
        return self.identifier

    def get_short_name(self):
        """Return the identifier (same as the full name)."""
        return self.identifier

    def get_first_name(self):
        """Return a fixed first name."""
        return 'Alice'

    def set_first_name(self, value):
        # Always raises: presumably lets tests verify that writes to this
        # read-only property are reported rather than silently applied.
        raise Exception('Oops...')

    # Read-only property backed by the getter/setter pair above.
    first_name = property(get_first_name, set_first_name)
| true | true |
f7f97c9ed4943d03be2e548c11e3a04ace6831f2 | 52 | py | Python | app/__init__.py | jiazifa/blog | 4165d69af9ee9adf573e7e4ed6c913e53254901b | [
"MIT"
] | null | null | null | app/__init__.py | jiazifa/blog | 4165d69af9ee9adf573e7e4ed6c913e53254901b | [
"MIT"
] | null | null | null | app/__init__.py | jiazifa/blog | 4165d69af9ee9adf573e7e4ed6c913e53254901b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from .app import create_app | 17.333333 | 27 | 0.634615 |
from .app import create_app | true | true |
f7f97cc08531b78399b4a00dbd96d42936b9dfd3 | 4,545 | py | Python | app.py | shanur00029/Faster-R-CNN-model-dockerization | ace64a39c040df93deaf2f61de73a2561498a9eb | [
"Unlicense"
] | 2 | 2020-02-10T13:11:01.000Z | 2020-02-14T16:42:03.000Z | app.py | shanur00029/Faster-R-CNN-model-dockerization | ace64a39c040df93deaf2f61de73a2561498a9eb | [
"Unlicense"
] | null | null | null | app.py | shanur00029/Faster-R-CNN-model-dockerization | ace64a39c040df93deaf2f61de73a2561498a9eb | [
"Unlicense"
] | 3 | 2020-02-10T13:10:55.000Z | 2020-02-26T04:42:37.000Z | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from flask import Flask, url_for, send_from_directory, request
import logging, os
from werkzeug import secure_filename
from test_deploy import testfunction, mainf
import random
import pprint
import sys
import time
import numpy as np
from optparse import OptionParser
import pickle
import math
import cv2
import copy
from matplotlib import pyplot as plt
import tensorflow as tf
import pandas as pd
import os
from flask import Flask, render_template, request, redirect, url_for, send_from_directory
from sklearn.metrics import average_precision_score
from keras import backend as K
from keras.optimizers import Adam, SGD, RMSprop
from keras.layers import Flatten, Dense, Input, Conv2D, MaxPooling2D, Dropout
from keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, TimeDistributed
from keras.engine.topology import get_source_inputs
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.objectives import categorical_crossentropy
from keras.models import Model
from keras.utils import generic_utils
from keras.engine import Layer, InputSpec
from keras import initializers, regularizers
#app = Flask(__name__)
# Flask application serving the Faster R-CNN inference endpoint.
app = Flask(__name__, template_folder='templates')
# Log to server.log in the working directory at INFO level.
file_handler = logging.FileHandler('server.log')
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
# Uploaded images are stored under <project>/uploads/.
PROJECT_HOME = os.path.dirname(os.path.realpath(__file__))
UPLOAD_FOLDER = '{}/uploads/'.format(PROJECT_HOME)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
@app.route('/')
def index():
    """Render the upload landing page (templates/hello.html)."""
    return render_template('hello.html')
def create_new_folder(local_dir):
    """Ensure *local_dir* exists on disk and return its path unchanged.

    Intermediate directories are created as needed; an existing directory
    is left untouched.
    """
    # Only create the tree when it is not already present.
    if not os.path.exists(local_dir):
        os.makedirs(local_dir)
    return local_dir
@app.route('/upload', methods = ['POST'])
def upload():
    """Accept a multipart image upload, run detection, return the result.

    Expects the file under the form field 'image'. The saved image is fed to
    mainf() and the annotated output image is streamed back to the client.
    """
    app.logger.info(PROJECT_HOME)
    if request.method == 'POST' and request.files['image']:
        app.logger.info(app.config['UPLOAD_FOLDER'])
        img = request.files['image']
        # Sanitise the client-supplied filename before writing to disk.
        img_name = secure_filename(img.filename)
        create_new_folder(app.config['UPLOAD_FOLDER'])
        saved_path = os.path.join(app.config['UPLOAD_FOLDER'], img_name)
        app.logger.info("saving {}".format(saved_path))
        img.save(saved_path)
        # Run the Faster R-CNN model; returns where the annotated image was
        # written (directory, filename).
        im_path, im_name = mainf(saved_path)
        return send_from_directory(im_path, im_name, as_attachment=False)
    else:
        return "Where is the image?"
class Config:
    """Hyper-parameter container for the Faster R-CNN pipeline.

    A plain attribute bag: construct once and read the fields directly.
    """

    def __init__(self):
        # Logging / backbone selection.
        self.verbose = True
        self.network = 'vgg'

        # Data-augmentation switches (all disabled by default).
        self.use_horizontal_flips = False
        self.use_vertical_flips = False
        self.rot_90 = False

        # Anchor geometry. Scales are halved relative to the paper's
        # [128, 256, 512] because im_size below is also halved.
        self.anchor_box_scales = [64, 128, 256]
        root2 = math.sqrt(2)
        self.anchor_box_ratios = [[1, 1],
                                  [1. / root2, 2. / root2],
                                  [2. / root2, 1. / root2]]

        # Input preprocessing. Paper uses 600 for the smallest side; 300
        # saves training time.
        self.im_size = 300
        self.img_channel_mean = [103.939, 116.779, 123.68]
        self.img_scaling_factor = 1.0

        # RPN / classifier-head geometry.
        self.num_rois = 4
        self.rpn_stride = 16
        self.balanced_classes = False

        # Regression-target scaling factors.
        self.std_scaling = 4.0
        self.classifier_regr_std = [8.0, 8.0, 4.0, 4.0]

        # IoU thresholds for RPN targets and classifier ROIs.
        self.rpn_min_overlap = 0.3
        self.rpn_max_overlap = 0.7
        self.classifier_min_overlap = 0.1
        self.classifier_max_overlap = 0.5

        # Filled in later by the data parser / training script.
        self.class_mapping = None
        self.model_path = None
if __name__ == '__main__':
    # Bind on all interfaces so the containerised service is reachable.
    app.run(host='0.0.0.0', debug=False)
| 30.099338 | 121 | 0.660286 | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from flask import Flask, url_for, send_from_directory, request
import logging, os
from werkzeug import secure_filename
from test_deploy import testfunction, mainf
import random
import pprint
import sys
import time
import numpy as np
from optparse import OptionParser
import pickle
import math
import cv2
import copy
from matplotlib import pyplot as plt
import tensorflow as tf
import pandas as pd
import os
from flask import Flask, render_template, request, redirect, url_for, send_from_directory
from sklearn.metrics import average_precision_score
from keras import backend as K
from keras.optimizers import Adam, SGD, RMSprop
from keras.layers import Flatten, Dense, Input, Conv2D, MaxPooling2D, Dropout
from keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, TimeDistributed
from keras.engine.topology import get_source_inputs
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.objectives import categorical_crossentropy
from keras.models import Model
from keras.utils import generic_utils
from keras.engine import Layer, InputSpec
from keras import initializers, regularizers
# Flask application serving the Faster R-CNN inference endpoint.
app = Flask(__name__, template_folder='templates')
# Log to server.log in the working directory at INFO level.
file_handler = logging.FileHandler('server.log')
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
# Uploaded images are stored under <project>/uploads/.
PROJECT_HOME = os.path.dirname(os.path.realpath(__file__))
UPLOAD_FOLDER = '{}/uploads/'.format(PROJECT_HOME)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
@app.route('/')
def index():
    """Render the upload landing page (templates/hello.html)."""
    return render_template('hello.html')
def create_new_folder(local_dir):
    """Create *local_dir* (and parents) if missing; return its path."""
    newpath = local_dir
    if not os.path.exists(newpath):
        os.makedirs(newpath)
    return newpath
@app.route('/upload', methods = ['POST'])
def upload():
    """Accept a multipart image upload (field 'image'), run detection via
    mainf() and stream the annotated output image back to the client."""
    app.logger.info(PROJECT_HOME)
    if request.method == 'POST' and request.files['image']:
        app.logger.info(app.config['UPLOAD_FOLDER'])
        img = request.files['image']
        # Sanitise the client-supplied filename before writing to disk.
        img_name = secure_filename(img.filename)
        create_new_folder(app.config['UPLOAD_FOLDER'])
        saved_path = os.path.join(app.config['UPLOAD_FOLDER'], img_name)
        app.logger.info("saving {}".format(saved_path))
        img.save(saved_path)
        # mainf() returns the (directory, filename) of the annotated image.
        im_path, im_name = mainf(saved_path)
        return send_from_directory(im_path, im_name, as_attachment=False)
    else:
        return "Where is the image?"
class Config:
    """Hyper-parameter container for the Faster R-CNN pipeline."""

    def __init__(self):
        self.verbose = True
        self.network = 'vgg'
        # Data-augmentation switches (all disabled by default).
        self.use_horizontal_flips = False
        self.use_vertical_flips = False
        self.rot_90 = False
        # Anchor geometry; scales halved vs. the paper's [128, 256, 512]
        # to match the halved im_size below.
        self.anchor_box_scales = [64, 128, 256]
        self.anchor_box_ratios = [[1, 1], [1./math.sqrt(2), 2./math.sqrt(2)], [2./math.sqrt(2), 1./math.sqrt(2)]]
        # Smallest input side after resize (paper: 600).
        self.im_size = 300
        # Channel-wise mean subtracted from input images (BGR order).
        self.img_channel_mean = [103.939, 116.779, 123.68]
        self.img_scaling_factor = 1.0
        # RPN / classifier-head geometry.
        self.num_rois = 4
        self.rpn_stride = 16
        self.balanced_classes = False
        # Regression-target scaling factors.
        self.std_scaling = 4.0
        self.classifier_regr_std = [8.0, 8.0, 4.0, 4.0]
        # IoU thresholds for RPN targets and classifier ROIs.
        self.rpn_min_overlap = 0.3
        self.rpn_max_overlap = 0.7
        self.classifier_min_overlap = 0.1
        self.classifier_max_overlap = 0.5
        # Filled in later by the data parser / training script.
        self.class_mapping = None
        self.model_path = None
if __name__ == '__main__':
    # Bind on all interfaces so the containerised service is reachable.
    app.run(host='0.0.0.0', debug=False)
| true | true |
f7f97dd50702066aff03feda3fafdac8e6f30661 | 4,127 | py | Python | benchmark/test_loads.py | heroku/pghstore | 31de76a7431ca280b1d9138bd6baf1ac767ea0ea | [
"MIT"
] | 2 | 2021-03-29T06:39:04.000Z | 2021-08-04T06:40:17.000Z | benchmark/test_loads.py | heroku/pghstore | 31de76a7431ca280b1d9138bd6baf1ac767ea0ea | [
"MIT"
] | 12 | 2017-08-22T15:43:09.000Z | 2020-05-06T17:12:49.000Z | benchmark/test_loads.py | heroku/pghstore | 31de76a7431ca280b1d9138bd6baf1ac767ea0ea | [
"MIT"
] | 2 | 2017-08-19T12:24:52.000Z | 2019-10-06T18:53:49.000Z | # -*- coding: utf-8 -*-
from __future__ import print_function
import unittest
import timeit
from functools import partial
from math import floor
try:
from pghstore import _speedups as cpghstore
except ImportError:
cpghstore = None
from pghstore import _native as pypghstore
names = [
"",
'"name"=>"Norge/Noreg"',
'"name"=>"Norge/Noreg", "name:af"=>"Noorwe\xc3\xab"',
'"name"=>"Norge/Noreg", "name:af"=>"Noorwe\xc3\xab", "name:ar"=>NULL',
'"name"=>"Norge/Noreg", "name:af"=>"Noorwe\xc3\xab", "name:ar"=>"\xd8\xa7\xd9\x84\xd9\x86\xd8\xb1\xd9\x88\xd9\x8a\xd8\xac", "name:be"=>"\xd0\x9d\xd0\xb0\xd1\x80\xd0\xb2\xd0\xb5\xd0\xb3\xd1\x96\xd1\x8f", "name:br"=>"Norvegia", "name:ca"=>"Noruega", "name:cs"=>"Norsko", "name:cy"=>"Norwy", "name:da"=>"Norge", "name:de"=>"Norwegen", "name:el"=>"\xce\x9d\xce\xbf\xcf\x81\xce\xb2\xce\xb7\xce\xb3\xce\xaf\xce\xb1", "name:en"=>"Norway", "name:eo"=>"Norvegio", "name:es"=>"Noruega", "name:et"=>"Norra", "name:fa"=>"\xd9\x86\xd8\xb1\xd9\x88\xda\x98", "name:fi"=>"Norja", "name:fo"=>"Noregur", "name:fr"=>"Norv\xc3\xa8ge", "name:fy"=>"Noarwegen", "name:ga"=>"An Iorua", "name:gd"=>"Nirribhidh", "name:he"=>"\xd7\xa0\xd7\x95\xd7\xa8\xd7\x95\xd7\x95\xd7\x92\xd7\x99\xd7\x94", "name:hr"=>"Norve\xc5\xa1ka", "name:hu"=>"Norv\xc3\xa9gia", "name:hy"=>"\xd5\x86\xd5\xb8\xd6\x80\xd5\xbe\xd5\xa5\xd5\xa3\xd5\xab\xd5\xa1", "name:id"=>"Norwegia", "name:is"=>"Noregur", "name:it"=>"Norvegia", "name:ja"=>"\xe3\x83\x8e\xe3\x83\xab\xe3\x82\xa6\xe3\x82\xa7\xe3\x83\xbc", "name:la"=>"Norvegia", "name:lb"=>"Norwegen", "name:li"=>"Noorwege", "name:lt"=>"Norvegija", "name:lv"=>"Norv\xc4\x93\xc4\xa3ija", "name:mn"=>"\xd0\x9d\xd0\xbe\xd1\x80\xd0\xb2\xd0\xb5\xd0\xb3\xd0\xb8", "name:nb"=>"Norge", "name:nl"=>"Noorwegen", "name:nn"=>"Noreg", "name:no"=>"Norge", "name:pl"=>"Norwegia", "name:ru"=>"\xd0\x9d\xd0\xbe\xd1\x80\xd0\xb2\xd0\xb5\xd0\xb3\xd0\xb8\xd1\x8f", "name:sk"=>"N\xc3\xb3rsko", "name:sl"=>"Norve\xc5\xa1ka", "name:sv"=>"Norge", "name:th"=>"\xe0\xb8\x9b\xe0\xb8\xa3\xe0\xb8\xb0\xe0\xb9\x80\xe0\xb8\x97\xe0\xb8\xa8\xe0\xb8\x99\xe0\xb8\xad\xe0\xb8\xa3\xe0\xb9\x8c\xe0\xb9\x80\xe0\xb8\xa7\xe0\xb8\xa2\xe0\xb9\x8c", "name:tr"=>"Norve\xc3\xa7", "name:uk"=>"\xd0\x9d\xd0\xbe\xd1\x80\xd0\xb2\xd0\xb5\xd0\xb3\xd1\x96\xd1\x8f", "name:vi"=>"Na Uy", "name:zh"=>"\xe6\x8c\xaa\xe5\xa8\x81", "name:haw"=>"Nolewai", "name:zh_py"=>"Nuowei", "name:zh_pyt"=>"Nu\xc3\xb3w\xc4\x93i", "official_name"=>"Kongeriket Norge", 
"official_name:be"=>"\xd0\x9a\xd0\xb0\xd1\x80\xd0\xb0\xd0\xbb\xd0\xb5\xd1\x9e\xd1\x81\xd1\x82\xd0\xb2\xd0\xb0 \xd0\x9d\xd0\xb0\xd1\x80\xd0\xb2\xd0\xb5\xd0\xb3\xd1\x96\xd1\x8f", "official_name:el"=>"\xce\x92\xce\xb1\xcf\x83\xce\xaf\xce\xbb\xce\xb5\xce\xb9\xce\xbf \xcf\x84\xce\xb7\xcf\x82 \xce\x9d\xce\xbf\xcf\x81\xce\xb2\xce\xb7\xce\xb3\xce\xaf\xce\xb1\xcf\x82", "official_name:en"=>"Kingdom of Norway", "official_name:id"=>"Kerajaan Norwegia", "official_name:it"=>"Regno di Norvegia", "official_name:ja"=>"\xe3\x83\x8e\xe3\x83\xab\xe3\x82\xa6\xe3\x82\xa7\xe3\x83\xbc\xe7\x8e\x8b\xe5\x9b\xbd", "official_name:lb"=>"Kinneksr\xc3\xa4ich Norwegen", "official_name:lt"=>"Norvegijos Karalyst\xc4\x97", "official_name:sk"=>"N\xc3\xb3rske kr\xc3\xa1\xc4\xbeovstvo", "official_name:sv"=>"Konungariket Norge", "official_name:vi"=>"V\xc6\xb0\xc6\xa1ng qu\xe1\xbb\x91c Na Uy"',
]
tmpl = ".. n=%i, strlen=%i, cpghstore.loads (%.2fs) is %ix faster than pghstore.loads (%.2fs)"
tmpl_2 = ".. n=%i, dictlen=%i, pghstore.dumps (%.2fs)"
class LoadsBenchmark(unittest.TestCase):
    """Timing comparison between the C and pure-Python hstore parsers."""

    def test_loads(self):
        iterations = 100000
        print("")
        for payload in names:
            py_elapsed = timeit.timeit(
                partial(pypghstore.loads, payload), number=iterations)
            if cpghstore is None:
                # No C extension available: report pure-Python timing only.
                print(tmpl_2 % (iterations, len(payload), py_elapsed))
                continue
            c_elapsed = timeit.timeit(
                partial(cpghstore.loads, payload), number=iterations)
            # The C extension is expected to beat the pure-Python parser.
            self.assertTrue(c_elapsed < py_elapsed)
            print(
                tmpl % (iterations, len(payload), c_elapsed,
                        floor(py_elapsed / c_elapsed), py_elapsed)
            )
if __name__ == "__main__":
LoadsBenchmark().test_loads()
| 89.717391 | 2,856 | 0.64914 |
from __future__ import print_function
import unittest
import timeit
from functools import partial
from math import floor
try:
from pghstore import _speedups as cpghstore
except ImportError:
cpghstore = None
from pghstore import _native as pypghstore
names = [
"",
'"name"=>"Norge/Noreg"',
'"name"=>"Norge/Noreg", "name:af"=>"Noorwe\xc3\xab"',
'"name"=>"Norge/Noreg", "name:af"=>"Noorwe\xc3\xab", "name:ar"=>NULL',
'"name"=>"Norge/Noreg", "name:af"=>"Noorwe\xc3\xab", "name:ar"=>"\xd8\xa7\xd9\x84\xd9\x86\xd8\xb1\xd9\x88\xd9\x8a\xd8\xac", "name:be"=>"\xd0\x9d\xd0\xb0\xd1\x80\xd0\xb2\xd0\xb5\xd0\xb3\xd1\x96\xd1\x8f", "name:br"=>"Norvegia", "name:ca"=>"Noruega", "name:cs"=>"Norsko", "name:cy"=>"Norwy", "name:da"=>"Norge", "name:de"=>"Norwegen", "name:el"=>"\xce\x9d\xce\xbf\xcf\x81\xce\xb2\xce\xb7\xce\xb3\xce\xaf\xce\xb1", "name:en"=>"Norway", "name:eo"=>"Norvegio", "name:es"=>"Noruega", "name:et"=>"Norra", "name:fa"=>"\xd9\x86\xd8\xb1\xd9\x88\xda\x98", "name:fi"=>"Norja", "name:fo"=>"Noregur", "name:fr"=>"Norv\xc3\xa8ge", "name:fy"=>"Noarwegen", "name:ga"=>"An Iorua", "name:gd"=>"Nirribhidh", "name:he"=>"\xd7\xa0\xd7\x95\xd7\xa8\xd7\x95\xd7\x95\xd7\x92\xd7\x99\xd7\x94", "name:hr"=>"Norve\xc5\xa1ka", "name:hu"=>"Norv\xc3\xa9gia", "name:hy"=>"\xd5\x86\xd5\xb8\xd6\x80\xd5\xbe\xd5\xa5\xd5\xa3\xd5\xab\xd5\xa1", "name:id"=>"Norwegia", "name:is"=>"Noregur", "name:it"=>"Norvegia", "name:ja"=>"\xe3\x83\x8e\xe3\x83\xab\xe3\x82\xa6\xe3\x82\xa7\xe3\x83\xbc", "name:la"=>"Norvegia", "name:lb"=>"Norwegen", "name:li"=>"Noorwege", "name:lt"=>"Norvegija", "name:lv"=>"Norv\xc4\x93\xc4\xa3ija", "name:mn"=>"\xd0\x9d\xd0\xbe\xd1\x80\xd0\xb2\xd0\xb5\xd0\xb3\xd0\xb8", "name:nb"=>"Norge", "name:nl"=>"Noorwegen", "name:nn"=>"Noreg", "name:no"=>"Norge", "name:pl"=>"Norwegia", "name:ru"=>"\xd0\x9d\xd0\xbe\xd1\x80\xd0\xb2\xd0\xb5\xd0\xb3\xd0\xb8\xd1\x8f", "name:sk"=>"N\xc3\xb3rsko", "name:sl"=>"Norve\xc5\xa1ka", "name:sv"=>"Norge", "name:th"=>"\xe0\xb8\x9b\xe0\xb8\xa3\xe0\xb8\xb0\xe0\xb9\x80\xe0\xb8\x97\xe0\xb8\xa8\xe0\xb8\x99\xe0\xb8\xad\xe0\xb8\xa3\xe0\xb9\x8c\xe0\xb9\x80\xe0\xb8\xa7\xe0\xb8\xa2\xe0\xb9\x8c", "name:tr"=>"Norve\xc3\xa7", "name:uk"=>"\xd0\x9d\xd0\xbe\xd1\x80\xd0\xb2\xd0\xb5\xd0\xb3\xd1\x96\xd1\x8f", "name:vi"=>"Na Uy", "name:zh"=>"\xe6\x8c\xaa\xe5\xa8\x81", "name:haw"=>"Nolewai", "name:zh_py"=>"Nuowei", "name:zh_pyt"=>"Nu\xc3\xb3w\xc4\x93i", "official_name"=>"Kongeriket Norge", 
"official_name:be"=>"\xd0\x9a\xd0\xb0\xd1\x80\xd0\xb0\xd0\xbb\xd0\xb5\xd1\x9e\xd1\x81\xd1\x82\xd0\xb2\xd0\xb0 \xd0\x9d\xd0\xb0\xd1\x80\xd0\xb2\xd0\xb5\xd0\xb3\xd1\x96\xd1\x8f", "official_name:el"=>"\xce\x92\xce\xb1\xcf\x83\xce\xaf\xce\xbb\xce\xb5\xce\xb9\xce\xbf \xcf\x84\xce\xb7\xcf\x82 \xce\x9d\xce\xbf\xcf\x81\xce\xb2\xce\xb7\xce\xb3\xce\xaf\xce\xb1\xcf\x82", "official_name:en"=>"Kingdom of Norway", "official_name:id"=>"Kerajaan Norwegia", "official_name:it"=>"Regno di Norvegia", "official_name:ja"=>"\xe3\x83\x8e\xe3\x83\xab\xe3\x82\xa6\xe3\x82\xa7\xe3\x83\xbc\xe7\x8e\x8b\xe5\x9b\xbd", "official_name:lb"=>"Kinneksr\xc3\xa4ich Norwegen", "official_name:lt"=>"Norvegijos Karalyst\xc4\x97", "official_name:sk"=>"N\xc3\xb3rske kr\xc3\xa1\xc4\xbeovstvo", "official_name:sv"=>"Konungariket Norge", "official_name:vi"=>"V\xc6\xb0\xc6\xa1ng qu\xe1\xbb\x91c Na Uy"',
]
tmpl = ".. n=%i, strlen=%i, cpghstore.loads (%.2fs) is %ix faster than pghstore.loads (%.2fs)"
tmpl_2 = ".. n=%i, dictlen=%i, pghstore.dumps (%.2fs)"
class LoadsBenchmark(unittest.TestCase):
    """Timing comparison between the C and pure-Python hstore parsers."""

    def test_loads(self):
        n = 100000
        print("")
        for name in names:
            # Pure-Python baseline timing.
            pg_time = timeit.timeit(partial(pypghstore.loads, name), number=n)
            if cpghstore is not None:
                cpg_time = timeit.timeit(partial(cpghstore.loads, name), number=n)
                # The C extension is expected to beat the pure-Python parser.
                self.assertTrue(cpg_time < pg_time)
                print(
                    tmpl % (n, len(name), cpg_time, floor(pg_time / cpg_time), pg_time)
                )
            else:
                # No C extension available: report the Python timing only.
                print(tmpl_2 % (n, len(name), pg_time))
if __name__ == "__main__":
LoadsBenchmark().test_loads()
| true | true |
f7f97eea171aeafdaed314fe22447d5c68e6b9fd | 2,221 | py | Python | applications/shop/models/paymentmethod/iban.py | dev-easyshares/mighty | a6cf473fb8cfbf5b92db68c7b068fc8ae2911b8b | [
"MIT"
] | null | null | null | applications/shop/models/paymentmethod/iban.py | dev-easyshares/mighty | a6cf473fb8cfbf5b92db68c7b068fc8ae2911b8b | [
"MIT"
] | 1 | 2022-03-12T00:57:37.000Z | 2022-03-12T00:57:37.000Z | applications/shop/models/paymentmethod/iban.py | dev-easyshares/mighty | a6cf473fb8cfbf5b92db68c7b068fc8ae2911b8b | [
"MIT"
] | null | null | null | from django.conf import settings
from django.core.exceptions import ValidationError
from mighty.applications.shop.apps import sepas_test, ShopConfig
from schwifty import IBAN, BIC
from dateutil.relativedelta import relativedelta
import re
class IbanModel:
    """Mixin adding IBAN/BIC formatting and validation helpers.

    Expects the composing model to provide ``iban``, ``bic`` and either a
    ``group`` or ``user`` relation (depending on ShopConfig.subscription_for).
    """

    @property
    def readable_iban(self):
        """IBAN grouped into blocks of four characters."""
        return ' '.join([self.iban[i:i+4] for i in range(0, len(self.iban), 4)])

    @property
    def str_iban(self):
        """Grouped IBAN plus BIC, e.g. 'FR76 3000 .../BNPAFRPP'."""
        return "%s/%s" % (self.readable_iban, self.bic)

    @property
    def iban_readable(self):
        # NOTE(review): duplicates readable_iban above — one of the two is
        # presumably kept only for backward compatibility.
        return " ".join(self.iban[i:i+4] for i in range(0, len(self.iban), 4))

    @property
    def mask_iban(self):
        """Masked IBAN (first/last 4 chars visible) plus BIC."""
        # NOTE(review): `self.readable_cb` is not defined on this mixin; it
        # looks like a copy-paste from a card-payment model. Presumably this
        # should read self.iban_readable[0:4] — confirm with the composing
        # model before relying on this property.
        iban = self.readable_cb[0:4]+re.sub(r"[a-zA-Z0-9]", '*', self.iban_readable[4:-4])+self.iban_readable[-4:]
        return "%s/%s" % (iban, self.bic)

    @property
    def is_valid_ibanlib(self):
        """Normalise self.iban and validate it via schwifty.

        Side effects: strips whitespace from self.iban and, when self.bic is
        empty, fills it from the IBAN's derived BIC.
        """
        self.iban = re.sub(r"\s+", "", self.iban, flags=re.UNICODE)
        try:
            iban = IBAN(self.iban)
            if not self.bic: self.bic = str(iban.bic)
            return True
        except ValueError:
            return False

    @property
    def is_valid_bic(self):
        """True when self.bic is empty or parses as a valid BIC."""
        try:
            if self.bic:
                BIC(self.bic)
            return True
        except ValueError:
            return False

    @property
    def is_exist_iban(self):
        """Return True when NO other record uses this IBAN/BIC pair.

        (Despite the name, True means the IBAN is available, not that it
        exists.) Scoped per group or per user depending on configuration.
        """
        if ShopConfig.subscription_for == 'group':
            qs = type(self).objects.filter(iban=self.iban, bic=self.bic, group=self.group)
        else:
            qs = type(self).objects.filter(iban=self.iban, bic=self.bic, user=self.user)
        # Exclude self when updating an existing record.
        if self.pk: qs = qs.exclude(pk=self.pk)
        return False if qs.exists() else True

    @property
    def is_valid_iban(self):
        """Full validation; raises ValidationError, returns nothing on success.

        Checks, in order: not a SEPA test IBAN (outside DEBUG), IBAN syntax,
        BIC syntax, and uniqueness for the owning group/user.
        """
        if not settings.DEBUG and self.iban in sepas_test():
            raise ValidationError(code='invalid_iban', message='invalid IBAN')
        if not self.iban or not self.is_valid_ibanlib:
            raise ValidationError(code='invalid_iban', message='invalid IBAN')
        if not self.is_valid_bic:
            raise ValidationError(code='invalid_bic', message='invalid BIC')
        if not self.is_exist_iban:
            raise ValidationError(code='already_iban', message='IBAN already exist')
| 34.703125 | 114 | 0.620891 | from django.conf import settings
from django.core.exceptions import ValidationError
from mighty.applications.shop.apps import sepas_test, ShopConfig
from schwifty import IBAN, BIC
from dateutil.relativedelta import relativedelta
import re
class IbanModel:
@property
def readable_iban(self):
return ' '.join([self.iban[i:i+4] for i in range(0, len(self.iban), 4)])
@property
def str_iban(self):
return "%s/%s" % (self.readable_iban, self.bic)
@property
def iban_readable(self):
return " ".join(self.iban[i:i+4] for i in range(0, len(self.iban), 4))
@property
def mask_iban(self):
iban = self.readable_cb[0:4]+re.sub(r"[a-zA-Z0-9]", '*', self.iban_readable[4:-4])+self.iban_readable[-4:]
return "%s/%s" % (iban, self.bic)
@property
def is_valid_ibanlib(self):
self.iban = re.sub(r"\s+", "", self.iban, flags=re.UNICODE)
try:
iban = IBAN(self.iban)
if not self.bic: self.bic = str(iban.bic)
return True
except ValueError:
return False
@property
def is_valid_bic(self):
try:
if self.bic:
BIC(self.bic)
return True
except ValueError:
return False
@property
def is_exist_iban(self):
if ShopConfig.subscription_for == 'group':
qs = type(self).objects.filter(iban=self.iban, bic=self.bic, group=self.group)
else:
qs = type(self).objects.filter(iban=self.iban, bic=self.bic, user=self.user)
if self.pk: qs = qs.exclude(pk=self.pk)
return False if qs.exists() else True
@property
def is_valid_iban(self):
if not settings.DEBUG and self.iban in sepas_test():
raise ValidationError(code='invalid_iban', message='invalid IBAN')
if not self.iban or not self.is_valid_ibanlib:
raise ValidationError(code='invalid_iban', message='invalid IBAN')
if not self.is_valid_bic:
raise ValidationError(code='invalid_bic', message='invalid BIC')
if not self.is_exist_iban:
raise ValidationError(code='already_iban', message='IBAN already exist')
| true | true |
f7f97f1e083b2da4fa3985b5f7f8c3ebce4b3a87 | 3,091 | py | Python | benchmark/startCirq2377.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startCirq2377.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startCirq2377.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=40
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
    """Return the auto-generated 4-qubit benchmark circuit.

    NOTE(review): ``n`` is accepted but never used (uniform generator
    signature); the fixed gate sequence below is machine-generated and each
    ``# number=`` tag is the generator's gate index, so the order must not
    be changed by hand.
    """
    c = cirq.Circuit() # circuit begin
    c.append(cirq.H.on(input_qubit[0])) # number=9
    c.append(cirq.H.on(input_qubit[1])) # number=2
    c.append(cirq.H.on(input_qubit[2])) # number=3
    c.append(cirq.Z.on(input_qubit[3])) # number=29
    c.append(cirq.H.on(input_qubit[3])) # number=4
    c.append(cirq.H.on(input_qubit[0])) # number=5
    c.append(cirq.H.on(input_qubit[1])) # number=6
    c.append(cirq.H.on(input_qubit[2])) # number=7
    c.append(cirq.H.on(input_qubit[3])) # number=8
    c.append(cirq.H.on(input_qubit[3])) # number=31
    c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=32
    c.append(cirq.H.on(input_qubit[3])) # number=33
    c.append(cirq.X.on(input_qubit[3])) # number=27
    c.append(cirq.H.on(input_qubit[3])) # number=34
    c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=35
    c.append(cirq.H.on(input_qubit[3])) # number=36
    c.append(cirq.H.on(input_qubit[0])) # number=37
    c.append(cirq.CZ.on(input_qubit[2],input_qubit[0])) # number=38
    c.append(cirq.H.on(input_qubit[0])) # number=39
    c.append(cirq.H.on(input_qubit[0])) # number=14
    c.append(cirq.H.on(input_qubit[1])) # number=30
    c.append(cirq.CZ.on(input_qubit[2],input_qubit[0])) # number=15
    c.append(cirq.H.on(input_qubit[0])) # number=16
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=20
    c.append(cirq.X.on(input_qubit[2])) # number=21
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=22
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=17
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=23
    c.append(cirq.X.on(input_qubit[2])) # number=24
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=25
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=19
    # circuit end
    # Measure all qubits into the single key read back by the histogram.
    c.append(cirq.measure(*input_qubit, key='result'))
    return c
def bitstring(bits):
    """Render an iterable of bit-like values as a compact '0'/'1' string."""
    digits = [str(int(bit)) for bit in bits]
    return "".join(digits)
if __name__ == '__main__':
    qubit_count = 4
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    # Compile the abstract circuit into Sycamore-native sqrt-iswap gates.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
    circuit_sample_count =2000
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)
    # Fold each measurement into a '0'/'1' string and count occurrences.
    frequencies = result.histogram(key='result', fold_func=bitstring)
    # Dump histogram, gate count and circuit diagram for later comparison.
    writefile = open("../data/startCirq2377.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)
    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)
    writefile.close()
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
from cirq.contrib.svg import SVGCircuit
def make_circuit(n: int, input_qubit):
c = cirq.Circuit()
c.append(cirq.H.on(input_qubit[0]))
c.append(cirq.H.on(input_qubit[1]))
c.append(cirq.H.on(input_qubit[2]))
c.append(cirq.Z.on(input_qubit[3]))
c.append(cirq.H.on(input_qubit[3]))
c.append(cirq.H.on(input_qubit[0]))
c.append(cirq.H.on(input_qubit[1]))
c.append(cirq.H.on(input_qubit[2]))
c.append(cirq.H.on(input_qubit[3]))
c.append(cirq.H.on(input_qubit[3]))
c.append(cirq.CZ.on(input_qubit[0],input_qubit[3]))
c.append(cirq.H.on(input_qubit[3]))
c.append(cirq.X.on(input_qubit[3]))
c.append(cirq.H.on(input_qubit[3]))
c.append(cirq.CZ.on(input_qubit[0],input_qubit[3]))
c.append(cirq.H.on(input_qubit[3]))
c.append(cirq.H.on(input_qubit[0]))
c.append(cirq.CZ.on(input_qubit[2],input_qubit[0]))
c.append(cirq.H.on(input_qubit[0]))
c.append(cirq.H.on(input_qubit[0]))
c.append(cirq.H.on(input_qubit[1]))
c.append(cirq.CZ.on(input_qubit[2],input_qubit[0]))
c.append(cirq.H.on(input_qubit[0]))
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2]))
c.append(cirq.X.on(input_qubit[2]))
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2]))
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2]))
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2]))
c.append(cirq.X.on(input_qubit[2]))
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2]))
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2]))
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq2377.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | true | true |
f7f97f56258036a48aa42fcf4d54582e19df6e67 | 3,117 | py | Python | Django/DjangoT1.11_LTS/55_Pengenalan User Permission Checks/mywebsite/settings.py | Akhadafi/WEB-Framework | 4547a682ac1f007aa6f97512baf76b92ef1c9b9a | [
"MIT"
] | null | null | null | Django/DjangoT1.11_LTS/55_Pengenalan User Permission Checks/mywebsite/settings.py | Akhadafi/WEB-Framework | 4547a682ac1f007aa6f97512baf76b92ef1c9b9a | [
"MIT"
] | null | null | null | Django/DjangoT1.11_LTS/55_Pengenalan User Permission Checks/mywebsite/settings.py | Akhadafi/WEB-Framework | 4547a682ac1f007aa6f97512baf76b92ef1c9b9a | [
"MIT"
] | null | null | null | """
Django settings for mywebsite project.
Generated by 'django-admin startproject' using Django 1.11.20.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository — rotate it and load
# it from the environment (e.g. os.environ) before any real deployment.
SECRET_KEY = '0l3t_qz$+ocv=^ss6vux!#+ep831tg2jh2p2@wmntbm+dbm2sy'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Must list the served hostnames once DEBUG is turned off.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mywebsite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mywebsite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| 25.760331 | 91 | 0.69907 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '0l3t_qz$+ocv=^ss6vux!#+ep831tg2jh2p2@wmntbm+dbm2sy'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mywebsite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mywebsite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| true | true |
f7f97fbddca8345981dcfbe6d3e65b14bef4616e | 8,544 | py | Python | sunpy/map/tests/test_map_factory.py | TomAugspurger/sunpy | cad2d473f6aff05df5fe787c781cb7d004959b94 | [
"BSD-2-Clause"
] | null | null | null | sunpy/map/tests/test_map_factory.py | TomAugspurger/sunpy | cad2d473f6aff05df5fe787c781cb7d004959b94 | [
"BSD-2-Clause"
] | null | null | null | sunpy/map/tests/test_map_factory.py | TomAugspurger/sunpy | cad2d473f6aff05df5fe787c781cb7d004959b94 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 21 15:05:09 2013
@author: stuart
"""
import os
import tempfile
import pathlib
import pytest
import numpy as np
from astropy.io import fits
from astropy.wcs import WCS
import sunpy
import sunpy.map
import sunpy.data.test
filepath = sunpy.data.test.rootdir
a_list_of_many = [os.fspath(f) for f in pathlib.Path(filepath, "EIT").glob("*")]
a_fname = a_list_of_many[0]
AIA_171_IMAGE = os.path.join(filepath, 'aia_171_level1.fits')
RHESSI_IMAGE = os.path.join(filepath, 'hsi_image_20101016_191218.fits')
#==============================================================================
# Map Factory Tests
#==============================================================================
class TestMap:
    """Exercise the ``sunpy.map.Map`` factory's input-pattern dispatch.

    Covers single files, directories, globs, (data, header) pairs, WCS pairs
    and round-tripping through ``save``; the source-specific tests below
    check that each instrument's sample file resolves to its dedicated
    ``GenericMap`` subclass.
    """
    def test_mapsequence(self):
        # Test making a MapSequence
        sequence = sunpy.map.Map(a_list_of_many, sequence=True)
        assert isinstance(sequence, sunpy.map.MapSequence)
    def test_composite(self):
        # Test making a CompositeMap
        comp = sunpy.map.Map(AIA_171_IMAGE, RHESSI_IMAGE, composite=True)
        assert isinstance(comp, sunpy.map.CompositeMap)
    def test_patterns(self):
        # Test different Map pattern matching
        # File name
        eitmap = sunpy.map.Map(a_fname)
        assert isinstance(eitmap, sunpy.map.GenericMap)
        # Directory
        directory = pathlib.Path(filepath, "EIT")
        maps = sunpy.map.Map(os.fspath(directory))
        assert isinstance(maps, list)
        assert ([isinstance(amap, sunpy.map.GenericMap) for amap in maps])
        # Test that returned maps are sorted
        files_sorted = sorted(list(directory.glob('*')))
        maps_sorted = [sunpy.map.Map(os.fspath(f)) for f in files_sorted]
        assert all([m.date == m_s.date for m, m_s in zip(maps, maps_sorted)])
        # Pathlib
        path = pathlib.Path(a_fname)
        eitmap = sunpy.map.Map(path)
        assert isinstance(eitmap, sunpy.map.GenericMap)
        maps = sunpy.map.Map(directory)
        assert isinstance(maps, list)
        assert ([isinstance(amap, sunpy.map.GenericMap) for amap in maps])
        # Glob
        pattern = os.path.join(filepath, "EIT", "*")
        maps = sunpy.map.Map(pattern)
        assert isinstance(maps, list)
        assert ([isinstance(amap, sunpy.map.GenericMap) for amap in maps])
        # Test that returned maps are sorted
        files_sorted = sorted(list(pathlib.Path(pattern).parent.glob('*')))
        maps_sorted = [sunpy.map.Map(os.fspath(f)) for f in files_sorted]
        assert all([m.date == m_s.date for m, m_s in zip(maps, maps_sorted)])
        # Single character wildcard (?)
        pattern = os.path.join(filepath, "EIT", "efz20040301.0?0010_s.fits")
        maps = sunpy.map.Map(pattern)
        assert isinstance(maps, list)
        assert len(maps) == 7
        assert ([isinstance(amap, sunpy.map.GenericMap) for amap in maps])
        # Character ranges
        pattern = os.path.join(filepath, "EIT", "efz20040301.0[2-6]0010_s.fits")
        maps = sunpy.map.Map(pattern)
        assert isinstance(maps, list)
        assert len(maps) == 4
        assert ([isinstance(amap, sunpy.map.GenericMap) for amap in maps])
        # Already a Map
        amap = sunpy.map.Map(maps[0])
        assert isinstance(amap, sunpy.map.GenericMap)
        # A list of filenames
        maps = sunpy.map.Map(a_list_of_many)
        assert isinstance(maps, list)
        assert ([isinstance(amap, sunpy.map.GenericMap) for amap in maps])
        # Data-header pair in a tuple
        pair_map = sunpy.map.Map((amap.data, amap.meta))
        assert isinstance(pair_map, sunpy.map.GenericMap)
        # Data-header pair not in a tuple
        pair_map = sunpy.map.Map(amap.data, amap.meta)
        assert isinstance(pair_map, sunpy.map.GenericMap)
        # Data-wcs object pair in tuple
        pair_map = sunpy.map.Map((amap.data, WCS(AIA_171_IMAGE)))
        assert isinstance(pair_map, sunpy.map.GenericMap)
        # Data-wcs object pair not in a tuple
        pair_map = sunpy.map.Map(amap.data, WCS(AIA_171_IMAGE))
        assert isinstance(pair_map, sunpy.map.GenericMap)
        # Data-header from FITS
        with fits.open(a_fname) as hdul:
            data = hdul[0].data
            header = hdul[0].header
        pair_map = sunpy.map.Map((data, header))
        assert isinstance(pair_map, sunpy.map.GenericMap)
        pair_map, pair_map = sunpy.map.Map(((data, header), (data, header)))
        assert isinstance(pair_map, sunpy.map.GenericMap)
        pair_map = sunpy.map.Map(data, header)
        assert isinstance(pair_map, sunpy.map.GenericMap)
        # Custom Map
        data = np.arange(0, 100).reshape(10, 10)
        header = {'cdelt1': 10, 'cdelt2': 10,
                  'telescop': 'sunpy',
                  'cunit1': 'arcsec', 'cunit2': 'arcsec'}
        pair_map = sunpy.map.Map(data, header)
        assert isinstance(pair_map, sunpy.map.GenericMap)
    # requires dask array to run properly
    def test_dask_array(self):
        dask_array = pytest.importorskip('dask.array')
        amap = sunpy.map.Map(AIA_171_IMAGE)
        da = dask_array.from_array(amap.data, chunks=(1, 1))
        pair_map = sunpy.map.Map(da, amap.meta)
        assert isinstance(pair_map, sunpy.map.GenericMap)
    # requires sqlalchemy to run properly
    def test_databaseentry(self):
        sqlalchemy = pytest.importorskip('sqlalchemy')
        sunpy_database = pytest.importorskip('sunpy.database')
        db = sunpy_database.Database(url='sqlite://', default_waveunit='angstrom')
        db.add_from_file(a_fname)
        res = db.get_entry_by_id(1)
        db_map = sunpy.map.Map(res)
        assert isinstance(db_map, sunpy.map.GenericMap)
    @pytest.mark.remote_data
    def test_url_pattern(self):
        # A URL
        amap = sunpy.map.Map("http://data.sunpy.org/sample-data/AIA20110319_105400_0171.fits")
        assert isinstance(amap, sunpy.map.GenericMap)
    def test_save(self):
        # Test save out
        eitmap = sunpy.map.Map(a_fname)
        # NOTE(review): suffix='fits' yields a name ending in '...fits' with
        # no dot ('.fits' was probably intended); harmless here only because
        # filetype is passed explicitly.  The temp file is never removed.
        afilename = tempfile.NamedTemporaryFile(suffix='fits').name
        eitmap.save(afilename, filetype='fits', overwrite=True)
        backin = sunpy.map.Map(afilename)
        assert isinstance(backin, sunpy.map.sources.EITMap)
    #==============================================================================
    # Sources Tests
    #==============================================================================
    def test_sdo(self):
        # Test an AIAMap
        aia = sunpy.map.Map(AIA_171_IMAGE)
        assert isinstance(aia, sunpy.map.sources.AIAMap)
        # TODO: Test a HMIMap
    def test_soho(self):
        # Test EITMap, LASCOMap & MDIMap
        eit = sunpy.map.Map(os.path.join(filepath, "EIT", "efz20040301.000010_s.fits"))
        assert isinstance(eit, sunpy.map.sources.EITMap)
        lasco = sunpy.map.Map(os.path.join(filepath, "lasco_c2_25299383_s.fts"))
        assert isinstance(lasco, sunpy.map.sources.LASCOMap)
        mdi_c = sunpy.map.Map(os.path.join(filepath, "mdi_fd_Ic_6h_01d.5871.0000_s.fits"))
        assert isinstance(mdi_c, sunpy.map.sources.MDIMap)
        mdi_m = sunpy.map.Map(os.path.join(filepath, "mdi_fd_M_96m_01d.5874.0005_s.fits"))
        assert isinstance(mdi_m, sunpy.map.sources.MDIMap)
    def test_stereo(self):
        # Test EUVIMap & CORMap & HIMap
        euvi = sunpy.map.Map(os.path.join(filepath, "euvi_20090615_000900_n4euA_s.fts"))
        assert isinstance(euvi, sunpy.map.sources.EUVIMap)
        cor = sunpy.map.Map(os.path.join(filepath, "cor1_20090615_000500_s4c1A.fts"))
        assert isinstance(cor, sunpy.map.sources.CORMap)
        hi = sunpy.map.Map(os.path.join(filepath, "hi_20110910_114721_s7h2A.fts"))
        assert isinstance(hi, sunpy.map.sources.HIMap)
    def test_rhessi(self):
        # Test RHESSIMap
        rhessi = sunpy.map.Map(RHESSI_IMAGE)
        assert isinstance(rhessi, sunpy.map.sources.RHESSIMap)
    def test_sot(self):
        # Test SOTMap
        sot = sunpy.map.Map(os.path.join(filepath, "FGMG4_20110214_030443.7.fits"))
        assert isinstance(sot, sunpy.map.sources.SOTMap)
    def test_swap(self):
        # Test SWAPMap
        swap = sunpy.map.Map(os.path.join(filepath, "swap_lv1_20140606_000113.fits"))
        assert isinstance(swap, sunpy.map.sources.SWAPMap)
    def test_xrt(self):
        # Test XRTMap
        xrt = sunpy.map.Map(os.path.join(filepath, "HinodeXRT.fits"))
        assert isinstance(xrt, sunpy.map.sources.XRTMap)
        # TODO: Test SXTMap
| 38.660633 | 94 | 0.628394 |
import os
import tempfile
import pathlib
import pytest
import numpy as np
from astropy.io import fits
from astropy.wcs import WCS
import sunpy
import sunpy.map
import sunpy.data.test
filepath = sunpy.data.test.rootdir
a_list_of_many = [os.fspath(f) for f in pathlib.Path(filepath, "EIT").glob("*")]
a_fname = a_list_of_many[0]
AIA_171_IMAGE = os.path.join(filepath, 'aia_171_level1.fits')
RHESSI_IMAGE = os.path.join(filepath, 'hsi_image_20101016_191218.fits')
class TestMap:
def test_mapsequence(self):
sequence = sunpy.map.Map(a_list_of_many, sequence=True)
assert isinstance(sequence, sunpy.map.MapSequence)
def test_composite(self):
comp = sunpy.map.Map(AIA_171_IMAGE, RHESSI_IMAGE, composite=True)
assert isinstance(comp, sunpy.map.CompositeMap)
def test_patterns(self):
eitmap = sunpy.map.Map(a_fname)
assert isinstance(eitmap, sunpy.map.GenericMap)
directory = pathlib.Path(filepath, "EIT")
maps = sunpy.map.Map(os.fspath(directory))
assert isinstance(maps, list)
assert ([isinstance(amap, sunpy.map.GenericMap) for amap in maps])
files_sorted = sorted(list(directory.glob('*')))
maps_sorted = [sunpy.map.Map(os.fspath(f)) for f in files_sorted]
assert all([m.date == m_s.date for m, m_s in zip(maps, maps_sorted)])
path = pathlib.Path(a_fname)
eitmap = sunpy.map.Map(path)
assert isinstance(eitmap, sunpy.map.GenericMap)
maps = sunpy.map.Map(directory)
assert isinstance(maps, list)
assert ([isinstance(amap, sunpy.map.GenericMap) for amap in maps])
pattern = os.path.join(filepath, "EIT", "*")
maps = sunpy.map.Map(pattern)
assert isinstance(maps, list)
assert ([isinstance(amap, sunpy.map.GenericMap) for amap in maps])
files_sorted = sorted(list(pathlib.Path(pattern).parent.glob('*')))
maps_sorted = [sunpy.map.Map(os.fspath(f)) for f in files_sorted]
assert all([m.date == m_s.date for m, m_s in zip(maps, maps_sorted)])
pattern = os.path.join(filepath, "EIT", "efz20040301.0?0010_s.fits")
maps = sunpy.map.Map(pattern)
assert isinstance(maps, list)
assert len(maps) == 7
assert ([isinstance(amap, sunpy.map.GenericMap) for amap in maps])
pattern = os.path.join(filepath, "EIT", "efz20040301.0[2-6]0010_s.fits")
maps = sunpy.map.Map(pattern)
assert isinstance(maps, list)
assert len(maps) == 4
assert ([isinstance(amap, sunpy.map.GenericMap) for amap in maps])
amap = sunpy.map.Map(maps[0])
assert isinstance(amap, sunpy.map.GenericMap)
maps = sunpy.map.Map(a_list_of_many)
assert isinstance(maps, list)
assert ([isinstance(amap, sunpy.map.GenericMap) for amap in maps])
pair_map = sunpy.map.Map((amap.data, amap.meta))
assert isinstance(pair_map, sunpy.map.GenericMap)
pair_map = sunpy.map.Map(amap.data, amap.meta)
assert isinstance(pair_map, sunpy.map.GenericMap)
pair_map = sunpy.map.Map((amap.data, WCS(AIA_171_IMAGE)))
assert isinstance(pair_map, sunpy.map.GenericMap)
pair_map = sunpy.map.Map(amap.data, WCS(AIA_171_IMAGE))
assert isinstance(pair_map, sunpy.map.GenericMap)
with fits.open(a_fname) as hdul:
data = hdul[0].data
header = hdul[0].header
pair_map = sunpy.map.Map((data, header))
assert isinstance(pair_map, sunpy.map.GenericMap)
pair_map, pair_map = sunpy.map.Map(((data, header), (data, header)))
assert isinstance(pair_map, sunpy.map.GenericMap)
pair_map = sunpy.map.Map(data, header)
assert isinstance(pair_map, sunpy.map.GenericMap)
data = np.arange(0, 100).reshape(10, 10)
header = {'cdelt1': 10, 'cdelt2': 10,
'telescop': 'sunpy',
'cunit1': 'arcsec', 'cunit2': 'arcsec'}
pair_map = sunpy.map.Map(data, header)
assert isinstance(pair_map, sunpy.map.GenericMap)
def test_dask_array(self):
dask_array = pytest.importorskip('dask.array')
amap = sunpy.map.Map(AIA_171_IMAGE)
da = dask_array.from_array(amap.data, chunks=(1, 1))
pair_map = sunpy.map.Map(da, amap.meta)
assert isinstance(pair_map, sunpy.map.GenericMap)
def test_databaseentry(self):
sqlalchemy = pytest.importorskip('sqlalchemy')
sunpy_database = pytest.importorskip('sunpy.database')
db = sunpy_database.Database(url='sqlite://', default_waveunit='angstrom')
db.add_from_file(a_fname)
res = db.get_entry_by_id(1)
db_map = sunpy.map.Map(res)
assert isinstance(db_map, sunpy.map.GenericMap)
@pytest.mark.remote_data
def test_url_pattern(self):
amap = sunpy.map.Map("http://data.sunpy.org/sample-data/AIA20110319_105400_0171.fits")
assert isinstance(amap, sunpy.map.GenericMap)
def test_save(self):
eitmap = sunpy.map.Map(a_fname)
afilename = tempfile.NamedTemporaryFile(suffix='fits').name
eitmap.save(afilename, filetype='fits', overwrite=True)
backin = sunpy.map.Map(afilename)
assert isinstance(backin, sunpy.map.sources.EITMap)
def test_sdo(self):
aia = sunpy.map.Map(AIA_171_IMAGE)
assert isinstance(aia, sunpy.map.sources.AIAMap)
def test_soho(self):
eit = sunpy.map.Map(os.path.join(filepath, "EIT", "efz20040301.000010_s.fits"))
assert isinstance(eit, sunpy.map.sources.EITMap)
lasco = sunpy.map.Map(os.path.join(filepath, "lasco_c2_25299383_s.fts"))
assert isinstance(lasco, sunpy.map.sources.LASCOMap)
mdi_c = sunpy.map.Map(os.path.join(filepath, "mdi_fd_Ic_6h_01d.5871.0000_s.fits"))
assert isinstance(mdi_c, sunpy.map.sources.MDIMap)
mdi_m = sunpy.map.Map(os.path.join(filepath, "mdi_fd_M_96m_01d.5874.0005_s.fits"))
assert isinstance(mdi_m, sunpy.map.sources.MDIMap)
def test_stereo(self):
euvi = sunpy.map.Map(os.path.join(filepath, "euvi_20090615_000900_n4euA_s.fts"))
assert isinstance(euvi, sunpy.map.sources.EUVIMap)
cor = sunpy.map.Map(os.path.join(filepath, "cor1_20090615_000500_s4c1A.fts"))
assert isinstance(cor, sunpy.map.sources.CORMap)
hi = sunpy.map.Map(os.path.join(filepath, "hi_20110910_114721_s7h2A.fts"))
assert isinstance(hi, sunpy.map.sources.HIMap)
def test_rhessi(self):
rhessi = sunpy.map.Map(RHESSI_IMAGE)
assert isinstance(rhessi, sunpy.map.sources.RHESSIMap)
def test_sot(self):
sot = sunpy.map.Map(os.path.join(filepath, "FGMG4_20110214_030443.7.fits"))
assert isinstance(sot, sunpy.map.sources.SOTMap)
def test_swap(self):
swap = sunpy.map.Map(os.path.join(filepath, "swap_lv1_20140606_000113.fits"))
assert isinstance(swap, sunpy.map.sources.SWAPMap)
def test_xrt(self):
xrt = sunpy.map.Map(os.path.join(filepath, "HinodeXRT.fits"))
assert isinstance(xrt, sunpy.map.sources.XRTMap)
| true | true |
f7f98051be59a9ce5dfd5c781777950cb599deef | 492 | py | Python | conv_serial.py | gpu0/nnet | 0fd5c718c2d03cab91d4a4fd4963b12df241a9de | [
"MIT"
] | null | null | null | conv_serial.py | gpu0/nnet | 0fd5c718c2d03cab91d4a4fd4963b12df241a9de | [
"MIT"
] | null | null | null | conv_serial.py | gpu0/nnet | 0fd5c718c2d03cab91d4a4fd4963b12df241a9de | [
"MIT"
] | null | null | null | import multiprocessing
import numpy as np
numThreads = 8
numRows = 32000
numCols = 3
numOut = 2
# Rows handled per worker.  Integer division is required: the old ``/``
# yields a float under Python 3, and float slice indices raise TypeError.
stride = numRows // numThreads
X = np.ones((numRows, numCols))
W = np.ones((numCols, numOut))
B = np.ones((numRows, numOut))
def conv(idx):
    """Benchmark one worker's share: repeat ``X[band] @ W + B[band]`` 100k times.

    ``idx`` selects the row band ``[idx*stride, idx*stride + stride)``.  The
    loop exists purely to generate CPU load for the serial-vs-parallel
    comparison; only the final product is returned.
    """
    start = idx * stride
    stop = start + stride
    # Hoist the band slices out of the loop — they are loop-invariant views.
    x_band = X[start:stop]
    b_band = B[start:stop]
    for _ in range(100000):
        x_band.dot(W) + b_band
    return x_band.dot(W) + b_band
if __name__=='__main__':
    for worker in range(numThreads):
        conv(worker)
| 21.391304 | 81 | 0.697154 | import multiprocessing
import numpy as np
numThreads = 8
numRows = 32000
numCols = 3
numOut = 2
stride = numRows / numThreads
X = np.ones((numRows, numCols))
W = np.ones((numCols, numOut))
B = np.ones((numRows, numOut))
def conv(idx):
for i in range(100000):
X[idx*stride:idx*stride+stride].dot(W) + B[idx*stride:idx*stride+stride]
return X[idx*stride:idx*stride+stride].dot(W) + B[idx*stride:idx*stride+stride]
if __name__=='__main__':
for i in range(numThreads):
conv(i)
| true | true |
f7f980870d9219f3d403b290299070d59e3a0261 | 6,279 | py | Python | tests/contrib/operators/test_mssql_to_gcs_operator.py | is2co/airflow | 48f400541d19601d1571d07cf65f55c649884b87 | [
"Apache-2.0"
] | 2 | 2020-10-12T05:21:27.000Z | 2021-07-07T09:23:47.000Z | tests/contrib/operators/test_mssql_to_gcs_operator.py | is2co/airflow | 48f400541d19601d1571d07cf65f55c649884b87 | [
"Apache-2.0"
] | 3 | 2021-03-11T06:46:16.000Z | 2021-09-29T17:48:20.000Z | tests/contrib/operators/test_mssql_to_gcs_operator.py | is2co/airflow | 48f400541d19601d1571d07cf65f55c649884b87 | [
"Apache-2.0"
] | 1 | 2019-08-21T10:13:08.000Z | 2019-08-21T10:13:08.000Z | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from airflow.contrib.operators.mssql_to_gcs import \
MsSqlToGoogleCloudStorageOperator
from tests.compat import mock
TASK_ID = 'test-mssql-to-gcs'
MSSQL_CONN_ID = 'mssql_conn_test'
SQL = 'select 1'
BUCKET = 'gs://test'
JSON_FILENAME = 'test_{}.ndjson'
GZIP = False
ROWS = [
('mock_row_content_1', 42),
('mock_row_content_2', 43),
('mock_row_content_3', 44)
]
CURSOR_DESCRIPTION = (
('some_str', 0, None, None, None, None, None),
('some_num', 3, None, None, None, None, None)
)
NDJSON_LINES = [
b'{"some_num": 42, "some_str": "mock_row_content_1"}\n',
b'{"some_num": 43, "some_str": "mock_row_content_2"}\n',
b'{"some_num": 44, "some_str": "mock_row_content_3"}\n'
]
SCHEMA_FILENAME = 'schema_test.json'
SCHEMA_JSON = [
b'[{"mode": "NULLABLE", "name": "some_str", "type": "STRING"}, ',
b'{"mode": "NULLABLE", "name": "some_num", "type": "INTEGER"}]'
]
class MsSqlToGoogleCloudStorageOperatorTest(unittest.TestCase):
    def test_init(self):
        """Test MsSqlToGoogleCloudStorageOperator instance is properly initialized."""
        op = MsSqlToGoogleCloudStorageOperator(
            task_id=TASK_ID, sql=SQL, bucket=BUCKET, filename=JSON_FILENAME)
        self.assertEqual(op.task_id, TASK_ID)
        self.assertEqual(op.sql, SQL)
        self.assertEqual(op.bucket, BUCKET)
        self.assertEqual(op.filename, JSON_FILENAME)
    @mock.patch('airflow.contrib.operators.mssql_to_gcs.MsSqlHook')
    @mock.patch('airflow.contrib.operators.sql_to_gcs.GoogleCloudStorageHook')
    def test_exec_success_json(self, gcs_hook_mock_class, mssql_hook_mock_class):
        """Test successful run of execute function for JSON"""
        op = MsSqlToGoogleCloudStorageOperator(
            task_id=TASK_ID,
            mssql_conn_id=MSSQL_CONN_ID,
            sql=SQL,
            bucket=BUCKET,
            filename=JSON_FILENAME)
        # Feed the canned ROWS through the mocked cursor iterator.
        mssql_hook_mock = mssql_hook_mock_class.return_value
        mssql_hook_mock.get_conn().cursor().__iter__.return_value = iter(ROWS)
        mssql_hook_mock.get_conn().cursor().description = CURSOR_DESCRIPTION
        gcs_hook_mock = gcs_hook_mock_class.return_value
        # Intercept the GCS upload and verify the ndjson payload in place.
        def _assert_upload(bucket, obj, tmp_filename, mime_type=None, gzip=False):
            self.assertEqual(BUCKET, bucket)
            self.assertEqual(JSON_FILENAME.format(0), obj)
            self.assertEqual('application/json', mime_type)
            self.assertEqual(GZIP, gzip)
            with open(tmp_filename, 'rb') as file:
                self.assertEqual(b''.join(NDJSON_LINES), file.read())
        gcs_hook_mock.upload.side_effect = _assert_upload
        op.execute(None)
        mssql_hook_mock_class.assert_called_once_with(mssql_conn_id=MSSQL_CONN_ID)
        mssql_hook_mock.get_conn().cursor().execute.assert_called_once_with(SQL)
@mock.patch('airflow.contrib.operators.mssql_to_gcs.MsSqlHook')
@mock.patch('airflow.contrib.operators.sql_to_gcs.GoogleCloudStorageHook')
def test_file_splitting(self, gcs_hook_mock_class, mssql_hook_mock_class):
"""Test that ndjson is split by approx_max_file_size_bytes param."""
mssql_hook_mock = mssql_hook_mock_class.return_value
mssql_hook_mock.get_conn().cursor().__iter__.return_value = iter(ROWS)
mssql_hook_mock.get_conn().cursor().description = CURSOR_DESCRIPTION
gcs_hook_mock = gcs_hook_mock_class.return_value
expected_upload = {
JSON_FILENAME.format(0): b''.join(NDJSON_LINES[:2]),
JSON_FILENAME.format(1): NDJSON_LINES[2],
}
def _assert_upload(bucket, obj, tmp_filename, mime_type=None, gzip=False):
self.assertEqual(BUCKET, bucket)
self.assertEqual('application/json', mime_type)
self.assertEqual(GZIP, gzip)
with open(tmp_filename, 'rb') as file:
self.assertEqual(expected_upload[obj], file.read())
gcs_hook_mock.upload.side_effect = _assert_upload
op = MsSqlToGoogleCloudStorageOperator(
task_id=TASK_ID,
sql=SQL,
bucket=BUCKET,
filename=JSON_FILENAME,
approx_max_file_size_bytes=len(expected_upload[JSON_FILENAME.format(0)]))
op.execute(None)
@mock.patch('airflow.contrib.operators.mssql_to_gcs.MsSqlHook')
@mock.patch('airflow.contrib.operators.sql_to_gcs.GoogleCloudStorageHook')
def test_schema_file(self, gcs_hook_mock_class, mssql_hook_mock_class):
"""Test writing schema files."""
mssql_hook_mock = mssql_hook_mock_class.return_value
mssql_hook_mock.get_conn().cursor().__iter__.return_value = iter(ROWS)
mssql_hook_mock.get_conn().cursor().description = CURSOR_DESCRIPTION
gcs_hook_mock = gcs_hook_mock_class.return_value
def _assert_upload(bucket, obj, tmp_filename, mime_type, gzip): # pylint: disable=unused-argument
if obj == SCHEMA_FILENAME:
with open(tmp_filename, 'rb') as file:
self.assertEqual(b''.join(SCHEMA_JSON), file.read())
gcs_hook_mock.upload.side_effect = _assert_upload
op = MsSqlToGoogleCloudStorageOperator(
task_id=TASK_ID,
sql=SQL,
bucket=BUCKET,
filename=JSON_FILENAME,
schema_filename=SCHEMA_FILENAME)
op.execute(None)
# once for the file and once for the schema
self.assertEqual(2, gcs_hook_mock.upload.call_count)
| 40.509677 | 106 | 0.691352 |
import unittest
from airflow.contrib.operators.mssql_to_gcs import \
MsSqlToGoogleCloudStorageOperator
from tests.compat import mock
TASK_ID = 'test-mssql-to-gcs'
MSSQL_CONN_ID = 'mssql_conn_test'
SQL = 'select 1'
BUCKET = 'gs://test'
JSON_FILENAME = 'test_{}.ndjson'
GZIP = False
ROWS = [
('mock_row_content_1', 42),
('mock_row_content_2', 43),
('mock_row_content_3', 44)
]
CURSOR_DESCRIPTION = (
('some_str', 0, None, None, None, None, None),
('some_num', 3, None, None, None, None, None)
)
NDJSON_LINES = [
b'{"some_num": 42, "some_str": "mock_row_content_1"}\n',
b'{"some_num": 43, "some_str": "mock_row_content_2"}\n',
b'{"some_num": 44, "some_str": "mock_row_content_3"}\n'
]
SCHEMA_FILENAME = 'schema_test.json'
SCHEMA_JSON = [
b'[{"mode": "NULLABLE", "name": "some_str", "type": "STRING"}, ',
b'{"mode": "NULLABLE", "name": "some_num", "type": "INTEGER"}]'
]
class MsSqlToGoogleCloudStorageOperatorTest(unittest.TestCase):
def test_init(self):
op = MsSqlToGoogleCloudStorageOperator(
task_id=TASK_ID, sql=SQL, bucket=BUCKET, filename=JSON_FILENAME)
self.assertEqual(op.task_id, TASK_ID)
self.assertEqual(op.sql, SQL)
self.assertEqual(op.bucket, BUCKET)
self.assertEqual(op.filename, JSON_FILENAME)
@mock.patch('airflow.contrib.operators.mssql_to_gcs.MsSqlHook')
@mock.patch('airflow.contrib.operators.sql_to_gcs.GoogleCloudStorageHook')
def test_exec_success_json(self, gcs_hook_mock_class, mssql_hook_mock_class):
op = MsSqlToGoogleCloudStorageOperator(
task_id=TASK_ID,
mssql_conn_id=MSSQL_CONN_ID,
sql=SQL,
bucket=BUCKET,
filename=JSON_FILENAME)
mssql_hook_mock = mssql_hook_mock_class.return_value
mssql_hook_mock.get_conn().cursor().__iter__.return_value = iter(ROWS)
mssql_hook_mock.get_conn().cursor().description = CURSOR_DESCRIPTION
gcs_hook_mock = gcs_hook_mock_class.return_value
def _assert_upload(bucket, obj, tmp_filename, mime_type=None, gzip=False):
self.assertEqual(BUCKET, bucket)
self.assertEqual(JSON_FILENAME.format(0), obj)
self.assertEqual('application/json', mime_type)
self.assertEqual(GZIP, gzip)
with open(tmp_filename, 'rb') as file:
self.assertEqual(b''.join(NDJSON_LINES), file.read())
gcs_hook_mock.upload.side_effect = _assert_upload
op.execute(None)
mssql_hook_mock_class.assert_called_once_with(mssql_conn_id=MSSQL_CONN_ID)
mssql_hook_mock.get_conn().cursor().execute.assert_called_once_with(SQL)
@mock.patch('airflow.contrib.operators.mssql_to_gcs.MsSqlHook')
@mock.patch('airflow.contrib.operators.sql_to_gcs.GoogleCloudStorageHook')
def test_file_splitting(self, gcs_hook_mock_class, mssql_hook_mock_class):
mssql_hook_mock = mssql_hook_mock_class.return_value
mssql_hook_mock.get_conn().cursor().__iter__.return_value = iter(ROWS)
mssql_hook_mock.get_conn().cursor().description = CURSOR_DESCRIPTION
gcs_hook_mock = gcs_hook_mock_class.return_value
expected_upload = {
JSON_FILENAME.format(0): b''.join(NDJSON_LINES[:2]),
JSON_FILENAME.format(1): NDJSON_LINES[2],
}
def _assert_upload(bucket, obj, tmp_filename, mime_type=None, gzip=False):
self.assertEqual(BUCKET, bucket)
self.assertEqual('application/json', mime_type)
self.assertEqual(GZIP, gzip)
with open(tmp_filename, 'rb') as file:
self.assertEqual(expected_upload[obj], file.read())
gcs_hook_mock.upload.side_effect = _assert_upload
op = MsSqlToGoogleCloudStorageOperator(
task_id=TASK_ID,
sql=SQL,
bucket=BUCKET,
filename=JSON_FILENAME,
approx_max_file_size_bytes=len(expected_upload[JSON_FILENAME.format(0)]))
op.execute(None)
@mock.patch('airflow.contrib.operators.mssql_to_gcs.MsSqlHook')
@mock.patch('airflow.contrib.operators.sql_to_gcs.GoogleCloudStorageHook')
def test_schema_file(self, gcs_hook_mock_class, mssql_hook_mock_class):
mssql_hook_mock = mssql_hook_mock_class.return_value
mssql_hook_mock.get_conn().cursor().__iter__.return_value = iter(ROWS)
mssql_hook_mock.get_conn().cursor().description = CURSOR_DESCRIPTION
gcs_hook_mock = gcs_hook_mock_class.return_value
def _assert_upload(bucket, obj, tmp_filename, mime_type, gzip):
if obj == SCHEMA_FILENAME:
with open(tmp_filename, 'rb') as file:
self.assertEqual(b''.join(SCHEMA_JSON), file.read())
gcs_hook_mock.upload.side_effect = _assert_upload
op = MsSqlToGoogleCloudStorageOperator(
task_id=TASK_ID,
sql=SQL,
bucket=BUCKET,
filename=JSON_FILENAME,
schema_filename=SCHEMA_FILENAME)
op.execute(None)
self.assertEqual(2, gcs_hook_mock.upload.call_count)
| true | true |
f7f9812cea08e9a63697df520d7b14ad0213b115 | 238 | py | Python | examples/whitenoise.py | ianling/wavebender | df66a466a73f609a5b71700836f7f344bda409de | [
"MIT"
] | null | null | null | examples/whitenoise.py | ianling/wavebender | df66a466a73f609a5b71700836f7f344bda409de | [
"MIT"
] | null | null | null | examples/whitenoise.py | ianling/wavebender | df66a466a73f609a5b71700836f7f344bda409de | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from sys import stdout
from wavebender import *
channels = ((white_noise(amplitude=0.1),),)
samples = compute_samples(channels, 44100 * 60 * 1)
write_wavefile(stdout.buffer, samples, 44100 * 60 * 1, nchannels=1)
| 23.8 | 67 | 0.731092 |
from sys import stdout
from wavebender import *
channels = ((white_noise(amplitude=0.1),),)
samples = compute_samples(channels, 44100 * 60 * 1)
write_wavefile(stdout.buffer, samples, 44100 * 60 * 1, nchannels=1)
| true | true |
f7f981d8ac7649d2cb17c4e71307cebdafe2372d | 430 | py | Python | day030/key-error-handling/main.py | rainleander/100daysofcode | 0391170af80b251e7fb3a78a60b55c3145e4551a | [
"Apache-2.0"
] | 8 | 2021-01-25T09:14:41.000Z | 2021-11-24T12:29:26.000Z | day030/key-error-handling/main.py | rainleander/100daysofcode | 0391170af80b251e7fb3a78a60b55c3145e4551a | [
"Apache-2.0"
] | null | null | null | day030/key-error-handling/main.py | rainleander/100daysofcode | 0391170af80b251e7fb3a78a60b55c3145e4551a | [
"Apache-2.0"
] | null | null | null | facebook_posts = [
{'Likes': 21, 'Comments': 2},
{'Likes': 13, 'Comments': 2, 'Shares': 1},
{'Likes': 33, 'Comments': 8, 'Shares': 3},
{'Comments': 4, 'Shares': 2},
{'Comments': 1, 'Shares': 1},
{'Likes': 19, 'Comments': 3}
]
total_likes = 0
for post in facebook_posts:
try:
total_likes = total_likes + post['Likes']
except KeyError:
total_likes = total_likes
print(total_likes)
| 22.631579 | 49 | 0.569767 | facebook_posts = [
{'Likes': 21, 'Comments': 2},
{'Likes': 13, 'Comments': 2, 'Shares': 1},
{'Likes': 33, 'Comments': 8, 'Shares': 3},
{'Comments': 4, 'Shares': 2},
{'Comments': 1, 'Shares': 1},
{'Likes': 19, 'Comments': 3}
]
total_likes = 0
for post in facebook_posts:
try:
total_likes = total_likes + post['Likes']
except KeyError:
total_likes = total_likes
print(total_likes)
| true | true |
f7f9821a027108b6432d49abb278aa65fe04225e | 1,604 | py | Python | pylibssh/event.py | arisada/pylibssh | 7110499e1463f4edc24e8f2736b2d71f5c34a0ae | [
"BSD-3-Clause"
] | 1 | 2017-03-28T12:23:11.000Z | 2017-03-28T12:23:11.000Z | pylibssh/event.py | arisada/pylibssh | 7110499e1463f4edc24e8f2736b2d71f5c34a0ae | [
"BSD-3-Clause"
] | null | null | null | pylibssh/event.py | arisada/pylibssh | 7110499e1463f4edc24e8f2736b2d71f5c34a0ae | [
"BSD-3-Clause"
] | null | null | null | from . import api
from api import lib
class EventFdCallbacks(object):
fd = None
def __init__ (self, fd):
self.fd = fd
def pollEvent(self, fd, revent):
if(self.fd.fileno() != fd):
raise RuntimeException("Different fd in callbacks")
if (revent & select.POLLIN):
self.pollInEvent()
if (revent & select.POLLERR):
self.pollErrEvent()
if (revent & select.POLLOUT):
self.pollOutEvent()
return 0
def pollInEvent(self):
pass
def pollOutEvent(self):
pass
def pollErrEvent(self):
pass
def _event_function(fd, revent, userargp):
return userargp.pollEvent(fd, revent)
_event_cb_object = api.event_cb(_event_function)
class Event(object):
event = None
def __init__(self):
self.event = lib.ssh_event_new()
def __del__(self):
if (self.event is not None):
lib.ssh_event_free(self.event)
self.event=None
def addSession(self, session):
lib.ssh_event_add_session(self.event, session.session)
def removeSession(self, session):
lib.ssh_event_remove_session(self.event, session.session)
def doPoll(self, timeout=-1):
rc = lib.ssh_event_dopoll(self.event, timeout)
if (rc == SSH_ERROR):
raise RuntimeError("event_dopoll failed")
return rc
def addFd(self, fdcallback, events):
lib.ssh_event_add_fd(self.event, fdcallback.fd.fileno(), events, _event_cb_object, fdcallback)
def removeFd(self, fd):
lib.ssh_event_remove_fd(self.event, fd)
| 30.846154 | 102 | 0.63404 | from . import api
from api import lib
class EventFdCallbacks(object):
fd = None
def __init__ (self, fd):
self.fd = fd
def pollEvent(self, fd, revent):
if(self.fd.fileno() != fd):
raise RuntimeException("Different fd in callbacks")
if (revent & select.POLLIN):
self.pollInEvent()
if (revent & select.POLLERR):
self.pollErrEvent()
if (revent & select.POLLOUT):
self.pollOutEvent()
return 0
def pollInEvent(self):
pass
def pollOutEvent(self):
pass
def pollErrEvent(self):
pass
def _event_function(fd, revent, userargp):
return userargp.pollEvent(fd, revent)
_event_cb_object = api.event_cb(_event_function)
class Event(object):
event = None
def __init__(self):
self.event = lib.ssh_event_new()
def __del__(self):
if (self.event is not None):
lib.ssh_event_free(self.event)
self.event=None
def addSession(self, session):
lib.ssh_event_add_session(self.event, session.session)
def removeSession(self, session):
lib.ssh_event_remove_session(self.event, session.session)
def doPoll(self, timeout=-1):
rc = lib.ssh_event_dopoll(self.event, timeout)
if (rc == SSH_ERROR):
raise RuntimeError("event_dopoll failed")
return rc
def addFd(self, fdcallback, events):
lib.ssh_event_add_fd(self.event, fdcallback.fd.fileno(), events, _event_cb_object, fdcallback)
def removeFd(self, fd):
lib.ssh_event_remove_fd(self.event, fd)
| true | true |
f7f98225c32f97b3bf539de83c54ea5542da77c6 | 862 | py | Python | switch_model/wecc/get_inputs/post_process_steps/reserve_technologies.py | REAM-lab/switch | 00af4508e34bdc460925950808dc7f87a0a064ff | [
"ECL-2.0",
"Apache-2.0"
] | 4 | 2021-06-03T00:23:57.000Z | 2022-02-05T13:51:00.000Z | switch_model/wecc/get_inputs/post_process_steps/reserve_technologies.py | REAM-lab/switch | 00af4508e34bdc460925950808dc7f87a0a064ff | [
"ECL-2.0",
"Apache-2.0"
] | 61 | 2021-05-07T23:46:20.000Z | 2022-02-17T01:19:17.000Z | switch_model/wecc/get_inputs/post_process_steps/reserve_technologies.py | RAEL-Berkeley/switch | 00af4508e34bdc460925950808dc7f87a0a064ff | [
"ECL-2.0",
"Apache-2.0"
] | 4 | 2018-02-05T02:01:01.000Z | 2020-11-17T22:14:26.000Z | """ This post-process selects which technologies can provide reserves"""
# Standard packages
import os
import shutil
# Third-party packages
import pandas as pd
from switch_model.wecc.get_inputs.register_post_process import post_process_step
@post_process_step(
msg="Removing fossil fuels from reserves.",
)
def post_process(_):
"""This function sets to zero the column that allows each candidate technology to
provide"""
fname = "generation_projects_info.csv"
df = pd.read_csv(fname)
# Energy sources to exclude from reserves
filter_techs = ["ResidualFuelOil", "Gas", "DistillateFuelOil", "Coal"]
# Set to zero column that allows technology to provide reserves
df.loc[
df["gen_energy_source"].isin(filter_techs), "gen_can_provide_cap_reserves"
] = 0
# Save file again
df.to_csv(fname, index=False)
| 26.9375 | 85 | 0.733179 |
import os
import shutil
import pandas as pd
from switch_model.wecc.get_inputs.register_post_process import post_process_step
@post_process_step(
msg="Removing fossil fuels from reserves.",
)
def post_process(_):
fname = "generation_projects_info.csv"
df = pd.read_csv(fname)
filter_techs = ["ResidualFuelOil", "Gas", "DistillateFuelOil", "Coal"]
df.loc[
df["gen_energy_source"].isin(filter_techs), "gen_can_provide_cap_reserves"
] = 0
df.to_csv(fname, index=False)
| true | true |
f7f982fdde0a534d0ccd114800ec898f43ca4ea5 | 365 | py | Python | mmpose/utils/collect_env.py | jlgzb/mmpose | 0ecf06e3580f141f6ab44645768a0d6d8ba48383 | [
"Apache-2.0"
] | 367 | 2022-01-14T03:32:25.000Z | 2022-03-31T04:48:20.000Z | mmpose/utils/collect_env.py | jlgzb/mmpose | 0ecf06e3580f141f6ab44645768a0d6d8ba48383 | [
"Apache-2.0"
] | 27 | 2022-01-27T07:12:49.000Z | 2022-03-31T04:31:13.000Z | mmpose/utils/collect_env.py | jlgzb/mmpose | 0ecf06e3580f141f6ab44645768a0d6d8ba48383 | [
"Apache-2.0"
] | 53 | 2022-01-18T11:21:43.000Z | 2022-03-31T06:42:41.000Z | from mmcv.utils import collect_env as collect_basic_env
from mmcv.utils import get_git_hash
import mmpose
def collect_env():
env_info = collect_basic_env()
env_info['MMPose'] = (mmpose.__version__ + '+' + get_git_hash(digits=7))
return env_info
if __name__ == '__main__':
for name, val in collect_env().items():
print(f'{name}: {val}')
| 22.8125 | 76 | 0.69589 | from mmcv.utils import collect_env as collect_basic_env
from mmcv.utils import get_git_hash
import mmpose
def collect_env():
env_info = collect_basic_env()
env_info['MMPose'] = (mmpose.__version__ + '+' + get_git_hash(digits=7))
return env_info
if __name__ == '__main__':
for name, val in collect_env().items():
print(f'{name}: {val}')
| true | true |
f7f983c8808f2a82989a8ce0a5fa7c82b51eb0e7 | 27,077 | py | Python | Python Simulator/Frontier Exploration/backup/1 (initial backup)/Faigl.py | yiorgosk/Path-Planning-Simulator | 84847d0068a3fd6fa30098b99a75dff237768a73 | [
"MIT"
] | 50 | 2018-11-15T08:42:49.000Z | 2022-03-20T10:51:58.000Z | Python Simulator/Frontier Exploration/backup/1 (initial backup)/Faigl.py | yiorgosk/Path-Planning-Simulator | 84847d0068a3fd6fa30098b99a75dff237768a73 | [
"MIT"
] | null | null | null | Python Simulator/Frontier Exploration/backup/1 (initial backup)/Faigl.py | yiorgosk/Path-Planning-Simulator | 84847d0068a3fd6fa30098b99a75dff237768a73 | [
"MIT"
] | 24 | 2019-02-03T06:11:58.000Z | 2022-03-15T06:18:39.000Z | # The MIT License (MIT)
# Copyright (c) 2015 INSPIRE Lab, BITS Pilani
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Provides an implementation of the Frontier Clustering algorithm.
"""
import math
import random
import sys
import time
import AStar
import Cluster
import GridWorld
import Hungarian
import kmeans
import Robot
from collections import defaultdict
from tsp_solver.greedy import solve_tsp
# The Faigl class
class Faigl:
"""
height and width specify the dimensions of the environment
obstacles is a list of locations which are to be initialized as obstacles
R specifies the range of communication
numRobots specifies the number of robot objects to be initialized
initLocs specifies the initial locations for each of the robots
k specifies the size of the population of configuration changes
T specifies the number of time steps to run the simulation for
base specifies the coordinates of the base station
"""
def __init__(self, height, width, obstacles, numRobots, initLocs, T = 10):
# Initialize the grid world
self.gridworld = GridWorld.GridWorld(height, width, obstacles)
self.centroid = []
self.cluster = kmeans.kmeans()
# Initialize a list of robots
self.robots = [Robot.Robot(j+1, -1, -1) for j in range(numRobots)]
# Initialize the starting location of each robot
i = 0
#self.allotted=Cell.Cell(0,0)
for initLoc in initLocs:
# If any robot is placed out of bounds or on an obstacle, print an error message and exit
currentPoint = (initLoc[0], initLoc[1])
if not self.gridworld.inBounds(currentPoint) or not self.gridworld.passable(currentPoint):
print 'Initial location', currentPoint, 'is not possible'
sys.exit(-1)
# Otherwise, modify the current location of the robot to currentPoint
self.robots[i].setLocation(initLoc[0], initLoc[1])
# Update that particular grid cell's occupied status
self.gridworld.cells[initLoc[0]][initLoc[1]].occupied = True
self.gridworld.cells[initLoc[0]][initLoc[1]].visited = True
i += 1
# Initialize other parameters of the algorithm
# Height of the Grid
self.height = height
# Width of the Grid
self.width = width
# List of Clusters (obtained using K-Means clustering)
self.frontierClusters=[]
# Number of Robots
self.numRobots = numRobots
# Parameter for adaptive K-Means
self.K = self.numRobots
# Time steps for which the algorithm is run
self.T = T
# Variable to indicate whether reclustering should be performed
self.reclusterFlag = True
# Centroids of clusters
self.centroids = []
# Number of time steps elapsed
self.t = 0
# Time taken to exhaust the frontier
self.completionTime = 0
# Set to True on completion
self.completedFlag = False
# List of Frontier Cells
self.frontier = []
# New Positions of each of the Robots
self.newPos = []
# Population of Configuration Changes
self.cfgc = []
# Number of Stalls (Used only for simulating Rooker's work)
self.stalls = 0
# Keeps track of whether the Final Stats were displayed
self.printedFinalStats = False
# Keeps track of Possible Configurations
self.possible = []
# Keeps track of Number of Cells Visited
# (Initialize this to self.numRobots, since the starting locations of the robots are considered visited)
self.visited = self.numRobots
self.sumNewVisited = numRobots
# Flag to switch between A* and Manhattan distance
self.aStarFlag = False
# Define value for infinity
self.infinity = 10000000
# Flag to switch between Hungarian and Greedy assignment
self.hungarianFlag = True
# Flag to switch between Greedy and Random Motion Planner
self.randomMotionPlan = False
# Variable to store the number of robots that do not make a move in the current iteration
self.currentEights = 0
# Variable to store the total number of moves made by the robots
self.totalMoves = 0
# Variable to store the volume of cells allocated
self.volume = 0
# Variable to store the percentage of allotted cells explored
self.exploredCells = 0
# Variable to store the number of times clustering occurred
self.timesClustered = 0
# Defines the threshold percentage
self.thresholdPercentage = 1.0
# Flag to determine if reallocation is required
self.reallocateFlag = False
# Flag to determine if printing of logs is required
self.printLog = False
# We also initialize an instance of AStar, which helps us in computing Manhattan distance
self.astar = AStar.AStar()
# Method to print the current gridworld to the output descriptor
def printGrid(self):
## Comment this later
frontier = self.computeFrontier()
##
print 'occupied cells:'
for i in range(self.height):
for j in range(self.width):
if self.gridworld.cells[i][j].occupied == True:
print i, j
print 'robot locations:'
for robot in self.robots:
print robot.curX, robot.curY
for i in range(self.height):
for j in range(self.width):
# If the current cell is an obstacle, print #
if self.gridworld.cells[i][j].obstacle == True:
sys.stdout.write(' # ')
# If the current cell is occupied by a robot, print its id
elif self.gridworld.cells[i][j].occupied == True:
robotId = 0
for robot in self.robots:
if robot.curX == i and robot.curY == j:
robotId = robot.id
temp = ' ' + str(robotId) + ' '
sys.stdout.write(temp)
# If the current cell is a frontier, print a |
elif (i, j) in frontier:
sys.stdout.write(' | ')
# Otherwise, print -
else:
if self.gridworld.cells[i][j].visited == True:
sys.stdout.write(' . ')
else:
sys.stdout.write(' - ')
sys.stdout.write('\n')
# Method to print the status of each cell to the output descriptor
def printVisitedStatus(self):
visited = 0
visitable = self.height * self.width
for i in range(self.height):
for j in range(self.width):
# If the current cell is an obstacle, print #
if self.gridworld.cells[i][j].visited == True:
sys.stdout.write(' 1 ')
visited += 1
# If the current cell is a frontier, print a |
else:
sys.stdout.write(' 0 ')
if self.gridworld.cells[i][j].obstacle == True:
visitable -= 1
sys.stdout.write('\n')
print 'visited:', visited, ' of ', visitable
print 'stalls:', self.stalls
return self.completionTime
# Method to print the final statistics to the output descriptor
# The final stats should be printed only once
# i.e., either when T steps have elapsed or when the frontier is empty
def printFinalStats(self, force = 0):
# if self.t != self.T:
# if self.completedFlag == False or self.printedFinalStats == True:
# return
# print 'Time taken:', self.t
visitednow=0
visitable = self.height * self.width
for i in range(self.height):
for j in range(self.width):
# If the current cell is an obstacle, print #
if self.gridworld.cells[i][j].visited == True:
visitednow += 1
# If the current cell is a frontier, print a |
else:
if self.gridworld.cells[i][j].obstacle == True:
visitable -= 1
metric = self.visited / visitable
# print (visitednow - self.visited)
if self.printLog == True:
print 'visitednow', visitednow ##
redundancy = (self.numRobots - self.currentEights) - (visitednow - self.visited)
if self.printLog == True:
print 'redundancy:', redundancy ##
# print self.currentEights
self.exploredCells += (visitednow - self.visited)
self.visited = visitednow
numMoves = self.numRobots - self.currentEights
self.totalMoves += numMoves
if self.printLog == True:
print 'totalMoves:', self.totalMoves ##
print 'self.exploredCells:', self.exploredCells ##
self.printedFinalStats = True
return
# Method to compute the frontier
def computeFrontier(self):
frontier = []
# Iterate over all cells in the grid
for i in range(self.height):
for j in range(self.width):
# We compute 8-neighbors for only those cells that haven't been visited or are obstacles
# Only such cells are possible candidates for the frontier
if self.gridworld.cells[i][j].visited == False and self.gridworld.cells[i][j].obstacle == False:
point = (i, j)
neighbors = self.gridworld.get8Neighbors(point)
# Now we see if there is at least one neighbor of the current cell which has been visited
# In such a case, the current cell would become a frontier cell
frontierFlag = False
for nbhr in neighbors:
if self.gridworld.cells[nbhr[0]][nbhr[1]].visited == True:
frontierFlag = True
if frontierFlag == True:
frontier.append((i, j))
return frontier
# Method to delete lone cells
def deleteLoneCells(self):
# Kill cells using the rules from Conway's Game of Life
for i in range(self.height):
for j in range(self.width):
if self.gridworld.cells[i][j].visited == True:
continue
else:
visitedNeighbors = 0
neighbors = self.gridworld.get8Neighbors((i, j))
for neighbor in neighbors:
if self.gridworld.cells[neighbor[0]][neighbor[1]].visited == True:
visitedNeighbors += 1
if visitedNeighbors == len(neighbors):
self.gridworld.cells[i][j].visited = True
frontier = self.computeFrontier()
return frontier
# Method to compute the new locations of each robot, given a command vector
def getNewPositions(self, cmd):
newPos = []
for i in range(self.numRobots):
nextX, nextY = self.gridworld.getNextPos(self.robots[i].curX, self.robots[i].curY, cmd[i])
newPos.append((nextX, nextY))
return newPos
# Method to check if a given configuration is possible
# We return an integer that describes the nature of the impossibility of a configuration
# 0 denotes that all robots move into non-obstacle cells
# 1 denotes that two robots occupy the same cell
# 2 denotes that one robot encounters an obstacle
def isCfgPossible(self, cfg):
# We first compute the new positions and see if two next positions coincide
# newPos = self.getNewPositions(cfg)
if any(self.newPos.count(element) > 1 for element in self.newPos) == True:
return 1
# Now we check if some robot encounters an obstacle
retval = 0
for i in range(self.numRobots):
if self.gridworld.checkCommand(self.robots[i].curX, self.robots[i].curY, cfg[i]) == False:
retval = 2
# Otherwise, the configuration is possible
return retval
def allocateFrontiers(self):
cmd = []
# Allocate robots to clusters
if self.reallocateFlag == True:
"""
If hungarianFlag is set to True, perform Hungarian assignment
Else, perform greedy assignment.
"""
# Hungarian assignment
if self.hungarianFlag == True:
# Determine the cost matrix
costMatrix = []
# Get the distance of each robot to its nearest cell in the cluster
for i in range(self.numRobots):
closestDist = self.infinity
cost = []
for cluster in self.frontierClusters:
closestDistToCluster = self.infinity
for point in cluster:
if self.aStarFlag == True:
path, distanceToPoint = self.astar.aStarSearch(self.gridworld, (self.robots[i].curX, self.robots[i].curY), (point[0], point[1]))
distanceToPoint = distanceToPoint[(point[0], point[1])]
else:
distanceToPoint = abs(self.robots[i].curX - point[0]) + abs(self.robots[i].curY - point[1])
if distanceToPoint < closestDistToCluster:
closestDistToCluster = distanceToPoint
cost.append(closestDistToCluster)
costMatrix.append(cost)
costMatrix = zip(*costMatrix)
# print 'costMatrixHere:', costMatrix, self.K
# for centroid in self.centroids:
# # Denotes the distance from each robot to the current centroid
# cost = []
# for robot in self.robots:
# # Note the confusing notation
# # manhattan has been used for bot manhattan and A* distances
# if self.aStarFlag == True:
# path, manhattan = self.astar.aStarSearch(self.gridworld, (robot.curX, robot.curY), (centroid.x, centroid.y))
# manhattan = manhattan[(centroid.x, centroid.y)]
# else:
# manhattan = abs(robot.curX - centroid.x) + abs(robot.curY - centroid.y)
# cost.append(manhattan)
# costMatrix.append(cost)
if self.K < self.numRobots:
additionalRows = self.numRobots - self.K
cost = [0 for j in range(self.numRobots)]
for i in range(additionalRows):
costMatrix.append(cost)
# if self.K < self.numRobots:
# print 'costMatrix: ', costMatrix, 'self.K:', self.K
"""Create an instance of the Hungarian assignment class"""
hungarian = Hungarian.Hungarian()
hungarian.calculate(costMatrix)
results = hungarian.get_results()
# print 'Total Potential: ', hungarian.get_total_potential()
# if self.K < self.numRobots:
# print 'Hungarian results:', results
# Choose only those allotments where the id of the cluster is less than self.K
if self.K < self.numRobots:
def filterFunction(parameter):
return parameter[0] < self.K-1
results = filter(filterFunction, results)
# print 'results:', results
# Perform the assignment
for result in results:
self.centroids[result[0]].allotted = result[1]
robotList = [i for i in range(self.numRobots)]
for result in results:
if result[1] in robotList:
robotList.remove(result[1])
# print 'robotList', robotList, self.K
for robot in robotList:
clusterObject = Cluster.Cluster(self.robots[robot].curX, self.robots[robot].curY)
clusterObject.allotted = robot
self.centroids.append(clusterObject)
# print 'prevClusters:', self.frontierClusters
self.frontierClusters.append([(self.robots[robot].curX, self.robots[robot].curY)])
# if robot < self.K:
# self.frontierClusters[robot] = ([(self.robots[robot].curX, self.robots[robot].curY)])
# else:
# self.frontierClusters.append([(self.robots[robot].curX, self.robots[robot].curY)])
# print 'clusters:', self.frontierClusters
# Greedy assignment
else:
totalPotential = 0
# Iterate over the list of cluster centroids
for j in range(self.numRobots):
robotPos = (self.robots[0].curX, self.robots[0].curY)
centroidPos = (self.centroids[j].x, self.centroids[j].y)
if self.aStarFlag == True:
path, tmp = self.astar.aStarSearch(self.gridworld, robotPos, (self.infinity, self.infinity))
# tmp = tmp[(self.infinity, self.infinity)]
tmp = self.infinity
else:
tmp = self.infinity
# Iterate over the list of robots
for i in range(self.numRobots):
robotPos = (self.robots[i].curX, self.robots[i].curY)
if self.aStarFlag == True:
path, tempdist = self.astar.aStarSearch(self.gridworld, robotPos, centroidPos)
tempdist = tempdist[centroidPos]
else:
tempdist = abs(robotPos[0]-centroidPos[0]) + abs(robotPos[1]-centroidPos[1])
allottedflag = False
# Check if the robot has already been allotted a centroid
for k in range(self.numRobots):
if self.centroids[k].allotted==i:
allottedflag = True
# If it hasn't been allotted
if tmp >= tempdist and allottedflag == False:
tmp = tempdist
self.centroids[j].allotted = i
totalPotential += tmp
# print 'Total Potential: ', totalPotential
"Motion planning"
shouldRecluster = False
for i in range(self.numRobots):
# If randomMotionPlan is True, use random motion planning
# i.e., visit each cluster cell in the order in which K-Means returned it
if self.randomMotionPlan == True:
isJobless = True
"""
Determine the cluster that was allotted the ith robot
Store it in temp
"""
for j in range(self.numRobots):
if self.centroids[j].allotted==i:
temp=j
break
# print 'robot', i, 'given cluster', temp
# Iterate over each frontier cell
for cellgen in self.frontierClusters[temp]:
# Mark the cell as not done
thisisdone=0
# Check if the cell has been visited already
# If yes mark this as done
for j in range(self.gridworld.height):
for k in range(self.gridworld.width):
if self.gridworld.cells[j][k].x == cellgen[0] and self.gridworld.cells[j][k].y == cellgen[1] and self.gridworld.cells[j][k].visited==True:
thisisdone=1
# If it has not been marked as done, allot this cell to the robot
# The variable 'isJobless' denotes if the robots frontier cluster is jobless now
if thisisdone==0:
allotted_frontier=cellgen
isJobless = False
break
# Otherwise, use the greedy motion plan
else:
isJobless = True
# To each robot, allocate the closest cell in its allotted frontier
# For that, initialize the least distance to infinity
dist = self.infinity ## If any error occurs, maybe commenting this line would resolve it
# Store the cluster number allotted to the ith robot
for j in range(self.numRobots):
if self.centroids[j].allotted == i:
allottedCluster = j
break
# print 'robot', i, 'given cluster', j
dists = []
if self.aStarFlag == True:
path, dist = self.astar.aStarSearch(self.gridworld, (0, 0), (self.infinity, self.infinity))
dist = self.infinity
# print 'dist:', dist
else:
dist = self.infinity
# Iterate over each frontier cell in the ith robot's cluster
for cellgen in self.frontierClusters[allottedCluster]:
# Mark the cell as not done
thisIsDone = 0
robotPos = (self.robots[i].curX, self.robots[i].curY)
cellPos = (cellgen[0], cellgen[1])
# Check if the cell has been visited already
# If yes mark this as done
if self.gridworld.cells[cellgen[0]][cellgen[1]].visited == True:
thisIsDone = 1
# If it has not been marked as done, allot this cell to the robot
# The variable 'isJobless' denotes if the robots frontier cluster is jobless now
if thisIsDone == 0:
if self.aStarFlag == True:
path, tempDist = self.astar.aStarSearch(self.gridworld, robotPos, cellPos)
tempDist = tempDist[cellPos]
else:
tempDist = abs(robotPos[0] - cellPos[0]) + abs(robotPos[1] - cellPos[1])
dists.append(tempDist)
if tempDist < dist:
dist = tempDist
allotted_frontier = cellgen
isJobless = False
# print 'dist:', dists, dist
# print i, isJobless
"""
Move the ith robot closer to its allotted frontier
"""
# If all cells in the robot's assigned cluster have been explored, the robot waits in the same cell
if self.volume > 0:
exploredPercentage = self.exploredCells / (self.volume * 1.0)
if self.printLog == True:
print 'self.exploredCells:', self.exploredCells ##
# print 'exploredPercentage:', exploredPercentage
if self.thresholdPercentage <= 0.1:
self.thresholdPercentage = 0.1
else:
# print '#########twenty-five iterations:', self.t / 25
if self.t < 200:
self.thresholdPercentage = 0.5
else:
if self.t > 600 and self.t < 800:
self.thresholdPercentage = 0.5
else:
self.thresholdPercentage = 0.5 - 0.01*(self.t / 25)
"""Uncomment these lines to revert to the old plan"""
# # if isJobless == True or exploredPercentage >= self.thresholdPercentage:
# if exploredPercentage >= self.thresholdPercentage:
# # genmax stores the command that is given to a robot
# # Kindly do not worry about the variable naming style
# # It was all Arjun's creativity and it shall be fixed soon
# shouldRecluster = True
# genmax = 8
# # self.exploredCells = 0 # ??
if isJobless == True: # or exploredPercentage >= self.thresholdPercentage:
genmax = 8
shouldRecluster = True
# if exploredPercentage >= self.thresholdPercentage:
# shouldRecluster = True
# Otherwise, it visits the cell
else:
# possCells stores the current 8-neighbors of the ith robot
possCells=[]
possCells = self.gridworld.get8Neighbors((self.robots[i].curX, self.robots[i].curY))
# If using A*
if self.aStarFlag == True:
path, tmp = self.astar.aStarSearch(self.gridworld, possCells[0], allotted_frontier)
tmp = tmp[allotted_frontier]
# If A* is not being used, Manhattan distance is used
else:
tmp = abs(possCells[0][0]-allotted_frontier[0]) + abs(possCells[0][1]-allotted_frontier[1])
# Here's yet another name from our creative genius (Arjun)
# This variable is initialized with the first of its 8-neighbors
thechosenone=possCells[0]
# For each neighbor of the ith robot
for nextcell in possCells:
# If using A*
if self.aStarFlag == True:
path, tmp1 = self.astar.aStarSearch(self.gridworld, nextcell, allotted_frontier)
tmp1 = tmp1[allotted_frontier]
# If A* is not being used, Manhattan distance is used
else:
tmp1=abs(nextcell[0]-allotted_frontier[0]) + abs(nextcell[1]-allotted_frontier[1])
# if tmp>=tmp1: ## Error ?
if tmp1 < tmp:
# path, tmp = self.astar.aStarSearch(self.gridworld, nextcell, allotted_frontier)
tmp=tmp1;
thechosenone = nextcell
genmax=self.gridworld.getcmd(thechosenone[0], thechosenone[1], self.robots[i].curX, self.robots[i].curY)
cmd.append(genmax)
# print 'cmd:', cmd
# for i in range(len(cmd)):
# temp = str(cmd[i])
# sys.stdout.write(temp)
# sys.stdout.write(' ')
# sys.stdout.write('\n')
return cmd, shouldRecluster
def executeBestCfgc(self, bestCfgc):
    """Apply one movement command per robot, updating grid occupancy.

    Commands rejected by the gridworld's validity check are skipped, so
    the corresponding robot simply stays in place for this time step.
    Accepted moves vacate the robot's old cell and mark the destination
    cell as occupied and visited.
    """
    for idx, command in enumerate(bestCfgc):
        robot = self.robots[idx]
        curX, curY = robot.curX, robot.curY
        # Only move when the gridworld accepts this command from the
        # robot's current cell.
        if self.gridworld.checkCommand(curX, curY, command) == True:
            newX, newY = self.gridworld.getNextPos(curX, curY, command)
            # Vacate the old cell, then occupy and mark the new one.
            self.gridworld.cells[curX][curY].occupied = False
            robot.curX = newX
            robot.curY = newY
            self.gridworld.cells[newX][newY].occupied = True
            self.gridworld.cells[newX][newY].visited = True
# Run the algorithm for 1 iteration
def runOneIter(self):
# If t time steps have already expired, return
self.t += 1
if self.t >= self.T:
if self.printedFinalStats == False:
self.printFinalStats()
return
# Else, run the algorithm for one time step
self.frontier = self.computeFrontier()
if self.frontier == []:
if self.completedFlag == False:
self.completedFlag = True
self.completionTime = self.t
# print 'Completed in time', self.completionTime
self.printFinalStats()
return
# self.frontier = self.deleteLoneCells()
if len(self.frontier) < self.numRobots:
self.K = len(self.frontier)
else:
self.K = self.numRobots
# If reclusterFlag is true, then perform K-Means clustering
if self.reclusterFlag == True:
if self.printLog == True:
print 'volume:', len(self.frontier) ##
self.exploredCells = 0
self.volume = len(self.frontier)
if self.K != 1:
self.centroids, self.frontierClusters = self.cluster.Kmeanscluster(self.frontier, self.K)
# K-Means doesn't accept K = 1, so handle that case separately
# by creating a centroid for that cell alone
else:
clusterObject = Cluster.Cluster(self.frontier[0][0], self.frontier[0][1])
self.centroids = [clusterObject]
self.frontierClusters = [self.frontier]
self.timesClustered += 1
# Calculating a few values in order to plot dispersion
# (not of any use to the algorithm)
centroidOfCentroids = ()
sumX = 0
sumY = 0
for centroid in self.centroids:
sumX += centroid.x
sumY += centroid.y
if self.centroids != 0:
tempX = int(sumX / len(self.centroids))
tempY = int(sumY / len(self.centroids))
# print tempX, tempY
centroidOfCentroids = (tempX, tempY)
sumOfDistances = 0
for centroid in self.centroids:
# sumOfDistances += (abs(centroid.x - centroidOfCentroids[0]) + abs(centroid.y - centroidOfCentroids[1]))
sumOfDistances += (((centroid.x - centroidOfCentroids[0])*(centroid.x - centroidOfCentroids[0])) + ((centroid.y - centroidOfCentroids[1]) * (centroid.y - centroidOfCentroids[1])))
print sumOfDistances
self.reallocateFlag = True
# Perform frontier allocation
bestCfgc, shouldRecluster = self.allocateFrontiers()
self.currentEights = 0
for cfg in bestCfgc:
if cfg == 8:
self.currentEights += 1
# print shouldRecluster
if shouldRecluster == True and self.K == self.numRobots:
self.reclusterFlag = True
return
else:
self.reclusterFlag = False
# if shouldRecluster == True and self.K < self.numRobots:
# bestCfgc, shouldRecluster = self.allocateFrontiers()
# print self.t, shouldRecluster
# self.reclusterFlag = True
for i in range(self.numRobots):
for cellgen in self.frontierClusters[i]:
for j in range(self.gridworld.height):
for k in range(self.gridworld.width):
if self.gridworld.cells[j][k].x == cellgen[0] and self.gridworld.cells[j][k].y == cellgen[1] and self.gridworld.cells[j][k].visited==False:
# self.reclusterFlag = False
self.gridworld.cells[j][k].cluster=i
break
# #print 'not jobless'
self.executeBestCfgc(bestCfgc)
# time.sleep(1)
# Print the final statistics
self.printFinalStats()
| 34.449109 | 184 | 0.664069 |
"""
Provides an implementation of the Frontier Clustering algorithm.
"""
import math
import random
import sys
import time
import AStar
import Cluster
import GridWorld
import Hungarian
import kmeans
import Robot
from collections import defaultdict
from tsp_solver.greedy import solve_tsp
class Faigl:
"""
height and width specify the dimensions of the environment
obstacles is a list of locations which are to be initialized as obstacles
R specifies the range of communication
numRobots specifies the number of robot objects to be initialized
initLocs specifies the initial locations for each of the robots
k specifies the size of the population of configuration changes
T specifies the number of time steps to run the simulation for
base specifies the coordinates of the base station
"""
def __init__(self, height, width, obstacles, numRobots, initLocs, T = 10):
self.gridworld = GridWorld.GridWorld(height, width, obstacles)
self.centroid = []
self.cluster = kmeans.kmeans()
self.robots = [Robot.Robot(j+1, -1, -1) for j in range(numRobots)]
i = 0
for initLoc in initLocs:
currentPoint = (initLoc[0], initLoc[1])
if not self.gridworld.inBounds(currentPoint) or not self.gridworld.passable(currentPoint):
print 'Initial location', currentPoint, 'is not possible'
sys.exit(-1)
self.robots[i].setLocation(initLoc[0], initLoc[1])
self.gridworld.cells[initLoc[0]][initLoc[1]].occupied = True
self.gridworld.cells[initLoc[0]][initLoc[1]].visited = True
i += 1
# Initialize other parameters of the algorithm
# Height of the Grid
self.height = height
# Width of the Grid
self.width = width
# List of Clusters (obtained using K-Means clustering)
self.frontierClusters=[]
# Number of Robots
self.numRobots = numRobots
# Parameter for adaptive K-Means
self.K = self.numRobots
# Time steps for which the algorithm is run
self.T = T
# Variable to indicate whether reclustering should be performed
self.reclusterFlag = True
# Centroids of clusters
self.centroids = []
# Number of time steps elapsed
self.t = 0
# Time taken to exhaust the frontier
self.completionTime = 0
# Set to True on completion
self.completedFlag = False
# List of Frontier Cells
self.frontier = []
# New Positions of each of the Robots
self.newPos = []
# Population of Configuration Changes
self.cfgc = []
# Number of Stalls (Used only for simulating Rooker's work)
self.stalls = 0
self.printedFinalStats = False
self.possible = []
self.visited = self.numRobots
self.sumNewVisited = numRobots
self.aStarFlag = False
self.infinity = 10000000
self.hungarianFlag = True
self.randomMotionPlan = False
self.currentEights = 0
self.totalMoves = 0
self.volume = 0
self.exploredCells = 0
self.timesClustered = 0
self.thresholdPercentage = 1.0
self.reallocateFlag = False
self.printLog = False
self.astar = AStar.AStar()
def printGrid(self):
mputeFrontier()
print 'occupied cells:'
for i in range(self.height):
for j in range(self.width):
if self.gridworld.cells[i][j].occupied == True:
print i, j
print 'robot locations:'
for robot in self.robots:
print robot.curX, robot.curY
for i in range(self.height):
for j in range(self.width):
if self.gridworld.cells[i][j].obstacle == True:
sys.stdout.write(' # ')
elif self.gridworld.cells[i][j].occupied == True:
robotId = 0
for robot in self.robots:
if robot.curX == i and robot.curY == j:
robotId = robot.id
temp = ' ' + str(robotId) + ' '
sys.stdout.write(temp)
elif (i, j) in frontier:
sys.stdout.write(' | ')
else:
if self.gridworld.cells[i][j].visited == True:
sys.stdout.write(' . ')
else:
sys.stdout.write(' - ')
sys.stdout.write('\n')
def printVisitedStatus(self):
visited = 0
visitable = self.height * self.width
for i in range(self.height):
for j in range(self.width):
if self.gridworld.cells[i][j].visited == True:
sys.stdout.write(' 1 ')
visited += 1
else:
sys.stdout.write(' 0 ')
if self.gridworld.cells[i][j].obstacle == True:
visitable -= 1
sys.stdout.write('\n')
print 'visited:', visited, ' of ', visitable
print 'stalls:', self.stalls
return self.completionTime
def printFinalStats(self, force = 0):
visitednow=0
visitable = self.height * self.width
for i in range(self.height):
for j in range(self.width):
if self.gridworld.cells[i][j].visited == True:
visitednow += 1
else:
if self.gridworld.cells[i][j].obstacle == True:
visitable -= 1
metric = self.visited / visitable
if self.printLog == True:
print 'visitednow', visitednow redundancy = (self.numRobots - self.currentEights) - (visitednow - self.visited)
if self.printLog == True:
print 'redundancy:', redundancy
self.exploredCells += (visitednow - self.visited)
self.visited = visitednow
numMoves = self.numRobots - self.currentEights
self.totalMoves += numMoves
if self.printLog == True:
print 'totalMoves:', self.totalMoves print 'self.exploredCells:', self.exploredCells
self.printedFinalStats = True
return
def computeFrontier(self):
frontier = []
for i in range(self.height):
for j in range(self.width):
# Only such cells are possible candidates for the frontier
if self.gridworld.cells[i][j].visited == False and self.gridworld.cells[i][j].obstacle == False:
point = (i, j)
neighbors = self.gridworld.get8Neighbors(point)
# Now we see if there is at least one neighbor of the current cell which has been visited
# In such a case, the current cell would become a frontier cell
frontierFlag = False
for nbhr in neighbors:
if self.gridworld.cells[nbhr[0]][nbhr[1]].visited == True:
frontierFlag = True
if frontierFlag == True:
frontier.append((i, j))
return frontier
# Method to delete lone cells
def deleteLoneCells(self):
# Kill cells using the rules from Conway's Game of Life
for i in range(self.height):
for j in range(self.width):
if self.gridworld.cells[i][j].visited == True:
continue
else:
visitedNeighbors = 0
neighbors = self.gridworld.get8Neighbors((i, j))
for neighbor in neighbors:
if self.gridworld.cells[neighbor[0]][neighbor[1]].visited == True:
visitedNeighbors += 1
if visitedNeighbors == len(neighbors):
self.gridworld.cells[i][j].visited = True
frontier = self.computeFrontier()
return frontier
def getNewPositions(self, cmd):
newPos = []
for i in range(self.numRobots):
nextX, nextY = self.gridworld.getNextPos(self.robots[i].curX, self.robots[i].curY, cmd[i])
newPos.append((nextX, nextY))
return newPos
def isCfgPossible(self, cfg):
if any(self.newPos.count(element) > 1 for element in self.newPos) == True:
return 1
retval = 0
for i in range(self.numRobots):
if self.gridworld.checkCommand(self.robots[i].curX, self.robots[i].curY, cfg[i]) == False:
retval = 2
return retval
def allocateFrontiers(self):
cmd = []
if self.reallocateFlag == True:
"""
If hungarianFlag is set to True, perform Hungarian assignment
Else, perform greedy assignment.
"""
if self.hungarianFlag == True:
costMatrix = []
for i in range(self.numRobots):
closestDist = self.infinity
cost = []
for cluster in self.frontierClusters:
closestDistToCluster = self.infinity
for point in cluster:
if self.aStarFlag == True:
path, distanceToPoint = self.astar.aStarSearch(self.gridworld, (self.robots[i].curX, self.robots[i].curY), (point[0], point[1]))
distanceToPoint = distanceToPoint[(point[0], point[1])]
else:
distanceToPoint = abs(self.robots[i].curX - point[0]) + abs(self.robots[i].curY - point[1])
if distanceToPoint < closestDistToCluster:
closestDistToCluster = distanceToPoint
cost.append(closestDistToCluster)
costMatrix.append(cost)
costMatrix = zip(*costMatrix)
t = [0 for j in range(self.numRobots)]
for i in range(additionalRows):
costMatrix.append(cost)
"""Create an instance of the Hungarian assignment class"""
hungarian = Hungarian.Hungarian()
hungarian.calculate(costMatrix)
results = hungarian.get_results()
if self.K < self.numRobots:
def filterFunction(parameter):
return parameter[0] < self.K-1
results = filter(filterFunction, results)
for result in results:
self.centroids[result[0]].allotted = result[1]
robotList = [i for i in range(self.numRobots)]
for result in results:
if result[1] in robotList:
robotList.remove(result[1])
for robot in robotList:
clusterObject = Cluster.Cluster(self.robots[robot].curX, self.robots[robot].curY)
clusterObject.allotted = robot
self.centroids.append(clusterObject)
self.frontierClusters.append([(self.robots[robot].curX, self.robots[robot].curY)])
else:
totalPotential = 0
for j in range(self.numRobots):
robotPos = (self.robots[0].curX, self.robots[0].curY)
centroidPos = (self.centroids[j].x, self.centroids[j].y)
if self.aStarFlag == True:
path, tmp = self.astar.aStarSearch(self.gridworld, robotPos, (self.infinity, self.infinity))
tmp = self.infinity
else:
tmp = self.infinity
for i in range(self.numRobots):
robotPos = (self.robots[i].curX, self.robots[i].curY)
if self.aStarFlag == True:
path, tempdist = self.astar.aStarSearch(self.gridworld, robotPos, centroidPos)
tempdist = tempdist[centroidPos]
else:
tempdist = abs(robotPos[0]-centroidPos[0]) + abs(robotPos[1]-centroidPos[1])
allottedflag = False
for k in range(self.numRobots):
if self.centroids[k].allotted==i:
allottedflag = True
if tmp >= tempdist and allottedflag == False:
tmp = tempdist
self.centroids[j].allotted = i
totalPotential += tmp
# print 'Total Potential: ', totalPotential
"Motion planning"
shouldRecluster = False
for i in range(self.numRobots):
# If randomMotionPlan is True, use random motion planning
# i.e., visit each cluster cell in the order in which K-Means returned it
if self.randomMotionPlan == True:
isJobless = True
"""
Determine the cluster that was allotted the ith robot
Store it in temp
"""
for j in range(self.numRobots):
if self.centroids[j].allotted==i:
temp=j
break
# print 'robot', i, 'given cluster', temp
# Iterate over each frontier cell
for cellgen in self.frontierClusters[temp]:
# Mark the cell as not done
thisisdone=0
# Check if the cell has been visited already
# If yes mark this as done
for j in range(self.gridworld.height):
for k in range(self.gridworld.width):
if self.gridworld.cells[j][k].x == cellgen[0] and self.gridworld.cells[j][k].y == cellgen[1] and self.gridworld.cells[j][k].visited==True:
thisisdone=1
# If it has not been marked as done, allot this cell to the robot
# The variable 'isJobless' denotes if the robots frontier cluster is jobless now
if thisisdone==0:
allotted_frontier=cellgen
isJobless = False
break
# Otherwise, use the greedy motion plan
else:
isJobless = True
# To each robot, allocate the closest cell in its allotted frontier
# For that, initialize the least distance to infinity
dist = self.infinity ## If any error occurs, maybe commenting this line would resolve it
# Store the cluster number allotted to the ith robot
for j in range(self.numRobots):
if self.centroids[j].allotted == i:
allottedCluster = j
break
# print 'robot', i, 'given cluster', j
dists = []
if self.aStarFlag == True:
path, dist = self.astar.aStarSearch(self.gridworld, (0, 0), (self.infinity, self.infinity))
dist = self.infinity
# print 'dist:', dist
else:
dist = self.infinity
# Iterate over each frontier cell in the ith robot's cluster
for cellgen in self.frontierClusters[allottedCluster]:
thisIsDone = 0
robotPos = (self.robots[i].curX, self.robots[i].curY)
cellPos = (cellgen[0], cellgen[1])
if self.gridworld.cells[cellgen[0]][cellgen[1]].visited == True:
thisIsDone = 1
if thisIsDone == 0:
if self.aStarFlag == True:
path, tempDist = self.astar.aStarSearch(self.gridworld, robotPos, cellPos)
tempDist = tempDist[cellPos]
else:
tempDist = abs(robotPos[0] - cellPos[0]) + abs(robotPos[1] - cellPos[1])
dists.append(tempDist)
if tempDist < dist:
dist = tempDist
allotted_frontier = cellgen
isJobless = False
"""
Move the ith robot closer to its allotted frontier
"""
if self.volume > 0:
exploredPercentage = self.exploredCells / (self.volume * 1.0)
if self.printLog == True:
print 'self.exploredCells:', self.exploredCells ##
# print 'exploredPercentage:', exploredPercentage
if self.thresholdPercentage <= 0.1:
self.thresholdPercentage = 0.1
else:
# print 'resholdPercentage:
# if exploredPercentage >= self.thresholdPercentage:
# # genmax stores the command that is given to a robot
# # Kindly do not worry about the variable naming style
# # It was all Arjun's creativity and it shall be fixed soon
nmax = 8
shouldRecluster = True
else:
possCells=[]
possCells = self.gridworld.get8Neighbors((self.robots[i].curX, self.robots[i].curY))
if self.aStarFlag == True:
path, tmp = self.astar.aStarSearch(self.gridworld, possCells[0], allotted_frontier)
tmp = tmp[allotted_frontier]
else:
tmp = abs(possCells[0][0]-allotted_frontier[0]) + abs(possCells[0][1]-allotted_frontier[1])
# This variable is initialized with the first of its 8-neighbors
thechosenone=possCells[0]
# For each neighbor of the ith robot
for nextcell in possCells:
# If using A*
if self.aStarFlag == True:
path, tmp1 = self.astar.aStarSearch(self.gridworld, nextcell, allotted_frontier)
tmp1 = tmp1[allotted_frontier]
# If A* is not being used, Manhattan distance is used
else:
tmp1=abs(nextcell[0]-allotted_frontier[0]) + abs(nextcell[1]-allotted_frontier[1])
# if tmp>=tmp1: ## Error ?
if tmp1 < tmp:
# path, tmp = self.astar.aStarSearch(self.gridworld, nextcell, allotted_frontier)
tmp=tmp1;
thechosenone = nextcell
genmax=self.gridworld.getcmd(thechosenone[0], thechosenone[1], self.robots[i].curX, self.robots[i].curY)
cmd.append(genmax)
# print 'cmd:', cmd
# for i in range(len(cmd)):
# temp = str(cmd[i])
# sys.stdout.write(temp)
# sys.stdout.write(' ')
# sys.stdout.write('\n')
return cmd, shouldRecluster
def executeBestCfgc(self, bestCfgc):
i = 0
for cmd in bestCfgc:
tempX = self.robots[i].curX
tempY = self.robots[i].curY
if self.gridworld.checkCommand(tempX, tempY, cmd) == True:
nextX, nextY = self.gridworld.getNextPos(tempX, tempY, cmd)
self.gridworld.cells[tempX][tempY].occupied = False
self.robots[i].curX = nextX
self.robots[i].curY = nextY
self.gridworld.cells[nextX][nextY].occupied = True
self.gridworld.cells[nextX][nextY].visited = True
i += 1
# Run the algorithm for 1 iteration
def runOneIter(self):
# If t time steps have already expired, return
self.t += 1
if self.t >= self.T:
if self.printedFinalStats == False:
self.printFinalStats()
return
# Else, run the algorithm for one time step
self.frontier = self.computeFrontier()
if self.frontier == []:
if self.completedFlag == False:
self.completedFlag = True
self.completionTime = self.t
# print 'Completed in time', self.completionTime
self.printFinalStats()
return
# self.frontier = self.deleteLoneCells()
if len(self.frontier) < self.numRobots:
self.K = len(self.frontier)
else:
self.K = self.numRobots
# If reclusterFlag is true, then perform K-Means clustering
if self.reclusterFlag == True:
if self.printLog == True:
print 'volume:', len(self.frontier) ##
self.exploredCells = 0
self.volume = len(self.frontier)
if self.K != 1:
self.centroids, self.frontierClusters = self.cluster.Kmeanscluster(self.frontier, self.K)
# K-Means doesn't accept K = 1, so handle that case separately
else:
clusterObject = Cluster.Cluster(self.frontier[0][0], self.frontier[0][1])
self.centroids = [clusterObject]
self.frontierClusters = [self.frontier]
self.timesClustered += 1
centroidOfCentroids = ()
sumX = 0
sumY = 0
for centroid in self.centroids:
sumX += centroid.x
sumY += centroid.y
if self.centroids != 0:
tempX = int(sumX / len(self.centroids))
tempY = int(sumY / len(self.centroids))
centroidOfCentroids = (tempX, tempY)
sumOfDistances = 0
for centroid in self.centroids:
sumOfDistances += (((centroid.x - centroidOfCentroids[0])*(centroid.x - centroidOfCentroids[0])) + ((centroid.y - centroidOfCentroids[1]) * (centroid.y - centroidOfCentroids[1])))
print sumOfDistances
self.reallocateFlag = True
bestCfgc, shouldRecluster = self.allocateFrontiers()
self.currentEights = 0
for cfg in bestCfgc:
if cfg == 8:
self.currentEights += 1
if shouldRecluster == True and self.K == self.numRobots:
self.reclusterFlag = True
return
else:
self.reclusterFlag = False
for i in range(self.numRobots):
for cellgen in self.frontierClusters[i]:
for j in range(self.gridworld.height):
for k in range(self.gridworld.width):
if self.gridworld.cells[j][k].x == cellgen[0] and self.gridworld.cells[j][k].y == cellgen[1] and self.gridworld.cells[j][k].visited==False:
self.gridworld.cells[j][k].cluster=i
break
Cfgc(bestCfgc)
self.printFinalStats()
| false | true |
f7f984532f4d4eee71e4b32e9e717f0c2370479a | 7,476 | py | Python | code/main_vehicule.py | fokoa/byzantine_kmeans-msc_thesis | b116cb2222cd0e1334db7df493ec453dd299b985 | [
"MIT"
] | null | null | null | code/main_vehicule.py | fokoa/byzantine_kmeans-msc_thesis | b116cb2222cd0e1334db7df493ec453dd299b985 | [
"MIT"
] | null | null | null | code/main_vehicule.py | fokoa/byzantine_kmeans-msc_thesis | b116cb2222cd0e1334db7df493ec453dd299b985 | [
"MIT"
] | null | null | null | #!usr/bin/python3
# -*- coding : utf8 -*-
import sys;
import getopt;
import warnings;
from mpi4py import MPI;
import numpy as np;
import pandas as pd;
import matplotlib.pyplot as plt;
from mpl_toolkits.mplot3d import Axes3D;
from sklearn import decomposition;
from sklearn.cluster import KMeans;
from kmeans_resilient import KMeansResilient as KMR;
from functions import *;
# Show every dataframe column when printing (pandas display option)
pd.set_option('max_column', None);
# Silence library warnings so the MPI console output stays readable
warnings.filterwarnings('ignore');
# MPI initialization
comm = MPI.COMM_WORLD;   # communicator shared by all processes
P = comm.Get_size();     # total number of machines (1 coordinator, rest workers)
rank = comm.Get_rank();  # rank of this process (0 = coordinator)
def check_option(opt_arg):
    """Validate the command-line options parsed by ``getopt``.

    Parameters
    ----------
    opt_arg : tuple
        The (options, arguments) pair returned by ``getopt.getopt``.

    Returns
    -------
    opts : list
        Parsed (option, value) pairs.
    args : list
        Remaining positional arguments.
    """
    # NOTE(review): getopt.GetoptError is raised at the call site, while
    # getopt.getopt() is being evaluated, so this handler can never fire
    # for a parse error -- confirm intent with the caller.
    try:
        opts, args = opt_arg
    except getopt.GetoptError as err:
        print(err)
        print("Use :\t", sys.argv[0], "-b 5 \n\t",
              "or:", sys.argv[0], "--byzantine 5")
        sys.exit(-1)

    for option, value in opts:
        if option in ("-b", "--byzantine"):
            # The byzantine count must be a non-negative integer literal
            if not value.isdigit():
                raise ValueError("Enter an integer as number"
                                 "of byzantine machines.")
        elif option in ("-h", "--help"):
            print("Use:", sys.argv[0], "-b 5\n",
                  "or:", sys.argv[0], "--byzantine 5")
            sys.exit(-1)
        else:
            print("unhandled options")
            sys.exit(-1)

    return opts, args
def check_Nbyzan(opts, P):
    """Check and get the number of Byzantine machines to simulate.

    Parameters
    ----------
    opts : list
        Options parsed from the command prompt; the first entry, when
        present, is an (option, value) pair such as ("-b", "5").
    P : int
        Total number of machines (nodes or workers).
        1 coordinator and the remaining are workers.

    Returns
    -------
    n_byzantines : int (natural number)
        Number of byzantine machines to simulate (0 when no option
        was given on the command line).

    Raises
    ------
    ValueError
        If the requested number is negative or not strictly smaller
        than the number of workers (P - 1).
    """
    if len(opts) == 0:
        # No -b/--byzantine option: simulate no byzantine machine.
        n_byzantines = 0
    else:
        # Bug fix: the original fell through and read opts[0] even when
        # the option list was empty, raising IndexError instead of
        # defaulting to zero.
        n_byzantines = int(opts[0][1])

    if n_byzantines < 0 or n_byzantines > P - 1:
        raise ValueError("Number of byzantine must be an integer "
                         "< number of workers or >= 0")

    return n_byzantines
def sort_centroides(centroids):
    """Sort centroids by ascending Euclidean norm.

    Parameters
    ----------
    centroids : ndarray of shape (k, n_features)
        All centroids of the clusters, where k is the number
        of clusters.

    Returns
    -------
    ndarray of shape (k, n_features)
        The same centroids reordered by increasing norm.  Bug fix: the
        previous implementation keyed a dict on the norm value, so two
        centroids with equal norms collided -- one was silently dropped
        and a zero row was left in the output.  A stable argsort keeps
        every centroid and preserves the original order of ties.
    """
    norms = np.linalg.norm(centroids, axis=1)
    # 'stable' keeps the original relative order of equal-norm rows.
    order = np.argsort(norms, kind='stable')
    return centroids[order]
def comparaison_cluster(X, label_km, label_by, label_co):
    """Plot the three clusterings side by side in PCA-reduced 3-D space.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Samples that were clustered.
    label_km : list of length 2
        [labels obtained with K-means, number of clusters].
    label_by : list of length 2
        [labels obtained with byzantine K-means, number of byzantines].
    label_co : ndarray of shape (n_samples, )
        Labels obtained by correcting byzantines in byzantine K-means.
    """
    # Project the samples onto their first three principal components
    pca = decomposition.PCA(n_components=3)
    X_reduced = pca.fit_transform(X)
    x_axis, y_axis, z_axis = (list(col) for col in zip(*X_reduced))

    fig = plt.figure(figsize=plt.figaspect(0.5), facecolor="w")

    # One 3-D panel per clustering, colored by its label vector
    panels = [
        (label_km[0], '%d-means' % (label_km[1])),
        (label_by[0], '%d Byzantines' % (label_by[1])),
        (label_co, 'Correction'),
    ]
    for position, (labels, title) in enumerate(panels, start=1):
        ax = fig.add_subplot(1, 3, position, projection='3d')
        ax.scatter(x_axis, y_axis, z_axis, c=labels)
        plt.title(title)

    plt.show()
def main():
    """Run plain, byzantine and corrected K-means on the Statlog
    vehicle-silhouette dataset and compare the results.

    Side effects: reads the nine ``xa?.dat`` shards from disk, prints the
    inertias on MPI rank 0 and opens a comparison plot.
    """
    # Check options and number of byzantines
    opts, arg = check_option(getopt.getopt(sys.argv[1:], "b:",
                                           ["byzantine="]))
    n_byzantines = check_Nbyzan(opts, P)
    # Load the dataset shards xaa..xai in one loop instead of nine
    # copy-pasted calls.  r'\s+' (raw string) avoids the invalid escape
    # sequence DeprecationWarning that '\s+' triggers on Python >= 3.6.
    shards = [pd.read_csv("data/Statlog vehicule silhouette/xa%s.dat" % s,
                          header=None, sep=r'\s+')
              for s in "abcdefghi"]
    # Concatenate all shards into a single frame
    data = pd.concat(shards, ignore_index=True)
    # Column 18 is the class label; every other column is a feature.
    cols = data.columns.difference([18])
    X = data[cols].values
    y = data[18].values  # kept for reference; unused by the clustering
    # Models: plain K-means, K-means with byzantine workers, and the
    # byzantine-resilient (corrected) variant.
    km = KMR(n_clusters=4, n_init=10, n_iter=50, seed=2)
    by = KMR(n_clusters=4, n_init=10, n_iter=50,
             seed=2, n_byzantines=n_byzantines)
    co = KMR(n_clusters=4, n_init=10, n_iter=50,
             seed=2, n_byzantines=n_byzantines, correction=True)
    # Fit
    km.fit(X)
    by.fit(X)
    co.fit(X)
    # Sort centroids so the three runs can be compared row by row.
    km.centroids_ = sort_centroides(km.centroids_)
    by.centroids_ = sort_centroides(by.centroids_)
    co.centroids_ = sort_centroides(co.centroids_)
    # Report and plot on the master rank only.
    if rank == 0:
        print('\nKmeans inertia:\n', km.inertia_)
        print('\nByzantine inertia:\n', by.inertia_)
        print('\nCorrection inertia:\n', co.inertia_)
        comparaison_cluster(X, [km.labels_, km.n_clusters], [by.labels_,
                            by.n_byzantines], co.labels_)
# Run the experiment only when executed directly (e.g. under mpirun),
# not when this module is imported.
if __name__ == "__main__":
    main();
| 27.688889 | 75 | 0.563938 |
import sys;
import getopt;
import warnings;
from mpi4py import MPI;
import numpy as np;
import pandas as pd;
import matplotlib.pyplot as plt;
from mpl_toolkits.mplot3d import Axes3D;
from sklearn import decomposition;
from sklearn.cluster import KMeans;
from kmeans_resilient import KMeansResilient as KMR;
from functions import *;
pd.set_option('max_column', None);
warnings.filterwarnings('ignore');
comm = MPI.COMM_WORLD;
P = comm.Get_size();
rank = comm.Get_rank();
def check_option(opt_arg):
    """Validate a ``getopt.getopt`` result.

    Prints a usage message and exits on an unparsable option tuple, raises
    ``ValueError`` for a non-numeric byzantine count, and otherwise returns
    the ``(opts, args)`` pair unchanged.
    """
    try:
        opts, args = opt_arg
    except getopt.GetoptError as err:
        print(err)
        print("Use :\t", sys.argv[0], "-b 5 \n\t",
              "or:", sys.argv[0], "--byzantine 5")
        sys.exit(-1)
    for flag, value in opts:
        if flag in ("-b", "--byzantine"):
            # The byzantine count must be a plain non-negative integer.
            if not value.isdigit():
                raise ValueError("Enter an integer as number"
                                 "of byzantine machines.")
            continue
        if flag in ("-h", "--help"):
            print("Use:", sys.argv[0], "-b 5\n",
                  "or:", sys.argv[0], "--byzantine 5")
            sys.exit(-1)
        # Anything else is unsupported.
        print("unhandled options")
        sys.exit(-1)
    return opts, args
def check_Nbyzan(opts, P):
    """Return the requested number of byzantine workers.

    Parameters
    ----------
    opts : list of (flag, value) pairs produced by ``getopt`` (may be empty).
    P : int
        Total number of MPI processes; at most ``P - 1`` may be byzantine.

    Returns
    -------
    int
        The byzantine count; 0 when no option was supplied.

    Raises
    ------
    ValueError
        If the count is negative or exceeds ``P - 1``.

    Bug fix: the original assigned the default and then unconditionally
    read ``opts[0][1]``, raising IndexError whenever ``opts`` was empty.
    """
    if len(opts) == 0:
        n_byzantines = 0
    else:
        n_byzantines = int(opts[0][1])
    if n_byzantines < 0 or n_byzantines > P - 1:
        raise ValueError("Number of byzantine must be an integer "
                         "< number of workers or >= 0")
    return n_byzantines
def sort_centroides(centroids):
    """Return a copy of *centroids* with rows ordered by ascending L2 norm.

    Bug fix: the previous dict-of-norms implementation dropped centroids
    whose norms collided (one row of the output stayed zero).  A stable
    argsort keeps every row and preserves the original order of ties.
    """
    order = np.argsort(np.linalg.norm(centroids, axis=1), kind="stable")
    return centroids[order]
def comparaison_cluster(X, label_km, label_by, label_co):
    """Scatter the three clusterings (plain / byzantine / corrected) in 3-D
    after projecting the samples onto their first three principal axes."""
    projected = decomposition.PCA(n_components=3).fit_transform(X)
    xs = [row[0] for row in projected]
    ys = [row[1] for row in projected]
    zs = [row[2] for row in projected]
    figure = plt.figure(figsize=plt.figaspect(0.5), facecolor="w")
    axis = figure.add_subplot(1, 3, 1, projection='3d')
    plt.title('%d-means' % (label_km[1]))
    axis.scatter(xs, ys, zs, c=label_km[0])
    axis = figure.add_subplot(1, 3, 2, projection='3d')
    plt.title('%d Byzantines' % (label_by[1]))
    axis.scatter(xs, ys, zs, c=label_by[0])
    axis = figure.add_subplot(1, 3, 3, projection='3d')
    axis.scatter(xs, ys, zs, c=label_co)
    plt.title('Correction')
    plt.show()
def main():
    """Run plain, byzantine and corrected K-means on the Statlog
    vehicle-silhouette dataset and compare the results.

    Side effects: reads the nine ``xa?.dat`` shards from disk, prints the
    inertias on MPI rank 0 and opens a comparison plot.
    """
    opts, arg = check_option(getopt.getopt(sys.argv[1:], "b:",
                                           ["byzantine="]))
    n_byzantines = check_Nbyzan(opts, P)
    # Load the dataset shards xaa..xai in one loop instead of nine
    # copy-pasted calls.  r'\s+' (raw string) avoids the invalid escape
    # sequence DeprecationWarning that '\s+' triggers on Python >= 3.6.
    shards = [pd.read_csv("data/Statlog vehicule silhouette/xa%s.dat" % s,
                          header=None, sep=r'\s+')
              for s in "abcdefghi"]
    data = pd.concat(shards, ignore_index=True)
    # Column 18 is the class label; every other column is a feature.
    cols = data.columns.difference([18])
    X = data[cols].values
    y = data[18].values  # kept for reference; unused by the clustering
    km = KMR(n_clusters=4, n_init=10, n_iter=50, seed=2)
    by = KMR(n_clusters=4, n_init=10, n_iter=50,
             seed=2, n_byzantines=n_byzantines)
    co = KMR(n_clusters=4, n_init=10, n_iter=50,
             seed=2, n_byzantines=n_byzantines, correction=True)
    km.fit(X)
    by.fit(X)
    co.fit(X)
    # Sort centroids so the three runs can be compared row by row.
    km.centroids_ = sort_centroides(km.centroids_)
    by.centroids_ = sort_centroides(by.centroids_)
    co.centroids_ = sort_centroides(co.centroids_)
    # Report and plot on the master rank only.
    if rank == 0:
        print('\nKmeans inertia:\n', km.inertia_)
        print('\nByzantine inertia:\n', by.inertia_)
        print('\nCorrection inertia:\n', co.inertia_)
        comparaison_cluster(X, [km.labels_, km.n_clusters], [by.labels_,
                            by.n_byzantines], co.labels_)
# Run the experiment only when executed directly (e.g. under mpirun),
# not when this module is imported.
if __name__ == "__main__":
    main();
| true | true |
f7f984e969f29607cfa83a0f9c6f5d92fbf7a72d | 264 | py | Python | .vscode/extensions/ms-python.python-2020.3.69010/pythonFiles/lib/python/old_ptvsd/ptvsd/reraise2.py | anish-sk/dotfiles | 144508dd9afdc141f8f4e6bec3ddb408f92a9827 | [
"MIT"
] | 5 | 2019-04-30T02:46:37.000Z | 2021-08-22T08:39:11.000Z | .vscode/extensions/ms-python.python-2020.3.69010/pythonFiles/lib/python/old_ptvsd/ptvsd/reraise2.py | anish-sk/dotfiles | 144508dd9afdc141f8f4e6bec3ddb408f92a9827 | [
"MIT"
] | 12 | 2015-10-30T19:20:28.000Z | 2021-04-23T15:59:58.000Z | vscode/extensions/ms-python.python-2020.3.69010/pythonFiles/lib/python/old_ptvsd/ptvsd/reraise2.py | Adespinoza/dotfiles | e2509402a7fd2623a3ea401b6f9fcbf6a372fc60 | [
"CC0-1.0"
] | 5 | 2015-09-16T07:50:06.000Z | 2019-09-09T14:33:46.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
def reraise(exc_info):
    """Re-raise an exception with its original traceback.

    *exc_info* is a ``(type, value, traceback)`` triple as returned by
    ``sys.exc_info()``.  The three-argument ``raise`` below is
    Python-2-only syntax — this module presumably exists so that the
    Python-3 syntax error stays isolated here (the ``2`` suffix in the
    filename suggests a py3 sibling; confirm) — hence the ``noqa`` marker.
    """
    raise exc_info[0], exc_info[1], exc_info[2]  # noqa
| 29.333333 | 66 | 0.708333 |
def reraise(exc_info):
    # Python-2-only three-argument raise: re-raises the (type, value,
    # traceback) triple from sys.exc_info(), preserving the traceback.
    # This is a syntax error on Python 3 by design — the module is the
    # py2 half of a version-split helper.
    raise exc_info[0], exc_info[1], exc_info[2]
| false | true |
f7f986315190de3e8b8b526ed7e77f3ffb4e7979 | 2,130 | py | Python | app/models/UserModel.py | sanif/Simple-Blog-Flask | 3f84a1acf418e8c72cb6c2f3194371a635df0628 | [
"MIT"
] | null | null | null | app/models/UserModel.py | sanif/Simple-Blog-Flask | 3f84a1acf418e8c72cb6c2f3194371a635df0628 | [
"MIT"
] | null | null | null | app/models/UserModel.py | sanif/Simple-Blog-Flask | 3f84a1acf418e8c72cb6c2f3194371a635df0628 | [
"MIT"
] | null | null | null | # src/models/UserModel.py
import datetime
from flask_restful import Resource
from safrs.base import SAFRSBase
import app.models.PostModel
from . import BaseModel, bcrypt, db, ma
class UserModel(BaseModel):
    """
    User Model

    Application user with a bcrypt-hashed password and a one-to-many
    relationship to PostModel.
    """

    # table name
    __tablename__ = 'users'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(128), nullable=False)
    username = db.Column(db.String(128), unique=True, nullable=False)
    # Stores the bcrypt hash, never the plaintext password.
    password = db.Column(db.String(128), nullable=True)
    created_at = db.Column(db.DateTime)
    modified_at = db.Column(db.DateTime)
    posts = db.relationship('PostModel', backref='users', lazy=True)

    def save(self):
        """Persist this user (INSERT) and commit."""
        db.session.add(self)
        db.session.commit()

    def update(self, data):
        """Apply the ``data`` mapping to this user and commit.

        Bug fix: the previous version hashed the password but then still
        executed ``setattr(self, 'password', item)`` with the plaintext
        value, overwriting the hash.  The hash is now what gets stored.
        """
        for key, item in data.items():
            if key == 'password':
                item = self.__generate_hash(item)
            setattr(self, key, item)
        self.modified_at = datetime.datetime.utcnow()
        db.session.commit()

    def delete(self):
        """Delete this user and commit."""
        db.session.delete(self)
        db.session.commit()

    def __generate_hash(self, password):
        # bcrypt returns bytes; decode so the String column stores text.
        return bcrypt.generate_password_hash(
            password, rounds=10).decode("utf-8")

    def check_hash(self, password):
        """Return True when ``password`` matches the stored bcrypt hash."""
        return bcrypt.check_password_hash(self.password, password)

    @staticmethod
    def get_all_users():
        """Return all user rows."""
        return UserModel.query.all()

    @staticmethod
    def get_one_user(id):
        """Return the user with primary key ``id``, or None."""
        return UserModel.query.get(id)

    @staticmethod
    def get_user_by_username(username):
        """Return the first user with ``username``, or None."""
        return UserModel.query.filter_by(username=username).first()

    def __repr__(self):
        # Bug fix: was misspelled ``__repr`` and therefore never
        # picked up by repr().
        return '<username {}>'.format(self.username)
class UserSchema(ma.SQLAlchemySchema):
    """
    User Schema

    Marshmallow serialization schema for UserModel.  The ``password``
    column is not listed, so hashes are never serialized
    (NOTE(review): presumed intentional — confirm with API consumers).
    """
    class Meta:
        model = UserModel
    id = ma.auto_field()
    name = ma.auto_field()
    username = ma.auto_field()
    created_at = ma.auto_field()
    modified_at = ma.auto_field()
    posts = ma.auto_field()
| 25.97561 | 69 | 0.640376 |
import datetime
from flask_restful import Resource
from safrs.base import SAFRSBase
import app.models.PostModel
from . import BaseModel, bcrypt, db, ma
class UserModel(BaseModel):
    """Application user with a bcrypt-hashed password and a one-to-many
    relationship to PostModel."""

    __tablename__ = 'users'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(128), nullable=False)
    username = db.Column(db.String(128), unique=True, nullable=False)
    # Stores the bcrypt hash, never the plaintext password.
    password = db.Column(db.String(128), nullable=True)
    created_at = db.Column(db.DateTime)
    modified_at = db.Column(db.DateTime)
    posts = db.relationship('PostModel', backref='users', lazy=True)

    def save(self):
        """Persist this user (INSERT) and commit."""
        db.session.add(self)
        db.session.commit()

    def update(self, data):
        """Apply the ``data`` mapping to this user and commit.

        Bug fix: the old code hashed the password but then fell through
        to ``setattr`` with the plaintext value, overwriting the hash.
        """
        for key, item in data.items():
            if key == 'password':
                item = self.__generate_hash(item)
            setattr(self, key, item)
        self.modified_at = datetime.datetime.utcnow()
        db.session.commit()

    def delete(self):
        """Delete this user and commit."""
        db.session.delete(self)
        db.session.commit()

    def __generate_hash(self, password):
        # bcrypt returns bytes; decode so the String column stores text.
        return bcrypt.generate_password_hash(
            password, rounds=10).decode("utf-8")

    def check_hash(self, password):
        """Return True when ``password`` matches the stored bcrypt hash."""
        return bcrypt.check_password_hash(self.password, password)

    @staticmethod
    def get_all_users():
        """Return all user rows."""
        return UserModel.query.all()

    @staticmethod
    def get_one_user(id):
        """Return the user with primary key ``id``, or None."""
        return UserModel.query.get(id)

    @staticmethod
    def get_user_by_username(username):
        """Return the first user with ``username``, or None."""
        return UserModel.query.filter_by(username=username).first()

    def __repr__(self):
        # Bug fix: was misspelled ``__repr`` and never used by repr().
        return '<username {}>'.format(self.username)
class UserSchema(ma.SQLAlchemySchema):
    # Marshmallow serialization schema for UserModel; the password column
    # is not exposed (NOTE(review): presumed intentional — confirm).
    class Meta:
        model = UserModel
    id = ma.auto_field()
    name = ma.auto_field()
    username = ma.auto_field()
    created_at = ma.auto_field()
    modified_at = ma.auto_field()
    posts = ma.auto_field()
| true | true |
f7f9869adde2802b8a5d98e792e95d6a617f55e6 | 628 | py | Python | test/test_binary_heap.py | paolodelia99/Python-C-Algorithms | 9113ad566e4e659c1f16135c2d3abd3a4c57a46e | [
"MIT"
] | 2 | 2021-02-13T10:58:58.000Z | 2021-03-16T09:56:01.000Z | test/test_binary_heap.py | paolodelia99/Python-C-Algorithms | 9113ad566e4e659c1f16135c2d3abd3a4c57a46e | [
"MIT"
] | null | null | null | test/test_binary_heap.py | paolodelia99/Python-C-Algorithms | 9113ad566e4e659c1f16135c2d3abd3a4c57a46e | [
"MIT"
] | null | null | null | import nose
import binary_heap
BinaryHeap = binary_heap.BinaryHeap
def test_binary_heap_101():
    """A fresh heap constructs and is an instance of BinaryHeap."""
    # Plain assert instead of the unmaintained nose.tools helpers;
    # pytest rewrites asserts, so failure output stays informative.
    b = BinaryHeap()
    assert isinstance(b, BinaryHeap)
def test_binary_heap_num_entries():
    """A fresh heap reports zero entries."""
    b = BinaryHeap()
    assert b.num_entries() == 0
def test_binary_heap_insert():
    """Each insert grows the entry count."""
    b = BinaryHeap()
    b.insert("Paolo")
    b.insert("CIao")
    assert b.num_entries() == 2
def test_binary_heap_pop():
    """pop() removes one element and returns the top of the heap."""
    b = BinaryHeap()
    b.insert("Paolo")
    b.insert("CIao")
    max_el = b.pop()
    assert b.num_entries() == 1
    assert max_el == "CIao"
| 19.625 | 48 | 0.687898 | import nose
import binary_heap
BinaryHeap = binary_heap.BinaryHeap
def test_binary_heap_101():
    """A fresh heap constructs and is an instance of BinaryHeap."""
    # Plain assert replaces the unmaintained nose.tools helpers.
    b = BinaryHeap()
    assert isinstance(b, BinaryHeap)
def test_binary_heap_num_entries():
    """A fresh heap reports zero entries."""
    b = BinaryHeap()
    assert b.num_entries() == 0
def test_binary_heap_insert():
    """Each insert grows the entry count."""
    b = BinaryHeap()
    b.insert("Paolo")
    b.insert("CIao")
    assert b.num_entries() == 2
def test_binary_heap_pop():
    """pop() removes one element and returns the top of the heap."""
    b = BinaryHeap()
    b.insert("Paolo")
    b.insert("CIao")
    max_el = b.pop()
    assert b.num_entries() == 1
    assert max_el == "CIao"
| true | true |
f7f9885c0793993b3a37de767e80561374f8c7b4 | 13,021 | py | Python | dataset/leadbang.py | liqile1/OCNet.pytorch | 5fb733adbf178ccc8040197057e3277896b3dc12 | [
"MIT"
] | null | null | null | dataset/leadbang.py | liqile1/OCNet.pytorch | 5fb733adbf178ccc8040197057e3277896b3dc12 | [
"MIT"
] | null | null | null | dataset/leadbang.py | liqile1/OCNet.pytorch | 5fb733adbf178ccc8040197057e3277896b3dc12 | [
"MIT"
] | null | null | null | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: speedinghzl02
## Modified by: RainbowSecret
## Microsoft Research
## yuyua@microsoft.com
## Copyright (c) 2018
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import cv2
import pdb
import collections
import matplotlib.pyplot as plt
import numpy as np
import os
import os.path as osp
from PIL import Image, ImageOps, ImageFilter
import random
import torch
import torchvision
from torch.utils import data
import torchvision.transforms as transforms
class LeadBangTrain(data.Dataset):
    """Training split of the LeadBang defect dataset (items 1..1462).

    Images and inverted masks are cached in memory at construction time;
    ``__getitem__`` normalizes for the chosen backbone, applies
    brightness / rotation / flip augmentation and returns a random
    480x480 crop of the 512x512 working image.
    """

    def __init__(self, root, max_iters=None,
                 scale=True, mirror=True, ignore_label=255, use_aug=False, network="resnet101"):
        # NOTE(review): max_iters is ignored and scale / mirror / use_aug
        # are stored but never read below — kept for interface
        # compatibility with the other dataset classes.
        self.root = root
        self.crop_h = 480
        self.crop_w = 480
        self.img_width = 512
        self.img_height = 512
        self.scale = scale
        self.ignore_label = ignore_label
        self.is_mirror = mirror
        self.use_aug = use_aug
        self.network = network  # selects the input normalization scheme
        self.files = []
        self.cache_img = {}    # item index -> image as read by cv2 (BGR)
        self.cache_label = {}  # item index -> inverted 8-bit mask
        self.item_idx_list = []
        for item_idx in range(1, 1463):
            self.item_idx_list.append(item_idx)
            img_path = 'source/' + str(item_idx) + ".bmp"
            label_path = 'label/' + str(item_idx) + ".bmp"
            img_file = osp.join(self.root, img_path)
            label_file = osp.join(self.root, label_path)
            print('label file: ', label_file)
            self.files.append({
                "img": img_file,
                "label": label_file,
                "name": str(item_idx),
                "weight": 1
            })
            self.cache_img[item_idx] = cv2.imread(img_file)
            # Masks are stored inverted: foreground becomes high values.
            self.cache_label[item_idx] = 255 - cv2.imread(label_file, 0)
        # Bug fix: the count was hard-coded as 1462; report the real size.
        print('{} images are loaded!'.format(len(self.files)))

    def __len__(self):
        return len(self.files)

    def generate_scale_label(self, image, label):
        """Randomly rescale image and label by a factor in [0.5, 2.1]."""
        f_scale = 0.5 + random.randint(0, 16) / 10.0
        image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_LINEAR)
        label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_NEAREST)
        return image, label

    def rescale(self, image, label):
        """Resize both image and label to the fixed 512x512 working size."""
        image = cv2.resize(image, (self.img_width, self.img_height))
        label = cv2.resize(label, (self.img_width, self.img_height))
        return image, label

    def id2trainId(self, label, reverse=False):
        """Map raw label ids to training ids (or back with reverse=True).

        NOTE(review): relies on ``self.id_to_trainid``, which is never set
        anywhere in this class — calling this raises AttributeError.
        Appears to be dead code copied from a cityscapes loader; confirm
        before use.
        """
        label_copy = label.copy()
        if reverse:
            for v, k in self.id_to_trainid.items():
                label_copy[label == k] = v
        else:
            for k, v in self.id_to_trainid.items():
                label_copy[label == k] = v
        return label_copy

    def get_rotate_angle(self, angle_min, angle_max, angle_delta):
        """Draw a random angle in [angle_min, angle_max] scaled over an
        angle_delta grid, clamped to the interval."""
        count = int((angle_max - angle_min) / angle_delta)
        delta = random.random() * (count + 1) * angle_delta
        angle = angle_min + delta
        if angle < angle_min:
            angle = angle_min
        if angle > angle_max:
            angle = angle_max
        return angle

    def rotate(self, image, angle, border_value=None):
        """Rotate one channel about the image center; pixels revealed by
        the rotation are filled with ``border_value`` (cv2 default when
        None)."""
        center = (self.img_width // 2, self.img_height // 2)
        M = cv2.getRotationMatrix2D(center, angle, 1)
        if border_value is None:
            rotated = cv2.warpAffine(image, M, (self.img_width, self.img_height))
        else:
            rotated = cv2.warpAffine(image, M, (self.img_width, self.img_height), borderValue=(int(border_value),))
        return rotated

    def get_border_value(self, mat):
        """Estimate the background intensity as the mean of eight pixels
        just inside the four corners."""
        r = mat.shape[0]
        c = mat.shape[1]
        return (mat[1][1] + mat[1][c - 2] + mat[r-2][1] + mat[r-2][c-2] + mat[2][2] + mat[2][c - 3] + mat[r-3][2] + mat[r-3][c-3]) / 8.0

    def rotate_img_lb(self, image, label, angle):
        """Rotate a CHW image and its mask together.  Each channel is
        filled with its own background estimate; the interpolated mask is
        re-binarized to {0, 255}."""
        b = image[0]
        g = image[1]
        r = image[2]
        b = self.rotate(b, angle, border_value=self.get_border_value(b))
        g = self.rotate(g, angle, border_value=self.get_border_value(g))
        r = self.rotate(r, angle, border_value=self.get_border_value(r))
        label = self.rotate(label, angle)
        image = np.asarray([b, g, r], dtype=np.float32)
        ret, label = cv2.threshold(label, 127, 255, cv2.THRESH_BINARY)
        return image, label

    def adv_img_lb(self, img, lb):
        """Augmentation: random brightness shift (+-5), random rotation on
        a 5-degree grid, then independent horizontal / vertical flips."""
        img += (random.random() * 10 - 5)
        angle = self.get_rotate_angle(-180, 180, 5)
        img, lb = self.rotate_img_lb(img, lb, angle)
        if random.random() < 0.5:
            img = img[:,:,::-1]
            lb = lb[:,::-1]
        if random.random() < 0.5:
            img = img[:,::-1,:]
            lb = lb[::-1,:]
        return img, lb

    def __getitem__(self, index):
        """Return (image CHW float32, label HW int64, original size, name)
        for one augmented random crop."""
        datafiles = self.files[index]
        item_idx = self.item_idx_list[index]
        image = self.cache_img[item_idx].copy()
        label = self.cache_label[item_idx].copy()
        size = image.shape
        name = datafiles["name"]
        image, label = self.rescale(image, label)
        image = np.array(image, dtype=np.float32)
        if self.network == "resnet101":
            # Dataset-specific per-channel means, applied after BGR->RGB.
            mean = (172.43679809570312, 123.58832550048828, 141.29403686523437)
            image = image[:,:,::-1]
            image -= mean
        elif self.network == "mobilenetv2":
            mean = (0.485, 0.456, 0.406)
            var = (0.229, 0.224, 0.225)
            image = image[:,:,::-1]
            image /= 255
            image -= mean
            image /= var
        elif self.network == "wide_resnet38":
            mean = (0.41738699, 0.45732192, 0.46886091)
            var = (0.25685097, 0.26509955, 0.29067996)
            image = image[:,:,::-1]
            image /= 255
            image -= mean
            image /= var
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        image, label = self.adv_img_lb(image, label)
        img_h, img_w = label.shape
        # Random 480x480 crop out of the 512x512 augmented pair.
        h_off = random.randint(0, img_h - self.crop_h)
        w_off = random.randint(0, img_w - self.crop_w)
        image = np.asarray(image[:,h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        label = np.asarray(label[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        # [0, 255] => [0, 1]
        ret, label = cv2.threshold(label, 127, 1, cv2.THRESH_BINARY)
        label = np.array(label, dtype=np.int64)
        return image.copy(), label.copy(), np.array(size), name
class LeadBangTest(data.Dataset):
    """Test split of the LeadBang defect dataset (items 1463..2351).

    No augmentation: ``__getitem__`` only normalizes for the chosen
    backbone.  The crop equals the working size (512x512), so the
    "random" crop offsets are always zero.
    """

    def __init__(self, root, max_iters=None,
                 scale=True, mirror=True, ignore_label=255, network="resnet101"):
        # NOTE(review): max_iters / scale / mirror are unused; kept for
        # interface compatibility with LeadBangTrain.
        self.root = root
        self.crop_h = 512
        self.crop_w = 512
        self.img_width = 512
        self.img_height = 512
        self.scale = scale
        self.ignore_label = ignore_label
        self.is_mirror = mirror
        self.network = network  # selects the input normalization scheme
        self.files = []
        self.cache_img = {}    # item index -> image as read by cv2 (BGR)
        self.cache_label = {}  # item index -> inverted 8-bit mask
        self.item_idx_list = []
        for item_idx in range(1463, 2352):
            self.item_idx_list.append(item_idx)
            img_path = 'source/' + str(item_idx) + ".bmp"
            label_path = 'label/' + str(item_idx) + ".bmp"
            img_file = osp.join(self.root, img_path)
            label_file = osp.join(self.root, label_path)
            self.files.append({
                "img": img_file,
                "label": label_file,
                "name": str(item_idx),
                "weight": 1
            })
            print("label: ", label_file)
            self.cache_img[item_idx] = cv2.imread(img_file)
            # Masks are stored inverted: foreground becomes high values.
            self.cache_label[item_idx] = 255 - cv2.imread(label_file, 0)
        # Report the real size instead of a hard-coded expression.
        print('{} images are loaded!'.format(len(self.files)))

    def __len__(self):
        return len(self.files)

    def id2trainId(self, label, reverse=False):
        """Map raw label ids to training ids (or back with reverse=True).

        NOTE(review): relies on ``self.id_to_trainid``, which is never
        set in this class — dead code; calling it raises AttributeError.
        """
        label_copy = label.copy()
        if reverse:
            for v, k in self.id_to_trainid.items():
                label_copy[label == k] = v
        else:
            for k, v in self.id_to_trainid.items():
                label_copy[label == k] = v
        return label_copy

    def rescale(self, image, label):
        """Resize both image and label to the fixed 512x512 working size."""
        image = cv2.resize(image, (self.img_width, self.img_height))
        label = cv2.resize(label, (self.img_width, self.img_height))
        return image, label

    def __getitem__(self, index):
        """Return (image CHW float32, label HW int64, original size, name)
        with backbone normalization and no augmentation."""
        datafiles = self.files[index]
        item_idx = self.item_idx_list[index]
        image = self.cache_img[item_idx].copy()
        label = self.cache_label[item_idx].copy()
        size = image.shape
        name = datafiles["name"]
        image, label = self.rescale(image, label)
        image = np.array(image, dtype=np.float32)
        if self.network == "resnet101":
            # Dataset-specific per-channel means, applied after BGR->RGB.
            mean = (172.43679809570312, 123.58832550048828, 141.29403686523437)
            image = image[:,:,::-1]
            image -= mean
        elif self.network == "mobilenetv2":
            mean = (0.485, 0.456, 0.406)
            var = (0.229, 0.224, 0.225)
            image = image[:,:,::-1]
            image /= 255
            image -= mean
            image /= var
        elif self.network == "wide_resnet38":
            mean = (0.41738699, 0.45732192, 0.46886091)
            var = (0.25685097, 0.26509955, 0.29067996)
            image = image[:,:,::-1]
            image /= 255
            image -= mean
            image /= var
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        img_h, img_w = label.shape
        # crop size == image size here, so both offsets are always 0.
        h_off = random.randint(0, img_h - self.crop_h)
        w_off = random.randint(0, img_w - self.crop_w)
        image = np.asarray(image[:,h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        label = np.asarray(label[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        # [0, 255] => [0, 1]
        ret, label = cv2.threshold(label, 127, 1, cv2.THRESH_BINARY)
        # Single conversion to int64 (the original converted twice).
        label = np.asarray(label, dtype=np.int64)
        return image.copy(), label.copy(), np.array(size), name
def test_train_leadbang():
    """Visual sanity check: iterate the training set, undo the mean
    subtraction and channel flip, and display each image/label pair.

    Fix: removed leftover cityscapes list handling that opened
    ``bus_truck_train.lst`` for writing without ever using or closing it
    (resource leak and unrelated side effect).
    """
    dst = LeadBangTrain("./leadbang/")
    trainloader = data.DataLoader(dst, batch_size=1, num_workers=0)
    for i, dt in enumerate(trainloader):
        imgs, labels, _, name = dt
        img = imgs.numpy()
        lb = labels.numpy()
        print(name)
        print(img.shape)
        print(lb.shape)
        name = name[0]
        # Undo CHW layout, mean subtraction and RGB->BGR for display.
        img = np.transpose(img[0], (1,2,0))
        img += (172.43679809570312, 123.58832550048828, 141.29403686523437)
        img = img[:,:,::-1]
        img = np.array(img, dtype=np.uint8)
        # Map {0,1} labels back to an inverted 8-bit mask.
        lb = 255 - lb[0] * 255
        lb = np.asarray(lb, dtype=np.uint8)
        cv2.imshow( "img", img)
        cv2.imshow( "lb", lb)
        cv2.waitKey(0)
def test_test_leadbang():
    """Visual sanity check over the test split (same display pipeline as
    test_train_leadbang).

    Fix: removed leftover cityscapes list handling that opened
    ``bus_truck_train.lst`` for writing without ever using or closing it.
    """
    dst = LeadBangTest("./leadbang/")
    trainloader = data.DataLoader(dst, batch_size=1, num_workers=0)
    for i, dt in enumerate(trainloader):
        imgs, labels, _, name = dt
        img = imgs.numpy()
        lb = labels.numpy()
        print(name)
        print(img.shape)
        print(lb.shape)
        name = name[0]
        # Undo CHW layout, mean subtraction and RGB->BGR for display.
        img = np.transpose(img[0], (1,2,0))
        img += (172.43679809570312, 123.58832550048828, 141.29403686523437)
        img = img[:,:,::-1]
        img = np.array(img, dtype=np.uint8)
        # Map {0,1} labels back to an inverted 8-bit mask.
        lb = 255 - lb[0] * 255
        lb = np.asarray(lb, dtype=np.uint8)
        cv2.imshow( "img", img)
        cv2.imshow( "lb", lb)
        cv2.waitKey(0)
# Manual visual-inspection entry point; switch to test_test_leadbang()
# to browse the test split instead of the training split.
if __name__ == '__main__':
    test_train_leadbang()
    # test_test_leadbang()
| 36.069252 | 136 | 0.558098 | e=True, mirror=True, ignore_label=255, use_aug=False, network="resnet101"):
self.root = root
self.crop_h = 480
self.crop_w = 480
self.img_width = 512
self.img_height = 512
self.scale = scale
self.ignore_label = ignore_label
self.is_mirror = mirror
self.use_aug = use_aug
self.network = network
self.files = []
self.cache_img = {}
self.cache_label = {}
self.item_idx_list = []
for item_idx in range(1, 1463):
self.item_idx_list.append(item_idx)
img_path = 'source/' + str(item_idx) + ".bmp"
label_path = 'label/' + str(item_idx) + ".bmp"
img_file = osp.join(self.root, img_path)
label_file = osp.join(self.root, label_path)
print('label file: ', label_file)
self.files.append({
"img": img_file,
"label": label_file,
"name": str(item_idx),
"weight": 1
})
self.cache_img[item_idx] = cv2.imread(img_file)
self.cache_label[item_idx] = 255 - cv2.imread(label_file, 0)
print('{} images are loaded!'.format(1462))
    def __len__(self):
        # Dataset length = number of cached (image, label) pairs.
        return len(self.files)
    def generate_scale_label(self, image, label):
        # Randomly rescale image and label by a factor in [0.5, 2.1];
        # nearest-neighbour for the label keeps class ids intact.
        f_scale = 0.5 + random.randint(0, 16) / 10.0
        image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_LINEAR)
        label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_NEAREST)
        return image, label
    def rescale(self, image, label):
        # Resize both image and label to the fixed 512x512 working size.
        image = cv2.resize(image, (self.img_width, self.img_height))
        label = cv2.resize(label, (self.img_width, self.img_height))
        return image, label
    def id2trainId(self, label, reverse=False):
        # Map raw ids to train ids (or back with reverse=True).
        # NOTE(review): self.id_to_trainid is never set in this class, so
        # calling this raises AttributeError — apparently dead code
        # inherited from a cityscapes loader; confirm before use.
        label_copy = label.copy()
        if reverse:
            for v, k in self.id_to_trainid.items():
                label_copy[label == k] = v
        else:
            for k, v in self.id_to_trainid.items():
                label_copy[label == k] = v
        return label_copy
def get_rotate_angle(self, angle_min, angle_max, angle_delta):
count = int((angle_max - angle_min) / angle_delta)
delta = random.random() * (count + 1) * angle_delta
angle = angle_min + delta
if angle < angle_min:
angle = angle_min
if angle > angle_max:
angle = angle_max
return angle
    def rotate(self, image, angle, border_value=None):
        # Rotate one channel about the image center; newly exposed pixels
        # are filled with border_value (cv2's default fill when None).
        center = (self.img_width // 2, self.img_height // 2)
        M = cv2.getRotationMatrix2D(center, angle, 1)
        if border_value is None:
            rotated = cv2.warpAffine(image, M, (self.img_width, self.img_height))
        else:
            rotated = cv2.warpAffine(image, M, (self.img_width, self.img_height), borderValue=(int(border_value),))
        return rotated
def get_border_value(self, mat):
r = mat.shape[0]
c = mat.shape[1]
return (mat[1][1] + mat[1][c - 2] + mat[r-2][1] + mat[r-2][c-2] + mat[2][2] + mat[2][c - 3] + mat[r-3][2] + mat[r-3][c-3]) / 8.0
    def rotate_img_lb(self, image, label, angle):
        # Rotate a CHW image and its mask together.  Each channel is
        # filled with its own background estimate; the interpolated mask
        # is re-binarized to {0, 255} afterwards.
        b = image[0]
        g = image[1]
        r = image[2]
        b = self.rotate(b, angle, border_value=self.get_border_value(b))
        g = self.rotate(g, angle, border_value=self.get_border_value(g))
        r = self.rotate(r, angle, border_value=self.get_border_value(r))
        label = self.rotate(label, angle)
        image = np.asarray([b, g, r], dtype=np.float32)
        ret, label = cv2.threshold(label, 127, 255, cv2.THRESH_BINARY)
        return image, label
    def adv_img_lb(self, img, lb):
        # Augmentation pipeline: brightness shift in [-5, +5), random
        # rotation on a 5-degree grid, then independent 50% horizontal
        # and vertical flips (applied consistently to image and mask).
        img += (random.random() * 10 - 5)
        angle = self.get_rotate_angle(-180, 180, 5)
        img, lb = self.rotate_img_lb(img, lb, angle)
        if random.random() < 0.5:
            img = img[:,:,::-1]
            lb = lb[:,::-1]
        if random.random() < 0.5:
            img = img[:,::-1,:]
            lb = lb[::-1,:]
        return img, lb
    def __getitem__(self, index):
        # Return (image CHW float32, label HW int64, original size, name)
        # for one augmented random 480x480 crop of the 512x512 pair.
        datafiles = self.files[index]
        item_idx = self.item_idx_list[index]
        image = self.cache_img[item_idx].copy()
        label = self.cache_label[item_idx].copy()
        size = image.shape
        name = datafiles["name"]
        image, label = self.rescale(image, label)
        image = np.array(image, dtype=np.float32)
        if self.network == "resnet101":
            # Dataset-specific per-channel means, applied after BGR->RGB.
            mean = (172.43679809570312, 123.58832550048828, 141.29403686523437)
            image = image[:,:,::-1]
            image -= mean
        elif self.network == "mobilenetv2":
            # ImageNet normalization for torchvision-style backbones.
            mean = (0.485, 0.456, 0.406)
            var = (0.229, 0.224, 0.225)
            image = image[:,:,::-1]
            image /= 255
            image -= mean
            image /= var
        elif self.network == "wide_resnet38":
            mean = (0.41738699, 0.45732192, 0.46886091)
            var = (0.25685097, 0.26509955, 0.29067996)
            image = image[:,:,::-1]
            image /= 255
            image -= mean
            image /= var
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        image, label = self.adv_img_lb(image, label)
        img_h, img_w = label.shape
        # Random crop offsets within the augmented 512x512 pair.
        h_off = random.randint(0, img_h - self.crop_h)
        w_off = random.randint(0, img_w - self.crop_w)
        image = np.asarray(image[:,h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        label = np.asarray(label[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        # Binarize 0..255 mask values down to class ids {0, 1}.
        ret, label = cv2.threshold(label, 127, 1, cv2.THRESH_BINARY)
        label = np.array(label, dtype=np.int64)
        return image.copy(), label.copy(), np.array(size), name
class LeadBangTest(data.Dataset):
    """Test split of the LeadBang defect dataset (items 1463..2351).

    No augmentation; ``__getitem__`` only normalizes for the chosen
    backbone.  Crop size equals the working size, so crop offsets are
    always zero.
    """
    def __init__(self, root, max_iters=None,
                 scale=True, mirror=True, ignore_label=255, network="resnet101"):
        # NOTE(review): max_iters / scale / mirror are unused; kept for
        # interface compatibility with LeadBangTrain.
        self.root = root
        self.crop_h = 512
        self.crop_w = 512
        self.img_width = 512
        self.img_height = 512
        self.scale = scale
        self.ignore_label = ignore_label
        self.is_mirror = mirror
        self.network = network  # selects the input normalization scheme
        self.files = []
        self.cache_img = {}    # item index -> image as read by cv2 (BGR)
        self.cache_label = {}  # item index -> inverted 8-bit mask
        self.item_idx_list = []
        for item_idx in range(1463, 2352):
            self.item_idx_list.append(item_idx)
            img_path = 'source/' + str(item_idx) + ".bmp"
            label_path = 'label/' + str(item_idx) + ".bmp"
            img_file = osp.join(self.root, img_path)
            label_file = osp.join(self.root, label_path)
            self.files.append({
                "img": img_file,
                "label": label_file,
                "name": str(item_idx),
                "weight": 1
            })
            print("label: ", label_file)
            self.cache_img[item_idx] = cv2.imread(img_file)
            # Masks are stored inverted: foreground becomes high values.
            self.cache_label[item_idx] = 255-cv2.imread(label_file, 0)
        print('{} images are loaded!'.format(2352-1463))
    def __len__(self):
        # Dataset length = number of cached (image, label) pairs.
        return len(self.files)
    def id2trainId(self, label, reverse=False):
        # NOTE(review): self.id_to_trainid is never set in this class —
        # calling this raises AttributeError; appears to be dead code.
        label_copy = label.copy()
        if reverse:
            for v, k in self.id_to_trainid.items():
                label_copy[label == k] = v
        else:
            for k, v in self.id_to_trainid.items():
                label_copy[label == k] = v
        return label_copy
    def rescale(self, image, label):
        # Resize both image and label to the fixed 512x512 working size.
        image = cv2.resize(image, (self.img_width, self.img_height))
        label = cv2.resize(label, (self.img_width, self.img_height))
        return image, label
    def __getitem__(self, index):
        # Return (image CHW float32, label HW int64, original size, name)
        # with backbone normalization and no augmentation.
        datafiles = self.files[index]
        item_idx = self.item_idx_list[index]
        image = self.cache_img[item_idx].copy()
        label = self.cache_label[item_idx].copy()
        size = image.shape
        name = datafiles["name"]
        image, label = self.rescale(image, label)
        image = np.array(image, dtype=np.float32)
        if self.network == "resnet101":
            # Dataset-specific per-channel means, applied after BGR->RGB.
            mean = (172.43679809570312, 123.58832550048828, 141.29403686523437)
            image = image[:,:,::-1]
            image -= mean
        elif self.network == "mobilenetv2":
            # ImageNet normalization for torchvision-style backbones.
            mean = (0.485, 0.456, 0.406)
            var = (0.229, 0.224, 0.225)
            image = image[:,:,::-1]
            image /= 255
            image -= mean
            image /= var
        elif self.network == "wide_resnet38":
            mean = (0.41738699, 0.45732192, 0.46886091)
            var = (0.25685097, 0.26509955, 0.29067996)
            image = image[:,:,::-1]
            image /= 255
            image -= mean
            image /= var
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        img_h, img_w = label.shape
        # crop size == image size here, so both offsets are always 0.
        h_off = random.randint(0, img_h - self.crop_h)
        w_off = random.randint(0, img_w - self.crop_w)
        image = np.asarray(image[:,h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        label = np.asarray(label[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        # Binarize 0..255 mask values down to class ids {0, 1}.
        ret, label = cv2.threshold(label, 127, 1, cv2.THRESH_BINARY)
        label = np.array(label, dtype=np.int64)
        # NOTE(review): this second conversion is redundant (already int64).
        label = np.asarray(label, dtype=np.int64)
        return image.copy(), label.copy(), np.array(size), name
def test_train_leadbang():
    """Visual sanity check over the training split.

    Fix: removed leftover cityscapes list handling that opened
    ``bus_truck_train.lst`` for writing without ever using or closing it.
    """
    dst = LeadBangTrain("./leadbang/")
    trainloader = data.DataLoader(dst, batch_size=1, num_workers=0)
    for i, dt in enumerate(trainloader):
        imgs, labels, _, name = dt
        img = imgs.numpy()
        lb = labels.numpy()
        print(name)
        print(img.shape)
        print(lb.shape)
        name = name[0]
        # Undo CHW layout, mean subtraction and RGB->BGR for display.
        img = np.transpose(img[0], (1,2,0))
        img += (172.43679809570312, 123.58832550048828, 141.29403686523437)
        img = img[:,:,::-1]
        img = np.array(img, dtype=np.uint8)
        # Map {0,1} labels back to an inverted 8-bit mask.
        lb = 255 - lb[0] * 255
        lb = np.asarray(lb, dtype=np.uint8)
        cv2.imshow( "img", img)
        cv2.imshow( "lb", lb)
        cv2.waitKey(0)
def test_test_leadbang():
    """Visual smoke test: iterate the LeadBang test set and display each
    de-normalized image next to its inverted binary label mask.

    Press any key in the OpenCV window to advance to the next sample.
    """
    dst = LeadBangTest("./leadbang/")
    trainloader = data.DataLoader(dst, batch_size=1, num_workers=0)
    # NOTE(review): leftover cityscapes code used to open
    # ./list/cityscapes/trainval.lst and ./list/cityscapes/bus_truck_train.lst
    # here without ever using or closing them; opening the latter with "w"
    # truncated it as a side effect. Removed as dead code.
    for i, dt in enumerate(trainloader):
        imgs, labels, _, name = dt
        img = imgs.numpy()
        lb = labels.numpy()
        print(name)
        print(img.shape)
        print(lb.shape)
        name = name[0]
        # Undo the BGR mean subtraction and channel flip applied by the
        # loader so the image is viewable again (cv2 expects BGR uint8).
        img = np.transpose(img[0], (1, 2, 0))
        img += (172.43679809570312, 123.58832550048828, 141.29403686523437)
        img = img[:, :, ::-1]
        img = np.array(img, dtype=np.uint8)
        # Turn the 0/1 mask into a white-background visualization.
        lb = 255 - lb[0] * 255
        lb = np.asarray(lb, dtype=np.uint8)
        cv2.imshow("img", img)
        cv2.imshow("lb", lb)
        cv2.waitKey(0)
# Run the interactive training-set visual check when invoked as a script.
if __name__ == '__main__':
    test_train_leadbang()
| true | true |
f7f988e26776eaa47c64078a1aae3b28165ff015 | 24,376 | py | Python | electrum/plugins/coldcard/coldcard.py | c4pt000/electrum-radiocoin | 7cb5f618a9aa8cd03d60191624a0e57cc24646d2 | [
"MIT"
] | null | null | null | electrum/plugins/coldcard/coldcard.py | c4pt000/electrum-radiocoin | 7cb5f618a9aa8cd03d60191624a0e57cc24646d2 | [
"MIT"
] | null | null | null | electrum/plugins/coldcard/coldcard.py | c4pt000/electrum-radiocoin | 7cb5f618a9aa8cd03d60191624a0e57cc24646d2 | [
"MIT"
] | null | null | null | #
# Coldcard Electrum plugin main code.
#
#
import os, time, io
import traceback
from typing import TYPE_CHECKING, Optional
import struct
from electrum import bip32
from electrum.bip32 import BIP32Node, InvalidMasterKeyVersionBytes
from electrum.i18n import _
from electrum.plugin import Device, hook, runs_in_hwd_thread
from electrum.keystore import Hardware_KeyStore, KeyStoreWithMPK
from electrum.transaction import PartialTransaction
from electrum.wallet import Standard_Wallet, Multisig_Wallet, Abstract_Wallet
from electrum.util import bfh, bh2u, versiontuple, UserFacingException
from electrum.base_wizard import ScriptTypeNotSupported
from electrum.logging import get_logger
from ..hw_wallet import HW_PluginBase, HardwareClientBase
from ..hw_wallet.plugin import LibraryFoundButUnusable, only_hook_if_libraries_available
_logger = get_logger(__name__)
# Import the optional hardware-wallet dependencies. If `ckcc` itself is
# missing we degrade gracefully (plugin reports itself unusable); any other
# import failure is logged. Fallback USB IDs let the rest of the module load.
try:
    import hid
    from ckcc.protocol import CCProtocolPacker, CCProtocolUnpacker
    from ckcc.protocol import CCProtoError, CCUserRefused, CCBusyError
    from ckcc.constants import (MAX_MSG_LEN, MAX_BLK_LEN, MSG_SIGNING_MAX_LENGTH, MAX_TXN_LEN,
        AF_CLASSIC, AF_P2SH, AF_P2WPKH, AF_P2WSH, AF_P2WPKH_P2SH, AF_P2WSH_P2SH)
    from ckcc.client import ColdcardDevice, COINKITE_VID, CKCC_PID, CKCC_SIMULATOR_PATH

    requirements_ok = True

    class ElectrumColdcardDevice(ColdcardDevice):
        # avoid use of pycoin for MiTM message signature test
        def mitm_verify(self, sig, expect_xpub):
            # verify a signature (65 bytes) over the session key, using the master bip32 node
            # - customized to use specific EC library of Electrum.
            pubkey = BIP32Node.from_xkey(expect_xpub).eckey
            try:
                pubkey.verify_message_hash(sig[1:65], self.session_key)
                return True
            except Exception:
                # was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
                # are not swallowed. Any verification error means MiTM check failed.
                return False

except ImportError as e:
    if not (isinstance(e, ModuleNotFoundError) and e.name == 'ckcc'):
        _logger.exception('error importing coldcard plugin deps')
    requirements_ok = False

    # fallback USB vendor/product IDs so the module still loads without ckcc
    COINKITE_VID = 0xd13e
    CKCC_PID = 0xcc10

CKCC_SIMULATED_PID = CKCC_PID ^ 0x55aa
class CKCCClient(HardwareClientBase):
    """USB/HID client for a Coldcard hardware wallet or its simulator.

    Wraps ckcc-protocol's ColdcardDevice. The anti-MiTM check is deferred to
    verify_connection() because it needs the expected xfp/xpub and is slow.
    """

    def __init__(self, plugin, handler, dev_path, *, is_simulator=False):
        HardwareClientBase.__init__(self, plugin=plugin)
        self.device = plugin.device
        self.handler = handler

        # if we know what the (xfp, xpub) "should be" then track it here
        self._expected_device = None

        if is_simulator:
            self.dev = ElectrumColdcardDevice(dev_path, encrypt=True)
        else:
            # open the real HID device
            hd = hid.device(path=dev_path)
            hd.open_path(dev_path)
            self.dev = ElectrumColdcardDevice(dev=hd, encrypt=True)

        # NOTE: MiTM test is delayed until we have a hint as to what XPUB we
        # should expect. It's also kinda slow.

    def __repr__(self):
        return '<CKCCClient: xfp=%s label=%r>' % (xfp2str(self.dev.master_fingerprint),
                                                  self.label())

    @runs_in_hwd_thread
    def verify_connection(self, expected_xfp: int, expected_xpub=None):
        """Verify the connected device matches the (xfp, xpub) captured at
        wallet-creation time, then run the anti-MiTM signature check once.

        Raises RuntimeError if a different device is connected.
        """
        ex = (expected_xfp, expected_xpub)

        if self._expected_device == ex:
            # all is as expected
            return

        if expected_xpub is None:
            expected_xpub = self.dev.master_xpub

        if ((self._expected_device is not None)
                or (self.dev.master_fingerprint != expected_xfp)
                or (self.dev.master_xpub != expected_xpub)):
            # probably indicating programing error, not hacking
            _logger.info(f"xpubs. reported by device: {self.dev.master_xpub}. "
                         f"stored in file: {expected_xpub}")
            raise RuntimeError("Expecting %s but that's not what's connected?!" %
                               xfp2str(expected_xfp))

        # check signature over session key
        # - mitm might have lied about xfp and xpub up to here
        # - important that we use value capture at wallet creation time, not some value
        #   we read over USB today
        self.dev.check_mitm(expected_xpub=expected_xpub)

        self._expected_device = ex

        if not getattr(self, 'ckcc_xpub', None):
            self.ckcc_xpub = expected_xpub

        _logger.info("Successfully verified against MiTM")

    def is_pairable(self):
        # can't do anything w/ devices that aren't setup (this code not normally reachable)
        return bool(self.dev.master_xpub)

    @runs_in_hwd_thread
    def close(self):
        # close the HID device (so can be reused)
        self.dev.close()
        self.dev = None

    def is_initialized(self):
        return bool(self.dev.master_xpub)

    def label(self):
        # 'label' of this Coldcard. Warning: gets saved into wallet file, which might
        # not be encrypted, so better for privacy if based on xpub/fingerprint rather than
        # USB serial number.
        if self.dev.is_simulator:
            lab = 'Coldcard Simulator ' + xfp2str(self.dev.master_fingerprint)
        elif not self.dev.master_fingerprint:
            # failback; not expected
            lab = 'Coldcard #' + self.dev.serial
        else:
            lab = 'Coldcard ' + xfp2str(self.dev.master_fingerprint)
        return lab

    def manipulate_keystore_dict_during_wizard_setup(self, d: dict):
        """Stash the master xpub into the keystore dict (after validating its
        version bytes against the configured chain)."""
        master_xpub = self.dev.master_xpub
        if master_xpub is not None:
            try:
                # parse only to validate the xpub magic; the node is not needed
                BIP32Node.from_xkey(master_xpub)
            except InvalidMasterKeyVersionBytes:
                raise UserFacingException(
                    _('Invalid xpub magic. Make sure your {} device is set to the correct chain.').format(self.device) + ' ' +
                    _('You might have to unplug and plug it in again.')
                ) from None
            d['ckcc_xpub'] = master_xpub

    @runs_in_hwd_thread
    def has_usable_connection_with_device(self):
        # Do end-to-end ping test
        try:
            self.ping_check()
            return True
        except Exception:
            # narrowed from a bare `except:`; any comms failure means unusable
            return False

    @runs_in_hwd_thread
    def get_xpub(self, bip32_path, xtype):
        assert xtype in ColdcardPlugin.SUPPORTED_XTYPES
        _logger.info('Derive xtype = %r' % xtype)
        xpub = self.dev.send_recv(CCProtocolPacker.get_xpub(bip32_path), timeout=5000)
        # TODO handle timeout?
        # change type of xpub to the requested type
        try:
            node = BIP32Node.from_xkey(xpub)
        except InvalidMasterKeyVersionBytes:
            raise UserFacingException(_('Invalid xpub magic. Make sure your {} device is set to the correct chain.')
                                      .format(self.device)) from None
        if xtype != 'standard':
            xpub = node._replace(xtype=xtype).to_xpub()
        return xpub

    @runs_in_hwd_thread
    def ping_check(self):
        # check connection is working
        assert self.dev.session_key, 'not encrypted?'
        req = b'1234 Electrum Plugin 4321'  # free up to 59 bytes
        try:
            echo = self.dev.send_recv(CCProtocolPacker.ping(req))
            assert echo == req
        except Exception:
            # narrowed from a bare `except:`
            raise RuntimeError("Communication trouble with Coldcard")

    @runs_in_hwd_thread
    def show_address(self, path, addr_fmt):
        # prompt user w/ address, also returns it immediately.
        return self.dev.send_recv(CCProtocolPacker.show_address(path, addr_fmt), timeout=None)

    @runs_in_hwd_thread
    def show_p2sh_address(self, *args, **kws):
        # prompt user w/ p2sh address, also returns it immediately.
        return self.dev.send_recv(CCProtocolPacker.show_p2sh_address(*args, **kws), timeout=None)

    @runs_in_hwd_thread
    def get_version(self):
        # gives list of strings
        return self.dev.send_recv(CCProtocolPacker.version(), timeout=1000).split('\n')

    @runs_in_hwd_thread
    def sign_message_start(self, path, msg):
        # this starts the UX experience.
        self.dev.send_recv(CCProtocolPacker.sign_message(msg, path), timeout=None)

    @runs_in_hwd_thread
    def sign_message_poll(self):
        # poll device... if user has approved, will get tuple: (addr, sig) else None
        return self.dev.send_recv(CCProtocolPacker.get_signed_msg(), timeout=None)

    @runs_in_hwd_thread
    def sign_transaction_start(self, raw_psbt: bytes, *, finalize: bool = False):
        # Multiple steps to sign:
        # - upload binary
        # - start signing UX
        # - wait for coldcard to complete process, or have it refused.
        # - download resulting txn
        assert 20 <= len(raw_psbt) < MAX_TXN_LEN, 'PSBT is too big'
        dlen, chk = self.dev.upload_file(raw_psbt)

        resp = self.dev.send_recv(CCProtocolPacker.sign_transaction(dlen, chk, finalize=finalize),
                                  timeout=None)

        # a non-None response here is an immediate refusal/error message
        if resp is not None:
            raise ValueError(resp)

    @runs_in_hwd_thread
    def sign_transaction_poll(self):
        # poll device... if user has approved, will get tuple: (length, checksum) else None
        return self.dev.send_recv(CCProtocolPacker.get_signed_txn(), timeout=None)

    @runs_in_hwd_thread
    def download_file(self, length, checksum, file_number=1):
        # get a file
        return self.dev.download_file(length, checksum, file_number=file_number)
class Coldcard_KeyStore(Hardware_KeyStore):
    """Hardware keystore backed by a Coldcard device (signing over USB,
    or offline via PSBT files prepared elsewhere)."""

    hw_type = 'coldcard'
    device = 'Coldcard'

    plugin: 'ColdcardPlugin'

    def __init__(self, d):
        Hardware_KeyStore.__init__(self, d)
        # Errors and other user interaction is done through the wallet's
        # handler. The handler is per-window and preserved across
        # device reconnects
        self.force_watching_only = False
        self.ux_busy = False

        # we need to know at least the fingerprint of the master xpub to verify against MiTM
        # - device reports these value during encryption setup process
        # - full xpub value now optional
        self.ckcc_xpub = d.get('ckcc_xpub', None)

    def dump(self):
        # our additions to the stored data about keystore -- only during creation?
        d = Hardware_KeyStore.dump(self)
        d['ckcc_xpub'] = self.ckcc_xpub
        return d

    def get_xfp_int(self) -> int:
        """Root fingerprint as a little-endian unsigned int (device order)."""
        xfp = self.get_root_fingerprint()
        assert xfp is not None
        return xfp_int_from_xfp_bytes(bfh(xfp))

    def get_client(self):
        # called when user tries to do something like view address, sign somthing.
        # - not called during probing/setup
        # - will fail if indicated device can't produce the xpub (at derivation) expected
        rv = self.plugin.get_client(self)
        if rv:
            xfp_int = self.get_xfp_int()
            rv.verify_connection(xfp_int, self.ckcc_xpub)

        return rv

    def give_error(self, message, clear_client=False):
        """Report an error to the user (unless the device UX already showed
        one) and raise it as a UserFacingException."""
        self.logger.info(message)
        if not self.ux_busy:
            self.handler.show_error(message)
        else:
            self.ux_busy = False
        if clear_client:
            self.client = None
        raise UserFacingException(message)

    def wrap_busy(func):
        # decorator: function takes over the UX on the device.
        def wrapper(self, *args, **kwargs):
            try:
                self.ux_busy = True
                return func(self, *args, **kwargs)
            finally:
                self.ux_busy = False
        return wrapper

    def decrypt_message(self, pubkey, message, password):
        raise UserFacingException(_('Encryption and decryption are currently not supported for {}').format(self.device))

    @wrap_busy
    def sign_message(self, sequence, message, password):
        # Sign a message on device. Since we have big screen, of course we
        # have to show the message unabiguously there first!
        try:
            msg = message.encode('ascii', errors='strict')
            assert 1 <= len(msg) <= MSG_SIGNING_MAX_LENGTH
        except (UnicodeError, AssertionError):
            # there are other restrictions on message content,
            # but let the device enforce and report those
            self.handler.show_error('Only short (%d max) ASCII messages can be signed.'
                                    % MSG_SIGNING_MAX_LENGTH)
            return b''

        path = self.get_derivation_prefix() + ("/%d/%d" % sequence)
        try:
            cl = self.get_client()
            try:
                self.handler.show_message("Signing message (using %s)..." % path)

                cl.sign_message_start(path, msg)

                while True:
                    # How to kill some time, without locking UI?
                    time.sleep(0.250)

                    resp = cl.sign_message_poll()
                    if resp is not None:
                        break

            finally:
                self.handler.finished()

            assert len(resp) == 2
            addr, raw_sig = resp

            # already encoded in Radiocoin fashion, binary.
            assert 40 < len(raw_sig) <= 65

            return raw_sig

        except (CCUserRefused, CCBusyError) as exc:
            self.handler.show_error(str(exc))
        except CCProtoError as exc:
            # BUGFIX: this handler previously reported 'Error showing address'
            # (copy-pasted from show_address) while signing a message.
            self.logger.exception('Error signing message')
            self.handler.show_error('{}\n\n{}'.format(
                _('Error signing message') + ':', str(exc)))
        except Exception as e:
            self.give_error(e, True)

        # give empty bytes for error cases; it seems to clear the old signature box
        return b''

    @wrap_busy
    def sign_transaction(self, tx, password):
        # Upload PSBT for signing.
        # - we can also work offline (without paired device present)
        if tx.is_complete():
            return

        client = self.get_client()

        assert client.dev.master_fingerprint == self.get_xfp_int()

        raw_psbt = tx.serialize_as_bytes()

        try:
            try:
                self.handler.show_message("Authorize Transaction...")

                client.sign_transaction_start(raw_psbt)

                while True:
                    # How to kill some time, without locking UI?
                    time.sleep(0.250)

                    resp = client.sign_transaction_poll()
                    if resp is not None:
                        break

                rlen, rsha = resp

                # download the resulting txn.
                raw_resp = client.download_file(rlen, rsha)

            finally:
                self.handler.finished()

        except (CCUserRefused, CCBusyError) as exc:
            self.logger.info(f'Did not sign: {exc}')
            self.handler.show_error(str(exc))
            return
        except BaseException as e:
            self.logger.exception('')
            self.give_error(e, True)
            return

        tx2 = PartialTransaction.from_raw_psbt(raw_resp)
        # apply partial signatures back into txn
        tx.combine_with_other_psbt(tx2)

        # caller's logic looks at tx now and if it's sufficiently signed,
        # will send it if that's the user's intent.

    @staticmethod
    def _encode_txin_type(txin_type):
        # Map from Electrum code names to our code numbers.
        return {'standard': AF_CLASSIC, 'p2pkh': AF_CLASSIC,
                'p2sh': AF_P2SH,
                'p2wpkh-p2sh': AF_P2WPKH_P2SH,
                'p2wpkh': AF_P2WPKH,
                'p2wsh-p2sh': AF_P2WSH_P2SH,
                'p2wsh': AF_P2WSH,
                }[txin_type]

    @wrap_busy
    def show_address(self, sequence, txin_type):
        client = self.get_client()
        address_path = self.get_derivation_prefix()[2:] + "/%d/%d"%sequence
        addr_fmt = self._encode_txin_type(txin_type)
        try:
            try:
                self.handler.show_message(_("Showing address ..."))
                dev_addr = client.show_address(address_path, addr_fmt)
                # we could double check address here
            finally:
                self.handler.finished()
        except CCProtoError as exc:
            self.logger.exception('Error showing address')
            self.handler.show_error('{}\n\n{}'.format(
                _('Error showing address') + ':', str(exc)))
        except BaseException as exc:
            self.logger.exception('')
            self.handler.show_error(exc)

    @wrap_busy
    def show_p2sh_address(self, M, script, xfp_paths, txin_type):
        client = self.get_client()
        addr_fmt = self._encode_txin_type(txin_type)
        try:
            try:
                self.handler.show_message(_("Showing address ..."))
                dev_addr = client.show_p2sh_address(M, xfp_paths, script, addr_fmt=addr_fmt)
                # we could double check address here
            finally:
                self.handler.finished()
        except CCProtoError as exc:
            self.logger.exception('Error showing address')
            self.handler.show_error('{}.\n{}\n\n{}'.format(
                _('Error showing address'),
                _('Make sure you have imported the correct wallet description '
                  'file on the device for this multisig wallet.'),
                str(exc)))
        except BaseException as exc:
            self.logger.exception('')
            self.handler.show_error(exc)
class ColdcardPlugin(HW_PluginBase):
    """Electrum hardware-wallet plugin for the Coinkite Coldcard."""

    keystore_class = Coldcard_KeyStore
    minimum_library = (0, 7, 7)

    DEVICE_IDS = [
        (COINKITE_VID, CKCC_PID),
        (COINKITE_VID, CKCC_SIMULATED_PID),
    ]

    SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')

    def __init__(self, parent, config, name):
        HW_PluginBase.__init__(self, parent, config, name)

        self.libraries_available = self.check_libraries_available()
        if not self.libraries_available:
            return

        self.device_manager().register_devices(self.DEVICE_IDS, plugin=self)
        self.device_manager().register_enumerate_func(self.detect_simulator)

    def get_library_version(self):
        import ckcc
        try:
            version = ckcc.__version__
        except AttributeError:
            version = 'unknown'
        if requirements_ok:
            return version
        else:
            raise LibraryFoundButUnusable(library_version=version)

    def detect_simulator(self):
        # if there is a simulator running on this machine,
        # return details about it so it's offered as a pairing choice
        fn = CKCC_SIMULATOR_PATH

        if os.path.exists(fn):
            return [Device(path=fn,
                           interface_number=-1,
                           id_=fn,
                           product_key=(COINKITE_VID, CKCC_SIMULATED_PID),
                           usage_page=0,
                           transport_ui_string='simulator')]

        return []

    @runs_in_hwd_thread
    def create_client(self, device, handler):
        if handler:
            self.handler = handler

        # We are given a HID device, or at least some details about it.
        # Not sure why not we aren't just given a HID library handle, but
        # the 'path' is unabiguous, so we'll use that.
        try:
            # broad catch is deliberate: any late USB/HID failure just means
            # the device cannot be paired right now
            return CKCCClient(self, handler, device.path,
                              is_simulator=(device.product_key[1] == CKCC_SIMULATED_PID))
        except Exception:
            self.logger.exception('late failure connecting to device?')
            return None

    def setup_device(self, device_info, wizard, purpose):
        device_id = device_info.device.id_
        client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
        return client

    def get_xpub(self, device_id, derivation, xtype, wizard):
        # this seems to be part of the pairing process only, not during normal ops?
        # base_wizard:on_hw_derivation
        if xtype not in self.SUPPORTED_XTYPES:
            raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
        client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
        client.ping_check()

        xpub = client.get_xpub(derivation, xtype)
        return xpub

    @runs_in_hwd_thread
    def get_client(self, keystore, force_pair=True, *,
                   devices=None, allow_user_interaction=True) -> Optional['CKCCClient']:
        # Acquire a connection to the hardware device (via USB)
        client = super().get_client(keystore, force_pair,
                                    devices=devices,
                                    allow_user_interaction=allow_user_interaction)

        if client is not None:
            client.ping_check()

        return client

    @staticmethod
    def export_ms_wallet(wallet: Multisig_Wallet, fp, name):
        # Build the text file Coldcard needs to understand the multisig wallet
        # it is participating in. All involved Coldcards can share same file.
        assert isinstance(wallet, Multisig_Wallet)

        print('# Exported from Electrum', file=fp)
        print(f'Name: {name:.20s}', file=fp)
        print(f'Policy: {wallet.m} of {wallet.n}', file=fp)
        print(f'Format: {wallet.txin_type.upper()}', file=fp)

        xpubs = []
        for xpub, ks in zip(wallet.get_master_public_keys(), wallet.get_keystores()):  # type: str, KeyStoreWithMPK
            fp_bytes, der_full = ks.get_fp_and_derivation_to_be_used_in_partial_tx(der_suffix=[], only_der_suffix=False)
            fp_hex = fp_bytes.hex().upper()
            der_prefix_str = bip32.convert_bip32_intpath_to_strpath(der_full)
            xpubs.append((fp_hex, xpub, der_prefix_str))

        # Before v3.2.1 derivation didn't matter too much to the Coldcard, since it
        # could use key path data from PSBT or USB request as needed. However,
        # derivation data is now required.

        print('', file=fp)

        assert len(xpubs) == wallet.n
        for xfp, xpub, der_prefix in xpubs:
            print(f'Derivation: {der_prefix}', file=fp)
            print(f'{xfp}: {xpub}\n', file=fp)

    def show_address(self, wallet, address, keystore: 'Coldcard_KeyStore' = None):
        if keystore is None:
            keystore = wallet.get_keystore()
        if not self.show_address_helper(wallet, address, keystore):
            return

        txin_type = wallet.get_txin_type(address)

        # Standard_Wallet => not multisig, must be bip32
        if type(wallet) is Standard_Wallet:
            sequence = wallet.get_address_index(address)
            keystore.show_address(sequence, txin_type)
        elif type(wallet) is Multisig_Wallet:
            assert isinstance(wallet, Multisig_Wallet)  # only here for type-hints in IDE
            # More involved for P2SH/P2WSH addresses: need M, and all public keys, and their
            # derivation paths. Must construct script, and track fingerprints+paths for
            # all those keys

            pubkey_deriv_info = wallet.get_public_keys_with_deriv_info(address)
            pubkey_hexes = sorted([pk.hex() for pk in list(pubkey_deriv_info)])
            xfp_paths = []
            for pubkey_hex in pubkey_hexes:
                pubkey = bytes.fromhex(pubkey_hex)
                ks, der_suffix = pubkey_deriv_info[pubkey]
                fp_bytes, der_full = ks.get_fp_and_derivation_to_be_used_in_partial_tx(der_suffix, only_der_suffix=False)
                xfp_int = xfp_int_from_xfp_bytes(fp_bytes)
                xfp_paths.append([xfp_int] + list(der_full))

            script = bfh(wallet.pubkeys_to_scriptcode(pubkey_hexes))

            keystore.show_p2sh_address(wallet.m, script, xfp_paths, txin_type)
        else:
            keystore.handler.show_error(_('This function is only available for standard wallets when using {}.').format(self.device))
            return
def xfp_int_from_xfp_bytes(fp_bytes: bytes) -> int:
    """Interpret a device fingerprint as an unsigned little-endian integer."""
    # reversed bytes read big-endian == original bytes read little-endian
    return int.from_bytes(fp_bytes[::-1], byteorder="big", signed=False)
def xfp2str(xfp: int) -> str:
    """Standardized rendering of an xpub fingerprint: 8 lowercase hex digits.

    It is really a 4-byte string, not an integer; '0x%08x' would show the
    wrong endianness.
    """
    packed = struct.pack('<I', xfp)
    # bytes.hex() already yields lowercase digits
    return packed.hex()
# EOF
| 38.147105 | 133 | 0.623154 |
import os, time, io
import traceback
from typing import TYPE_CHECKING, Optional
import struct
from electrum import bip32
from electrum.bip32 import BIP32Node, InvalidMasterKeyVersionBytes
from electrum.i18n import _
from electrum.plugin import Device, hook, runs_in_hwd_thread
from electrum.keystore import Hardware_KeyStore, KeyStoreWithMPK
from electrum.transaction import PartialTransaction
from electrum.wallet import Standard_Wallet, Multisig_Wallet, Abstract_Wallet
from electrum.util import bfh, bh2u, versiontuple, UserFacingException
from electrum.base_wizard import ScriptTypeNotSupported
from electrum.logging import get_logger
from ..hw_wallet import HW_PluginBase, HardwareClientBase
from ..hw_wallet.plugin import LibraryFoundButUnusable, only_hook_if_libraries_available
_logger = get_logger(__name__)
# Import the optional hardware-wallet dependencies. If `ckcc` itself is
# missing we degrade gracefully (plugin reports itself unusable); any other
# import failure is logged. Fallback USB IDs let the rest of the module load.
try:
    import hid
    from ckcc.protocol import CCProtocolPacker, CCProtocolUnpacker
    from ckcc.protocol import CCProtoError, CCUserRefused, CCBusyError
    from ckcc.constants import (MAX_MSG_LEN, MAX_BLK_LEN, MSG_SIGNING_MAX_LENGTH, MAX_TXN_LEN,
        AF_CLASSIC, AF_P2SH, AF_P2WPKH, AF_P2WSH, AF_P2WPKH_P2SH, AF_P2WSH_P2SH)
    from ckcc.client import ColdcardDevice, COINKITE_VID, CKCC_PID, CKCC_SIMULATOR_PATH

    requirements_ok = True

    class ElectrumColdcardDevice(ColdcardDevice):
        # avoid use of pycoin for MiTM message signature test
        def mitm_verify(self, sig, expect_xpub):
            # verify a signature (65 bytes) over the session key, using the master bip32 node
            pubkey = BIP32Node.from_xkey(expect_xpub).eckey
            try:
                pubkey.verify_message_hash(sig[1:65], self.session_key)
                return True
            except Exception:
                # was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
                # are not swallowed. Any verification error means MiTM check failed.
                return False

except ImportError as e:
    if not (isinstance(e, ModuleNotFoundError) and e.name == 'ckcc'):
        _logger.exception('error importing coldcard plugin deps')
    requirements_ok = False

    # fallback USB vendor/product IDs so the module still loads without ckcc
    COINKITE_VID = 0xd13e
    CKCC_PID = 0xcc10

CKCC_SIMULATED_PID = CKCC_PID ^ 0x55aa
class CKCCClient(HardwareClientBase):
    """USB/HID client for a Coldcard hardware wallet or its simulator.

    Wraps ckcc-protocol's ColdcardDevice. The anti-MiTM check is deferred to
    verify_connection() because it needs the expected xfp/xpub and is slow.
    """

    def __init__(self, plugin, handler, dev_path, *, is_simulator=False):
        HardwareClientBase.__init__(self, plugin=plugin)
        self.device = plugin.device
        self.handler = handler

        # if we know what the (xfp, xpub) "should be" then track it here
        self._expected_device = None

        if is_simulator:
            self.dev = ElectrumColdcardDevice(dev_path, encrypt=True)
        else:
            # open the real HID device
            hd = hid.device(path=dev_path)
            hd.open_path(dev_path)
            self.dev = ElectrumColdcardDevice(dev=hd, encrypt=True)

        # NOTE: MiTM test is delayed until we have a hint as to what XPUB we
        # should expect. It's also kinda slow.

    def __repr__(self):
        return '<CKCCClient: xfp=%s label=%r>' % (xfp2str(self.dev.master_fingerprint),
                                                  self.label())

    @runs_in_hwd_thread
    def verify_connection(self, expected_xfp: int, expected_xpub=None):
        """Verify the connected device matches the (xfp, xpub) captured at
        wallet-creation time, then run the anti-MiTM signature check once.

        Raises RuntimeError if a different device is connected.
        """
        ex = (expected_xfp, expected_xpub)

        if self._expected_device == ex:
            # all is as expected
            return

        if expected_xpub is None:
            expected_xpub = self.dev.master_xpub

        if ((self._expected_device is not None)
                or (self.dev.master_fingerprint != expected_xfp)
                or (self.dev.master_xpub != expected_xpub)):
            # probably indicating programing error, not hacking
            _logger.info(f"xpubs. reported by device: {self.dev.master_xpub}. "
                         f"stored in file: {expected_xpub}")
            raise RuntimeError("Expecting %s but that's not what's connected?!" %
                               xfp2str(expected_xfp))

        # check signature over session key; mitm might have lied about xfp/xpub
        # up to here -- important that we use the value captured at wallet
        # creation time, not something read over USB today
        self.dev.check_mitm(expected_xpub=expected_xpub)

        self._expected_device = ex

        if not getattr(self, 'ckcc_xpub', None):
            self.ckcc_xpub = expected_xpub

        _logger.info("Successfully verified against MiTM")

    def is_pairable(self):
        # can't do anything w/ devices that aren't setup
        return bool(self.dev.master_xpub)

    @runs_in_hwd_thread
    def close(self):
        # close the HID device (so it can be reused)
        self.dev.close()
        self.dev = None

    def is_initialized(self):
        return bool(self.dev.master_xpub)

    def label(self):
        # 'label' of this Coldcard. Gets saved into the wallet file, which might
        # not be encrypted, so better for privacy if based on xpub/fingerprint
        # rather than USB serial number.
        if self.dev.is_simulator:
            lab = 'Coldcard Simulator ' + xfp2str(self.dev.master_fingerprint)
        elif not self.dev.master_fingerprint:
            # fallback; not expected
            # BUGFIX: this line had been truncated to `lab = 'Coldcard` (an
            # unterminated string literal); restored from the commented copy.
            lab = 'Coldcard #' + self.dev.serial
        else:
            lab = 'Coldcard ' + xfp2str(self.dev.master_fingerprint)
        return lab

    def manipulate_keystore_dict_during_wizard_setup(self, d: dict):
        """Stash the master xpub into the keystore dict (after validating its
        version bytes against the configured chain)."""
        master_xpub = self.dev.master_xpub
        if master_xpub is not None:
            try:
                # parse only to validate the xpub magic; the node is not needed
                BIP32Node.from_xkey(master_xpub)
            except InvalidMasterKeyVersionBytes:
                raise UserFacingException(
                    _('Invalid xpub magic. Make sure your {} device is set to the correct chain.').format(self.device) + ' ' +
                    _('You might have to unplug and plug it in again.')
                ) from None
            d['ckcc_xpub'] = master_xpub

    @runs_in_hwd_thread
    def has_usable_connection_with_device(self):
        # Do end-to-end ping test
        try:
            self.ping_check()
            return True
        except Exception:
            # narrowed from a bare `except:`; any comms failure means unusable
            return False

    @runs_in_hwd_thread
    def get_xpub(self, bip32_path, xtype):
        assert xtype in ColdcardPlugin.SUPPORTED_XTYPES
        _logger.info('Derive xtype = %r' % xtype)
        xpub = self.dev.send_recv(CCProtocolPacker.get_xpub(bip32_path), timeout=5000)
        # change type of xpub to the requested type
        try:
            node = BIP32Node.from_xkey(xpub)
        except InvalidMasterKeyVersionBytes:
            raise UserFacingException(_('Invalid xpub magic. Make sure your {} device is set to the correct chain.')
                                      .format(self.device)) from None
        if xtype != 'standard':
            xpub = node._replace(xtype=xtype).to_xpub()
        return xpub

    @runs_in_hwd_thread
    def ping_check(self):
        # check connection is working
        assert self.dev.session_key, 'not encrypted?'
        req = b'1234 Electrum Plugin 4321'  # free up to 59 bytes
        try:
            echo = self.dev.send_recv(CCProtocolPacker.ping(req))
            assert echo == req
        except Exception:
            # narrowed from a bare `except:`
            raise RuntimeError("Communication trouble with Coldcard")

    @runs_in_hwd_thread
    def show_address(self, path, addr_fmt):
        # prompt user w/ address, also returns it immediately.
        return self.dev.send_recv(CCProtocolPacker.show_address(path, addr_fmt), timeout=None)

    @runs_in_hwd_thread
    def show_p2sh_address(self, *args, **kws):
        # prompt user w/ p2sh address, also returns it immediately.
        return self.dev.send_recv(CCProtocolPacker.show_p2sh_address(*args, **kws), timeout=None)

    @runs_in_hwd_thread
    def get_version(self):
        # gives list of strings
        return self.dev.send_recv(CCProtocolPacker.version(), timeout=1000).split('\n')

    @runs_in_hwd_thread
    def sign_message_start(self, path, msg):
        # this starts the UX experience on the device.
        self.dev.send_recv(CCProtocolPacker.sign_message(msg, path), timeout=None)

    @runs_in_hwd_thread
    def sign_message_poll(self):
        # poll device... if user has approved, will get tuple: (addr, sig) else None
        return self.dev.send_recv(CCProtocolPacker.get_signed_msg(), timeout=None)

    @runs_in_hwd_thread
    def sign_transaction_start(self, raw_psbt: bytes, *, finalize: bool = False):
        # Multiple steps to sign: upload binary, start signing UX, wait for
        # coldcard to complete (or refuse), then download the resulting txn.
        assert 20 <= len(raw_psbt) < MAX_TXN_LEN, 'PSBT is too big'
        dlen, chk = self.dev.upload_file(raw_psbt)

        resp = self.dev.send_recv(CCProtocolPacker.sign_transaction(dlen, chk, finalize=finalize),
                                  timeout=None)

        # a non-None response here is an immediate refusal/error message
        if resp is not None:
            raise ValueError(resp)

    @runs_in_hwd_thread
    def sign_transaction_poll(self):
        # poll device... if user has approved, will get tuple: (length, checksum) else None
        return self.dev.send_recv(CCProtocolPacker.get_signed_txn(), timeout=None)

    @runs_in_hwd_thread
    def download_file(self, length, checksum, file_number=1):
        # get a file
        return self.dev.download_file(length, checksum, file_number=file_number)
class Coldcard_KeyStore(Hardware_KeyStore):
hw_type = 'coldcard'
device = 'Coldcard'
plugin: 'ColdcardPlugin'
def __init__(self, d):
Hardware_KeyStore.__init__(self, d)
# Errors and other user interaction is done through the wallet's
self.force_watching_only = False
self.ux_busy = False
self.ckcc_xpub = d.get('ckcc_xpub', None)
def dump(self):
d = Hardware_KeyStore.dump(self)
d['ckcc_xpub'] = self.ckcc_xpub
return d
def get_xfp_int(self) -> int:
xfp = self.get_root_fingerprint()
assert xfp is not None
return xfp_int_from_xfp_bytes(bfh(xfp))
def get_client(self):
rv = self.plugin.get_client(self)
if rv:
xfp_int = self.get_xfp_int()
rv.verify_connection(xfp_int, self.ckcc_xpub)
return rv
def give_error(self, message, clear_client=False):
self.logger.info(message)
if not self.ux_busy:
self.handler.show_error(message)
else:
self.ux_busy = False
if clear_client:
self.client = None
raise UserFacingException(message)
def wrap_busy(func):
# decorator: function takes over the UX on the device.
def wrapper(self, *args, **kwargs):
try:
self.ux_busy = True
return func(self, *args, **kwargs)
finally:
self.ux_busy = False
return wrapper
def decrypt_message(self, pubkey, message, password):
raise UserFacingException(_('Encryption and decryption are currently not supported for {}').format(self.device))
@wrap_busy
def sign_message(self, sequence, message, password):
# Sign a message on device. Since we have big screen, of course we
# have to show the message unabiguously there first!
try:
msg = message.encode('ascii', errors='strict')
assert 1 <= len(msg) <= MSG_SIGNING_MAX_LENGTH
except (UnicodeError, AssertionError):
# there are other restrictions on message content,
# but let the device enforce and report those
self.handler.show_error('Only short (%d max) ASCII messages can be signed.'
% MSG_SIGNING_MAX_LENGTH)
return b''
path = self.get_derivation_prefix() + ("/%d/%d" % sequence)
try:
cl = self.get_client()
try:
self.handler.show_message("Signing message (using %s)..." % path)
cl.sign_message_start(path, msg)
while 1:
# How to kill some time, without locking UI?
time.sleep(0.250)
resp = cl.sign_message_poll()
if resp is not None:
break
finally:
self.handler.finished()
assert len(resp) == 2
addr, raw_sig = resp
# already encoded in Radiocoin fashion, binary.
assert 40 < len(raw_sig) <= 65
return raw_sig
except (CCUserRefused, CCBusyError) as exc:
self.handler.show_error(str(exc))
except CCProtoError as exc:
self.logger.exception('Error showing address')
self.handler.show_error('{}\n\n{}'.format(
_('Error showing address') + ':', str(exc)))
except Exception as e:
self.give_error(e, True)
# give empty bytes for error cases; it seems to clear the old signature box
return b''
@wrap_busy
def sign_transaction(self, tx, password):
# Upload PSBT for signing.
# - we can also work offline (without paired device present)
if tx.is_complete():
return
client = self.get_client()
assert client.dev.master_fingerprint == self.get_xfp_int()
raw_psbt = tx.serialize_as_bytes()
try:
try:
self.handler.show_message("Authorize Transaction...")
client.sign_transaction_start(raw_psbt)
while 1:
# How to kill some time, without locking UI?
time.sleep(0.250)
resp = client.sign_transaction_poll()
if resp is not None:
break
rlen, rsha = resp
# download the resulting txn.
raw_resp = client.download_file(rlen, rsha)
finally:
self.handler.finished()
except (CCUserRefused, CCBusyError) as exc:
self.logger.info(f'Did not sign: {exc}')
self.handler.show_error(str(exc))
return
except BaseException as e:
self.logger.exception('')
self.give_error(e, True)
return
tx2 = PartialTransaction.from_raw_psbt(raw_resp)
# apply partial signatures back into txn
tx.combine_with_other_psbt(tx2)
# caller's logic looks at tx now and if it's sufficiently signed,
# will send it if that's the user's intent.
@staticmethod
def _encode_txin_type(txin_type):
# Map from Electrum code names to our code numbers.
return {'standard': AF_CLASSIC, 'p2pkh': AF_CLASSIC,
'p2sh': AF_P2SH,
'p2wpkh-p2sh': AF_P2WPKH_P2SH,
'p2wpkh': AF_P2WPKH,
'p2wsh-p2sh': AF_P2WSH_P2SH,
'p2wsh': AF_P2WSH,
}[txin_type]
    @wrap_busy
    def show_address(self, sequence, txin_type):
        """Display the address at (change, index) = *sequence* on the Coldcard
        screen so the user can verify it matches what Electrum shows."""
        client = self.get_client()
        # derivation prefix with the leading two chars stripped (presumably
        # "m/" -- confirm against get_derivation_prefix), plus "/change/index"
        address_path = self.get_derivation_prefix()[2:] + "/%d/%d"%sequence
        addr_fmt = self._encode_txin_type(txin_type)
        try:
            try:
                self.handler.show_message(_("Showing address ..."))
                dev_addr = client.show_address(address_path, addr_fmt)
                # we could double check address here
            finally:
                self.handler.finished()
        except CCProtoError as exc:
            self.logger.exception('Error showing address')
            self.handler.show_error('{}\n\n{}'.format(
                _('Error showing address') + ':', str(exc)))
        except BaseException as exc:
            self.logger.exception('')
            self.handler.show_error(exc)
    @wrap_busy
    def show_p2sh_address(self, M, script, xfp_paths, txin_type):
        """Display a multisig (M-of-N) address on the Coldcard screen.

        The device reconstructs the address from the redeem/witness *script*
        and the per-cosigner fingerprint+path lists in *xfp_paths*; it can
        only do so if the matching multisig wallet file was imported on it.
        """
        client = self.get_client()
        addr_fmt = self._encode_txin_type(txin_type)
        try:
            try:
                self.handler.show_message(_("Showing address ..."))
                dev_addr = client.show_p2sh_address(M, xfp_paths, script, addr_fmt=addr_fmt)
                # we could double check address here
            finally:
                self.handler.finished()
        except CCProtoError as exc:
            self.logger.exception('Error showing address')
            self.handler.show_error('{}.\n{}\n\n{}'.format(
                _('Error showing address'),
                _('Make sure you have imported the correct wallet description '
                  'file on the device for this multisig wallet.'),
                str(exc)))
        except BaseException as exc:
            self.logger.exception('')
            self.handler.show_error(exc)
class ColdcardPlugin(HW_PluginBase):
    """Electrum hardware-wallet plugin for the Coinkite Coldcard.

    Registers the device USB ids (and a local simulator socket) with the
    device manager and adapts wallet-level operations onto CKCCClient.
    """
    keystore_class = Coldcard_KeyStore
    minimum_library = (0, 7, 7)   # minimum usable ckcc-protocol version
    DEVICE_IDS = [
        (COINKITE_VID, CKCC_PID),
        (COINKITE_VID, CKCC_SIMULATED_PID)
    ]
    SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
    def __init__(self, parent, config, name):
        """Register device ids and the simulator probe, if libraries load."""
        HW_PluginBase.__init__(self, parent, config, name)
        self.libraries_available = self.check_libraries_available()
        if not self.libraries_available:
            return
        self.device_manager().register_devices(self.DEVICE_IDS, plugin=self)
        self.device_manager().register_enumerate_func(self.detect_simulator)
    def get_library_version(self):
        """Return the installed ckcc-protocol version string.

        Raises LibraryFoundButUnusable when the module imports but
        ``requirements_ok`` (a module-level flag, defined outside this view)
        is false.
        """
        import ckcc
        try:
            version = ckcc.__version__
        except AttributeError:
            version = 'unknown'
        if requirements_ok:
            return version
        else:
            raise LibraryFoundButUnusable(library_version=version)
    def detect_simulator(self):
        # if there is a simulator running on this machine,
        # return details about it so it's offered as a pairing choice
        fn = CKCC_SIMULATOR_PATH
        if os.path.exists(fn):
            return [Device(path=fn,
                           interface_number=-1,
                           id_=fn,
                           product_key=(COINKITE_VID, CKCC_SIMULATED_PID),
                           usage_page=0,
                           transport_ui_string='simulator')]
        return []
    @runs_in_hwd_thread
    def create_client(self, device, handler):
        """Build a CKCCClient for *device*; returns None if connecting fails."""
        if handler:
            self.handler = handler
        # the 'path' is unambiguous, so we'll use that.
        try:
            rv = CKCCClient(self, handler, device.path,
                            is_simulator=(device.product_key[1] == CKCC_SIMULATED_PID))
            return rv
        except Exception as e:
            self.logger.exception('late failure connecting to device?')
            return None
    def setup_device(self, device_info, wizard, purpose):
        """Pair with the device selected in the install wizard."""
        device_id = device_info.device.id_
        client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
        return client
    def get_xpub(self, device_id, derivation, xtype, wizard):
        """Fetch the xpub at *derivation* from the device; rejects script
        types the Coldcard cannot address (see SUPPORTED_XTYPES)."""
        if xtype not in self.SUPPORTED_XTYPES:
            raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
        client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
        client.ping_check()
        xpub = client.get_xpub(derivation, xtype)
        return xpub
    @runs_in_hwd_thread
    def get_client(self, keystore, force_pair=True, *,
                   devices=None, allow_user_interaction=True) -> Optional['CKCCClient']:
        """Resolve the paired client via the base class, then verify it is
        still responsive before handing it out."""
        client = super().get_client(keystore, force_pair,
                                    devices=devices,
                                    allow_user_interaction=allow_user_interaction)
        if client is not None:
            client.ping_check()
        return client
    @staticmethod
    def export_ms_wallet(wallet: Multisig_Wallet, fp, name):
        """Write a Coldcard multisig wallet-description text file to *fp*."""
        assert isinstance(wallet, Multisig_Wallet)
        print('# Exported from Electrum', file=fp)
        print(f'Name: {name:.20s}', file=fp)
        print(f'Policy: {wallet.m} of {wallet.n}', file=fp)
        print(f'Format: {wallet.txin_type.upper()}', file=fp)
        xpubs = []
        for xpub, ks in zip(wallet.get_master_public_keys(), wallet.get_keystores()):
            fp_bytes, der_full = ks.get_fp_and_derivation_to_be_used_in_partial_tx(der_suffix=[], only_der_suffix=False)
            fp_hex = fp_bytes.hex().upper()
            der_prefix_str = bip32.convert_bip32_intpath_to_strpath(der_full)
            xpubs.append((fp_hex, xpub, der_prefix_str))
        # could use key path data from PSBT or USB request as needed. However,
        # derivation data is now required.
        print('', file=fp)
        assert len(xpubs) == wallet.n
        for xfp, xpub, der_prefix in xpubs:
            print(f'Derivation: {der_prefix}', file=fp)
            print(f'{xfp}: {xpub}\n', file=fp)
    def show_address(self, wallet, address, keystore: 'Coldcard_KeyStore' = None):
        """Show *address* on the device, dispatching to the standard or
        multisig keystore path depending on the wallet type."""
        if keystore is None:
            keystore = wallet.get_keystore()
        if not self.show_address_helper(wallet, address, keystore):
            return
        txin_type = wallet.get_txin_type(address)
        # Standard_Wallet => not multisig, must be bip32
        if type(wallet) is Standard_Wallet:
            sequence = wallet.get_address_index(address)
            keystore.show_address(sequence, txin_type)
        elif type(wallet) is Multisig_Wallet:
            assert isinstance(wallet, Multisig_Wallet)  # only here for type-hints in IDE
            # More involved for P2SH/P2WSH addresses: need M, and all public keys, and their
            # derivation paths. Must construct script, and track fingerprints+paths for
            # all those keys
            pubkey_deriv_info = wallet.get_public_keys_with_deriv_info(address)
            pubkey_hexes = sorted([pk.hex() for pk in list(pubkey_deriv_info)])
            xfp_paths = []
            for pubkey_hex in pubkey_hexes:
                pubkey = bytes.fromhex(pubkey_hex)
                ks, der_suffix = pubkey_deriv_info[pubkey]
                fp_bytes, der_full = ks.get_fp_and_derivation_to_be_used_in_partial_tx(der_suffix, only_der_suffix=False)
                xfp_int = xfp_int_from_xfp_bytes(fp_bytes)
                xfp_paths.append([xfp_int] + list(der_full))
            script = bfh(wallet.pubkeys_to_scriptcode(pubkey_hexes))
            keystore.show_p2sh_address(wallet.m, script, xfp_paths, txin_type)
        else:
            keystore.handler.show_error(_('This function is only available for standard wallets when using {}.').format(self.device))
            return
def xfp_int_from_xfp_bytes(fp_bytes: bytes) -> int:
    """Convert a 4-byte master-key fingerprint into the little-endian
    unsigned integer form used in ckcc USB requests."""
    value = 0
    for shift, byte in enumerate(fp_bytes):
        value |= byte << (8 * shift)
    return value
def xfp2str(xfp: int) -> str:
    """Render an xpub fingerprint for display.

    It is really a 4-byte string, not an integer, so it is shown as the
    lowercase hex of its little-endian byte order (not '0x%08x', which
    would be the wrong endianness).
    """
    return xfp.to_bytes(4, byteorder='little').hex()
| true | true |
f7f9891821484758d545693da5d745fe3cada3a3 | 794 | py | Python | tests/containers/analyzer_result.py | nflexfo/plaso | 5da7aa51c39b593773687fdf20a93ba35fc492b4 | [
"Apache-2.0"
] | 27 | 2019-04-05T12:01:49.000Z | 2022-02-08T02:26:25.000Z | tests/containers/analyzer_result.py | nflexfo/plaso | 5da7aa51c39b593773687fdf20a93ba35fc492b4 | [
"Apache-2.0"
] | null | null | null | tests/containers/analyzer_result.py | nflexfo/plaso | 5da7aa51c39b593773687fdf20a93ba35fc492b4 | [
"Apache-2.0"
] | 8 | 2019-11-28T08:06:34.000Z | 2020-08-29T13:53:30.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the analyzer result attribute container."""
from __future__ import unicode_literals
import unittest
from plaso.containers import analyzer_result
from tests import test_lib as shared_test_lib
class AnalyzerResultTest(shared_test_lib.BaseTestCase):
  """Tests for the analyzer result attribute container."""

  def testGetAttributeNames(self):
    """Tests the GetAttributeNames function."""
    container = analyzer_result.AnalyzerResult()

    expected_names = [
        'analyzer_name', 'attribute_name', 'attribute_value']
    self.assertEqual(sorted(container.GetAttributeNames()), expected_names)
# Allow running this test module directly as a script.
if __name__ == '__main__':
  unittest.main()
| 25.612903 | 69 | 0.765743 |
from __future__ import unicode_literals
import unittest
from plaso.containers import analyzer_result
from tests import test_lib as shared_test_lib
class AnalyzerResultTest(shared_test_lib.BaseTestCase):
  """Tests for the analyzer result attribute container."""
  def testGetAttributeNames(self):
    """Tests the GetAttributeNames function."""
    attribute_container = analyzer_result.AnalyzerResult()
    expected_attribute_names = [
        'analyzer_name', 'attribute_name', 'attribute_value']
    attribute_names = sorted(attribute_container.GetAttributeNames())
    self.assertEqual(attribute_names, expected_attribute_names)
# Allow running this test module directly as a script.
if __name__ == '__main__':
  unittest.main()
| true | true |
f7f98a57a27938c31d74bb67d3e4bbd2181b089c | 4,305 | py | Python | sdk/eventhub/azure-mgmt-eventhub/azure/mgmt/eventhub/v2018_01_01_preview/_event_hub_management_client.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | null | null | null | sdk/eventhub/azure-mgmt-eventhub/azure/mgmt/eventhub/v2018_01_01_preview/_event_hub_management_client.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 226 | 2019-07-24T07:57:21.000Z | 2019-10-15T01:07:24.000Z | sdk/eventhub/azure-mgmt-eventhub/azure/mgmt/eventhub/v2018_01_01_preview/_event_hub_management_client.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import SDKClient
from msrest import Serializer, Deserializer
from ._configuration import EventHubManagementClientConfiguration
from .operations import ClustersOperations
from .operations import NamespacesOperations
from .operations import ConfigurationOperations
from .operations import DisasterRecoveryConfigsOperations
from .operations import EventHubsOperations
from .operations import ConsumerGroupsOperations
from .operations import Operations
from .operations import RegionsOperations
from . import models
class EventHubManagementClient(SDKClient):
    """Azure Event Hub management client.

    Exposes the Event Hub ARM operation groups as attributes: ``clusters``,
    ``namespaces``, ``configuration``, ``disaster_recovery_configs``,
    ``event_hubs``, ``consumer_groups``, ``operations`` and ``regions``
    (all from azure.mgmt.eventhub.v2018_01_01_preview.operations).

    :ivar config: Configuration for client.
    :vartype config: EventHubManagementClientConfiguration
    :param credentials: Credentials needed for the client to connect to
     Azure (a msrestazure credentials object).
    :param subscription_id: Subscription credentials that uniquely identify
     a Microsoft Azure subscription; forms part of the URI for every
     service call.
    :type subscription_id: str
    :param str base_url: Service URL
    """

    def __init__(
            self, credentials, subscription_id, base_url=None):
        self.config = EventHubManagementClientConfiguration(credentials, subscription_id, base_url)
        super(EventHubManagementClient, self).__init__(self.config.credentials, self.config)

        # Only actual model classes (not module attributes) take part in
        # (de)serialization.
        client_models = {
            name: value for name, value in models.__dict__.items() if isinstance(value, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)

        # Every operation group shares the same transport, config and
        # (de)serializers.
        shared = (self._client, self.config, self._serialize, self._deserialize)
        self.clusters = ClustersOperations(*shared)
        self.namespaces = NamespacesOperations(*shared)
        self.configuration = ConfigurationOperations(*shared)
        self.disaster_recovery_configs = DisasterRecoveryConfigsOperations(*shared)
        self.event_hubs = EventHubsOperations(*shared)
        self.consumer_groups = ConsumerGroupsOperations(*shared)
        self.operations = Operations(*shared)
        self.regions = RegionsOperations(*shared)
from msrest.service_client import SDKClient
from msrest import Serializer, Deserializer
from ._configuration import EventHubManagementClientConfiguration
from .operations import ClustersOperations
from .operations import NamespacesOperations
from .operations import ConfigurationOperations
from .operations import DisasterRecoveryConfigsOperations
from .operations import EventHubsOperations
from .operations import ConsumerGroupsOperations
from .operations import Operations
from .operations import RegionsOperations
from . import models
class EventHubManagementClient(SDKClient):
    """Azure Event Hub management client.

    Exposes the Event Hub ARM operation groups (clusters, namespaces,
    configuration, disaster-recovery configs, event hubs, consumer groups,
    generic operations and regions) as attributes.
    """
    def __init__(
            self, credentials, subscription_id, base_url=None):
        """Build the shared pipeline/config and wire up every operation group.

        :param credentials: msrestazure credentials object.
        :param subscription_id: Azure subscription id; forms part of the URI
         for every service call.
        :param str base_url: optional service URL override.
        """
        self.config = EventHubManagementClientConfiguration(credentials, subscription_id, base_url)
        super(EventHubManagementClient, self).__init__(self.config.credentials, self.config)
        # only model *classes* participate in (de)serialization
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        self.clusters = ClustersOperations(
            self._client, self.config, self._serialize, self._deserialize)
        self.namespaces = NamespacesOperations(
            self._client, self.config, self._serialize, self._deserialize)
        self.configuration = ConfigurationOperations(
            self._client, self.config, self._serialize, self._deserialize)
        self.disaster_recovery_configs = DisasterRecoveryConfigsOperations(
            self._client, self.config, self._serialize, self._deserialize)
        self.event_hubs = EventHubsOperations(
            self._client, self.config, self._serialize, self._deserialize)
        self.consumer_groups = ConsumerGroupsOperations(
            self._client, self.config, self._serialize, self._deserialize)
        self.operations = Operations(
            self._client, self.config, self._serialize, self._deserialize)
        self.regions = RegionsOperations(
            self._client, self.config, self._serialize, self._deserialize)
| true | true |
f7f98aa681c10e3fb1cd5f5a827238e8b3aab7bf | 2,961 | py | Python | sequence_server_wrapper/examples/example_device.py | vstadnytskyi/sequence-server-wrapper | 1ee23638752a969f51b11a65a3b7652874efe8c5 | [
"BSD-3-Clause"
] | null | null | null | sequence_server_wrapper/examples/example_device.py | vstadnytskyi/sequence-server-wrapper | 1ee23638752a969f51b11a65a3b7652874efe8c5 | [
"BSD-3-Clause"
] | null | null | null | sequence_server_wrapper/examples/example_device.py | vstadnytskyi/sequence-server-wrapper | 1ee23638752a969f51b11a65a3b7652874efe8c5 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
from logging import debug, info, warning, error
from time import sleep
import traceback
class DeviceExample():
    """Minimal example device driven by the sequence server.

    Holds a "trajectory" (a list of values) uploaded through the CMD PV and
    steps through it as Nested_Indices updates arrive, publishing results
    back to the attached io object (when one is set).
    """
    def __init__(self):
        self.io = None           # sequence-server io adapter; None when detached
        self.trajectory = None   # list of values to step through, or None
        self.idle_value = 0.0    # value used while no trajectory is loaded
    def init(self):
        from time import sleep
        """
        orderly initialization
        """
        # NOTE(review): the string above follows the import, so it is a no-op
        # expression rather than a real docstring.
        debug('Conducting orderly initialization of the Example Device')
        sleep(1)
    def parse_CMD(self,cmd):
        """Evaluate *cmd*; if it yields a list, accept it as a trajectory.

        Returns {'flag': bool, 'values': list}.  NOTE(review): eval() on the
        incoming string -- only safe with trusted clients.
        """
        def linear(start=0,end=10,step=1):
            # helper visible to evaluated commands, e.g. "linear(0, 10, 1)"
            from numpy import arange
            return list(arange(start,end,step))
        try:
            lst = eval(cmd)
        except:
            lst = None
        if type(lst) is list:
            return {'flag':True, 'values':lst}
        else:
            return {'flag':False, 'values':[]}
    def set_VAL(self,value):
        # emulate a slow device write
        self._VAL = value
        sleep(0.5)
    def get_VAL(self):
        return self._VAL
    VAL = property(get_VAL,set_VAL)
    def io_execute(self,pv_name, value):
        """Handle a PV update pushed by the sequence server.

        CMD: parse a new trajectory and acknowledge.  Nested_Indices: pick
        the trajectory element for this index (or the idle value) and publish
        it as VAL.
        """
        from time import sleep
        print(f'io_execute received: {pv_name},{value}')
        response = ''
        if pv_name == 'CMD':
            self.io_put(pv_name = 'ACK',value = 0)
            self.io_put(pv_name = 'values',value = [])
            reply = self.parse_CMD(value)
            response = ""+str(int(reply['flag']))
            self.trajectory = reply['values']
            if reply['flag'] == False:
                response += f"{'failed to eval the command'}"
            self.io_put(pv_name = 'values',value = reply['values'])
            sleep(1)
            self.io_put(pv_name = 'ACK',value = response)
        if pv_name == 'Nested_Indices':
            print(f'io_execute inside if pv_name == Nested_Indices: {"Nested_Indices"}')
            print(bytes(value))
            # value is expected to eval to a dict keyed by 'test.server'
            index = eval(bytes(value))['test.server']
            self.io_put(pv_name = 'ACK',value = 0)
            try:
                if self.trajectory is not None:
                    self.VAL = self.trajectory[index]
                else:
                    self.VAL = self.idle_value
                flag = True
                resp = ' '
            except:
                resp = traceback.format_exc()
                print(resp)
                flag = False
            print(self.VAL)
            response += resp
            self.io_put(pv_name = 'VAL',value = self.VAL)
            self.io_put(pv_name = 'message',value = response)
            self.io_put(pv_name = 'ACK',value = 1)
            # NOTE(review): 'reply' is only bound in the CMD branch; this
            # print raises NameError when only Nested_Indices fires.
            print('response:',reply,response)
    def io_put(self,pv_name, value):
        """Publish *value* for *pv_name* via the attached io object (no-op
        when self.io is None)."""
        print(f'DeviceExample.io_put: {pv_name},{value}')
        if self.io is not None:
            if pv_name == 'VAL':
                # VAL goes to the device-level queue ...
                self.io.io_put_queue.put({pv_name: value})
            else:
                # ... everything else to the sequence-level queue
                self.io.seq.io_put_queue.put({pv_name: value})
# BUGFIX: use equality, not identity -- "__name__ is '__main__'" relies on
# string interning and emits a SyntaxWarning on modern interpreters.
if __name__ == '__main__':
    device = DeviceExample()
    device.init()
| 30.214286 | 88 | 0.524147 |
from logging import debug, info, warning, error
from time import sleep
import traceback
class DeviceExample():
    """Minimal example device driven by the sequence server.

    Holds a "trajectory" (a list of values) uploaded through the CMD PV and
    steps through it as Nested_Indices updates arrive, publishing results
    back to the attached io object (when one is set).
    """

    def __init__(self):
        self.io = None           # sequence-server io adapter; None when detached
        self.trajectory = None   # list of values to step through, or None
        self.idle_value = 0.0    # value used while no trajectory is loaded

    def init(self):
        """Orderly (deliberately slow) initialization of the example device."""
        from time import sleep
        debug('Conducting orderly initialization of the Example Device')
        sleep(1)

    def parse_CMD(self, cmd):
        """Evaluate *cmd*; if it yields a list, accept it as a trajectory.

        Returns {'flag': bool, 'values': list}.  NOTE: uses eval() on the
        incoming string -- only safe with trusted clients.
        """
        def linear(start=0, end=10, step=1):
            # helper visible to evaluated commands, e.g. "linear(0, 10, 1)"
            from numpy import arange
            return list(arange(start, end, step))
        try:
            lst = eval(cmd)
        except Exception:
            lst = None
        if type(lst) is list:
            return {'flag': True, 'values': lst}
        else:
            return {'flag': False, 'values': []}

    def set_VAL(self, value):
        # emulate a slow device write
        self._VAL = value
        sleep(0.5)

    def get_VAL(self):
        return self._VAL

    VAL = property(get_VAL, set_VAL)

    def io_execute(self, pv_name, value):
        """Handle a PV update pushed by the sequence server.

        CMD: parse a new trajectory and acknowledge.  Nested_Indices: pick
        the trajectory element for this index (or the idle value) and
        publish it as VAL.
        """
        from time import sleep
        print(f'io_execute received: {pv_name},{value}')
        response = ''
        # BUGFIX: initialise so the trailing debug print cannot raise
        # NameError when only the Nested_Indices branch runs.
        reply = None
        if pv_name == 'CMD':
            self.io_put(pv_name = 'ACK',value = 0)
            self.io_put(pv_name = 'values',value = [])
            reply = self.parse_CMD(value)
            response = ""+str(int(reply['flag']))
            self.trajectory = reply['values']
            if reply['flag'] == False:
                response += f"{'failed to eval the command'}"
            self.io_put(pv_name = 'values',value = reply['values'])
            sleep(1)
            self.io_put(pv_name = 'ACK',value = response)
        if pv_name == 'Nested_Indices':
            print(f'io_execute inside if pv_name == Nested_Indices: {"Nested_Indices"}')
            print(bytes(value))
            # value is expected to eval to a dict keyed by 'test.server'
            index = eval(bytes(value))['test.server']
            self.io_put(pv_name = 'ACK',value = 0)
            try:
                if self.trajectory is not None:
                    self.VAL = self.trajectory[index]
                else:
                    self.VAL = self.idle_value
                flag = True
                resp = ' '
            except Exception:
                resp = traceback.format_exc()
                print(resp)
                flag = False
            print(self.VAL)
            response += resp
            self.io_put(pv_name = 'VAL',value = self.VAL)
            self.io_put(pv_name = 'message',value = response)
            self.io_put(pv_name = 'ACK',value = 1)
            print('response:',reply,response)

    def io_put(self, pv_name, value):
        """Publish *value* for *pv_name* via the attached io object (no-op
        when self.io is None)."""
        print(f'DeviceExample.io_put: {pv_name},{value}')
        if self.io is not None:
            if pv_name == 'VAL':
                # VAL goes to the device-level queue ...
                self.io.io_put_queue.put({pv_name: value})
            else:
                # ... everything else to the sequence-level queue
                self.io.seq.io_put_queue.put({pv_name: value})
# BUGFIX: use equality, not identity -- "__name__ is '__main__'" relies on
# string interning and emits a SyntaxWarning on modern interpreters.
if __name__ == '__main__':
    device = DeviceExample()
    device.init()
| true | true |
f7f98b2c930d177bb959c308103dda015dce3fc8 | 18,784 | py | Python | vnpy/trader/app/optionMaster/omEngine.py | jacksama/vnpy | 0fed1d01ad527b4c083ce39dc722f5a809074b78 | [
"MIT"
] | 1 | 2021-12-28T02:46:09.000Z | 2021-12-28T02:46:09.000Z | vnpy/trader/app/optionMaster/omEngine.py | gaoyangtom/vnpy | 0fed1d01ad527b4c083ce39dc722f5a809074b78 | [
"MIT"
] | null | null | null | vnpy/trader/app/optionMaster/omEngine.py | gaoyangtom/vnpy | 0fed1d01ad527b4c083ce39dc722f5a809074b78 | [
"MIT"
] | 1 | 2021-12-28T02:46:16.000Z | 2021-12-28T02:46:16.000Z | # encoding: UTF-8
from __future__ import division
import json
import shelve
import os
import traceback
from collections import OrderedDict
from vnpy.event import Event
from vnpy.trader.vtEvent import (EVENT_TICK, EVENT_TRADE, EVENT_CONTRACT,
EVENT_ORDER, EVENT_TIMER)
from vnpy.trader.vtFunction import getTempPath, getJsonPath
from vnpy.trader.vtObject import (VtLogData, VtSubscribeReq,
VtOrderReq, VtCancelOrderReq)
from vnpy.trader.vtConstant import (PRODUCT_OPTION, OPTION_CALL, OPTION_PUT,
DIRECTION_LONG, DIRECTION_SHORT,
OFFSET_OPEN, OFFSET_CLOSE,
PRICETYPE_LIMITPRICE)
from vnpy.pricing import black, bs, crr
from .omBase import (OmOption, OmUnderlying, OmChain, OmPortfolio,
EVENT_OM_LOG, EVENT_OM_STRATEGY, EVENT_OM_STRATEGYLOG,
OM_DB_NAME)
from .strategy import STRATEGY_CLASS
# Registry of pricing models, keyed by the name used in the portfolio
# setting file ('model' field in initEngine).
MODEL_DICT = {
    'black': black,
    'bs': bs,
    'crr': crr,
}
########################################################################
class OmEngine(object):
    """Option master engine.

    Builds the option portfolio (underlyings, chains, options) from a JSON
    setting file, routes tick/trade events into it, and persists per-option
    pricing implied volatilities between sessions.

    NOTE(review): this is Python 2 code (``file()`` builtin,
    ``dict.keys()`` treated as a sortable list in initEngine) -- it will not
    run unmodified under Python 3.
    """
    # shelve file used to persist the pricing implied volatilities
    impvFileName = 'PricingImpv.vt'
    impvFilePath = getTempPath(impvFileName)
    #----------------------------------------------------------------------
    def __init__(self, mainEngine, eventEngine):
        """Constructor"""
        self.mainEngine = mainEngine
        self.eventEngine = eventEngine
        self.portfolio = None            # OmPortfolio, created in initEngine
        self.optionContractDict = {}     # symbol: option contract data
        self.strategyEngine = OmStrategyEngine(self, eventEngine)
        self.registerEvent()
    #----------------------------------------------------------------------
    def registerEvent(self):
        """Register event listeners (contract events only; tick/trade are
        registered per-symbol in subscribeEvent)."""
        self.eventEngine.register(EVENT_CONTRACT, self.processContractEvent)
    #----------------------------------------------------------------------
    def processTickEvent(self, event):
        """Forward a tick event into the portfolio."""
        tick = event.dict_['data']
        self.portfolio.newTick(tick)
    #----------------------------------------------------------------------
    def processTradeEvent(self, event):
        """Forward a trade event into the portfolio."""
        trade = event.dict_['data']
        self.portfolio.newTrade(trade)
    #----------------------------------------------------------------------
    def processContractEvent(self, event):
        """Cache option contracts as they arrive, for later chain building."""
        contract = event.dict_['data']
        if contract.symbol and contract.productClass == PRODUCT_OPTION:
            self.optionContractDict[contract.symbol] = contract
    #----------------------------------------------------------------------
    def subscribeEvent(self, symbol):
        """Subscribe market data for *symbol* and hook its tick/trade events."""
        contract = self.mainEngine.getContract(symbol)
        if not contract:
            self.writeLog(u'行情订阅失败,找不到合约:%s' %symbol)
            return
        vtSymbol = contract.vtSymbol
        # subscribe to market data
        req = VtSubscribeReq()
        req.symbol = contract.symbol
        req.exchange = contract.exchange
        self.mainEngine.subscribe(req, contract.gatewayName)
        # register per-symbol event handlers
        self.eventEngine.register(EVENT_TICK + vtSymbol, self.processTickEvent)
        self.eventEngine.register(EVENT_TRADE + vtSymbol, self.processTradeEvent)
    #----------------------------------------------------------------------
    def initEngine(self, fileName):
        """Initialize the engine from the JSON setting file *fileName*.

        Returns True on success, False if already initialized; returns None
        when the configured pricing model is unknown.
        """
        if self.portfolio:
            return False
        f = file(fileName)
        setting = json.load(f)
        # look up the pricing model
        model = MODEL_DICT.get(setting['model'], None)
        if not model:
            self.writeLog(u'找不到定价模型%s' %setting['model'])
            return
        # create the underlying objects
        underlyingDict = OrderedDict()
        for underlyingSymbol in setting['underlying']:
            contract = self.mainEngine.getContract(underlyingSymbol)
            if not contract:
                self.writeLog(u'找不到标的物合约%s' %underlyingSymbol)
                continue
            detail = self.mainEngine.getPositionDetail(contract.vtSymbol)
            underlying = OmUnderlying(contract, detail)
            underlyingDict[underlyingSymbol] = underlying
        # create and initialize the option chains
        chainList = []
        for d in setting['chain']:
            chainSymbol = d['chainSymbol']
            r = d['r']
            # resolve the chain's underlying
            underlying = underlyingDict.get(d['underlyingSymbol'], None)
            if not underlying:
                self.writeLog(u'%s期权链的标的合约%s尚未创建,请检查配置文件' %(chainSymbol, underlyingSymbol))
                continue
            # create the option objects, split into calls and puts by strike
            callDict = {}
            putDict = {}
            for symbol, contract in self.optionContractDict.items():
                if contract.underlyingSymbol == d['chainSymbol']:
                    detail = self.mainEngine.getPositionDetail(contract.vtSymbol)
                    option = OmOption(contract, detail, underlying, model, r)
                    if contract.optionType is OPTION_CALL:
                        callDict[option.k] = option
                    else:
                        putDict[option.k] = option
            # sort by strike (Python 2: keys() is a list)
            strikeList = callDict.keys()
            strikeList.sort()
            callList = [callDict[k] for k in strikeList]
            putList = [putDict[k] for k in strikeList]
            # build the chain
            chain = OmChain(chainSymbol, callList, putList)
            chainList.append(chain)
            # link the chain back to its underlying
            underlying.addChain(chain)
        # build the portfolio object
        self.portfolio = OmPortfolio(setting['name'], model, underlyingDict.values(), chainList)
        # restore persisted implied volatilities
        self.loadImpvSetting()
        # subscribe market data and events for everything in the portfolio
        for underlying in underlyingDict.values():
            self.subscribeEvent(underlying.vtSymbol)
        for chain in chainList:
            for option in chain.optionDict.values():
                self.subscribeEvent(option.vtSymbol)
        # initialized successfully
        return True
    #----------------------------------------------------------------------
    def loadImpvSetting(self):
        """Load per-option pricing implied volatilities from the shelve file
        (missing entries default to 0)."""
        f = shelve.open(self.impvFilePath)
        for chain in self.portfolio.chainDict.values():
            for option in chain.optionDict.values():
                option.pricingImpv = f.get(option.symbol, 0)
        f.close()
    #----------------------------------------------------------------------
    def saveImpvSetting(self):
        """Persist per-option pricing implied volatilities to the shelve file."""
        if not self.portfolio:
            return
        f = shelve.open(self.impvFilePath)
        for chain in self.portfolio.chainDict.values():
            for option in chain.optionDict.values():
                f[option.symbol] = option.pricingImpv
        f.close()
    #----------------------------------------------------------------------
    def stop(self):
        """Shutdown hook: persist implied volatilities."""
        self.saveImpvSetting()
    #----------------------------------------------------------------------
    def writeLog(self, content):
        """Emit a log event on the option-master log channel."""
        log = VtLogData()
        log.logContent = content
        event = Event(EVENT_OM_LOG)
        event.dict_['data'] = log
        self.eventEngine.put(event)
########################################################################
class OmStrategyEngine(object):
"""策略引擎"""
settingFileName = 'strategy_setting.json'
settingfilePath = getJsonPath(settingFileName, __file__)
#----------------------------------------------------------------------
    def __init__(self, omEngine, eventEngine):
        """Constructor"""
        self.omEngine = omEngine
        self.mainEngine = omEngine.mainEngine
        self.eventEngine = eventEngine
        self.portfolio = None            # set from omEngine in loadSetting
        self.strategyDict = {}           # name: strategy
        self.symbolStrategyDict = {}     # vtSymbol: list of strategies
        self.orderStrategyDict= {}       # vtOrderID: strategy
        self.registerEvent()
#----------------------------------------------------------------------
    def registerEvent(self):
        """Register the global event listeners (tick, trade, order, timer)."""
        self.eventEngine.register(EVENT_TICK, self.processTickEvent)
        self.eventEngine.register(EVENT_TRADE, self.processTradeEvent)
        self.eventEngine.register(EVENT_ORDER, self.processOrderEvent)
        self.eventEngine.register(EVENT_TIMER, self.processTimerEvent)
#----------------------------------------------------------------------
    def writeLog(self, content):
        """Emit a log event on the strategy log channel."""
        log = VtLogData()
        log.logContent = content
        event = Event(EVENT_OM_STRATEGYLOG)
        event.dict_['data'] = log
        self.eventEngine.put(event)
#----------------------------------------------------------------------
    def callStrategyFunc(self, strategy, func, params=None):
        """Call a strategy callback, catching any exception it raises.

        On error the strategy is stopped (trading/inited flags cleared) and
        the traceback is written to the strategy log.
        """
        try:
            if params:
                func(params)
            else:
                func()
        except Exception:
            # stop the strategy and mark it uninitialized
            strategy.trading = False
            strategy.inited = False
            # log the failure with the traceback
            content = '\n'.join([u'策略%s触发异常已停止' %strategy.name,
                                traceback.format_exc()])
            self.writeLog(content)
#----------------------------------------------------------------------
    def processTickEvent(self, event):
        """Dispatch a tick to every strategy subscribed to its symbol."""
        tick = event.dict_['data']
        l = self.symbolStrategyDict.get(tick.vtSymbol, None)
        if l:
            for strategy in l:
                self.callStrategyFunc(strategy, strategy.onTick, tick)
#----------------------------------------------------------------------
    def processTradeEvent(self, event):
        """Dispatch a trade to the strategy that sent the matching order."""
        trade = event.dict_['data']
        strategy = self.orderStrategyDict.get(trade.vtOrderID, None)
        if strategy:
            self.callStrategyFunc(strategy, strategy.onTrade, trade)
#----------------------------------------------------------------------
    def processOrderEvent(self, event):
        """Dispatch an order update to the strategy that sent the order."""
        order = event.dict_['data']
        strategy = self.orderStrategyDict.get(order.vtOrderID, None)
        if strategy:
            self.callStrategyFunc(strategy, strategy.onOrder, order)
#----------------------------------------------------------------------
    def processTimerEvent(self, event):
        """Forward the periodic timer event to every loaded strategy."""
        for strategy in self.strategyDict.values():
            self.callStrategyFunc(strategy, strategy.onTimer)
#----------------------------------------------------------------------
    def loadSetting(self):
        """Load strategy settings from the JSON file and create each strategy."""
        self.portfolio = self.omEngine.portfolio
        with open(self.settingfilePath) as f:
            l = json.load(f)
            for setting in l:
                self.loadStrategy(setting)
#----------------------------------------------------------------------
    def loadStrategy(self, setting):
        """Create one strategy instance from a setting dict.

        Requires 'name' and 'className' keys; logs and returns on any
        problem (bad setting, unknown class, duplicate name).
        """
        try:
            name = setting['name']
            className = setting['className']
        except Exception:
            msg = traceback.format_exc()
            self.writeLog(u'载入策略出错:%s' %msg)
            return
        # resolve the strategy class
        strategyClass = STRATEGY_CLASS.get(className, None)
        if not strategyClass:
            self.writeLog(u'找不到策略类:%s' %className)
            return
        # refuse duplicate strategy names
        if name in self.strategyDict:
            self.writeLog(u'策略实例重名:%s' %name)
        else:
            # create the strategy instance
            strategy = strategyClass(self, setting)
            self.strategyDict[name] = strategy
            # map each subscribed symbol to this strategy for tick routing
            for vtSymbol in strategy.vtSymbols:
                l = self.symbolStrategyDict.setdefault(vtSymbol, [])
                l.append(strategy)
#----------------------------------------------------------------------
    def initStrategy(self, name):
        """Initialize the named strategy (calls its onInit)."""
        strategy = self.strategyDict[name]
        self.callStrategyFunc(strategy, strategy.onInit)
#----------------------------------------------------------------------
    def startStrategy(self, name):
        """Start the named strategy (calls its onStart)."""
        strategy = self.strategyDict[name]
        self.callStrategyFunc(strategy, strategy.onStart)
#----------------------------------------------------------------------
    def stopStrategy(self, name):
        """Stop the named strategy (calls its onStop)."""
        strategy = self.strategyDict[name]
        self.callStrategyFunc(strategy, strategy.onStop)
#----------------------------------------------------------------------
    def sendOrder(self, vtSymbol, direction, offset, price, volume):
        """Send a limit order via the main engine.

        Returns the vtOrderID string, or '' when the contract is unknown.
        """
        contract = self.mainEngine.getContract(vtSymbol)
        if not contract:
            return ''
        req = VtOrderReq()
        req.symbol = contract.symbol
        req.exchange = contract.exchange
        req.vtSymbol = vtSymbol
        req.price = price
        req.volume = volume
        req.direction = direction
        req.offset = offset
        req.priceType = PRICETYPE_LIMITPRICE
        return self.mainEngine.sendOrder(req, contract.gatewayName)
#----------------------------------------------------------------------
def cancelorder(self, vtOrderID):
"""撤单"""
order = self.mainEngine.getOrder(vtOrderID)
if not order:
return
req = VtCancelOrderReq()
req.symbol = order.symbol
req.exchange = order.exchange
req.orderID = order.orderID
req.symbol = order.symbol
req.vtSymbol = order.vtSymbo
self.mainEngine.cancelOrder(req, order.gatewayName)
#----------------------------------------------------------------------
    def buy(self, vtSymbol, price, volume):
        """Open a long position (buy to open)."""
        return self.sendOrder(vtSymbol, DIRECTION_LONG, OFFSET_OPEN, price, volume)
#----------------------------------------------------------------------
    def short(self, vtSymbol, price, volume):
        """Open a short position (sell to open)."""
        return self.sendOrder(vtSymbol, DIRECTION_SHORT, OFFSET_OPEN, price, volume)
#----------------------------------------------------------------------
    def sell(self, vtSymbol, price, volume):
        """Close a long position (sell to close)."""
        return self.sendOrder(vtSymbol, DIRECTION_SHORT, OFFSET_CLOSE, price, volume)
#----------------------------------------------------------------------
    def cover(self, vtSymbol, price, volume):
        """Close a short position (buy to close)."""
        return self.sendOrder(vtSymbol, DIRECTION_LONG, OFFSET_CLOSE, price, volume)
#----------------------------------------------------------------------
    def dbQuery(self, collectionName, flt):
        """Query documents matching filter *flt* from *collectionName* in the OM database."""
        return self.mainEngine.dbQuery(OM_DB_NAME, collectionName, flt)
#----------------------------------------------------------------------
    def dbUpdate(self, collectionName, d, flt):
        """Upsert document *d* matching filter *flt* into *collectionName* (final True = upsert)."""
        self.mainEngine.dbUpdate(OM_DB_NAME, collectionName, d, flt, True)
#----------------------------------------------------------------------
    def getOption(self, vtSymbol):
        """Return the option object for *vtSymbol* from the portfolio, or None if unknown."""
        return self.portfolio.optionDict.get(vtSymbol, None)
#----------------------------------------------------------------------
    def getUnderlying(self, vtSymbol):
        """Return the underlying-instrument object for *vtSymbol*, or None if unknown."""
        return self.portfolio.underlyingDict.get(vtSymbol, None)
#----------------------------------------------------------------------
    def getChain(self, symbol):
        """Return the option-chain object for *symbol*, or None if unknown."""
        return self.portfolio.chainDict.get(symbol, None)
#----------------------------------------------------------------------
    def getPortfolio(self):
        """Return the current position portfolio object."""
        return self.portfolio
#----------------------------------------------------------------------
def putStrategyEvent(self, name):
"""触发策略状态变化事件(通常用于通知GUI更新)"""
event = Event(EVENT_OM_STRATEGY+name)
self.eventEngine.put(event)
#----------------------------------------------------------------------
def setStrategyParam(self, name, key, value):
"""设置策略变量"""
if name in self.strategyDict:
strategy = self.strategyDict[name]
strategy.__setattr__(key, value)
self.writeLog(u'策略%s参数%s已修改为%s' %(name, key, value))
else:
self.writeLog(u'策略实例不存在:' + name)
return None
#----------------------------------------------------------------------
def getStrategyVar(self, name):
"""获取策略当前的变量字典"""
if name in self.strategyDict:
strategy = self.strategyDict[name]
varDict = OrderedDict()
for key in strategy.varList:
varDict[key] = strategy.__getattribute__(key)
return varDict
else:
self.writeLog(u'策略实例不存在:' + name)
return None
#----------------------------------------------------------------------
def getStrategyParam(self, name):
"""获取策略的参数字典"""
if name in self.strategyDict:
strategy = self.strategyDict[name]
paramDict = OrderedDict()
for key in strategy.paramList:
paramDict[key] = strategy.__getattribute__(key)
return paramDict
else:
self.writeLog(u'策略实例不存在:' + name)
return None
#----------------------------------------------------------------------
def initAll(self):
"""全部初始化"""
for name in self.strategyDict.keys():
self.initStrategy(name)
#----------------------------------------------------------------------
def startAll(self):
"""全部启动"""
for name in self.strategyDict.keys():
self.startStrategy(name)
#----------------------------------------------------------------------
def stopAll(self):
"""全部停止"""
for name in self.strategyDict.keys():
self.stopStrategy(name) | 35.847328 | 96 | 0.458635 |
from __future__ import division
import json
import shelve
import os
import traceback
from collections import OrderedDict
from vnpy.event import Event
from vnpy.trader.vtEvent import (EVENT_TICK, EVENT_TRADE, EVENT_CONTRACT,
EVENT_ORDER, EVENT_TIMER)
from vnpy.trader.vtFunction import getTempPath, getJsonPath
from vnpy.trader.vtObject import (VtLogData, VtSubscribeReq,
VtOrderReq, VtCancelOrderReq)
from vnpy.trader.vtConstant import (PRODUCT_OPTION, OPTION_CALL, OPTION_PUT,
DIRECTION_LONG, DIRECTION_SHORT,
OFFSET_OPEN, OFFSET_CLOSE,
PRICETYPE_LIMITPRICE)
from vnpy.pricing import black, bs, crr
from .omBase import (OmOption, OmUnderlying, OmChain, OmPortfolio,
EVENT_OM_LOG, EVENT_OM_STRATEGY, EVENT_OM_STRATEGYLOG,
OM_DB_NAME)
from .strategy import STRATEGY_CLASS
MODEL_DICT = {}
MODEL_DICT['black'] = black
MODEL_DICT['bs'] = bs
MODEL_DICT['crr'] = crr
for d in setting['chain']:
chainSymbol = d['chainSymbol']
r = d['r']
underlying = underlyingDict.get(d['underlyingSymbol'], None)
if not underlying:
self.writeLog(u'%s期权链的标的合约%s尚未创建,请检查配置文件' %(chainSymbol, underlyingSymbol))
continue
callDict = {}
putDict = {}
for symbol, contract in self.optionContractDict.items():
if contract.underlyingSymbol == d['chainSymbol']:
detail = self.mainEngine.getPositionDetail(contract.vtSymbol)
option = OmOption(contract, detail, underlying, model, r)
if contract.optionType is OPTION_CALL:
callDict[option.k] = option
else:
putDict[option.k] = option
strikeList = callDict.keys()
strikeList.sort()
callList = [callDict[k] for k in strikeList]
putList = [putDict[k] for k in strikeList]
chain = OmChain(chainSymbol, callList, putList)
chainList.append(chain)
underlying.addChain(chain)
self.portfolio = OmPortfolio(setting['name'], model, underlyingDict.values(), chainList)
self.loadImpvSetting()
for underlying in underlyingDict.values():
self.subscribeEvent(underlying.vtSymbol)
for chain in chainList:
for option in chain.optionDict.values():
self.subscribeEvent(option.vtSymbol)
return True
def loadImpvSetting(self):
f = shelve.open(self.impvFilePath)
for chain in self.portfolio.chainDict.values():
for option in chain.optionDict.values():
option.pricingImpv = f.get(option.symbol, 0)
f.close()
def saveImpvSetting(self):
if not self.portfolio:
return
f = shelve.open(self.impvFilePath)
for chain in self.portfolio.chainDict.values():
for option in chain.optionDict.values():
f[option.symbol] = option.pricingImpv
f.close()
def stop(self):
self.saveImpvSetting()
def writeLog(self, content):
log = VtLogData()
log.logContent = content
event = Event(EVENT_OM_LOG)
event.dict_['data'] = log
self.eventEngine.put(event)
olio
with open(self.settingfilePath) as f:
l = json.load(f)
for setting in l:
self.loadStrategy(setting)
def loadStrategy(self, setting):
try:
name = setting['name']
className = setting['className']
except Exception:
msg = traceback.format_exc()
self.writeLog(u'载入策略出错:%s' %msg)
return
strategyClass = STRATEGY_CLASS.get(className, None)
if not strategyClass:
self.writeLog(u'找不到策略类:%s' %className)
return
if name in self.strategyDict:
self.writeLog(u'策略实例重名:%s' %name)
else:
strategy = strategyClass(self, setting)
self.strategyDict[name] = strategy
for vtSymbol in strategy.vtSymbols:
l = self.symbolStrategyDict.setdefault(vtSymbol, [])
l.append(strategy)
def initStrategy(self, name):
strategy = self.strategyDict[name]
self.callStrategyFunc(strategy, strategy.onInit)
def startStrategy(self, name):
strategy = self.strategyDict[name]
self.callStrategyFunc(strategy, strategy.onStart)
def stopStrategy(self, name):
strategy = self.strategyDict[name]
self.callStrategyFunc(strategy, strategy.onStop)
def sendOrder(self, vtSymbol, direction, offset, price, volume):
contract = self.mainEngine.getContract(vtSymbol)
if not contract:
return ''
req = VtOrderReq()
req.symbol = contract.symbol
req.exchange = contract.exchange
req.vtSymbol = vtSymbol
req.price = price
req.volume = volume
req.direction = direction
req.offset = offset
req.priceType = PRICETYPE_LIMITPRICE
return self.mainEngine.sendOrder(req, contract.gatewayName)
def cancelorder(self, vtOrderID):
order = self.mainEngine.getOrder(vtOrderID)
if not order:
return
req = VtCancelOrderReq()
req.symbol = order.symbol
req.exchange = order.exchange
req.orderID = order.orderID
req.symbol = order.symbol
req.vtSymbol = order.vtSymbo
self.mainEngine.cancelOrder(req, order.gatewayName)
def buy(self, vtSymbol, price, volume):
return self.sendOrder(vtSymbol, DIRECTION_LONG, OFFSET_OPEN, price, volume)
def short(self, vtSymbol, price, volume):
return self.sendOrder(vtSymbol, DIRECTION_SHORT, OFFSET_OPEN, price, volume)
def sell(self, vtSymbol, price, volume):
return self.sendOrder(vtSymbol, DIRECTION_SHORT, OFFSET_CLOSE, price, volume)
def cover(self, vtSymbol, price, volume):
return self.sendOrder(vtSymbol, DIRECTION_LONG, OFFSET_CLOSE, price, volume)
def dbQuery(self, collectionName, flt):
return self.mainEngine.dbQuery(OM_DB_NAME, collectionName, flt)
def dbUpdate(self, collectionName, d, flt):
self.mainEngine.dbUpdate(OM_DB_NAME, collectionName, d, flt, True)
def getOption(self, vtSymbol):
return self.portfolio.optionDict.get(vtSymbol, None)
def getUnderlying(self, vtSymbol):
return self.portfolio.underlyingDict.get(vtSymbol, None)
def getChain(self, symbol):
return self.portfolio.chainDict.get(symbol, None)
def getPortfolio(self):
return self.portfolio
def putStrategyEvent(self, name):
event = Event(EVENT_OM_STRATEGY+name)
self.eventEngine.put(event)
def setStrategyParam(self, name, key, value):
if name in self.strategyDict:
strategy = self.strategyDict[name]
strategy.__setattr__(key, value)
self.writeLog(u'策略%s参数%s已修改为%s' %(name, key, value))
else:
self.writeLog(u'策略实例不存在:' + name)
return None
def getStrategyVar(self, name):
if name in self.strategyDict:
strategy = self.strategyDict[name]
varDict = OrderedDict()
for key in strategy.varList:
varDict[key] = strategy.__getattribute__(key)
return varDict
else:
self.writeLog(u'策略实例不存在:' + name)
return None
def getStrategyParam(self, name):
if name in self.strategyDict:
strategy = self.strategyDict[name]
paramDict = OrderedDict()
for key in strategy.paramList:
paramDict[key] = strategy.__getattribute__(key)
return paramDict
else:
self.writeLog(u'策略实例不存在:' + name)
return None
def initAll(self):
for name in self.strategyDict.keys():
self.initStrategy(name)
def startAll(self):
for name in self.strategyDict.keys():
self.startStrategy(name)
def stopAll(self):
for name in self.strategyDict.keys():
self.stopStrategy(name) | true | true |
f7f98b579ef645df2a2c86f5bb7bbb28bf196c34 | 653 | py | Python | wagtail/wagtailcore/migrations/0034_page_live_revision.py | patphongs/wagtail | 32555f7a1c599c139e0f26c22907c9612af2e015 | [
"BSD-3-Clause"
] | 1 | 2019-11-06T10:51:42.000Z | 2019-11-06T10:51:42.000Z | wagtail/wagtailcore/migrations/0034_page_live_revision.py | patphongs/wagtail | 32555f7a1c599c139e0f26c22907c9612af2e015 | [
"BSD-3-Clause"
] | null | null | null | wagtail/wagtailcore/migrations/0034_page_live_revision.py | patphongs/wagtail | 32555f7a1c599c139e0f26c22907c9612af2e015 | [
"BSD-3-Clause"
] | 2 | 2017-08-08T01:39:02.000Z | 2018-05-06T06:16:10.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-26 21:32
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add ``Page.live_revision``: a pointer to the currently live PageRevision."""

    dependencies = [
        ('wagtailcore', '0033_remove_golive_expiry_help_text'),
    ]

    # Nullable, non-editable FK; SET_NULL so deleting a revision does not
    # cascade-delete the page.  related_name='+' disables the reverse accessor.
    operations = [
        migrations.AddField(
            model_name='page',
            name='live_revision',
            field=models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.PageRevision', verbose_name='live revision'),
        ),
    ]
| 29.681818 | 200 | 0.679939 |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0033_remove_golive_expiry_help_text'),
]
operations = [
migrations.AddField(
model_name='page',
name='live_revision',
field=models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.PageRevision', verbose_name='live revision'),
),
]
| true | true |
f7f98c856f56ffb55ddba793a6f969bdabb8eaef | 18,273 | py | Python | packages/galapagos_embedded/libs/lib_frontcam.py | 100kimch/ros_galapagos | 8f92cb93246c263b61199aef113e43cefc5f3939 | [
"MIT"
] | 2 | 2020-10-26T05:01:35.000Z | 2022-02-14T10:37:17.000Z | packages/galapagos_embedded/libs/lib_frontcam.py | 100kimch/ros_galapagos | 8f92cb93246c263b61199aef113e43cefc5f3939 | [
"MIT"
] | null | null | null | packages/galapagos_embedded/libs/lib_frontcam.py | 100kimch/ros_galapagos | 8f92cb93246c263b61199aef113e43cefc5f3939 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import rospy
import matplotlib.pyplot as plt
import sys
from scheduler import SCHEDULER
from constants import *
import os
import sys
# * Variables
# Shared feature-matching objects.
# ORB keypoint detector/descriptor (note: ORB, not SIFT, despite older notes).
Orb = cv2.ORB_create()
# Brute-force matcher; Hamming distance is the correct norm for binary ORB descriptors.
Bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
# Populated by init_ref_images(): sign name -> {"image", "keypoints", "descriptor"}.
ref_images = {}
# HSV Ranges used in each state (OpenCV hue is 0-179).
# The bare triple-quoted string below is inert; it only records the previous blue range.
""" before
lower_blue = np.array([90, 80, 0])
upper_blue = np.array([130, 255, 255])
"""
lower_blue = np.array([90, 110, 100])
upper_blue = np.array([130, 200, 160])
lower_red = np.array([0, 120, 100])
upper_red = np.array([20, 180, 200])
lower_green = np.array([60, 55, 50])
upper_green = np.array([85, 255, 255])
# lower_green = np.array([65, 60, 60])
# upper_green = np.array([80, 255, 255])
# Placeholder assigned in blob_detecting before a real HSV range is selected.
value_default = 1
# * Methods
def init_ref_images():
    """Load every reference sign image and precompute its ORB features.

    Fills the module-level ``ref_images`` dict with
    ``name -> {"image", "keypoints", "descriptor"}``, reading file names
    from REF_IMAGE_PATH relative to ``SCHEDULER.path``.  Shuts ROS down
    when a configured image file is missing.
    """
    global ref_images
    for key in REF_IMAGE_PATH:  # was enumerate(); the index was never used
        image = cv2.imread(SCHEDULER.path + "/images/" + REF_IMAGE_PATH[key])
        try:
            # cv2.imread returns None for a missing file; the blur then
            # raises cv2.error, which we treat as "image not found".
            image = cv2.medianBlur(cv2.GaussianBlur(image, (11, 11), 0), 11)
        except cv2.error:  # exception object was never used
            rospy.logfatal("[LIB_FRONT] ref image '" + key +
                           "' not found in constants.py!")
            rospy.signal_shutdown("Shutdown by fatal error.")
        if SCHEDULER.debug_option["show_loaded_ref_images"]:
            # NOTE(review): the logged path omits the "images/" segment used
            # by imread above -- cosmetic only, but confirm which is right.
            rospy.logdebug("[LIB_FRONT] {:8s} image loaded. ({})".format(
                key, str(SCHEDULER.path + "/" + REF_IMAGE_PATH[key])))
        keypoints, descriptor = Orb.detectAndCompute(image, None)
        ref_images[key] = {
            "image": image,
            "keypoints": keypoints,
            "descriptor": descriptor
        }
def blob_parameter(state_type):
    """Build a cv2.SimpleBlobDetector tuned for the given recognition state.

    state_type -- recognition state name (e.g. 'traffic_light', 'parking')
    Returns a detector whose threshold/shape filters match that state.
    """
    if state_type == 'traffic_light':
        # Small, fairly round blobs (traffic-light lamps).
        settings = dict(minThreshold=0, maxThreshold=256,
                        filterByArea=True, minArea=500, maxArea=2300,
                        filterByCircularity=True, minCircularity=0.4,
                        filterByConvexity=True, minConvexity=0.1,
                        filterByInertia=True, minInertiaRatio=0.01)
    elif state_type in ('intersection', 'construction', 'turnel'):
        # Red signs: looser circularity, convexity filtering disabled.
        settings = dict(minThreshold=10, maxThreshold=200,
                        filterByArea=True, minArea=500,
                        filterByCircularity=True, minCircularity=0.1,
                        filterByConvexity=False, minConvexity=0.1,
                        filterByInertia=True, minInertiaRatio=0.01)
    else:
        # Default (blue signs): large, clearly circular blobs.
        settings = dict(minThreshold=0, maxThreshold=256,
                        filterByArea=True, minArea=1000, maxArea=35000,
                        filterByCircularity=True, minCircularity=0.5,
                        filterByConvexity=True, minConvexity=0.1,
                        filterByInertia=True, minInertiaRatio=0.01)

    params = cv2.SimpleBlobDetector_Params()
    for attr, value in settings.items():
        setattr(params, attr, value)

    # OpenCV 2.x exposed the detector under a different constructor name.
    major_version = int(cv2.__version__.split('.')[0])
    if major_version < 3:
        return cv2.SimpleBlobDetector(params)
    return cv2.SimpleBlobDetector_create(params)
def blob_detecting(image, blob_detector, state_type):
    """Find candidate sign-blob centers in *image* for the given state.

    image         -- BGR frame from the front pi camera
    blob_detector -- detector built by blob_parameter()
    state_type    -- recognition state; selects the HSV colour range
    Returns a list of (x, y) center tuples when at least one blob was
    found, otherwise the (empty) raw detection result.
    """
    # Pick the HSV range for the colour associated with this state.
    # (Removed the dead ``value_default`` placeholder initialisation.)
    if state_type in ('intersection', 'construction', 'turnel'):
        hsv_min, hsv_max = lower_red, upper_red
    elif state_type == 'traffic_light':
        hsv_min, hsv_max = lower_green, upper_green
    else:
        hsv_min, hsv_max = lower_blue, upper_blue

    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # Mask the colour range and dilate to close small holes.
    kernel = np.ones((3, 3), np.uint8)
    mask = cv2.inRange(hsv, hsv_min, hsv_max)
    mask = cv2.dilate(mask, kernel, iterations=5)
    reversemask = 255 - mask

    # NOTE(review): the states detected on the positive mask here differ
    # from the threshold branch above ('to_intersection' vs 'construction');
    # looks intentional (dark-on-light vs light-on-dark signs) -- confirm.
    if state_type in ('intersection', 'turnel', 'to_intersection'):
        blob_centers = blob_detector.detect(mask)
    else:
        blob_centers = blob_detector.detect(reversemask)
    # (Removed unused BGR_ROI conversion of reversemask.)

    if SCHEDULER.debug_option["show_blob_detecting"]:
        print(len(blob_centers))
        show_centers = cv2.drawKeypoints(reversemask, blob_centers, np.array(
            []), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        cv2.imshow('hsv', hsv)
        cv2.imshow('mask', mask)
        cv2.imshow('reverse', reversemask)
        cv2.imshow('result', show_centers)
        cv2.waitKey(0)

    if len(blob_centers) >= 1:
        # Convert cv2.KeyPoint objects to plain (x, y) tuples.
        return [keypoint.pt for keypoint in blob_centers]
    return blob_centers
def signROI_detecting(image, state_type, roi_half=45):
    """Extract a square ROI around the first detected sign blob.

    image      -- BGR front-camera frame
    state_type -- recognition state, forwarded to the blob detector
    roi_half   -- half side length of the ROI square in pixels (was a
                  hard-coded 45; parameterised, default unchanged)
    Returns (roi_image, True) when a blob was found, else (image, False).
    """
    sign_detector = blob_parameter(state_type)
    sign_centers = blob_detecting(image, sign_detector, state_type)
    if len(sign_centers) >= 1:
        xx = int(sign_centers[0][0])
        yy = int(sign_centers[0][1])
        # Shrink the ROI near the top/left border so the slice start never
        # goes negative (a negative index would wrap around in numpy).
        if sign_centers[0][1] - roi_half < 0 or sign_centers[0][0] < roi_half:
            signROI_size = int(min(sign_centers[0][0], sign_centers[0][1]))
        else:
            signROI_size = roi_half
        signROI = image[yy - signROI_size: yy + signROI_size,
                        xx - signROI_size: xx + signROI_size]
        cv2.imshow('ROI', signROI)  # debug preview kept from original
        cv2.waitKey(1)
        return signROI, True
    # No blob found: hand back the full frame so callers can still use it.
    return image, False
def ORB_matching(_roi, _ref_img, _ref_keypoints, _ref_descriptor):
    """Match ORB descriptors of a sign ROI against a reference image.

    _roi            -- BGR ROI image of the candidate sign
    _ref_img        -- reference image (used only for debug drawing)
    _ref_keypoints  -- keypoints of the reference image
    _ref_descriptor -- ORB descriptors of the reference image
    Returns the brute-force matches sorted by ascending distance
    (smaller distance = better match).
    """
    global Orb
    global Bf
    # Pre-process: grayscale + median blur to suppress sensor noise.
    gray_roi = cv2.medianBlur(cv2.cvtColor(_roi, cv2.COLOR_BGR2GRAY), 5)
    roi_keypoints, roi_descriptor = Orb.detectAndCompute(gray_roi, None)
    matches = sorted(Bf.match(roi_descriptor, _ref_descriptor),
                     key=lambda m: m.distance)
    if SCHEDULER.debug_option["show_orb_matching"] == True:
        print(len(matches))
        matching_image = cv2.drawMatches(
            gray_roi, roi_keypoints, _ref_img, _ref_keypoints, matches, None, flags=2)
        cv2.imshow('matching', matching_image)
        cv2.waitKey()
    return matches
def left_or_right(_frontcam_roi):
    """Classify an arrow-sign ROI as pointing 'left' or 'right'.

    _frontcam_roi -- BGR ROI image of the arrow sign
    The Otsu-binarised lower half of the ROI is split into left/right
    halves and the white-pixel mass of each is compared.
    Hoisted to module level (was nested inside check_if_left_or_right,
    which made check_left_right_sign's call a NameError).
    """
    gray_roi = cv2.cvtColor(_frontcam_roi, cv2.COLOR_BGR2GRAY)
    gray_roi = cv2.GaussianBlur(gray_roi, (7, 7), 0)
    _, binary_roi = cv2.threshold(
        gray_roi, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    sum_left = 0
    sum_right = 0
    bin_roi_H = binary_roi.shape[0]
    bin_roi_W = binary_roi.shape[1]
    # Accumulate white-pixel mass in the bottom-left / bottom-right quadrants.
    for j in range(bin_roi_H // 2):
        for i in range(bin_roi_W // 2):
            sum_left += binary_roi[j + bin_roi_H // 2, i]
        for i in range(bin_roi_W // 2):
            sum_right += binary_roi[j + bin_roi_H //
                                    2, i + binary_roi.shape[1] // 2]
    print("=========================================")
    print("sum left: ", sum_left / 255, ", sum right: ",
          sum_right / 255)
    print("==============sum printing================")
    if sum_left > sum_right:
        print("right detected!!!!")
        print("right detected!!!!")
        print("right detected!!!!")
        return 'right'
    else:
        print("left detected!!!!")
        print("left detected!!!!")
        print("left detected!!!!")
        return 'left'


def check_if_left_or_right(image):
    """
    Check direction where to go in intersection
    @Input: image - sign roi image from front pi camera image --> gray image
    @Output: direction where to go: ["left", "right"]
    """
    # BUG FIX: the helper used to be nested here and was never invoked, so
    # this function always returned None.  Delegate to the module-level
    # helper and actually return its verdict.
    return left_or_right(image)
def is_light_green(image):
    """Return True when the traffic light in *image* shows green.

    image -- ROS CompressedImage message (``.data`` holds the JPEG bytes)
             -- TODO confirm against callers.
    """
    # np.frombuffer replaces the deprecated np.fromstring for raw bytes.
    raw_data = np.frombuffer(image.data, np.uint8)
    cv_img = cv2.imdecode(raw_data, cv2.IMREAD_COLOR)
    detector = blob_parameter('traffic_light')
    centers = blob_detecting(cv_img, detector, 'traffic_light')
    # At least one green blob of lamp size/shape means the light is green.
    return len(centers) >= 1
def is_intersection(image):
    """Return True when *image* contains the intersection sign.

    image -- BGR front-camera frame
    """
    ROI_img, ROI_OX = signROI_detecting(image, 'intersection')
    if ROI_OX is False:
        return False
    # BUG FIX: referenced undefined globals (ref_intersection,
    # ref_intersection_keypoints/descriptor, threshold_intersection);
    # use the shared ref_images / THRESHOLDS tables like is_construction.
    result_matching = ORB_matching(
        ROI_img, ref_images["intersection"]["image"],
        ref_images["intersection"]["keypoints"],
        ref_images["intersection"]["descriptor"])
    # assumes THRESHOLDS has an "intersection" key -- confirm in constants.py
    return len(result_matching) >= THRESHOLDS["intersection"]
def is_parking(image):
    """Return True when *image* contains the parking sign.

    image -- BGR front-camera frame
    """
    ROI_img, ROI_OX = signROI_detecting(image, 'parking')
    if ROI_OX is False:
        return False
    # BUG FIX: key was "images" (KeyError -- init_ref_images stores "image")
    # and threshold_parking was an undefined global; use THRESHOLDS.
    result_matching = ORB_matching(
        ROI_img, ref_images["parking"]["image"],
        ref_images["parking"]["keypoints"],
        ref_images["parking"]["descriptor"])
    # assumes THRESHOLDS has a "parking" key -- confirm in constants.py
    return len(result_matching) >= THRESHOLDS["parking"]
def is_construction(image):
    """Return True when *image* contains the construction sign.

    image -- ROS CompressedImage message (``.data`` holds the JPEG bytes)
    """
    # np.frombuffer replaces the deprecated np.fromstring for raw bytes.
    raw_data = np.frombuffer(image.data, np.uint8)
    cv_img = cv2.imdecode(raw_data, cv2.IMREAD_COLOR)
    ROI_img, ROI_OX = signROI_detecting(cv_img, 'construction')
    if ROI_OX is False:
        return False
    # BUG FIX: the keypoints argument was passed ["image"] (the image
    # again) instead of the precomputed ["keypoints"].
    result_matching = ORB_matching(
        ROI_img, ref_images["construction"]["image"],
        ref_images["construction"]["keypoints"],
        ref_images["construction"]["descriptor"])
    if SCHEDULER.debug_option["show_image_matching"]:
        rospy.logdebug("result matching : " + str(len(result_matching)))
    return len(result_matching) >= THRESHOLDS["construction"]
def is_tunnel(image):
    """Return True when *image* contains the tunnel sign.

    image -- BGR front-camera frame
    """
    ROI_img, ROI_OX = signROI_detecting(image, 'tunnel')
    if ROI_OX is False:
        return False
    # BUG FIX: the reference-image argument was the whole ref_images["tunnel"]
    # dict instead of its "image" entry, and threshold_tunnel was undefined.
    result_matching = ORB_matching(
        ROI_img, ref_images["tunnel"]["image"],
        ref_images["tunnel"]["keypoints"],
        ref_images["tunnel"]["descriptor"])
    if SCHEDULER.debug_option["show_image_matching"] == True:
        rospy.logdebug("result matching : " + str(len(result_matching)))
    # assumes THRESHOLDS has a "tunnel" key -- confirm in constants.py
    return len(result_matching) >= THRESHOLDS["tunnel"]
def check_left_right_sign(image):
    """ check the sign of left or right sign
    return 'left' if the sign means 'left'
    return 'right' if the sign means 'right'
    return 'none' if there isn't a sign
    """
    # NOTE(review): these globals are never defined or read -- dead declarations.
    global threshold_lrmin
    global threshold_lrmax
    # np.fromstring is deprecated for raw bytes; np.frombuffer is the modern equivalent.
    raw_data = np.fromstring(image.data, np.uint8)
    cv_img = cv2.imdecode(raw_data, cv2.IMREAD_COLOR)
    # NOTE: ROI Setting
    # NOTE(review): blob_ROI is computed but never used.
    blob_ROI = cv_img[100:, :]
    ROI_img, ROI_OX = signROI_detecting(cv_img, 'left_or_right')
    if ROI_OX != False:
        # LEFT or RIGHT: match the ROI against both arrow references; a sign is
        # accepted only when both match counts fall inside [min, max].
        result_matching_left = ORB_matching(
            ROI_img, ref_images["left"]["image"], ref_images["left"]["keypoints"], ref_images["left"]["descriptor"])
        result_matching_right = ORB_matching(
            ROI_img, ref_images["right"]["image"], ref_images["right"]["keypoints"], ref_images["right"]["descriptor"])
        #print("left length: ",len(result_matching_left))
        #print("right length: ",len(result_matching_right))
        if len(result_matching_left) >= THRESHOLDS["left_right_min"] and \
                len(result_matching_left) <= THRESHOLDS["left_right_max"] and \
                len(result_matching_right) >= THRESHOLDS["left_right_min"] and \
                len(result_matching_right) <= THRESHOLDS["left_right_max"]:
            # NOTE(review): left_or_right is defined *inside*
            # check_if_left_or_right, so this call raises NameError unless
            # that helper is hoisted to module scope -- confirm and fix.
            return left_or_right(ROI_img)
        else:
            return 'none'
    else:
        return 'none'
def check_sign(image):
    """Classify the sign visible in *image*.

    Returns 'intersection', 'parking', or 'nothing'.  (Construction and
    tunnel signs are detected by their own predicates, not here.)
    """
    if is_intersection(image):
        return 'intersection'
    if is_parking(image):
        return 'parking'
    return 'nothing'
def has_curve_in(distance, image):
    """Stub: report whether a curve appears within *distance*.

    Always returns False until implemented (TODO: future task).
    """
    return False
def is_straight_in(distance, image):
    """Stub: report whether the road is straight within *distance*.

    Always returns False until implemented (TODO: future task).
    """
    return False
def is_stopping_sign(image):
    """Stub: report whether *image* shows a stop sign.

    Always returns False until implemented (TODO: future task).
    """
    return False
def has_crossing_line(image):
    """Stub: report whether a crossing line spans left to right in *image*.

    Always returns False until implemented (TODO: future task).
    """
    return False
# reference image & keypoints & descriptors initialize
# init_ref_images()
'''
from lib_frontcam import signROI_detecting
import cv2
right_img = cv2.imread(
"/home/kusw-004/steamcup_2020/assets/images_final/image1.png")
left_img = cv2.imread(
"/home/kusw-004/steamcup_2020/assets/images_final/image2.png")
ref_left = cv2.imread(
"/home/kusw-004/steamcup_2020/packages/galapagos_v2/libs/left.png")
ref_right = cv2.imread(
"/home/kusw-004/steamcup_2020/packages/galapagos_v2/libs/right.png")
'''
| 34.347744 | 136 | 0.626224 | import cv2
import numpy as np
import rospy
import matplotlib.pyplot as plt
import sys
from scheduler import SCHEDULER
from constants import *
import os
import sys
Orb = cv2.ORB_create()
Bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
ref_images = {}
lower_blue = np.array([90, 110, 100])
upper_blue = np.array([130, 200, 160])
lower_red = np.array([0, 120, 100])
upper_red = np.array([20, 180, 200])
lower_green = np.array([60, 55, 50])
upper_green = np.array([85, 255, 255])
value_default = 1
def init_ref_images():
global ref_images
for idx, key in enumerate(REF_IMAGE_PATH):
image = cv2.imread(SCHEDULER.path + "/images/" + REF_IMAGE_PATH[key])
try:
image = cv2.medianBlur(cv2.GaussianBlur(image, (11, 11), 0), 11)
except cv2.error as e:
rospy.logfatal("[LIB_FRONT] ref image '" + key +
"' not found in constants.py!")
rospy.signal_shutdown("Shutdown by fatal error.")
if SCHEDULER.debug_option["show_loaded_ref_images"]:
rospy.logdebug("[LIB_FRONT] {:8s} image loaded. ({})".format(
key, str(SCHEDULER.path + "/" + REF_IMAGE_PATH[key])))
keypoints, descriptor = Orb.detectAndCompute(image, None)
ref_images[key] = {
"image": image,
"keypoints": keypoints,
"descriptor": descriptor
}
def blob_parameter(state_type):
if state_type == 'traffic_light':
params = cv2.SimpleBlobDetector_Params()
params.minThreshold = 0
params.maxThreshold = 256
params.filterByArea = True
params.minArea = 500
params.maxArea = 2300
params.filterByCircularity = True
params.minCircularity = 0.4
params.filterByConvexity = True
params.minConvexity = 0.1
params.filterByInertia = True
params.minInertiaRatio = 0.01
elif state_type == 'intersection' or state_type == 'construction' or state_type == 'turnel':
params = cv2.SimpleBlobDetector_Params()
params.minThreshold = 10
params.maxThreshold = 200
params.filterByArea = True
params.minArea = 500
params.filterByCircularity = True
params.minCircularity = 0.1
params.filterByConvexity = False
params.minConvexity = 0.1
params.filterByInertia = True
params.minInertiaRatio = 0.01
else:
params = cv2.SimpleBlobDetector_Params()
params.minThreshold = 0
params.maxThreshold = 256
params.filterByArea = True
params.minArea = 1000
params.maxArea = 35000
params.filterByCircularity = True
params.minCircularity = 0.5
params.filterByConvexity = True
params.minConvexity = 0.1
params.filterByInertia = True
params.minInertiaRatio = 0.01
ver = (cv2.__version__).split('.')
if int(ver[0]) < 3:
blob_detector = cv2.SimpleBlobDetector(params)
else:
blob_detector = cv2.SimpleBlobDetector_create(params)
return blob_detector
def blob_detecting(image, blob_detector, state_type):
_hsv_maxThreshold = value_default
_hsv_minThreshold = value_default
if state_type == 'intersection' or state_type == 'construction' or state_type == 'turnel':
_hsv_maxThreshold = upper_red
_hsv_minThreshold = lower_red
elif state_type == 'traffic_light':
_hsv_maxThreshold = upper_green
_hsv_minThreshold = lower_green
else:
_hsv_maxThreshold = upper_blue
_hsv_minThreshold = lower_blue
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
kernel = np.ones((3, 3), np.uint8)
mask = cv2.inRange(hsv, _hsv_minThreshold, _hsv_maxThreshold)
mask = cv2.dilate(mask, kernel, iterations=5)
reversemask = 255 - mask
if state_type == 'intersection' or state_type == 'turnel' or state_type == 'to_intersection':
blob_centers = blob_detector.detect(mask)
else:
blob_centers = blob_detector.detect(reversemask)
BGR_ROI = cv2.cvtColor(reversemask, cv2.COLOR_GRAY2BGR)
if SCHEDULER.debug_option["show_blob_detecting"]:
print(len(blob_centers))
show_centers = cv2.drawKeypoints(reversemask, blob_centers, np.array(
[]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imshow('hsv', hsv)
cv2.imshow('mask', mask)
cv2.imshow('reverse', reversemask)
cv2.imshow('result', show_centers)
cv2.waitKey(0)
if len(blob_centers) >= 1:
centers = []
for i in blob_centers:
centers.append(i.pt)
return centers
else:
return blob_centers
def signROI_detecting(image, state_type):
sign_detector = blob_parameter(state_type)
sign_centers = blob_detecting(image, sign_detector, state_type)
yy = int(sign_centers[0][1])
ers[0][1] - 45 < 0 or sign_centers[0][0] < 45:
if sign_centers[0][0] < sign_centers[0][1]:
signROI_size = int(sign_centers[0][0])
else:
signROI_size = int(sign_centers[0][1])
else:
signROI_size = 45
signROI = image[yy - signROI_size: yy +
signROI_size, xx - signROI_size: xx + signROI_size]
cv2.imshow('ROI', signROI)
cv2.waitKey(1)
return signROI, True
else:
signROI = image
return signROI, False
def ORB_matching(_roi, _ref_img, _ref_keypoints, _ref_descriptor):
global Orb
global Bf
_roi = cv2.cvtColor(_roi, cv2.COLOR_BGR2GRAY)
_roi = cv2.medianBlur(_roi, 5)
ROI_keypoints, ROI_descriptor = Orb.detectAndCompute(_roi, None)
matches = Bf.match(ROI_descriptor, _ref_descriptor)
matches = sorted(matches, key=lambda x: x.distance)
if SCHEDULER.debug_option["show_orb_matching"] == True:
print(len(matches))
matching_image = cv2.drawMatches(
_roi, ROI_keypoints, _ref_img, _ref_keypoints, matches, None, flags=2)
cv2.imshow('matching', matching_image)
cv2.waitKey()
return matches
def check_if_left_or_right(image):
    # NOTE(review): this wrapper only *defines* left_or_right and never calls
    # it, so check_if_left_or_right always returns None. check_left_right_sign
    # further down calls left_or_right() as if it were module-level, which this
    # nested definition does not provide — this looks like an extraction or
    # refactoring artifact; confirm against the upstream repository.
    def left_or_right(_frontcam_roi):
        # Decide the sign direction by comparing white-pixel mass in the
        # lower-left vs lower-right quadrant of the Otsu-binarised ROI.
        _frontcam_roi = cv2.cvtColor(_frontcam_roi, cv2.COLOR_BGR2GRAY)
        # cv2.imwrite('gray_frontcam_roi.png', _frontcam_roi)
        _frontcam_roi = cv2.GaussianBlur(_frontcam_roi, (7, 7), 0)
        # cv2.imwrite('gaussian_gray_frontcam_roi.png', _frontcam_roi)
        tmp, binary_roi = cv2.threshold(
            _frontcam_roi, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        #height = np.size(binary_roi, 0)/2
        #width = np.size(binary_roi, 1)/2
        # cutting small ROI from under center point 40 X 20 box
        #small_roi = binary_roi[height:height+20, width-20:width+20]
        # compare sum of left pixels' value and right's one
        sum_left = 0
        sum_right = 0
        bin_roi_H = binary_roi.shape[0]
        bin_roi_W = binary_roi.shape[1]
        # Only the bottom half of the ROI is scanned (row offset bin_roi_H // 2).
        for j in range(bin_roi_H // 2):
            for i in range(bin_roi_W // 2):
                sum_left += binary_roi[j + bin_roi_H // 2, i]
            for i in range(bin_roi_W // 2):
                sum_right += binary_roi[j + bin_roi_H //
                                        2, i + binary_roi.shape[1] // 2]
        print("=========================================")
        print("sum left: ", sum_left / 255, ", sum right: ",
              sum_right / 255)  # ! code for debugging
        print("==============sum printing================")
        # cv2.imwrite('binary_roi.png', binary_roi) # ! code for debugging
        # cv2.imshow('small_roi',small_roi) #! code for debugging
        # cv2.waitKey(0) #! code for debugging
        # More white mass on the left half means the arrow points right.
        if sum_left > sum_right:
            print("right detected!!!!")
            print("right detected!!!!")
            print("right detected!!!!")
            return 'right'
        else:
            print("left detected!!!!")
            print("left detected!!!!")
            print("left detected!!!!")
            return 'left'
def is_light_green(image):
    """Return True when a green traffic-light blob is detected.

    `image` is assumed to be a compressed ROS image message (it exposes a
    `.data` bytes payload) — TODO confirm against the subscriber.
    """
    # np.frombuffer replaces np.fromstring, which is deprecated (and removed
    # for binary input in modern NumPy).
    raw_data = np.frombuffer(image.data, np.uint8)
    cv_img = cv2.imdecode(raw_data, cv2.IMREAD_COLOR)
    detector = blob_parameter('traffic_light')
    centers = blob_detecting(cv_img, detector, 'traffic_light')
    # Any detected blob counts as a green light.
    return len(centers) >= 1
def is_intersection(image):
    """Return True when the ROI matches the intersection reference sign."""
    roi, found = signROI_detecting(image, 'intersection')
    if not found:
        return False
    # matching & thresholding
    matches = ORB_matching(
        roi, ref_intersection, ref_intersection_keypoints, ref_intersection_descriptor)
    return len(matches) >= threshold_intersection
def is_parking(image):
    """Return True when the ROI matches the parking reference sign."""
    roi, found = signROI_detecting(image, 'parking')
    if not found:
        return False
    # matching & thresholding
    # NOTE(review): the key "images" (plural) differs from "image" used by the
    # other detectors — verify against how ref_images is initialised.
    matches = ORB_matching(
        roi, ref_images["parking"]["images"], ref_images["parking"]["keypoints"], ref_images["parking"]["descriptor"])
    return len(matches) >= threshold_parking
def is_construction(image):
    """Decode the compressed image and match it against the construction sign.

    `image` is assumed to be a compressed ROS image message exposing `.data`.
    Returns True when enough ORB matches pass the construction threshold.
    """
    # np.frombuffer replaces the deprecated np.fromstring for binary input.
    raw_data = np.frombuffer(image.data, np.uint8)
    cv_img = cv2.imdecode(raw_data, cv2.IMREAD_COLOR)
    ROI_img, ROI_OX = signROI_detecting(cv_img, 'construction')
    if not ROI_OX:
        return False
    # Bug fix: the third argument of ORB_matching is the reference *keypoints*;
    # the original passed the reference image twice.
    result_matching = ORB_matching(
        ROI_img, ref_images["construction"]["image"], ref_images["construction"]["keypoints"], ref_images["construction"]["descriptor"])
    if SCHEDULER.debug_option["show_image_matching"]:
        rospy.logdebug("result matching : " + str(len(result_matching)))
    return len(result_matching) >= THRESHOLDS["construction"]
def is_tunnel(image):
    """Return True when the ROI matches the tunnel reference sign."""
    ROI_img, ROI_OX = signROI_detecting(image, 'tunnel')
    if not ROI_OX:
        return False
    # Bug fix: pass the reference *image* entry, not the whole ref_images["tunnel"]
    # dict — this mirrors the sibling detectors (left/right/construction).
    result_matching = ORB_matching(
        ROI_img, ref_images["tunnel"]["image"], ref_images["tunnel"]["keypoints"], ref_images["tunnel"]["descriptor"])
    if SCHEDULER.debug_option["show_image_matching"]:
        rospy.logdebug("result matching : " + str(len(result_matching)))
    return len(result_matching) >= threshold_tunnel
def check_left_right_sign(image):
    """Classify a detected sign ROI as 'left', 'right', or 'none'.

    Both reference signs must score within the [min, max] match band before
    the pixel-mass heuristic in left_or_right() decides the direction.
    `image` is assumed to be a compressed ROS image message exposing `.data`.
    """
    # np.frombuffer replaces the deprecated np.fromstring for binary input.
    # (The unused blob_ROI slice and the stale `global threshold_lrmin/lrmax`
    # declarations from the original were removed; thresholds now come from
    # the THRESHOLDS dict.)
    raw_data = np.frombuffer(image.data, np.uint8)
    cv_img = cv2.imdecode(raw_data, cv2.IMREAD_COLOR)
    ROI_img, ROI_OX = signROI_detecting(cv_img, 'left_or_right')
    if not ROI_OX:
        return 'none'
    result_matching_left = ORB_matching(
        ROI_img, ref_images["left"]["image"], ref_images["left"]["keypoints"], ref_images["left"]["descriptor"])
    result_matching_right = ORB_matching(
        ROI_img, ref_images["right"]["image"], ref_images["right"]["keypoints"], ref_images["right"]["descriptor"])
    if THRESHOLDS["left_right_min"] <= len(result_matching_left) <= THRESHOLDS["left_right_max"] and \
            THRESHOLDS["left_right_min"] <= len(result_matching_right) <= THRESHOLDS["left_right_max"]:
        # NOTE(review): left_or_right is defined nested inside
        # check_if_left_or_right above, so this module-level call only works
        # if it also exists at module scope — confirm upstream.
        return left_or_right(ROI_img)
    return 'none'
def check_sign(image):
    """Return which sign the image shows: 'intersection', 'parking' or 'nothing'."""
    if is_intersection(image):
        return 'intersection'
    if is_parking(image):
        return 'parking'
    return 'nothing'
def has_curve_in(distance, image):
    """Stub: curve detection within `distance` is not implemented; always False."""
    # TODO: future task
    return False
def is_straight_in(distance, image):
    """Stub: straight-road detection is not implemented; always False."""
    # TODO: future task
    return False
def is_stopping_sign(image):
    """Stub: stop-sign detection is not implemented; always False."""
    # TODO: future task
    return False
def has_crossing_line(image):
    """Stub: crossing-line detection is not implemented; always False."""
    # TODO: future task
    return False
# reference image & keypoints & descriptors initialize
# init_ref_images()
| true | true |
f7f98cbe22eec65b0b2524e768926b30f7b6bbdf | 552 | py | Python | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/wandering-firefly-29609 | bb3c8714937583d9784224e9d08f5590482595f7 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/wandering-firefly-29609 | bb3c8714937583d9784224e9d08f5590482595f7 | [
"FTL",
"AML",
"RSA-MD"
] | 56 | 2021-08-07T14:53:57.000Z | 2022-03-06T17:22:36.000Z | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/wandering-firefly-29609 | bb3c8714937583d9784224e9d08f5590482595f7 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | from django.db import migrations
def create_site(apps, schema_editor):
    """Point the default django.contrib.sites Site row at the app's domain.

    Uses the historical model via `apps` (migration convention) and upserts
    the row with pk=1 that the sites framework creates by default.
    """
    site_model = apps.get_model("sites", "Site")
    custom_domain = "wandering-firefly-29609.botics.co"
    defaults = {"name": "Wandering Firefly"}
    if custom_domain:
        defaults["domain"] = custom_domain
    site_model.objects.update_or_create(defaults=defaults, id=1)
class Migration(migrations.Migration):
    # Must run after the sites framework migration so the Site table exists.
    dependencies = [
        ("sites", "0002_alter_domain_unique"),
    ]
    # Data migration only: seed/update the default Site row.
    operations = [
        migrations.RunPython(create_site),
    ]
| 21.230769 | 61 | 0.663043 | from django.db import migrations
def create_site(apps, schema_editor):
    """Point the default django.contrib.sites Site row at the app's domain."""
    Site = apps.get_model("sites", "Site")
    custom_domain = "wandering-firefly-29609.botics.co"
    site_params = {
        "name": "Wandering Firefly",
    }
    # custom_domain is a non-empty literal, so the domain is always set here.
    if custom_domain:
        site_params["domain"] = custom_domain
    Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
    # Must run after the sites framework migration so the Site table exists.
    dependencies = [
        ("sites", "0002_alter_domain_unique"),
    ]
    # Data migration only: seed/update the default Site row.
    operations = [
        migrations.RunPython(create_site),
    ]
| true | true |
f7f98d85a31cd0e1cf877af91144cb1b4e04fd97 | 17,108 | py | Python | tests/base_test_class.py | valentijnscholten/django-DefectDojo | 5d0b9551ee9574d01b9912399919e3eefe1e1d65 | [
"BSD-3-Clause"
] | null | null | null | tests/base_test_class.py | valentijnscholten/django-DefectDojo | 5d0b9551ee9574d01b9912399919e3eefe1e1d65 | [
"BSD-3-Clause"
] | 206 | 2020-04-20T16:03:18.000Z | 2022-01-15T23:07:48.000Z | tests/base_test_class.py | valentijnscholten/django-DefectDojo | 5d0b9551ee9574d01b9912399919e3eefe1e1d65 | [
"BSD-3-Clause"
] | 1 | 2020-12-06T15:44:44.000Z | 2020-12-06T15:44:44.000Z | from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import NoAlertPresentException, NoSuchElementException
import unittest
import os
import re
# import time
# Module-level singletons: one Chrome driver (and its options) is shared by
# every BaseTestCase subclass so a whole suite reuses a single browser session.
dd_driver = None
dd_driver_options = None
def on_exception_html_source_logger(func):
    """Decorator: on any exception from a test step, dump the current URL and
    page source (to stdout and to selenium_page_source.html) and re-raise.
    """
    def wrapper(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except Exception:
            print("exception occured at url:", self.driver.current_url)
            print("page source:", self.driver.page_source)
            # Bug fix: the original leaked the file handle (open without close);
            # 'with' guarantees the dump file is flushed and closed.
            with open("selenium_page_source.html", "w", encoding='utf-8') as f:
                f.writelines(self.driver.page_source)
            # Bare raise preserves the original traceback.
            raise
    return wrapper
def set_suite_settings(suite, jira=False, github=False, block_execution=False):
    """Prepend feature-toggle steps (JIRA, GitHub, block-execution) to a suite.

    For each flag, the matching enable_*/disable_* BaseTestCase step is added
    so the suite runs against a known system-settings state.
    """
    suite.addTest(BaseTestCase('enable_jira' if jira else 'disable_jira'))
    suite.addTest(BaseTestCase('enable_github' if github else 'disable_github'))
    suite.addTest(BaseTestCase(
        'enable_block_execution' if block_execution else 'disable_block_execution'))
class BaseTestCase(unittest.TestCase):
    """Shared Selenium scaffolding for DefectDojo UI tests.

    Manages one Chrome driver in the module-global ``dd_driver`` so a whole
    suite reuses a single browser, and provides helpers for login/logout,
    navigation, system-settings toggles and browser-console assertions.
    """

    @classmethod
    def setUpClass(cls):
        # Path for automatic downloads, mapped to the media path
        cls.export_path = 'media'

        global dd_driver
        if not dd_driver:
            # setupModule and tearDownModule are not working in our scenario, so for now we use setupClass and a global variable
            # global variables are dirty, but in unit tests scenario's like these they are acceptable
            print('launching browser for: ', cls.__name__)
            global dd_driver_options
            dd_driver_options = Options()

            # headless means no UI, if you want to see what is happening remove headless. Adding detach will leave the window open after the test
            dd_driver_options.add_argument("--headless")
            # dd_driver_options.add_experimental_option("detach", True)

            # the next 2 maybe needed in some scenario's for example on WSL or other headless situations
            dd_driver_options.add_argument("--no-sandbox")
            dd_driver_options.add_argument("--disable-dev-shm-usage")
            dd_driver_options.add_argument("--disable-gpu")  # on windows sometimes chrome can't start with certain gpu driver versions, even in headless mode

            # start maximized or at least with sufficient with because datatables will hide certain controls when the screen is too narrow
            dd_driver_options.add_argument("--window-size=1280,1024")
            # dd_driver_options.add_argument("--start-maximized")

            dd_driver_options.set_capability("acceptInsecureCerts", True)

            # some extra logging can be turned on if you want to query the browser javascripe console in your tests
            desired = webdriver.DesiredCapabilities.CHROME
            desired['goog:loggingPrefs'] = {'browser': 'ALL'}

            # set automatic downloads to test csv and excel export
            prefs = {"download.default_directory": cls.export_path}
            dd_driver_options.add_experimental_option("prefs", prefs)

            # change path of chromedriver according to which directory you have chromedriver.
            print('starting chromedriver with options: ', vars(dd_driver_options), desired)
            dd_driver = webdriver.Chrome(os.environ['CHROMEDRIVER'], chrome_options=dd_driver_options, desired_capabilities=desired)
            # best practice is only use explicit waits
            dd_driver.implicitly_wait(1)

        cls.driver = dd_driver
        cls.base_url = os.environ['DD_BASE_URL']

    def setUp(self):
        self.verificationErrors = []
        self.accept_next_alert = True
        self.accept_javascript_errors = False
        # clear browser console logs so assertNoConsoleErrors only sees this test
        self.driver.execute_script("console.clear()")

    def login_page(self):
        """Log in with the admin credentials from the environment."""
        driver = self.driver
        driver.get(self.base_url + "login")
        driver.find_element(By.ID, "id_username").clear()
        driver.find_element(By.ID, "id_username").send_keys(os.environ['DD_ADMIN_USER'])
        driver.find_element(By.ID, "id_password").clear()
        driver.find_element(By.ID, "id_password").send_keys(os.environ['DD_ADMIN_PASSWORD'])
        driver.find_element(By.CSS_SELECTOR, "button.btn.btn-success").click()
        self.assertFalse(self.is_element_by_css_selector_present('.alert-danger', 'Please enter a correct username and password'))
        return driver

    def test_login(self):
        return self.login_page()

    def logout(self):
        driver = self.driver
        driver.get(self.base_url + "logout")
        self.assertTrue(self.is_text_present_on_page("Login"))
        return driver

    def test_logout(self):
        return self.logout()

    @on_exception_html_source_logger
    def delete_product_if_exists(self, name="QA Test"):
        """Delete the named product when it is present on the product page."""
        driver = self.driver
        # Navigate to the product page
        self.goto_product_overview(driver)
        # Select the specific product to delete
        qa_products = driver.find_elements(By.LINK_TEXT, name)

        if len(qa_products) > 0:
            self.test_delete_product(name)

    @on_exception_html_source_logger
    def delete_finding_template_if_exists(self, name="App Vulnerable to XSS"):
        """Delete the named finding template when it is present."""
        driver = self.driver
        driver.get(self.base_url + "template")
        # Click on `Delete Template` button
        templates = driver.find_elements(By.LINK_TEXT, name)

        if len(templates) > 0:
            driver.find_element(By.ID, "id_delete").click()
            # Click 'Yes' on Alert popup
            driver.switch_to.alert.accept()

    # used to load some page just to get started
    # we choose /user because it's lightweight and fast
    def goto_some_page(self):
        driver = self.driver
        driver.get(self.base_url + "user")
        return driver

    def goto_product_overview(self, driver):
        driver.get(self.base_url + "product")
        self.wait_for_datatable_if_content("no_products", "products_wrapper")
        return driver

    def goto_product_type_overview(self, driver):
        driver.get(self.base_url + "product/type")
        return driver

    def goto_component_overview(self, driver):
        driver.get(self.base_url + "components")
        return driver

    def goto_google_sheets_configuration_form(self, driver):
        # if something is terribly wrong, it may still fail, even if system_settings is disabled.
        # See https://github.com/DefectDojo/django-DefectDojo/issues/3742 for reference.
        driver.get(self.base_url + "configure_google_sheets")
        return driver

    def goto_active_engagements_overview(self, driver):
        driver.get(self.base_url + 'engagement/active')
        return driver

    def goto_all_engagements_overview(self, driver):
        driver.get(self.base_url + 'engagement/all')
        return driver

    def goto_all_engagements_by_product_overview(self, driver):
        return self.goto_engagements_internal(driver, 'engagements_all')

    def goto_engagements_internal(self, driver, rel_url):
        driver.get(self.base_url + rel_url)
        self.wait_for_datatable_if_content("no_engagements", "engagements_wrapper")
        return driver

    def goto_all_findings_list(self, driver):
        driver.get(self.base_url + "finding")
        self.wait_for_datatable_if_content("no_findings", "open_findings_wrapper")
        return driver

    def wait_for_datatable_if_content(self, no_content_id, wrapper_id):
        """Wait for a datatable wrapper unless the page shows its 'empty' marker."""
        no_content = None
        try:
            no_content = self.driver.find_element(By.ID, no_content_id)
        except NoSuchElementException:
            # no empty-marker present: the table has content and is still rendering
            pass

        if no_content is None:
            # wait for the wrapper div as datatables javascript modifies the DOM on page load.
            WebDriverWait(self.driver, 30).until(EC.presence_of_element_located((By.ID, wrapper_id)))

    def is_element_by_css_selector_present(self, selector, text=None):
        """Return True when an element matching `selector` exists and (if given)
        contains `text`."""
        elems = self.driver.find_elements(By.CSS_SELECTOR, selector)
        if len(elems) == 0:
            # str(text) guards against TypeError when text is None
            print('no elements! for: "' + selector + '": ' + str(text))
            return False

        if text is None:
            print('text is None!')
            return True

        for elem in elems:
            print(elem.text)
            if text in elem.text:
                # print('contains! for: ' + text)
                return True

        print('text mismatch! for: ' + text)
        return False

    def is_element_by_id_present(self, id):
        try:
            self.driver.find_element(By.ID, id)
            return True
        except NoSuchElementException:
            return False

    def is_success_message_present(self, text=None):
        return self.is_element_by_css_selector_present('.alert-success', text=text)

    def is_error_message_present(self, text=None):
        return self.is_element_by_css_selector_present('.alert-danger', text=text)

    def is_help_message_present(self, text=None):
        return self.is_element_by_css_selector_present('.help-block', text=text)

    def is_text_present_on_page(self, text):
        # DEBUG: couldn't find: Product type added successfully. path: //*[contains(text(),'Product type added successfully.')]
        # can't get this xpath to work
        # path = "//*[contains(text(), '" + text + "')]"
        # elems = self.driver.find_elements(By.XPATH, path)
        # if len(elems) == 0:
        #     print("DEBUG: couldn't find: ", text, "path: ", path)
        body = self.driver.find_element(By.TAG_NAME, "body")
        return re.search(text, body.text)

    def element_exists_by_id(self, id):
        elems = self.driver.find_elements(By.ID, id)
        return len(elems) > 0

    def change_system_setting(self, id, enable=True):
        """Toggle the system-settings checkbox `id` to `enable` and save."""
        print("changing system setting " + id + " enable: " + str(enable))
        driver = self.driver
        driver.get(self.base_url + 'system_settings')

        is_enabled = driver.find_element(By.ID, id).is_selected()
        if (enable and not is_enabled) or (not enable and is_enabled):
            # driver.find_element(By.XPATH, '//*[@id=' + id + ']').click()
            driver.find_element(By.ID, id).click()
            # save settings
            driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click()
            # check if it's enabled after reload

        is_enabled = driver.find_element(By.ID, id).is_selected()

        if enable:
            self.assertTrue(is_enabled)

        if not enable:
            self.assertFalse(is_enabled)

        return is_enabled

    def enable_system_setting(self, id):
        return self.change_system_setting(id, enable=True)

    def disable_system_setting(self, id):
        return self.change_system_setting(id, enable=False)

    def enable_jira(self):
        return self.enable_system_setting('id_enable_jira')

    def disable_jira(self):
        return self.disable_system_setting('id_enable_jira')

    def disable_github(self):
        return self.disable_system_setting('id_enable_github')

    def enable_github(self):
        return self.enable_system_setting('id_enable_github')

    def set_block_execution(self, block_execution=True):
        # we set the admin user (ourselves) to have block_execution checked
        # this will force dedupe to happen synchronously, among other things like notifications, rules, ...
        print('setting block execution to: ', str(block_execution))
        driver = self.driver
        driver.get(self.base_url + 'profile')
        if driver.find_element(By.ID, 'id_block_execution').is_selected() != block_execution:
            driver.find_element(By.XPATH, '//*[@id="id_block_execution"]').click()
            # save settings
            driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click()
            # check if it's enabled after reload
            self.assertTrue(driver.find_element(By.ID, 'id_block_execution').is_selected() == block_execution)
        return driver

    def enable_block_execution(self):
        self.set_block_execution()

    def disable_block_execution(self):
        self.set_block_execution(block_execution=False)

    def is_alert_present(self):
        """Return True when a javascript alert is currently open."""
        try:
            # Bug fix: switch_to_alert() was removed in Selenium 4; accessing
            # switch_to.alert raises NoAlertPresentException when no alert is open.
            self.driver.switch_to.alert
        except NoAlertPresentException:
            return False
        return True

    def close_alert_and_get_its_text(self):
        """Accept (or dismiss) the current alert and return its text."""
        try:
            # Bug fix: switch_to_alert() was removed in Selenium 4.
            alert = self.driver.switch_to.alert
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally:
            self.accept_next_alert = True

    def assertNoConsoleErrors(self):
        """
        Sample output for levels (i.e. errors are SEVERE)
        {'level': 'DEBUG', 'message': 'http://localhost:8080/product/type/4/edit 560:12 "debug"', 'source': 'console-api', 'timestamp': 1583952828410}
        {'level': 'INFO', 'message': 'http://localhost:8080/product/type/4/edit 561:16 "info"', 'source': 'console-api', 'timestamp': 1583952828410}
        {'level': 'WARNING', 'message': 'http://localhost:8080/product/type/4/edit 562:16 "warning"', 'source': 'console-api', 'timestamp': 1583952828410}
        {'level': 'SEVERE', 'message': 'http://localhost:8080/product/type/4/edit 563:16 "error"', 'source': 'console-api', 'timestamp': 1583952828410}
        """
        for entry in WebdriverOnlyNewLogFacade(self.driver).get_log('browser'):
            """
            Images are now working after https://github.com/DefectDojo/django-DefectDojo/pull/3954,
            but http://localhost:8080/static/dojo/img/zoom-in.cur still produces a 404

            The addition of the trigger exception is due to the Report Builder tests.
            The addition of the innerHTML exception is due to the test for quick reports in finding_test.py
            """
            accepted_javascript_messages = r'(zoom\-in\.cur.*)404\ \(Not\ Found\)|Uncaught TypeError: Cannot read properties of null \(reading \'trigger\'\)|Uncaught TypeError: Cannot read properties of null \(reading \'innerHTML\'\)'

            if (entry['level'] == 'SEVERE'):
                # print(self.driver.current_url)  # TODO actually this seems to be the previous url
                # self.driver.save_screenshot("C:\\Data\\django-DefectDojo\\tests\\javascript-errors.png")
                # with open("C:\\Data\\django-DefectDojo\\tests\\javascript-errors.html", "w") as f:
                #     f.write(self.driver.page_source)

                print(entry)
                print('There was a SEVERE javascript error in the console, please check all steps fromt the current test to see where it happens')
                print('Currently there is no reliable way to find out at which url the error happened, but it could be: .' + self.driver.current_url)
                if self.accept_javascript_errors:
                    print('WARNING: skipping SEVERE javascript error because accept_javascript_errors is True!')
                elif re.search(accepted_javascript_messages, entry['message']):
                    print('WARNING: skipping javascript errors related to known issues images, see https://github.com/DefectDojo/django-DefectDojo/blob/master/tests/base_test_class.py#L324')
                else:
                    self.assertNotEqual(entry['level'], 'SEVERE')

        return True

    def tearDown(self):
        self.assertNoConsoleErrors()
        self.assertEqual([], self.verificationErrors)

    @classmethod
    def tearDownDriver(cls):
        """Quit the shared driver unless the 'detach' debug option is set."""
        print('tearDownDriver: ', cls.__name__)
        global dd_driver
        if dd_driver:
            if not dd_driver_options.experimental_options or not dd_driver_options.experimental_options.get('detach'):
                print('closing browser')
                dd_driver.quit()
class WebdriverOnlyNewLogFacade(object):
    """Wraps ``webdriver.get_log`` so each call only yields entries that are
    strictly newer than anything seen by the previous call."""

    last_timestamp = 0

    def __init__(self, webdriver):
        self._webdriver = webdriver

    def get_log(self, log_type):
        """Return only the log entries newer than the last call's high-water mark."""
        cutoff = self.last_timestamp
        fresh = [entry for entry in self._webdriver.get_log(log_type)
                 if entry["timestamp"] > cutoff]
        # advance the high-water mark to the newest entry we just returned
        newest = cutoff
        for entry in fresh:
            if entry["timestamp"] > newest:
                newest = entry["timestamp"]
        self.last_timestamp = newest
        return fresh
| 41.026379 | 234 | 0.656886 | from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import NoAlertPresentException, NoSuchElementException
import unittest
import os
import re
dd_driver = None
dd_driver_options = None
def on_exception_html_source_logger(func):
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception as e:
print("exception occured at url:", self.driver.current_url)
print("page source:", self.driver.page_source)
f = open("selenium_page_source.html", "w", encoding='utf-8')
f.writelines(self.driver.page_source)
raise(e)
return wrapper
def set_suite_settings(suite, jira=False, github=False, block_execution=False):
if jira:
suite.addTest(BaseTestCase('enable_jira'))
else:
suite.addTest(BaseTestCase('disable_jira'))
if github:
suite.addTest(BaseTestCase('enable_github'))
else:
suite.addTest(BaseTestCase('disable_github'))
if block_execution:
suite.addTest(BaseTestCase('enable_block_execution'))
else:
suite.addTest(BaseTestCase('disable_block_execution'))
class BaseTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.export_path = 'media'
global dd_driver
if not dd_driver:
print('launching browser for: ', cls.__name__)
global dd_driver_options
dd_driver_options = Options()
# headless means no UI, if you want to see what is happening remove headless. Adding detach will leave the window open after the test
dd_driver_options.add_argument("--headless")
# dd_driver_options.add_experimental_option("detach", True)
# the next 2 maybe needed in some scenario's for example on WSL or other headless situations
dd_driver_options.add_argument("--no-sandbox")
dd_driver_options.add_argument("--disable-dev-shm-usage")
dd_driver_options.add_argument("--disable-gpu")
# start maximized or at least with sufficient with because datatables will hide certain controls when the screen is too narrow
dd_driver_options.add_argument("--window-size=1280,1024")
# dd_driver_options.add_argument("--start-maximized")
dd_driver_options.set_capability("acceptInsecureCerts", True)
# some extra logging can be turned on if you want to query the browser javascripe console in your tests
desired = webdriver.DesiredCapabilities.CHROME
desired['goog:loggingPrefs'] = {'browser': 'ALL'}
# set automatic downloads to test csv and excel export
prefs = {"download.default_directory": cls.export_path}
dd_driver_options.add_experimental_option("prefs", prefs)
# change path of chromedriver according to which directory you have chromedriver.
print('starting chromedriver with options: ', vars(dd_driver_options), desired)
dd_driver = webdriver.Chrome(os.environ['CHROMEDRIVER'], chrome_options=dd_driver_options, desired_capabilities=desired)
# best practice is only use explicit waits
dd_driver.implicitly_wait(1)
cls.driver = dd_driver
cls.base_url = os.environ['DD_BASE_URL']
def setUp(self):
self.verificationErrors = []
self.accept_next_alert = True
self.accept_javascript_errors = False
self.driver.execute_script("console.clear()")
# clear browser console logs?
def login_page(self):
driver = self.driver
driver.get(self.base_url + "login")
driver.find_element(By.ID, "id_username").clear()
driver.find_element(By.ID, "id_username").send_keys(os.environ['DD_ADMIN_USER'])
driver.find_element(By.ID, "id_password").clear()
driver.find_element(By.ID, "id_password").send_keys(os.environ['DD_ADMIN_PASSWORD'])
driver.find_element(By.CSS_SELECTOR, "button.btn.btn-success").click()
self.assertFalse(self.is_element_by_css_selector_present('.alert-danger', 'Please enter a correct username and password'))
return driver
def test_login(self):
return self.login_page()
def logout(self):
driver = self.driver
driver.get(self.base_url + "logout")
self.assertTrue(self.is_text_present_on_page("Login"))
return driver
def test_logout(self):
return self.logout()
@on_exception_html_source_logger
def delete_product_if_exists(self, name="QA Test"):
driver = self.driver
# Navigate to the product page
self.goto_product_overview(driver)
# Select the specific product to delete
qa_products = driver.find_elements(By.LINK_TEXT, name)
if len(qa_products) > 0:
self.test_delete_product(name)
@on_exception_html_source_logger
def delete_finding_template_if_exists(self, name="App Vulnerable to XSS"):
driver = self.driver
driver.get(self.base_url + "template")
# Click on `Delete Template` button
templates = driver.find_elements(By.LINK_TEXT, name)
if len(templates) > 0:
driver.find_element(By.ID, "id_delete").click()
# Click 'Yes' on Alert popup
driver.switch_to.alert.accept()
# used to load some page just to get started
# we choose /user because it's lightweight and fast
def goto_some_page(self):
driver = self.driver
driver.get(self.base_url + "user")
return driver
def goto_product_overview(self, driver):
driver.get(self.base_url + "product")
self.wait_for_datatable_if_content("no_products", "products_wrapper")
return driver
def goto_product_type_overview(self, driver):
driver.get(self.base_url + "product/type")
return driver
def goto_component_overview(self, driver):
driver.get(self.base_url + "components")
return driver
def goto_google_sheets_configuration_form(self, driver):
driver.get(self.base_url + "configure_google_sheets")
return driver
def goto_active_engagements_overview(self, driver):
driver.get(self.base_url + 'engagement/active')
return driver
def goto_all_engagements_overview(self, driver):
driver.get(self.base_url + 'engagement/all')
return driver
def goto_all_engagements_by_product_overview(self, driver):
return self.goto_engagements_internal(driver, 'engagements_all')
def goto_engagements_internal(self, driver, rel_url):
driver.get(self.base_url + rel_url)
self.wait_for_datatable_if_content("no_engagements", "engagements_wrapper")
return driver
def goto_all_findings_list(self, driver):
driver.get(self.base_url + "finding")
self.wait_for_datatable_if_content("no_findings", "open_findings_wrapper")
return driver
def wait_for_datatable_if_content(self, no_content_id, wrapper_id):
no_content = None
try:
no_content = self.driver.find_element(By.ID, no_content_id)
except:
pass
if no_content is None:
WebDriverWait(self.driver, 30).until(EC.presence_of_element_located((By.ID, wrapper_id)))
def is_element_by_css_selector_present(self, selector, text=None):
elems = self.driver.find_elements(By.CSS_SELECTOR, selector)
if len(elems) == 0:
print('no elements! for: "' + selector + '": ' + text)
return False
if text is None:
print('text is None!')
return True
for elem in elems:
print(elem.text)
if text in elem.text:
return True
print('text mismatch! for: ' + text)
return False
def is_element_by_id_present(self, id):
try:
self.driver.find_element(By.ID, id)
return True
except NoSuchElementException:
return False
def is_success_message_present(self, text=None):
return self.is_element_by_css_selector_present('.alert-success', text=text)
def is_error_message_present(self, text=None):
return self.is_element_by_css_selector_present('.alert-danger', text=text)
def is_help_message_present(self, text=None):
return self.is_element_by_css_selector_present('.help-block', text=text)
def is_text_present_on_page(self, text):
# can't get this xpath to work
body = self.driver.find_element(By.TAG_NAME, "body")
return re.search(text, body.text)
def element_exists_by_id(self, id):
elems = self.driver.find_elements(By.ID, id)
return len(elems) > 0
def change_system_setting(self, id, enable=True):
print("changing system setting " + id + " enable: " + str(enable))
driver = self.driver
driver.get(self.base_url + 'system_settings')
is_enabled = driver.find_element(By.ID, id).is_selected()
if (enable and not is_enabled) or (not enable and is_enabled):
# driver.find_element(By.XPATH, '//*[@id=' + id + ']').click()
driver.find_element(By.ID, id).click()
# save settings
driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click()
# check if it's enabled after reload
is_enabled = driver.find_element(By.ID, id).is_selected()
if enable:
self.assertTrue(is_enabled)
if not enable:
self.assertFalse(is_enabled)
return is_enabled
def enable_system_setting(self, id):
return self.change_system_setting(id, enable=True)
def disable_system_setting(self, id):
return self.change_system_setting(id, enable=False)
def enable_jira(self):
return self.enable_system_setting('id_enable_jira')
def disable_jira(self):
return self.disable_system_setting('id_enable_jira')
def disable_github(self):
return self.disable_system_setting('id_enable_github')
def enable_github(self):
return self.enable_system_setting('id_enable_github')
def set_block_execution(self, block_execution=True):
print('setting block execution to: ', str(block_execution))
driver = self.driver
driver.get(self.base_url + 'profile')
if driver.find_element(By.ID, 'id_block_execution').is_selected() != block_execution:
driver.find_element(By.XPATH, '//*[@id="id_block_execution"]').click()
driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click()
self.assertTrue(driver.find_element(By.ID, 'id_block_execution').is_selected() == block_execution)
return driver
def enable_block_execution(self):
self.set_block_execution()
def disable_block_execution(self):
self.set_block_execution(block_execution=False)
def is_alert_present(self):
try:
self.driver.switch_to_alert()
except NoAlertPresentException:
return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally:
self.accept_next_alert = True
def assertNoConsoleErrors(self):
for entry in WebdriverOnlyNewLogFacade(self.driver).get_log('browser'):
accepted_javascript_messages = r'(zoom\-in\.cur.*)404\ \(Not\ Found\)|Uncaught TypeError: Cannot read properties of null \(reading \'trigger\'\)|Uncaught TypeError: Cannot read properties of null \(reading \'innerHTML\'\)'
if (entry['level'] == 'SEVERE'):
# print(self.driver.current_url) # TODO actually this seems to be the previous url
# self.driver.save_screenshot("C:\\Data\\django-DefectDojo\\tests\\javascript-errors.png")
# with open("C:\\Data\\django-DefectDojo\\tests\\javascript-errors.html", "w") as f:
# f.write(self.driver.page_source)
print(entry)
print('There was a SEVERE javascript error in the console, please check all steps fromt the current test to see where it happens')
print('Currently there is no reliable way to find out at which url the error happened, but it could be: .' + self.driver.current_url)
if self.accept_javascript_errors:
print('WARNING: skipping SEVERE javascript error because accept_javascript_errors is True!')
elif re.search(accepted_javascript_messages, entry['message']):
print('WARNING: skipping javascript errors related to known issues images, see https://github.com/DefectDojo/django-DefectDojo/blob/master/tests/base_test_class.py
else:
self.assertNotEqual(entry['level'], 'SEVERE')
return True
    def tearDown(self):
        # Every test must finish with a clean javascript console and without
        # recorded verification errors.
        self.assertNoConsoleErrors()
        self.assertEqual([], self.verificationErrors)
    @classmethod
    def tearDownDriver(cls):
        """Quit the module-global webdriver, unless it was started with the
        'detach' experimental option (which keeps the browser open, e.g. for
        local debugging)."""
        print('tearDownDriver: ', cls.__name__)
        global dd_driver
        if dd_driver:
            if not dd_driver_options.experimental_options or not dd_driver_options.experimental_options.get('detach'):
                print('closing browser')
                dd_driver.quit()
class WebdriverOnlyNewLogFacade(object):
    """Wrapper around ``webdriver.get_log`` that only returns log entries
    newer than those already seen by a previous call on this facade."""

    last_timestamp = 0

    def __init__(self, webdriver):
        self._webdriver = webdriver

    def get_log(self, log_type):
        """Return the entries of ``log_type`` produced since the last call."""
        newest_seen = self.last_timestamp
        fresh_entries = []
        for entry in self._webdriver.get_log(log_type):
            # Keep only entries newer than the previous invocation's cutoff.
            if entry["timestamp"] > self.last_timestamp:
                fresh_entries.append(entry)
            # Track the newest timestamp within this batch.
            newest_seen = max(newest_seen, entry["timestamp"])
        # Remember the cutoff for the next invocation.
        self.last_timestamp = newest_seen
        return fresh_entries
| true | true |
f7f98e00acf11fa9788ce06d21ba4ee8e4f2a9aa | 4,393 | py | Python | modin/data_management/partitioning/remote_partition/pandas_on_python.py | zeevikal/modin | 2128f80280092902c61d738d9d2ef551bc4ffaf4 | [
"Apache-2.0"
] | null | null | null | modin/data_management/partitioning/remote_partition/pandas_on_python.py | zeevikal/modin | 2128f80280092902c61d738d9d2ef551bc4ffaf4 | [
"Apache-2.0"
] | null | null | null | modin/data_management/partitioning/remote_partition/pandas_on_python.py | zeevikal/modin | 2128f80280092902c61d738d9d2ef551bc4ffaf4 | [
"Apache-2.0"
] | null | null | null | import pandas
from .utils import length_fn_pandas, width_fn_pandas
class PandasOnPythonRemotePartition(object):
    """This abstract class holds the data and metadata for a single partition.

    The methods required for implementing this abstract class are listed in
    the section immediately following this.

    The API exposed by the children of this object is used in
    `BaseBlockPartitions`.

    Note: These objects are treated as immutable by `BaseBlockPartitions`
    subclasses. There is no logic for updating inplace.
    """

    def __init__(self, data):
        self.data = data
        # Deferred (func, kwargs) pairs, applied lazily by the next `apply`.
        self.call_queue = []

    def get(self):
        """Return the data.

        Note: Since this object is a simple wrapper, just return the data.

        Returns:
            The object that was `put`.
        """
        # Return a copy so callers cannot mutate the stored partition data.
        return self.data.copy()

    def apply(self, func, **kwargs):
        """Apply some callable function to the data in this partition.

        Note: It is up to the implementation how kwargs are handled. They are
            an important part of many implementations. As of right now, they
            are not serialized.

        Args:
            func: The lambda to apply (may already be correctly formatted)

        Returns:
            A new `BaseRemotePartition` containing the object that has had `func`
            applied to it.
        """
        self.call_queue.append((func, kwargs))

        def call_queue_closure(data, call_queues):
            # Drain the queue in insertion order; clear it on failure so a
            # failing call is not replayed by subsequent applies.
            for func, kwargs in call_queues:
                try:
                    data = func(data.copy(), **kwargs)
                except Exception as e:
                    self.call_queue = []
                    raise e
            return data

        new_data = call_queue_closure(self.data, self.call_queue)
        self.call_queue = []
        return PandasOnPythonRemotePartition(new_data)

    def add_to_apply_calls(self, func, **kwargs):
        """Add the function to the apply function call stack.

        This function will be executed when apply is called. It will be executed
        in the order inserted; apply's func operates the last and return
        """
        self.call_queue.append((func, kwargs))
        return self

    def to_pandas(self):
        """Convert the object stored in this partition to a Pandas DataFrame.

        Note: If the underlying object is a Pandas DataFrame, this will likely
            only need to call `get`

        Returns:
            A Pandas DataFrame.
        """
        return self.data

    @classmethod
    def put(cls, obj):
        """A factory classmethod to format a given object.

        Args:
            obj: An object.

        Returns:
            A `RemotePartitions` object.
        """
        return cls(obj)

    @classmethod
    def preprocess_func(cls, func):
        """Preprocess a function before an `apply` call.

        Note: This is a classmethod because the definition of how to preprocess
            should be class-wide. Also, we may want to use this before we
            deploy a preprocessed function to multiple `BaseRemotePartition`
            objects.

        Args:
            func: The function to preprocess.

        Returns:
            An object that can be accepted by `apply`.
        """
        return func

    @classmethod
    def length_extraction_fn(cls):
        """The function to compute the length of the object in this partition.

        Returns:
            A callable function.
        """
        return length_fn_pandas

    @classmethod
    def width_extraction_fn(cls):
        """The function to compute the width of the object in this partition.

        Returns:
            A callable function.
        """
        return width_fn_pandas

    # Lazily computed row/column counts; None means "not computed yet".
    _length_cache = None
    _width_cache = None

    def length(self):
        if self._length_cache is None:
            # BUG FIX: this previously called width_extraction_fn(), caching
            # the column count as the row count.
            self._length_cache = type(self).length_extraction_fn()(self.data)
        return self._length_cache

    def width(self):
        if self._width_cache is None:
            self._width_cache = type(self).width_extraction_fn()(self.data)
        return self._width_cache

    @classmethod
    def empty(cls):
        return cls(pandas.DataFrame())
| 30.506944 | 83 | 0.59094 | import pandas
from .utils import length_fn_pandas, width_fn_pandas
class PandasOnPythonRemotePartition(object):
def __init__(self, data):
self.data = data
self.call_queue = []
def get(self):
return self.data.copy()
def apply(self, func, **kwargs):
self.call_queue.append((func, kwargs))
def call_queue_closure(data, call_queues):
for func, kwargs in call_queues:
try:
data = func(data.copy(), **kwargs)
except Exception as e:
self.call_queue = []
raise e
return data
new_data = call_queue_closure(self.data, self.call_queue)
self.call_queue = []
return PandasOnPythonRemotePartition(new_data)
def add_to_apply_calls(self, func, **kwargs):
self.call_queue.append((func, kwargs))
return self
def to_pandas(self):
return self.data
@classmethod
def put(cls, obj):
return cls(obj)
@classmethod
def preprocess_func(cls, func):
return func
@classmethod
def length_extraction_fn(cls):
return length_fn_pandas
@classmethod
def width_extraction_fn(cls):
return width_fn_pandas
_length_cache = None
_width_cache = None
def length(self):
if self._length_cache is None:
self._length_cache = type(self).width_extraction_fn()(self.data)
return self._length_cache
def width(self):
if self._width_cache is None:
self._width_cache = type(self).width_extraction_fn()(self.data)
return self._width_cache
@classmethod
def empty(cls):
return cls(pandas.DataFrame())
| true | true |
f7f98e0c702db3580e71be2e696ca6c7d904a52c | 1,700 | py | Python | mammoth/docx/numbering_xml.py | cockcrow/python-mammoth | f05ae0c7ec6e4faee6267d3b05e3aabce3a2cbb2 | [
"BSD-2-Clause"
] | 1 | 2021-03-20T01:50:18.000Z | 2021-03-20T01:50:18.000Z | mammoth/docx/numbering_xml.py | cockcrow/python-mammoth | f05ae0c7ec6e4faee6267d3b05e3aabce3a2cbb2 | [
"BSD-2-Clause"
] | null | null | null | mammoth/docx/numbering_xml.py | cockcrow/python-mammoth | f05ae0c7ec6e4faee6267d3b05e3aabce3a2cbb2 | [
"BSD-2-Clause"
] | null | null | null | from ..documents import numbering_level
def read_numbering_xml_element(element):
    """Build a Numbering lookup from a w:numbering document element."""
    abstract_levels = _read_abstract_nums(element)
    return Numbering(_read_nums(element, abstract_levels))
def _read_abstract_nums(element):
    """Map each w:abstractNum's id to its dict of levels."""
    return dict(
        _read_abstract_num(child)
        for child in element.find_children("w:abstractNum")
    )
def _read_abstract_num(element):
    """Return (abstractNumId, levels) for one w:abstractNum element."""
    return (
        element.attributes.get("w:abstractNumId"),
        _read_abstract_num_levels(element),
    )
def _read_abstract_num_levels(element):
    """Map level index -> level for every w:lvl child of an abstractNum."""
    return {
        level.level_index: level
        for level in map(_read_abstract_num_level, element.find_children("w:lvl"))
    }
def _read_abstract_num_level(element):
    """Read one w:lvl element into a numbering level.

    A level is considered ordered unless its number format is "bullet".
    """
    index = element.attributes["w:ilvl"]
    format_value = element.find_child_or_null("w:numFmt").attributes.get("w:val")
    return numbering_level(index, format_value != "bullet")
def _read_nums(element, abstract_nums):
    """Map each w:num's id to the levels of the abstractNum it references."""
    return dict(
        _read_num(num_element, abstract_nums)
        for num_element in element.find_children("w:num")
    )
def _read_num(element, abstract_nums):
num_id = element.attributes.get("w:numId")
abstract_num_id = element.find_child_or_null("w:abstractNumId").attributes["w:val"]
return num_id, abstract_nums[abstract_num_id]
class Numbering(object):
    """Lookup of numbering level definitions, keyed by numId then level index."""

    def __init__(self, nums):
        self._nums = nums

    def find_level(self, num_id, level):
        """Return the level for (num_id, level), or None if either is unknown."""
        levels = self._nums.get(num_id)
        return None if levels is None else levels.get(level)
| 28.333333 | 87 | 0.717647 | from ..documents import numbering_level
def read_numbering_xml_element(element):
abstract_nums = _read_abstract_nums(element)
nums = _read_nums(element, abstract_nums)
return Numbering(nums)
def _read_abstract_nums(element):
abstract_num_elements = element.find_children("w:abstractNum")
return dict(map(_read_abstract_num, abstract_num_elements))
def _read_abstract_num(element):
abstract_num_id = element.attributes.get("w:abstractNumId")
levels = _read_abstract_num_levels(element)
return abstract_num_id, levels
def _read_abstract_num_levels(element):
levels = map(_read_abstract_num_level, element.find_children("w:lvl"))
return dict(
(level.level_index, level)
for level in levels
)
def _read_abstract_num_level(element):
level_index = element.attributes["w:ilvl"]
num_fmt = element.find_child_or_null("w:numFmt").attributes.get("w:val")
is_ordered = num_fmt != "bullet"
return numbering_level(level_index, is_ordered)
def _read_nums(element, abstract_nums):
num_elements = element.find_children("w:num")
return dict(
_read_num(num_element, abstract_nums)
for num_element in num_elements
)
def _read_num(element, abstract_nums):
num_id = element.attributes.get("w:numId")
abstract_num_id = element.find_child_or_null("w:abstractNumId").attributes["w:val"]
return num_id, abstract_nums[abstract_num_id]
class Numbering(object):
def __init__(self, nums):
self._nums = nums
def find_level(self, num_id, level):
num = self._nums.get(num_id)
if num is None:
return None
else:
return num.get(level)
| true | true |
f7f98e0f9144d9c194c8264c39056b4374ebb641 | 2,470 | py | Python | Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/tools/summary.py | linuxonly801/awesome-DeepLearning | b063757fa130c4d56aea5cce2e592610f1e169f9 | [
"Apache-2.0"
] | 883 | 2020-11-12T11:46:46.000Z | 2022-03-31T18:27:10.000Z | tools/summary.py | arkofgalaxy/PaddleVideo | 64251233c83b7eb681061b454da198a9082309a6 | [
"Apache-2.0"
] | 233 | 2020-12-09T06:04:59.000Z | 2022-03-28T08:16:51.000Z | tools/summary.py | arkofgalaxy/PaddleVideo | 64251233c83b7eb681061b454da198a9082309a6 | [
"Apache-2.0"
] | 225 | 2020-11-13T06:21:55.000Z | 2022-03-31T05:36:11.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
import os.path as osp
import paddle
import paddle.nn.functional as F
from paddle.jit import to_static
import paddleslim
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.abspath(os.path.join(__dir__, '../')))
from paddlevideo.modeling.builder import build_model
from paddlevideo.utils import get_config
def parse_args():
    """Parse the command line options of the PaddleVideo summary tool."""
    parser = argparse.ArgumentParser("PaddleVideo Summary")
    parser.add_argument(
        '-c', '--config',
        type=str,
        default='configs/example.yaml',
        help='config file path')
    parser.add_argument("--img_size", type=int, default=224)
    parser.add_argument("--num_seg", type=int, default=8)
    parser.add_argument(
        "--FLOPs", action="store_true", help="whether to print FLOPs")
    return parser.parse_args()
def _trim(cfg, args):
"""
Reuse the trainging config will bring useless attribute, such as: backbone.pretrained model. Trim it here.
"""
model_name = cfg.model_name
cfg = cfg.MODEL
cfg.backbone.pretrained = ""
if 'num_seg' in cfg.backbone:
cfg.backbone.num_seg = args.num_seg
return cfg, model_name
def main():
    """Build the model described by the config file and print its parameter
    summary (and FLOPs when --FLOPs is given)."""
    args = parse_args()
    cfg, model_name = _trim(get_config(args.config, show=False), args)
    print(f"Building model({model_name})...")
    model = build_model(cfg)
    img_size = args.img_size
    num_seg = args.num_seg
    #NOTE: only support tsm now, will refine soon
    # NOTE(review): input spec presumably (batch, clips, num_seg, C, H, W)
    # for TSM-style models -- confirm against the model's forward signature.
    params_info = paddle.summary(model, (1, 1, num_seg, 3, img_size, img_size))
    print(params_info)
    if args.FLOPs:
        flops_info = paddleslim.analysis.flops(model, [1, 1, num_seg, 3, img_size, img_size])
        print(flops_info)
if __name__ == "__main__":
    main()
| 30.121951 | 110 | 0.675304 |
import argparse
import os
import sys
import os.path as osp
import paddle
import paddle.nn.functional as F
from paddle.jit import to_static
import paddleslim
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.abspath(os.path.join(__dir__, '../')))
from paddlevideo.modeling.builder import build_model
from paddlevideo.utils import get_config
def parse_args():
parser = argparse.ArgumentParser("PaddleVideo Summary")
parser.add_argument('-c',
'--config',
type=str,
default='configs/example.yaml',
help='config file path')
parser.add_argument("--img_size", type=int, default=224)
parser.add_argument("--num_seg", type=int, default=8)
parser.add_argument("--FLOPs",
action="store_true",
help="whether to print FLOPs")
return parser.parse_args()
def _trim(cfg, args):
model_name = cfg.model_name
cfg = cfg.MODEL
cfg.backbone.pretrained = ""
if 'num_seg' in cfg.backbone:
cfg.backbone.num_seg = args.num_seg
return cfg, model_name
def main():
args = parse_args()
cfg, model_name = _trim(get_config(args.config, show=False), args)
print(f"Building model({model_name})...")
model = build_model(cfg)
img_size = args.img_size
num_seg = args.num_seg
params_info = paddle.summary(model, (1, 1, num_seg, 3, img_size, img_size))
print(params_info)
if args.FLOPs:
flops_info = paddleslim.analysis.flops(model, [1, 1, num_seg, 3, img_size, img_size])
print(flops_info)
if __name__ == "__main__":
main()
| true | true |
f7f98e7de1216bce575d7173df886b1c0b423252 | 2,053 | py | Python | build/lib/tnetwork/test/test_convert.py | Yquetzal/tnetwork | 43fb2f19aeed57a8a9d9af032ee80f1c9f58516d | [
"BSD-2-Clause"
] | 4 | 2019-02-19T07:49:06.000Z | 2020-09-01T16:17:54.000Z | tnetwork/test/test_convert.py | Yquetzal/tnetwork | 43fb2f19aeed57a8a9d9af032ee80f1c9f58516d | [
"BSD-2-Clause"
] | 1 | 2019-07-13T16:16:28.000Z | 2019-07-15T09:34:33.000Z | build/lib/tnetwork/test/test_convert.py | Yquetzal/tnetwork | 43fb2f19aeed57a8a9d9af032ee80f1c9f58516d | [
"BSD-2-Clause"
] | 3 | 2019-07-13T16:09:20.000Z | 2022-02-08T02:23:46.000Z | import unittest
import tnetwork as tn
class FunctionTestCase(unittest.TestCase):
    """Round-trip conversion tests between tnetwork dynamic graph formats.

    Each test loads the SocioPatterns 2012 dataset, converts it between
    representations (snapshots SN, intervals IG, link stream LS), and checks
    that edges / interactions survive the round trip.
    """
    def test_SN2IG2SN(self):
        # Snapshots -> intervals -> snapshots must preserve each snapshot's edges.
        sn = tn.graph_socioPatterns2012(format=tn.DynGraphSN)
        ig = sn.to_DynGraphIG()
        sn2 = ig.to_DynGraphSN(slices=20)
        self.assertEqual(sn.snapshots_timesteps(),sn2.snapshots_timesteps())
        for t in sn.snapshots_timesteps():
            self.assertEqual(sn.graph_at_time(t).edges(), sn2.graph_at_time(t).edges())
    def test_IG2SN2IG(self):
        # Intervals -> snapshots -> intervals must preserve the interactions.
        ig = tn.graph_socioPatterns2012(format=tn.DynGraphIG)
        sn = ig.to_DynGraphSN(slices=20)
        ig2 = sn.to_DynGraphIG()
        self.assertEqual(ig.interactions(),ig2.interactions())
    def test_SN2LS2SN(self):
        # Daily-aggregated snapshots -> link stream -> snapshots round trip.
        sn = tn.graph_socioPatterns2012(format=tn.DynGraphSN)
        sn = sn.aggregate_sliding_window(60 * 60 * 24)
        ls = sn.to_DynGraphLS()
        sn2 = ls.to_DynGraphSN(slices=20,weighted=False)
        self.assertEqual(sn.snapshots_timesteps(), sn2.snapshots_timesteps())
        for t in sn.snapshots_timesteps():
            self.assertEqual(sn.graph_at_time(t).edges(), sn2.graph_at_time(t).edges())
    def test_aggregateDay(self):
        # Calendar-day aggregation, then SN -> IG -> SN with one-day slices.
        dg = tn.graph_socioPatterns2012(format=tn.DynGraphSN)
        dgd = dg.aggregate_time_period("day")
        sg = dgd.to_DynGraphIG()
        dg2 = sg.to_DynGraphSN(60*60*24)
        self.assertEqual(dgd.snapshots_timesteps(),dg2.snapshots_timesteps())
        for t in dgd.snapshots_timesteps():
            self.assertEqual(dgd.graph_at_time(t).edges(), dg2.graph_at_time(t).edges())
    def test_SN2IGaggregatePeriod(self):
        # Sliding-window (one day) aggregation, then SN -> IG -> SN round trip.
        dg = tn.graph_socioPatterns2012(format=tn.DynGraphSN)
        dgd = dg.aggregate_sliding_window(60*60*24)
        sg = dgd.to_DynGraphIG()
        dg2 = sg.to_DynGraphSN(60*60*24)
        self.assertEqual(dgd.snapshots_timesteps(),dg2.snapshots_timesteps())
        for t in dgd.snapshots_timesteps():
            self.assertEqual(dgd.graph_at_time(t).edges(), dg2.graph_at_time(t).edges())
if __name__ == '__main__':
    # Allow running this test module directly: python test_convert.py
    unittest.main()
| 36.660714 | 88 | 0.67511 | import unittest
import tnetwork as tn
class FunctionTestCase(unittest.TestCase):
def test_SN2IG2SN(self):
sn = tn.graph_socioPatterns2012(format=tn.DynGraphSN)
ig = sn.to_DynGraphIG()
sn2 = ig.to_DynGraphSN(slices=20)
self.assertEqual(sn.snapshots_timesteps(),sn2.snapshots_timesteps())
for t in sn.snapshots_timesteps():
self.assertEqual(sn.graph_at_time(t).edges(), sn2.graph_at_time(t).edges())
def test_IG2SN2IG(self):
ig = tn.graph_socioPatterns2012(format=tn.DynGraphIG)
sn = ig.to_DynGraphSN(slices=20)
ig2 = sn.to_DynGraphIG()
self.assertEqual(ig.interactions(),ig2.interactions())
def test_SN2LS2SN(self):
sn = tn.graph_socioPatterns2012(format=tn.DynGraphSN)
sn = sn.aggregate_sliding_window(60 * 60 * 24)
ls = sn.to_DynGraphLS()
sn2 = ls.to_DynGraphSN(slices=20,weighted=False)
self.assertEqual(sn.snapshots_timesteps(), sn2.snapshots_timesteps())
for t in sn.snapshots_timesteps():
self.assertEqual(sn.graph_at_time(t).edges(), sn2.graph_at_time(t).edges())
def test_aggregateDay(self):
dg = tn.graph_socioPatterns2012(format=tn.DynGraphSN)
dgd = dg.aggregate_time_period("day")
sg = dgd.to_DynGraphIG()
dg2 = sg.to_DynGraphSN(60*60*24)
self.assertEqual(dgd.snapshots_timesteps(),dg2.snapshots_timesteps())
for t in dgd.snapshots_timesteps():
self.assertEqual(dgd.graph_at_time(t).edges(), dg2.graph_at_time(t).edges())
def test_SN2IGaggregatePeriod(self):
dg = tn.graph_socioPatterns2012(format=tn.DynGraphSN)
dgd = dg.aggregate_sliding_window(60*60*24)
sg = dgd.to_DynGraphIG()
dg2 = sg.to_DynGraphSN(60*60*24)
self.assertEqual(dgd.snapshots_timesteps(),dg2.snapshots_timesteps())
for t in dgd.snapshots_timesteps():
self.assertEqual(dgd.graph_at_time(t).edges(), dg2.graph_at_time(t).edges())
if __name__ == '__main__':
unittest.main()
| true | true |
f7f98eb70fc18671c3d0b8b711ceebc9123ebc31 | 598 | py | Python | home/migrations/0003_blogmodel_user.py | suyogojha/Blog-Django | d75d2e021f3b66cece462dfbdf3a3405a433a254 | [
"MIT"
] | null | null | null | home/migrations/0003_blogmodel_user.py | suyogojha/Blog-Django | d75d2e021f3b66cece462dfbdf3a3405a433a254 | [
"MIT"
] | null | null | null | home/migrations/0003_blogmodel_user.py | suyogojha/Blog-Django | d75d2e021f3b66cece462dfbdf3a3405a433a254 | [
"MIT"
] | null | null | null | # Generated by Django 3.2 on 2021-04-18 14:00
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds an optional `user` foreign key to BlogModel, linking each blog post
    # to the project's configured (swappable) auth user model.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('home', '0002_alter_blogmodel_slug'),
    ]

    operations = [
        migrations.AddField(
            model_name='blogmodel',
            name='user',
            # blank/null so existing rows migrate without needing a default;
            # CASCADE removes a user's posts when the user is deleted.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| 27.181818 | 133 | 0.67893 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('home', '0002_alter_blogmodel_slug'),
]
operations = [
migrations.AddField(
model_name='blogmodel',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| true | true |
f7f98f259ed1d8115b399156659e50ec8ececc60 | 268 | py | Python | npbench/benchmarks/polybench/covariance/covariance.py | frahlg/npbench | 1bc4d9e2e22f3ca67fa2bc7f40e2e751a9c8dd26 | [
"BSD-3-Clause"
] | 27 | 2021-05-10T11:49:13.000Z | 2022-03-22T18:07:19.000Z | npbench/benchmarks/polybench/covariance/covariance.py | frahlg/npbench | 1bc4d9e2e22f3ca67fa2bc7f40e2e751a9c8dd26 | [
"BSD-3-Clause"
] | 3 | 2021-12-01T13:03:17.000Z | 2022-03-17T10:53:00.000Z | npbench/benchmarks/polybench/covariance/covariance.py | frahlg/npbench | 1bc4d9e2e22f3ca67fa2bc7f40e2e751a9c8dd26 | [
"BSD-3-Clause"
] | 7 | 2021-06-24T03:40:25.000Z | 2022-01-26T09:04:33.000Z | # Copyright 2021 ETH Zurich and the NPBench authors. All rights reserved.
import numpy as np
def initialize(M, N, datatype=np.float64):
    """Return the (float_n, data) inputs of the covariance kernel.

    ``data`` is an (N, M) array with data[i, j] = i * j / M, built by
    broadcasting a column of row indices against a row of column indices.
    """
    row_idx = np.arange(N, dtype=datatype).reshape(N, 1)
    col_idx = np.arange(M, dtype=datatype)
    data = row_idx * col_idx / M
    return datatype(N), data
| 24.363636 | 76 | 0.682836 |
import numpy as np
def initialize(M, N, datatype=np.float64):
float_n = datatype(N)
data = np.fromfunction(lambda i, j: (i * j) / M, (N, M), dtype=datatype)
return float_n, data
| true | true |
f7f9902f153f6b92527da0bf1853d1044d33cd33 | 10,695 | py | Python | estimators/neural_dual_dice.py | isabella232/dice_rl | 1af2cda70e7e90302520c4576da8e5fdbdc9f261 | [
"Apache-2.0"
] | 85 | 2020-08-05T18:20:15.000Z | 2022-03-23T08:44:16.000Z | estimators/neural_dual_dice.py | google-research/dice_rl | 3d9f74fe43c49dfd5d0ce7405a59226a9c139381 | [
"Apache-2.0"
] | 7 | 2020-08-16T09:31:11.000Z | 2021-07-15T00:41:53.000Z | estimators/neural_dual_dice.py | rajatvd/dice_rl | 0e9e1a0963cb99ae3d995aa302fa19094c580d35 | [
"Apache-2.0"
] | 10 | 2020-08-16T04:24:43.000Z | 2022-03-11T23:27:20.000Z | # Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v2 as tf
from tf_agents.specs import tensor_spec
from tf_agents.policies import tf_policy
from tf_agents.utils import common as tfagents_common
from typing import Any, Callable, Iterable, Optional, Sequence, Tuple, Union
import dice_rl.data.dataset as dataset_lib
import dice_rl.utils.common as common_lib
import dice_rl.estimators.estimator as estimator_lib
class NeuralDualDice(object):
  """Policy evaluation with DualDICE.

  Learns a nu network and a zeta (density-ratio) network from off-policy
  data and uses zeta as per-step importance weights to estimate the target
  policy's average per-step reward.
  """

  def __init__(self, dataset_spec,
               nu_network,
               zeta_network,
               nu_optimizer,
               zeta_optimizer,
               gamma: Union[float, tf.Tensor],
               reward_fn: Optional[Callable] = None,
               solve_for_state_action_ratio: bool = True,
               f_exponent: float = 1.5,
               primal_form: bool = False,
               num_samples: Optional[int] = None,
               nu_regularizer: float = 0.,
               zeta_regularizer: float = 0.):
    """Initializes the solver.

    Args:
      dataset_spec: The spec of the dataset that will be given.
      nu_network: The nu-value network.
      zeta_network: The zeta-value network.
      nu_optimizer: The optimizer to use for nu.
      zeta_optimizer: The optimizer to use for zeta.
      gamma: The discount factor to use.
      reward_fn: A function that takes in an EnvStep and returns the reward
        for that step. If not specified, defaults to just EnvStep.reward.
      solve_for_state_action_ratio: Whether to solve for state-action density
        ratio. Defaults to True.
      f_exponent: Exponent p to use for f(x) = |x|^p / p.
      primal_form: Whether to use primal form of DualDICE, which optimizes for
        nu independent of zeta. This form is biased in stochastic environments.
        Defaults to False, which uses the saddle-point formulation of DualDICE.
      num_samples: Number of samples to take from policy to estimate average
        next nu value. If actions are discrete, this defaults to computing
        average explicitly. If actions are not discrete, this defaults to using
        a single sample.
      nu_regularizer: Regularization coefficient on nu network.
      zeta_regularizer: Regularization coefficient on zeta network.
    """
    self._dataset_spec = dataset_spec
    self._nu_network = nu_network
    self._nu_network.create_variables()
    self._zeta_network = zeta_network
    self._zeta_network.create_variables()

    self._nu_optimizer = nu_optimizer
    self._zeta_optimizer = zeta_optimizer
    self._nu_regularizer = nu_regularizer
    self._zeta_regularizer = zeta_regularizer

    self._gamma = gamma
    if reward_fn is None:
      reward_fn = lambda env_step: env_step.reward
    self._reward_fn = reward_fn
    self._num_samples = num_samples

    self._solve_for_state_action_ratio = solve_for_state_action_ratio
    # State-only ratios need behavior log-probabilities for importance
    # correction; fail fast if the dataset cannot provide them.
    if (not self._solve_for_state_action_ratio and
        not self._dataset_spec.has_log_probability()):
      raise ValueError('Dataset must contain log-probability when '
                       'solve_for_state_action_ratio is False.')

    if f_exponent <= 1:
      raise ValueError('Exponent for f must be greater than 1.')
    # Convex-conjugate exponent p*: 1/p + 1/p* = 1, so f*(x) = |x|^p* / p*.
    fstar_exponent = f_exponent / (f_exponent - 1)
    self._f_fn = lambda x: tf.abs(x) ** f_exponent / f_exponent
    self._fstar_fn = lambda x: tf.abs(x) ** fstar_exponent / fstar_exponent

    self._categorical_action = common_lib.is_categorical_spec(
        self._dataset_spec.action)
    if not self._categorical_action and self._num_samples is None:
      # Continuous actions: fall back to a single policy sample per state.
      self._num_samples = 1

    self._primal_form = primal_form
    self._initialize()

  def _initialize(self):
    # Hook for subclasses; the base solver needs no extra state.
    pass

  def _get_value(self, network, env_step):
    # Evaluate the network on (observation, action) or observation only,
    # depending on whether we solve for the state-action or state ratio.
    if self._solve_for_state_action_ratio:
      return network((env_step.observation, env_step.action))[0]
    else:
      return network(env_step.observation)[0]

  def _get_average_value(self, network, env_step, policy):
    """Evaluates `network` averaged over actions drawn from `policy`.

    For categorical actions (with num_samples unset) the expectation is
    computed exactly from the policy's action probabilities; otherwise it is
    a Monte-Carlo average over `num_samples` sampled actions.
    """
    if self._solve_for_state_action_ratio:
      tfagents_step = dataset_lib.convert_to_tfagents_timestep(env_step)
      if self._categorical_action and self._num_samples is None:
        action_weights = policy.distribution(
            tfagents_step).action.probs_parameter()
        action_dtype = self._dataset_spec.action.dtype
        batch_size = tf.shape(action_weights)[0]
        num_actions = tf.shape(action_weights)[-1]
        actions = (  # Broadcast actions
            tf.ones([batch_size, 1], dtype=action_dtype) *
            tf.range(num_actions, dtype=action_dtype)[None, :])
      else:
        batch_size = tf.shape(env_step.observation)[0]
        num_actions = self._num_samples
        # Uniform weights: plain Monte-Carlo average over sampled actions.
        action_weights = tf.ones([batch_size, num_actions]) / num_actions
        actions = tf.stack(
            [policy.action(tfagents_step).action
             for _ in range(num_actions)],
            axis=1)

      # Flatten (batch, actions) into one big batch for a single network call.
      flat_actions = tf.reshape(actions, [batch_size * num_actions] +
                                actions.shape[2:].as_list())
      flat_observations = tf.reshape(
          tf.tile(env_step.observation[:, None, ...],
                  [1, num_actions] + [1] * len(env_step.observation.shape[1:])),
          [batch_size * num_actions] + env_step.observation.shape[1:].as_list())

      flat_values, _ = network((flat_observations, flat_actions))
      values = tf.reshape(flat_values, [batch_size, num_actions] +
                          flat_values.shape[1:].as_list())
      # Weighted average over the action dimension.
      return tf.reduce_sum(
          values * common_lib.reverse_broadcast(action_weights, values),
          axis=1)
    else:
      return network(env_step.observation)[0]

  def _orthogonal_regularization(self, network):
    # Penalize the off-diagonal entries of K^T K for every Dense kernel K,
    # i.e. encourage orthogonal columns.
    reg = 0
    for layer in network.layers:
      if isinstance(layer, tf.keras.layers.Dense):
        prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)
        reg += tf.reduce_sum(
            tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))
    return reg

  def train_loss(self, initial_env_step, env_step, next_env_step, policy):
    """Computes per-sample (nu_loss, zeta_loss) for one batch of transitions."""
    nu_values = self._get_value(self._nu_network, env_step)
    initial_nu_values = self._get_average_value(
        self._nu_network, initial_env_step, policy)
    next_nu_values = self._get_average_value(
        self._nu_network, next_env_step, policy)
    zeta_values = self._get_value(self._zeta_network, env_step)

    discounts = self._gamma * next_env_step.discount
    policy_ratio = 1.0
    if not self._solve_for_state_action_ratio:
      # Target/behavior importance weight, needed when only the state
      # density ratio is learned.
      tfagents_step = dataset_lib.convert_to_tfagents_timestep(env_step)
      policy_log_probabilities = policy.distribution(
          tfagents_step).action.log_prob(env_step.action)
      policy_ratio = tf.exp(
          policy_log_probabilities - env_step.get_log_probability())

    # Bellman residual: nu(s, a) - gamma * ratio * E_{a'~pi}[nu(s', a')].
    bellman_residuals = (
        nu_values - common_lib.reverse_broadcast(
            discounts * policy_ratio, nu_values) * next_nu_values)

    zeta_loss = self._fstar_fn(zeta_values) - bellman_residuals * zeta_values
    if self._primal_form:
      # Primal objective: independent of zeta but biased in stochastic envs.
      nu_loss = (self._f_fn(bellman_residuals)
                 - (1 - self._gamma) * initial_nu_values)
    else:
      # Saddle-point objective: nu maximizes what zeta minimizes.
      nu_loss = -zeta_loss - (1 - self._gamma) * initial_nu_values

    return nu_loss, zeta_loss

  @tf.function
  def train_step(self,
                 initial_env_step: dataset_lib.EnvStep,
                 experience: dataset_lib.EnvStep,
                 target_policy: tf_policy.TFPolicy):
    """Performs a single training step based on batch.

    Args:
      initial_env_step: A batch of initial steps.
      experience: A batch of transitions. Elements must have shape
        [batch_size, 2, ...].
      target_policy: The policy whose value we want to estimate.

    Returns:
      The losses and the train op.
    """
    # experience packs (step, next_step) pairs along axis 1.
    env_step = tf.nest.map_structure(lambda t: t[:, 0, ...], experience)
    next_env_step = tf.nest.map_structure(lambda t: t[:, 1, ...], experience)

    with tf.GradientTape(watch_accessed_variables=False,
                         persistent=True) as tape:
      tape.watch(self._nu_network.variables)
      tape.watch(self._zeta_network.variables)
      nu_loss, zeta_loss = self.train_loss(
          initial_env_step, env_step, next_env_step, target_policy)
      nu_loss += self._nu_regularizer * self._orthogonal_regularization(
          self._nu_network)
      zeta_loss += self._zeta_regularizer * self._orthogonal_regularization(
          self._zeta_network)

    nu_grads = tape.gradient(nu_loss, self._nu_network.variables)
    nu_grad_op = self._nu_optimizer.apply_gradients(
        zip(nu_grads, self._nu_network.variables))

    zeta_grads = tape.gradient(zeta_loss, self._zeta_network.variables)
    zeta_grad_op = self._zeta_optimizer.apply_gradients(
        zip(zeta_grads, self._zeta_network.variables))

    return (tf.reduce_mean(nu_loss), tf.reduce_mean(zeta_loss))

  def estimate_average_reward(self,
                              dataset: dataset_lib.OffpolicyDataset,
                              target_policy: tf_policy.TFPolicy):
    """Estimates value (average per-step reward) of policy.

    Args:
      dataset: The dataset to sample experience from.
      target_policy: The policy whose value we want to estimate.

    Returns:
      Estimated average per-step reward of the target policy.
    """
    def weight_fn(env_step):
      # Per-step weight is the learned zeta value, importance-corrected by
      # the policy ratio when only the state density ratio was learned.
      zeta = self._get_value(self._zeta_network, env_step)
      policy_ratio = 1.0
      if not self._solve_for_state_action_ratio:
        tfagents_timestep = dataset_lib.convert_to_tfagents_timestep(env_step)
        target_log_probabilities = target_policy.distribution(
            tfagents_timestep).action.log_prob(env_step.action)
        policy_ratio = tf.exp(
            target_log_probabilities -
            env_step.get_log_probability())
      return zeta * common_lib.reverse_broadcast(policy_ratio, zeta)

    return estimator_lib.get_fullbatch_average(
        dataset, limit=None, by_steps=True,
        reward_fn=self._reward_fn, weight_fn=weight_fn)
| 40.820611 | 80 | 0.692006 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v2 as tf
from tf_agents.specs import tensor_spec
from tf_agents.policies import tf_policy
from tf_agents.utils import common as tfagents_common
from typing import Any, Callable, Iterable, Optional, Sequence, Tuple, Union
import dice_rl.data.dataset as dataset_lib
import dice_rl.utils.common as common_lib
import dice_rl.estimators.estimator as estimator_lib
class NeuralDualDice(object):
  """Policy evaluation with DualDICE.

  Trains two networks -- ``nu`` (a value-like function) and ``zeta`` (the
  stationary distribution correction ratio) -- against a saddle-point
  objective, then uses ``zeta`` as importance weights to estimate the
  average per-step reward of a target policy from off-policy data.
  """
  def __init__(self, dataset_spec,
               nu_network,
               zeta_network,
               nu_optimizer,
               zeta_optimizer,
               gamma: Union[float, tf.Tensor],
               reward_fn: Optional[Callable] = None,
               solve_for_state_action_ratio: bool = True,
               f_exponent: float = 1.5,
               primal_form: bool = False,
               num_samples: Optional[int] = None,
               nu_regularizer: float = 0.,
               zeta_regularizer: float = 0.):
    """Initializes the estimator.

    Args:
      dataset_spec: Spec of the offline dataset (observation/action/...).
      nu_network: Network estimating nu values.
      zeta_network: Network estimating density ratios.
      nu_optimizer: Optimizer for nu_network.
      zeta_optimizer: Optimizer for zeta_network.
      gamma: Discount factor.
      reward_fn: Maps an EnvStep to a reward; defaults to the step reward.
      solve_for_state_action_ratio: If True, learn the state-action density
        ratio; if False, the state-only ratio (requires behavior-policy
        log-probabilities in the dataset).
      f_exponent: Exponent p of f(x) = |x|^p / p; must be > 1.
      primal_form: Whether to use the primal form of the nu loss.
      num_samples: Number of sampled actions for policy expectations; None
        means exact expectation for categorical actions (or one sample).
      nu_regularizer: Orthogonal regularization weight for nu_network.
      zeta_regularizer: Orthogonal regularization weight for zeta_network.

    Raises:
      ValueError: If the dataset lacks log-probabilities when they are
        required, or if f_exponent <= 1.
    """
    self._dataset_spec = dataset_spec
    self._nu_network = nu_network
    self._nu_network.create_variables()
    self._zeta_network = zeta_network
    self._zeta_network.create_variables()
    self._nu_optimizer = nu_optimizer
    self._zeta_optimizer = zeta_optimizer
    self._nu_regularizer = nu_regularizer
    self._zeta_regularizer = zeta_regularizer
    self._gamma = gamma
    if reward_fn is None:
      reward_fn = lambda env_step: env_step.reward
    self._reward_fn = reward_fn
    self._num_samples = num_samples
    self._solve_for_state_action_ratio = solve_for_state_action_ratio
    if (not self._solve_for_state_action_ratio and
        not self._dataset_spec.has_log_probability()):
      raise ValueError('Dataset must contain log-probability when '
                       'solve_for_state_action_ratio is False.')
    if f_exponent <= 1:
      raise ValueError('Exponent for f must be greater than 1.')
    # fstar is the convex conjugate exponent: 1/p + 1/p* = 1.
    fstar_exponent = f_exponent / (f_exponent - 1)
    self._f_fn = lambda x: tf.abs(x) ** f_exponent / f_exponent
    self._fstar_fn = lambda x: tf.abs(x) ** fstar_exponent / fstar_exponent
    self._categorical_action = common_lib.is_categorical_spec(
        self._dataset_spec.action)
    if not self._categorical_action and self._num_samples is None:
      # Continuous actions cannot be enumerated; fall back to one sample.
      self._num_samples = 1
    self._primal_form = primal_form
    self._initialize()
  def _initialize(self):
    """Hook for subclasses; no-op here."""
    pass
  def _get_value(self, network, env_step):
    """Evaluates ``network`` on (observation, action) or observation only."""
    if self._solve_for_state_action_ratio:
      return network((env_step.observation, env_step.action))[0]
    else:
      return network(env_step.observation)[0]
  def _get_average_value(self, network, env_step, policy):
    """Computes E_{a ~ policy(s)}[network(s, a)] (or network(s) directly)."""
    if self._solve_for_state_action_ratio:
      tfagents_step = dataset_lib.convert_to_tfagents_timestep(env_step)
      if self._categorical_action and self._num_samples is None:
        # Exact expectation: enumerate every action, weighted by the
        # policy's categorical probabilities.
        action_weights = policy.distribution(
            tfagents_step).action.probs_parameter()
        action_dtype = self._dataset_spec.action.dtype
        batch_size = tf.shape(action_weights)[0]
        num_actions = tf.shape(action_weights)[-1]
        actions = (
            tf.ones([batch_size, 1], dtype=action_dtype) *
            tf.range(num_actions, dtype=action_dtype)[None, :])
      else:
        # Monte-Carlo estimate: sample actions, weight them uniformly.
        batch_size = tf.shape(env_step.observation)[0]
        num_actions = self._num_samples
        action_weights = tf.ones([batch_size, num_actions]) / num_actions
        actions = tf.stack(
            [policy.action(tfagents_step).action
             for _ in range(num_actions)],
            axis=1)
      # Flatten (batch, action) pairs so the network sees one big batch.
      flat_actions = tf.reshape(actions, [batch_size * num_actions] +
                                actions.shape[2:].as_list())
      flat_observations = tf.reshape(
          tf.tile(env_step.observation[:, None, ...],
                  [1, num_actions] + [1] * len(env_step.observation.shape[1:])),
          [batch_size * num_actions] + env_step.observation.shape[1:].as_list())
      flat_values, _ = network((flat_observations, flat_actions))
      values = tf.reshape(flat_values, [batch_size, num_actions] +
                          flat_values.shape[1:].as_list())
      return tf.reduce_sum(
          values * common_lib.reverse_broadcast(action_weights, values),
          axis=1)
    else:
      return network(env_step.observation)[0]
  def _orthogonal_regularization(self, network):
    """Penalizes non-orthogonal Dense kernels (off-diagonal of K^T K)."""
    reg = 0
    for layer in network.layers:
      if isinstance(layer, tf.keras.layers.Dense):
        prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)
        reg += tf.reduce_sum(
            tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))
    return reg
  def train_loss(self, initial_env_step, env_step, next_env_step, policy):
    """Computes per-sample nu and zeta losses for one transition batch."""
    nu_values = self._get_value(self._nu_network, env_step)
    initial_nu_values = self._get_average_value(
        self._nu_network, initial_env_step, policy)
    next_nu_values = self._get_average_value(
        self._nu_network, next_env_step, policy)
    zeta_values = self._get_value(self._zeta_network, env_step)
    discounts = self._gamma * next_env_step.discount
    policy_ratio = 1.0
    if not self._solve_for_state_action_ratio:
      # Importance-correct for the behavior policy's action choice.
      tfagents_step = dataset_lib.convert_to_tfagents_timestep(env_step)
      policy_log_probabilities = policy.distribution(
          tfagents_step).action.log_prob(env_step.action)
      policy_ratio = tf.exp(
          policy_log_probabilities - env_step.get_log_probability())
    # Bellman residual: nu(s, a) - gamma * E[nu(s', a')].
    bellman_residuals = (
        nu_values - common_lib.reverse_broadcast(
            discounts * policy_ratio, nu_values) * next_nu_values)
    zeta_loss = self._fstar_fn(zeta_values) - bellman_residuals * zeta_values
    if self._primal_form:
      nu_loss = (self._f_fn(bellman_residuals)
                 - (1 - self._gamma) * initial_nu_values)
    else:
      # Dual form: nu plays against zeta in the saddle-point objective.
      nu_loss = -zeta_loss - (1 - self._gamma) * initial_nu_values
    return nu_loss, zeta_loss
  @tf.function
  def train_step(self,
                 initial_env_step: dataset_lib.EnvStep,
                 experience: dataset_lib.EnvStep,
                 target_policy: tf_policy.TFPolicy):
    """Runs one gradient step on both networks; returns the mean losses."""
    # `experience` stacks (step, next_step) pairs along axis 1.
    env_step = tf.nest.map_structure(lambda t: t[:, 0, ...], experience)
    next_env_step = tf.nest.map_structure(lambda t: t[:, 1, ...], experience)
    with tf.GradientTape(watch_accessed_variables=False,
                         persistent=True) as tape:
      tape.watch(self._nu_network.variables)
      tape.watch(self._zeta_network.variables)
      nu_loss, zeta_loss = self.train_loss(
          initial_env_step, env_step, next_env_step, target_policy)
      nu_loss += self._nu_regularizer * self._orthogonal_regularization(
          self._nu_network)
      zeta_loss += self._zeta_regularizer * self._orthogonal_regularization(
          self._zeta_network)
    nu_grads = tape.gradient(nu_loss, self._nu_network.variables)
    nu_grad_op = self._nu_optimizer.apply_gradients(
        zip(nu_grads, self._nu_network.variables))
    zeta_grads = tape.gradient(zeta_loss, self._zeta_network.variables)
    zeta_grad_op = self._zeta_optimizer.apply_gradients(
        zip(zeta_grads, self._zeta_network.variables))
    return (tf.reduce_mean(nu_loss), tf.reduce_mean(zeta_loss))
  def estimate_average_reward(self,
                              dataset: dataset_lib.OffpolicyDataset,
                              target_policy: tf_policy.TFPolicy):
    """Estimates the target policy's average per-step reward on `dataset`."""
    def weight_fn(env_step):
      # Learned density ratio, optionally corrected by the policy ratio.
      zeta = self._get_value(self._zeta_network, env_step)
      policy_ratio = 1.0
      if not self._solve_for_state_action_ratio:
        tfagents_timestep = dataset_lib.convert_to_tfagents_timestep(env_step)
        target_log_probabilities = target_policy.distribution(
            tfagents_timestep).action.log_prob(env_step.action)
        policy_ratio = tf.exp(
            target_log_probabilities -
            env_step.get_log_probability())
      return zeta * common_lib.reverse_broadcast(policy_ratio, zeta)
    return estimator_lib.get_fullbatch_average(
        dataset, limit=None, by_steps=True,
        reward_fn=self._reward_fn, weight_fn=weight_fn)
| true | true |
f7f9913f6080ce29d5c4c2558a99aa468982316a | 5,113 | py | Python | botnet/modules/baseresponder.py | admdev8/botnet-2 | 2fd43237e628869eb34d8e7a6747da6d71c1192c | [
"MIT"
] | 69 | 2015-02-24T19:24:23.000Z | 2022-02-23T08:04:53.000Z | botnet/modules/baseresponder.py | admdev8/botnet-2 | 2fd43237e628869eb34d8e7a6747da6d71c1192c | [
"MIT"
] | 10 | 2017-06-28T21:08:29.000Z | 2022-01-26T07:46:02.000Z | botnet/modules/baseresponder.py | admdev8/botnet-2 | 2fd43237e628869eb34d8e7a6747da6d71c1192c | [
"MIT"
] | 39 | 2015-11-19T10:07:21.000Z | 2022-03-30T10:56:24.000Z | import re
from ..helpers import is_channel_name
from ..message import Message
from ..signals import message_out
from .base import BaseModule
from .mixins import ConfigMixin, MessageDispatcherMixin
from .lib import parse_command
class BaseResponder(ConfigMixin, MessageDispatcherMixin, BaseModule):
    """Base class for modules which react to users' messages.

    Example module config:

        "botnet": {
            "base_responder": {
                "command_prefix": "."
            }
        }

    """

    # Do not describe the `help` command from every module derived from this
    # class: each such module would answer a help request and flood the user
    # with messages.  Exactly one module (by default the meta module) should
    # set this to False.  This is a workaround for the fact that modules have
    # no direct access to one another, so commands cannot be queried
    # centrally and each module reports its own.
    ignore_help = True

    # A module is expected to keep its config under
    # config['module_config'][config_namespace][config_name].
    config_namespace = None
    config_name = None

    # Defaults for this base class.
    base_default_config = {
        "command_prefix": "."
    }

    # Defaults for the subclass.
    default_config = {}

    def __init__(self, config):
        super().__init__(config)
        self.register_default_config(self.base_default_config)
        self.register_default_config(self.default_config)
        self.register_config('botnet', 'base_responder')
        if self.config_namespace and self.config_name:
            self.register_config(self.config_namespace, self.config_name)

    def _get_commands_from_handlers(self, handler_prefix):
        """Build a list of supported command names from handler methods."""
        hide_help = self.ignore_help
        found = []
        for attr_name in dir(self):
            if not attr_name.startswith(handler_prefix):
                continue
            if not callable(getattr(self, attr_name)):
                continue
            command_name = attr_name[len(handler_prefix):]
            if hide_help and command_name == 'help':
                continue
            found.append(command_name)
        return found

    def _get_help_for_command(self, handler_prefix, name):
        """Return a single-line help text for `name`, or None if unknown."""
        handler = self._get_command_handler(handler_prefix, name)
        if not handler:
            return None
        doc = handler.__doc__
        body = ' '.join(doc.splitlines()) if doc else 'No help available.'
        header = 'Module %s, help for `%s`: ' % (self.__class__.__name__,
                                                 name)
        # Collapse runs of spaces left over from docstring indentation.
        return re.sub(' +', ' ', header + body)

    def get_command_prefix(self):
        """Return the configured command prefix (e.g. '.')."""
        return self.config_get('command_prefix')

    def respond(self, priv_msg, text, pm=False):
        """Send `text` in response to `priv_msg`.

        The reply is routed automatically: to the originating channel, or
        to the sender as a private message when `pm` is True or the
        original message was itself private.
        """
        to_channel = is_channel_name(priv_msg.params[0])
        if to_channel and not pm:
            target = priv_msg.params[0]
        else:
            target = priv_msg.nickname
        message_out.send(self, msg=Message(command='PRIVMSG',
                                           params=[target, text]))

    def get_all_commands(self):
        """Return all command names supported by this module."""
        return self._get_commands_from_handlers(self.handler_prefix)

    def get_all_admin_commands(self):
        """Return all admin command names supported by this module."""
        return self._get_commands_from_handlers(self.admin_handler_prefix)

    @parse_command([('command_names', '*')])
    def command_help(self, msg, args):
        """If COMMAND is specified sends detailed help for the commands in a
        private message.
        Syntax: help [COMMAND ...]
        """
        if not args.command_names:
            return
        for name in args.command_names:
            if self.ignore_help and name == 'help':
                continue
            for prefix in (self.handler_prefix, self.admin_handler_prefix):
                help_text = self._get_help_for_command(prefix, name)
                if help_text:
                    self.respond(msg, help_text, pm=True)
| 36.784173 | 80 | 0.621357 | import re
from ..helpers import is_channel_name
from ..message import Message
from ..signals import message_out
from .base import BaseModule
from .mixins import ConfigMixin, MessageDispatcherMixin
from .lib import parse_command
class BaseResponder(ConfigMixin, MessageDispatcherMixin, BaseModule):
# is based on this class would respond to such request and flood a user with
# messages. This should be set to False only in one module (by default in
# module meta). Note that this parameter is a terrible workaround. The
# problem is caused by the fact that modules do not have direct access to
# each another, so it is not possible to query others modules for commands.
# Each module has to report them separately, and in effect the same command
# had to be defined in all modules.
ignore_help = True
# A module is expected to store the config in
# config['module_config'][config_namespace][config_name]
config_namespace = None
config_name = None
# This is the default config for this class
base_default_config = {
"command_prefix": "."
}
# Default config for the class which inherits from BaseResponder
default_config = {}
def __init__(self, config):
super().__init__(config)
self.register_default_config(self.base_default_config)
self.register_default_config(self.default_config)
self.register_config('botnet', 'base_responder')
if self.config_namespace and self.config_name:
self.register_config(self.config_namespace, self.config_name)
def _get_commands_from_handlers(self, handler_prefix):
commands = []
for name in dir(self):
if name.startswith(handler_prefix):
attr = getattr(self, name)
if hasattr(attr, '__call__'):
command_name = name[len(handler_prefix):]
if not (self.ignore_help and command_name == 'help'):
commands.append(command_name)
return commands
def _get_help_for_command(self, handler_prefix, name):
handler = self._get_command_handler(handler_prefix, name)
if not handler:
return None
# Header
rw = 'Module %s, help for `%s`: ' % (self.__class__.__name__,
name)
# Docstring
help_text = handler.__doc__
if help_text:
rw += ' '.join(help_text.splitlines())
else:
rw += 'No help available.'
rw = re.sub(' +', ' ', rw)
return rw
def get_command_prefix(self):
return self.config_get('command_prefix')
def respond(self, priv_msg, text, pm=False):
# If this is supposed to be sent as a private message or was sent in
# a private message to the bot respond also in private message.
if pm or not is_channel_name(priv_msg.params[0]):
target = priv_msg.nickname
else:
target = priv_msg.params[0]
response = Message(command='PRIVMSG', params=[target, text])
message_out.send(self, msg=response)
def get_all_commands(self):
return self._get_commands_from_handlers(self.handler_prefix)
def get_all_admin_commands(self):
return self._get_commands_from_handlers(self.admin_handler_prefix)
@parse_command([('command_names', '*')])
def command_help(self, msg, args):
if len(args.command_names) > 0:
# Display help for a specific command
for name in args.command_names:
if self.ignore_help and name == 'help':
continue
# get help
lines = []
for prefix in [self.handler_prefix, self.admin_handler_prefix]:
text = self._get_help_for_command(prefix, name)
if text:
lines.append(text)
# send help
for line in lines:
self.respond(msg, line, pm=True)
| true | true |
f7f991602b439168bface83da9407a895bf5d90d | 1,014 | py | Python | model.py | eric-sentient/MIDI-reAE-chords | ac16210bbeb822cb4babb95974a4a05d527763cc | [
"MIT"
] | null | null | null | model.py | eric-sentient/MIDI-reAE-chords | ac16210bbeb822cb4babb95974a4a05d527763cc | [
"MIT"
] | null | null | null | model.py | eric-sentient/MIDI-reAE-chords | ac16210bbeb822cb4babb95974a4a05d527763cc | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import load_train
from keras.models import Sequential, Model
from keras.layers import Flatten, Dropout, RepeatVector, LSTM, Dense, Dropout, Embedding, Masking, Bidirectional, Layer
from keras.callbacks import EarlyStopping, ModelCheckpoint
def make_model():
    """Construct and compile the LSTM autoencoder.

    Encodes 101-dimensional input sequences down to a 10-unit bottleneck,
    repeats the latent vector twice, and decodes back to a 101-dimensional
    sigmoid output at each timestep.
    """
    layer_stack = [
        # Encoder
        LSTM(32,
             input_dim=101,
             return_sequences=True,
             dropout=0.1,
             recurrent_dropout=0.1),
        LSTM(10,
             activation='relu',
             return_sequences=False),
        # Bridge: repeat the latent code for the two decoder timesteps
        RepeatVector(2),
        # Decoder
        LSTM(10,
             activation='relu',
             return_sequences=True),
        LSTM(32,
             return_sequences=True,
             dropout=0.1,
             recurrent_dropout=0.1),
        # Output layer
        Dense(101,
              activation='sigmoid'),
    ]
    model = Sequential()
    for layer in layer_stack:
        model.add(layer)
    # Compile the model
    model.compile(
        optimizer='RMSprop',
        loss='categorical_crossentropy',
        metrics=['accuracy'])
    model.summary()
    return model
return model | 26 | 119 | 0.657791 | import pandas as pd
import numpy as np
import load_train
from keras.models import Sequential, Model
from keras.layers import Flatten, Dropout, RepeatVector, LSTM, Dense, Dropout, Embedding, Masking, Bidirectional, Layer
from keras.callbacks import EarlyStopping, ModelCheckpoint
def make_model():
model = Sequential()
model.add(LSTM(32,
input_dim=101,
return_sequences=True,
dropout=0.1,
recurrent_dropout=0.1))
model.add(LSTM(10,
activation='relu',
return_sequences=False))
model.add(RepeatVector(2))
model.add(LSTM(10,
activation='relu',
return_sequences=True))
model.add(LSTM(32,
return_sequences=True,
dropout=0.1,
recurrent_dropout=0.1))
model.add(Dense(101,
activation='sigmoid'))
model.compile(
optimizer='RMSprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.summary()
return model | true | true |
f7f991f11a49bed2aef71fe3917e5d676de18124 | 2,242 | py | Python | visualize.py | Toroi0610/visualize_interactive | 8ddc8c248902ff59f1800d2418c697ffe1e5b472 | [
"MIT"
] | null | null | null | visualize.py | Toroi0610/visualize_interactive | 8ddc8c248902ff59f1800d2418c697ffe1e5b472 | [
"MIT"
] | null | null | null | visualize.py | Toroi0610/visualize_interactive | 8ddc8c248902ff59f1800d2418c697ffe1e5b472 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[5]:
import joblib
import numpy as np
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button, RadioButtons
# In[6]:
# Load the pre-trained model from the working directory.  The pickle is
# assumed to contain an estimator with a scikit-learn style ``predict``
# method taking rows of 6 features -- TODO confirm against the training
# script that produced model.pkl.
with open("model.pkl", "rb") as f:
    model = joblib.load(f)
def pred(model, a, b, c, d):
    """Evaluate `model` over a 100x100 grid of (x, y) in [-5, 5).

    Parameters a, b, c, d are held fixed as the last four features of every
    sample.  Returns a 2-D numpy array indexed as [x_index, y_index].

    Instead of issuing 10,000 individual single-row ``predict`` calls (one
    per grid point, which dominates runtime), the whole grid is built once
    and predicted in a single batched call; for a vectorized (scikit-learn
    style) model this produces exactly the same values in the same order.
    """
    axis = np.arange(-5.0, 5.0, 0.1)
    grid = [[x, y, a, b, c, d] for x in axis for y in axis]
    preds = model.predict(grid)
    # len(axis) == 100, matching the original hard-coded reshape([100, 100]).
    return np.asarray(preds).reshape(len(axis), len(axis))
# In[12]:
# --- Interactive figure -------------------------------------------------
# Reserve space on the left/bottom of the figure for the slider widgets.
fig, ax = plt.subplots(figsize=(12, 12))
plt.subplots_adjust(left=0.25, bottom=0.35)
x = np.arange(-5, 5, 0.1)
y = np.arange(-5, 5, 0.1)
# Initial values for the four model features controlled by the sliders.
a = 0.2
b = 0.2
c = 0.2
d = 0.2
s = pred(model, a, b, c, d)
im = plt.imshow(s)
ax.margins(x=0)
# One horizontal slider axis per tunable parameter (stacked vertically).
axcolor = 'lightgoldenrodyellow'
axa = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)
axb = plt.axes([0.25, 0.15, 0.65, 0.03], facecolor=axcolor)
axc = plt.axes([0.25, 0.2, 0.65, 0.03], facecolor=axcolor)
axd = plt.axes([0.25, 0.25, 0.65, 0.03], facecolor=axcolor)
sa = Slider(axa, 'a', 0.0, 1.0, valinit=a)
sb = Slider(axb, 'b', -1.0, 1.0, valinit=b)
sc = Slider(axc, 'c', 0.0, 1.0, valinit=c)
sd = Slider(axd, 'd', -1.0, 1.0, valinit=d)
def update(val):
    """Slider callback: recompute the prediction grid and refresh the image."""
    params = (sa.val, sb.val, sc.val, sd.val)
    im.set_data(pred(model, *params))
    fig.canvas.draw_idle()
# amp = samp.val
# freq = sfreq.val
# l.set_ydata(amp*np.sin(2*np.pi*freq*t))
# fig.canvas.draw_idle()
# Redraw the heatmap whenever any of the four sliders moves.
sa.on_changed(update)
sb.on_changed(update)
sc.on_changed(update)
sd.on_changed(update)
# Button that restores every slider to its initial value.
resetax = plt.axes([0.8, 0.025, 0.1, 0.04])
button = Button(resetax, 'Reset', color=axcolor, hovercolor='0.975')
def reset(event):
    """Reset-button callback: snap all four sliders back to their defaults."""
    for slider in (sa, sb, sc, sd):
        slider.reset()
button.on_clicked(reset)
# rax = plt.axes([0.025, 0.5, 0.15, 0.15], facecolor=axcolor)
# radio = RadioButtons(rax, ('red', 'blue', 'green'), active=0)
# def colorfunc(label):
# im.set_color(label)
# fig.canvas.draw_idle()
# radio.on_clicked(colorfunc)
# Initialize plot with correct initial active value
# colorfunc(radio.value_selected)
plt.show()
# In[10]:
# Static sanity check (leftover notebook cells): render one fixed setting.
s = pred(model, 0.1, 0.1, 0, 0)
# In[11]:
plt.imshow(s)
# In[ ]:
| 18.080645 | 68 | 0.609277 |
import joblib
import numpy as np
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button, RadioButtons
with open("model.pkl", "rb") as f:
model = joblib.load(f)
def pred(model, a, b, c, d):
res = []
for x in np.arange(-5.0, 5.0, 0.1):
for y in np.arange(-5.0, 5.0, 0.1):
res.append(model.predict([[x, y, a, b, c, d]]))
return np.array(res).reshape([100, 100])
fig, ax = plt.subplots(figsize=(12, 12))
plt.subplots_adjust(left=0.25, bottom=0.35)
x = np.arange(-5, 5, 0.1)
y = np.arange(-5, 5, 0.1)
a = 0.2
b = 0.2
c = 0.2
d = 0.2
s = pred(model, a, b, c, d)
im = plt.imshow(s)
ax.margins(x=0)
axcolor = 'lightgoldenrodyellow'
axa = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)
axb = plt.axes([0.25, 0.15, 0.65, 0.03], facecolor=axcolor)
axc = plt.axes([0.25, 0.2, 0.65, 0.03], facecolor=axcolor)
axd = plt.axes([0.25, 0.25, 0.65, 0.03], facecolor=axcolor)
sa = Slider(axa, 'a', 0.0, 1.0, valinit=a)
sb = Slider(axb, 'b', -1.0, 1.0, valinit=b)
sc = Slider(axc, 'c', 0.0, 1.0, valinit=c)
sd = Slider(axd, 'd', -1.0, 1.0, valinit=d)
def update(val):
a = sa.val
b = sb.val
c = sc.val
d = sd.val
im.set_data(pred(model, a, b, c, d))
fig.canvas.draw_idle()
sa.on_changed(update)
sb.on_changed(update)
sc.on_changed(update)
sd.on_changed(update)
resetax = plt.axes([0.8, 0.025, 0.1, 0.04])
button = Button(resetax, 'Reset', color=axcolor, hovercolor='0.975')
def reset(event):
sa.reset()
sb.reset()
sc.reset()
sd.reset()
button.on_clicked(reset)
plt.show()
s = pred(model, 0.1, 0.1, 0, 0)
plt.imshow(s)
| true | true |
f7f992fcea49f9bec747311a0772f563c14ccfeb | 66,995 | py | Python | Lib/test/test_asyncio/test_unix_events.py | NihalAgarwal/cpython | e1179a5096fb12297ececd7a1c79969aa5747e28 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | Lib/test/test_asyncio/test_unix_events.py | NihalAgarwal/cpython | e1179a5096fb12297ececd7a1c79969aa5747e28 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | Lib/test/test_asyncio/test_unix_events.py | NihalAgarwal/cpython | e1179a5096fb12297ececd7a1c79969aa5747e28 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | """Tests for unix_events.py."""
import collections
import contextlib
import errno
import io
import os
import pathlib
import signal
import socket
import stat
import sys
import tempfile
import threading
import unittest
from unittest import mock
from test import support
if sys.platform == 'win32':
raise unittest.SkipTest('UNIX only')
import asyncio
from asyncio import log
from asyncio import base_events
from asyncio import events
from asyncio import unix_events
from test.test_asyncio import utils as test_utils
MOCK_ANY = mock.ANY
def tearDownModule():
    # Reset the global event loop policy so this test module leaves no
    # state behind for other test modules.
    asyncio.set_event_loop_policy(None)
def close_pipe_transport(transport):
    """Close a pipe transport's underlying pipe directly.

    transport.close() is deliberately avoided here because the event loop
    and the selector are mocked in these tests.
    """
    pipe = transport._pipe
    if pipe is None:
        return
    pipe.close()
    transport._pipe = None
@unittest.skipUnless(signal, 'Signals are not supported')
class SelectorEventLoopSignalTests(test_utils.TestCase):
    """Tests for signal handler management on the selector event loop.

    The ``signal`` module (and sometimes the logger) is mocked in most
    tests so that real process signal state is never modified.
    """
    def setUp(self):
        super().setUp()
        self.loop = asyncio.SelectorEventLoop()
        self.set_event_loop(self.loop)
    def test_check_signal(self):
        # _check_signal rejects non-int and out-of-range signal numbers.
        self.assertRaises(
            TypeError, self.loop._check_signal, '1')
        self.assertRaises(
            ValueError, self.loop._check_signal, signal.NSIG + 1)
    def test_handle_signal_no_handler(self):
        # Receiving a signal with no registered handler must not raise.
        self.loop._handle_signal(signal.NSIG + 1)
    def test_handle_signal_cancelled_handler(self):
        # A cancelled handle must be unregistered instead of being called.
        h = asyncio.Handle(mock.Mock(), (),
                           loop=mock.Mock())
        h.cancel()
        self.loop._signal_handlers[signal.NSIG + 1] = h
        self.loop.remove_signal_handler = mock.Mock()
        self.loop._handle_signal(signal.NSIG + 1)
        self.loop.remove_signal_handler.assert_called_with(signal.NSIG + 1)
    @mock.patch('asyncio.unix_events.signal')
    def test_add_signal_handler_setup_error(self, m_signal):
        # A failing set_wakeup_fd() must surface as RuntimeError.
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        m_signal.set_wakeup_fd.side_effect = ValueError
        self.assertRaises(
            RuntimeError,
            self.loop.add_signal_handler,
            signal.SIGINT, lambda: True)
    @mock.patch('asyncio.unix_events.signal')
    def test_add_signal_handler_coroutine_error(self, m_signal):
        m_signal.NSIG = signal.NSIG
        async def simple_coroutine():
            pass
        # callback must not be a coroutine function
        coro_func = simple_coroutine
        coro_obj = coro_func()
        self.addCleanup(coro_obj.close)
        for func in (coro_func, coro_obj):
            self.assertRaisesRegex(
                TypeError, 'coroutines cannot be used with add_signal_handler',
                self.loop.add_signal_handler,
                signal.SIGINT, func)
    @mock.patch('asyncio.unix_events.signal')
    def test_add_signal_handler(self, m_signal):
        # The happy path stores an asyncio.Handle wrapping the callback.
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        cb = lambda: True
        self.loop.add_signal_handler(signal.SIGHUP, cb)
        h = self.loop._signal_handlers.get(signal.SIGHUP)
        self.assertIsInstance(h, asyncio.Handle)
        self.assertEqual(h._callback, cb)
    @mock.patch('asyncio.unix_events.signal')
    def test_add_signal_handler_install_error(self, m_signal):
        # A non-EINVAL OSError from signal.signal() propagates unchanged
        # (and the cleanup set_wakeup_fd(-1) error is swallowed).
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        def set_wakeup_fd(fd):
            if fd == -1:
                raise ValueError()
        m_signal.set_wakeup_fd = set_wakeup_fd
        class Err(OSError):
            errno = errno.EFAULT
        m_signal.signal.side_effect = Err
        self.assertRaises(
            Err,
            self.loop.add_signal_handler,
            signal.SIGINT, lambda: True)
    @mock.patch('asyncio.unix_events.signal')
    @mock.patch('asyncio.base_events.logger')
    def test_add_signal_handler_install_error2(self, m_logging, m_signal):
        # EINVAL becomes RuntimeError; with another handler registered the
        # wakeup fd is not torn down (only the initial set_wakeup_fd call).
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        class Err(OSError):
            errno = errno.EINVAL
        m_signal.signal.side_effect = Err
        self.loop._signal_handlers[signal.SIGHUP] = lambda: True
        self.assertRaises(
            RuntimeError,
            self.loop.add_signal_handler,
            signal.SIGINT, lambda: True)
        self.assertFalse(m_logging.info.called)
        self.assertEqual(1, m_signal.set_wakeup_fd.call_count)
    @mock.patch('asyncio.unix_events.signal')
    @mock.patch('asyncio.base_events.logger')
    def test_add_signal_handler_install_error3(self, m_logging, m_signal):
        # Same as above, but with no other handler registered the wakeup fd
        # is reset too (second set_wakeup_fd call).
        class Err(OSError):
            errno = errno.EINVAL
        m_signal.signal.side_effect = Err
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        self.assertRaises(
            RuntimeError,
            self.loop.add_signal_handler,
            signal.SIGINT, lambda: True)
        self.assertFalse(m_logging.info.called)
        self.assertEqual(2, m_signal.set_wakeup_fd.call_count)
    @mock.patch('asyncio.unix_events.signal')
    def test_remove_signal_handler(self, m_signal):
        # Removing the last handler restores SIG_DFL and the wakeup fd.
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
        self.assertTrue(
            self.loop.remove_signal_handler(signal.SIGHUP))
        self.assertTrue(m_signal.set_wakeup_fd.called)
        self.assertTrue(m_signal.signal.called)
        self.assertEqual(
            (signal.SIGHUP, m_signal.SIG_DFL), m_signal.signal.call_args[0])
    @mock.patch('asyncio.unix_events.signal')
    def test_remove_signal_handler_2(self, m_signal):
        # With another handler still registered the wakeup fd stays, and
        # SIGINT is restored to the default int handler (not SIG_DFL).
        m_signal.NSIG = signal.NSIG
        m_signal.SIGINT = signal.SIGINT
        m_signal.valid_signals = signal.valid_signals
        self.loop.add_signal_handler(signal.SIGINT, lambda: True)
        self.loop._signal_handlers[signal.SIGHUP] = object()
        m_signal.set_wakeup_fd.reset_mock()
        self.assertTrue(
            self.loop.remove_signal_handler(signal.SIGINT))
        self.assertFalse(m_signal.set_wakeup_fd.called)
        self.assertTrue(m_signal.signal.called)
        self.assertEqual(
            (signal.SIGINT, m_signal.default_int_handler),
            m_signal.signal.call_args[0])
    @mock.patch('asyncio.unix_events.signal')
    @mock.patch('asyncio.base_events.logger')
    def test_remove_signal_handler_cleanup_error(self, m_logging, m_signal):
        # A failing set_wakeup_fd(-1) on removal is only logged.
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
        m_signal.set_wakeup_fd.side_effect = ValueError
        self.loop.remove_signal_handler(signal.SIGHUP)
        self.assertTrue(m_logging.info)
    @mock.patch('asyncio.unix_events.signal')
    def test_remove_signal_handler_error(self, m_signal):
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
        m_signal.signal.side_effect = OSError
        self.assertRaises(
            OSError, self.loop.remove_signal_handler, signal.SIGHUP)
    @mock.patch('asyncio.unix_events.signal')
    def test_remove_signal_handler_error2(self, m_signal):
        # EINVAL on restore is reported as RuntimeError.
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
        class Err(OSError):
            errno = errno.EINVAL
        m_signal.signal.side_effect = Err
        self.assertRaises(
            RuntimeError, self.loop.remove_signal_handler, signal.SIGHUP)
    @mock.patch('asyncio.unix_events.signal')
    def test_close(self, m_signal):
        # Closing the loop removes every handler and resets the wakeup fd.
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
        self.loop.add_signal_handler(signal.SIGCHLD, lambda: True)
        self.assertEqual(len(self.loop._signal_handlers), 2)
        m_signal.set_wakeup_fd.reset_mock()
        self.loop.close()
        self.assertEqual(len(self.loop._signal_handlers), 0)
        m_signal.set_wakeup_fd.assert_called_once_with(-1)
    @mock.patch('asyncio.unix_events.sys')
    @mock.patch('asyncio.unix_events.signal')
    def test_close_on_finalizing(self, m_signal, m_sys):
        # During interpreter finalization handlers are dropped without
        # calling signal.signal(), and a ResourceWarning is emitted.
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
        self.assertEqual(len(self.loop._signal_handlers), 1)
        m_sys.is_finalizing.return_value = True
        m_signal.signal.reset_mock()
        with self.assertWarnsRegex(ResourceWarning,
                                   "skipping signal handlers removal"):
            self.loop.close()
        self.assertEqual(len(self.loop._signal_handlers), 0)
        self.assertFalse(m_signal.signal.called)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'),
                     'UNIX Sockets are not supported')
class SelectorEventLoopUnixSocketTests(test_utils.TestCase):
    """Tests for create_unix_server()/create_unix_connection().

    Covers argument validation (path vs. sock, ssl combinations) and
    socket-type checks on the selector event loop.
    """
    def setUp(self):
        super().setUp()
        self.loop = asyncio.SelectorEventLoop()
        self.set_event_loop(self.loop)
    @support.skip_unless_bind_unix_socket
    def test_create_unix_server_existing_path_sock(self):
        # Binding over a stale (closed) socket file must succeed.
        with test_utils.unix_socket_path() as path:
            sock = socket.socket(socket.AF_UNIX)
            sock.bind(path)
            sock.listen(1)
            sock.close()
            coro = self.loop.create_unix_server(lambda: None, path)
            srv = self.loop.run_until_complete(coro)
            srv.close()
            self.loop.run_until_complete(srv.wait_closed())
    @support.skip_unless_bind_unix_socket
    def test_create_unix_server_pathlib(self):
        # pathlib.Path is accepted in place of a str path.
        with test_utils.unix_socket_path() as path:
            path = pathlib.Path(path)
            srv_coro = self.loop.create_unix_server(lambda: None, path)
            srv = self.loop.run_until_complete(srv_coro)
            srv.close()
            self.loop.run_until_complete(srv.wait_closed())
    def test_create_unix_connection_pathlib(self):
        with test_utils.unix_socket_path() as path:
            path = pathlib.Path(path)
            coro = self.loop.create_unix_connection(lambda: None, path)
            with self.assertRaises(FileNotFoundError):
                # If pathlib.Path wasn't supported, the exception would be
                # different.
                self.loop.run_until_complete(coro)
    def test_create_unix_server_existing_path_nonsock(self):
        # A regular file at the target path must be rejected.
        with tempfile.NamedTemporaryFile() as file:
            coro = self.loop.create_unix_server(lambda: None, file.name)
            with self.assertRaisesRegex(OSError,
                                        'Address.*is already in use'):
                self.loop.run_until_complete(coro)
    def test_create_unix_server_ssl_bool(self):
        # ssl must be an SSLContext, not a truthy flag.
        coro = self.loop.create_unix_server(lambda: None, path='spam',
                                            ssl=True)
        with self.assertRaisesRegex(TypeError,
                                    'ssl argument must be an SSLContext'):
            self.loop.run_until_complete(coro)
    def test_create_unix_server_nopath_nosock(self):
        coro = self.loop.create_unix_server(lambda: None, path=None)
        with self.assertRaisesRegex(ValueError,
                                    'path was not specified, and no sock'):
            self.loop.run_until_complete(coro)
    def test_create_unix_server_path_inetsock(self):
        # An AF_INET socket is not a UNIX domain stream socket.
        sock = socket.socket()
        with sock:
            coro = self.loop.create_unix_server(lambda: None, path=None,
                                                sock=sock)
            with self.assertRaisesRegex(ValueError,
                                        'A UNIX Domain Stream.*was expected'):
                self.loop.run_until_complete(coro)
    def test_create_unix_server_path_dgram(self):
        # SOCK_DGRAM is rejected: a stream socket is required.
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        with sock:
            coro = self.loop.create_unix_server(lambda: None, path=None,
                                                sock=sock)
            with self.assertRaisesRegex(ValueError,
                                        'A UNIX Domain Stream.*was expected'):
                self.loop.run_until_complete(coro)
    @unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
                         'no socket.SOCK_NONBLOCK (linux only)')
    @support.skip_unless_bind_unix_socket
    def test_create_unix_server_path_stream_bittype(self):
        # SOCK_STREAM combined with extra bit flags must still be accepted.
        sock = socket.socket(
            socket.AF_UNIX, socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
        with tempfile.NamedTemporaryFile() as file:
            fn = file.name
        try:
            with sock:
                sock.bind(fn)
                coro = self.loop.create_unix_server(lambda: None, path=None,
                                                    sock=sock)
                srv = self.loop.run_until_complete(coro)
                srv.close()
                self.loop.run_until_complete(srv.wait_closed())
        finally:
            os.unlink(fn)
    def test_create_unix_server_ssl_timeout_with_plain_sock(self):
        coro = self.loop.create_unix_server(lambda: None, path='spam',
                                            ssl_handshake_timeout=1)
        with self.assertRaisesRegex(
                ValueError,
                'ssl_handshake_timeout is only meaningful with ssl'):
            self.loop.run_until_complete(coro)
    def test_create_unix_connection_path_inetsock(self):
        sock = socket.socket()
        with sock:
            coro = self.loop.create_unix_connection(lambda: None,
                                                    sock=sock)
            with self.assertRaisesRegex(ValueError,
                                        'A UNIX Domain Stream.*was expected'):
                self.loop.run_until_complete(coro)
    @mock.patch('asyncio.unix_events.socket')
    def test_create_unix_server_bind_error(self, m_socket):
        # Ensure that the socket is closed on any bind error
        sock = mock.Mock()
        m_socket.socket.return_value = sock
        sock.bind.side_effect = OSError
        coro = self.loop.create_unix_server(lambda: None, path="/test")
        with self.assertRaises(OSError):
            self.loop.run_until_complete(coro)
        self.assertTrue(sock.close.called)
        sock.bind.side_effect = MemoryError
        coro = self.loop.create_unix_server(lambda: None, path="/test")
        with self.assertRaises(MemoryError):
            self.loop.run_until_complete(coro)
        self.assertTrue(sock.close.called)
    def test_create_unix_connection_path_sock(self):
        # path and sock are mutually exclusive.
        coro = self.loop.create_unix_connection(
            lambda: None, os.devnull, sock=object())
        with self.assertRaisesRegex(ValueError, 'path and sock can not be'):
            self.loop.run_until_complete(coro)
    def test_create_unix_connection_nopath_nosock(self):
        coro = self.loop.create_unix_connection(
            lambda: None, None)
        with self.assertRaisesRegex(ValueError,
                                    'no path and sock were specified'):
            self.loop.run_until_complete(coro)
    def test_create_unix_connection_nossl_serverhost(self):
        # server_hostname requires ssl.
        coro = self.loop.create_unix_connection(
            lambda: None, os.devnull, server_hostname='spam')
        with self.assertRaisesRegex(ValueError,
                                    'server_hostname is only meaningful'):
            self.loop.run_until_complete(coro)
    def test_create_unix_connection_ssl_noserverhost(self):
        # ssl requires server_hostname for UNIX connections.
        coro = self.loop.create_unix_connection(
            lambda: None, os.devnull, ssl=True)
        with self.assertRaisesRegex(
            ValueError, 'you have to pass server_hostname when using ssl'):
            self.loop.run_until_complete(coro)
    def test_create_unix_connection_ssl_timeout_with_plain_sock(self):
        coro = self.loop.create_unix_connection(lambda: None, path='spam',
                                                ssl_handshake_timeout=1)
        with self.assertRaisesRegex(
                ValueError,
                'ssl_handshake_timeout is only meaningful with ssl'):
            self.loop.run_until_complete(coro)
@unittest.skipUnless(hasattr(os, 'sendfile'),
                     'sendfile is not supported')
class SelectorEventLoopUnixSockSendfileTests(test_utils.TestCase):
    """Exercise the selector event loop's os.sendfile()-based
    _sock_sendfile_native() fast path: error reporting, cancellation,
    and partial-progress bookkeeping."""

    DATA = b"12345abcde" * 16 * 1024  # 160 KiB

    class MyProto(asyncio.Protocol):
        """Minimal receiving protocol: records connection state and
        accumulates received bytes."""

        def __init__(self, loop):
            self.started = False
            self.closed = False
            self.data = bytearray()
            self.fut = loop.create_future()  # completed on connection_lost
            self.transport = None
            self._ready = loop.create_future()  # completed on connection_made

        def connection_made(self, transport):
            self.started = True
            self.transport = transport
            self._ready.set_result(None)

        def data_received(self, data):
            self.data.extend(data)

        def connection_lost(self, exc):
            self.closed = True
            self.fut.set_result(None)

        async def wait_closed(self):
            await self.fut

    @classmethod
    def setUpClass(cls):
        # Create the fixture file once for the whole class.
        with open(support.TESTFN, 'wb') as fp:
            fp.write(cls.DATA)
        super().setUpClass()

    @classmethod
    def tearDownClass(cls):
        support.unlink(support.TESTFN)
        super().tearDownClass()

    def setUp(self):
        self.loop = asyncio.new_event_loop()
        self.set_event_loop(self.loop)
        self.file = open(support.TESTFN, 'rb')
        self.addCleanup(self.file.close)
        super().setUp()

    def make_socket(self, cleanup=True):
        """Return a non-blocking TCP socket with tiny send/receive
        buffers (to make partial sends likely)."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setblocking(False)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1024)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1024)
        if cleanup:
            self.addCleanup(sock.close)
        return sock

    def run_loop(self, coro):
        return self.loop.run_until_complete(coro)

    def prepare(self):
        """Return a connected (client socket, server protocol) pair;
        both ends are torn down via addCleanup."""
        sock = self.make_socket()
        proto = self.MyProto(self.loop)
        port = support.find_unused_port()
        srv_sock = self.make_socket(cleanup=False)
        srv_sock.bind((support.HOST, port))
        server = self.run_loop(self.loop.create_server(
            lambda: proto, sock=srv_sock))
        self.run_loop(self.loop.sock_connect(sock, (support.HOST, port)))
        self.run_loop(proto._ready)

        def cleanup():
            proto.transport.close()
            self.run_loop(proto.wait_closed())
            server.close()
            self.run_loop(server.wait_closed())

        self.addCleanup(cleanup)
        return sock, proto

    def test_sock_sendfile_not_available(self):
        """SendfileNotAvailableError when the os module lacks sendfile()."""
        sock, proto = self.prepare()
        with mock.patch('asyncio.unix_events.os', spec=[]):
            with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
                                        "os[.]sendfile[(][)] is not available"):
                self.run_loop(self.loop._sock_sendfile_native(sock, self.file,
                                                              0, None))
        # the file position must be untouched on failure
        self.assertEqual(self.file.tell(), 0)

    def test_sock_sendfile_not_a_file(self):
        """Objects without a file descriptor are rejected."""
        sock, proto = self.prepare()
        f = object()
        with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
                                    "not a regular file"):
            self.run_loop(self.loop._sock_sendfile_native(sock, f,
                                                          0, None))
        self.assertEqual(self.file.tell(), 0)

    def test_sock_sendfile_iobuffer(self):
        """In-memory buffers (BytesIO) are rejected."""
        sock, proto = self.prepare()
        f = io.BytesIO()
        with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
                                    "not a regular file"):
            self.run_loop(self.loop._sock_sendfile_native(sock, f,
                                                          0, None))
        self.assertEqual(self.file.tell(), 0)

    def test_sock_sendfile_not_regular_file(self):
        """A file-like object with an invalid fileno() is rejected."""
        sock, proto = self.prepare()
        f = mock.Mock()
        f.fileno.return_value = -1
        with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
                                    "not a regular file"):
            self.run_loop(self.loop._sock_sendfile_native(sock, f,
                                                          0, None))
        self.assertEqual(self.file.tell(), 0)

    def test_sock_sendfile_cancel1(self):
        """Cancelling before the fd was registered leaves no stale
        selector key behind."""
        sock, proto = self.prepare()

        fut = self.loop.create_future()
        fileno = self.file.fileno()
        self.loop._sock_sendfile_native_impl(fut, None, sock, fileno,
                                             0, None, len(self.DATA), 0)
        fut.cancel()
        with contextlib.suppress(asyncio.CancelledError):
            self.run_loop(fut)
        with self.assertRaises(KeyError):
            self.loop._selector.get_key(sock)

    def test_sock_sendfile_cancel2(self):
        """Cancelling after registration unregisters the fd on the next
        callback invocation."""
        sock, proto = self.prepare()

        fut = self.loop.create_future()
        fileno = self.file.fileno()
        self.loop._sock_sendfile_native_impl(fut, None, sock, fileno,
                                             0, None, len(self.DATA), 0)
        fut.cancel()
        self.loop._sock_sendfile_native_impl(fut, sock.fileno(), sock, fileno,
                                             0, None, len(self.DATA), 0)
        with self.assertRaises(KeyError):
            self.loop._selector.get_key(sock)

    def test_sock_sendfile_blocking_error(self):
        """EAGAIN registers a writer callback and keeps the future alive."""
        sock, proto = self.prepare()

        fileno = self.file.fileno()
        fut = mock.Mock()
        fut.cancelled.return_value = False
        with mock.patch('os.sendfile', side_effect=BlockingIOError()):
            self.loop._sock_sendfile_native_impl(fut, None, sock, fileno,
                                                 0, None, len(self.DATA), 0)
        key = self.loop._selector.get_key(sock)
        self.assertIsNotNone(key)
        fut.add_done_callback.assert_called_once_with(mock.ANY)

    def test_sock_sendfile_os_error_first_call(self):
        """An OSError on the very first sendfile() call is reported as
        SendfileNotAvailableError (fall back to the generic path)."""
        sock, proto = self.prepare()

        fileno = self.file.fileno()
        fut = self.loop.create_future()
        with mock.patch('os.sendfile', side_effect=OSError()):
            self.loop._sock_sendfile_native_impl(fut, None, sock, fileno,
                                                 0, None, len(self.DATA), 0)
        with self.assertRaises(KeyError):
            self.loop._selector.get_key(sock)
        exc = fut.exception()
        self.assertIsInstance(exc, asyncio.SendfileNotAvailableError)
        self.assertEqual(0, self.file.tell())

    def test_sock_sendfile_os_error_next_call(self):
        """An OSError after some bytes were sent propagates as-is and
        the file position reflects the bytes already sent."""
        sock, proto = self.prepare()

        fileno = self.file.fileno()
        fut = self.loop.create_future()
        err = OSError()
        with mock.patch('os.sendfile', side_effect=err):
            self.loop._sock_sendfile_native_impl(fut, sock.fileno(),
                                                 sock, fileno,
                                                 1000, None, len(self.DATA),
                                                 1000)
        with self.assertRaises(KeyError):
            self.loop._selector.get_key(sock)
        exc = fut.exception()
        self.assertIs(exc, err)
        self.assertEqual(1000, self.file.tell())

    def test_sock_sendfile_exception(self):
        """A SendfileNotAvailableError raised mid-transfer propagates
        unchanged."""
        sock, proto = self.prepare()

        fileno = self.file.fileno()
        fut = self.loop.create_future()
        err = asyncio.SendfileNotAvailableError()
        with mock.patch('os.sendfile', side_effect=err):
            self.loop._sock_sendfile_native_impl(fut, sock.fileno(),
                                                 sock, fileno,
                                                 1000, None, len(self.DATA),
                                                 1000)
        with self.assertRaises(KeyError):
            self.loop._selector.get_key(sock)
        exc = fut.exception()
        self.assertIs(exc, err)
        self.assertEqual(1000, self.file.tell())
class UnixReadPipeTransportTests(test_utils.TestCase):
    """Unit tests for _UnixReadPipeTransport driven against a mocked
    pipe object and a patched os.read()."""

    def setUp(self):
        super().setUp()
        self.loop = self.new_test_loop()
        self.protocol = test_utils.make_test_protocol(asyncio.Protocol)
        self.pipe = mock.Mock(spec_set=io.RawIOBase)
        self.pipe.fileno.return_value = 5

        # the transport sets the fd non-blocking; stub that out
        blocking_patcher = mock.patch('os.set_blocking')
        blocking_patcher.start()
        self.addCleanup(blocking_patcher.stop)

        # make fd 5 look like a FIFO to the transport's fstat() check
        fstat_patcher = mock.patch('os.fstat')
        m_fstat = fstat_patcher.start()
        st = mock.Mock()
        st.st_mode = stat.S_IFIFO
        m_fstat.return_value = st
        self.addCleanup(fstat_patcher.stop)

    def read_pipe_transport(self, waiter=None):
        """Create the transport under test; closing is deferred to
        test cleanup."""
        transport = unix_events._UnixReadPipeTransport(self.loop, self.pipe,
                                                       self.protocol,
                                                       waiter=waiter)
        self.addCleanup(close_pipe_transport, transport)
        return transport

    def test_ctor(self):
        """Construction notifies the protocol, registers a reader and
        resolves the waiter."""
        waiter = asyncio.Future(loop=self.loop)
        tr = self.read_pipe_transport(waiter=waiter)
        self.loop.run_until_complete(waiter)

        self.protocol.connection_made.assert_called_with(tr)
        self.loop.assert_reader(5, tr._read_ready)
        self.assertIsNone(waiter.result())

    @mock.patch('os.read')
    def test__read_ready(self, m_read):
        tr = self.read_pipe_transport()
        m_read.return_value = b'data'
        tr._read_ready()

        m_read.assert_called_with(5, tr.max_size)
        self.protocol.data_received.assert_called_with(b'data')

    @mock.patch('os.read')
    def test__read_ready_eof(self, m_read):
        """EOF (empty read) unregisters the reader and closes the
        connection."""
        tr = self.read_pipe_transport()
        m_read.return_value = b''
        tr._read_ready()

        m_read.assert_called_with(5, tr.max_size)
        self.assertFalse(self.loop.readers)
        test_utils.run_briefly(self.loop)
        self.protocol.eof_received.assert_called_with()
        self.protocol.connection_lost.assert_called_with(None)

    @mock.patch('os.read')
    def test__read_ready_blocked(self, m_read):
        """EAGAIN is silently ignored; no data is delivered."""
        tr = self.read_pipe_transport()
        m_read.side_effect = BlockingIOError
        tr._read_ready()

        m_read.assert_called_with(5, tr.max_size)
        test_utils.run_briefly(self.loop)
        self.assertFalse(self.protocol.data_received.called)

    @mock.patch('asyncio.log.logger.error')
    @mock.patch('os.read')
    def test__read_ready_error(self, m_read, m_logexc):
        """Any other OSError closes the transport and logs a fatal
        read error."""
        tr = self.read_pipe_transport()
        err = OSError()
        m_read.side_effect = err
        tr._close = mock.Mock()
        tr._read_ready()

        m_read.assert_called_with(5, tr.max_size)
        tr._close.assert_called_with(err)
        m_logexc.assert_called_with(
            test_utils.MockPattern(
                'Fatal read error on pipe transport'
                '\nprotocol:.*\ntransport:.*'),
            exc_info=(OSError, MOCK_ANY, MOCK_ANY))

    @mock.patch('os.read')
    def test_pause_reading(self, m_read):
        tr = self.read_pipe_transport()
        m = mock.Mock()
        self.loop.add_reader(5, m)
        tr.pause_reading()
        self.assertFalse(self.loop.readers)

    @mock.patch('os.read')
    def test_resume_reading(self, m_read):
        tr = self.read_pipe_transport()
        tr.resume_reading()
        self.loop.assert_reader(5, tr._read_ready)

    @mock.patch('os.read')
    def test_close(self, m_read):
        tr = self.read_pipe_transport()
        tr._close = mock.Mock()
        tr.close()
        tr._close.assert_called_with(None)

    @mock.patch('os.read')
    def test_close_already_closing(self, m_read):
        """close() is a no-op when the transport is already closing."""
        tr = self.read_pipe_transport()
        tr._closing = True
        tr._close = mock.Mock()
        tr.close()
        self.assertFalse(tr._close.called)

    @mock.patch('os.read')
    def test__close(self, m_read):
        tr = self.read_pipe_transport()
        err = object()
        tr._close(err)
        self.assertTrue(tr.is_closing())
        self.assertFalse(self.loop.readers)
        test_utils.run_briefly(self.loop)
        self.protocol.connection_lost.assert_called_with(err)

    def test__call_connection_lost(self):
        """_call_connection_lost() closes the pipe and drops the
        protocol/loop references."""
        tr = self.read_pipe_transport()
        self.assertIsNotNone(tr._protocol)
        self.assertIsNotNone(tr._loop)

        err = None
        tr._call_connection_lost(err)
        self.protocol.connection_lost.assert_called_with(err)
        self.pipe.close.assert_called_with()

        self.assertIsNone(tr._protocol)
        self.assertIsNone(tr._loop)

    def test__call_connection_lost_with_err(self):
        """Same as above, with an exception instance as the reason."""
        tr = self.read_pipe_transport()
        self.assertIsNotNone(tr._protocol)
        self.assertIsNotNone(tr._loop)

        err = OSError()
        tr._call_connection_lost(err)
        self.protocol.connection_lost.assert_called_with(err)
        self.pipe.close.assert_called_with()

        self.assertIsNone(tr._protocol)
        self.assertIsNone(tr._loop)
class UnixWritePipeTransportTests(test_utils.TestCase):
    """Unit tests for _UnixWritePipeTransport driven against a mocked
    pipe object and a patched os.write()."""

    def setUp(self):
        super().setUp()
        self.loop = self.new_test_loop()
        self.protocol = test_utils.make_test_protocol(asyncio.BaseProtocol)
        self.pipe = mock.Mock(spec_set=io.RawIOBase)
        self.pipe.fileno.return_value = 5

        # the transport sets the fd non-blocking; stub that out
        blocking_patcher = mock.patch('os.set_blocking')
        blocking_patcher.start()
        self.addCleanup(blocking_patcher.stop)

        # make fd 5 look like a socket to the transport's fstat() check
        fstat_patcher = mock.patch('os.fstat')
        m_fstat = fstat_patcher.start()
        st = mock.Mock()
        st.st_mode = stat.S_IFSOCK
        m_fstat.return_value = st
        self.addCleanup(fstat_patcher.stop)

    def write_pipe_transport(self, waiter=None):
        """Create the transport under test; closing is deferred to
        test cleanup."""
        transport = unix_events._UnixWritePipeTransport(self.loop, self.pipe,
                                                        self.protocol,
                                                        waiter=waiter)
        self.addCleanup(close_pipe_transport, transport)
        return transport

    def test_ctor(self):
        """Construction notifies the protocol, registers a reader (to
        detect peer close) and resolves the waiter."""
        waiter = asyncio.Future(loop=self.loop)
        tr = self.write_pipe_transport(waiter=waiter)
        self.loop.run_until_complete(waiter)

        self.protocol.connection_made.assert_called_with(tr)
        self.loop.assert_reader(5, tr._read_ready)
        self.assertEqual(None, waiter.result())

    def test_can_write_eof(self):
        tr = self.write_pipe_transport()
        self.assertTrue(tr.can_write_eof())

    @mock.patch('os.write')
    def test_write(self, m_write):
        """A complete write leaves no writer registered and an empty
        buffer."""
        tr = self.write_pipe_transport()
        m_write.return_value = 4
        tr.write(b'data')
        m_write.assert_called_with(5, b'data')
        self.assertFalse(self.loop.writers)
        self.assertEqual(bytearray(), tr._buffer)

    @mock.patch('os.write')
    def test_write_no_data(self, m_write):
        """Writing b'' is a no-op."""
        tr = self.write_pipe_transport()
        tr.write(b'')
        self.assertFalse(m_write.called)
        self.assertFalse(self.loop.writers)
        self.assertEqual(bytearray(b''), tr._buffer)

    @mock.patch('os.write')
    def test_write_partial(self, m_write):
        """The unsent tail is buffered and a writer is registered."""
        tr = self.write_pipe_transport()
        m_write.return_value = 2
        tr.write(b'data')
        self.loop.assert_writer(5, tr._write_ready)
        self.assertEqual(bytearray(b'ta'), tr._buffer)

    @mock.patch('os.write')
    def test_write_buffer(self, m_write):
        """While data is already buffered, new data is only appended —
        no direct os.write() attempt is made."""
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = bytearray(b'previous')
        tr.write(b'data')
        self.assertFalse(m_write.called)
        self.loop.assert_writer(5, tr._write_ready)
        self.assertEqual(bytearray(b'previousdata'), tr._buffer)

    @mock.patch('os.write')
    def test_write_again(self, m_write):
        """EAGAIN buffers everything and registers a writer."""
        tr = self.write_pipe_transport()
        m_write.side_effect = BlockingIOError()
        tr.write(b'data')
        m_write.assert_called_with(5, bytearray(b'data'))
        self.loop.assert_writer(5, tr._write_ready)
        self.assertEqual(bytearray(b'data'), tr._buffer)

    @mock.patch('asyncio.unix_events.logger')
    @mock.patch('os.write')
    def test_write_err(self, m_write, m_log):
        """An OSError triggers _fatal_error(); subsequent writes only
        bump _conn_lost, and repeated writes log a warning."""
        tr = self.write_pipe_transport()
        err = OSError()
        m_write.side_effect = err
        tr._fatal_error = mock.Mock()
        tr.write(b'data')
        m_write.assert_called_with(5, b'data')
        self.assertFalse(self.loop.writers)
        self.assertEqual(bytearray(), tr._buffer)
        tr._fatal_error.assert_called_with(
            err,
            'Fatal write error on pipe transport')
        self.assertEqual(1, tr._conn_lost)

        tr.write(b'data')
        self.assertEqual(2, tr._conn_lost)
        tr.write(b'data')
        tr.write(b'data')
        tr.write(b'data')
        tr.write(b'data')
        # This is a bit overspecified. :-(
        m_log.warning.assert_called_with(
            'pipe closed by peer or os.write(pipe, data) raised exception.')
        tr.close()

    @mock.patch('os.write')
    def test_write_close(self, m_write):
        """Writes after the peer closed the pipe only bump _conn_lost."""
        tr = self.write_pipe_transport()
        tr._read_ready()  # pipe was closed by peer

        tr.write(b'data')
        self.assertEqual(tr._conn_lost, 1)
        tr.write(b'data')
        self.assertEqual(tr._conn_lost, 2)

    def test__read_ready(self):
        """Readability on a write pipe means the peer closed it: the
        transport must shut down."""
        tr = self.write_pipe_transport()
        tr._read_ready()
        self.assertFalse(self.loop.readers)
        self.assertFalse(self.loop.writers)
        self.assertTrue(tr.is_closing())
        test_utils.run_briefly(self.loop)
        self.protocol.connection_lost.assert_called_with(None)

    @mock.patch('os.write')
    def test__write_ready(self, m_write):
        """A complete flush unregisters the writer."""
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = bytearray(b'data')
        m_write.return_value = 4
        tr._write_ready()
        self.assertFalse(self.loop.writers)
        self.assertEqual(bytearray(), tr._buffer)

    @mock.patch('os.write')
    def test__write_ready_partial(self, m_write):
        """A partial flush keeps the writer and the unsent tail."""
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = bytearray(b'data')
        m_write.return_value = 3
        tr._write_ready()
        self.loop.assert_writer(5, tr._write_ready)
        self.assertEqual(bytearray(b'a'), tr._buffer)

    @mock.patch('os.write')
    def test__write_ready_again(self, m_write):
        """EAGAIN keeps the writer and the full buffer."""
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = bytearray(b'data')
        m_write.side_effect = BlockingIOError()
        tr._write_ready()
        m_write.assert_called_with(5, bytearray(b'data'))
        self.loop.assert_writer(5, tr._write_ready)
        self.assertEqual(bytearray(b'data'), tr._buffer)

    @mock.patch('os.write')
    def test__write_ready_empty(self, m_write):
        """A zero-byte write keeps the writer and the full buffer."""
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = bytearray(b'data')
        m_write.return_value = 0
        tr._write_ready()
        m_write.assert_called_with(5, bytearray(b'data'))
        self.loop.assert_writer(5, tr._write_ready)
        self.assertEqual(bytearray(b'data'), tr._buffer)

    @mock.patch('asyncio.log.logger.error')
    @mock.patch('os.write')
    def test__write_ready_err(self, m_write, m_logexc):
        """An OSError during flush closes the transport without
        logging (a broken pipe on write is not an unexpected error)."""
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = bytearray(b'data')
        m_write.side_effect = err = OSError()
        tr._write_ready()
        self.assertFalse(self.loop.writers)
        self.assertFalse(self.loop.readers)
        self.assertEqual(bytearray(), tr._buffer)
        self.assertTrue(tr.is_closing())
        m_logexc.assert_not_called()
        self.assertEqual(1, tr._conn_lost)
        test_utils.run_briefly(self.loop)
        self.protocol.connection_lost.assert_called_with(err)

    @mock.patch('os.write')
    def test__write_ready_closing(self, m_write):
        """Flushing the last bytes of a closing transport closes the
        pipe and notifies the protocol."""
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._closing = True
        tr._buffer = bytearray(b'data')
        m_write.return_value = 4
        tr._write_ready()
        self.assertFalse(self.loop.writers)
        self.assertFalse(self.loop.readers)
        self.assertEqual(bytearray(), tr._buffer)
        self.protocol.connection_lost.assert_called_with(None)
        self.pipe.close.assert_called_with()

    @mock.patch('os.write')
    def test_abort(self, m_write):
        """abort() drops the buffer without writing and shuts down."""
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        self.loop.add_reader(5, tr._read_ready)
        tr._buffer = [b'da', b'ta']
        tr.abort()
        self.assertFalse(m_write.called)
        self.assertFalse(self.loop.readers)
        self.assertFalse(self.loop.writers)
        self.assertEqual([], tr._buffer)
        self.assertTrue(tr.is_closing())
        test_utils.run_briefly(self.loop)
        self.protocol.connection_lost.assert_called_with(None)

    def test__call_connection_lost(self):
        """_call_connection_lost() closes the pipe and drops the
        protocol/loop references."""
        tr = self.write_pipe_transport()
        self.assertIsNotNone(tr._protocol)
        self.assertIsNotNone(tr._loop)

        err = None
        tr._call_connection_lost(err)
        self.protocol.connection_lost.assert_called_with(err)
        self.pipe.close.assert_called_with()

        self.assertIsNone(tr._protocol)
        self.assertIsNone(tr._loop)

    def test__call_connection_lost_with_err(self):
        """Same as above, with an exception instance as the reason."""
        tr = self.write_pipe_transport()
        self.assertIsNotNone(tr._protocol)
        self.assertIsNotNone(tr._loop)

        err = OSError()
        tr._call_connection_lost(err)
        self.protocol.connection_lost.assert_called_with(err)
        self.pipe.close.assert_called_with()

        self.assertIsNone(tr._protocol)
        self.assertIsNone(tr._loop)

    def test_close(self):
        """close() writes EOF first; closing twice must not fail."""
        tr = self.write_pipe_transport()
        tr.write_eof = mock.Mock()
        tr.close()
        tr.write_eof.assert_called_with()

        # closing the transport twice must not fail
        tr.close()

    def test_close_closing(self):
        """close() is a no-op when the transport is already closing."""
        tr = self.write_pipe_transport()
        tr.write_eof = mock.Mock()
        tr._closing = True
        tr.close()
        self.assertFalse(tr.write_eof.called)

    def test_write_eof(self):
        """With an empty buffer, write_eof() closes immediately."""
        tr = self.write_pipe_transport()
        tr.write_eof()
        self.assertTrue(tr.is_closing())
        self.assertFalse(self.loop.readers)
        test_utils.run_briefly(self.loop)
        self.protocol.connection_lost.assert_called_with(None)

    def test_write_eof_pending(self):
        """With buffered data, write_eof() defers the actual close."""
        tr = self.write_pipe_transport()
        tr._buffer = [b'data']
        tr.write_eof()
        self.assertTrue(tr.is_closing())
        self.assertFalse(self.protocol.connection_lost.called)
class AbstractChildWatcherTests(unittest.TestCase):
    """AbstractChildWatcher must leave every operation unimplemented."""

    def test_not_implemented(self):
        dummy = mock.Mock()
        watcher = asyncio.AbstractChildWatcher()
        for invoke in (
            lambda: watcher.add_child_handler(dummy, dummy),
            lambda: watcher.remove_child_handler(dummy),
            lambda: watcher.attach_loop(dummy),
            watcher.close,
            watcher.is_active,
            watcher.__enter__,
            lambda: watcher.__exit__(dummy, dummy, dummy),
        ):
            with self.assertRaises(NotImplementedError):
                invoke()
class BaseChildWatcherTests(unittest.TestCase):
    """BaseChildWatcher leaves _do_waitpid abstract."""

    def test_not_implemented(self):
        dummy = mock.Mock()
        watcher = unix_events.BaseChildWatcher()
        with self.assertRaises(NotImplementedError):
            watcher._do_waitpid(dummy)
# Bundle of the mocks replacing os.waitpid() and the exit-status
# inspection macros during child-watcher tests.
WaitPidMocks = collections.namedtuple(
    "WaitPidMocks",
    "waitpid WIFEXITED WIFSIGNALED WEXITSTATUS WTERMSIG",
)
class ChildWatcherTestsMixin:
ignore_warnings = mock.patch.object(log.logger, "warning")
    def setUp(self):
        """Create a fresh test loop and a watcher attached to it; the
        real add_signal_handler() call is mocked out."""
        super().setUp()
        self.loop = self.new_test_loop()
        self.running = False  # True while fake children are "alive"
        self.zombies = {}  # pid -> fake encoded exit status

        with mock.patch.object(
                self.loop, "add_signal_handler") as self.m_add_signal_handler:
            self.watcher = self.create_watcher()
            self.watcher.attach_loop(self.loop)
    def waitpid(self, pid, flags):
        """Fake os.waitpid() backed by self.zombies.

        A SafeChildWatcher must always wait on an explicit pid (never
        -1).  Returns (0, 0) while children are "running"; raises
        ChildProcessError once nothing is left to reap.
        """
        if isinstance(self.watcher, asyncio.SafeChildWatcher) or pid != -1:
            self.assertGreater(pid, 0)
        try:
            if pid < 0:
                return self.zombies.popitem()
            else:
                return pid, self.zombies.pop(pid)
        except KeyError:
            pass
        if self.running:
            return 0, 0
        else:
            raise ChildProcessError()
def add_zombie(self, pid, returncode):
self.zombies[pid] = returncode + 32768
def WIFEXITED(self, status):
return status >= 32768
def WIFSIGNALED(self, status):
return 32700 < status < 32768
    def WEXITSTATUS(self, status):
        """Fake os.WEXITSTATUS(): invert add_zombie()'s encoding."""
        self.assertTrue(self.WIFEXITED(status))
        return status - 32768
    def WTERMSIG(self, status):
        """Fake os.WTERMSIG(): signals are encoded as 32768 - signum."""
        self.assertTrue(self.WIFSIGNALED(status))
        return 32768 - status
    def test_create_watcher(self):
        """attach_loop() must register the SIGCHLD handler exactly once."""
        self.m_add_signal_handler.assert_called_once_with(
            signal.SIGCHLD, self.watcher._sig_chld)
    def waitpid_mocks(func):
        """Decorator for test methods: patch os.waitpid() and the W*()
        status macros with this mixin's fakes and pass the resulting
        mocks to the test as a WaitPidMocks namedtuple."""
        def wrapped_func(self):
            def patch(target, wrapper):
                # wraps= keeps the fake's behavior while recording calls
                return mock.patch(target, wraps=wrapper,
                                  new_callable=mock.Mock)

            with patch('os.WTERMSIG', self.WTERMSIG) as m_WTERMSIG, \
                 patch('os.WEXITSTATUS', self.WEXITSTATUS) as m_WEXITSTATUS, \
                 patch('os.WIFSIGNALED', self.WIFSIGNALED) as m_WIFSIGNALED, \
                 patch('os.WIFEXITED', self.WIFEXITED) as m_WIFEXITED, \
                 patch('os.waitpid', self.waitpid) as m_waitpid:
                func(self, WaitPidMocks(m_waitpid,
                                        m_WIFEXITED, m_WIFSIGNALED,
                                        m_WEXITSTATUS, m_WTERMSIG,
                                        ))
        return wrapped_func
    @waitpid_mocks
    def test_sigchld(self, m):
        """Full lifecycle of one watched child: running, normal exit,
        reaping, then spurious SIGCHLDs afterwards."""
        # register a child
        callback = mock.Mock()

        with self.watcher:
            self.running = True
            self.watcher.add_child_handler(42, callback, 9, 10, 14)

        self.assertFalse(callback.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # child is running
        self.watcher._sig_chld()

        self.assertFalse(callback.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # child terminates (returncode 12)
        self.running = False
        self.add_zombie(42, 12)
        self.watcher._sig_chld()

        self.assertTrue(m.WIFEXITED.called)
        self.assertTrue(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)
        callback.assert_called_once_with(42, 12, 9, 10, 14)

        m.WIFSIGNALED.reset_mock()
        m.WIFEXITED.reset_mock()
        m.WEXITSTATUS.reset_mock()
        callback.reset_mock()

        # ensure that the child is effectively reaped
        self.add_zombie(42, 13)
        with self.ignore_warnings:
            self.watcher._sig_chld()

        self.assertFalse(callback.called)
        self.assertFalse(m.WTERMSIG.called)

        m.WIFSIGNALED.reset_mock()
        m.WIFEXITED.reset_mock()
        m.WEXITSTATUS.reset_mock()

        # sigchld called again
        self.zombies.clear()
        self.watcher._sig_chld()

        self.assertFalse(callback.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)
    @waitpid_mocks
    def test_sigchld_two_children(self, m):
        """Two watched children terminating at different times must be
        dispatched independently (one by signal, one by exit code)."""
        callback1 = mock.Mock()
        callback2 = mock.Mock()

        # register child 1
        with self.watcher:
            self.running = True
            self.watcher.add_child_handler(43, callback1, 7, 8)

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # register child 2
        with self.watcher:
            self.watcher.add_child_handler(44, callback2, 147, 18)

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # children are running
        self.watcher._sig_chld()

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # child 1 terminates (signal 3)
        self.add_zombie(43, -3)
        self.watcher._sig_chld()

        callback1.assert_called_once_with(43, -3, 7, 8)
        self.assertFalse(callback2.called)
        self.assertTrue(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertTrue(m.WTERMSIG.called)

        m.WIFSIGNALED.reset_mock()
        m.WIFEXITED.reset_mock()
        m.WTERMSIG.reset_mock()
        callback1.reset_mock()

        # child 2 still running
        self.watcher._sig_chld()

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # child 2 terminates (code 108)
        self.add_zombie(44, 108)
        self.running = False
        self.watcher._sig_chld()

        callback2.assert_called_once_with(44, 108, 147, 18)
        self.assertFalse(callback1.called)
        self.assertTrue(m.WIFEXITED.called)
        self.assertTrue(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        m.WIFSIGNALED.reset_mock()
        m.WIFEXITED.reset_mock()
        m.WEXITSTATUS.reset_mock()
        callback2.reset_mock()

        # ensure that the children are effectively reaped
        self.add_zombie(43, 14)
        self.add_zombie(44, 15)
        with self.ignore_warnings:
            self.watcher._sig_chld()

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WTERMSIG.called)

        m.WIFSIGNALED.reset_mock()
        m.WIFEXITED.reset_mock()
        m.WEXITSTATUS.reset_mock()

        # sigchld called again
        self.zombies.clear()
        self.watcher._sig_chld()

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)
    @waitpid_mocks
    def test_sigchld_two_children_terminating_together(self, m):
        """Two children terminating before one SIGCHLD delivery must
        both be dispatched from a single _sig_chld() call."""
        callback1 = mock.Mock()
        callback2 = mock.Mock()

        # register child 1
        with self.watcher:
            self.running = True
            self.watcher.add_child_handler(45, callback1, 17, 8)

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # register child 2
        with self.watcher:
            self.watcher.add_child_handler(46, callback2, 1147, 18)

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # children are running
        self.watcher._sig_chld()

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # child 1 terminates (code 78)
        # child 2 terminates (signal 5)
        self.add_zombie(45, 78)
        self.add_zombie(46, -5)
        self.running = False
        self.watcher._sig_chld()

        callback1.assert_called_once_with(45, 78, 17, 8)
        callback2.assert_called_once_with(46, -5, 1147, 18)
        self.assertTrue(m.WIFSIGNALED.called)
        self.assertTrue(m.WIFEXITED.called)
        self.assertTrue(m.WEXITSTATUS.called)
        self.assertTrue(m.WTERMSIG.called)

        m.WIFSIGNALED.reset_mock()
        m.WIFEXITED.reset_mock()
        m.WTERMSIG.reset_mock()
        m.WEXITSTATUS.reset_mock()
        callback1.reset_mock()
        callback2.reset_mock()

        # ensure that the children are effectively reaped
        self.add_zombie(45, 14)
        self.add_zombie(46, 15)
        with self.ignore_warnings:
            self.watcher._sig_chld()

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WTERMSIG.called)
    @waitpid_mocks
    def test_sigchld_race_condition(self, m):
        """A child that terminated before its handler was registered
        must still get its callback invoked (from add_child_handler)."""
        # register a child
        callback = mock.Mock()

        with self.watcher:
            # child terminates before being registered
            self.add_zombie(50, 4)
            self.watcher._sig_chld()

            self.watcher.add_child_handler(50, callback, 1, 12)

        callback.assert_called_once_with(50, 4, 1, 12)
        callback.reset_mock()

        # ensure that the child is effectively reaped
        self.add_zombie(50, -1)
        with self.ignore_warnings:
            self.watcher._sig_chld()

        self.assertFalse(callback.called)
    @waitpid_mocks
    def test_sigchld_replace_handler(self, m):
        """Re-registering a pid replaces its handler: only the second
        callback fires when the child terminates."""
        callback1 = mock.Mock()
        callback2 = mock.Mock()

        # register a child
        with self.watcher:
            self.running = True
            self.watcher.add_child_handler(51, callback1, 19)

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # register the same child again
        with self.watcher:
            self.watcher.add_child_handler(51, callback2, 21)

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # child terminates (signal 8)
        self.running = False
        self.add_zombie(51, -8)
        self.watcher._sig_chld()

        callback2.assert_called_once_with(51, -8, 21)
        self.assertFalse(callback1.called)
        self.assertTrue(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertTrue(m.WTERMSIG.called)

        m.WIFSIGNALED.reset_mock()
        m.WIFEXITED.reset_mock()
        m.WTERMSIG.reset_mock()
        callback2.reset_mock()

        # ensure that the child is effectively reaped
        self.add_zombie(51, 13)
        with self.ignore_warnings:
            self.watcher._sig_chld()

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WTERMSIG.called)
    @waitpid_mocks
    def test_sigchld_remove_handler(self, m):
        """After remove_child_handler(), termination must not invoke
        the old callback."""
        callback = mock.Mock()

        # register a child
        with self.watcher:
            self.running = True
            self.watcher.add_child_handler(52, callback, 1984)

        self.assertFalse(callback.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # unregister the child
        self.watcher.remove_child_handler(52)

        self.assertFalse(callback.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # child terminates (code 99)
        self.running = False
        self.add_zombie(52, 99)
        with self.ignore_warnings:
            self.watcher._sig_chld()

        self.assertFalse(callback.called)
    @waitpid_mocks
    def test_sigchld_unknown_status(self, m):
        """A status that is neither WIFEXITED nor WIFSIGNALED is passed
        through to the callback unchanged."""
        callback = mock.Mock()

        # register a child
        with self.watcher:
            self.running = True
            self.watcher.add_child_handler(53, callback, -19)

        self.assertFalse(callback.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # terminate with unknown status
        self.zombies[53] = 1178
        self.running = False
        self.watcher._sig_chld()

        callback.assert_called_once_with(53, 1178, -19)
        self.assertTrue(m.WIFEXITED.called)
        self.assertTrue(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        callback.reset_mock()
        m.WIFEXITED.reset_mock()
        m.WIFSIGNALED.reset_mock()

        # ensure that the child is effectively reaped
        self.add_zombie(53, 101)
        with self.ignore_warnings:
            self.watcher._sig_chld()

        self.assertFalse(callback.called)
    @waitpid_mocks
    def test_remove_child_handler(self, m):
        """remove_child_handler() returns True on first removal, False
        afterwards; remaining handlers still fire."""
        callback1 = mock.Mock()
        callback2 = mock.Mock()
        callback3 = mock.Mock()

        # register children
        with self.watcher:
            self.running = True
            self.watcher.add_child_handler(54, callback1, 1)
            self.watcher.add_child_handler(55, callback2, 2)
            self.watcher.add_child_handler(56, callback3, 3)

        # remove child handler 1
        self.assertTrue(self.watcher.remove_child_handler(54))

        # remove child handler 2 multiple times
        self.assertTrue(self.watcher.remove_child_handler(55))
        self.assertFalse(self.watcher.remove_child_handler(55))
        self.assertFalse(self.watcher.remove_child_handler(55))

        # all children terminate
        self.add_zombie(54, 0)
        self.add_zombie(55, 1)
        self.add_zombie(56, 2)
        self.running = False
        with self.ignore_warnings:
            self.watcher._sig_chld()

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        callback3.assert_called_once_with(56, 2, 3)
    @waitpid_mocks
    def test_sigchld_unhandled_exception(self, m):
        """An exception raised inside waitpid() must be caught by the
        signal handler and logged, not propagated."""
        callback = mock.Mock()

        # register a child
        with self.watcher:
            self.running = True
            self.watcher.add_child_handler(57, callback)

        # raise an exception
        m.waitpid.side_effect = ValueError

        with mock.patch.object(log.logger,
                               'error') as m_error:

            self.assertEqual(self.watcher._sig_chld(), None)
            self.assertTrue(m_error.called)
    @waitpid_mocks
    def test_sigchld_child_reaped_elsewhere(self, m):
        """If someone else calls waitpid() on a watched child, the
        watcher reports returncode 255 (except for FastChildWatcher)."""
        # register a child
        callback = mock.Mock()

        with self.watcher:
            self.running = True
            self.watcher.add_child_handler(58, callback)

        self.assertFalse(callback.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # child terminates
        self.running = False
        self.add_zombie(58, 4)

        # waitpid is called elsewhere
        os.waitpid(58, os.WNOHANG)

        m.waitpid.reset_mock()

        # sigchld
        with self.ignore_warnings:
            self.watcher._sig_chld()

        if isinstance(self.watcher, asyncio.FastChildWatcher):
            # here the FastChildWatcher enters a deadlock
            # (there is no way to prevent it)
            self.assertFalse(callback.called)
        else:
            callback.assert_called_once_with(58, 255)
@waitpid_mocks
def test_sigchld_unknown_pid_during_registration(self, m):
    """A SIGCHLD for an unknown pid arriving around registration must
    not disturb delivery for the pid being registered."""
    # register two children
    callback1 = mock.Mock()
    callback2 = mock.Mock()
    with self.ignore_warnings, self.watcher:
        self.running = True
        # child 1 terminates
        self.add_zombie(591, 7)
        # an unknown child terminates
        self.add_zombie(593, 17)
        self.watcher._sig_chld()
        self.watcher.add_child_handler(591, callback1)
        self.watcher.add_child_handler(592, callback2)
    # the already-terminated child is reported on registration;
    # the still-running one is not
    callback1.assert_called_once_with(591, 7)
    self.assertFalse(callback2.called)
@waitpid_mocks
def test_set_loop(self, m):
    """attach_loop() moves the SIGCHLD handler from the old loop to the
    new one, and callbacks keep firing afterwards."""
    # register a child
    callback = mock.Mock()
    with self.watcher:
        self.running = True
        self.watcher.add_child_handler(60, callback)
    # attach a new loop
    old_loop = self.loop
    self.loop = self.new_test_loop()
    patch = mock.patch.object
    with patch(old_loop, "remove_signal_handler") as m_old_remove, \
            patch(self.loop, "add_signal_handler") as m_new_add:
        self.watcher.attach_loop(self.loop)
        # the handler must be unregistered from the old loop and
        # re-registered on the new one
        m_old_remove.assert_called_once_with(
            signal.SIGCHLD)
        m_new_add.assert_called_once_with(
            signal.SIGCHLD, self.watcher._sig_chld)
    # child terminates
    self.running = False
    self.add_zombie(60, 9)
    self.watcher._sig_chld()
    callback.assert_called_once_with(60, 9)
@waitpid_mocks
def test_set_loop_race_condition(self, m):
    """While no loop is attached SIGCHLD is lost; re-attaching a loop
    reaps and reports the children that died in the meantime."""
    # register 3 children
    callback1 = mock.Mock()
    callback2 = mock.Mock()
    callback3 = mock.Mock()
    with self.watcher:
        self.running = True
        self.watcher.add_child_handler(61, callback1)
        self.watcher.add_child_handler(62, callback2)
        self.watcher.add_child_handler(622, callback3)
    # detach the loop
    old_loop = self.loop
    self.loop = None
    with mock.patch.object(
            old_loop, "remove_signal_handler") as m_remove_signal_handler:
        with self.assertWarnsRegex(
                RuntimeWarning, 'A loop is being detached'):
            self.watcher.attach_loop(None)
        m_remove_signal_handler.assert_called_once_with(
            signal.SIGCHLD)
    # child 1 & 2 terminate
    self.add_zombie(61, 11)
    self.add_zombie(62, -5)
    # SIGCHLD was not caught
    self.assertFalse(callback1.called)
    self.assertFalse(callback2.called)
    self.assertFalse(callback3.called)
    # attach a new loop
    self.loop = self.new_test_loop()
    with mock.patch.object(
            self.loop, "add_signal_handler") as m_add_signal_handler:
        self.watcher.attach_loop(self.loop)
        m_add_signal_handler.assert_called_once_with(
            signal.SIGCHLD, self.watcher._sig_chld)
        # attaching the loop reaps the children that died while no loop
        # was attached
        callback1.assert_called_once_with(61, 11)  # race condition!
        callback2.assert_called_once_with(62, -5)  # race condition!
        self.assertFalse(callback3.called)
    callback1.reset_mock()
    callback2.reset_mock()
    # child 3 terminates
    self.running = False
    self.add_zombie(622, 19)
    self.watcher._sig_chld()
    self.assertFalse(callback1.called)
    self.assertFalse(callback2.called)
    callback3.assert_called_once_with(622, 19)
@waitpid_mocks
def test_close(self, m):
    """close() removes the SIGCHLD handler and clears the callback (and,
    for FastChildWatcher, zombie) bookkeeping."""
    # register two children
    callback1 = mock.Mock()
    with self.watcher:
        self.running = True
        # child 1 terminates
        self.add_zombie(63, 9)
        # other child terminates
        self.add_zombie(65, 18)
        self.watcher._sig_chld()
        self.watcher.add_child_handler(63, callback1)
        self.watcher.add_child_handler(64, callback1)
    # pid 63 was reported immediately, so only pid 64 remains registered
    self.assertEqual(len(self.watcher._callbacks), 1)
    if isinstance(self.watcher, asyncio.FastChildWatcher):
        self.assertEqual(len(self.watcher._zombies), 1)
    with mock.patch.object(
            self.loop,
            "remove_signal_handler") as m_remove_signal_handler:
        self.watcher.close()
        m_remove_signal_handler.assert_called_once_with(
            signal.SIGCHLD)
        self.assertFalse(self.watcher._callbacks)
        if isinstance(self.watcher, asyncio.FastChildWatcher):
            self.assertFalse(self.watcher._zombies)
class SafeChildWatcherTests(ChildWatcherTestsMixin, test_utils.TestCase):
    """Run the shared child-watcher test suite against SafeChildWatcher."""

    def create_watcher(self):
        """Return a fresh SafeChildWatcher instance for each test."""
        watcher = asyncio.SafeChildWatcher()
        return watcher
class FastChildWatcherTests(ChildWatcherTestsMixin, test_utils.TestCase):
    """Run the shared child-watcher test suite against FastChildWatcher."""

    def create_watcher(self):
        """Return a fresh FastChildWatcher instance for each test."""
        watcher = asyncio.FastChildWatcher()
        return watcher
class PolicyTests(unittest.TestCase):
    """DefaultEventLoopPolicy child-watcher accessors and loop attachment."""

    def create_policy(self):
        return asyncio.DefaultEventLoopPolicy()

    def test_get_default_child_watcher(self):
        # the first get_child_watcher() call lazily creates a
        # ThreadedChildWatcher and caches it on the policy
        policy = self.create_policy()
        self.assertIsNone(policy._watcher)
        watcher = policy.get_child_watcher()
        self.assertIsInstance(watcher, asyncio.ThreadedChildWatcher)
        self.assertIs(policy._watcher, watcher)
        self.assertIs(watcher, policy.get_child_watcher())

    def test_get_child_watcher_after_set(self):
        # an explicitly set watcher is cached and returned as-is
        policy = self.create_policy()
        watcher = asyncio.FastChildWatcher()
        policy.set_child_watcher(watcher)
        self.assertIs(policy._watcher, watcher)
        self.assertIs(watcher, policy.get_child_watcher())

    def test_get_child_watcher_thread(self):

        def f():
            policy.set_event_loop(policy.new_event_loop())
            self.assertIsInstance(policy.get_event_loop(),
                                  asyncio.AbstractEventLoop)
            watcher = policy.get_child_watcher()
            self.assertIsInstance(watcher, asyncio.SafeChildWatcher)
            # a watcher created off the main thread has no loop attached
            self.assertIsNone(watcher._loop)
            policy.get_event_loop().close()

        policy = self.create_policy()
        th = threading.Thread(target=f)
        th.start()
        th.join()

    def test_child_watcher_replace_mainloop_existing(self):
        policy = self.create_policy()
        loop = policy.get_event_loop()
        # Explicitly set up a SafeChildWatcher here: the default
        # ThreadedChildWatcher has no _loop attribute to inspect
        watcher = asyncio.SafeChildWatcher()
        policy.set_child_watcher(watcher)
        watcher.attach_loop(loop)
        self.assertIs(watcher._loop, loop)
        # set_event_loop() must re-attach the watcher to the new loop
        new_loop = policy.new_event_loop()
        policy.set_event_loop(new_loop)
        self.assertIs(watcher._loop, new_loop)
        policy.set_event_loop(None)
        self.assertIs(watcher._loop, None)
        loop.close()
        new_loop.close()
class TestFunctional(unittest.TestCase):
    """Functional checks of add_reader/add_writer argument validation."""

    def setUp(self):
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)

    def tearDown(self):
        self.loop.close()
        asyncio.set_event_loop(None)

    def test_add_reader_invalid_argument(self):
        # non-file objects are rejected by all four fd-watching methods
        def assert_raises():
            return self.assertRaisesRegex(ValueError, r'Invalid file object')

        cb = lambda: None
        with assert_raises():
            self.loop.add_reader(object(), cb)
        with assert_raises():
            self.loop.add_writer(object(), cb)
        with assert_raises():
            self.loop.remove_reader(object())
        with assert_raises():
            self.loop.remove_writer(object())

    def test_add_reader_or_writer_transport_fd(self):
        # an fd owned by a transport may not be watched directly,
        # whether passed as a socket object or as a raw fileno
        def assert_raises():
            return self.assertRaisesRegex(
                RuntimeError,
                r'File descriptor .* is used by transport')

        async def runner():
            tr, pr = await self.loop.create_connection(
                lambda: asyncio.Protocol(), sock=rsock)
            try:
                cb = lambda: None
                with assert_raises():
                    self.loop.add_reader(rsock, cb)
                with assert_raises():
                    self.loop.add_reader(rsock.fileno(), cb)
                with assert_raises():
                    self.loop.remove_reader(rsock)
                with assert_raises():
                    self.loop.remove_reader(rsock.fileno())
                with assert_raises():
                    self.loop.add_writer(rsock, cb)
                with assert_raises():
                    self.loop.add_writer(rsock.fileno(), cb)
                with assert_raises():
                    self.loop.remove_writer(rsock)
                with assert_raises():
                    self.loop.remove_writer(rsock.fileno())
            finally:
                tr.close()

        rsock, wsock = socket.socketpair()
        try:
            self.loop.run_until_complete(runner())
        finally:
            rsock.close()
            wsock.close()
if __name__ == '__main__':
    # allow running this test module directly
    unittest.main()
| 34.515714 | 80 | 0.624345 |
import collections
import contextlib
import errno
import io
import os
import pathlib
import signal
import socket
import stat
import sys
import tempfile
import threading
import unittest
from unittest import mock
from test import support

# the whole module is POSIX-only: skip it outright on Windows
if sys.platform == 'win32':
    raise unittest.SkipTest('UNIX only')

import asyncio
from asyncio import log
from asyncio import base_events
from asyncio import events
from asyncio import unix_events
from test.test_asyncio import utils as test_utils

# shorthand wildcard for assert_called_with(..., exc_info=(T, MOCK_ANY, MOCK_ANY))
MOCK_ANY = mock.ANY
def tearDownModule():
    """Reset the global event loop policy after this module's tests run."""
    asyncio.set_event_loop_policy(None)
def close_pipe_transport(transport):
    """Close the transport's underlying pipe directly and drop the reference.

    The transport's own close() path is bypassed on purpose: in these
    tests the event loop and selector behind the transport are mocked.
    Safe to call more than once (no-op when the pipe is already gone).
    """
    pipe = transport._pipe
    if pipe is not None:
        pipe.close()
        transport._pipe = None
@unittest.skipUnless(signal, 'Signals are not supported')
class SelectorEventLoopSignalTests(test_utils.TestCase):
    """Signal-handler management on the UNIX selector event loop,
    driven through a mocked `signal` module."""

    def setUp(self):
        super().setUp()
        self.loop = asyncio.SelectorEventLoop()
        self.set_event_loop(self.loop)

    def test_check_signal(self):
        # _check_signal rejects non-int and out-of-range signal numbers
        self.assertRaises(
            TypeError, self.loop._check_signal, '1')
        self.assertRaises(
            ValueError, self.loop._check_signal, signal.NSIG + 1)

    def test_handle_signal_no_handler(self):
        # a signal with no registered handler is silently ignored
        self.loop._handle_signal(signal.NSIG + 1)

    def test_handle_signal_cancelled_handler(self):
        # a cancelled handle is removed instead of being invoked
        h = asyncio.Handle(mock.Mock(), (),
                           loop=mock.Mock())
        h.cancel()
        self.loop._signal_handlers[signal.NSIG + 1] = h
        self.loop.remove_signal_handler = mock.Mock()
        self.loop._handle_signal(signal.NSIG + 1)
        self.loop.remove_signal_handler.assert_called_with(signal.NSIG + 1)

    @mock.patch('asyncio.unix_events.signal')
    def test_add_signal_handler_setup_error(self, m_signal):
        # a failing set_wakeup_fd() surfaces as RuntimeError
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        m_signal.set_wakeup_fd.side_effect = ValueError
        self.assertRaises(
            RuntimeError,
            self.loop.add_signal_handler,
            signal.SIGINT, lambda: True)

    @mock.patch('asyncio.unix_events.signal')
    def test_add_signal_handler_coroutine_error(self, m_signal):
        m_signal.NSIG = signal.NSIG

        async def simple_coroutine():
            pass

        # callback must not be a coroutine function nor a coroutine object
        coro_func = simple_coroutine
        coro_obj = coro_func()
        self.addCleanup(coro_obj.close)
        for func in (coro_func, coro_obj):
            self.assertRaisesRegex(
                TypeError, 'coroutines cannot be used with add_signal_handler',
                self.loop.add_signal_handler,
                signal.SIGINT, func)

    @mock.patch('asyncio.unix_events.signal')
    def test_add_signal_handler(self, m_signal):
        # the callback is wrapped in an asyncio.Handle and stored
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        cb = lambda: True
        self.loop.add_signal_handler(signal.SIGHUP, cb)
        h = self.loop._signal_handlers.get(signal.SIGHUP)
        self.assertIsInstance(h, asyncio.Handle)
        self.assertEqual(h._callback, cb)

    @mock.patch('asyncio.unix_events.signal')
    def test_add_signal_handler_install_error(self, m_signal):
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals

        # only allow set_wakeup_fd(-1) to fail, so the cleanup path runs
        def set_wakeup_fd(fd):
            if fd == -1:
                raise ValueError()
        m_signal.set_wakeup_fd = set_wakeup_fd

        class Err(OSError):
            errno = errno.EFAULT
        m_signal.signal.side_effect = Err

        # a non-EINVAL OSError from signal.signal() propagates unchanged
        self.assertRaises(
            Err,
            self.loop.add_signal_handler,
            signal.SIGINT, lambda: True)

    @mock.patch('asyncio.unix_events.signal')
    @mock.patch('asyncio.base_events.logger')
    def test_add_signal_handler_install_error2(self, m_logging, m_signal):
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals

        class Err(OSError):
            errno = errno.EINVAL
        m_signal.signal.side_effect = Err

        # another handler already registered: the wakeup fd must not be
        # torn down after the EINVAL failure
        self.loop._signal_handlers[signal.SIGHUP] = lambda: True
        self.assertRaises(
            RuntimeError,
            self.loop.add_signal_handler,
            signal.SIGINT, lambda: True)
        self.assertFalse(m_logging.info.called)
        self.assertEqual(1, m_signal.set_wakeup_fd.call_count)

    @mock.patch('asyncio.unix_events.signal')
    @mock.patch('asyncio.base_events.logger')
    def test_add_signal_handler_install_error3(self, m_logging, m_signal):
        class Err(OSError):
            errno = errno.EINVAL
        m_signal.signal.side_effect = Err
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals

        # no other handler registered: the wakeup fd is reset (second call)
        self.assertRaises(
            RuntimeError,
            self.loop.add_signal_handler,
            signal.SIGINT, lambda: True)
        self.assertFalse(m_logging.info.called)
        self.assertEqual(2, m_signal.set_wakeup_fd.call_count)

    @mock.patch('asyncio.unix_events.signal')
    def test_remove_signal_handler(self, m_signal):
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        self.loop.add_signal_handler(signal.SIGHUP, lambda: True)

        # removing the last handler restores SIG_DFL and resets the
        # wakeup fd
        self.assertTrue(
            self.loop.remove_signal_handler(signal.SIGHUP))
        self.assertTrue(m_signal.set_wakeup_fd.called)
        self.assertTrue(m_signal.signal.called)
        self.assertEqual(
            (signal.SIGHUP, m_signal.SIG_DFL), m_signal.signal.call_args[0])

    @mock.patch('asyncio.unix_events.signal')
    def test_remove_signal_handler_2(self, m_signal):
        m_signal.NSIG = signal.NSIG
        m_signal.SIGINT = signal.SIGINT
        m_signal.valid_signals = signal.valid_signals
        self.loop.add_signal_handler(signal.SIGINT, lambda: True)
        self.loop._signal_handlers[signal.SIGHUP] = object()
        m_signal.set_wakeup_fd.reset_mock()

        # another handler (SIGHUP) remains, so the wakeup fd stays set;
        # SIGINT gets its default interrupt handler back
        self.assertTrue(
            self.loop.remove_signal_handler(signal.SIGINT))
        self.assertFalse(m_signal.set_wakeup_fd.called)
        self.assertTrue(m_signal.signal.called)
        self.assertEqual(
            (signal.SIGINT, m_signal.default_int_handler),
            m_signal.signal.call_args[0])

    @mock.patch('asyncio.unix_events.signal')
    @mock.patch('asyncio.base_events.logger')
    def test_remove_signal_handler_cleanup_error(self, m_logging, m_signal):
        # a ValueError from set_wakeup_fd(-1) during cleanup is logged,
        # not raised
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
        m_signal.set_wakeup_fd.side_effect = ValueError
        self.loop.remove_signal_handler(signal.SIGHUP)
        # NOTE(review): this assertion is vacuous -- `m_logging.info` is a
        # Mock attribute and is always truthy, so it passes whether or not
        # info() was ever called.  `m_logging.info.called` would be the
        # real check, but before tightening it confirm that the patch
        # target ('asyncio.base_events.logger') is the same logger object
        # unix_events uses for this message.
        self.assertTrue(m_logging.info)

    @mock.patch('asyncio.unix_events.signal')
    def test_remove_signal_handler_error(self, m_signal):
        # a plain OSError from signal.signal() propagates unchanged
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
        m_signal.signal.side_effect = OSError
        self.assertRaises(
            OSError, self.loop.remove_signal_handler, signal.SIGHUP)

    @mock.patch('asyncio.unix_events.signal')
    def test_remove_signal_handler_error2(self, m_signal):
        # EINVAL from signal.signal() is converted to RuntimeError
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        self.loop.add_signal_handler(signal.SIGHUP, lambda: True)

        class Err(OSError):
            errno = errno.EINVAL
        m_signal.signal.side_effect = Err

        self.assertRaises(
            RuntimeError, self.loop.remove_signal_handler, signal.SIGHUP)

    @mock.patch('asyncio.unix_events.signal')
    def test_close(self, m_signal):
        # loop.close() removes all signal handlers and resets the
        # wakeup fd exactly once
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
        self.loop.add_signal_handler(signal.SIGCHLD, lambda: True)
        self.assertEqual(len(self.loop._signal_handlers), 2)
        m_signal.set_wakeup_fd.reset_mock()
        self.loop.close()
        self.assertEqual(len(self.loop._signal_handlers), 0)
        m_signal.set_wakeup_fd.assert_called_once_with(-1)

    @mock.patch('asyncio.unix_events.sys')
    @mock.patch('asyncio.unix_events.signal')
    def test_close_on_finalizing(self, m_signal, m_sys):
        # during interpreter finalization, handler removal is skipped
        # with a ResourceWarning instead of calling signal.signal()
        m_signal.NSIG = signal.NSIG
        m_signal.valid_signals = signal.valid_signals
        self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
        self.assertEqual(len(self.loop._signal_handlers), 1)
        m_sys.is_finalizing.return_value = True
        m_signal.signal.reset_mock()
        with self.assertWarnsRegex(ResourceWarning,
                                   "skipping signal handlers removal"):
            self.loop.close()
        self.assertEqual(len(self.loop._signal_handlers), 0)
        self.assertFalse(m_signal.signal.called)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'),
                     'UNIX Sockets are not supported')
class SelectorEventLoopUnixSocketTests(test_utils.TestCase):
    """Argument validation and basic behaviour of create_unix_server()
    and create_unix_connection()."""

    def setUp(self):
        super().setUp()
        self.loop = asyncio.SelectorEventLoop()
        self.set_event_loop(self.loop)

    @support.skip_unless_bind_unix_socket
    def test_create_unix_server_existing_path_sock(self):
        # a stale socket file left by a closed listener is reused
        with test_utils.unix_socket_path() as path:
            sock = socket.socket(socket.AF_UNIX)
            sock.bind(path)
            sock.listen(1)
            sock.close()
            coro = self.loop.create_unix_server(lambda: None, path)
            srv = self.loop.run_until_complete(coro)
            srv.close()
            self.loop.run_until_complete(srv.wait_closed())

    @support.skip_unless_bind_unix_socket
    def test_create_unix_server_pathlib(self):
        # path may be a pathlib.Path, not just a str
        with test_utils.unix_socket_path() as path:
            path = pathlib.Path(path)
            srv_coro = self.loop.create_unix_server(lambda: None, path)
            srv = self.loop.run_until_complete(srv_coro)
            srv.close()
            self.loop.run_until_complete(srv.wait_closed())

    def test_create_unix_connection_pathlib(self):
        with test_utils.unix_socket_path() as path:
            path = pathlib.Path(path)
            coro = self.loop.create_unix_connection(lambda: None, path)
            with self.assertRaises(FileNotFoundError):
                # If pathlib.Path wasn't supported, the exception would be
                # a TypeError rather than FileNotFoundError for the
                # non-existent socket path.
                self.loop.run_until_complete(coro)

    def test_create_unix_server_existing_path_nonsock(self):
        # a non-socket file at the path is reported as "already in use"
        with tempfile.NamedTemporaryFile() as file:
            coro = self.loop.create_unix_server(lambda: None, file.name)
            with self.assertRaisesRegex(OSError,
                                        'Address.*is already in use'):
                self.loop.run_until_complete(coro)

    def test_create_unix_server_ssl_bool(self):
        # ssl=True is only valid for clients; servers need an SSLContext
        coro = self.loop.create_unix_server(lambda: None, path='spam',
                                            ssl=True)
        with self.assertRaisesRegex(TypeError,
                                    'ssl argument must be an SSLContext'):
            self.loop.run_until_complete(coro)

    def test_create_unix_server_nopath_nosock(self):
        coro = self.loop.create_unix_server(lambda: None, path=None)
        with self.assertRaisesRegex(ValueError,
                                    'path was not specified, and no sock'):
            self.loop.run_until_complete(coro)

    def test_create_unix_server_path_inetsock(self):
        # an AF_INET socket is rejected
        sock = socket.socket()
        with sock:
            coro = self.loop.create_unix_server(lambda: None, path=None,
                                                sock=sock)
            with self.assertRaisesRegex(ValueError,
                                        'A UNIX Domain Stream.*was expected'):
                self.loop.run_until_complete(coro)

    def test_create_unix_server_path_dgram(self):
        # a SOCK_DGRAM unix socket is rejected
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        with sock:
            coro = self.loop.create_unix_server(lambda: None, path=None,
                                                sock=sock)
            with self.assertRaisesRegex(ValueError,
                                        'A UNIX Domain Stream.*was expected'):
                self.loop.run_until_complete(coro)

    @unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
                         'no socket.SOCK_NONBLOCK (linux only)')
    @support.skip_unless_bind_unix_socket
    def test_create_unix_server_path_stream_bittype(self):
        # SOCK_STREAM OR-ed with SOCK_NONBLOCK must still be accepted
        sock = socket.socket(
            socket.AF_UNIX, socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
        # the tempfile is only used to get a unique name; it is deleted
        # when the with-block exits, then sock.bind() recreates the path
        with tempfile.NamedTemporaryFile() as file:
            fn = file.name
        try:
            with sock:
                sock.bind(fn)
                coro = self.loop.create_unix_server(lambda: None, path=None,
                                                    sock=sock)
                srv = self.loop.run_until_complete(coro)
                srv.close()
                self.loop.run_until_complete(srv.wait_closed())
        finally:
            os.unlink(fn)

    def test_create_unix_server_ssl_timeout_with_plain_sock(self):
        coro = self.loop.create_unix_server(lambda: None, path='spam',
                                            ssl_handshake_timeout=1)
        with self.assertRaisesRegex(
                ValueError,
                'ssl_handshake_timeout is only meaningful with ssl'):
            self.loop.run_until_complete(coro)

    def test_create_unix_connection_path_inetsock(self):
        sock = socket.socket()
        with sock:
            coro = self.loop.create_unix_connection(lambda: None,
                                                    sock=sock)
            with self.assertRaisesRegex(ValueError,
                                        'A UNIX Domain Stream.*was expected'):
                self.loop.run_until_complete(coro)

    @mock.patch('asyncio.unix_events.socket')
    def test_create_unix_server_bind_error(self, m_socket):
        # a bind() failure (OSError or MemoryError) must close the socket
        sock = mock.Mock()
        m_socket.socket.return_value = sock

        sock.bind.side_effect = OSError
        coro = self.loop.create_unix_server(lambda: None, path="/test")
        with self.assertRaises(OSError):
            self.loop.run_until_complete(coro)
        self.assertTrue(sock.close.called)

        sock.bind.side_effect = MemoryError
        coro = self.loop.create_unix_server(lambda: None, path="/test")
        with self.assertRaises(MemoryError):
            self.loop.run_until_complete(coro)
        self.assertTrue(sock.close.called)

    def test_create_unix_connection_path_sock(self):
        # path and sock are mutually exclusive
        coro = self.loop.create_unix_connection(
            lambda: None, os.devnull, sock=object())
        with self.assertRaisesRegex(ValueError, 'path and sock can not be'):
            self.loop.run_until_complete(coro)

    def test_create_unix_connection_nopath_nosock(self):
        coro = self.loop.create_unix_connection(
            lambda: None, None)
        with self.assertRaisesRegex(ValueError,
                                    'no path and sock were specified'):
            self.loop.run_until_complete(coro)

    def test_create_unix_connection_nossl_serverhost(self):
        # server_hostname requires ssl
        coro = self.loop.create_unix_connection(
            lambda: None, os.devnull, server_hostname='spam')
        with self.assertRaisesRegex(ValueError,
                                    'server_hostname is only meaningful'):
            self.loop.run_until_complete(coro)

    def test_create_unix_connection_ssl_noserverhost(self):
        # ssl without server_hostname is rejected for unix connections
        coro = self.loop.create_unix_connection(
            lambda: None, os.devnull, ssl=True)
        with self.assertRaisesRegex(
                ValueError, 'you have to pass server_hostname when using ssl'):
            self.loop.run_until_complete(coro)

    def test_create_unix_connection_ssl_timeout_with_plain_sock(self):
        coro = self.loop.create_unix_connection(lambda: None, path='spam',
                                                ssl_handshake_timeout=1)
        with self.assertRaisesRegex(
                ValueError,
                'ssl_handshake_timeout is only meaningful with ssl'):
            self.loop.run_until_complete(coro)
@unittest.skipUnless(hasattr(os, 'sendfile'),
                     'sendfile is not supported')
class SelectorEventLoopUnixSockSendfileTests(test_utils.TestCase):
    """Low-level tests for BaseSelectorEventLoop._sock_sendfile_native()
    over a real (loopback) connected socket pair."""

    # payload written to the test file; 10 * 16 * 1024 = 160 KiB
    DATA = b"12345abcde" * 16 * 1024

    class MyProto(asyncio.Protocol):
        # minimal receiving protocol: records data and exposes futures
        # for "connected" (_ready) and "closed" (fut)

        def __init__(self, loop):
            self.started = False
            self.closed = False
            self.data = bytearray()
            self.fut = loop.create_future()
            self.transport = None
            self._ready = loop.create_future()

        def connection_made(self, transport):
            self.started = True
            self.transport = transport
            self._ready.set_result(None)

        def data_received(self, data):
            self.data.extend(data)

        def connection_lost(self, exc):
            self.closed = True
            self.fut.set_result(None)

        async def wait_closed(self):
            await self.fut

    @classmethod
    def setUpClass(cls):
        # the data file is shared (read-only) by all tests in the class
        with open(support.TESTFN, 'wb') as fp:
            fp.write(cls.DATA)
        super().setUpClass()

    @classmethod
    def tearDownClass(cls):
        support.unlink(support.TESTFN)
        super().tearDownClass()

    def setUp(self):
        self.loop = asyncio.new_event_loop()
        self.set_event_loop(self.loop)
        self.file = open(support.TESTFN, 'rb')
        self.addCleanup(self.file.close)
        super().setUp()

    def make_socket(self, cleanup=True):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setblocking(False)
        # tiny buffers so sendfile() cannot finish in a single call
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1024)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1024)
        if cleanup:
            self.addCleanup(sock.close)
        return sock

    def run_loop(self, coro):
        return self.loop.run_until_complete(coro)

    def prepare(self):
        # start a server on an unused port and connect a client socket;
        # returns (client_sock, server_protocol)
        sock = self.make_socket()
        proto = self.MyProto(self.loop)
        port = support.find_unused_port()
        srv_sock = self.make_socket(cleanup=False)
        srv_sock.bind((support.HOST, port))
        server = self.run_loop(self.loop.create_server(
            lambda: proto, sock=srv_sock))
        self.run_loop(self.loop.sock_connect(sock, (support.HOST, port)))
        self.run_loop(proto._ready)

        def cleanup():
            proto.transport.close()
            self.run_loop(proto.wait_closed())
            server.close()
            self.run_loop(server.wait_closed())

        self.addCleanup(cleanup)
        return sock, proto

    def test_sock_sendfile_not_available(self):
        # patching os with an empty spec removes os.sendfile entirely
        sock, proto = self.prepare()
        with mock.patch('asyncio.unix_events.os', spec=[]):
            with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
                                        "os[.]sendfile[(][)] is not available"):
                self.run_loop(self.loop._sock_sendfile_native(sock, self.file,
                                                              0, None))
        self.assertEqual(self.file.tell(), 0)

    def test_sock_sendfile_not_a_file(self):
        sock, proto = self.prepare()
        f = object()
        with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
                                    "not a regular file"):
            self.run_loop(self.loop._sock_sendfile_native(sock, f,
                                                          0, None))
        self.assertEqual(self.file.tell(), 0)

    def test_sock_sendfile_iobuffer(self):
        # in-memory streams have no real fd and are rejected
        sock, proto = self.prepare()
        f = io.BytesIO()
        with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
                                    "not a regular file"):
            self.run_loop(self.loop._sock_sendfile_native(sock, f,
                                                          0, None))
        self.assertEqual(self.file.tell(), 0)

    def test_sock_sendfile_not_regular_file(self):
        sock, proto = self.prepare()
        f = mock.Mock()
        f.fileno.return_value = -1
        with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
                                    "not a regular file"):
            self.run_loop(self.loop._sock_sendfile_native(sock, f,
                                                          0, None))
        self.assertEqual(self.file.tell(), 0)

    def test_sock_sendfile_cancel1(self):
        # cancelling the future unregisters the socket from the selector
        sock, proto = self.prepare()
        fut = self.loop.create_future()
        fileno = self.file.fileno()
        self.loop._sock_sendfile_native_impl(fut, None, sock, fileno,
                                             0, None, len(self.DATA), 0)
        fut.cancel()
        with contextlib.suppress(asyncio.CancelledError):
            self.run_loop(fut)
        with self.assertRaises(KeyError):
            self.loop._selector.get_key(sock)

    def test_sock_sendfile_cancel2(self):
        # re-entering the impl with an already-cancelled future cleans up
        sock, proto = self.prepare()
        fut = self.loop.create_future()
        fileno = self.file.fileno()
        self.loop._sock_sendfile_native_impl(fut, None, sock, fileno,
                                             0, None, len(self.DATA), 0)
        fut.cancel()
        self.loop._sock_sendfile_native_impl(fut, sock.fileno(), sock, fileno,
                                             0, None, len(self.DATA), 0)
        with self.assertRaises(KeyError):
            self.loop._selector.get_key(sock)

    def test_sock_sendfile_blocking_error(self):
        # EAGAIN registers the socket for writability and a done callback
        sock, proto = self.prepare()
        fileno = self.file.fileno()
        fut = mock.Mock()
        fut.cancelled.return_value = False
        with mock.patch('os.sendfile', side_effect=BlockingIOError()):
            self.loop._sock_sendfile_native_impl(fut, None, sock, fileno,
                                                 0, None, len(self.DATA), 0)
        key = self.loop._selector.get_key(sock)
        self.assertIsNotNone(key)
        fut.add_done_callback.assert_called_once_with(mock.ANY)

    def test_sock_sendfile_os_error_first_call(self):
        # OSError on the very first sendfile() call means "not supported
        # for these files": SendfileNotAvailableError, nothing consumed
        sock, proto = self.prepare()
        fileno = self.file.fileno()
        fut = self.loop.create_future()
        with mock.patch('os.sendfile', side_effect=OSError()):
            self.loop._sock_sendfile_native_impl(fut, None, sock, fileno,
                                                 0, None, len(self.DATA), 0)
        with self.assertRaises(KeyError):
            self.loop._selector.get_key(sock)
        exc = fut.exception()
        self.assertIsInstance(exc, asyncio.SendfileNotAvailableError)
        self.assertEqual(0, self.file.tell())

    def test_sock_sendfile_os_error_next_call(self):
        # OSError after data has already been sent propagates as-is and
        # the file position reflects what was transmitted
        sock, proto = self.prepare()
        fileno = self.file.fileno()
        fut = self.loop.create_future()
        err = OSError()
        with mock.patch('os.sendfile', side_effect=err):
            self.loop._sock_sendfile_native_impl(fut, sock.fileno(),
                                                 sock, fileno,
                                                 1000, None, len(self.DATA),
                                                 1000)
        with self.assertRaises(KeyError):
            self.loop._selector.get_key(sock)
        exc = fut.exception()
        self.assertIs(exc, err)
        self.assertEqual(1000, self.file.tell())

    def test_sock_sendfile_exception(self):
        # non-OSError exceptions propagate unchanged on later calls too
        sock, proto = self.prepare()
        fileno = self.file.fileno()
        fut = self.loop.create_future()
        err = asyncio.SendfileNotAvailableError()
        with mock.patch('os.sendfile', side_effect=err):
            self.loop._sock_sendfile_native_impl(fut, sock.fileno(),
                                                 sock, fileno,
                                                 1000, None, len(self.DATA),
                                                 1000)
        with self.assertRaises(KeyError):
            self.loop._selector.get_key(sock)
        exc = fut.exception()
        self.assertIs(exc, err)
        self.assertEqual(1000, self.file.tell())
class UnixReadPipeTransportTests(test_utils.TestCase):
    """Unit tests for _UnixReadPipeTransport with a mocked pipe (fd 5)."""

    def setUp(self):
        super().setUp()
        self.loop = self.new_test_loop()
        self.protocol = test_utils.make_test_protocol(asyncio.Protocol)
        self.pipe = mock.Mock(spec_set=io.RawIOBase)
        self.pipe.fileno.return_value = 5

        blocking_patcher = mock.patch('os.set_blocking')
        blocking_patcher.start()
        self.addCleanup(blocking_patcher.stop)

        fstat_patcher = mock.patch('os.fstat')
        m_fstat = fstat_patcher.start()
        st = mock.Mock()
        # report the fd as a FIFO so the transport accepts it for reading
        st.st_mode = stat.S_IFIFO
        m_fstat.return_value = st
        self.addCleanup(fstat_patcher.stop)

    def read_pipe_transport(self, waiter=None):
        # build a transport over the mocked pipe and register cleanup
        transport = unix_events._UnixReadPipeTransport(self.loop, self.pipe,
                                                       self.protocol,
                                                       waiter=waiter)
        self.addCleanup(close_pipe_transport, transport)
        return transport

    def test_ctor(self):
        # construction notifies the protocol, starts the fd reader and
        # resolves the waiter with None
        waiter = asyncio.Future(loop=self.loop)
        tr = self.read_pipe_transport(waiter=waiter)
        self.loop.run_until_complete(waiter)
        self.protocol.connection_made.assert_called_with(tr)
        self.loop.assert_reader(5, tr._read_ready)
        self.assertIsNone(waiter.result())

    @mock.patch('os.read')
    def test__read_ready(self, m_read):
        # available data is forwarded to protocol.data_received()
        tr = self.read_pipe_transport()
        m_read.return_value = b'data'
        tr._read_ready()
        m_read.assert_called_with(5, tr.max_size)
        self.protocol.data_received.assert_called_with(b'data')

    @mock.patch('os.read')
    def test__read_ready_eof(self, m_read):
        # EOF (b'') stops reading and closes the connection
        tr = self.read_pipe_transport()
        m_read.return_value = b''
        tr._read_ready()
        m_read.assert_called_with(5, tr.max_size)
        self.assertFalse(self.loop.readers)
        test_utils.run_briefly(self.loop)
        self.protocol.eof_received.assert_called_with()
        self.protocol.connection_lost.assert_called_with(None)

    @mock.patch('os.read')
    def test__read_ready_blocked(self, m_read):
        # EAGAIN is silently ignored; no data reaches the protocol
        tr = self.read_pipe_transport()
        m_read.side_effect = BlockingIOError
        tr._read_ready()
        m_read.assert_called_with(5, tr.max_size)
        test_utils.run_briefly(self.loop)
        self.assertFalse(self.protocol.data_received.called)

    @mock.patch('asyncio.log.logger.error')
    @mock.patch('os.read')
    def test__read_ready_error(self, m_read, m_logexc):
        # a read failure closes the transport and logs a fatal error
        tr = self.read_pipe_transport()
        err = OSError()
        m_read.side_effect = err
        tr._close = mock.Mock()
        tr._read_ready()
        m_read.assert_called_with(5, tr.max_size)
        tr._close.assert_called_with(err)
        m_logexc.assert_called_with(
            test_utils.MockPattern(
                'Fatal read error on pipe transport'
                '\nprotocol:.*\ntransport:.*'),
            exc_info=(OSError, MOCK_ANY, MOCK_ANY))

    @mock.patch('os.read')
    def test_pause_reading(self, m_read):
        tr = self.read_pipe_transport()
        m = mock.Mock()
        self.loop.add_reader(5, m)
        tr.pause_reading()
        self.assertFalse(self.loop.readers)

    @mock.patch('os.read')
    def test_resume_reading(self, m_read):
        tr = self.read_pipe_transport()
        tr.resume_reading()
        self.loop.assert_reader(5, tr._read_ready)

    @mock.patch('os.read')
    def test_close(self, m_read):
        tr = self.read_pipe_transport()
        tr._close = mock.Mock()
        tr.close()
        tr._close.assert_called_with(None)

    @mock.patch('os.read')
    def test_close_already_closing(self, m_read):
        # close() on an already-closing transport is a no-op
        tr = self.read_pipe_transport()
        tr._closing = True
        tr._close = mock.Mock()
        tr.close()
        self.assertFalse(tr._close.called)

    @mock.patch('os.read')
    def test__close(self, m_read):
        tr = self.read_pipe_transport()
        err = object()
        tr._close(err)
        self.assertTrue(tr.is_closing())
        self.assertFalse(self.loop.readers)
        test_utils.run_briefly(self.loop)
        self.protocol.connection_lost.assert_called_with(err)

    def test__call_connection_lost(self):
        tr = self.read_pipe_transport()
        self.assertIsNotNone(tr._protocol)
        self.assertIsNotNone(tr._loop)
        err = None
        tr._call_connection_lost(err)
        self.protocol.connection_lost.assert_called_with(err)
        self.pipe.close.assert_called_with()
        # _call_connection_lost drops the protocol/loop references
        self.assertIsNone(tr._protocol)
        self.assertIsNone(tr._loop)

    def test__call_connection_lost_with_err(self):
        tr = self.read_pipe_transport()
        self.assertIsNotNone(tr._protocol)
        self.assertIsNotNone(tr._loop)
        err = OSError()
        tr._call_connection_lost(err)
        self.protocol.connection_lost.assert_called_with(err)
        self.pipe.close.assert_called_with()
        self.assertIsNone(tr._protocol)
        self.assertIsNone(tr._loop)
class UnixWritePipeTransportTests(test_utils.TestCase):
def setUp(self):
    """Create a test loop, a mock protocol and a mock pipe on fd 5;
    os.set_blocking and os.fstat are patched for the whole test."""
    super().setUp()
    self.loop = self.new_test_loop()
    self.protocol = test_utils.make_test_protocol(asyncio.BaseProtocol)
    self.pipe = mock.Mock(spec_set=io.RawIOBase)
    self.pipe.fileno.return_value = 5

    blocking_patcher = mock.patch('os.set_blocking')
    blocking_patcher.start()
    self.addCleanup(blocking_patcher.stop)

    fstat_patcher = mock.patch('os.fstat')
    m_fstat = fstat_patcher.start()
    st = mock.Mock()
    # report the fd as a socket so the transport accepts it for writing
    st.st_mode = stat.S_IFSOCK
    m_fstat.return_value = st
    self.addCleanup(fstat_patcher.stop)
def write_pipe_transport(self, waiter=None):
    """Construct a _UnixWritePipeTransport around the mocked pipe and
    register it for cleanup."""
    transport = unix_events._UnixWritePipeTransport(self.loop, self.pipe,
                                                    self.protocol,
                                                    waiter=waiter)
    self.addCleanup(close_pipe_transport, transport)
    return transport
def test_ctor(self):
    """Construction notifies the protocol, resolves the waiter and
    starts watching the pipe's read side for peer close."""
    waiter = asyncio.Future(loop=self.loop)
    tr = self.write_pipe_transport(waiter=waiter)
    self.loop.run_until_complete(waiter)
    self.protocol.connection_made.assert_called_with(tr)
    self.loop.assert_reader(5, tr._read_ready)
    self.assertEqual(None, waiter.result())
def test_can_write_eof(self):
    """Write pipe transports always support write_eof()."""
    transport = self.write_pipe_transport()
    self.assertTrue(transport.can_write_eof())
@mock.patch('os.write')
def test_write(self, m_write):
    """A fully-written payload leaves no writer registered and an
    empty internal buffer."""
    tr = self.write_pipe_transport()
    m_write.return_value = 4
    tr.write(b'data')
    m_write.assert_called_with(5, b'data')
    self.assertFalse(self.loop.writers)
    self.assertEqual(bytearray(), tr._buffer)
@mock.patch('os.write')
def test_write_no_data(self, m_write):
    """Writing b'' is a no-op: no syscall, no writer, empty buffer."""
    tr = self.write_pipe_transport()
    tr.write(b'')
    self.assertFalse(m_write.called)
    self.assertFalse(self.loop.writers)
    self.assertEqual(bytearray(b''), tr._buffer)
@mock.patch('os.write')
def test_write_partial(self, m_write):
    """A short write buffers the remaining bytes and registers a
    writer for fd 5."""
    tr = self.write_pipe_transport()
    m_write.return_value = 2
    tr.write(b'data')
    self.loop.assert_writer(5, tr._write_ready)
    self.assertEqual(bytearray(b'ta'), tr._buffer)
@mock.patch('os.write')
def test_write_buffer(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._buffer = bytearray(b'previous')
tr.write(b'data')
self.assertFalse(m_write.called)
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual(bytearray(b'previousdata'), tr._buffer)
@mock.patch('os.write')
def test_write_again(self, m_write):
tr = self.write_pipe_transport()
m_write.side_effect = BlockingIOError()
tr.write(b'data')
m_write.assert_called_with(5, bytearray(b'data'))
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual(bytearray(b'data'), tr._buffer)
@mock.patch('asyncio.unix_events.logger')
@mock.patch('os.write')
def test_write_err(self, m_write, m_log):
tr = self.write_pipe_transport()
err = OSError()
m_write.side_effect = err
tr._fatal_error = mock.Mock()
tr.write(b'data')
m_write.assert_called_with(5, b'data')
self.assertFalse(self.loop.writers)
self.assertEqual(bytearray(), tr._buffer)
tr._fatal_error.assert_called_with(
err,
'Fatal write error on pipe transport')
self.assertEqual(1, tr._conn_lost)
tr.write(b'data')
self.assertEqual(2, tr._conn_lost)
tr.write(b'data')
tr.write(b'data')
tr.write(b'data')
tr.write(b'data')
m_log.warning.assert_called_with(
'pipe closed by peer or os.write(pipe, data) raised exception.')
tr.close()
@mock.patch('os.write')
def test_write_close(self, m_write):
tr = self.write_pipe_transport()
tr._read_ready()
tr.write(b'data')
self.assertEqual(tr._conn_lost, 1)
tr.write(b'data')
self.assertEqual(tr._conn_lost, 2)
def test__read_ready(self):
tr = self.write_pipe_transport()
tr._read_ready()
self.assertFalse(self.loop.readers)
self.assertFalse(self.loop.writers)
self.assertTrue(tr.is_closing())
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(None)
@mock.patch('os.write')
def test__write_ready(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._buffer = bytearray(b'data')
m_write.return_value = 4
tr._write_ready()
self.assertFalse(self.loop.writers)
self.assertEqual(bytearray(), tr._buffer)
@mock.patch('os.write')
def test__write_ready_partial(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._buffer = bytearray(b'data')
m_write.return_value = 3
tr._write_ready()
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual(bytearray(b'a'), tr._buffer)
@mock.patch('os.write')
def test__write_ready_again(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._buffer = bytearray(b'data')
m_write.side_effect = BlockingIOError()
tr._write_ready()
m_write.assert_called_with(5, bytearray(b'data'))
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual(bytearray(b'data'), tr._buffer)
@mock.patch('os.write')
def test__write_ready_empty(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._buffer = bytearray(b'data')
m_write.return_value = 0
tr._write_ready()
m_write.assert_called_with(5, bytearray(b'data'))
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual(bytearray(b'data'), tr._buffer)
@mock.patch('asyncio.log.logger.error')
@mock.patch('os.write')
def test__write_ready_err(self, m_write, m_logexc):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._buffer = bytearray(b'data')
m_write.side_effect = err = OSError()
tr._write_ready()
self.assertFalse(self.loop.writers)
self.assertFalse(self.loop.readers)
self.assertEqual(bytearray(), tr._buffer)
self.assertTrue(tr.is_closing())
m_logexc.assert_not_called()
self.assertEqual(1, tr._conn_lost)
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(err)
@mock.patch('os.write')
def test__write_ready_closing(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._closing = True
tr._buffer = bytearray(b'data')
m_write.return_value = 4
tr._write_ready()
self.assertFalse(self.loop.writers)
self.assertFalse(self.loop.readers)
self.assertEqual(bytearray(), tr._buffer)
self.protocol.connection_lost.assert_called_with(None)
self.pipe.close.assert_called_with()
@mock.patch('os.write')
def test_abort(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
self.loop.add_reader(5, tr._read_ready)
tr._buffer = [b'da', b'ta']
tr.abort()
self.assertFalse(m_write.called)
self.assertFalse(self.loop.readers)
self.assertFalse(self.loop.writers)
self.assertEqual([], tr._buffer)
self.assertTrue(tr.is_closing())
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(None)
def test__call_connection_lost(self):
tr = self.write_pipe_transport()
self.assertIsNotNone(tr._protocol)
self.assertIsNotNone(tr._loop)
err = None
tr._call_connection_lost(err)
self.protocol.connection_lost.assert_called_with(err)
self.pipe.close.assert_called_with()
self.assertIsNone(tr._protocol)
self.assertIsNone(tr._loop)
def test__call_connection_lost_with_err(self):
tr = self.write_pipe_transport()
self.assertIsNotNone(tr._protocol)
self.assertIsNotNone(tr._loop)
err = OSError()
tr._call_connection_lost(err)
self.protocol.connection_lost.assert_called_with(err)
self.pipe.close.assert_called_with()
self.assertIsNone(tr._protocol)
self.assertIsNone(tr._loop)
def test_close(self):
tr = self.write_pipe_transport()
tr.write_eof = mock.Mock()
tr.close()
tr.write_eof.assert_called_with()
tr.close()
def test_close_closing(self):
tr = self.write_pipe_transport()
tr.write_eof = mock.Mock()
tr._closing = True
tr.close()
self.assertFalse(tr.write_eof.called)
def test_write_eof(self):
tr = self.write_pipe_transport()
tr.write_eof()
self.assertTrue(tr.is_closing())
self.assertFalse(self.loop.readers)
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(None)
def test_write_eof_pending(self):
tr = self.write_pipe_transport()
tr._buffer = [b'data']
tr.write_eof()
self.assertTrue(tr.is_closing())
self.assertFalse(self.protocol.connection_lost.called)
class AbstractChildWatcherTests(unittest.TestCase):
    """Every AbstractChildWatcher method must raise NotImplementedError."""

    def test_not_implemented(self):
        f = mock.Mock()
        watcher = asyncio.AbstractChildWatcher()
        self.assertRaises(
            NotImplementedError, watcher.add_child_handler, f, f)
        self.assertRaises(
            NotImplementedError, watcher.remove_child_handler, f)
        self.assertRaises(
            NotImplementedError, watcher.attach_loop, f)
        self.assertRaises(
            NotImplementedError, watcher.close)
        self.assertRaises(
            NotImplementedError, watcher.is_active)
        self.assertRaises(
            NotImplementedError, watcher.__enter__)
        self.assertRaises(
            NotImplementedError, watcher.__exit__, f, f, f)
class BaseChildWatcherTests(unittest.TestCase):
    """BaseChildWatcher leaves _do_waitpid abstract."""

    def test_not_implemented(self):
        f = mock.Mock()
        watcher = unix_events.BaseChildWatcher()
        self.assertRaises(
            NotImplementedError, watcher._do_waitpid, f)
# Bundle of the mocked os.* wait-status helpers that @waitpid_mocks passes
# to each decorated test method.
WaitPidMocks = collections.namedtuple("WaitPidMocks",
                                      ("waitpid",
                                       "WIFEXITED",
                                       "WIFSIGNALED",
                                       "WEXITSTATUS",
                                       "WTERMSIG",
                                       ))
class ChildWatcherTestsMixin:
    """Shared SIGCHLD child-watcher tests.

    Concrete subclasses provide create_watcher(); os.waitpid and the
    WIF*/WEXITSTATUS/WTERMSIG macros are replaced by the helpers below,
    which encode exit statuses as ``returncode + 32768``.
    """

    # Silence the watcher's "unknown child" warnings in negative tests.
    ignore_warnings = mock.patch.object(log.logger, "warning")

    def setUp(self):
        super().setUp()
        self.loop = self.new_test_loop()
        self.running = False       # simulated "child still running" flag
        self.zombies = {}          # pid -> encoded status of reapable children

        with mock.patch.object(
                self.loop, "add_signal_handler") as self.m_add_signal_handler:
            self.watcher = self.create_watcher()
            self.watcher.attach_loop(self.loop)

    def waitpid(self, pid, flags):
        # Fake os.waitpid driven by self.zombies/self.running.
        if isinstance(self.watcher, asyncio.SafeChildWatcher) or pid != -1:
            self.assertGreater(pid, 0)
        try:
            if pid < 0:
                return self.zombies.popitem()
            else:
                return pid, self.zombies.pop(pid)
        except KeyError:
            pass
        if self.running:
            return 0, 0
        else:
            raise ChildProcessError()

    def add_zombie(self, pid, returncode):
        # Register a terminated child; status encoding matches WIFEXITED below.
        self.zombies[pid] = returncode + 32768

    def WIFEXITED(self, status):
        return status >= 32768

    def WIFSIGNALED(self, status):
        return 32700 < status < 32768

    def WEXITSTATUS(self, status):
        self.assertTrue(self.WIFEXITED(status))
        return status - 32768

    def WTERMSIG(self, status):
        self.assertTrue(self.WIFSIGNALED(status))
        return 32768 - status

    def test_create_watcher(self):
        self.m_add_signal_handler.assert_called_once_with(
            signal.SIGCHLD, self.watcher._sig_chld)

    def waitpid_mocks(func):
        # Decorator: run the test with os.* patched to the fake helpers and
        # hand the mocks over as a WaitPidMocks tuple.
        def wrapped_func(self):
            def patch(target, wrapper):
                return mock.patch(target, wraps=wrapper,
                                  new_callable=mock.Mock)

            with patch('os.WTERMSIG', self.WTERMSIG) as m_WTERMSIG, \
                 patch('os.WEXITSTATUS', self.WEXITSTATUS) as m_WEXITSTATUS, \
                 patch('os.WIFSIGNALED', self.WIFSIGNALED) as m_WIFSIGNALED, \
                 patch('os.WIFEXITED', self.WIFEXITED) as m_WIFEXITED, \
                 patch('os.waitpid', self.waitpid) as m_waitpid:
                func(self, WaitPidMocks(m_waitpid,
                                        m_WIFEXITED, m_WIFSIGNALED,
                                        m_WEXITSTATUS, m_WTERMSIG,
                                        ))
        return wrapped_func

    @waitpid_mocks
    def test_sigchld(self, m):
        # register a child
        callback = mock.Mock()

        with self.watcher:
            self.running = True
            self.watcher.add_child_handler(42, callback, 9, 10, 14)

        self.assertFalse(callback.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # child is still running: SIGCHLD is a no-op
        self.watcher._sig_chld()

        self.assertFalse(callback.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # child terminates (returncode 12)
        self.running = False
        self.add_zombie(42, 12)
        self.watcher._sig_chld()

        self.assertTrue(m.WIFEXITED.called)
        self.assertTrue(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)
        callback.assert_called_once_with(42, 12, 9, 10, 14)

        m.WIFSIGNALED.reset_mock()
        m.WIFEXITED.reset_mock()
        m.WEXITSTATUS.reset_mock()
        callback.reset_mock()

        # ensure that the child is effectively reaped: same pid again is
        # unknown and must not fire the callback
        self.add_zombie(42, 13)
        with self.ignore_warnings:
            self.watcher._sig_chld()

        self.assertFalse(callback.called)
        self.assertFalse(m.WTERMSIG.called)

        m.WIFSIGNALED.reset_mock()
        m.WIFEXITED.reset_mock()
        m.WEXITSTATUS.reset_mock()

        # sigchld called again with nothing to reap
        self.zombies.clear()
        self.watcher._sig_chld()

        self.assertFalse(callback.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

    @waitpid_mocks
    def test_sigchld_two_children(self, m):
        callback1 = mock.Mock()
        callback2 = mock.Mock()

        # register child 1
        with self.watcher:
            self.running = True
            self.watcher.add_child_handler(43, callback1, 7, 8)

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # register child 2
        with self.watcher:
            self.watcher.add_child_handler(44, callback2, 147, 18)

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # children are running
        self.watcher._sig_chld()

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # child 1 terminates (signal 3)
        self.add_zombie(43, -3)
        self.watcher._sig_chld()

        callback1.assert_called_once_with(43, -3, 7, 8)
        self.assertFalse(callback2.called)
        self.assertTrue(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertTrue(m.WTERMSIG.called)

        m.WIFSIGNALED.reset_mock()
        m.WIFEXITED.reset_mock()
        m.WTERMSIG.reset_mock()
        callback1.reset_mock()

        # child 2 still running
        self.watcher._sig_chld()

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # child 2 terminates (code 108)
        self.add_zombie(44, 108)
        self.running = False
        self.watcher._sig_chld()

        callback2.assert_called_once_with(44, 108, 147, 18)
        self.assertFalse(callback1.called)
        self.assertTrue(m.WIFEXITED.called)
        self.assertTrue(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        m.WIFSIGNALED.reset_mock()
        m.WIFEXITED.reset_mock()
        m.WEXITSTATUS.reset_mock()
        callback2.reset_mock()

        # ensure that the children are effectively reaped
        self.add_zombie(43, 14)
        self.add_zombie(44, 15)
        with self.ignore_warnings:
            self.watcher._sig_chld()

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WTERMSIG.called)

        m.WIFSIGNALED.reset_mock()
        m.WIFEXITED.reset_mock()
        m.WEXITSTATUS.reset_mock()

        # sigchld called again with nothing to reap
        self.zombies.clear()
        self.watcher._sig_chld()

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

    @waitpid_mocks
    def test_sigchld_two_children_terminating_together(self, m):
        callback1 = mock.Mock()
        callback2 = mock.Mock()

        # register child 1
        with self.watcher:
            self.running = True
            self.watcher.add_child_handler(45, callback1, 17, 8)

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # register child 2
        with self.watcher:
            self.watcher.add_child_handler(46, callback2, 1147, 18)

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # children are running
        self.watcher._sig_chld()

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # both children terminate before a single SIGCHLD is processed
        self.add_zombie(45, 78)
        self.add_zombie(46, -5)
        self.running = False
        self.watcher._sig_chld()

        callback1.assert_called_once_with(45, 78, 17, 8)
        callback2.assert_called_once_with(46, -5, 1147, 18)
        self.assertTrue(m.WIFSIGNALED.called)
        self.assertTrue(m.WIFEXITED.called)
        self.assertTrue(m.WEXITSTATUS.called)
        self.assertTrue(m.WTERMSIG.called)

        m.WIFSIGNALED.reset_mock()
        m.WIFEXITED.reset_mock()
        m.WTERMSIG.reset_mock()
        m.WEXITSTATUS.reset_mock()
        callback1.reset_mock()
        callback2.reset_mock()

        # ensure that the children are effectively reaped
        self.add_zombie(45, 14)
        self.add_zombie(46, 15)
        with self.ignore_warnings:
            self.watcher._sig_chld()

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WTERMSIG.called)

    @waitpid_mocks
    def test_sigchld_race_condition(self, m):
        callback = mock.Mock()

        with self.watcher:
            # child terminates before the handler is registered
            self.add_zombie(50, 4)
            self.watcher._sig_chld()

            self.watcher.add_child_handler(50, callback, 1, 12)

        # the callback must still fire with the pre-collected status
        callback.assert_called_once_with(50, 4, 1, 12)
        callback.reset_mock()

        # ensure that the child is effectively reaped
        self.add_zombie(50, -1)
        with self.ignore_warnings:
            self.watcher._sig_chld()

        self.assertFalse(callback.called)

    @waitpid_mocks
    def test_sigchld_replace_handler(self, m):
        callback1 = mock.Mock()
        callback2 = mock.Mock()

        # register a child
        with self.watcher:
            self.running = True
            self.watcher.add_child_handler(51, callback1, 19)

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # register the same child again: second handler replaces the first
        with self.watcher:
            self.watcher.add_child_handler(51, callback2, 21)

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # child terminates (signal 8)
        self.running = False
        self.add_zombie(51, -8)
        self.watcher._sig_chld()

        callback2.assert_called_once_with(51, -8, 21)
        self.assertFalse(callback1.called)
        self.assertTrue(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertTrue(m.WTERMSIG.called)

        m.WIFSIGNALED.reset_mock()
        m.WIFEXITED.reset_mock()
        m.WTERMSIG.reset_mock()
        callback2.reset_mock()

        # ensure that the child is effectively reaped
        self.add_zombie(51, 13)
        with self.ignore_warnings:
            self.watcher._sig_chld()

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(m.WTERMSIG.called)

    @waitpid_mocks
    def test_sigchld_remove_handler(self, m):
        callback = mock.Mock()

        # register a child
        with self.watcher:
            self.running = True
            self.watcher.add_child_handler(52, callback, 1984)

        self.assertFalse(callback.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # unregister the child before it terminates
        self.watcher.remove_child_handler(52)

        self.assertFalse(callback.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # child terminates: no callback expected
        self.running = False
        self.add_zombie(52, 99)
        with self.ignore_warnings:
            self.watcher._sig_chld()

        self.assertFalse(callback.called)

    @waitpid_mocks
    def test_sigchld_unknown_status(self, m):
        callback = mock.Mock()

        # register a child
        with self.watcher:
            self.running = True
            self.watcher.add_child_handler(53, callback, -19)

        self.assertFalse(callback.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # terminate with a status that is neither "exited" nor "signaled";
        # the raw status must be passed through to the callback.
        self.zombies[53] = 1178
        self.running = False
        self.watcher._sig_chld()

        callback.assert_called_once_with(53, 1178, -19)
        self.assertTrue(m.WIFEXITED.called)
        self.assertTrue(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        callback.reset_mock()
        m.WIFEXITED.reset_mock()
        m.WIFSIGNALED.reset_mock()

        # ensure that the child is effectively reaped
        self.add_zombie(53, 101)
        with self.ignore_warnings:
            self.watcher._sig_chld()

        self.assertFalse(callback.called)

    @waitpid_mocks
    def test_remove_child_handler(self, m):
        callback1 = mock.Mock()
        callback2 = mock.Mock()
        callback3 = mock.Mock()

        # register children
        with self.watcher:
            self.running = True
            self.watcher.add_child_handler(54, callback1, 1)
            self.watcher.add_child_handler(55, callback2, 2)
            self.watcher.add_child_handler(56, callback3, 3)

        # remove_child_handler returns True only while a handler exists
        self.assertTrue(self.watcher.remove_child_handler(54))
        self.assertTrue(self.watcher.remove_child_handler(55))
        self.assertFalse(self.watcher.remove_child_handler(55))
        self.assertFalse(self.watcher.remove_child_handler(55))

        # all children terminate; only the remaining handler fires
        self.add_zombie(54, 0)
        self.add_zombie(55, 1)
        self.add_zombie(56, 2)
        self.running = False
        with self.ignore_warnings:
            self.watcher._sig_chld()

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        callback3.assert_called_once_with(56, 2, 3)

    @waitpid_mocks
    def test_sigchld_unhandled_exception(self, m):
        callback = mock.Mock()

        # register a child
        with self.watcher:
            self.running = True
            self.watcher.add_child_handler(57, callback)

        # waitpid raising must be logged, not propagated from _sig_chld
        m.waitpid.side_effect = ValueError

        with mock.patch.object(log.logger,
                               'error') as m_error:
            self.assertEqual(self.watcher._sig_chld(), None)
            self.assertTrue(m_error.called)

    @waitpid_mocks
    def test_sigchld_child_reaped_elsewhere(self, m):
        callback = mock.Mock()

        # register a child
        with self.watcher:
            self.running = True
            self.watcher.add_child_handler(58, callback)

        self.assertFalse(callback.called)
        self.assertFalse(m.WIFEXITED.called)
        self.assertFalse(m.WIFSIGNALED.called)
        self.assertFalse(m.WEXITSTATUS.called)
        self.assertFalse(m.WTERMSIG.called)

        # child terminates but someone else reaps it first
        self.running = False
        self.add_zombie(58, 4)
        os.waitpid(58, os.WNOHANG)

        m.waitpid.reset_mock()

        with self.ignore_warnings:
            self.watcher._sig_chld()

        if isinstance(self.watcher, asyncio.FastChildWatcher):
            # FastChildWatcher cannot recover the status in this case
            self.assertFalse(callback.called)
        else:
            # 255 is the conventional "status unknown" returncode
            callback.assert_called_once_with(58, 255)

    @waitpid_mocks
    def test_sigchld_unknown_pid_during_registration(self, m):
        callback1 = mock.Mock()
        callback2 = mock.Mock()

        with self.ignore_warnings, self.watcher:
            self.running = True
            # child 1 and an unknown child terminate before registration
            self.add_zombie(591, 7)
            self.add_zombie(593, 17)
            self.watcher._sig_chld()

            self.watcher.add_child_handler(591, callback1)
            self.watcher.add_child_handler(592, callback2)

        callback1.assert_called_once_with(591, 7)
        self.assertFalse(callback2.called)

    @waitpid_mocks
    def test_set_loop(self, m):
        callback = mock.Mock()

        # register a child
        with self.watcher:
            self.running = True
            self.watcher.add_child_handler(60, callback)

        # attach a new loop: signal handler migrates from old to new loop
        old_loop = self.loop
        self.loop = self.new_test_loop()
        patch = mock.patch.object

        with patch(old_loop, "remove_signal_handler") as m_old_remove, \
             patch(self.loop, "add_signal_handler") as m_new_add:
            self.watcher.attach_loop(self.loop)

            m_old_remove.assert_called_once_with(
                signal.SIGCHLD)
            m_new_add.assert_called_once_with(
                signal.SIGCHLD, self.watcher._sig_chld)

        # child terminates: callback still fires on the new loop
        self.running = False
        self.add_zombie(60, 9)
        self.watcher._sig_chld()

        callback.assert_called_once_with(60, 9)

    @waitpid_mocks
    def test_set_loop_race_condition(self, m):
        callback1 = mock.Mock()
        callback2 = mock.Mock()
        callback3 = mock.Mock()

        # register 3 children
        with self.watcher:
            self.running = True
            self.watcher.add_child_handler(61, callback1)
            self.watcher.add_child_handler(62, callback2)
            self.watcher.add_child_handler(622, callback3)

        # detach the loop
        old_loop = self.loop
        self.loop = None

        with mock.patch.object(
                old_loop, "remove_signal_handler") as m_remove_signal_handler:
            with self.assertWarnsRegex(
                    RuntimeWarning, 'A loop is being detached'):
                self.watcher.attach_loop(None)

            m_remove_signal_handler.assert_called_once_with(
                signal.SIGCHLD)

        # children 1 & 2 terminate while no loop is attached,
        # so the SIGCHLD is missed
        self.add_zombie(61, 11)
        self.add_zombie(62, -5)

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        self.assertFalse(callback3.called)

        # attach a new loop: pending statuses are collected on attach
        self.loop = self.new_test_loop()

        with mock.patch.object(
                self.loop, "add_signal_handler") as m_add_signal_handler:
            self.watcher.attach_loop(self.loop)

            m_add_signal_handler.assert_called_once_with(
                signal.SIGCHLD, self.watcher._sig_chld)
            callback1.assert_called_once_with(61, 11)   # race condition!
            callback2.assert_called_once_with(62, -5)   # race condition!
            self.assertFalse(callback3.called)

        callback1.reset_mock()
        callback2.reset_mock()

        # child 3 terminates normally afterwards
        self.running = False
        self.add_zombie(622, 19)
        self.watcher._sig_chld()

        self.assertFalse(callback1.called)
        self.assertFalse(callback2.called)
        callback3.assert_called_once_with(622, 19)

    @waitpid_mocks
    def test_close(self, m):
        callback1 = mock.Mock()

        with self.watcher:
            self.running = True
            # child 1 terminates, as does an unknown child
            self.add_zombie(63, 9)
            self.add_zombie(65, 18)
            self.watcher._sig_chld()

            self.watcher.add_child_handler(63, callback1)
            self.watcher.add_child_handler(64, callback1)

            self.assertEqual(len(self.watcher._callbacks), 1)
            if isinstance(self.watcher, asyncio.FastChildWatcher):
                self.assertEqual(len(self.watcher._zombies), 1)

            # close() must unregister the signal handler and clear state
            with mock.patch.object(
                    self.loop,
                    "remove_signal_handler") as m_remove_signal_handler:
                self.watcher.close()

                m_remove_signal_handler.assert_called_once_with(
                    signal.SIGCHLD)
                self.assertFalse(self.watcher._callbacks)
                if isinstance(self.watcher, asyncio.FastChildWatcher):
                    self.assertFalse(self.watcher._zombies)
class SafeChildWatcherTests (ChildWatcherTestsMixin, test_utils.TestCase):
    """Run the shared child-watcher tests against SafeChildWatcher."""

    def create_watcher(self):
        return asyncio.SafeChildWatcher()
class FastChildWatcherTests (ChildWatcherTestsMixin, test_utils.TestCase):
    """Run the shared child-watcher tests against FastChildWatcher."""

    def create_watcher(self):
        return asyncio.FastChildWatcher()
class PolicyTests(unittest.TestCase):
    """Child-watcher behaviour of the default Unix event loop policy."""

    def create_policy(self):
        return asyncio.DefaultEventLoopPolicy()

    def test_get_default_child_watcher(self):
        # First access lazily creates a ThreadedChildWatcher and caches it.
        policy = self.create_policy()
        self.assertIsNone(policy._watcher)

        watcher = policy.get_child_watcher()
        self.assertIsInstance(watcher, asyncio.ThreadedChildWatcher)

        self.assertIs(policy._watcher, watcher)
        self.assertIs(watcher, policy.get_child_watcher())

    def test_get_child_watcher_after_set(self):
        policy = self.create_policy()
        watcher = asyncio.FastChildWatcher()

        policy.set_child_watcher(watcher)
        self.assertIs(policy._watcher, watcher)
        self.assertIs(watcher, policy.get_child_watcher())

    def test_get_child_watcher_thread(self):
        # Fetching the watcher from a non-main thread must not attach a loop.

        def f():
            policy.set_event_loop(policy.new_event_loop())

            self.assertIsInstance(policy.get_event_loop(),
                                  asyncio.AbstractEventLoop)
            watcher = policy.get_child_watcher()

            self.assertIsInstance(watcher, asyncio.SafeChildWatcher)
            self.assertIsNone(watcher._loop)

            policy.get_event_loop().close()

        policy = self.create_policy()
        th = threading.Thread(target=f)
        th.start()
        th.join()

    def test_child_watcher_replace_mainloop_existing(self):
        # An attached watcher follows set_event_loop() on the main thread.
        policy = self.create_policy()
        loop = policy.get_event_loop()

        watcher = asyncio.SafeChildWatcher()
        policy.set_child_watcher(watcher)
        watcher.attach_loop(loop)

        self.assertIs(watcher._loop, loop)

        new_loop = policy.new_event_loop()
        policy.set_event_loop(new_loop)

        self.assertIs(watcher._loop, new_loop)

        policy.set_event_loop(None)

        self.assertIs(watcher._loop, None)

        loop.close()
        new_loop.close()
class TestFunctional(unittest.TestCase):
    """Functional add/remove reader & writer checks on a real event loop."""

    def setUp(self):
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)

    def tearDown(self):
        self.loop.close()
        asyncio.set_event_loop(None)

    def test_add_reader_invalid_argument(self):
        # Non-file objects must be rejected with ValueError.
        def assert_raises():
            return self.assertRaisesRegex(ValueError, r'Invalid file object')

        cb = lambda: None

        with assert_raises():
            self.loop.add_reader(object(), cb)
        with assert_raises():
            self.loop.add_writer(object(), cb)
        with assert_raises():
            self.loop.remove_reader(object())
        with assert_raises():
            self.loop.remove_writer(object())

    def test_add_reader_or_writer_transport_fd(self):
        # An fd owned by a transport cannot be (un)registered directly.
        def assert_raises():
            return self.assertRaisesRegex(
                RuntimeError,
                r'File descriptor .* is used by transport')

        async def runner():
            tr, pr = await self.loop.create_connection(
                lambda: asyncio.Protocol(), sock=rsock)
            try:
                cb = lambda: None

                with assert_raises():
                    self.loop.add_reader(rsock, cb)
                with assert_raises():
                    self.loop.add_reader(rsock.fileno(), cb)

                with assert_raises():
                    self.loop.remove_reader(rsock)
                with assert_raises():
                    self.loop.remove_reader(rsock.fileno())

                with assert_raises():
                    self.loop.add_writer(rsock, cb)
                with assert_raises():
                    self.loop.add_writer(rsock.fileno(), cb)

                with assert_raises():
                    self.loop.remove_writer(rsock)
                with assert_raises():
                    self.loop.remove_writer(rsock.fileno())
            finally:
                tr.close()

        rsock, wsock = socket.socketpair()
        try:
            self.loop.run_until_complete(runner())
        finally:
            rsock.close()
            wsock.close()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| true | true |
f7f994ba76665067e1ccbdba2cef4940b951dc04 | 2,370 | py | Python | BurpExtensions/modules/webcommon.py | afollett/mywebappscripts | 83dc8984cd4d4216578aee506278a2e6e9473ce9 | [
"MIT"
] | 129 | 2015-02-05T04:45:54.000Z | 2021-11-27T06:54:17.000Z | BurpExtensions/modules/webcommon.py | YarboJanks/mywebappscripts | 83dc8984cd4d4216578aee506278a2e6e9473ce9 | [
"MIT"
] | 1 | 2016-03-21T09:22:34.000Z | 2016-03-21T10:57:36.000Z | BurpExtensions/modules/webcommon.py | YarboJanks/mywebappscripts | 83dc8984cd4d4216578aee506278a2e6e9473ce9 | [
"MIT"
] | 60 | 2015-02-20T15:49:46.000Z | 2021-03-01T01:44:04.000Z | import re
def get_host_header_from_request(self,requestInfo):
    """Return ['Host', '<value>'] for the request's Host header, or None.

    requestInfo is a Burp IRequestInfo-like object exposing getHeaders().
    """
    headers = requestInfo.getHeaders()
    header_name = 'Host:'
    # BUG FIX: the old pattern '^.*Host:.*' matched the name anywhere in a
    # line, so e.g. 'X-Forwarded-Host:' could be returned instead of 'Host:'.
    # Anchoring at the start of the line matches only the real header.
    regex = re.compile('^%s' % header_name, re.IGNORECASE)
    for header in headers:
        if regex.match(header):
            # Split into ['Host', '<value>'] on the ': ' separator.
            return header.split(': ')
    return None
def extract_directory(self,callbacks,url):
    """Return the directory part of *url* (everything before the last '/').

    A URL without any '/' is returned unchanged.
    """
    parts = url.split('/')
    if len(parts) > 1:
        # Drop the final segment (the file/resource name).
        parts.pop(-1)
    # Idiomatic join replaces the old manual '/'-prefixed accumulation loop.
    return '/'.join(parts)
def extract_urls(self,callbacks,url):
    """Return *url* unchanged.

    NOTE(review): the original split/re-accumulate loop was an identity
    transform ('/'.join(url.split('/')) == url); kept in join form so the
    intent (path normalization hook?) stays visible for future work.
    """
    return '/'.join(url.split('/'))
def get_referer_header_from_request(self,requestInfo):
    """Return ['Referer', '<value>'] for the request's Referer header, or None."""
    headers = requestInfo.getHeaders()
    header_name = 'Referer:'
    # BUG FIX: anchor at line start; '^.*Referer:.*' also matched lines that
    # merely contained the word 'Referer:' somewhere in their value.
    regex = re.compile('^%s' % header_name, re.IGNORECASE)
    for header in headers:
        if regex.match(header):
            return header.split(': ')
    return None
def get_setcookie_from_header(self,responseInfo):
    """Return ['Set-Cookie', '<value>'] for the FIRST Set-Cookie header, or None.

    NOTE(review): responses may carry several Set-Cookie headers; only the
    first one is returned, matching the original behaviour.
    """
    headers = responseInfo.getHeaders()
    header_name = 'Set-Cookie:'
    # BUG FIX: anchored pattern; the old '^.*Set-Cookie:.*' matched the
    # name anywhere in a line.
    regex = re.compile('^%s' % header_name, re.IGNORECASE)
    for header in headers:
        if regex.match(header):
            return header.split(': ')
    return None
def get_response_code_from_headers(self,responseInfo):
    """Return the full list of response header lines.

    NOTE(review): despite the name, this returns every header line (the
    status line is typically element 0); callers parse the code themselves.
    """
    return responseInfo.getHeaders()
def get_location_from_headers(self,responseInfo):
    """Return ['Location', '<value>'] for the response's Location header, or None."""
    headers = responseInfo.getHeaders()
    header_name = 'Location:'
    # BUG FIX: anchored pattern instead of '^.*Location:.*', which also
    # matched e.g. 'Content-Location:' headers.
    regex = re.compile('^%s' % header_name, re.IGNORECASE)
    for header in headers:
        if regex.match(header):
            return header.split(': ')
    return None
def get_response_body(self,response_byte_array,responseInfo):
    """Return the response body: the bytes past the header/body offset."""
    offset = responseInfo.getBodyOffset()
    return response_byte_array[offset:]
def get_banner_from_response(self,responseInfo):
    """Return the first fingerprinting header line found, or None.

    Header names are tried in priority order ('Server:' first); the full
    raw header line is returned, as before.
    """
    headers = responseInfo.getHeaders()
    header_name = ['Server:','X-AspNet-Version:','X-AspNetMvc-Version:','X-Powered-By:','X-Requested-With:','X-UA-Compatible:','Via:']
    for name in header_name:
        # BUG FIX: anchor at line start; the old '^.*%s.*' pattern also
        # matched the name appearing anywhere inside an unrelated header.
        regex = re.compile('^%s' % name, re.IGNORECASE)
        for header in headers:
            if regex.match(header):
                return header
    return None
| 23.009709 | 130 | 0.666245 | import re
def get_host_header_from_request(self,requestInfo):
    """Return ['Host', '<value>'] for the request's Host header, or None."""
    headers = requestInfo.getHeaders()
    header_name = 'Host:'
    # BUG FIX: anchored pattern; '^.*Host:.*' also matched headers such as
    # 'X-Forwarded-Host:' anywhere in the line.
    regex = re.compile('^%s' % header_name, re.IGNORECASE)
    for header in headers:
        if regex.match(header):
            return header.split(': ')
    return None
def extract_directory(self,callbacks,url):
    """Return the directory part of *url* (everything before the last '/')."""
    parts = url.split('/')
    if len(parts) > 1:
        # Drop the final segment (the file/resource name).
        parts.pop(-1)
    # Idiomatic join replaces the manual accumulation loop.
    return '/'.join(parts)
def extract_urls(self,callbacks,url):
    """Return *url* unchanged (the original loop was an identity transform)."""
    return '/'.join(url.split('/'))
def get_referer_header_from_request(self,requestInfo):
    """Return ['Referer', '<value>'] for the request's Referer header, or None."""
    headers = requestInfo.getHeaders()
    header_name = 'Referer:'
    # BUG FIX: anchored pattern instead of matching the name anywhere.
    regex = re.compile('^%s' % header_name, re.IGNORECASE)
    for header in headers:
        if regex.match(header):
            return header.split(': ')
    return None
def get_setcookie_from_header(self,responseInfo):
    """Return ['Set-Cookie', '<value>'] for the FIRST Set-Cookie header, or None."""
    headers = responseInfo.getHeaders()
    header_name = 'Set-Cookie:'
    # BUG FIX: anchored pattern instead of matching the name anywhere.
    regex = re.compile('^%s' % header_name, re.IGNORECASE)
    for header in headers:
        if regex.match(header):
            return header.split(': ')
    return None
def get_response_code_from_headers(self,responseInfo):
    """Return all response header lines (status line included as element 0)."""
    return responseInfo.getHeaders()
def get_location_from_headers(self,responseInfo):
    """Return ['Location', '<value>'] for the response's Location header, or None."""
    headers = responseInfo.getHeaders()
    header_name = 'Location:'
    # BUG FIX: anchored pattern; the old one also matched 'Content-Location:'.
    regex = re.compile('^%s' % header_name, re.IGNORECASE)
    for header in headers:
        if regex.match(header):
            return header.split(': ')
    return None
def get_response_body(self,response_byte_array,responseInfo):
    """Return the response body bytes following the header/body offset."""
    body_start = responseInfo.getBodyOffset()
    return response_byte_array[body_start:]
def get_banner_from_response(self,responseInfo):
    """Return the first fingerprinting header line found (priority order), or None."""
    headers = responseInfo.getHeaders()
    header_name = ['Server:','X-AspNet-Version:','X-AspNetMvc-Version:','X-Powered-By:','X-Requested-With:','X-UA-Compatible:','Via:']
    for name in header_name:
        # BUG FIX: anchored pattern instead of matching the name anywhere.
        regex = re.compile('^%s' % name, re.IGNORECASE)
        for header in headers:
            if regex.match(header):
                return header
    return None
| true | true |
f7f9951a302081ac98159ad5fbfd7f3467802a8a | 3,222 | py | Python | phigaro/misc/vis.py | bobeobibo/phigaro | 342a3454bb5324426b25feb4a4d1f640b58bf8f8 | [
"MIT"
] | 31 | 2019-03-06T14:33:37.000Z | 2022-03-08T07:16:07.000Z | phigaro/misc/vis.py | bobeobibo/phigaro | 342a3454bb5324426b25feb4a4d1f640b58bf8f8 | [
"MIT"
] | 27 | 2019-05-17T05:06:58.000Z | 2022-03-27T00:38:56.000Z | phigaro/misc/vis.py | bobeobibo/phigaro | 342a3454bb5324426b25feb4a4d1f640b58bf8f8 | [
"MIT"
] | 12 | 2017-08-23T12:48:38.000Z | 2021-06-24T00:57:22.000Z | import numpy as np
from plotly import tools
from plotly.graph_objs import Bar
import plotly.offline as py
from phigaro.finder.v2 import calc_scores
def _make_coords_colors(data_len, real_phage_coords):
colors = np.zeros(data_len)
for begin, end in real_phage_coords:
for i in range(begin, end + 1):
colors[i] = 1
return colors
def plot_scores(scores, title, real_phage_coords=None):
    """Bar trace of the scores (offset by 0.1 so zeros stay visible),
    optionally colored by the real phage intervals."""
    colors = None
    if real_phage_coords is not None:
        colors = _make_coords_colors(len(scores), real_phage_coords)
    return Bar(
        x=np.arange(len(scores)),
        y=scores + 0.1,
        name=title,
        marker=dict(color=colors),
    )
def plot_phage(phage, title):
    """Bar trace of the phage mask, offset by 0.1 so zeros stay visible."""
    heights = [value + 0.1 for value in phage]
    positions = np.arange(len(phage))
    return Bar(x=positions, y=heights, marker=dict(color='black'), name=title)
def _make_rects(coords, ymin, ymax, fillcolor, opacity):
return [
dict(
type='rect',
xref='x',
yref='y',
x0=x_begin,
y0=ymin,
x1=x_end,
y1=ymax,
fillcolor=fillcolor,
opacity=opacity,
line={'width': 0},
)
for (x_begin, x_end) in coords
]
def plot_scores_and_phage(
    phage, window_len, score_func=None, scan_func=None, real_phage_coords=None
):
    """Render sliding-window scores above the phage mask as stacked subplots.

    :param phage: per-position phage indicator sequence.
    :param window_len: sliding window length for calc_scores.
    :param score_func: scoring function (required; see note below).
    :param scan_func: optional function mapping scores -> predicted ranges.
    :param real_phage_coords: optional list of known (begin, end) intervals.
    """
    if score_func is None:
        # BUG FIX: the old fallback `score_func or score_tri` referenced an
        # undefined name and raised NameError; fail with a clear message.
        raise ValueError('score_func must be provided')
    fig = tools.make_subplots(rows=2, cols=1, shared_xaxes=True)
    title = 'Scores: window: {}'.format(window_len)
    scores = np.array(calc_scores(phage, window_len, score_func))
    ranges = []
    if scan_func is not None:
        ranges = scan_func(scores)
    score_fig = plot_scores(scores, title, real_phage_coords=None)
    phage_fig = plot_phage(phage, 'Phage')
    fig.append_trace(score_fig, 1, 1)
    fig.append_trace(phage_fig, 2, 1)
    ymax = window_len / 2
    if real_phage_coords is not None or ranges:
        fig['layout'].update(
            dict(
                # BUG FIX: _make_rects takes (coords, ymin, ymax, fillcolor,
                # opacity); the old calls omitted ymin and raised TypeError.
                shapes=_make_rects(ranges, 0, ymax, 'rgb(50, 171, 96)', 0.5)
                + _make_rects(real_phage_coords or [], 0, ymax, '#ff0000', 0.5)
            )
        )
    py.iplot(fig)
def plot_scores_and_phage2(
    phage,
    scores,
    found_phage_coords,
    real_phage_coords=None,
    filename='filename',
):
    """Plot *scores* and the phage track in two stacked subplots, marking
    found (blue) vs. known (yellow) phage intervals as rectangles.

    Writes the figure to '<filename>.html'.
    """
    # Bug fix: the default None crashed at len()/iteration below; the
    # normalization had been commented out.
    real_phage_coords = real_phage_coords or []
    fig = tools.make_subplots(rows=2, cols=1, shared_xaxes=True)
    title = 'Scores'
    score_fig = plot_scores(scores, title, real_phage_coords=None)
    phage_fig = plot_phage(phage, 'Phage')
    fig.append_trace(score_fig, 1, 1)
    fig.append_trace(phage_fig, 2, 1)
    ymax = max(scores)
    if (len(real_phage_coords) + len(found_phage_coords)) != 0:
        fig['layout'].update(
            dict(
                shapes=_make_rects(
                    found_phage_coords, ymax * 0.5, ymax * 0.75, '#0000ff', 0.5
                )
                + _make_rects(
                    real_phage_coords, ymax * 0.75, ymax, '#aaaa00', 0.5
                )
            )
        )
    py.plot(fig, filename=filename + '.html')
| 27.305085 | 79 | 0.606145 | import numpy as np
from plotly import tools
from plotly.graph_objs import Bar
import plotly.offline as py
from phigaro.finder.v2 import calc_scores
def _make_coords_colors(data_len, real_phage_coords):
colors = np.zeros(data_len)
for begin, end in real_phage_coords:
for i in range(begin, end + 1):
colors[i] = 1
return colors
def plot_scores(scores, title, real_phage_coords=None):
    """Build a plotly Bar trace of per-position *scores*; bars inside the
    optional (begin, end) intervals get a distinct marker color."""
    indices = np.arange(len(scores))
    colors = None
    if real_phage_coords is not None:
        colors = _make_coords_colors(len(scores), real_phage_coords)
    # The 0.1 offset keeps zero scores visible as thin bars.
    data = Bar(
        x=indices, y=scores + 0.1, name=title, marker=dict(color=colors,)
    )
    return data
def plot_phage(phage, title):
    """Build a black plotly Bar trace of the raw phage track."""
    ind = np.arange(len(phage))
    # 0.1 offset so zero positions still render as visible bars.
    int_phage = [c + 0.1 for c in phage]
    data = Bar(x=ind, y=int_phage, marker=dict(color='black',), name=title)
    return data
def _make_rects(coords, ymin, ymax, fillcolor, opacity):
    """Build plotly 'rect' layout shapes, one per (x_begin, x_end) pair,
    spanning ymin..ymax vertically with no border line."""
    return [
        dict(
            type='rect',
            xref='x',
            yref='y',
            x0=x_begin,
            y0=ymin,
            x1=x_end,
            y1=ymax,
            fillcolor=fillcolor,
            opacity=opacity,
            line={'width': 0},
        )
        for (x_begin, x_end) in coords
    ]
def plot_scores_and_phage(
    phage, window_len, score_func=None, scan_func=None, real_phage_coords=None
):
    """Plot windowed scores (top) and the raw phage track (bottom).

    NOTE(review): the fallback name `score_tri` is not defined in this
    module, so calling without score_func raises NameError — confirm the
    intended default.
    """
    score_func = score_func or score_tri
    fig = tools.make_subplots(rows=2, cols=1, shared_xaxes=True)
    title = 'Scores: window: {}'.format(window_len)
    scores = np.array(calc_scores(phage, window_len, score_func))
    ranges = []
    if scan_func is not None:
        ranges = scan_func(scores)
    score_fig = plot_scores(scores, title, real_phage_coords=None)
    phage_fig = plot_phage(phage, 'Phage')
    fig.append_trace(score_fig, 1, 1)
    fig.append_trace(phage_fig, 2, 1)
    ymax = window_len / 2
    if real_phage_coords is not None or ranges:
        # Bug fix: _make_rects takes (coords, ymin, ymax, fillcolor,
        # opacity); the ymin argument was missing, so this call raised
        # TypeError.  Rectangles now span the 0..ymax band.
        fig['layout'].update(
            dict(
                shapes=_make_rects(ranges, 0, ymax, 'rgb(50, 171, 96)', 0.5)
                + _make_rects(real_phage_coords or [], 0, ymax, '#ff0000', 0.5)
            )
        )
    py.iplot(fig)
def plot_scores_and_phage2(
    phage,
    scores,
    found_phage_coords,
    real_phage_coords=None,
    filename='filename',
):
    """Plot *scores* and the phage track in two stacked subplots, marking
    found (blue) vs. known (yellow) phage intervals; writes
    '<filename>.html'."""
    # Bug fix: the default None crashed at len()/iteration below.
    real_phage_coords = real_phage_coords or []
    fig = tools.make_subplots(rows=2, cols=1, shared_xaxes=True)
    title = 'Scores'
    score_fig = plot_scores(scores, title, real_phage_coords=None)
    phage_fig = plot_phage(phage, 'Phage')
    fig.append_trace(score_fig, 1, 1)
    fig.append_trace(phage_fig, 2, 1)
    ymax = max(scores)
    if (len(real_phage_coords) + len(found_phage_coords)) != 0:
        fig['layout'].update(
            dict(
                shapes=_make_rects(
                    found_phage_coords, ymax * 0.5, ymax * 0.75, '#0000ff', 0.5
                )
                + _make_rects(
                    real_phage_coords, ymax * 0.75, ymax, '#aaaa00', 0.5
                )
            )
        )
    py.plot(fig, filename=filename + '.html')
| true | true |
f7f9953808294f5b02d2ec4faf583a807d423e7a | 4,693 | py | Python | grr/lib/flows/cron/filestore_stats.py | theGreenJedi/grr | d9e11e304dc299d49c76b7fdf6fdbfcd4b8eec39 | [
"Apache-2.0"
] | null | null | null | grr/lib/flows/cron/filestore_stats.py | theGreenJedi/grr | d9e11e304dc299d49c76b7fdf6fdbfcd4b8eec39 | [
"Apache-2.0"
] | null | null | null | grr/lib/flows/cron/filestore_stats.py | theGreenJedi/grr | d9e11e304dc299d49c76b7fdf6fdbfcd4b8eec39 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Filestore stats crons."""
from grr.lib import aff4
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import stats as stats_lib
from grr.lib import utils
from grr.lib.aff4_objects import cronjobs
from grr.lib.aff4_objects import stats as aff4_stats
# pylint: enable=unused-import
class ClassCounter(object):
  """Populates a stats.Graph with counts of each object class.

  ProcessFile tallies one hit per object, keyed by its Python class name;
  Save writes the tallies into the graph and stores it on the stats fd.
  """

  def __init__(self, attribute, title):
    self.attribute = attribute
    self.value_dict = {}
    self.graph = self.attribute(title=title)

  def ProcessFile(self, fd):
    key = fd.__class__.__name__
    if key in self.value_dict:
      self.value_dict[key] += 1
    else:
      self.value_dict[key] = 1

  def Save(self, fd):
    for label, total in self.value_dict.items():
      self.graph.Append(label=label, y_value=total)
    fd.Set(self.attribute, self.graph)
class ClassFileSizeCounter(ClassCounter):
  """Accumulates total file size per object class, reported in GB."""

  GB = 1024 * 1024 * 1024

  def ProcessFile(self, fd):
    key = fd.__class__.__name__
    previous = self.value_dict.get(key, 0)
    self.value_dict[key] = previous + fd.Get(fd.Schema.SIZE)

  def Save(self, fd):
    # Totals are recorded in GB rather than raw bytes.
    for label, total_bytes in self.value_dict.items():
      self.graph.Append(label=label, y_value=total_bytes / float(self.GB))
    fd.Set(self.attribute, self.graph)
class GraphDistribution(stats_lib.Distribution):
  """Abstract histogram builder backed by a stats Distribution."""

  _bins = []

  def __init__(self, attribute, title):
    self.attribute = attribute
    self.graph = self.attribute(title=title)
    super(GraphDistribution, self).__init__(bins=self._bins)

  def ProcessFile(self, fd):
    # Subclasses decide which value of fd gets recorded.
    raise NotImplementedError()

  def Save(self, fd):
    for bin_start, height in sorted(self.bins_heights.items()):
      # Negative keys are skipped (presumably the underflow bucket --
      # confirm in stats_lib).
      if bin_start < 0:
        continue
      self.graph.Append(x_value=int(bin_start), y_value=height)
    fd.Set(self.attribute, self.graph)
class FileSizeHistogram(GraphDistribution):
  """Graph filesize."""

  # Bin edges in bytes, spanning tiny files up to 10 GB.
  _bins = [0, 2, 50, 100, 1e3, 10e3, 100e3, 500e3, 1e6, 5e6, 10e6, 50e6, 100e6,
           500e6, 1e9, 5e9, 10e9]

  def ProcessFile(self, fd):
    # Record each file's size attribute into the distribution.
    self.Record(fd.Get(fd.Schema.SIZE))
class ClientCountHistogram(GraphDistribution):
  """Graph the number of files that are found on 0, 1, 5...etc clients."""

  _bins = [0, 1, 5, 10, 20, 50, 100]

  def ProcessFile(self, fd):
    # The same file can be in multiple locations on the one client so we use a
    # set to kill the dups.
    clients = set()
    for urn in fd.Query("aff4:/C"):
      client, _ = urn.Split(2)
      clients.add(client)
    # Record how many distinct clients reference this file.
    self.Record(len(clients))
class FilestoreStatsCronFlow(cronjobs.SystemCronFlow):
  """Build statistics about the filestore."""
  frequency = rdfvalue.Duration("1w")
  lifetime = rdfvalue.Duration("1d")
  HASH_PATH = "aff4:/files/hash/generic/sha256"
  FILESTORE_STATS_URN = rdfvalue.RDFURN("aff4:/stats/FileStoreStats")
  # How many hash entries to open per MultiOpen batch.
  OPEN_FILES_LIMIT = 500

  def _CreateConsumers(self):
    # One consumer per statistic written to the FilestoreStats object.
    self.consumers = [ClassCounter(self.stats.Schema.FILESTORE_FILETYPES,
                                   "Number of files in the filestore by type"),
                      ClassFileSizeCounter(
                          self.stats.Schema.FILESTORE_FILETYPES_SIZE,
                          "Total filesize (GB) files in the filestore by type"),
                      FileSizeHistogram(
                          self.stats.Schema.FILESTORE_FILESIZE_HISTOGRAM,
                          "Filesize distribution in bytes"),
                      ClientCountHistogram(
                          self.stats.Schema.FILESTORE_CLIENTCOUNT_HISTOGRAM,
                          "Number of files found on X clients")]

  @flow.StateHandler()
  def Start(self):
    """Feed every hash-store entry through the stat consumers."""
    self.stats = aff4.FACTORY.Create(self.FILESTORE_STATS_URN,
                                     aff4_stats.FilestoreStats,
                                     mode="w",
                                     token=self.token)
    self._CreateConsumers()
    hashes = list(aff4.FACTORY.Open(self.HASH_PATH,
                                    token=self.token).ListChildren())

    try:
      for urns in utils.Grouper(hashes, self.OPEN_FILES_LIMIT):
        for fd in aff4.FACTORY.MultiOpen(urns,
                                         mode="r",
                                         token=self.token,
                                         age=aff4.NEWEST_TIME):
          for consumer in self.consumers:
            consumer.ProcessFile(fd)
        # Signal liveness after each batch so the cron job is not killed.
        self.HeartBeat()
    finally:
      # Persist whatever was collected even if processing aborts mid-way.
      for consumer in self.consumers:
        consumer.Save(self.stats)
      self.stats.Close()
| 32.365517 | 80 | 0.622843 |
from grr.lib import aff4
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import stats as stats_lib
from grr.lib import utils
from grr.lib.aff4_objects import cronjobs
from grr.lib.aff4_objects import stats as aff4_stats
class ClassCounter(object):
  """Populates a stats.Graph with counts of each object class."""

  def __init__(self, attribute, title):
    self.attribute = attribute
    self.value_dict = {}
    self.graph = self.attribute(title=title)

  def ProcessFile(self, fd):
    # Tally one hit for fd's Python class name.
    classname = fd.__class__.__name__
    self.value_dict[classname] = self.value_dict.get(classname, 0) + 1

  def Save(self, fd):
    # Flush the tallies into the graph and store it on the stats fd.
    for classname, count in self.value_dict.items():
      self.graph.Append(label=classname, y_value=count)
    fd.Set(self.attribute, self.graph)
class ClassFileSizeCounter(ClassCounter):
  """Count total filesize by class type."""

  GB = 1024 * 1024 * 1024

  def ProcessFile(self, fd):
    # Accumulate the file's size attribute per class name.
    classname = fd.__class__.__name__
    self.value_dict[classname] = self.value_dict.get(classname,
                                                     0) + fd.Get(fd.Schema.SIZE)

  def Save(self, fd):
    # Report totals in GB rather than raw bytes.
    for classname, count in self.value_dict.items():
      self.graph.Append(label=classname, y_value=count / float(self.GB))
    fd.Set(self.attribute, self.graph)
class GraphDistribution(stats_lib.Distribution):
  """Abstract class for building histograms."""

  _bins = []

  def __init__(self, attribute, title):
    self.attribute = attribute
    self.graph = self.attribute(title=title)
    super(GraphDistribution, self).__init__(bins=self._bins)

  def ProcessFile(self, fd):
    # Subclasses decide which value of fd gets recorded.
    raise NotImplementedError()

  def Save(self, fd):
    # Emit (bin, height) pairs in bin order; negative bin keys are skipped
    # (presumably the underflow bucket -- confirm in stats_lib).
    for x, y in sorted(self.bins_heights.items()):
      if x >= 0:
        self.graph.Append(x_value=int(x), y_value=y)
    fd.Set(self.attribute, self.graph)
class FileSizeHistogram(GraphDistribution):
  """Histogram of file sizes."""

  # Bin edges in bytes, spanning tiny files up to 10 GB.
  _bins = [0, 2, 50, 100, 1e3, 10e3, 100e3, 500e3, 1e6, 5e6, 10e6, 50e6, 100e6,
           500e6, 1e9, 5e9, 10e9]

  def ProcessFile(self, fd):
    self.Record(fd.Get(fd.Schema.SIZE))
class ClientCountHistogram(GraphDistribution):
  """Histogram of how many clients each file appears on."""

  _bins = [0, 1, 5, 10, 20, 50, 100]

  def ProcessFile(self, fd):
    # The same file can appear in several paths on one client, so collect
    # distinct client ids in a set before recording the count.
    clients = set()
    for urn in fd.Query("aff4:/C"):
      client, _ = urn.Split(2)
      clients.add(client)
    self.Record(len(clients))
class FilestoreStatsCronFlow(cronjobs.SystemCronFlow):
  """Build statistics about the filestore."""
  frequency = rdfvalue.Duration("1w")
  lifetime = rdfvalue.Duration("1d")
  HASH_PATH = "aff4:/files/hash/generic/sha256"
  FILESTORE_STATS_URN = rdfvalue.RDFURN("aff4:/stats/FileStoreStats")
  # How many hash entries to open per MultiOpen batch.
  OPEN_FILES_LIMIT = 500

  def _CreateConsumers(self):
    # One consumer per statistic written to the FilestoreStats object.
    self.consumers = [ClassCounter(self.stats.Schema.FILESTORE_FILETYPES,
                                   "Number of files in the filestore by type"),
                      ClassFileSizeCounter(
                          self.stats.Schema.FILESTORE_FILETYPES_SIZE,
                          "Total filesize (GB) files in the filestore by type"),
                      FileSizeHistogram(
                          self.stats.Schema.FILESTORE_FILESIZE_HISTOGRAM,
                          "Filesize distribution in bytes"),
                      ClientCountHistogram(
                          self.stats.Schema.FILESTORE_CLIENTCOUNT_HISTOGRAM,
                          "Number of files found on X clients")]

  @flow.StateHandler()
  def Start(self):
    """Feed every hash-store entry through the stat consumers."""
    self.stats = aff4.FACTORY.Create(self.FILESTORE_STATS_URN,
                                     aff4_stats.FilestoreStats,
                                     mode="w",
                                     token=self.token)
    self._CreateConsumers()
    hashes = list(aff4.FACTORY.Open(self.HASH_PATH,
                                    token=self.token).ListChildren())
    try:
      for urns in utils.Grouper(hashes, self.OPEN_FILES_LIMIT):
        for fd in aff4.FACTORY.MultiOpen(urns,
                                         mode="r",
                                         token=self.token,
                                         age=aff4.NEWEST_TIME):
          for consumer in self.consumers:
            consumer.ProcessFile(fd)
        # Signal liveness after each batch so the cron job is not killed.
        self.HeartBeat()
    finally:
      # Persist whatever was collected even if processing aborts mid-way.
      for consumer in self.consumers:
        consumer.Save(self.stats)
      self.stats.Close()
| true | true |
f7f9967e6e3514f8f4b1ce4db5f80834c49af67c | 2,806 | py | Python | SimpleGP/tests/test_egp.py | mgraffg/simplegp | 7e4639e3ac76571a4e67669cad6e8e775b3fc345 | [
"Apache-2.0"
] | 2 | 2015-03-18T17:26:20.000Z | 2019-03-18T17:28:16.000Z | SimpleGP/tests/test_egp.py | mgraffg/simplegp | 7e4639e3ac76571a4e67669cad6e8e775b3fc345 | [
"Apache-2.0"
] | 1 | 2015-04-19T17:02:49.000Z | 2015-07-21T18:48:34.000Z | SimpleGP/tests/test_egp.py | mgraffg/simplegp | 7e4639e3ac76571a4e67669cad6e8e775b3fc345 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 Mario Graff Guerrero
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from SimpleGP import EGPS, SparseArray
class TestSGPS(object):
    """Tests for the EGPS model.

    NOTE: Python 2 only -- test_cache indexes the results of map/filter
    and uses a tuple-parameter lambda, neither of which works on Python 3.
    """

    def __init__(self):
        # Regression target: y = 0.2*x**2 - 0.3*x + 0.2 sampled on [-10, 10].
        x = np.linspace(-10, 10, 100)
        pol = np.array([0.2, -0.3, 0.2])
        self._pol = pol
        X = np.vstack((x**2, x, np.ones(x.shape[0])))
        self._y = (X.T * pol).sum(axis=1)
        self._x = x[:, np.newaxis]

    def test_EGPS(self):
        # Fitting with a validation set should populate early_stopping
        # with SparseArray predictions.
        gp = EGPS(generations=3).fit(self._x,
                                     self._y, test=self._x,
                                     test_y=self._y)
        assert isinstance(gp.early_stopping[-1], SparseArray)

    def test_EGPS_wo_st(self):
        # Same as test_EGPS but with subtree caching (use_st) disabled.
        gp = EGPS(generations=3, use_st=0).fit(self._x,
                                               self._y, test=self._x,
                                               test_y=self._y)
        assert isinstance(gp.early_stopping[-1], SparseArray)

    def test_cache(self):
        # Exercise the subtree-evaluation cache around one crossover:
        # after replacing individual 0 with the offspring, exactly one
        # cached subtree should be invalidated (asserted below).
        gp = EGPS(seed=0, nrandom=0,
                  use_st=1, generations=3).fit(self._x,
                                               self._y)
        while True:
            if gp.pre_crossover() and gp._xo_father1 != 0 and\
               gp._xo_father2 != 0:
                res = gp.crossover(gp.population[gp._xo_father1],
                                   gp.population[gp._xo_father2])
                if gp._ind_eval_st is not None:
                    break
        f1 = gp._xo_father1
        gp.kill_ind(0, res)
        assert np.all(gp._eval_A_st[f1] == gp._eval_A_st[0])
        f1_o = map(lambda x: gp._tree.get_pos_arg(gp.population[f1], 0, x),
                   range(gp._ntrees))
        trees = map(lambda x: gp._eval_st[f1][x], f1_o)
        s_o = map(lambda x: gp._tree.get_pos_arg(gp.population[0], 0, x),
                  range(gp._ntrees))
        s_trees = map(lambda x: gp._eval_st[0][x], s_o)
        assert len(filter(lambda x: x is None, s_trees)) == 1
        gp.eval(0)
        np.set_printoptions(precision=3)
        r = filter(lambda x: x[1] is None, enumerate(s_trees))[0][0]
        assert (gp._eval_A_st[f1] - gp._eval_A_st[0])[r, r] != 0
        trees2 = gp._eval.get_output()
        r = filter(lambda (x, y): x == y, zip(trees, trees2))
        assert len(r) == gp._ntrees - 1
| 41.264706 | 75 | 0.561654 |
import numpy as np
from SimpleGP import EGPS, SparseArray
class TestSGPS(object):
    """Tests for the EGPS model.

    NOTE: Python 2 only -- test_cache indexes the results of map/filter
    and uses a tuple-parameter lambda, neither of which works on Python 3.
    """

    def __init__(self):
        # Regression target: y = 0.2*x**2 - 0.3*x + 0.2 sampled on [-10, 10].
        x = np.linspace(-10, 10, 100)
        pol = np.array([0.2, -0.3, 0.2])
        self._pol = pol
        X = np.vstack((x**2, x, np.ones(x.shape[0])))
        self._y = (X.T * pol).sum(axis=1)
        self._x = x[:, np.newaxis]

    def test_EGPS(self):
        # Fitting with a validation set should populate early_stopping
        # with SparseArray predictions.
        gp = EGPS(generations=3).fit(self._x,
                                     self._y, test=self._x,
                                     test_y=self._y)
        assert isinstance(gp.early_stopping[-1], SparseArray)

    def test_EGPS_wo_st(self):
        # Same as test_EGPS but with subtree caching (use_st) disabled.
        gp = EGPS(generations=3, use_st=0).fit(self._x,
                                               self._y, test=self._x,
                                               test_y=self._y)
        assert isinstance(gp.early_stopping[-1], SparseArray)

    def test_cache(self):
        # Exercise the subtree-evaluation cache around one crossover:
        # after replacing individual 0 with the offspring, exactly one
        # cached subtree should be invalidated (asserted below).
        gp = EGPS(seed=0, nrandom=0,
                  use_st=1, generations=3).fit(self._x,
                                               self._y)
        while True:
            if gp.pre_crossover() and gp._xo_father1 != 0 and\
               gp._xo_father2 != 0:
                res = gp.crossover(gp.population[gp._xo_father1],
                                   gp.population[gp._xo_father2])
                if gp._ind_eval_st is not None:
                    break
        f1 = gp._xo_father1
        gp.kill_ind(0, res)
        assert np.all(gp._eval_A_st[f1] == gp._eval_A_st[0])
        f1_o = map(lambda x: gp._tree.get_pos_arg(gp.population[f1], 0, x),
                   range(gp._ntrees))
        trees = map(lambda x: gp._eval_st[f1][x], f1_o)
        s_o = map(lambda x: gp._tree.get_pos_arg(gp.population[0], 0, x),
                  range(gp._ntrees))
        s_trees = map(lambda x: gp._eval_st[0][x], s_o)
        assert len(filter(lambda x: x is None, s_trees)) == 1
        gp.eval(0)
        np.set_printoptions(precision=3)
        r = filter(lambda x: x[1] is None, enumerate(s_trees))[0][0]
        assert (gp._eval_A_st[f1] - gp._eval_A_st[0])[r, r] != 0
        trees2 = gp._eval.get_output()
        r = filter(lambda (x, y): x == y, zip(trees, trees2))
        assert len(r) == gp._ntrees - 1
| false | true |
f7f996ca97a20b576548371acc14f3991fd12df0 | 1,763 | py | Python | users/rest_apis/forms.py | sharif-42/Personal_Website | 7c385bec272ec7b5c816eab92e3b5bfb8cd80016 | [
"MIT"
] | null | null | null | users/rest_apis/forms.py | sharif-42/Personal_Website | 7c385bec272ec7b5c816eab92e3b5bfb8cd80016 | [
"MIT"
] | null | null | null | users/rest_apis/forms.py | sharif-42/Personal_Website | 7c385bec272ec7b5c816eab92e3b5bfb8cd80016 | [
"MIT"
] | null | null | null | from django import forms
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from django.core.exceptions import ValidationError
from users.models import User
class UserCreationForm(forms.ModelForm):
    """Admin form for creating a new user.

    Asks for the password twice and stores it hashed via set_password.
    """
    password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
    password2 = forms.CharField(label='Password confirmation', widget=forms.PasswordInput)

    class Meta:
        model = User
        fields = ('email',)

    def clean_password2(self):
        """Validate that both password entries match."""
        first = self.cleaned_data.get("password1")
        second = self.cleaned_data.get("password2")
        mismatch = first and second and first != second
        if mismatch:
            raise ValidationError("Passwords don't match")
        return second

    def save(self, commit=True):
        """Hash the password before persisting the user."""
        user = super().save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user
class UserChangeForm(forms.ModelForm):
    """A form for updating users. Includes all the fields on
    the user, but replaces the password field with admin's
    password hash display field.
    """
    # Read-only display of the stored hash; never edited directly.
    password = ReadOnlyPasswordHashField()

    class Meta:
        model = User
        fields = ('email', 'password', 'is_active', 'is_admin')

    def clean_password(self):
        # Regardless of what the user provides, return the initial value.
        # This is done here, rather than on the field, because the
        # field does not have access to the initial value.
        return self.initial["password"]
| 34.568627 | 90 | 0.684061 | from django import forms
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from django.core.exceptions import ValidationError
from users.models import User
class UserCreationForm(forms.ModelForm):
    """A form for creating new users; asks for the password twice and
    stores it hashed."""
    password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
    password2 = forms.CharField(label='Password confirmation', widget=forms.PasswordInput)

    class Meta:
        model = User
        fields = ('email',)

    def clean_password2(self):
        # Check that the two password entries match.
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise ValidationError("Passwords don't match")
        return password2

    def save(self, commit=True):
        # Save the provided password in hashed format
        user = super().save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user
class UserChangeForm(forms.ModelForm):
    """A form for updating users; the password field only displays the
    stored hash and is never edited directly."""
    password = ReadOnlyPasswordHashField()

    class Meta:
        model = User
        fields = ('email', 'password', 'is_active', 'is_admin')

    def clean_password(self):
        # Regardless of what the user provides, return the initial value.
        # This is done here, rather than on the field, because the
        # field does not have access to the initial value
        return self.initial["password"]
| true | true |
f7f996d5c1950581964dcb5d43d27508f902e39c | 15,810 | py | Python | test/core/end2end/gen_build_yaml.py | BusyJay/grpc | 6bf5f833efe2cb9e2ecc14358dd9699cd5d05263 | [
"Apache-2.0"
] | null | null | null | test/core/end2end/gen_build_yaml.py | BusyJay/grpc | 6bf5f833efe2cb9e2ecc14358dd9699cd5d05263 | [
"Apache-2.0"
] | null | null | null | test/core/end2end/gen_build_yaml.py | BusyJay/grpc | 6bf5f833efe2cb9e2ecc14358dd9699cd5d05263 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates the appropriate build.json data for all the end2end tests."""
import yaml
import collections
import hashlib
# Describes what an end2end fixture provides (transport, platforms,
# compression, proxies, ...).  Tests declare their needs via TestOptions
# and compatible() matches the two.
FixtureOptions = collections.namedtuple(
    'FixtureOptions',
    'fullstack includes_proxy dns_resolver name_resolution secure platforms ci_mac tracing exclude_configs exclude_iomgrs large_writes enables_compression supports_compression is_inproc is_http2 supports_proxy_auth supports_write_buffering')
default_unsecure_fixture_options = FixtureOptions(
    True, False, True, True, False, ['windows', 'linux', 'mac', 'posix'], True, False, [], [], True, False, True, False, True, False, True)
# Variants derived from the default by toggling individual capabilities.
socketpair_unsecure_fixture_options = default_unsecure_fixture_options._replace(fullstack=False, dns_resolver=False)
default_secure_fixture_options = default_unsecure_fixture_options._replace(secure=True)
uds_fixture_options = default_unsecure_fixture_options._replace(dns_resolver=False, platforms=['linux', 'mac', 'posix'], exclude_iomgrs=['uv'])
fd_unsecure_fixture_options = default_unsecure_fixture_options._replace(
    dns_resolver=False, fullstack=False, platforms=['linux', 'mac', 'posix'], exclude_iomgrs=['uv'])
inproc_fixture_options = default_unsecure_fixture_options._replace(dns_resolver=False, fullstack=False, name_resolution=False, supports_compression=False, is_inproc=True, is_http2=False, supports_write_buffering=False)
# Maps fixture name to its FixtureOptions.  (The options' 'secure' field
# says whether the fixture requires the security library.)
END2END_FIXTURES = {
    'h2_compress': default_unsecure_fixture_options._replace(enables_compression=True),
    'h2_census': default_unsecure_fixture_options,
    'h2_load_reporting': default_unsecure_fixture_options,
    'h2_fakesec': default_secure_fixture_options._replace(ci_mac=False),
    'h2_fd': fd_unsecure_fixture_options,
    'h2_full': default_unsecure_fixture_options,
    'h2_full+pipe': default_unsecure_fixture_options._replace(
        platforms=['linux'], exclude_iomgrs=['uv']),
    'h2_full+trace': default_unsecure_fixture_options._replace(tracing=True),
    'h2_full+workarounds': default_unsecure_fixture_options,
    'h2_http_proxy': default_unsecure_fixture_options._replace(
        ci_mac=False, exclude_iomgrs=['uv'], supports_proxy_auth=True),
    'h2_oauth2': default_secure_fixture_options._replace(
        ci_mac=False, exclude_iomgrs=['uv']),
    'h2_proxy': default_unsecure_fixture_options._replace(
        includes_proxy=True, ci_mac=False, exclude_iomgrs=['uv']),
    'h2_sockpair_1byte': socketpair_unsecure_fixture_options._replace(
        ci_mac=False, exclude_configs=['msan'], large_writes=False,
        exclude_iomgrs=['uv']),
    'h2_sockpair': socketpair_unsecure_fixture_options._replace(
        ci_mac=False, exclude_iomgrs=['uv']),
    'h2_sockpair+trace': socketpair_unsecure_fixture_options._replace(
        ci_mac=False, tracing=True, large_writes=False, exclude_iomgrs=['uv']),
    'h2_ssl': default_secure_fixture_options,
    'h2_ssl_proxy': default_secure_fixture_options._replace(
        includes_proxy=True, ci_mac=False, exclude_iomgrs=['uv']),
    'h2_uds': uds_fixture_options,
    'inproc': inproc_fixture_options
}
# What a test requires from its fixture, plus per-test scheduling
# metadata (cpu cost, flakiness, excluded iomgrs).
TestOptions = collections.namedtuple(
    'TestOptions',
    'needs_fullstack needs_dns needs_names proxyable secure traceable cpu_cost exclude_iomgrs large_writes flaky allows_compression needs_compression exclude_inproc needs_http2 needs_proxy_auth needs_write_buffering')
default_test_options = TestOptions(False, False, False, True, False, True, 1.0, [], False, False, True, False, False, False, False, False)
connectivity_test_options = default_test_options._replace(needs_fullstack=True)

# Relative cpu_cost used for cheap tests (scheduler hint).
LOWCPU = 0.1
# maps test names to options (see TestOptions; compatible() pairs these
# with fixtures)
END2END_TESTS = {
    'authority_not_supported': default_test_options,
    'bad_hostname': default_test_options._replace(needs_names=True),
    'bad_ping': connectivity_test_options._replace(proxyable=False),
    'binary_metadata': default_test_options._replace(cpu_cost=LOWCPU),
    'resource_quota_server': default_test_options._replace(large_writes=True,
                                                           proxyable=False,
                                                           allows_compression=False),
    'call_creds': default_test_options._replace(secure=True),
    'cancel_after_accept': default_test_options._replace(cpu_cost=LOWCPU),
    'cancel_after_client_done': default_test_options._replace(cpu_cost=LOWCPU),
    'cancel_after_invoke': default_test_options._replace(cpu_cost=LOWCPU),
    'cancel_after_round_trip': default_test_options._replace(cpu_cost=LOWCPU),
    'cancel_before_invoke': default_test_options._replace(cpu_cost=LOWCPU),
    'cancel_in_a_vacuum': default_test_options._replace(cpu_cost=LOWCPU),
    'cancel_with_status': default_test_options._replace(cpu_cost=LOWCPU),
    'compressed_payload': default_test_options._replace(proxyable=False,needs_compression=True),
    'connectivity': connectivity_test_options._replace(needs_names=True,
        proxyable=False, cpu_cost=LOWCPU, exclude_iomgrs=['uv']),
    'default_host': default_test_options._replace(needs_fullstack=True,
                                                  needs_dns=True,needs_names=True),
    'disappearing_server': connectivity_test_options._replace(flaky=True,needs_names=True),
    'empty_batch': default_test_options._replace(cpu_cost=LOWCPU),
    'filter_causes_close': default_test_options._replace(cpu_cost=LOWCPU),
    'filter_call_init_fails': default_test_options,
    'filter_latency': default_test_options._replace(cpu_cost=LOWCPU),
    'graceful_server_shutdown': default_test_options._replace(cpu_cost=LOWCPU,exclude_inproc=True),
    'hpack_size': default_test_options._replace(proxyable=False,
                                                traceable=False,
                                                cpu_cost=LOWCPU),
    'high_initial_seqno': default_test_options._replace(cpu_cost=LOWCPU),
    'idempotent_request': default_test_options,
    'invoke_large_request': default_test_options,
    'keepalive_timeout': default_test_options._replace(proxyable=False,
                                                       cpu_cost=LOWCPU,
                                                       needs_http2=True),
    'large_metadata': default_test_options,
    'max_concurrent_streams': default_test_options._replace(
        proxyable=False, cpu_cost=LOWCPU, exclude_inproc=True),
    'max_connection_age': default_test_options._replace(cpu_cost=LOWCPU,
                                                        exclude_inproc=True),
    'max_connection_idle': connectivity_test_options._replace(
        proxyable=False, exclude_iomgrs=['uv'], cpu_cost=LOWCPU),
    'max_message_length': default_test_options._replace(cpu_cost=LOWCPU),
    'negative_deadline': default_test_options,
    'network_status_change': default_test_options._replace(cpu_cost=LOWCPU),
    'no_logging': default_test_options._replace(traceable=False),
    'no_op': default_test_options,
    'payload': default_test_options,
    'load_reporting_hook': default_test_options,
    'ping_pong_streaming': default_test_options._replace(cpu_cost=LOWCPU),
    'ping': connectivity_test_options._replace(proxyable=False, cpu_cost=LOWCPU),
    'proxy_auth': default_test_options._replace(needs_proxy_auth=True),
    'registered_call': default_test_options,
    'request_with_flags': default_test_options._replace(
        proxyable=False, cpu_cost=LOWCPU),
    'request_with_payload': default_test_options._replace(cpu_cost=LOWCPU),
    'server_finishes_request': default_test_options._replace(cpu_cost=LOWCPU),
    'shutdown_finishes_calls': default_test_options._replace(cpu_cost=LOWCPU),
    'shutdown_finishes_tags': default_test_options._replace(cpu_cost=LOWCPU),
    'simple_cacheable_request': default_test_options._replace(cpu_cost=LOWCPU),
    'stream_compression_compressed_payload': default_test_options._replace(proxyable=False,
                                                                           exclude_inproc=True),
    'stream_compression_payload': default_test_options._replace(exclude_inproc=True),
    'stream_compression_ping_pong_streaming': default_test_options._replace(exclude_inproc=True),
    'simple_delayed_request': connectivity_test_options,
    'simple_metadata': default_test_options,
    'simple_request': default_test_options,
    'streaming_error_response': default_test_options._replace(cpu_cost=LOWCPU),
    'trailing_metadata': default_test_options,
    'workaround_cronet_compression': default_test_options,
    'write_buffering': default_test_options._replace(cpu_cost=LOWCPU,
                                                     needs_write_buffering=True),
    'write_buffering_at_end': default_test_options._replace(cpu_cost=LOWCPU,
                                                            needs_write_buffering=True),
}
def compatible(f, t):
  """Return True if test *t* can run against fixture *f*.

  A pairing is rejected when the test requires a capability the fixture
  lacks, or when the fixture has a property the test cannot tolerate.
  """
  test = END2END_TESTS[t]
  fixture = END2END_FIXTURES[f]
  # (test requires it, fixture provides it) pairs.
  requirements = [
      (test.needs_fullstack, fixture.fullstack),
      (test.needs_dns, fixture.dns_resolver),
      (test.needs_names, fixture.name_resolution),
      (test.large_writes, fixture.large_writes),
      (test.needs_compression, fixture.supports_compression),
      (test.needs_http2, fixture.is_http2),
      (test.needs_proxy_auth, fixture.supports_proxy_auth),
      (test.needs_write_buffering, fixture.supports_write_buffering),
  ]
  for required, provided in requirements:
    if required and not provided:
      return False
  # (test tolerates it, fixture has it) pairs.
  tolerances = [
      (test.proxyable, fixture.includes_proxy),
      (test.traceable, fixture.tracing),
      (test.allows_compression, fixture.enables_compression),
      (not test.exclude_inproc, fixture.is_inproc),
  ]
  for tolerated, present in tolerances:
    if not tolerated and present:
      return False
  return True
def without(l, e):
  """Return a copy of list *l* with the first occurrence of *e* removed."""
  remaining = list(l)
  remaining.remove(e)
  return remaining
def main():
  """Print (as YAML, to stdout) the build description for end2end tests.

  Emits 'libs' (secure/insecure test-body libraries), 'targets' (one test
  binary per fixture, each flavor), 'tests' (every compatible
  fixture/test pairing from compatible()) and a map of test name ->
  needs-security flag.  Python 2 script (uses the print statement).
  """
  sec_deps = [
      'grpc_test_util',
      'grpc',
      'gpr_test_util',
      'gpr'
  ]
  unsec_deps = [
      'grpc_test_util_unsecure',
      'grpc_unsecure',
      'gpr_test_util',
      'gpr'
  ]
  # NOTE: the local name 'json' shadows the stdlib module name (which is
  # not imported here, so this is harmless).
  json = {
      '#': 'generated with test/end2end/gen_build_json.py',
      'libs': [
          {
              'name': 'end2end_tests',
              'build': 'private',
              'language': 'c',
              'secure': True,
              'src': ['test/core/end2end/end2end_tests.c',
                      'test/core/end2end/end2end_test_utils.c'] + [
                  'test/core/end2end/tests/%s.c' % t
                  for t in sorted(END2END_TESTS.keys())],
              'headers': ['test/core/end2end/tests/cancel_test_helpers.h',
                          'test/core/end2end/end2end_tests.h'],
              'deps': sec_deps,
              'vs_proj_dir': 'test/end2end/tests',
          }
      ] + [
          {
              'name': 'end2end_nosec_tests',
              'build': 'private',
              'language': 'c',
              'secure': False,
              'src': ['test/core/end2end/end2end_nosec_tests.c',
                      'test/core/end2end/end2end_test_utils.c'] + [
                  'test/core/end2end/tests/%s.c' % t
                  for t in sorted(END2END_TESTS.keys())
                  if not END2END_TESTS[t].secure],
              'headers': ['test/core/end2end/tests/cancel_test_helpers.h',
                          'test/core/end2end/end2end_tests.h'],
              'deps': unsec_deps,
              'vs_proj_dir': 'test/end2end/tests',
          }
      ],
      # One runnable binary per fixture; mac is dropped from CI platforms
      # for fixtures flagged ci_mac=False.
      'targets': [
          {
              'name': '%s_test' % f,
              'build': 'test',
              'language': 'c',
              'run': False,
              'src': ['test/core/end2end/fixtures/%s.c' % f],
              'platforms': END2END_FIXTURES[f].platforms,
              'ci_platforms': (END2END_FIXTURES[f].platforms
                               if END2END_FIXTURES[f].ci_mac else without(
                                   END2END_FIXTURES[f].platforms, 'mac')),
              'deps': [
                  'end2end_tests'
              ] + sec_deps,
              'vs_proj_dir': 'test/end2end/fixtures',
          }
          for f in sorted(END2END_FIXTURES.keys())
      ] + [
          {
              'name': '%s_nosec_test' % f,
              'build': 'test',
              'language': 'c',
              'secure': False,
              'src': ['test/core/end2end/fixtures/%s.c' % f],
              'run': False,
              'platforms': END2END_FIXTURES[f].platforms,
              'ci_platforms': (END2END_FIXTURES[f].platforms
                               if END2END_FIXTURES[f].ci_mac else without(
                                   END2END_FIXTURES[f].platforms, 'mac')),
              'deps': [
                  'end2end_nosec_tests'
              ] + unsec_deps,
              'vs_proj_dir': 'test/end2end/fixtures',
          }
          for f in sorted(END2END_FIXTURES.keys())
          if not END2END_FIXTURES[f].secure
      ],
      # One test entry per compatible (fixture, test) pair; iomgr
      # exclusions are the union of the fixture's and the test's.
      'tests': [
          {
              'name': '%s_test' % f,
              'args': [t],
              'exclude_configs': END2END_FIXTURES[f].exclude_configs,
              'exclude_iomgrs': list(set(END2END_FIXTURES[f].exclude_iomgrs) |
                                     set(END2END_TESTS[t].exclude_iomgrs)),
              'platforms': END2END_FIXTURES[f].platforms,
              'ci_platforms': (END2END_FIXTURES[f].platforms
                               if END2END_FIXTURES[f].ci_mac else without(
                                   END2END_FIXTURES[f].platforms, 'mac')),
              'flaky': END2END_TESTS[t].flaky,
              'language': 'c',
              'cpu_cost': END2END_TESTS[t].cpu_cost,
          }
          for f in sorted(END2END_FIXTURES.keys())
          for t in sorted(END2END_TESTS.keys()) if compatible(f, t)
      ] + [
          {
              'name': '%s_nosec_test' % f,
              'args': [t],
              'exclude_configs': END2END_FIXTURES[f].exclude_configs,
              'exclude_iomgrs': list(set(END2END_FIXTURES[f].exclude_iomgrs) |
                                     set(END2END_TESTS[t].exclude_iomgrs)),
              'platforms': END2END_FIXTURES[f].platforms,
              'ci_platforms': (END2END_FIXTURES[f].platforms
                               if END2END_FIXTURES[f].ci_mac else without(
                                   END2END_FIXTURES[f].platforms, 'mac')),
              'flaky': END2END_TESTS[t].flaky,
              'language': 'c',
              'cpu_cost': END2END_TESTS[t].cpu_cost,
          }
          for f in sorted(END2END_FIXTURES.keys())
          if not END2END_FIXTURES[f].secure
          for t in sorted(END2END_TESTS.keys())
          if compatible(f, t) and not END2END_TESTS[t].secure
      ],
      'core_end2end_tests': dict(
          (t, END2END_TESTS[t].secure)
          for t in END2END_TESTS.keys()
      )
  }
  # Python 2 print statement -- this script targets python2.7 (shebang).
  print yaml.dump(json)
# Emit the generated build description when run as a script.
if __name__ == '__main__':
  main()
| 47.477477 | 241 | 0.658065 |
"""Generates the appropriate build.json data for all the end2end tests."""
import yaml
import collections
import hashlib
FixtureOptions = collections.namedtuple(
'FixtureOptions',
'fullstack includes_proxy dns_resolver name_resolution secure platforms ci_mac tracing exclude_configs exclude_iomgrs large_writes enables_compression supports_compression is_inproc is_http2 supports_proxy_auth supports_write_buffering')
default_unsecure_fixture_options = FixtureOptions(
True, False, True, True, False, ['windows', 'linux', 'mac', 'posix'], True, False, [], [], True, False, True, False, True, False, True)
socketpair_unsecure_fixture_options = default_unsecure_fixture_options._replace(fullstack=False, dns_resolver=False)
default_secure_fixture_options = default_unsecure_fixture_options._replace(secure=True)
uds_fixture_options = default_unsecure_fixture_options._replace(dns_resolver=False, platforms=['linux', 'mac', 'posix'], exclude_iomgrs=['uv'])
fd_unsecure_fixture_options = default_unsecure_fixture_options._replace(
dns_resolver=False, fullstack=False, platforms=['linux', 'mac', 'posix'], exclude_iomgrs=['uv'])
inproc_fixture_options = default_unsecure_fixture_options._replace(dns_resolver=False, fullstack=False, name_resolution=False, supports_compression=False, is_inproc=True, is_http2=False, supports_write_buffering=False)
END2END_FIXTURES = {
'h2_compress': default_unsecure_fixture_options._replace(enables_compression=True),
'h2_census': default_unsecure_fixture_options,
'h2_load_reporting': default_unsecure_fixture_options,
'h2_fakesec': default_secure_fixture_options._replace(ci_mac=False),
'h2_fd': fd_unsecure_fixture_options,
'h2_full': default_unsecure_fixture_options,
'h2_full+pipe': default_unsecure_fixture_options._replace(
platforms=['linux'], exclude_iomgrs=['uv']),
'h2_full+trace': default_unsecure_fixture_options._replace(tracing=True),
'h2_full+workarounds': default_unsecure_fixture_options,
'h2_http_proxy': default_unsecure_fixture_options._replace(
ci_mac=False, exclude_iomgrs=['uv'], supports_proxy_auth=True),
'h2_oauth2': default_secure_fixture_options._replace(
ci_mac=False, exclude_iomgrs=['uv']),
'h2_proxy': default_unsecure_fixture_options._replace(
includes_proxy=True, ci_mac=False, exclude_iomgrs=['uv']),
'h2_sockpair_1byte': socketpair_unsecure_fixture_options._replace(
ci_mac=False, exclude_configs=['msan'], large_writes=False,
exclude_iomgrs=['uv']),
'h2_sockpair': socketpair_unsecure_fixture_options._replace(
ci_mac=False, exclude_iomgrs=['uv']),
'h2_sockpair+trace': socketpair_unsecure_fixture_options._replace(
ci_mac=False, tracing=True, large_writes=False, exclude_iomgrs=['uv']),
'h2_ssl': default_secure_fixture_options,
'h2_ssl_proxy': default_secure_fixture_options._replace(
includes_proxy=True, ci_mac=False, exclude_iomgrs=['uv']),
'h2_uds': uds_fixture_options,
'inproc': inproc_fixture_options
}
TestOptions = collections.namedtuple(
'TestOptions',
'needs_fullstack needs_dns needs_names proxyable secure traceable cpu_cost exclude_iomgrs large_writes flaky allows_compression needs_compression exclude_inproc needs_http2 needs_proxy_auth needs_write_buffering')
default_test_options = TestOptions(False, False, False, True, False, True, 1.0, [], False, False, True, False, False, False, False, False)
connectivity_test_options = default_test_options._replace(needs_fullstack=True)
LOWCPU = 0.1
END2END_TESTS = {
'authority_not_supported': default_test_options,
'bad_hostname': default_test_options._replace(needs_names=True),
'bad_ping': connectivity_test_options._replace(proxyable=False),
'binary_metadata': default_test_options._replace(cpu_cost=LOWCPU),
'resource_quota_server': default_test_options._replace(large_writes=True,
proxyable=False,
allows_compression=False),
'call_creds': default_test_options._replace(secure=True),
'cancel_after_accept': default_test_options._replace(cpu_cost=LOWCPU),
'cancel_after_client_done': default_test_options._replace(cpu_cost=LOWCPU),
'cancel_after_invoke': default_test_options._replace(cpu_cost=LOWCPU),
'cancel_after_round_trip': default_test_options._replace(cpu_cost=LOWCPU),
'cancel_before_invoke': default_test_options._replace(cpu_cost=LOWCPU),
'cancel_in_a_vacuum': default_test_options._replace(cpu_cost=LOWCPU),
'cancel_with_status': default_test_options._replace(cpu_cost=LOWCPU),
'compressed_payload': default_test_options._replace(proxyable=False,needs_compression=True),
'connectivity': connectivity_test_options._replace(needs_names=True,
proxyable=False, cpu_cost=LOWCPU, exclude_iomgrs=['uv']),
'default_host': default_test_options._replace(needs_fullstack=True,
needs_dns=True,needs_names=True),
'disappearing_server': connectivity_test_options._replace(flaky=True,needs_names=True),
'empty_batch': default_test_options._replace(cpu_cost=LOWCPU),
'filter_causes_close': default_test_options._replace(cpu_cost=LOWCPU),
'filter_call_init_fails': default_test_options,
'filter_latency': default_test_options._replace(cpu_cost=LOWCPU),
'graceful_server_shutdown': default_test_options._replace(cpu_cost=LOWCPU,exclude_inproc=True),
'hpack_size': default_test_options._replace(proxyable=False,
traceable=False,
cpu_cost=LOWCPU),
'high_initial_seqno': default_test_options._replace(cpu_cost=LOWCPU),
'idempotent_request': default_test_options,
'invoke_large_request': default_test_options,
'keepalive_timeout': default_test_options._replace(proxyable=False,
cpu_cost=LOWCPU,
needs_http2=True),
'large_metadata': default_test_options,
'max_concurrent_streams': default_test_options._replace(
proxyable=False, cpu_cost=LOWCPU, exclude_inproc=True),
'max_connection_age': default_test_options._replace(cpu_cost=LOWCPU,
exclude_inproc=True),
'max_connection_idle': connectivity_test_options._replace(
proxyable=False, exclude_iomgrs=['uv'], cpu_cost=LOWCPU),
'max_message_length': default_test_options._replace(cpu_cost=LOWCPU),
'negative_deadline': default_test_options,
'network_status_change': default_test_options._replace(cpu_cost=LOWCPU),
'no_logging': default_test_options._replace(traceable=False),
'no_op': default_test_options,
'payload': default_test_options,
'load_reporting_hook': default_test_options,
'ping_pong_streaming': default_test_options._replace(cpu_cost=LOWCPU),
'ping': connectivity_test_options._replace(proxyable=False, cpu_cost=LOWCPU),
'proxy_auth': default_test_options._replace(needs_proxy_auth=True),
'registered_call': default_test_options,
'request_with_flags': default_test_options._replace(
proxyable=False, cpu_cost=LOWCPU),
'request_with_payload': default_test_options._replace(cpu_cost=LOWCPU),
'server_finishes_request': default_test_options._replace(cpu_cost=LOWCPU),
'shutdown_finishes_calls': default_test_options._replace(cpu_cost=LOWCPU),
'shutdown_finishes_tags': default_test_options._replace(cpu_cost=LOWCPU),
'simple_cacheable_request': default_test_options._replace(cpu_cost=LOWCPU),
'stream_compression_compressed_payload': default_test_options._replace(proxyable=False,
exclude_inproc=True),
'stream_compression_payload': default_test_options._replace(exclude_inproc=True),
'stream_compression_ping_pong_streaming': default_test_options._replace(exclude_inproc=True),
'simple_delayed_request': connectivity_test_options,
'simple_metadata': default_test_options,
'simple_request': default_test_options,
'streaming_error_response': default_test_options._replace(cpu_cost=LOWCPU),
'trailing_metadata': default_test_options,
'workaround_cronet_compression': default_test_options,
'write_buffering': default_test_options._replace(cpu_cost=LOWCPU,
needs_write_buffering=True),
'write_buffering_at_end': default_test_options._replace(cpu_cost=LOWCPU,
needs_write_buffering=True),
}
def compatible(f, t):
  """Return True iff end2end test `t` can run against fixture `f`.

  A pair is compatible when every capability the test requires is provided
  by the fixture, and no fixture feature the test forbids is present.
  """
  fixture = END2END_FIXTURES[f]
  test = END2END_TESTS[t]
  # (test requires it, fixture provides it) pairs.
  required_capabilities = (
      (test.needs_fullstack, fixture.fullstack),
      (test.needs_dns, fixture.dns_resolver),
      (test.needs_names, fixture.name_resolution),
      (test.large_writes, fixture.large_writes),
      (test.needs_compression, fixture.supports_compression),
      (test.needs_http2, fixture.is_http2),
      (test.needs_proxy_auth, fixture.supports_proxy_auth),
      (test.needs_write_buffering, fixture.supports_write_buffering),
  )
  # (test forbids it, fixture has it) pairs.
  forbidden_features = (
      (not test.proxyable, fixture.includes_proxy),
      (not test.traceable, fixture.tracing),
      (not test.allows_compression, fixture.enables_compression),
      (test.exclude_inproc, fixture.is_inproc),
  )
  if any(needed and not provided
         for needed, provided in required_capabilities):
    return False
  if any(banned and present
         for banned, present in forbidden_features):
    return False
  return True
def without(l, e):
  """Return a copy of list `l` with the first occurrence of `e` removed.

  The input list is left untouched; raises ValueError if `e` is absent.
  """
  remaining = list(l)
  remaining.remove(e)
  return remaining
def main():
  """Print, as YAML on stdout, the build metadata for the end2end suite.

  Emits: the secure and insecure end2end test libraries, one test binary
  per fixture (plus `_nosec` variants for insecure fixtures), one test
  invocation per compatible (fixture, test) pair, and a map from each
  core end2end test name to whether it requires a secure fixture.
  """
  # Link-time dependencies for the secure and insecure builds.
  sec_deps = [
      'grpc_test_util',
      'grpc',
      'gpr_test_util',
      'gpr'
  ]
  unsec_deps = [
      'grpc_test_util_unsecure',
      'grpc_unsecure',
      'gpr_test_util',
      'gpr'
  ]
  # NOTE: `json` here is just the build dictionary handed to yaml.dump,
  # not the stdlib json module (which this script never imports).
  json = {
      '#': 'generated with test/end2end/gen_build_json.py',
      'libs': [
          {
              'name': 'end2end_tests',
              'build': 'private',
              'language': 'c',
              'secure': True,
              'src': ['test/core/end2end/end2end_tests.c',
                      'test/core/end2end/end2end_test_utils.c'] + [
                  'test/core/end2end/tests/%s.c' % t
                  for t in sorted(END2END_TESTS.keys())],
              'headers': ['test/core/end2end/tests/cancel_test_helpers.h',
                          'test/core/end2end/end2end_tests.h'],
              'deps': sec_deps,
              'vs_proj_dir': 'test/end2end/tests',
          }
      ] + [
          {
              'name': 'end2end_nosec_tests',
              'build': 'private',
              'language': 'c',
              'secure': False,
              # Insecure library only links the tests that do not need TLS.
              'src': ['test/core/end2end/end2end_nosec_tests.c',
                      'test/core/end2end/end2end_test_utils.c'] + [
                  'test/core/end2end/tests/%s.c' % t
                  for t in sorted(END2END_TESTS.keys())
                  if not END2END_TESTS[t].secure],
              'headers': ['test/core/end2end/tests/cancel_test_helpers.h',
                          'test/core/end2end/end2end_tests.h'],
              'deps': unsec_deps,
              'vs_proj_dir': 'test/end2end/tests',
          }
      ],
      'targets': [
          {
              'name': '%s_test' % f,
              'build': 'test',
              'language': 'c',
              'run': False,
              'src': ['test/core/end2end/fixtures/%s.c' % f],
              'platforms': END2END_FIXTURES[f].platforms,
              # Fixtures not runnable on Mac CI drop 'mac' from CI platforms.
              'ci_platforms': (END2END_FIXTURES[f].platforms
                               if END2END_FIXTURES[f].ci_mac else without(
                                   END2END_FIXTURES[f].platforms, 'mac')),
              'deps': [
                  'end2end_tests'
              ] + sec_deps,
              'vs_proj_dir': 'test/end2end/fixtures',
          }
          for f in sorted(END2END_FIXTURES.keys())
      ] + [
          {
              'name': '%s_nosec_test' % f,
              'build': 'test',
              'language': 'c',
              'secure': False,
              'src': ['test/core/end2end/fixtures/%s.c' % f],
              'run': False,
              'platforms': END2END_FIXTURES[f].platforms,
              'ci_platforms': (END2END_FIXTURES[f].platforms
                               if END2END_FIXTURES[f].ci_mac else without(
                                   END2END_FIXTURES[f].platforms, 'mac')),
              'deps': [
                  'end2end_nosec_tests'
              ] + unsec_deps,
              'vs_proj_dir': 'test/end2end/fixtures',
          }
          for f in sorted(END2END_FIXTURES.keys())
          if not END2END_FIXTURES[f].secure
      ],
      'tests': [
          {
              'name': '%s_test' % f,
              'args': [t],
              'exclude_configs': END2END_FIXTURES[f].exclude_configs,
              # Exclusions from the fixture and the test are unioned.
              'exclude_iomgrs': list(set(END2END_FIXTURES[f].exclude_iomgrs) |
                                     set(END2END_TESTS[t].exclude_iomgrs)),
              'platforms': END2END_FIXTURES[f].platforms,
              'ci_platforms': (END2END_FIXTURES[f].platforms
                               if END2END_FIXTURES[f].ci_mac else without(
                                   END2END_FIXTURES[f].platforms, 'mac')),
              'flaky': END2END_TESTS[t].flaky,
              'language': 'c',
              'cpu_cost': END2END_TESTS[t].cpu_cost,
          }
          for f in sorted(END2END_FIXTURES.keys())
          for t in sorted(END2END_TESTS.keys()) if compatible(f, t)
      ] + [
          {
              'name': '%s_nosec_test' % f,
              'args': [t],
              'exclude_configs': END2END_FIXTURES[f].exclude_configs,
              'exclude_iomgrs': list(set(END2END_FIXTURES[f].exclude_iomgrs) |
                                     set(END2END_TESTS[t].exclude_iomgrs)),
              'platforms': END2END_FIXTURES[f].platforms,
              'ci_platforms': (END2END_FIXTURES[f].platforms
                               if END2END_FIXTURES[f].ci_mac else without(
                                   END2END_FIXTURES[f].platforms, 'mac')),
              'flaky': END2END_TESTS[t].flaky,
              'language': 'c',
              'cpu_cost': END2END_TESTS[t].cpu_cost,
          }
          for f in sorted(END2END_FIXTURES.keys())
          if not END2END_FIXTURES[f].secure
          for t in sorted(END2END_TESTS.keys())
          if compatible(f, t) and not END2END_TESTS[t].secure
      ],
      'core_end2end_tests': dict(
          (t, END2END_TESTS[t].secure)
          for t in END2END_TESTS.keys()
      )
  }
  # print() with a single argument is valid under both Python 2 (as a
  # parenthesized expression) and Python 3; the original bare `print`
  # statement was Python-2-only syntax.
  print(yaml.dump(json))
if __name__ == '__main__':
main()
| false | true |
f7f9973b62bbdac27ac66682c0069d222c248ccf | 664 | py | Python | deepnog/utils/tests/conftest.py | alepfu/deepnog | 5cd3dbf15364c88240d7b71793a4e532439b3811 | [
"BSD-3-Clause"
] | 15 | 2020-03-02T10:56:28.000Z | 2022-02-22T14:03:36.000Z | deepnog/utils/tests/conftest.py | alepfu/deepnog | 5cd3dbf15364c88240d7b71793a4e532439b3811 | [
"BSD-3-Clause"
] | 47 | 2020-03-02T11:01:25.000Z | 2021-03-26T11:54:58.000Z | deepnog/utils/tests/conftest.py | alepfu/deepnog | 5cd3dbf15364c88240d7b71793a4e532439b3811 | [
"BSD-3-Clause"
] | 6 | 2020-03-14T12:36:37.000Z | 2021-08-05T12:59:50.000Z | import pytest
def pytest_addoption(parser):
    """Register the ``--hide-torch`` command line flag (off by default)."""
    parser.addoption(
        "--hide-torch",
        action='store_true',
        default=False,
        help="Run tests hiding torch.",
    )
def pytest_configure(config):
    """Declare the custom ``hide_torch`` marker so pytest recognizes it."""
    config.addinivalue_line(
        "markers", "hide_torch: mark test as hiding torch")
def pytest_collection_modifyitems(config, items):
    """Skip ``hide_torch``-marked tests unless ``--hide-torch`` was given.

    With the flag present the torch-mocking tests are meant to run, so the
    collection is left untouched.
    """
    if config.getoption("--hide-torch"):
        return
    skip_marker = pytest.mark.skip(reason="needs --hide-torch option to run")
    for collected in items:
        if "hide_torch" in collected.keywords:
            collected.add_marker(skip_marker)
| 26.56 | 79 | 0.661145 | import pytest
def pytest_addoption(parser):
parser.addoption(
"--hide-torch",
action='store_true',
default=False,
help="Run tests hiding torch."
)
def pytest_configure(config):
config.addinivalue_line("markers", "hide_torch: mark test as hiding torch")
def pytest_collection_modifyitems(config, items):
if config.getoption("--hide-torch"):
return
skip_torch = pytest.mark.skip(reason="needs --hide-torch option to run")
for item in items:
if "hide_torch" in item.keywords:
item.add_marker(skip_torch)
| true | true |
f7f9976062337e95cf838b5119df4c3b38efef76 | 4,528 | py | Python | web/Shitty_Imgur/app/application.py | abs0lut3pwn4g3/dc91120-CTF | 5582b250f672747dde985e1e39499af10774cee3 | [
"MIT"
] | 3 | 2019-04-20T16:00:33.000Z | 2022-02-13T09:04:04.000Z | web/Shitty_Imgur/app/application.py | abs0lut3pwn4g3/dc91120-CTF | 5582b250f672747dde985e1e39499af10774cee3 | [
"MIT"
] | null | null | null | web/Shitty_Imgur/app/application.py | abs0lut3pwn4g3/dc91120-CTF | 5582b250f672747dde985e1e39499af10774cee3 | [
"MIT"
] | null | null | null | import os
from flask import Flask, render_template, request, flash, redirect, url_for, session, send_from_directory
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
app = Flask(__name__, static_folder='static')
app.secret_key = "a3d5c6cd65752b6fab43ec3f32705a58"
project_dir = os.path.dirname(os.path.abspath(__file__))
database_file = "sqlite:///{}".format(os.path.join(project_dir, "site.db"))
app.config["SQLALCHEMY_DATABASE_URI"] = database_file
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['MAX_CONTENT_LENGTH'] = 1 * 1024 * 1024 # maximum file size = 1mb
db = SQLAlchemy(app)
db.init_app(app)
bcrypt = Bcrypt(app)
''' Models '''
class User(db.Model):
    """Registered account; passwords are stored as bcrypt hashes (see register/auth)."""
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Unique login name; is_valid() limits it to 4-20 alphanumerics at registration.
    username = db.Column(db.String(20), unique=True, nullable=False)
    # Bcrypt hash string (60 chars), never the plaintext password.
    password = db.Column(db.String(60), nullable=False)
    def __repr__(self):
        # Debug representation; deliberately omits the password hash.
        return f"User('{self.username}')"
''' views / routes '''
@app.route('/robots.txt')
@app.route('/application-v1.zip')
def static_from_root():
    """Serve /robots.txt and /application-v1.zip directly from the static folder."""
    # request.path[1:] strips the leading '/' so the remainder resolves
    # inside app.static_folder; only the two routes above reach this view.
    return send_from_directory(app.static_folder, request.path[1:])
@app.route('/', methods=['GET', 'POST'])
@app.route('/home', methods=['GET', 'POST'])
def home():
    """Gallery page: GET renders the logged-in user's images, POST stores an upload.

    Visitors without a valid session user get the bare home page. Each user's
    image list lives in an importable module named after the md5 of the
    username (created by register()).
    """
    username=session.get('username')
    user = User.query.filter_by(username=username).first()
    if not user:
        # Not logged in (or stale session): render the page without images.
        return render_template('home.html')
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        # if user does not select file, browser also
        # submit an empty part without filename
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file:
            # NOTE(review): the filename comes straight from the client and
            # is joined into a path with no sanitisation (no
            # werkzeug.utils.secure_filename), so '../' names can escape the
            # per-user directory. The repo appears to be a CTF challenge, so
            # this is likely intentional — confirm before "fixing".
            filename = file.filename
            hashed_username = h(username.encode('utf-8'))
            file.save(os.path.join(hashed_username, filename))
            # Load the user's image-list module and append the new filename.
            c = __import__(hashed_username)
            imgs = c.images
            imgs.append(filename)
            # NOTE(review): file handle is never closed explicitly; CPython
            # refcounting flushes it, but a `with` block would be safer.
            open(f"{hashed_username}.py", 'w').write("images = {}".format(repr(imgs)))
            return redirect(request.url)
    else:
        user_hash=h(username.encode('utf-8'))
        c = __import__(user_hash)
        imgs = c.images
        return render_template('home.html', user=username, user_hash=user_hash, images=imgs)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login form (GET) or authenticate the user (POST).

    On success the username is stored in the session and the user is
    redirected home; on failure the form is re-rendered with a flash.
    """
    if request.method == 'POST':
        username = request.form.get('username')
        password = request.form.get('password')
        if auth(username, password):
            session['username'] = username
            flash('Login Successful.', 'success')
            return redirect('/')
        # Fall through to re-render the form with the error flash.
        # (The original also computed h(username) here but never used it.)
        flash('Login Unsuccessful. Please check username and password', 'danger')
    return render_template('login.html', title='Login')
@app.route('/logout')
def logout():
    """Drop the logged-in user from the session and return to the home page."""
    if 'username' in session:
        del session['username']
    flash('Logout Successful.', 'info')
    return redirect(url_for('home'))
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Registration form: validate the username, store a bcrypt-hashed
    password, and provision the user's upload directory plus image-list
    module (both named after the md5 of the username).
    """
    if request.method == 'POST':
        username = request.form.get('username')
        password = request.form.get('password')
        if not is_valid(username):
            # is_valid() enforces 4-20 alphanumeric characters.
            flash("Invalid username. Length: 4-20 only.", "danger")
            return render_template('register.html', title='Register')
        if user_exists(username):
            flash("A user already exists with this username.", "danger")
            return render_template('register.html', title='Register')
        hashed_password = bcrypt.generate_password_hash(password).decode('utf-8')
        hashed_username = h(username.encode('utf-8'))
        user = User(username=username, password=hashed_password)
        db.session.add(user)
        db.session.commit()
        # Per-user storage: a directory for uploads and a python module
        # holding the image list (imported later by home()).
        os.mkdir(hashed_username)
        open(f"{hashed_username}.py", 'w').write("images = []")
        flash('Registration Successful. You can now login.', 'success')
        return redirect(url_for('login'))
    else:
        return render_template('register.html', title='Register')
def is_valid(username):
    """Return True if *username* is 4-20 ASCII letters/digits, else False."""
    import re
    # fullmatch() is equivalent to the original \A...\Z-anchored match();
    # bool() replaces the if/else True/False ladder.
    return bool(re.fullmatch(r"[0-9a-zA-Z]{4,20}", username))
def user_exists(username):
    """Return True if a user row with this exact *username* is registered."""
    # first() yields the row or None; comparing to None makes the boolean
    # intent explicit instead of the if/else True/False ladder.
    return User.query.filter_by(username=username).first() is not None
def auth(username, password):
    """Return True if *username* exists and *password* matches its bcrypt hash."""
    user = User.query.filter_by(username=username).first()
    # Short-circuit keeps check_password_hash from running on a missing user.
    return user is not None and bcrypt.check_password_hash(user.password, password)
def h(s):
    """Return the hex MD5 digest of byte string *s* (filesystem-safe user key)."""
    import hashlib
    return hashlib.md5(s).hexdigest()
if __name__ == "__main__":
app.run(debug=False)
| 29.986755 | 105 | 0.702076 | import os
from flask import Flask, render_template, request, flash, redirect, url_for, session, send_from_directory
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
app = Flask(__name__, static_folder='static')
app.secret_key = "a3d5c6cd65752b6fab43ec3f32705a58"
project_dir = os.path.dirname(os.path.abspath(__file__))
database_file = "sqlite:///{}".format(os.path.join(project_dir, "site.db"))
app.config["SQLALCHEMY_DATABASE_URI"] = database_file
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['MAX_CONTENT_LENGTH'] = 1 * 1024 * 1024
db = SQLAlchemy(app)
db.init_app(app)
bcrypt = Bcrypt(app)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(20), unique=True, nullable=False)
password = db.Column(db.String(60), nullable=False)
def __repr__(self):
return f"User('{self.username}')"
@app.route('/robots.txt')
@app.route('/application-v1.zip')
def static_from_root():
return send_from_directory(app.static_folder, request.path[1:])
@app.route('/', methods=['GET', 'POST'])
@app.route('/home', methods=['GET', 'POST'])
def home():
username=session.get('username')
user = User.query.filter_by(username=username).first()
if not user:
return render_template('home.html')
if request.method == 'POST':
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file:
filename = file.filename
hashed_username = h(username.encode('utf-8'))
file.save(os.path.join(hashed_username, filename))
c = __import__(hashed_username)
imgs = c.images
imgs.append(filename)
open(f"{hashed_username}.py", 'w').write("images = {}".format(repr(imgs)))
return redirect(request.url)
else:
user_hash=h(username.encode('utf-8'))
c = __import__(user_hash)
imgs = c.images
return render_template('home.html', user=username, user_hash=user_hash, images=imgs)
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
username = request.form.get('username')
password = request.form.get('password')
if auth(username, password):
session['username'] = username
hashed_username = h(username.encode('utf-8'))
flash('Login Successful.', 'success')
return redirect('/')
else:
flash('Login Unsuccessful. Please check username and password', 'danger')
return render_template('login.html', title='Login')
else:
return render_template('login.html', title='Login')
@app.route('/logout')
def logout():
session.pop('username', None)
flash('Logout Successful.', 'info')
return redirect(url_for('home'))
@app.route('/register', methods=['GET', 'POST'])
def register():
if request.method == 'POST':
username = request.form.get('username')
password = request.form.get('password')
if not is_valid(username):
flash("Invalid username. Length: 4-20 only.", "danger")
return render_template('register.html', title='Register')
if user_exists(username):
flash("A user already exists with this username.", "danger")
return render_template('register.html', title='Register')
hashed_password = bcrypt.generate_password_hash(password).decode('utf-8')
hashed_username = h(username.encode('utf-8'))
user = User(username=username, password=hashed_password)
db.session.add(user)
db.session.commit()
os.mkdir(hashed_username)
open(f"{hashed_username}.py", 'w').write("images = []")
flash('Registration Successful. You can now login.', 'success')
return redirect(url_for('login'))
else:
return render_template('register.html', title='Register')
def is_valid(username):
import re
if not re.match(r"\A[0-9a-zA-Z]{4,20}\Z", username):
return False
else:
return True
def user_exists(username):
user = User.query.filter_by(username=username).first()
if user:
return True
else:
return False
def auth(username, password):
user = User.query.filter_by(username=username).first()
if user and bcrypt.check_password_hash(user.password, password):
return True
else:
return False
def h(s):
from hashlib import md5
return md5(s).hexdigest()
if __name__ == "__main__":
app.run(debug=False)
| true | true |
f7f997711a07128f9f56f5ac02c9883830d9e8df | 341 | py | Python | main.py | Heroadn/HighVoltagePy | d5f7edbfff707a053498fce070673b9bc5aa726c | [
"MIT"
] | 2 | 2019-10-07T18:57:37.000Z | 2019-10-09T02:05:36.000Z | main.py | Heroadn/HighVoltagePy | d5f7edbfff707a053498fce070673b9bc5aa726c | [
"MIT"
] | 7 | 2020-02-12T03:15:28.000Z | 2022-02-11T03:44:26.000Z | main.py | Heroadn/HighVoltagePy | d5f7edbfff707a053498fce070673b9bc5aa726c | [
"MIT"
] | null | null | null | import VoltagePy
import helper
#setup
VoltagePy.setup('webdrivers/Chrome/76/linux_76',["--disable-gpu"])
browser = VoltagePy.open("http://localhost:8082/usuario/registrar")#https://www.duckduckgo.com
#usuario form
data = helper.fileToJson('usuario.json')
VoltagePy.fillForm(data,"form")
#form.submit()
#inputElement.send_keys(Keys.ENTER)
| 24.357143 | 94 | 0.771261 | import VoltagePy
import helper
VoltagePy.setup('webdrivers/Chrome/76/linux_76',["--disable-gpu"])
browser = VoltagePy.open("http://localhost:8082/usuario/registrar")
data = helper.fileToJson('usuario.json')
VoltagePy.fillForm(data,"form")
| true | true |
f7f997cac8ed855c59b7d339d3a705745f9c02d5 | 3,396 | py | Python | lib/config/default.py | haotianliu001/HRNet-Lesion | 9dae108879456e084b2200e39d7e58c1c08c2b16 | [
"MIT"
] | null | null | null | lib/config/default.py | haotianliu001/HRNet-Lesion | 9dae108879456e084b2200e39d7e58c1c08c2b16 | [
"MIT"
] | null | null | null | lib/config/default.py | haotianliu001/HRNet-Lesion | 9dae108879456e084b2200e39d7e58c1c08c2b16 | [
"MIT"
] | null | null | null | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Ke Sun (sunk@mail.ustc.edu.cn)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from yacs.config import CfgNode as CN
_C = CN()
_C.OUTPUT_DIR = ''
_C.LOG_DIR = ''
_C.GPUS = (0,)
_C.WORKERS = 4
_C.PRINT_FREQ = 20
_C.AUTO_RESUME = False
_C.PIN_MEMORY = True
_C.RANK = 0
# Cudnn related params
_C.CUDNN = CN()
_C.CUDNN.BENCHMARK = True
_C.CUDNN.DETERMINISTIC = False
_C.CUDNN.ENABLED = True
# common params for NETWORK
_C.MODEL = CN()
_C.MODEL.NAME = 'seg_hrnet'
_C.MODEL.PRETRAINED = ''
_C.MODEL.ALIGN_CORNERS = True
_C.MODEL.NUM_OUTPUTS = 1
_C.MODEL.EXTRA = CN(new_allowed=True)
_C.MODEL.OCR = CN()
_C.MODEL.OCR.MID_CHANNELS = 512
_C.MODEL.OCR.KEY_CHANNELS = 256
_C.MODEL.OCR.DROPOUT = 0.05
_C.MODEL.OCR.SCALE = 1
_C.LOSS = CN()
_C.LOSS.USE_OHEM = False
_C.LOSS.OHEMTHRES = 0.9
_C.LOSS.OHEMKEEP = 100000
_C.LOSS.CLASS_BALANCE = False
_C.LOSS.BALANCE_WEIGHTS = [1]
# DATASET related params
_C.DATASET = CN()
_C.DATASET.ROOT = ''
_C.DATASET.DATASET = 'cityscapes'
_C.DATASET.NUM_CLASSES = 19
_C.DATASET.TRAIN_SET = 'list/cityscapes/train.lst'
_C.DATASET.EXTRA_TRAIN_SET = ''
_C.DATASET.TEST_SET = 'list/cityscapes/val.lst'
# training
_C.TRAIN = CN()
_C.TRAIN.FREEZE_LAYERS = ''
_C.TRAIN.FREEZE_EPOCHS = -1
_C.TRAIN.NONBACKBONE_KEYWORDS = []
_C.TRAIN.NONBACKBONE_MULT = 10
_C.TRAIN.IMAGE_SIZE = [1024, 512] # width * height
_C.TRAIN.BASE_SIZE = 2048
_C.TRAIN.DOWNSAMPLERATE = 1
_C.TRAIN.FLIP = True
_C.TRAIN.MULTI_SCALE = True
_C.TRAIN.SCALE_FACTOR = 16
_C.TRAIN.RANDOM_BRIGHTNESS = False
_C.TRAIN.RANDOM_BRIGHTNESS_SHIFT_VALUE = 10
_C.TRAIN.LR_FACTOR = 0.1
_C.TRAIN.LR_STEP = [90, 110]
_C.TRAIN.LR = 0.01
_C.TRAIN.EXTRA_LR = 0.001
_C.TRAIN.OPTIMIZER = 'sgd'
_C.TRAIN.MOMENTUM = 0.9
_C.TRAIN.WD = 0.0001
_C.TRAIN.NESTEROV = False
_C.TRAIN.IGNORE_LABEL = -1
_C.TRAIN.BEGIN_EPOCH = 0
_C.TRAIN.END_EPOCH = 484
_C.TRAIN.EXTRA_EPOCH = 0
_C.TRAIN.RESUME = False
_C.TRAIN.BATCH_SIZE_PER_GPU = 32
_C.TRAIN.SHUFFLE = True
# only using some training samples
_C.TRAIN.NUM_SAMPLES = 0
# testing
_C.TEST = CN()
_C.TEST.IMAGE_SIZE = [2048, 1024] # width * height
_C.TEST.BASE_SIZE = 2048
_C.TEST.BATCH_SIZE_PER_GPU = 32
# only testing some samples
_C.TEST.NUM_SAMPLES = 0
_C.TEST.MODEL_FILE = ''
_C.TEST.FLIP_TEST = False
_C.TEST.MULTI_SCALE = False
_C.TEST.SCALE_LIST = [1]
_C.TEST.OUTPUT_INDEX = -1
# debug
_C.DEBUG = CN()
_C.DEBUG.DEBUG = False
_C.DEBUG.SAVE_BATCH_IMAGES_GT = False
_C.DEBUG.SAVE_BATCH_IMAGES_PRED = False
_C.DEBUG.SAVE_HEATMAPS_GT = False
_C.DEBUG.SAVE_HEATMAPS_PRED = False
# NEW:
_C.DATASET.CLASS_NAMES = None
_C.DATASET.TEST_CROP_INFO_PATH = None
_C.DATASET.MEAN = [116.513, 56.437, 16.309]
_C.DATASET.STD = [80.206, 41.232, 13.293]
_C.TRAIN.LR_MIN = None
_C.TRAIN.RATIO_RANGE = [0.5, 2.0]
_C.TRAIN.CROP_SIZE = [1200, 1440]
_C.TRAIN.ROTATE = True
_C.TRAIN.EVAL_INTERVAL = 100
_C.TEST.PAD_SIZE = None
# END NEW
def update_config(cfg, args):
    """Overlay the experiment YAML (args.cfg) and the ``KEY VALUE`` override
    list (args.opts) onto *cfg*, then re-freeze it.

    The opts list is merged after the file, so command-line overrides are
    applied last.
    """
    cfg.defrost()
    cfg.merge_from_file(args.cfg)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
if __name__ == '__main__':
import sys
with open(sys.argv[1], 'w') as f:
print(_C, file=f)
| 21.909677 | 80 | 0.693463 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from yacs.config import CfgNode as CN
_C = CN()
_C.OUTPUT_DIR = ''
_C.LOG_DIR = ''
_C.GPUS = (0,)
_C.WORKERS = 4
_C.PRINT_FREQ = 20
_C.AUTO_RESUME = False
_C.PIN_MEMORY = True
_C.RANK = 0
_C.CUDNN = CN()
_C.CUDNN.BENCHMARK = True
_C.CUDNN.DETERMINISTIC = False
_C.CUDNN.ENABLED = True
_C.MODEL = CN()
_C.MODEL.NAME = 'seg_hrnet'
_C.MODEL.PRETRAINED = ''
_C.MODEL.ALIGN_CORNERS = True
_C.MODEL.NUM_OUTPUTS = 1
_C.MODEL.EXTRA = CN(new_allowed=True)
_C.MODEL.OCR = CN()
_C.MODEL.OCR.MID_CHANNELS = 512
_C.MODEL.OCR.KEY_CHANNELS = 256
_C.MODEL.OCR.DROPOUT = 0.05
_C.MODEL.OCR.SCALE = 1
_C.LOSS = CN()
_C.LOSS.USE_OHEM = False
_C.LOSS.OHEMTHRES = 0.9
_C.LOSS.OHEMKEEP = 100000
_C.LOSS.CLASS_BALANCE = False
_C.LOSS.BALANCE_WEIGHTS = [1]
_C.DATASET = CN()
_C.DATASET.ROOT = ''
_C.DATASET.DATASET = 'cityscapes'
_C.DATASET.NUM_CLASSES = 19
_C.DATASET.TRAIN_SET = 'list/cityscapes/train.lst'
_C.DATASET.EXTRA_TRAIN_SET = ''
_C.DATASET.TEST_SET = 'list/cityscapes/val.lst'
_C.TRAIN = CN()
_C.TRAIN.FREEZE_LAYERS = ''
_C.TRAIN.FREEZE_EPOCHS = -1
_C.TRAIN.NONBACKBONE_KEYWORDS = []
_C.TRAIN.NONBACKBONE_MULT = 10
_C.TRAIN.IMAGE_SIZE = [1024, 512]
_C.TRAIN.BASE_SIZE = 2048
_C.TRAIN.DOWNSAMPLERATE = 1
_C.TRAIN.FLIP = True
_C.TRAIN.MULTI_SCALE = True
_C.TRAIN.SCALE_FACTOR = 16
_C.TRAIN.RANDOM_BRIGHTNESS = False
_C.TRAIN.RANDOM_BRIGHTNESS_SHIFT_VALUE = 10
_C.TRAIN.LR_FACTOR = 0.1
_C.TRAIN.LR_STEP = [90, 110]
_C.TRAIN.LR = 0.01
_C.TRAIN.EXTRA_LR = 0.001
_C.TRAIN.OPTIMIZER = 'sgd'
_C.TRAIN.MOMENTUM = 0.9
_C.TRAIN.WD = 0.0001
_C.TRAIN.NESTEROV = False
_C.TRAIN.IGNORE_LABEL = -1
_C.TRAIN.BEGIN_EPOCH = 0
_C.TRAIN.END_EPOCH = 484
_C.TRAIN.EXTRA_EPOCH = 0
_C.TRAIN.RESUME = False
_C.TRAIN.BATCH_SIZE_PER_GPU = 32
_C.TRAIN.SHUFFLE = True
_C.TRAIN.NUM_SAMPLES = 0
_C.TEST = CN()
_C.TEST.IMAGE_SIZE = [2048, 1024]
_C.TEST.BASE_SIZE = 2048
_C.TEST.BATCH_SIZE_PER_GPU = 32
_C.TEST.NUM_SAMPLES = 0
_C.TEST.MODEL_FILE = ''
_C.TEST.FLIP_TEST = False
_C.TEST.MULTI_SCALE = False
_C.TEST.SCALE_LIST = [1]
_C.TEST.OUTPUT_INDEX = -1
_C.DEBUG = CN()
_C.DEBUG.DEBUG = False
_C.DEBUG.SAVE_BATCH_IMAGES_GT = False
_C.DEBUG.SAVE_BATCH_IMAGES_PRED = False
_C.DEBUG.SAVE_HEATMAPS_GT = False
_C.DEBUG.SAVE_HEATMAPS_PRED = False
_C.DATASET.CLASS_NAMES = None
_C.DATASET.TEST_CROP_INFO_PATH = None
_C.DATASET.MEAN = [116.513, 56.437, 16.309]
_C.DATASET.STD = [80.206, 41.232, 13.293]
_C.TRAIN.LR_MIN = None
_C.TRAIN.RATIO_RANGE = [0.5, 2.0]
_C.TRAIN.CROP_SIZE = [1200, 1440]
_C.TRAIN.ROTATE = True
_C.TRAIN.EVAL_INTERVAL = 100
_C.TEST.PAD_SIZE = None
def update_config(cfg, args):
cfg.defrost()
cfg.merge_from_file(args.cfg)
cfg.merge_from_list(args.opts)
cfg.freeze()
if __name__ == '__main__':
import sys
with open(sys.argv[1], 'w') as f:
print(_C, file=f)
| true | true |
f7f999a04e00037fbe5d680f907bc536e2224643 | 808 | py | Python | halotools/mock_observables/counts_in_cells/engines/setup_package.py | pllim/halotools | 6499cff09e7e0f169e4f425ee265403f6be816e8 | [
"BSD-3-Clause"
] | 83 | 2015-01-15T14:54:16.000Z | 2021-12-09T11:28:02.000Z | halotools/mock_observables/counts_in_cells/engines/setup_package.py | pllim/halotools | 6499cff09e7e0f169e4f425ee265403f6be816e8 | [
"BSD-3-Clause"
] | 579 | 2015-01-14T15:57:37.000Z | 2022-01-13T18:58:44.000Z | halotools/mock_observables/counts_in_cells/engines/setup_package.py | pllim/halotools | 6499cff09e7e0f169e4f425ee265403f6be816e8 | [
"BSD-3-Clause"
] | 70 | 2015-01-14T15:15:58.000Z | 2021-12-22T18:18:31.000Z | from distutils.extension import Extension
import os
PATH_TO_PKG = os.path.relpath(os.path.dirname(__file__))
SOURCES = ("counts_in_cylinders_engine.pyx", )
THIS_PKG_NAME = '.'.join(__name__.split('.')[:-1])
def get_extensions():
    """Build one Cython Extension per entry in SOURCES.

    Module names are rooted at THIS_PKG_NAME; source paths are relative to
    this directory. The literal 'numpy' include dir is resolved by the
    build machinery.
    """
    extensions = []
    for src in SOURCES:
        module_name = "{0}.{1}".format(THIS_PKG_NAME, src.replace('.pyx', ''))
        extensions.append(
            Extension(
                name=module_name,
                sources=[os.path.join(PATH_TO_PKG, src)],
                include_dirs=['numpy'],
                libraries=[],
                language='c++',
                extra_compile_args=['-Ofast'],
            )
        )
    return extensions
| 28.857143 | 78 | 0.648515 | from distutils.extension import Extension
import os
PATH_TO_PKG = os.path.relpath(os.path.dirname(__file__))
SOURCES = ("counts_in_cylinders_engine.pyx", )
THIS_PKG_NAME = '.'.join(__name__.split('.')[:-1])
def get_extensions():
names = [THIS_PKG_NAME + "." + src.replace('.pyx', '') for src in SOURCES]
sources = [os.path.join(PATH_TO_PKG, srcfn) for srcfn in SOURCES]
include_dirs = ['numpy']
libraries = []
language = 'c++'
extra_compile_args = ['-Ofast']
extensions = []
for name, source in zip(names, sources):
extensions.append(Extension(name=name,
sources=[source],
include_dirs=include_dirs,
libraries=libraries,
language=language,
extra_compile_args=extra_compile_args))
return extensions
| true | true |
f7f99b1d745e8b5a1dc661b26642a0ef4948f065 | 669 | py | Python | sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/__init__.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/__init__.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/__init__.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from ._configuration import FaceClientConfiguration
from ._face_client import FaceClient
# Names exported by ``from ... import *`` -- the public API of this package.
__all__ = ['FaceClient', 'FaceClientConfiguration']
from .version import VERSION
# Package version string, re-exported from the generated version module.
__version__ = VERSION
| 33.45 | 76 | 0.608371 |
from ._configuration import FaceClientConfiguration
from ._face_client import FaceClient
__all__ = ['FaceClient', 'FaceClientConfiguration']
from .version import VERSION
__version__ = VERSION
| true | true |
f7f99c541ae09f6b48dbeed514425ef8a4f55eb3 | 2,941 | py | Python | src/split_image/split.py | whiplashoo/split-image | 9dde800135d6d7cba19087ac8a1ffbb2e25809cc | [
"MIT"
] | 6 | 2021-11-06T07:55:56.000Z | 2022-03-30T09:26:12.000Z | src/split_image/split.py | whiplashoo/split-image | 9dde800135d6d7cba19087ac8a1ffbb2e25809cc | [
"MIT"
] | null | null | null | src/split_image/split.py | whiplashoo/split-image | 9dde800135d6d7cba19087ac8a1ffbb2e25809cc | [
"MIT"
] | null | null | null | #!usr/bin/env python
import argparse
import os
from collections import Counter

from PIL import Image
def split(im, rows, cols, image_path):
    """Cut ``im`` into a grid of equally-sized tiles and save each as PNG.

    ``rows`` is the number of tiles across the width and ``cols`` the number
    down the height; remainder pixels on the right/bottom edges are dropped
    by the integer division.  Tiles are written next to the source image as
    ``<stem>_<n>.png``, numbered left-to-right, top-to-bottom.
    """
    im_width, im_height = im.size
    tile_width = im_width // rows
    tile_height = im_height // cols
    # Strip only the final extension.  The previous image_path.split(".")[0]
    # broke on paths with dots in directory components (e.g. "./img.png"
    # became "" and tiles were written as "_0.png").
    base = os.path.splitext(image_path)[0]
    n = 0
    for i in range(cols):
        for j in range(rows):
            box = (j * tile_width, i * tile_height,
                   (j + 1) * tile_width, (i + 1) * tile_height)
            outp = im.crop(box)
            outp_path = base + "_" + str(n) + ".png"
            print("Exporting image tile: " + outp_path)
            outp.save(outp_path)
            n += 1
def determine_bg_color(im):
    """Guess the image's background color.

    Samples every pixel in four border strips (the top, left, right and
    bottom tenths of the image) and returns the single most frequent RGBA
    value among them.
    """
    print("Determining background color...")
    width, height = im.size
    rgba = im.convert('RGBA')
    strips = (
        ((0, 0), (width, height / 10)),           # top strip
        ((0, 0), (width / 10, height)),           # left strip
        ((width * 9 / 10, 0), (width, height)),   # right strip
        ((0, height * 9 / 10), (width, height)),  # bottom strip
    )
    tally = Counter()
    for (x0, y0), (x1, y1) in strips:
        for x in range(int(x0), int(x1)):
            for y in range(int(y0), int(y1)):
                tally[rgba.getpixel((x, y))] += 1
    return tally.most_common(1)[0][0]
def main():
    """CLI entry point: parse arguments, optionally square-pad, then split."""
    parser = argparse.ArgumentParser(
        description="Split an image into rows and columns.")
    parser.add_argument("image_path", nargs=1,
                        help="The path of the image to split.")
    parser.add_argument("rows", type=int, default=2,
                        help="How many rows to split the image into (horizontal split).")
    parser.add_argument("cols", type=int, default=2,
                        help="How many columns to split the image into (vertical split).")
    parser.add_argument("-s", "--square", action="store_true",
                        help="If the image should be resized into a square before splitting.")
    args = parser.parse_args()
    image_path = args.image_path[0]
    im = Image.open(image_path)
    width, height = im.size
    shorter = min(width, height)
    longer = max(width, height)
    if not args.square:
        split(im, args.rows, args.cols, image_path)
    else:
        print("Resizing image to a square...")
        bg_color = determine_bg_color(im)
        print("Background color is... " + str(bg_color))
        # Center the original on a square canvas filled with the detected
        # background color before splitting.
        canvas = Image.new("RGBA", (longer, longer), bg_color)
        offset = int((longer - shorter) / 2)
        paste_pos = (0, offset) if width > height else (offset, 0)
        canvas.paste(im, paste_pos)
        split(canvas, args.rows, args.cols, image_path)
        print("Exporting resized image...")
        canvas.save(image_path + "_squared.png")
    print("Done!")
# Standard script entry point.
if __name__ == "__main__":
    main()
| 35.865854 | 94 | 0.581095 |
import argparse
from collections import Counter
from PIL import Image
def split(im, rows, cols, image_path):
im_width, im_height = im.size
row_width = int(im_width / rows)
row_height = int(im_height / cols)
n = 0
for i in range(0, cols):
for j in range(0, rows):
box = (j * row_width, i * row_height, j * row_width +
row_width, i * row_height + row_height)
outp = im.crop(box)
outp_path = image_path.split(".")[0] + "_" + str(n) + ".png"
print("Exporting image tile: " + outp_path)
outp.save(outp_path)
n += 1
def determine_bg_color(im):
print("Determining background color...")
im_width, im_height = im.size
rgb_im = im.convert('RGBA')
all_colors = []
areas = [[(0, 0), (im_width, im_height / 10)],
[(0, 0), (im_width / 10, im_height)],
[(im_width * 9 / 10, 0), (im_width, im_height)],
[(0, im_height * 9 / 10), (im_width, im_height)]]
for area in areas:
start = area[0]
end = area[1]
for x in range(int(start[0]), int(end[0])):
for y in range(int(start[1]), int(end[1])):
pix = rgb_im.getpixel((x, y))
all_colors.append(pix)
return Counter(all_colors).most_common(1)[0][0]
def main():
parser = argparse.ArgumentParser(
description="Split an image into rows and columns.")
parser.add_argument("image_path", nargs=1,
help="The path of the image to split.")
parser.add_argument("rows", type=int, default=2,
help="How many rows to split the image into (horizontal split).")
parser.add_argument("cols", type=int, default=2,
help="How many columns to split the image into (vertical split).")
parser.add_argument("-s", "--square", action="store_true",
help="If the image should be resized into a square before splitting.")
args = parser.parse_args()
image_path = args.image_path[0]
im = Image.open(image_path)
im_width, im_height = im.size
min_dimension = min(im_width, im_height)
max_dimension = max(im_width, im_height)
if args.square:
print("Resizing image to a square...")
bg_color = determine_bg_color(im)
print("Background color is... " + str(bg_color))
im_r = Image.new("RGBA", (max_dimension, max_dimension), bg_color)
offset = int((max_dimension - min_dimension) / 2)
if im_width > im_height:
im_r.paste(im, (0, offset))
else:
im_r.paste(im, (offset, 0))
split(im_r, args.rows, args.cols, image_path)
print("Exporting resized image...")
im_r.save(image_path + "_squared.png")
else:
split(im, args.rows, args.cols, image_path)
print("Done!")
if __name__ == "__main__":
main()
| true | true |
f7f99d9512683e4a04ae1998805f8f4aa8c5d913 | 3,769 | py | Python | ludwig/datasets/flickr8k/__init__.py | carlogrisetti/ludwig | 5c0887f14867e1577e0ddc3806c5cf7a781fb665 | [
"Apache-2.0"
] | null | null | null | ludwig/datasets/flickr8k/__init__.py | carlogrisetti/ludwig | 5c0887f14867e1577e0ddc3806c5cf7a781fb665 | [
"Apache-2.0"
] | null | null | null | ludwig/datasets/flickr8k/__init__.py | carlogrisetti/ludwig | 5c0887f14867e1577e0ddc3806c5cf7a781fb665 | [
"Apache-2.0"
] | 1 | 2019-09-05T09:19:39.000Z | 2019-09-05T09:19:39.000Z | #! /usr/bin/env python
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import re
from collections import defaultdict
from ludwig.datasets.base_dataset import DEFAULT_CACHE_LOCATION, BaseDataset
from ludwig.datasets.mixins.download import ZipDownloadMixin
from ludwig.datasets.mixins.load import CSVLoadMixin
from ludwig.utils.fs_utils import makedirs, rename
def load(cache_dir=DEFAULT_CACHE_LOCATION, split=False):
dataset = Flickr8k(cache_dir=cache_dir)
return dataset.load(split=split)
class Flickr8k(CSVLoadMixin, ZipDownloadMixin, BaseDataset):
"""The Flickr8k dataset.
This pulls in an array of mixins for different types of functionality
which belongs in the workflow for ingesting and transforming training data into a destination
dataframe that can fit into Ludwig's training API.
"""
def __init__(self, cache_dir=DEFAULT_CACHE_LOCATION):
super().__init__(dataset_name="flickr8k", cache_dir=cache_dir)
def process_downloaded_dataset(self):
makedirs(self.processed_temp_path, exist_ok=True)
# create a dictionary matching image_path --> list of captions
image_to_caption = defaultdict(list)
with open(
f"{self.raw_dataset_path}/Flickr8k.token.txt",
"r"
) as captions_file:
image_to_caption = defaultdict(list)
for line in captions_file:
line = line.split("#")
# the regex is to format the string to fit properly in a csv
line[1] = line[1].strip("\n01234.\t ")
line[1] = re.sub('\"', '\"\"', line[1])
line[1] = '\"' + line[1] + '\"'
image_to_caption[line[0]].append(line[1])
# create csv file with 7 columns: image_path, 5 captions, and split
with open(
os.path.join(self.processed_temp_path, self.csv_filename),
'w'
) as output_file:
output_file.write('image_path,caption0,caption1,caption2,')
output_file.write('caption3,caption4,split\n')
splits = ["train", "dev", "test"]
for i in range(len(splits)):
split = splits[i]
with open(
f"{self.raw_dataset_path}/Flickr_8k.{split}Images.txt",
"r"
) as split_file:
for image_name in split_file:
image_name = image_name.strip('\n')
if image_name in image_to_caption:
output_file.write('{},{},{},{},{},{},{}\n'.format(
# Note: image folder is named Flicker8k_Dataset
"{}/Flicker8k_Dataset/{}".format(
self.raw_dataset_path, image_name
),
*image_to_caption[image_name],
i
))
# Note: csv is stored in /processed while images are stored in /raw
rename(self.processed_temp_path, self.processed_dataset_path)
| 43.321839 | 97 | 0.597241 |
import os
import re
from collections import defaultdict
from ludwig.datasets.base_dataset import DEFAULT_CACHE_LOCATION, BaseDataset
from ludwig.datasets.mixins.download import ZipDownloadMixin
from ludwig.datasets.mixins.load import CSVLoadMixin
from ludwig.utils.fs_utils import makedirs, rename
def load(cache_dir=DEFAULT_CACHE_LOCATION, split=False):
dataset = Flickr8k(cache_dir=cache_dir)
return dataset.load(split=split)
class Flickr8k(CSVLoadMixin, ZipDownloadMixin, BaseDataset):
def __init__(self, cache_dir=DEFAULT_CACHE_LOCATION):
super().__init__(dataset_name="flickr8k", cache_dir=cache_dir)
def process_downloaded_dataset(self):
makedirs(self.processed_temp_path, exist_ok=True)
image_to_caption = defaultdict(list)
with open(
f"{self.raw_dataset_path}/Flickr8k.token.txt",
"r"
) as captions_file:
image_to_caption = defaultdict(list)
for line in captions_file:
line = line.split("#")
line[1] = line[1].strip("\n01234.\t ")
line[1] = re.sub('\"', '\"\"', line[1])
line[1] = '\"' + line[1] + '\"'
image_to_caption[line[0]].append(line[1])
# create csv file with 7 columns: image_path, 5 captions, and split
with open(
os.path.join(self.processed_temp_path, self.csv_filename),
'w'
) as output_file:
output_file.write('image_path,caption0,caption1,caption2,')
output_file.write('caption3,caption4,split\n')
splits = ["train", "dev", "test"]
for i in range(len(splits)):
split = splits[i]
with open(
f"{self.raw_dataset_path}/Flickr_8k.{split}Images.txt",
"r"
) as split_file:
for image_name in split_file:
image_name = image_name.strip('\n')
if image_name in image_to_caption:
output_file.write('{},{},{},{},{},{},{}\n'.format(
# Note: image folder is named Flicker8k_Dataset
"{}/Flicker8k_Dataset/{}".format(
self.raw_dataset_path, image_name
),
*image_to_caption[image_name],
i
))
# Note: csv is stored in /processed while images are stored in /raw
rename(self.processed_temp_path, self.processed_dataset_path)
| true | true |
f7f99e7d47f0196f9f61d42e699a57a7aa5e4ba3 | 3,675 | py | Python | relation_rcnn/test.py | super-wcg/Relation-Network-for-Object-Detection | f91ef685b8d713b2a88fb5300222b1932e7ddd64 | [
"MIT"
] | null | null | null | relation_rcnn/test.py | super-wcg/Relation-Network-for-Object-Detection | f91ef685b8d713b2a88fb5300222b1932e7ddd64 | [
"MIT"
] | null | null | null | relation_rcnn/test.py | super-wcg/Relation-Network-for-Object-Detection | f91ef685b8d713b2a88fb5300222b1932e7ddd64 | [
"MIT"
] | null | null | null | # --------------------------------------------------------
# Relation Networks for Object Detection
# Copyright (c) 2017 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Modified by Jiayuan Gu, Dazhi Cheng, Yuwen Xiong
# --------------------------------------------------------
# Based on:
# MX-RCNN
# Copyright (c) 2016 by Contributors
# Licence under The Apache 2.0 License
# https://github.com/ijkguo/mx-rcnn/
# --------------------------------------------------------
import _init_paths
import argparse
import os
import sys
import time
import logging
from config.config import config, update_config
def parse_args():
    """Parse command-line options and fold the overrides back into ``config``.

    Parsing happens in two passes: ``--cfg`` is read first so the experiment
    config can be loaded, because the remaining options take their *defaults*
    from that freshly loaded config.  After the second pass the chosen values
    are written back into the global ``config``.
    """
    parser = argparse.ArgumentParser(description='Test a Faster R-CNN network')
    # general: the config file must be parsed before any config-derived default
    parser.add_argument('--cfg', help='experiment configure file name', required=True, type=str)
    args, rest = parser.parse_known_args()
    update_config(args.cfg)
    # rcnn test options (defaults come from the loaded config)
    parser.add_argument('--vis', help='turn on visualization', action='store_true')
    parser.add_argument('--ignore_cache', help='ignore cached results boxes', action='store_true')
    parser.add_argument('--thresh', help='valid detection threshold', default=1e-3, type=float)
    parser.add_argument('--shuffle', help='shuffle data on visualization', action='store_true')
    parser.add_argument('--test_epoch', help='the epoch model to be test', default=config.TEST.test_epoch, type=int)
    # nms / soft-nms / learned-nms options
    parser.add_argument('--nms', help='params for nms or softnms', default=config.TEST.NMS, type=float)
    parser.add_argument('--softnms', help='whether to enable softnms', default=config.TEST.SOFTNMS, action='store_true')
    parser.add_argument('--naive_nms', help='whether to enable naive nms', default=False, action='store_true')
    parser.add_argument('--first_n', help='first_n for learn nms or nms', default=config.TEST.FIRST_N, type=int)
    parser.add_argument('--merge', help='merge method for learn nms', default=config.TEST.MERGE_METHOD, type=int)
    parser.add_argument('--debug', help='whether to enable debug mode', default=False, action='store_true')
    # dataset
    parser.add_argument('--test_set', help='which set to be tested', default=config.dataset.test_image_set, type=str)
    args, rest = parser.parse_known_args()
    # write the parsed values back into the global config
    config.TEST.test_epoch = args.test_epoch
    config.TEST.NMS = args.nms
    # naive nms takes precedence over softnms when both flags are given
    config.TEST.SOFTNMS = args.softnms and (not args.naive_nms)
    config.TEST.FIRST_N = args.first_n
    config.TEST.MERGE_METHOD = args.merge
    config.dataset.test_image_set = args.test_set
    return args
# Parse arguments at import time so the experiment config is loaded before
# mxnet is imported below.
args = parse_args()
curr_path = os.path.abspath(os.path.dirname(__file__))
# Put the bundled mxnet build (pinned by config.MXNET_VERSION) first on the
# path so the following imports pick it up instead of any system install.
sys.path.insert(0, os.path.join(curr_path, '../external/mxnet', config.MXNET_VERSION))
import mxnet as mx
import numpy as np
from function.test_rcnn import test_rcnn
from utils.create_logger import create_logger
def main():
    """Run detector evaluation on the configured test image set.

    Builds the GPU context list from ``config.gpus``, seeds numpy/mxnet for
    reproducible runs, then delegates to ``test_rcnn`` with the trained
    model prefix derived from the '+'-separated training image sets.
    """
    ctx = [mx.gpu(int(i)) for i in config.gpus.split(',')]
    # print(...) is valid under both Python 2 and 3; the old "print args"
    # statement was Python-2-only and inconsistent with the print() calls
    # already used in this file.
    print(args)
    # Fix RNG seeds so repeated test runs are deterministic.
    np.random.seed(0)
    mx.random.seed(0)
    logger, final_output_path = create_logger(config.output_path, args.cfg, config.dataset.test_image_set)
    print(config.dataset.dataset)
    # The trained model prefix lives one level above the test output dir,
    # under a folder named after the training image sets joined with '_'.
    model_prefix = os.path.join(final_output_path, '..',
                                '_'.join(config.dataset.image_set.split('+')),
                                config.TRAIN.model_prefix)
    test_rcnn(config, config.dataset.dataset, config.dataset.test_image_set,
              config.dataset.root_path, config.dataset.dataset_path,
              ctx, model_prefix, config.TEST.test_epoch,
              args.vis, args.ignore_cache, args.shuffle, config.TEST.HAS_RPN,
              config.dataset.proposal, args.thresh,
              logger=logger, output_path=final_output_path)
# Script entry point.
if __name__ == '__main__':
    main()
| 44.277108 | 174 | 0.698776 |
import _init_paths
import argparse
import os
import sys
import time
import logging
from config.config import config, update_config
def parse_args():
parser = argparse.ArgumentParser(description='Test a Faster R-CNN network')
parser.add_argument('--cfg', help='experiment configure file name', required=True, type=str)
args, rest = parser.parse_known_args()
update_config(args.cfg)
parser.add_argument('--vis', help='turn on visualization', action='store_true')
parser.add_argument('--ignore_cache', help='ignore cached results boxes', action='store_true')
parser.add_argument('--thresh', help='valid detection threshold', default=1e-3, type=float)
parser.add_argument('--shuffle', help='shuffle data on visualization', action='store_true')
parser.add_argument('--test_epoch', help='the epoch model to be test', default=config.TEST.test_epoch, type=int)
parser.add_argument('--nms', help='params for nms or softnms', default=config.TEST.NMS, type=float)
parser.add_argument('--softnms', help='whether to enable softnms', default=config.TEST.SOFTNMS, action='store_true')
parser.add_argument('--naive_nms', help='whether to enable naive nms', default=False, action='store_true')
parser.add_argument('--first_n', help='first_n for learn nms or nms', default=config.TEST.FIRST_N, type=int)
parser.add_argument('--merge', help='merge method for learn nms', default=config.TEST.MERGE_METHOD, type=int)
parser.add_argument('--debug', help='whether to enable debug mode', default=False, action='store_true')
parser.add_argument('--test_set', help='which set to be tested', default=config.dataset.test_image_set, type=str)
args, rest = parser.parse_known_args()
config.TEST.test_epoch = args.test_epoch
config.TEST.NMS = args.nms
config.TEST.SOFTNMS = args.softnms and (not args.naive_nms)
config.TEST.FIRST_N = args.first_n
config.TEST.MERGE_METHOD = args.merge
config.dataset.test_image_set = args.test_set
return args
args = parse_args()
curr_path = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(curr_path, '../external/mxnet', config.MXNET_VERSION))
import mxnet as mx
import numpy as np
from function.test_rcnn import test_rcnn
from utils.create_logger import create_logger
def main():
ctx = [mx.gpu(int(i)) for i in config.gpus.split(',')]
print args
np.random.seed(0)
mx.random.seed(0)
logger, final_output_path = create_logger(config.output_path, args.cfg, config.dataset.test_image_set)
print(config.dataset.dataset)
test_rcnn(config, config.dataset.dataset, config.dataset.test_image_set, config.dataset.root_path, config.dataset.dataset_path,
ctx, os.path.join(final_output_path, '..', '_'.join([iset for iset in config.dataset.image_set.split('+')]), config.TRAIN.model_prefix), config.TEST.test_epoch,
args.vis, args.ignore_cache, args.shuffle, config.TEST.HAS_RPN, config.dataset.proposal, args.thresh, logger=logger, output_path=final_output_path)
if __name__ == '__main__':
main()
| false | true |
f7f99f15967abee3be0eb8f1392685b53540e401 | 3,984 | py | Python | configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/shufflenetv1_coco_256x192.py | jlgzb/mmpose | 0ecf06e3580f141f6ab44645768a0d6d8ba48383 | [
"Apache-2.0"
] | 4 | 2021-10-02T08:21:43.000Z | 2021-11-01T07:25:24.000Z | configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/shufflenetv1_coco_256x192.py | jlgzb/mmpose | 0ecf06e3580f141f6ab44645768a0d6d8ba48383 | [
"Apache-2.0"
] | null | null | null | configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/shufflenetv1_coco_256x192.py | jlgzb/mmpose | 0ecf06e3580f141f6ab44645768a0d6d8ba48383 | [
"Apache-2.0"
] | 1 | 2021-06-22T06:41:45.000Z | 2021-06-22T06:41:45.000Z | log_level = 'INFO'
# Checkpoint loading / resume behaviour: start from scratch by default.
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
# Save a checkpoint and run mAP evaluation every 10 epochs, tracking best AP.
checkpoint_config = dict(interval=10)
evaluation = dict(interval=10, metric='mAP', save_best='AP')
optimizer = dict(
    type='Adam',
    lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
# learning policy: linear warmup for the first 500 iters, then step decay at
# epochs 170 and 200 over the 210-epoch schedule
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[170, 200])
total_epochs = 210
# Text logging every 50 iterations; the Tensorboard hook is left commented
# out as an easy opt-in.
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# 17 keypoint channels (COCO person keypoints); every channel is used both
# as a training target and at inference time.
channel_cfg = dict(
    num_output_channels=17,
    dataset_joints=17,
    dataset_channel=[
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
    ],
    inference_channel=[
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
    ])
# model settings: top-down heatmap pose estimator with a ShuffleNetV1
# backbone and a simple deconv heatmap head (in_channels=960 presumably
# matches the groups=3 ShuffleNetV1 output width -- confirm against the
# backbone definition).
model = dict(
    type='TopDown',
    pretrained='mmcls://shufflenet_v1',
    backbone=dict(type='ShuffleNetV1', groups=3),
    keypoint_head=dict(
        type='TopdownHeatmapSimpleHead',
        in_channels=960,
        out_channels=channel_cfg['num_output_channels'],
        loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
    train_cfg=dict(),
    test_cfg=dict(
        flip_test=True,
        post_process='default',
        shift_heatmap=True,
        modulate_kernel=11))
# Input geometry and test-time bbox handling.  use_gt_bbox=False means
# evaluation runs on the pre-computed person detections in bbox_file rather
# than ground-truth boxes.
data_cfg = dict(
    image_size=[192, 256],
    heatmap_size=[48, 64],
    num_output_channels=channel_cfg['num_output_channels'],
    num_joints=channel_cfg['dataset_joints'],
    dataset_channel=channel_cfg['dataset_channel'],
    inference_channel=channel_cfg['inference_channel'],
    soft_nms=False,
    nms_thr=1.0,
    oks_thr=0.9,
    vis_thr=0.2,
    use_gt_bbox=False,
    det_bbox_thr=0.0,
    bbox_file='data/coco/person_detection_results/'
    'COCO_val2017_detections_AP_H_56_person.json',
)
# Training pipeline: random flip / half-body / scale-rotate augmentation,
# normalization, then Gaussian heatmap target generation.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='TopDownRandomFlip', flip_prob=0.5),
    dict(
        type='TopDownHalfBodyTransform',
        num_joints_half_body=8,
        prob_half_body=0.3),
    dict(
        type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
    dict(type='TopDownAffine'),
    dict(type='ToTensor'),
    dict(
        type='NormalizeTensor',
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]),
    dict(type='TopDownGenerateTarget', sigma=2),
    dict(
        type='Collect',
        keys=['img', 'target', 'target_weight'],
        meta_keys=[
            'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
            'rotation', 'bbox_score', 'flip_pairs'
        ]),
]
# Validation pipeline: deterministic affine + normalization only.
val_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='TopDownAffine'),
    dict(type='ToTensor'),
    dict(
        type='NormalizeTensor',
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]),
    dict(
        type='Collect',
        keys=['img'],
        meta_keys=[
            'image_file', 'center', 'scale', 'rotation', 'bbox_score',
            'flip_pairs'
        ]),
]
# Test-time processing is identical to validation.
test_pipeline = val_pipeline
data_root = 'data/coco'
# Dataloaders: 64 images per GPU for training, 32 for val/test.  Both val
# and test read the val2017 annotations.
data = dict(
    samples_per_gpu=64,
    workers_per_gpu=2,
    val_dataloader=dict(samples_per_gpu=32),
    test_dataloader=dict(samples_per_gpu=32),
    train=dict(
        type='TopDownCocoDataset',
        ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
        img_prefix=f'{data_root}/train2017/',
        data_cfg=data_cfg,
        pipeline=train_pipeline),
    val=dict(
        type='TopDownCocoDataset',
        ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
        img_prefix=f'{data_root}/val2017/',
        data_cfg=data_cfg,
        pipeline=val_pipeline),
    test=dict(
        type='TopDownCocoDataset',
        ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
        img_prefix=f'{data_root}/val2017/',
        data_cfg=data_cfg,
        pipeline=val_pipeline),
)
| 28.056338 | 79 | 0.632028 | log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
evaluation = dict(interval=10, metric='mAP', save_best='AP')
optimizer = dict(
type='Adam',
lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[170, 200])
total_epochs = 210
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
])
channel_cfg = dict(
num_output_channels=17,
dataset_joints=17,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
],
inference_channel=[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
])
model = dict(
type='TopDown',
pretrained='mmcls://shufflenet_v1',
backbone=dict(type='ShuffleNetV1', groups=3),
keypoint_head=dict(
type='TopdownHeatmapSimpleHead',
in_channels=960,
out_channels=channel_cfg['num_output_channels'],
loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
train_cfg=dict(),
test_cfg=dict(
flip_test=True,
post_process='default',
shift_heatmap=True,
modulate_kernel=11))
data_cfg = dict(
image_size=[192, 256],
heatmap_size=[48, 64],
num_output_channels=channel_cfg['num_output_channels'],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
soft_nms=False,
nms_thr=1.0,
oks_thr=0.9,
vis_thr=0.2,
use_gt_bbox=False,
det_bbox_thr=0.0,
bbox_file='data/coco/person_detection_results/'
'COCO_val2017_detections_AP_H_56_person.json',
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownRandomFlip', flip_prob=0.5),
dict(
type='TopDownHalfBodyTransform',
num_joints_half_body=8,
prob_half_body=0.3),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(type='TopDownGenerateTarget', sigma=2),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
meta_keys=[
'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
'rotation', 'bbox_score', 'flip_pairs'
]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='Collect',
keys=['img'],
meta_keys=[
'image_file', 'center', 'scale', 'rotation', 'bbox_score',
'flip_pairs'
]),
]
test_pipeline = val_pipeline
data_root = 'data/coco'
data = dict(
samples_per_gpu=64,
workers_per_gpu=2,
val_dataloader=dict(samples_per_gpu=32),
test_dataloader=dict(samples_per_gpu=32),
train=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
img_prefix=f'{data_root}/train2017/',
data_cfg=data_cfg,
pipeline=train_pipeline),
val=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
test=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
)
| true | true |
f7f9a1a87e35653efaa2f39e5ea3d37157bf7df7 | 968 | py | Python | code/deep/adarnn/base/loss/mutual_info.py | jiaruonan/transferlearning | 0360ba0b09df6c1033d466ddc65f0c870187331e | [
"MIT"
] | 9,657 | 2017-05-01T03:29:35.000Z | 2022-03-31T21:25:30.000Z | code/deep/adarnn/base/loss/mutual_info.py | xiaohuihui-com/transferlearning | 17583db86db19709ff483a24590f0d5b88e25fe5 | [
"MIT"
] | 262 | 2017-09-16T09:33:02.000Z | 2022-03-30T05:08:45.000Z | code/deep/adarnn/base/loss/mutual_info.py | xiaohuihui-com/transferlearning | 17583db86db19709ff483a24590f0d5b88e25fe5 | [
"MIT"
] | 3,273 | 2017-05-01T06:28:31.000Z | 2022-03-31T09:57:48.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
class Mine_estimator(nn.Module):
    """Mutual-information estimator built on a ``Mine`` statistics network.

    The forward pass evaluates the network on joint pairs (X, Y) and on
    shuffled (marginal) pairs, forms the Donsker-Varadhan lower bound
    ``E_joint[T] - log E_marginal[exp(T)]`` and returns its negation so it
    can be minimised as a loss.
    """
    def __init__(self, input_dim=2048, hidden_dim=512):
        super(Mine_estimator, self).__init__()
        self.mine_model = Mine(input_dim, hidden_dim)
    def forward(self, X, Y):
        # Shuffling Y breaks its pairing with X, giving samples from the
        # product of marginals.
        y_shuffled = Y[torch.randperm(len(Y))]
        joint_score = self.mine_model(X, Y)
        marginal_score = self.mine_model(X, y_shuffled)
        mi_lower_bound = torch.mean(joint_score) - \
            torch.log(torch.mean(torch.exp(marginal_score)))
        return -mi_lower_bound
class Mine(nn.Module):
    """Statistics network T(x, y) for mutual-information estimation.

    Projects ``x`` and ``y`` with separate linear layers, fuses them through
    a leaky ReLU, and maps the hidden state to one scalar score per sample
    pair.
    """
    def __init__(self, input_dim=2048, hidden_dim=512):
        super(Mine, self).__init__()
        self.fc1_x = nn.Linear(input_dim, hidden_dim)
        self.fc1_y = nn.Linear(input_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, 1)
    def forward(self, x, y):
        # Element-wise sum of the two projections acts as the fusion step.
        h1 = F.leaky_relu(self.fc1_x(x)+self.fc1_y(y))
        h2 = self.fc2(h1)
return h2 | 32.266667 | 59 | 0.636364 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Mine_estimator(nn.Module):
def __init__(self, input_dim=2048, hidden_dim=512):
super(Mine_estimator, self).__init__()
self.mine_model = Mine(input_dim, hidden_dim)
def forward(self, X, Y):
Y_shffle = Y[torch.randperm(len(Y))]
loss_joint = self.mine_model(X, Y)
loss_marginal = self.mine_model(X, Y_shffle)
ret = torch.mean(loss_joint) - \
torch.log(torch.mean(torch.exp(loss_marginal)))
loss = -ret
return loss
class Mine(nn.Module):
def __init__(self, input_dim=2048, hidden_dim=512):
super(Mine, self).__init__()
self.fc1_x = nn.Linear(input_dim, hidden_dim)
self.fc1_y = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, 1)
def forward(self, x, y):
h1 = F.leaky_relu(self.fc1_x(x)+self.fc1_y(y))
h2 = self.fc2(h1)
return h2 | true | true |
f7f9a2c7ab062c209125ef4e457b4996dc74124d | 1,519 | py | Python | AdnReport/test/test_Adn_Report_dialog.py | METIS-GEO/plugins | 1673f22dec90aafd7eef61842f01e6b8e0de833e | [
"MIT"
] | null | null | null | AdnReport/test/test_Adn_Report_dialog.py | METIS-GEO/plugins | 1673f22dec90aafd7eef61842f01e6b8e0de833e | [
"MIT"
] | null | null | null | AdnReport/test/test_Adn_Report_dialog.py | METIS-GEO/plugins | 1673f22dec90aafd7eef61842f01e6b8e0de833e | [
"MIT"
] | null | null | null | # coding=utf-8
"""Dialog test.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'g.bruel@metis-reseaux.fr'
__date__ = '2018-01-08'
__copyright__ = 'Copyright 2018, gbruel/metis'
import unittest
from PyQt4.QtGui import QDialogButtonBox, QDialog
from Adn_Report_dialog import AdnReportDialog
from utilities import get_qgis_app
QGIS_APP = get_qgis_app()
class AdnReportDialogTest(unittest.TestCase):
    """GUI smoke tests for ``AdnReportDialog``.

    Relies on the module-level ``QGIS_APP`` created above so Qt widgets can
    be instantiated outside a running QGIS session.
    """
    def setUp(self):
        """Create a fresh dialog before each test."""
        self.dialog = AdnReportDialog(None)
    def tearDown(self):
        """Drop the dialog reference after each test."""
        self.dialog = None
    def test_dialog_ok(self):
        """Clicking OK accepts the dialog."""
        button = self.dialog.button_box.button(QDialogButtonBox.Ok)
        button.click()
        result = self.dialog.result()
        self.assertEqual(result, QDialog.Accepted)
    def test_dialog_cancel(self):
        """Clicking Cancel rejects the dialog."""
        button = self.dialog.button_box.button(QDialogButtonBox.Cancel)
        button.click()
        result = self.dialog.result()
        self.assertEqual(result, QDialog.Rejected)
if __name__ == "__main__":
    # Allow running this module directly with a verbose text test runner.
    suite = unittest.makeSuite(AdnReportDialogTest)
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite)
| 27.125 | 78 | 0.687294 |
__author__ = 'g.bruel@metis-reseaux.fr'
__date__ = '2018-01-08'
__copyright__ = 'Copyright 2018, gbruel/metis'
import unittest
from PyQt4.QtGui import QDialogButtonBox, QDialog
from Adn_Report_dialog import AdnReportDialog
from utilities import get_qgis_app
QGIS_APP = get_qgis_app()
class AdnReportDialogTest(unittest.TestCase):
def setUp(self):
self.dialog = AdnReportDialog(None)
def tearDown(self):
self.dialog = None
def test_dialog_ok(self):
button = self.dialog.button_box.button(QDialogButtonBox.Ok)
button.click()
result = self.dialog.result()
self.assertEqual(result, QDialog.Accepted)
def test_dialog_cancel(self):
button = self.dialog.button_box.button(QDialogButtonBox.Cancel)
button.click()
result = self.dialog.result()
self.assertEqual(result, QDialog.Rejected)
if __name__ == "__main__":
suite = unittest.makeSuite(AdnReportDialogTest)
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
| true | true |
f7f9a2d690b276c094aee2a450d85ba36d107780 | 3,409 | py | Python | test/strandtest.py | edbgon/rpipin | 99f3023288dbb08b5f8530ba5f23bdc5802ac427 | [
"MIT"
] | null | null | null | test/strandtest.py | edbgon/rpipin | 99f3023288dbb08b5f8530ba5f23bdc5802ac427 | [
"MIT"
] | null | null | null | test/strandtest.py | edbgon/rpipin | 99f3023288dbb08b5f8530ba5f23bdc5802ac427 | [
"MIT"
] | null | null | null | # NeoPixel library strandtest example
# Author: Tony DiCola (tony@tonydicola.com)
#
# Direct port of the Arduino NeoPixel library strandtest example. Showcases
# various animations on a strip of NeoPixels.
import time
from neopixel import *
# LED strip configuration:
LED_COUNT = 10 # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (must support PWM!).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 5 # DMA channel to use for generating signal (try 5)
LED_BRIGHTNESS = 128 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
# Define functions which animate LEDs in various ways.
def colorWipe(strip, color, wait_ms=50):
    """Fill the strip with a single colour, revealing one pixel per frame."""
    delay = wait_ms / 1000.0
    for pixel in range(strip.numPixels()):
        strip.setPixelColor(pixel, color)
        strip.show()
        time.sleep(delay)
def theaterChase(strip, color, wait_ms=50, iterations=10):
    """Movie-theatre marquee chase: every third pixel lit, phase rotating."""
    delay = wait_ms / 1000.0
    for _ in range(iterations):
        for phase in range(3):
            # Light every third pixel at this phase offset...
            for start in range(0, strip.numPixels(), 3):
                strip.setPixelColor(start + phase, color)
            strip.show()
            time.sleep(delay)
            # ...then blank them again before advancing the phase.
            for start in range(0, strip.numPixels(), 3):
                strip.setPixelColor(start + phase, 0)
def wheel(pos):
    """Map a position in 0-255 onto a colour along the rainbow wheel."""
    pos = int(pos)
    if pos < 85:
        # First third of the wheel.
        return Color(pos * 3, 255 - pos * 3, 0)
    if pos < 170:
        # Second third.
        shifted = pos - 85
        return Color(255 - shifted * 3, 0, shifted * 3)
    # Final third.
    shifted = pos - 170
    return Color(0, shifted * 3, 255 - shifted * 3)
def rainbow(strip, wait_ms=20, iterations=1):
    """Cycle every pixel through the colour wheel, offset by its index."""
    delay = wait_ms / 1000.0
    for step in range(256 * iterations):
        for pixel in range(strip.numPixels()):
            strip.setPixelColor(pixel, wheel((pixel + step) & 255))
        strip.show()
        time.sleep(delay)
def rainbowCycle(strip, wait_ms=20, iterations=5):
    """Draw a rainbow that uniformly distributes itself across all pixels.

    Each pixel's wheel position is offset by its fraction of the strip, so
    one full rainbow spans the strip and rotates over time.
    """
    for j in range(256*iterations):
        for i in range(strip.numPixels()):
            # Mask the wheel *position* into 0-255.  The previous code applied
            # "& 255" to the packed colour returned by wheel(), which kept only
            # the lowest 8-bit channel and also passed wheel() out-of-range
            # positions (this matches the upstream strandtest example).
            strip.setPixelColor(i, wheel((int(i * 256 / strip.numPixels()) + j) & 255))
        strip.show()
        time.sleep(wait_ms/1000.0)
def theaterChaseRainbow(strip, wait_ms=50):
    """Theatre-marquee chase whose colour steps through the rainbow."""
    delay = wait_ms / 1000.0
    for step in range(256):
        for phase in range(3):
            # Light every third pixel, colouring each from the wheel.
            for start in range(0, strip.numPixels(), 3):
                strip.setPixelColor(start + phase, wheel((start + step) % 255))
            strip.show()
            time.sleep(delay)
            # Blank the lit pixels before advancing the phase.
            for start in range(0, strip.numPixels(), 3):
                strip.setPixelColor(start + phase, 0)
# Main program logic follows:
if __name__ == '__main__':
    # Build the driver from the configuration constants above.
    strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS)
    # begin() must be called once before any other strip method.
    strip.begin()
    print ('Press Ctrl-C to quit.')
    while True:
        # Single-colour wipes.  NOTE(review): Color() takes (red, green, blue),
        # so the original "Blue"/"Green" labels on the second and third wipes
        # looked swapped unless the strip uses a different channel order --
        # confirm against the hardware.
        colorWipe(strip, Color(255, 0, 0))
        colorWipe(strip, Color(0, 255, 0))
        colorWipe(strip, Color(0, 0, 255))
        # Theatre-chase animations: white, red, blue.
        theaterChase(strip, Color(127, 127, 127))
        theaterChase(strip, Color(127, 0, 0))
        theaterChase(strip, Color( 0, 0, 127))
        # Rainbow animations.
        rainbow(strip)
        rainbowCycle(strip)
        theaterChaseRainbow(strip)
| 34.09 | 96 | 0.692285 |
import time
from neopixel import *
LED_COUNT = 10
LED_PIN = 18
LED_FREQ_HZ = 800000
LED_DMA = 5
LED_BRIGHTNESS = 128
LED_INVERT = False
def colorWipe(strip, color, wait_ms=50):
for i in range(strip.numPixels()):
strip.setPixelColor(i, color)
strip.show()
time.sleep(wait_ms/1000.0)
def theaterChase(strip, color, wait_ms=50, iterations=10):
for j in range(iterations):
for q in range(3):
for i in range(0, strip.numPixels(), 3):
strip.setPixelColor(i+q, color)
strip.show()
time.sleep(wait_ms/1000.0)
for i in range(0, strip.numPixels(), 3):
strip.setPixelColor(i+q, 0)
def wheel(pos):
pos = int(pos)
if pos < 85:
return Color(pos * 3, 255 - pos * 3, 0)
elif pos < 170:
pos -= 85
return Color(255 - pos * 3, 0, pos * 3)
else:
pos -= 170
return Color(0, pos * 3, 255 - pos * 3)
def rainbow(strip, wait_ms=20, iterations=1):
for j in range(256*iterations):
for i in range(strip.numPixels()):
strip.setPixelColor(i, wheel((i+j) & 255))
strip.show()
time.sleep(wait_ms/1000.0)
def rainbowCycle(strip, wait_ms=20, iterations=5):
    """Draw a rainbow spread uniformly across the strip, rotating over time."""
    for j in range(256*iterations):
        for i in range(strip.numPixels()):
            # Mask the wheel *position* into 0-255; previously "& 255" was
            # applied to the packed colour wheel() returned, keeping only the
            # low 8-bit channel and handing wheel() out-of-range positions.
            strip.setPixelColor(i, wheel((int(i * 256 / strip.numPixels()) + j) & 255))
        strip.show()
        time.sleep(wait_ms/1000.0)
def theaterChaseRainbow(strip, wait_ms=50):
for j in range(256):
for q in range(3):
for i in range(0, strip.numPixels(), 3):
strip.setPixelColor(i+q, wheel((i+j) % 255))
strip.show()
time.sleep(wait_ms/1000.0)
for i in range(0, strip.numPixels(), 3):
strip.setPixelColor(i+q, 0)
if __name__ == '__main__':
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS)
strip.begin()
print ('Press Ctrl-C to quit.')
while True:
colorWipe(strip, Color(255, 0, 0))
colorWipe(strip, Color(0, 255, 0))
colorWipe(strip, Color(0, 0, 255))
theaterChase(strip, Color(127, 127, 127))
theaterChase(strip, Color(127, 0, 0))
theaterChase(strip, Color( 0, 0, 127))
rainbow(strip)
rainbowCycle(strip)
theaterChaseRainbow(strip)
| true | true |
f7f9a373d9eccad91b95aeb1cb2578c3cbbdf211 | 6,516 | py | Python | euca2ools/commands/euimage/installpackedimage.py | vasiliykochergin/euca2ools | 23781599a9da28dcc582cffd1df7a7e3f8ce8ba0 | [
"BSD-2-Clause"
] | null | null | null | euca2ools/commands/euimage/installpackedimage.py | vasiliykochergin/euca2ools | 23781599a9da28dcc582cffd1df7a7e3f8ce8ba0 | [
"BSD-2-Clause"
] | null | null | null | euca2ools/commands/euimage/installpackedimage.py | vasiliykochergin/euca2ools | 23781599a9da28dcc582cffd1df7a7e3f8ce8ba0 | [
"BSD-2-Clause"
] | null | null | null | # Copyright 2014-2015 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import hashlib
import tempfile
from requestbuilder import Arg
from requestbuilder.auth.aws import QueryHmacV2Auth
from requestbuilder.mixins import FileTransferProgressBarMixin, TabifyingMixin
import euca2ools
from euca2ools.commands.ec2 import EC2
from euca2ools.commands.euimage.pack import ImagePack
from euca2ools.commands.s3 import S3Request
class InstallPackedImage(S3Request, FileTransferProgressBarMixin,
TabifyingMixin):
DESCRIPTION = '***TECH PREVIEW***\n\nInstall a packed image into the cloud'
ARGS = [Arg('pack_filename', metavar='FILE',
help='the pack to install (required)'),
Arg('--profile', help='''which of the image's profiles to
install (default: "default")'''),
# Upload stuff (for bundle pieces or imported disk image pieces)
Arg('-b', '--bucket', metavar='BUCKET[/PREFIX]',
help='bucket to upload the image to (required)'),
Arg('--location', help='''location constraint of the destination
bucket (default: inferred from s3-location-constraint in
configuration, or otherwise none'''),
# Bundle stuff
Arg('--privatekey', metavar='FILE', help='''file containing
your private key to sign the bundle's manifest with. This
private key will also be required to unbundle the bundle in
the future. (instance-store only)'''),
Arg('--cert', metavar='FILE', help='''file containing your
X.509 certificate (instance-store only)'''),
Arg('--ec2cert', metavar='FILE', help='''file containing the
cloud's X.509 certificate (instance-store only)'''),
Arg('--user', metavar='ACCOUNT',
help='your account ID (instance-store only)'),
# Registration stuff
Arg('--kernel', metavar='IMAGE', help='''ID of the kernel image to
associate with this machine image (paravirtual only)'''),
Arg('--ramdisk', metavar='IMAGE', help='''ID of the ramdisk image
to associate with this machine image (paravirtual only)'''),
Arg('--ec2-url', help='compute service endpoint URL'),
Arg('--ec2-auth', help=argparse.SUPPRESS),
Arg('--ec2-service', help=argparse.SUPPRESS)]
def configure(self):
S3Request.configure(self)
if not self.args.get('ec2_service'):
self.args['ec2_service'] = EC2.from_other(
self.service, url=self.args.get('ec2_url'))
if not self.args.get('ec2_auth'):
self.args['ec2_auth'] = QueryHmacV2Auth.from_other(self.auth)
if not self.args.get('profile'):
self.args['profile'] = 'default'
def main(self):
services = {'s3': {'service': self.service, 'auth': self.auth},
'ec2': {'service': self.args['ec2_service'],
'auth': self.args['ec2_auth']}}
unpacked_image = tempfile.TemporaryFile()
with ImagePack.open(self.args['pack_filename']) as pack:
if self.args['profile'] not in pack.image_md.profiles:
raise ValueError(
'no such profile: "{0}" (choose from {1})'.format(
self.args['profile'],
', '.join(pack.image_md.profiles.keys())))
with pack.open_image() as image:
# We could technically hand the image file object
# directly to the installation process and calculate
# checksums on fly, but that would mean we error out
# only after everything finishes and force people to
# clean up after if the checksum happens to be bad.
# We thus do this in two steps instead.
digest = hashlib.sha256()
bytes_written = 0
pbar = self.get_progressbar(label='Decompressing',
maxval=pack.pack_md.image_size)
pbar.start()
while True:
chunk = image.read(euca2ools.BUFSIZE)
if not chunk:
break
digest.update(chunk)
unpacked_image.write(chunk)
bytes_written += len(chunk)
pbar.update(bytes_written)
pbar.finish()
if digest.hexdigest() != pack.pack_md.image_sha256sum:
raise RuntimeError('image appears to be corrupt '
'(expected SHA256: {0}, actual: {1})',
pack.pack_md.image_sha256sum,
digest.hexdigest())
unpacked_image.seek(0)
image_id = pack.image_md.install_profile(
self.args['profile'], services, unpacked_image,
pack.pack_md.image_size, self.args)
unpacked_image.close()
return image_id
def print_result(self, image_id):
print self.tabify(('IMAGE', image_id))
| 50.123077 | 79 | 0.610344 |
import argparse
import hashlib
import tempfile
from requestbuilder import Arg
from requestbuilder.auth.aws import QueryHmacV2Auth
from requestbuilder.mixins import FileTransferProgressBarMixin, TabifyingMixin
import euca2ools
from euca2ools.commands.ec2 import EC2
from euca2ools.commands.euimage.pack import ImagePack
from euca2ools.commands.s3 import S3Request
class InstallPackedImage(S3Request, FileTransferProgressBarMixin,
TabifyingMixin):
DESCRIPTION = '***TECH PREVIEW***\n\nInstall a packed image into the cloud'
ARGS = [Arg('pack_filename', metavar='FILE',
help='the pack to install (required)'),
Arg('--profile', help='''which of the image's profiles to
install (default: "default")'''),
# Upload stuff (for bundle pieces or imported disk image pieces)
Arg('-b', '--bucket', metavar='BUCKET[/PREFIX]',
help='bucket to upload the image to (required)'),
Arg('--location', help='''location constraint of the destination
bucket (default: inferred from s3-location-constraint in
configuration, or otherwise none'''),
# Bundle stuff
Arg('--privatekey', metavar='FILE', help='''file containing
your private key to sign the bundle's manifest with. This
private key will also be required to unbundle the bundle in
the future. (instance-store only)'''),
Arg('--cert', metavar='FILE', help='''file containing your
X.509 certificate (instance-store only)'''),
Arg('--ec2cert', metavar='FILE', help='''file containing the
cloud's X.509 certificate (instance-store only)'''),
Arg('--user', metavar='ACCOUNT',
help='your account ID (instance-store only)'),
# Registration stuff
Arg('--kernel', metavar='IMAGE', help='''ID of the kernel image to
associate with this machine image (paravirtual only)'''),
Arg('--ramdisk', metavar='IMAGE', help='''ID of the ramdisk image
to associate with this machine image (paravirtual only)'''),
Arg('--ec2-url', help='compute service endpoint URL'),
Arg('--ec2-auth', help=argparse.SUPPRESS),
Arg('--ec2-service', help=argparse.SUPPRESS)]
def configure(self):
S3Request.configure(self)
if not self.args.get('ec2_service'):
self.args['ec2_service'] = EC2.from_other(
self.service, url=self.args.get('ec2_url'))
if not self.args.get('ec2_auth'):
self.args['ec2_auth'] = QueryHmacV2Auth.from_other(self.auth)
if not self.args.get('profile'):
self.args['profile'] = 'default'
def main(self):
services = {'s3': {'service': self.service, 'auth': self.auth},
'ec2': {'service': self.args['ec2_service'],
'auth': self.args['ec2_auth']}}
unpacked_image = tempfile.TemporaryFile()
with ImagePack.open(self.args['pack_filename']) as pack:
if self.args['profile'] not in pack.image_md.profiles:
raise ValueError(
'no such profile: "{0}" (choose from {1})'.format(
self.args['profile'],
', '.join(pack.image_md.profiles.keys())))
with pack.open_image() as image:
# We could technically hand the image file object
# directly to the installation process and calculate
# checksums on fly, but that would mean we error out
# only after everything finishes and force people to
# clean up after if the checksum happens to be bad.
# We thus do this in two steps instead.
digest = hashlib.sha256()
bytes_written = 0
pbar = self.get_progressbar(label='Decompressing',
maxval=pack.pack_md.image_size)
pbar.start()
while True:
chunk = image.read(euca2ools.BUFSIZE)
if not chunk:
break
digest.update(chunk)
unpacked_image.write(chunk)
bytes_written += len(chunk)
pbar.update(bytes_written)
pbar.finish()
if digest.hexdigest() != pack.pack_md.image_sha256sum:
raise RuntimeError('image appears to be corrupt '
'(expected SHA256: {0}, actual: {1})',
pack.pack_md.image_sha256sum,
digest.hexdigest())
unpacked_image.seek(0)
image_id = pack.image_md.install_profile(
self.args['profile'], services, unpacked_image,
pack.pack_md.image_size, self.args)
unpacked_image.close()
return image_id
def print_result(self, image_id):
print self.tabify(('IMAGE', image_id))
| false | true |
f7f9a38e0cf2bdd2a6ecda9998ccb93f100e3316 | 3,799 | py | Python | gluon/tests/test_languages.py | himelpdas/biddrive | 786e0fc58d8c0afeef0bcad9125b0d98eaabc709 | [
"BSD-3-Clause"
] | 97 | 2015-01-17T09:41:57.000Z | 2022-03-15T11:39:03.000Z | webui/gluon/tests/test_languages.py | pouyana/teireader | ac0a92d8b2e570eae1c0a03fd35a7b281eccd250 | [
"MIT"
] | 26 | 2015-01-02T13:35:48.000Z | 2015-06-10T14:39:07.000Z | webui/gluon/tests/test_languages.py | pouyana/teireader | ac0a92d8b2e570eae1c0a03fd35a7b281eccd250 | [
"MIT"
] | 35 | 2015-01-25T04:48:37.000Z | 2021-01-29T20:32:26.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Unit tests for gluon.languages
"""
import sys
import os
import unittest
import tempfile
import threading
import logging
def fix_sys_path():
    """Prepend '', web2py/gluon, web2py/site-packages and web2py/ to sys.path.

    Works regardless of the directory the tests are launched from by
    walking upward (at most ten levels) until web2py.py is found.
    """
    def promote(entry):
        # Move/insert `entry` at the front, dropping any duplicate forms.
        sys.path = [entry] + [p for p in sys.path
                              if p not in (entry, entry + '/')]

    base = os.path.dirname(os.path.abspath(__file__))
    if not os.path.isfile(os.path.join(base, 'web2py.py')):
        for _ in range(10):
            if os.path.exists(os.path.join(base, 'web2py.py')):
                break
            base = os.path.abspath(os.path.join(base, '..'))
    for entry in (base,
                  os.path.abspath(os.path.join(base, 'site-packages')),
                  os.path.abspath(os.path.join(base, 'gluon')),
                  ''):
        promote(entry)
fix_sys_path()
import languages
from storage import Storage
try:
import multiprocessing
def read_write(args):
(filename, iterations) = args
for i in range(0, iterations):
content = languages.read_dict(filename)
if not len(content):
return False
languages.write_dict(filename, content)
return True
class TestLanguagesParallel(unittest.TestCase):
def setUp(self):
self.filename = tempfile.mktemp()
contents = dict()
for i in range(1000):
contents["key%d" % i] = "value%d" % i
languages.write_dict(self.filename, contents)
languages.read_dict(self.filename)
def tearDown(self):
try:
os.remove(self.filename)
except:
pass
def test_reads_and_writes(self):
readwriters = 10
pool = multiprocessing.Pool(processes=readwriters)
results = pool.map(read_write, [[self.filename, 10]] * readwriters)
for result in results:
self.assertTrue(result)
    class TestTranslations(unittest.TestCase):
        """Exercise the languages.translator API against the welcome app."""
        def setUp(self):
            # Resolve the welcome app's language directory whether the tests
            # run from the web2py root or from inside gluon/tests.
            if os.path.isdir('gluon'):
                self.langpath = 'applications/welcome/languages'
            else:
                self.langpath = os.path.realpath(
                    '../../applications/welcome/languages')
            self.http_accept_language = 'en'
        def tearDown(self):
            """Nothing to clean up."""
            pass
        def test_plain(self):
            """Translate plain strings, plurals and markmin markup."""
            T = languages.translator(self.langpath, self.http_accept_language)
            self.assertEqual(str(T('Hello World')),
                             'Hello World')
            # '##' starts a translator comment that is stripped from output.
            self.assertEqual(str(T('Hello World## comment')),
                             'Hello World')
            # %%{word} pluralises according to the numeric argument.
            self.assertEqual(str(T('%s %%{shop}', 1)),
                             '1 shop')
            self.assertEqual(str(T('%s %%{shop}', 2)),
                             '2 shops')
            # %%{word[n]} pluralises according to positional argument n.
            self.assertEqual(str(T('%s %%{shop[0]}', 1)),
                             '1 shop')
            self.assertEqual(str(T('%s %%{shop[0]}', 2)),
                             '2 shops')
            self.assertEqual(str(T('%s %%{quark[0]}', 1)),
                             '1 quark')
            self.assertEqual(str(T('%s %%{quark[0]}', 2)),
                             '2 quarks')
            # T.M renders the translated string as markmin markup.
            self.assertEqual(str(T.M('**Hello World**')),
                             '<strong>Hello World</strong>')
            # force() switches the active language for subsequent lookups.
            T.force('it')
            self.assertEqual(str(T('Hello World')),
                             'Salve Mondo')
except ImportError:
logging.warning("Skipped test case, no multiprocessing module.")
if __name__ == '__main__':
unittest.main()
| 30.637097 | 79 | 0.515662 |
import sys
import os
import unittest
import tempfile
import threading
import logging
def fix_sys_path():
def add_path_first(path):
sys.path = [path] + [p for p in sys.path if (
not p == path and not p == (path + '/'))]
path = os.path.dirname(os.path.abspath(__file__))
if not os.path.isfile(os.path.join(path,'web2py.py')):
i = 0
while i<10:
i += 1
if os.path.exists(os.path.join(path,'web2py.py')):
break
path = os.path.abspath(os.path.join(path, '..'))
paths = [path,
os.path.abspath(os.path.join(path, 'site-packages')),
os.path.abspath(os.path.join(path, 'gluon')),
'']
[add_path_first(path) for path in paths]
fix_sys_path()
import languages
from storage import Storage
try:
import multiprocessing
def read_write(args):
(filename, iterations) = args
for i in range(0, iterations):
content = languages.read_dict(filename)
if not len(content):
return False
languages.write_dict(filename, content)
return True
class TestLanguagesParallel(unittest.TestCase):
def setUp(self):
self.filename = tempfile.mktemp()
contents = dict()
for i in range(1000):
contents["key%d" % i] = "value%d" % i
languages.write_dict(self.filename, contents)
languages.read_dict(self.filename)
def tearDown(self):
try:
os.remove(self.filename)
except:
pass
def test_reads_and_writes(self):
readwriters = 10
pool = multiprocessing.Pool(processes=readwriters)
results = pool.map(read_write, [[self.filename, 10]] * readwriters)
for result in results:
self.assertTrue(result)
class TestTranslations(unittest.TestCase):
def setUp(self):
if os.path.isdir('gluon'):
self.langpath = 'applications/welcome/languages'
else:
self.langpath = os.path.realpath(
'../../applications/welcome/languages')
self.http_accept_language = 'en'
def tearDown(self):
pass
def test_plain(self):
T = languages.translator(self.langpath, self.http_accept_language)
self.assertEqual(str(T('Hello World')),
'Hello World')
self.assertEqual(str(T('Hello World## comment')),
'Hello World')
self.assertEqual(str(T('%s %%{shop}', 1)),
'1 shop')
self.assertEqual(str(T('%s %%{shop}', 2)),
'2 shops')
self.assertEqual(str(T('%s %%{shop[0]}', 1)),
'1 shop')
self.assertEqual(str(T('%s %%{shop[0]}', 2)),
'2 shops')
self.assertEqual(str(T('%s %%{quark[0]}', 1)),
'1 quark')
self.assertEqual(str(T('%s %%{quark[0]}', 2)),
'2 quarks')
self.assertEqual(str(T.M('**Hello World**')),
'<strong>Hello World</strong>')
T.force('it')
self.assertEqual(str(T('Hello World')),
'Salve Mondo')
except ImportError:
logging.warning("Skipped test case, no multiprocessing module.")
if __name__ == '__main__':
unittest.main()
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.