| column | type | lengths / values | nulls (⌀) |
|---|---|---|---|
| hexsha | string | length 40 | |
| size | int64 | 4 to 996k | |
| ext | string | 8 distinct values | |
| lang | string | 1 distinct value | |
| max_stars_repo_path | string | length 4 to 245 | |
| max_stars_repo_name | string | length 6 to 130 | |
| max_stars_repo_head_hexsha | string | length 40 | |
| max_stars_repo_licenses | list | length 1 to 10 | |
| max_stars_count | int64 | 1 to 191k | ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 | ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 | ⌀ |
| max_issues_repo_path | string | length 4 to 245 | |
| max_issues_repo_name | string | length 6 to 130 | |
| max_issues_repo_head_hexsha | string | length 40 | |
| max_issues_repo_licenses | list | length 1 to 10 | |
| max_issues_count | int64 | 1 to 67k | ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 | ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 | ⌀ |
| max_forks_repo_path | string | length 4 to 245 | |
| max_forks_repo_name | string | length 6 to 130 | |
| max_forks_repo_head_hexsha | string | length 40 | |
| max_forks_repo_licenses | list | length 1 to 10 | |
| max_forks_count | int64 | 1 to 105k | ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 | ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 | ⌀ |
| content | string | length 4 to 996k | |
| avg_line_length | float64 | 1.33 to 58.2k | |
| max_line_length | int64 | 2 to 323k | |
| alphanum_fraction | float64 | 0 to 0.97 | |
| content_no_comment | string | length 0 to 946k | |
| is_comment_constant_removed | bool | 2 distinct values | |
| is_sharp_comment_removed | bool | 1 distinct value | |
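Each record pairs repository metadata (the max_stars / max_issues / max_forks variants) with the raw file `content`, a comment-stripped `content_no_comment`, and simple text statistics. The following is a minimal sketch of how such records could be inspected with pandas; the Parquet file name is only a placeholder, not part of this excerpt, and the statistics are recomputed in one plausible way rather than the dataset's exact definition.

```python
# Minimal sketch, assuming the records are available locally as a Parquet file.
# "records.parquet" is a placeholder name, not taken from this excerpt.
import pandas as pd

df = pd.read_parquet("records.parquet")

# Recompute the simple text statistics exposed by the schema for one record
# (one plausible definition; the dataset's own computation may differ).
content = df.loc[0, "content"]
lines = content.splitlines()
avg_line_length = sum(len(line) for line in lines) / len(lines)
max_line_length = max(len(line) for line in lines)
alphanum_fraction = sum(c.isalnum() for c in content) / len(content)

# Keep only Python files from repositories above a minimum star count.
mask = (df["ext"] == "py") & (df["max_stars_count"].fillna(0) >= 10)
print(df.loc[mask, ["max_stars_repo_name", "max_stars_repo_path", "size"]].head())
```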
hexsha 79091bcfd56bdba1a5823f454f617da8931533a4 | size 204 | ext py | lang Python
max_stars: repo_path trixi/logger/file/__init__.py | repo_name comeonfox/trixi | head_hexsha e25545104a2e17b1673f4990df5183d610259208 | licenses ["MIT"] | count 1 | events 2018-11-30T21:53:36.000Z to 2018-11-30T21:53:36.000Z
max_issues: repo_path trixi/logger/file/__init__.py | repo_name comeonfox/trixi | head_hexsha e25545104a2e17b1673f4990df5183d610259208 | licenses ["MIT"] | count null | events null to null
max_forks: repo_path trixi/logger/file/__init__.py | repo_name comeonfox/trixi | head_hexsha e25545104a2e17b1673f4990df5183d610259208 | licenses ["MIT"] | count null | events null to null
content:
from trixi.logger.file.numpyplotfilelogger import NumpyPlotFileLogger
from trixi.logger.file.pytorchplotfilelogger import PytorchPlotFileLogger
from trixi.logger.file.textfilelogger import TextFileLogger
avg_line_length 51 | max_line_length 73 | alphanum_fraction 0.897059
content_no_comment:
from trixi.logger.file.numpyplotfilelogger import NumpyPlotFileLogger
from trixi.logger.file.pytorchplotfilelogger import PytorchPlotFileLogger
from trixi.logger.file.textfilelogger import TextFileLogger
is_comment_constant_removed true | is_sharp_comment_removed true
hexsha 79091c4835b89bd60b100b699a288e8a4e535277 | size 8,043 | ext py | lang Python
max_stars: repo_path docs/conf.py | repo_name josl/ASM_challenge | head_hexsha f6bc31ab29d7589e259e1f3a2acbb613db6f03f3 | licenses ["Apache-2.0"] | count 2 | events 2015-11-12T11:18:11.000Z to 2015-11-12T22:29:59.000Z
max_issues: repo_path docs/conf.py | repo_name josl/ASM_challenge | head_hexsha f6bc31ab29d7589e259e1f3a2acbb613db6f03f3 | licenses ["Apache-2.0"] | count null | events null to null
max_forks: repo_path docs/conf.py | repo_name josl/ASM_challenge | head_hexsha f6bc31ab29d7589e259e1f3a2acbb613db6f03f3 | licenses ["Apache-2.0"] | count 1 | events 2015-11-10T16:10:36.000Z to 2015-11-10T16:10:36.000Z
content:
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import inspect
from sphinx import apidoc
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
package = "asm_challenge"
namespace = []
namespace_pkg = ".".join([namespace[-1], package]) if namespace else package
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'asm_challenge'
copyright = u'2015, Jose Luis Bellod Cisneros'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
try:
from namespace_pkg import __version__ as version
except ImportError:
pass
else:
release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'asm_challenge-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual])
latex_documents = [
('index', 'user_guide.tex', u'asm_challenge Documentation',
u'Jose Luis Bellod Cisneros', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ---------------------------------------------------------
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
'sphinx': ('http://sphinx.pocoo.org', None),
'python': ('http://docs.python.org/' + python_version, None),
'matplotlib': ('http://matplotlib.sourceforge.net', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None),
'sklearn': ('http://scikit-learn.org/stable', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
}
avg_line_length 33.65272 | max_line_length 85 | alphanum_fraction 0.708566
content_no_comment:
import sys
import os
import inspect
from sphinx import apidoc
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
package = "asm_challenge"
namespace = []
namespace_pkg = ".".join([namespace[-1], package]) if namespace else package
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.pngmath']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'asm_challenge'
copyright = u'2015, Jose Luis Bellod Cisneros'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
try:
from namespace_pkg import __version__ as version
except ImportError:
pass
else:
release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'asm_challenge-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual])
latex_documents = [
('index', 'user_guide.tex', u'asm_challenge Documentation',
u'Jose Luis Bellod Cisneros', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ---------------------------------------------------------
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
'sphinx': ('http://sphinx.pocoo.org', None),
'python': ('http://docs.python.org/' + python_version, None),
'matplotlib': ('http://matplotlib.sourceforge.net', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None),
'sklearn': ('http://scikit-learn.org/stable', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
}
is_comment_constant_removed true | is_sharp_comment_removed true
hexsha 79091ca70931c7b85c0a6b24ba9004a8dd2149d7 | size 8,679 | ext py | lang Python
max_stars: repo_path ptlflow/__init__.py | repo_name hmorimitsu/ptlflow | head_hexsha 26f753322aef91b95ad78e743d847064e5d531b9 | licenses ["Apache-2.0"] | count 78 | events 2021-06-15T03:11:33.000Z to 2022-03-25T05:51:25.000Z
max_issues: repo_path ptlflow/__init__.py | repo_name hmorimitsu/ptlflow | head_hexsha 26f753322aef91b95ad78e743d847064e5d531b9 | licenses ["Apache-2.0"] | count 12 | events 2021-07-04T17:02:57.000Z to 2022-02-09T09:30:43.000Z
max_forks: repo_path ptlflow/__init__.py | repo_name hmorimitsu/ptlflow | head_hexsha 26f753322aef91b95ad78e743d847064e5d531b9 | licenses ["Apache-2.0"] | count 3 | events 2021-07-27T21:28:51.000Z to 2021-09-17T10:06:27.000Z
content:
"""Provide useful functions for using PTLFlow."""
# =============================================================================
# Copyright 2021 Henrique Morimitsu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
__version__ = '0.2.5'
import logging
from argparse import Namespace
from pathlib import Path
from typing import List, Optional
import requests
import torch
from torch import hub
from ptlflow.models.base_model.base_model import BaseModel
from ptlflow.models.dicl.dicl import DICL
from ptlflow.models.fastflownet.fastflownet import FastFlowNet
from ptlflow.models.flownet.flownet2 import FlowNet2
from ptlflow.models.flownet.flownetc import FlowNetC
from ptlflow.models.flownet.flownetcs import FlowNetCS
from ptlflow.models.flownet.flownetcss import FlowNetCSS
from ptlflow.models.flownet.flownets import FlowNetS
from ptlflow.models.flownet.flownetsd import FlowNetSD
from ptlflow.models.gma.gma import GMA
from ptlflow.models.hd3.hd3 import HD3, HD3Context
from ptlflow.models.irr.pwcnet import IRRPWCNet
from ptlflow.models.irr.pwcnet_irr import IRRPWCNetIRR
from ptlflow.models.irr.irr_pwc import IRRPWC
from ptlflow.models.lcv.lcv_raft import LCV_RAFT, LCV_RAFTSmall
from ptlflow.models.liteflownet.liteflownet import LiteFlowNet
from ptlflow.models.liteflownet.liteflownet3 import (
LiteFlowNet3, LiteFlowNet3PseudoReg, LiteFlowNet3S, LiteFlowNet3SPseudoReg)
from ptlflow.models.liteflownet.liteflownet2 import LiteFlowNet2, LiteFlowNet2PseudoReg
from ptlflow.models.maskflownet.maskflownet import MaskFlownet, MaskFlownet_S
from ptlflow.models.pwcnet.pwcnet import PWCNet, PWCDCNet
from ptlflow.models.raft.raft import RAFT, RAFTSmall
from ptlflow.models.scopeflow.irr_pwc_v2 import ScopeFlow
from ptlflow.models.starflow.starflow import StarFlow
from ptlflow.models.vcn.vcn import VCN, VCNSmall
from ptlflow.utils.utils import config_logging
try:
from ptlflow.models.scv.scv import SCVEighth, SCVQuarter
except ImportError as e:
print(e)
SCVEighth = None
SCVQuarter = None
config_logging()
models_dict = {
'dicl': DICL,
'fastflownet': FastFlowNet,
'flownet2': FlowNet2,
'flownetc': FlowNetC,
'flownetcs': FlowNetCS,
'flownetcss': FlowNetCSS,
'flownets': FlowNetS,
'flownetsd': FlowNetSD,
'gma': GMA,
'hd3': HD3,
'hd3_ctxt': HD3Context,
'irr_pwc': IRRPWC,
'irr_pwcnet': IRRPWCNet,
'irr_pwcnet_irr': IRRPWCNetIRR,
'lcv_raft': LCV_RAFT,
'lcv_raft_small': LCV_RAFTSmall,
'liteflownet': LiteFlowNet,
'liteflownet2': LiteFlowNet2,
'liteflownet2_pseudoreg': LiteFlowNet2PseudoReg,
'liteflownet3': LiteFlowNet3,
'liteflownet3_pseudoreg': LiteFlowNet3PseudoReg,
'liteflownet3s': LiteFlowNet3S,
'liteflownet3s_pseudoreg': LiteFlowNet3SPseudoReg,
'maskflownet': MaskFlownet,
'maskflownet_s': MaskFlownet_S,
'pwcnet': PWCNet,
'pwcdcnet': PWCDCNet,
'raft': RAFT,
'raft_small': RAFTSmall,
'scopeflow': ScopeFlow,
'scv4': SCVQuarter,
'scv8': SCVEighth,
'starflow': StarFlow,
'vcn': VCN,
'vcn_small': VCNSmall,
}
def download_scripts(
destination_dir: Path = Path('ptlflow_scripts')
) -> None:
"""Download the main scripts and configs to start working with PTLFlow."""
github_url = 'https://raw.githubusercontent.com/hmorimitsu/ptlflow/main/'
script_names = [
'datasets.yml',
'infer.py',
'test.py',
'train.py',
'validate.py'
]
destination_dir.mkdir(parents=True, exist_ok=True)
for sname in script_names:
script_url = github_url + sname
data = requests.get(script_url)
if data.status_code == 200:
with open(destination_dir / sname, 'wb') as f:
f.write(data.content)
else:
logging.warning('Script %s was not found.', script_url)
logging.info('Downloaded scripts to %s.', str(destination_dir))
def get_model(
model_name: str,
pretrained_ckpt: Optional[str] = None,
args: Optional[Namespace] = None
) -> BaseModel:
"""Return an instance of a chosen model.
The instance can be configured by the arguments, and it can load some existing pretrained weights.
Note that this is different from get_model_reference(), which returns a reference to the model class. The instance
returned by this function is an already instantiated class. Therefore, the return of this function is equivalent to
"return get_model_reference()()", which may look confusing. It can be rewritten as
"model_ref = get_model_reference(); return model_ref()".
Parameters
----------
model_name : str
Name of the model to get an instance of.
pretrained_ckpt : Optional[str], optional
Name of the pretrained weight to load or a path to a local checkpoint file.
args : Optional[Namespace], optional
Some arguments that will be provided to the model.
Returns
-------
BaseModel
The instance of the chosen model.
Raises
------
ValueError
If the given checkpoint name is not a valid choice.
ValueError
If a checkpoint name is given, but the model does not have any pretrained weights available.
See Also
--------
get_model_reference : To get a reference to the class of a model.
"""
model_ref = get_model_reference(model_name)
if args is None:
parser = model_ref.add_model_specific_args()
args = parser.parse_args([])
model = model_ref(args)
if pretrained_ckpt is None and args is not None and args.pretrained_ckpt is not None:
pretrained_ckpt = args.pretrained_ckpt
if pretrained_ckpt is not None:
if Path(pretrained_ckpt).exists():
ckpt_path = pretrained_ckpt
elif hasattr(model_ref, 'pretrained_checkpoints'):
ckpt_path = model_ref.pretrained_checkpoints.get(pretrained_ckpt)
if ckpt_path is None:
raise ValueError(
f'Invalid checkpoint name {pretrained_ckpt}. '
f'Choose one from {{{",".join(model.pretrained_checkpoints.keys())}}}')
else:
raise ValueError(f'Cannot find checkpoint {pretrained_ckpt} for model {model_name}')
device = 'cuda' if torch.cuda.is_available() else 'cpu'
if Path(ckpt_path).exists():
ckpt = torch.load(ckpt_path, map_location=torch.device(device))
else:
model_dir = Path(hub.get_dir()) / 'ptlflow' / 'checkpoints'
ckpt = hub.load_state_dict_from_url(
ckpt_path, model_dir=model_dir, map_location=torch.device(device), check_hash=True)
state_dict = ckpt['state_dict']
model.load_state_dict(state_dict)
return model
def get_model_reference(
model_name: str
) -> BaseModel:
"""Return a reference to the class of a chosen model.
Note that this is different from get_model(), which returns an instance of a model. The reference, returned by this
function, is a class before instantiation. Therefore, the return of this function can be used to instantiate a model as
"model_ref = get_model_reference(); model_instance = model_ref()".
Parameters
----------
model_name : str
Name of the model to get a reference of.
Returns
-------
BaseModel
A reference to the chosen model.
Raises
------
ValueError
If the given name is not a valid choice.
See Also
--------
get_model : To get an instance of a model.
"""
try:
return models_dict[model_name]
except KeyError:
raise ValueError(f'Unknown model name: {model_name}. Choose from [{", ".join(models_dict.keys())}]')
def get_trainable_model_names() -> List[str]:
"""Return a list of model names that are able to be trained.
This function returns the names of the models that have a loss function defined.
Returns
-------
List[str]
The list of the model names that can be trained.
"""
return [mname for mname in models_dict.keys() if get_model(mname).loss_fn is not None]
avg_line_length 34.440476 | max_line_length 123 | alphanum_fraction 0.689019
content_no_comment:
__version__ = '0.2.5'
import logging
from argparse import Namespace
from pathlib import Path
from typing import List, Optional
import requests
import torch
from torch import hub
from ptlflow.models.base_model.base_model import BaseModel
from ptlflow.models.dicl.dicl import DICL
from ptlflow.models.fastflownet.fastflownet import FastFlowNet
from ptlflow.models.flownet.flownet2 import FlowNet2
from ptlflow.models.flownet.flownetc import FlowNetC
from ptlflow.models.flownet.flownetcs import FlowNetCS
from ptlflow.models.flownet.flownetcss import FlowNetCSS
from ptlflow.models.flownet.flownets import FlowNetS
from ptlflow.models.flownet.flownetsd import FlowNetSD
from ptlflow.models.gma.gma import GMA
from ptlflow.models.hd3.hd3 import HD3, HD3Context
from ptlflow.models.irr.pwcnet import IRRPWCNet
from ptlflow.models.irr.pwcnet_irr import IRRPWCNetIRR
from ptlflow.models.irr.irr_pwc import IRRPWC
from ptlflow.models.lcv.lcv_raft import LCV_RAFT, LCV_RAFTSmall
from ptlflow.models.liteflownet.liteflownet import LiteFlowNet
from ptlflow.models.liteflownet.liteflownet3 import (
LiteFlowNet3, LiteFlowNet3PseudoReg, LiteFlowNet3S, LiteFlowNet3SPseudoReg)
from ptlflow.models.liteflownet.liteflownet2 import LiteFlowNet2, LiteFlowNet2PseudoReg
from ptlflow.models.maskflownet.maskflownet import MaskFlownet, MaskFlownet_S
from ptlflow.models.pwcnet.pwcnet import PWCNet, PWCDCNet
from ptlflow.models.raft.raft import RAFT, RAFTSmall
from ptlflow.models.scopeflow.irr_pwc_v2 import ScopeFlow
from ptlflow.models.starflow.starflow import StarFlow
from ptlflow.models.vcn.vcn import VCN, VCNSmall
from ptlflow.utils.utils import config_logging
try:
from ptlflow.models.scv.scv import SCVEighth, SCVQuarter
except ImportError as e:
print(e)
SCVEighth = None
SCVQuarter = None
config_logging()
models_dict = {
'dicl': DICL,
'fastflownet': FastFlowNet,
'flownet2': FlowNet2,
'flownetc': FlowNetC,
'flownetcs': FlowNetCS,
'flownetcss': FlowNetCSS,
'flownets': FlowNetS,
'flownetsd': FlowNetSD,
'gma': GMA,
'hd3': HD3,
'hd3_ctxt': HD3Context,
'irr_pwc': IRRPWC,
'irr_pwcnet': IRRPWCNet,
'irr_pwcnet_irr': IRRPWCNetIRR,
'lcv_raft': LCV_RAFT,
'lcv_raft_small': LCV_RAFTSmall,
'liteflownet': LiteFlowNet,
'liteflownet2': LiteFlowNet2,
'liteflownet2_pseudoreg': LiteFlowNet2PseudoReg,
'liteflownet3': LiteFlowNet3,
'liteflownet3_pseudoreg': LiteFlowNet3PseudoReg,
'liteflownet3s': LiteFlowNet3S,
'liteflownet3s_pseudoreg': LiteFlowNet3SPseudoReg,
'maskflownet': MaskFlownet,
'maskflownet_s': MaskFlownet_S,
'pwcnet': PWCNet,
'pwcdcnet': PWCDCNet,
'raft': RAFT,
'raft_small': RAFTSmall,
'scopeflow': ScopeFlow,
'scv4': SCVQuarter,
'scv8': SCVEighth,
'starflow': StarFlow,
'vcn': VCN,
'vcn_small': VCNSmall,
}
def download_scripts(
destination_dir: Path = Path('ptlflow_scripts')
) -> None:
github_url = 'https://raw.githubusercontent.com/hmorimitsu/ptlflow/main/'
script_names = [
'datasets.yml',
'infer.py',
'test.py',
'train.py',
'validate.py'
]
destination_dir.mkdir(parents=True, exist_ok=True)
for sname in script_names:
script_url = github_url + sname
data = requests.get(script_url)
if data.status_code == 200:
with open(destination_dir / sname, 'wb') as f:
f.write(data.content)
else:
logging.warning('Script %s was not found.', script_url)
logging.info('Downloaded scripts to %s.', str(destination_dir))
def get_model(
model_name: str,
pretrained_ckpt: Optional[str] = None,
args: Optional[Namespace] = None
) -> BaseModel:
model_ref = get_model_reference(model_name)
if args is None:
parser = model_ref.add_model_specific_args()
args = parser.parse_args([])
model = model_ref(args)
if pretrained_ckpt is None and args is not None and args.pretrained_ckpt is not None:
pretrained_ckpt = args.pretrained_ckpt
if pretrained_ckpt is not None:
if Path(pretrained_ckpt).exists():
ckpt_path = pretrained_ckpt
elif hasattr(model_ref, 'pretrained_checkpoints'):
ckpt_path = model_ref.pretrained_checkpoints.get(pretrained_ckpt)
if ckpt_path is None:
raise ValueError(
f'Invalid checkpoint name {pretrained_ckpt}. '
f'Choose one from {{{",".join(model.pretrained_checkpoints.keys())}}}')
else:
raise ValueError(f'Cannot find checkpoint {pretrained_ckpt} for model {model_name}')
device = 'cuda' if torch.cuda.is_available() else 'cpu'
if Path(ckpt_path).exists():
ckpt = torch.load(ckpt_path, map_location=torch.device(device))
else:
model_dir = Path(hub.get_dir()) / 'ptlflow' / 'checkpoints'
ckpt = hub.load_state_dict_from_url(
ckpt_path, model_dir=model_dir, map_location=torch.device(device), check_hash=True)
state_dict = ckpt['state_dict']
model.load_state_dict(state_dict)
return model
def get_model_reference(
model_name: str
) -> BaseModel:
try:
return models_dict[model_name]
except KeyError:
raise ValueError(f'Unknown model name: {model_name}. Choose from [{", ".join(models_dict.keys())}]')
def get_trainable_model_names() -> List[str]:
return [mname for mname in models_dict.keys() if get_model(mname).loss_fn is not None]
is_comment_constant_removed true | is_sharp_comment_removed true
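The docstrings in the ptlflow/__init__.py record above distinguish get_model(), which returns an already instantiated model, from get_model_reference(), which returns the class itself. Below is a minimal sketch of both access paths; it assumes ptlflow is installed, and the checkpoint name 'things' is only an illustrative placeholder, not something stated in this record.

```python
# Minimal sketch; assumes ptlflow is installed. 'things' is an illustrative
# checkpoint name and may not exist for every model.
import ptlflow

# A reference to the class, as described by get_model_reference().
raft_cls = ptlflow.get_model_reference('raft')
parser = raft_cls.add_model_specific_args()
args = parser.parse_args([])  # model-specific defaults, as done inside get_model()

# An instance, as described by get_model(); roughly equivalent to raft_cls(args),
# plus optional loading of pretrained weights.
model = ptlflow.get_model('raft', pretrained_ckpt='things', args=args)
print(type(model).__name__)
```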
hexsha 79091d01f2a7d5c083961aea6c0a54a53f35e56c | size 264 | ext py | lang Python
max_stars: repo_path part_2-mvc_structures/app/routes.py | repo_name perogeremmer/latihan-flask | head_hexsha 4a0098d8f23595d2b092b35b2f9b15f8abcf8ff5 | licenses ["Naumen", "Condor-1.1", "MS-PL"] | count 1 | events 2021-09-18T17:48:34.000Z to 2021-09-18T17:48:34.000Z
max_issues: repo_path part_2-mvc_structures/app/routes.py | repo_name perogeremmer/latihan-flask | head_hexsha 4a0098d8f23595d2b092b35b2f9b15f8abcf8ff5 | licenses ["Naumen", "Condor-1.1", "MS-PL"] | count null | events null to null
max_forks: repo_path part_2-mvc_structures/app/routes.py | repo_name perogeremmer/latihan-flask | head_hexsha 4a0098d8f23595d2b092b35b2f9b15f8abcf8ff5 | licenses ["Naumen", "Condor-1.1", "MS-PL"] | count null | events null to null
content:
from app import api, web
from app.controllers import MyController, MyViewController
api.add_resource(MyController.MyController, '/')
web.add_resource(MyViewController.MyViewController, '/')
web.add_resource(MyViewController.MySecondViewController, '/say-my-name')
avg_line_length 44 | max_line_length 73 | alphanum_fraction 0.829545
content_no_comment:
from app import api, web
from app.controllers import MyController, MyViewController
api.add_resource(MyController.MyController, '/')
web.add_resource(MyViewController.MyViewController, '/')
web.add_resource(MyViewController.MySecondViewController, '/say-my-name')
is_comment_constant_removed true | is_sharp_comment_removed true
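The routes.py record above attaches resource classes to two objects, `api` and `web`, imported from the `app` package; that package is not part of this excerpt. The sketch below is purely hypothetical wiring in the same spirit, assuming Flask-RESTful: the blueprint names, the URL prefix, and the HelloController resource are invented for illustration.

```python
# Hypothetical, self-contained sketch only: the real app/__init__.py and controllers
# are not shown in this excerpt. Assumes Flask and Flask-RESTful are installed.
from flask import Flask, Blueprint
from flask_restful import Api, Resource

app = Flask(__name__)
api_bp = Blueprint('api', __name__, url_prefix='/api')
web_bp = Blueprint('web', __name__)
api = Api(api_bp)  # plays the role of `api` in routes.py (JSON endpoints)
web = Api(web_bp)  # plays the role of `web` in routes.py (page endpoints)

class HelloController(Resource):  # invented stand-in for MyController.MyController
    def get(self):
        return {'message': 'hello from the API'}

api.add_resource(HelloController, '/')
app.register_blueprint(api_bp)
app.register_blueprint(web_bp)

if __name__ == '__main__':
    app.run(debug=True)
```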
hexsha 79091d211abb1b5cb10893671591591a401bb86e | size 1,374 | ext py | lang Python
max_stars: repo_path startdialog.py | repo_name jibonaronno/Rhythm | head_hexsha 1c8670d99960b7379fdf6dd006339b96143e7d90 | licenses ["CC0-1.0"] | count null | events null to null
max_issues: repo_path startdialog.py | repo_name jibonaronno/Rhythm | head_hexsha 1c8670d99960b7379fdf6dd006339b96143e7d90 | licenses ["CC0-1.0"] | count null | events null to null
max_forks: repo_path startdialog.py | repo_name jibonaronno/Rhythm | head_hexsha 1c8670d99960b7379fdf6dd006339b96143e7d90 | licenses ["CC0-1.0"] | count null | events null to null
content:
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import QDialog, QApplication, QWidget, QVBoxLayout, QHBoxLayout, QGroupBox
from PyQt5 import uic
from os.path import join, dirname, abspath
from qtpy.QtCore import Slot, QTimer, QThread, Signal, QObject, Qt
#from PyQt5 import Qt
_ST_DLG = join(dirname(abspath(__file__)), 'startdialog.ui')
class StartDialog(QDialog):
def __init__(self, parent):
super(StartDialog, self).__init__() # Call the inherited classes __init__ method
#super().__init__(parent)
uic.loadUi(_ST_DLG, self)
self.hideText()
self.index = 0
self.labels = [self.label01, self.label02, self.label03, self.label04, self.label05, self.label06]
self.timer = QTimer()
self.timer.timeout.connect(self.serialText)
self.timer.start(1060)
self.setWindowModality(Qt.ApplicationModal)
self.exec_()
@Slot()
def on_ok_clicked(self):
self.timer.stop()
self.close()
def hideText(self):
self.label01.hide()
self.label02.hide()
self.label03.hide()
self.label04.hide()
self.label05.hide()
self.label06.hide()
def serialText(self):
self.labels[self.index].show()
if self.index < 5:
self.index += 1
else:
self.timer.stop()
avg_line_length 31.227273 | max_line_length 106 | alphanum_fraction 0.642649
content_no_comment:
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import QDialog, QApplication, QWidget, QVBoxLayout, QHBoxLayout, QGroupBox
from PyQt5 import uic
from os.path import join, dirname, abspath
from qtpy.QtCore import Slot, QTimer, QThread, Signal, QObject, Qt
_ST_DLG = join(dirname(abspath(__file__)), 'startdialog.ui')
class StartDialog(QDialog):
def __init__(self, parent):
super(StartDialog, self).__init__()
uic.loadUi(_ST_DLG, self)
self.hideText()
self.index = 0
self.labels = [self.label01, self.label02, self.label03, self.label04, self.label05, self.label06]
self.timer = QTimer()
self.timer.timeout.connect(self.serialText)
self.timer.start(1060)
self.setWindowModality(Qt.ApplicationModal)
self.exec_()
@Slot()
def on_ok_clicked(self):
self.timer.stop()
self.close()
def hideText(self):
self.label01.hide()
self.label02.hide()
self.label03.hide()
self.label04.hide()
self.label05.hide()
self.label06.hide()
def serialText(self):
self.labels[self.index].show()
if self.index < 5:
self.index += 1
else:
self.timer.stop()
is_comment_constant_removed true | is_sharp_comment_removed true
hexsha 79091d9d2a7176661512c69cf03cbfc321ef7321 | size 1,608 | ext py | lang Python
max_stars: repo_path Test.py | repo_name YuriyAksenov/ImageRecognition | head_hexsha 70a45ca44eb54f66dac23951011fdf487d34bd79 | licenses ["MIT"] | count null | events null to null
max_issues: repo_path Test.py | repo_name YuriyAksenov/ImageRecognition | head_hexsha 70a45ca44eb54f66dac23951011fdf487d34bd79 | licenses ["MIT"] | count null | events null to null
max_forks: repo_path Test.py | repo_name YuriyAksenov/ImageRecognition | head_hexsha 70a45ca44eb54f66dac23951011fdf487d34bd79 | licenses ["MIT"] | count null | events null to null
content:
from ui import *
startUI()
# # - read the input data:
# import MnistLoader
# training_data, validation_data, test_data = MnistLoader.load_data_wrapper()
# training_data = list(training_data)
# # ---------------------
# # - network.py example:
# from Network import Network, vectorized_result
# from NetworkLoader import save, load
# # netPath = "E:\\ITMO University\\Интеллектуальные системы и технологии\\Lab5\Lab\\Models\\model_5epochs.json";
# # net = load(netPath)
# # # imgPath = "E:\\ITMO University\\Интеллектуальные системы и технологии\\Lab5\\Lab\\HandTestImages\\0.png"
# # # predict(imgPath, 7, net)
# # # net = Network([784, 30, 10])
# # # net.run(training_data, 5, 10, 3.0, test_data=test_data, monitor_evaluation_cost=True,
# # # monitor_evaluation_accuracy=True,
# # # monitor_training_cost=True,
# # # monitor_training_accuracy=True)
# # imgPath = "E:\\ITMO University\\Интеллектуальные системы и технологии\\Lab5\\Lab\\HandTestImages\\0.png"
# # #predict(imgPath, net)
# # save(net, "E:\ITMO University\Интеллектуальные системы и технологии\Lab5\Lab\Models\model_5epochs.json")
# from ui import *
# net = ""
# startUI()
# # ----------------------
# # - network2.py example:
# # import network2
# # net = network2.Network([784, 30, 10], cost=network2.CrossEntropyCost)
# # #net.large_weight_initializer()
# # net.SGD(training_data, 30, 10, 0.1, lmbda = 5.0,evaluation_data=validation_data,
# # monitor_evaluation_accuracy=True)
avg_line_length 10.374194 | max_line_length 113 | alphanum_fraction 0.631219
content_no_comment:
from ui import *
startUI()
is_comment_constant_removed true | is_sharp_comment_removed true
hexsha 79091d9e2ef838f4354bc146d6447b448633f8d5 | size 42,131 | ext py | lang Python
max_stars: repo_path kujenga/consensus/blockchain.py | repo_name Kujenga-Network/kujenga-blockchain | head_hexsha ef1cdaa46bf780be97c63efa99ee1695a190cdf1 | licenses ["Apache-2.0"] | count 4 | events 2021-09-19T18:58:56.000Z to 2022-02-09T04:30:02.000Z
max_issues: repo_path kujenga/consensus/blockchain.py | repo_name Kujenga-Network/kujenga-blockchain | head_hexsha ef1cdaa46bf780be97c63efa99ee1695a190cdf1 | licenses ["Apache-2.0"] | count 11 | events 2021-09-14T01:07:54.000Z to 2021-10-04T17:06:12.000Z
max_forks: repo_path kujenga/consensus/blockchain.py | repo_name Kujenga-Network/kujenga-blockchain | head_hexsha ef1cdaa46bf780be97c63efa99ee1695a190cdf1 | licenses ["Apache-2.0"] | count null | events null to null
content:
import asyncio
import dataclasses
import logging
import multiprocessing
from concurrent.futures.process import ProcessPoolExecutor
from enum import Enum
from typing import Dict, List, Optional, Set, Tuple, Union
from clvm.casts import int_from_bytes
from kujenga.consensus.block_body_validation import validate_block_body
from kujenga.consensus.block_header_validation import validate_finished_header_block, validate_unfinished_header_block
from kujenga.consensus.block_record import BlockRecord
from kujenga.consensus.blockchain_interface import BlockchainInterface
from kujenga.consensus.constants import ConsensusConstants
from kujenga.consensus.cost_calculator import NPCResult
from kujenga.consensus.difficulty_adjustment import get_next_sub_slot_iters_and_difficulty
from kujenga.consensus.find_fork_point import find_fork_point_in_chain
from kujenga.consensus.full_block_to_block_record import block_to_block_record
from kujenga.consensus.multiprocess_validation import PreValidationResult, pre_validate_blocks_multiprocessing
from kujenga.full_node.block_store import BlockStore
from kujenga.full_node.coin_store import CoinStore
from kujenga.full_node.hint_store import HintStore
from kujenga.full_node.mempool_check_conditions import get_name_puzzle_conditions
from kujenga.types.blockchain_format.coin import Coin
from kujenga.types.blockchain_format.sized_bytes import bytes32
from kujenga.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from kujenga.types.blockchain_format.vdf import VDFInfo
from kujenga.types.coin_record import CoinRecord
from kujenga.types.condition_opcodes import ConditionOpcode
from kujenga.types.end_of_slot_bundle import EndOfSubSlotBundle
from kujenga.types.full_block import FullBlock
from kujenga.types.generator_types import BlockGenerator, GeneratorArg
from kujenga.types.header_block import HeaderBlock
from kujenga.types.unfinished_block import UnfinishedBlock
from kujenga.types.unfinished_header_block import UnfinishedHeaderBlock
from kujenga.types.weight_proof import SubEpochChallengeSegment
from kujenga.util.errors import Err
from kujenga.util.generator_tools import get_block_header, tx_removals_and_additions
from kujenga.util.ints import uint16, uint32, uint64, uint128
from kujenga.util.streamable import recurse_jsonify
log = logging.getLogger(__name__)
class ReceiveBlockResult(Enum):
"""
When Blockchain.receive_block(b) is called, one of these results is returned,
showing whether the block was added to the chain (extending the peak),
and if not, why it was not added.
"""
NEW_PEAK = 1 # Added to the peak of the blockchain
ADDED_AS_ORPHAN = 2 # Added as an orphan/stale block (not a new peak of the chain)
INVALID_BLOCK = 3 # Block was not added because it was invalid
ALREADY_HAVE_BLOCK = 4 # Block is already present in this blockchain
DISCONNECTED_BLOCK = 5 # Block's parent (previous pointer) is not in this blockchain
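# Illustrative sketch (not part of the original file): how a caller might branch on the
# result tuple returned by Blockchain.receive_block(), defined further below. `blockchain`
# and `block` are assumed to already exist, and the call is made under the blockchain lock,
# as the method's docstring requires.
#
#   result, error, fork_height, (coin_changes, hint_changes) = await blockchain.receive_block(block)
#   if result == ReceiveBlockResult.NEW_PEAK:
#       pass  # fork_height marks where the chains diverge; coin_changes holds updated CoinRecords
#   elif result == ReceiveBlockResult.ADDED_AS_ORPHAN:
#       pass  # stored, but the peak did not change
#   elif result == ReceiveBlockResult.DISCONNECTED_BLOCK:
#       pass  # the parent block is unknown and must be fetched first
#   elif result == ReceiveBlockResult.INVALID_BLOCK:
#       pass  # `error` carries the specific Err code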
class Blockchain(BlockchainInterface):
constants: ConsensusConstants
constants_json: Dict
# peak of the blockchain
_peak_height: Optional[uint32]
# All blocks in peak path are guaranteed to be included, can include orphan blocks
__block_records: Dict[bytes32, BlockRecord]
# all hashes of blocks in block_record by height, used for garbage collection
__heights_in_cache: Dict[uint32, Set[bytes32]]
# Defines the path from genesis to the peak, no orphan blocks
__height_to_hash: Dict[uint32, bytes32]
# All sub-epoch summaries that have been included in the blockchain from the beginning until and including the peak
# (height_included, SubEpochSummary). Note: ONLY for the blocks in the path to the peak
__sub_epoch_summaries: Dict[uint32, SubEpochSummary] = {}
# Unspent Store
coin_store: CoinStore
# Store
block_store: BlockStore
# Used to verify blocks in parallel
pool: ProcessPoolExecutor
# Set holding seen compact proofs, in order to avoid duplicates.
_seen_compact_proofs: Set[Tuple[VDFInfo, uint32]]
# Whether blockchain is shut down or not
_shut_down: bool
# Lock to prevent simultaneous reads and writes
lock: asyncio.Lock
compact_proof_lock: asyncio.Lock
hint_store: HintStore
@staticmethod
async def create(
coin_store: CoinStore, block_store: BlockStore, consensus_constants: ConsensusConstants, hint_store: HintStore
):
"""
Initializes a blockchain with the BlockRecords from disk, assuming they have all been
validated. Uses the genesis block given in override_constants, or as a fallback,
in the consensus constants config.
"""
self = Blockchain()
self.lock = asyncio.Lock() # External lock handled by full node
self.compact_proof_lock = asyncio.Lock()
cpu_count = multiprocessing.cpu_count()
if cpu_count > 61:
cpu_count = 61 # Windows Server 2016 has an issue https://bugs.python.org/issue26903
num_workers = max(cpu_count - 2, 1)
self.pool = ProcessPoolExecutor(max_workers=num_workers)
log.info(f"Started {num_workers} processes for block validation")
self.constants = consensus_constants
self.coin_store = coin_store
self.block_store = block_store
self.constants_json = recurse_jsonify(dataclasses.asdict(self.constants))
self._shut_down = False
await self._load_chain_from_store()
self._seen_compact_proofs = set()
self.hint_store = hint_store
return self
def shut_down(self):
self._shut_down = True
self.pool.shutdown(wait=True)
async def _load_chain_from_store(self) -> None:
"""
Initializes the state of the Blockchain class from the database.
"""
height_to_hash, sub_epoch_summaries = await self.block_store.get_peak_height_dicts()
self.__height_to_hash = height_to_hash
self.__sub_epoch_summaries = sub_epoch_summaries
self.__block_records = {}
self.__heights_in_cache = {}
block_records, peak = await self.block_store.get_block_records_close_to_peak(self.constants.BLOCKS_CACHE_SIZE)
for block in block_records.values():
self.add_block_record(block)
if len(block_records) == 0:
assert peak is None
self._peak_height = None
return None
assert peak is not None
self._peak_height = self.block_record(peak).height
assert len(self.__height_to_hash) == self._peak_height + 1
def get_peak(self) -> Optional[BlockRecord]:
"""
Return the peak of the blockchain
"""
if self._peak_height is None:
return None
return self.height_to_block_record(self._peak_height)
async def get_full_peak(self) -> Optional[FullBlock]:
if self._peak_height is None:
return None
""" Return list of FullBlocks that are peaks"""
block = await self.block_store.get_full_block(self.height_to_hash(self._peak_height))
assert block is not None
return block
async def get_full_block(self, header_hash: bytes32) -> Optional[FullBlock]:
return await self.block_store.get_full_block(header_hash)
async def receive_block(
self,
block: FullBlock,
pre_validation_result: Optional[PreValidationResult] = None,
fork_point_with_peak: Optional[uint32] = None,
) -> Tuple[
ReceiveBlockResult,
Optional[Err],
Optional[uint32],
Tuple[List[CoinRecord], Dict[bytes, Dict[bytes32, CoinRecord]]],
]:
"""
This method must be called under the blockchain lock
Adds a new block into the blockchain, if it's valid and connected to the current
blockchain, regardless of whether it is the child of a head, or another block.
Returns a header if block is added to head. Returns an error if the block is
invalid. Also returns the fork height, in the case of a new peak.
"""
genesis: bool = block.height == 0
if self.contains_block(block.header_hash):
return ReceiveBlockResult.ALREADY_HAVE_BLOCK, None, None, ([], {})
if not self.contains_block(block.prev_header_hash) and not genesis:
return (ReceiveBlockResult.DISCONNECTED_BLOCK, Err.INVALID_PREV_BLOCK_HASH, None, ([], {}))
if not genesis and (self.block_record(block.prev_header_hash).height + 1) != block.height:
return ReceiveBlockResult.INVALID_BLOCK, Err.INVALID_HEIGHT, None, ([], {})
npc_result: Optional[NPCResult] = None
if pre_validation_result is None:
if block.height == 0:
prev_b: Optional[BlockRecord] = None
else:
prev_b = self.block_record(block.prev_header_hash)
sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty(
self.constants, len(block.finished_sub_slots) > 0, prev_b, self
)
if block.is_transaction_block():
if block.transactions_generator is not None:
try:
block_generator: Optional[BlockGenerator] = await self.get_block_generator(block)
except ValueError:
return ReceiveBlockResult.INVALID_BLOCK, Err.GENERATOR_REF_HAS_NO_GENERATOR, None, ([], {})
assert block_generator is not None and block.transactions_info is not None
npc_result = get_name_puzzle_conditions(
block_generator,
min(self.constants.MAX_BLOCK_COST_CLVM, block.transactions_info.cost),
cost_per_byte=self.constants.COST_PER_BYTE,
safe_mode=False,
)
removals, tx_additions = tx_removals_and_additions(npc_result.npc_list)
else:
removals, tx_additions = [], []
header_block = get_block_header(block, tx_additions, removals)
else:
npc_result = None
header_block = get_block_header(block, [], [])
required_iters, error = validate_finished_header_block(
self.constants,
self,
header_block,
False,
difficulty,
sub_slot_iters,
)
if error is not None:
return ReceiveBlockResult.INVALID_BLOCK, error.code, None, ([], {})
else:
npc_result = pre_validation_result.npc_result
required_iters = pre_validation_result.required_iters
assert pre_validation_result.error is None
assert required_iters is not None
error_code, _ = await validate_block_body(
self.constants,
self,
self.block_store,
self.coin_store,
self.get_peak(),
block,
block.height,
npc_result,
fork_point_with_peak,
self.get_block_generator,
)
if error_code is not None:
return ReceiveBlockResult.INVALID_BLOCK, error_code, None, ([], {})
block_record = block_to_block_record(
self.constants,
self,
required_iters,
block,
None,
)
# Always add the block to the database
async with self.block_store.db_wrapper.lock:
try:
header_hash: bytes32 = block.header_hash
# Perform the DB operations to update the state, and rollback if something goes wrong
await self.block_store.db_wrapper.begin_transaction()
await self.block_store.add_full_block(header_hash, block, block_record)
fork_height, peak_height, records, (coin_record_change, hint_changes) = await self._reconsider_peak(
block_record, genesis, fork_point_with_peak, npc_result
)
await self.block_store.db_wrapper.commit_transaction()
# Then update the memory cache. It is important that this task is not cancelled and does not throw
self.add_block_record(block_record)
for fetched_block_record in records:
self.__height_to_hash[fetched_block_record.height] = fetched_block_record.header_hash
if fetched_block_record.sub_epoch_summary_included is not None:
self.__sub_epoch_summaries[
fetched_block_record.height
] = fetched_block_record.sub_epoch_summary_included
if peak_height is not None:
self._peak_height = peak_height
except BaseException:
self.block_store.rollback_cache_block(header_hash)
await self.block_store.db_wrapper.rollback_transaction()
raise
if fork_height is not None:
# new coin records added
assert coin_record_change is not None
return ReceiveBlockResult.NEW_PEAK, None, fork_height, (coin_record_change, hint_changes)
else:
return ReceiveBlockResult.ADDED_AS_ORPHAN, None, None, ([], {})
def get_hint_list(self, npc_result: NPCResult) -> List[Tuple[bytes32, bytes]]:
h_list = []
for npc in npc_result.npc_list:
for opcode, conditions in npc.conditions:
if opcode == ConditionOpcode.CREATE_COIN:
for condition in conditions:
if len(condition.vars) > 2 and condition.vars[2] != b"":
puzzle_hash, amount_bin = condition.vars[0], condition.vars[1]
amount = int_from_bytes(amount_bin)
coin_id = Coin(npc.coin_name, puzzle_hash, amount).name()
h_list.append((coin_id, condition.vars[2]))
return h_list
async def _reconsider_peak(
self,
block_record: BlockRecord,
genesis: bool,
fork_point_with_peak: Optional[uint32],
npc_result: Optional[NPCResult],
) -> Tuple[
Optional[uint32],
Optional[uint32],
List[BlockRecord],
Tuple[List[CoinRecord], Dict[bytes, Dict[bytes32, CoinRecord]]],
]:
"""
When a new block is added, this is called, to check if the new block is the new peak of the chain.
This also handles reorgs by reverting blocks which are not in the heaviest chain.
It returns the height of the fork between the previous chain and the new chain, or returns
None if there was no update to the heaviest chain.
"""
peak = self.get_peak()
lastest_coin_state: Dict[bytes32, CoinRecord] = {}
hint_coin_state: Dict[bytes32, Dict[bytes32, CoinRecord]] = {}
if genesis:
if peak is None:
block: Optional[FullBlock] = await self.block_store.get_full_block(block_record.header_hash)
assert block is not None
if npc_result is not None:
tx_removals, tx_additions = tx_removals_and_additions(npc_result.npc_list)
else:
tx_removals, tx_additions = [], []
if block.is_transaction_block():
assert block.foliage_transaction_block is not None
added = await self.coin_store.new_block(
block.height,
block.foliage_transaction_block.timestamp,
block.get_included_reward_coins(),
tx_additions,
tx_removals,
)
else:
added, _ = [], []
await self.block_store.set_peak(block_record.header_hash)
return uint32(0), uint32(0), [block_record], (added, {})
return None, None, [], ([], {})
assert peak is not None
if block_record.weight > peak.weight:
# Find the fork. if the block is just being appended, it will return the peak
# If no blocks in common, returns -1, and reverts all blocks
if block_record.prev_hash == peak.header_hash:
fork_height: int = peak.height
elif fork_point_with_peak is not None:
fork_height = fork_point_with_peak
else:
fork_height = find_fork_point_in_chain(self, block_record, peak)
if block_record.prev_hash != peak.header_hash:
roll_changes: List[CoinRecord] = await self.coin_store.rollback_to_block(fork_height)
for coin_record in roll_changes:
lastest_coin_state[coin_record.name] = coin_record
# Rollback sub_epoch_summaries
heights_to_delete = []
for ses_included_height in self.__sub_epoch_summaries.keys():
if ses_included_height > fork_height:
heights_to_delete.append(ses_included_height)
for height in heights_to_delete:
log.info(f"delete ses at height {height}")
del self.__sub_epoch_summaries[height]
# Collect all blocks from fork point to new peak
blocks_to_add: List[Tuple[FullBlock, BlockRecord]] = []
curr = block_record.header_hash
while fork_height < 0 or curr != self.height_to_hash(uint32(fork_height)):
fetched_full_block: Optional[FullBlock] = await self.block_store.get_full_block(curr)
fetched_block_record: Optional[BlockRecord] = await self.block_store.get_block_record(curr)
assert fetched_full_block is not None
assert fetched_block_record is not None
blocks_to_add.append((fetched_full_block, fetched_block_record))
if fetched_full_block.height == 0:
# Doing a full reorg, starting at height 0
break
curr = fetched_block_record.prev_hash
records_to_add = []
for fetched_full_block, fetched_block_record in reversed(blocks_to_add):
records_to_add.append(fetched_block_record)
if fetched_full_block.is_transaction_block():
if fetched_block_record.header_hash == block_record.header_hash:
tx_removals, tx_additions, npc_res = await self.get_tx_removals_and_additions(
fetched_full_block, npc_result
)
else:
tx_removals, tx_additions, npc_res = await self.get_tx_removals_and_additions(
fetched_full_block, None
)
assert fetched_full_block.foliage_transaction_block is not None
added_rec = await self.coin_store.new_block(
fetched_full_block.height,
fetched_full_block.foliage_transaction_block.timestamp,
fetched_full_block.get_included_reward_coins(),
tx_additions,
tx_removals,
)
removed_rec: List[Optional[CoinRecord]] = [
await self.coin_store.get_coin_record(name) for name in tx_removals
]
# Set additions first, then removals in order to handle ephemeral coin state
# Add in height order is also required
record: Optional[CoinRecord]
for record in added_rec:
assert record
lastest_coin_state[record.name] = record
for record in removed_rec:
assert record
lastest_coin_state[record.name] = record
if npc_res is not None:
hint_list: List[Tuple[bytes32, bytes]] = self.get_hint_list(npc_res)
await self.hint_store.add_hints(hint_list)
# There can be multiple coins for the same hint
for coin_id, hint in hint_list:
key = hint
if key not in hint_coin_state:
hint_coin_state[key] = {}
hint_coin_state[key][coin_id] = lastest_coin_state[coin_id]
# Changes the peak to be the new peak
await self.block_store.set_peak(block_record.header_hash)
return (
uint32(max(fork_height, 0)),
block_record.height,
records_to_add,
(list(lastest_coin_state.values()), hint_coin_state),
)
# This is not a heavier block than the heaviest we have seen, so we don't change the coin set
return None, None, [], ([], {})
async def get_tx_removals_and_additions(
self, block: FullBlock, npc_result: Optional[NPCResult] = None
) -> Tuple[List[bytes32], List[Coin], Optional[NPCResult]]:
if block.is_transaction_block():
if block.transactions_generator is not None:
if npc_result is None:
block_generator: Optional[BlockGenerator] = await self.get_block_generator(block)
assert block_generator is not None
npc_result = get_name_puzzle_conditions(
block_generator,
self.constants.MAX_BLOCK_COST_CLVM,
cost_per_byte=self.constants.COST_PER_BYTE,
safe_mode=False,
)
tx_removals, tx_additions = tx_removals_and_additions(npc_result.npc_list)
return tx_removals, tx_additions, npc_result
else:
return [], [], None
else:
return [], [], None
def get_next_difficulty(self, header_hash: bytes32, new_slot: bool) -> uint64:
assert self.contains_block(header_hash)
curr = self.block_record(header_hash)
if curr.height <= 2:
return self.constants.DIFFICULTY_STARTING
return get_next_sub_slot_iters_and_difficulty(self.constants, new_slot, curr, self)[1]
def get_next_slot_iters(self, header_hash: bytes32, new_slot: bool) -> uint64:
assert self.contains_block(header_hash)
curr = self.block_record(header_hash)
if curr.height <= 2:
return self.constants.SUB_SLOT_ITERS_STARTING
return get_next_sub_slot_iters_and_difficulty(self.constants, new_slot, curr, self)[0]
async def get_sp_and_ip_sub_slots(
self, header_hash: bytes32
) -> Optional[Tuple[Optional[EndOfSubSlotBundle], Optional[EndOfSubSlotBundle]]]:
block: Optional[FullBlock] = await self.block_store.get_full_block(header_hash)
if block is None:
return None
curr_br: BlockRecord = self.block_record(block.header_hash)
is_overflow = curr_br.overflow
curr: Optional[FullBlock] = block
assert curr is not None
while True:
if curr_br.first_in_sub_slot:
curr = await self.block_store.get_full_block(curr_br.header_hash)
assert curr is not None
break
if curr_br.height == 0:
break
curr_br = self.block_record(curr_br.prev_hash)
if len(curr.finished_sub_slots) == 0:
# This means we got to genesis and still no sub-slots
return None, None
ip_sub_slot = curr.finished_sub_slots[-1]
if not is_overflow:
# Pos sub-slot is the same as infusion sub slot
return None, ip_sub_slot
if len(curr.finished_sub_slots) > 1:
# Have both sub-slots
return curr.finished_sub_slots[-2], ip_sub_slot
prev_curr: Optional[FullBlock] = await self.block_store.get_full_block(curr.prev_header_hash)
if prev_curr is None:
assert curr.height == 0
prev_curr = curr
prev_curr_br = self.block_record(curr.header_hash)
else:
prev_curr_br = self.block_record(curr.prev_header_hash)
assert prev_curr_br is not None
while prev_curr_br.height > 0:
if prev_curr_br.first_in_sub_slot:
prev_curr = await self.block_store.get_full_block(prev_curr_br.header_hash)
assert prev_curr is not None
break
prev_curr_br = self.block_record(prev_curr_br.prev_hash)
if len(prev_curr.finished_sub_slots) == 0:
return None, ip_sub_slot
return prev_curr.finished_sub_slots[-1], ip_sub_slot
def get_recent_reward_challenges(self) -> List[Tuple[bytes32, uint128]]:
peak = self.get_peak()
if peak is None:
return []
recent_rc: List[Tuple[bytes32, uint128]] = []
curr: Optional[BlockRecord] = peak
while curr is not None and len(recent_rc) < 2 * self.constants.MAX_SUB_SLOT_BLOCKS:
if curr != peak:
recent_rc.append((curr.reward_infusion_new_challenge, curr.total_iters))
if curr.first_in_sub_slot:
assert curr.finished_reward_slot_hashes is not None
sub_slot_total_iters = curr.ip_sub_slot_total_iters(self.constants)
# Start from the most recent
for rc in reversed(curr.finished_reward_slot_hashes):
if sub_slot_total_iters < curr.sub_slot_iters:
break
recent_rc.append((rc, sub_slot_total_iters))
sub_slot_total_iters = uint128(sub_slot_total_iters - curr.sub_slot_iters)
curr = self.try_block_record(curr.prev_hash)
return list(reversed(recent_rc))
async def validate_unfinished_block(
self, block: UnfinishedBlock, skip_overflow_ss_validation=True
) -> PreValidationResult:
if (
not self.contains_block(block.prev_header_hash)
and not block.prev_header_hash == self.constants.GENESIS_CHALLENGE
):
return PreValidationResult(uint16(Err.INVALID_PREV_BLOCK_HASH.value), None, None)
unfinished_header_block = UnfinishedHeaderBlock(
block.finished_sub_slots,
block.reward_chain_block,
block.challenge_chain_sp_proof,
block.reward_chain_sp_proof,
block.foliage,
block.foliage_transaction_block,
b"",
)
prev_b = self.try_block_record(unfinished_header_block.prev_header_hash)
sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty(
self.constants, len(unfinished_header_block.finished_sub_slots) > 0, prev_b, self
)
required_iters, error = validate_unfinished_header_block(
self.constants,
self,
unfinished_header_block,
False,
difficulty,
sub_slot_iters,
skip_overflow_ss_validation,
)
if error is not None:
return PreValidationResult(uint16(error.code.value), None, None)
prev_height = (
-1
if block.prev_header_hash == self.constants.GENESIS_CHALLENGE
else self.block_record(block.prev_header_hash).height
)
npc_result = None
if block.transactions_generator is not None:
assert block.transactions_info is not None
try:
block_generator: Optional[BlockGenerator] = await self.get_block_generator(block)
except ValueError:
return PreValidationResult(uint16(Err.GENERATOR_REF_HAS_NO_GENERATOR.value), None, None)
if block_generator is None:
return PreValidationResult(uint16(Err.GENERATOR_REF_HAS_NO_GENERATOR.value), None, None)
npc_result = get_name_puzzle_conditions(
block_generator,
min(self.constants.MAX_BLOCK_COST_CLVM, block.transactions_info.cost),
cost_per_byte=self.constants.COST_PER_BYTE,
safe_mode=False,
)
error_code, cost_result = await validate_block_body(
self.constants,
self,
self.block_store,
self.coin_store,
self.get_peak(),
block,
uint32(prev_height + 1),
npc_result,
None,
self.get_block_generator,
)
if error_code is not None:
return PreValidationResult(uint16(error_code.value), None, None)
return PreValidationResult(None, required_iters, cost_result)
async def pre_validate_blocks_multiprocessing(
self,
blocks: List[FullBlock],
npc_results: Dict[uint32, NPCResult],
batch_size: int = 4,
wp_summaries: Optional[List[SubEpochSummary]] = None,
) -> Optional[List[PreValidationResult]]:
return await pre_validate_blocks_multiprocessing(
self.constants,
self.constants_json,
self,
blocks,
self.pool,
True,
npc_results,
self.get_block_generator,
batch_size,
wp_summaries,
)
def contains_block(self, header_hash: bytes32) -> bool:
"""
True if we have already added this block to the chain. This may return false for orphan blocks
that we have added but no longer keep in memory.
"""
return header_hash in self.__block_records
def block_record(self, header_hash: bytes32) -> BlockRecord:
return self.__block_records[header_hash]
def height_to_block_record(self, height: uint32) -> BlockRecord:
header_hash = self.height_to_hash(height)
return self.block_record(header_hash)
def get_ses_heights(self) -> List[uint32]:
return sorted(self.__sub_epoch_summaries.keys())
def get_ses(self, height: uint32) -> SubEpochSummary:
return self.__sub_epoch_summaries[height]
def height_to_hash(self, height: uint32) -> Optional[bytes32]:
return self.__height_to_hash[height]
def contains_height(self, height: uint32) -> bool:
return height in self.__height_to_hash
def get_peak_height(self) -> Optional[uint32]:
return self._peak_height
async def warmup(self, fork_point: uint32):
"""
Loads blocks into the cache. The blocks loaded include all blocks from
fork point - BLOCKS_CACHE_SIZE up to and including the fork_point.
Args:
fork_point: the last block height to load in the cache
"""
if self._peak_height is None:
return None
block_records = await self.block_store.get_block_records_in_range(
max(fork_point - self.constants.BLOCKS_CACHE_SIZE, uint32(0)), fork_point
)
for block_record in block_records.values():
self.add_block_record(block_record)
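    # Usage sketch (illustrative; `blockchain` is a hypothetical instance of
    # this class): after finding a fork point during sync,
    #
    #     await blockchain.warmup(fork_point)
    #
    # loads heights [max(fork_point - BLOCKS_CACHE_SIZE, 0), fork_point].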
def clean_block_record(self, height: int):
"""
        Clears all block records in the cache whose height is lower than the given height.
Args:
height: Minimum height that we need to keep in the cache
"""
if height < 0:
return None
blocks_to_remove = self.__heights_in_cache.get(uint32(height), None)
while blocks_to_remove is not None and height >= 0:
for header_hash in blocks_to_remove:
del self.__block_records[header_hash] # remove from blocks
del self.__heights_in_cache[uint32(height)] # remove height from heights in cache
if height == 0:
break
height = height - 1
blocks_to_remove = self.__heights_in_cache.get(uint32(height), None)
def clean_block_records(self):
"""
        Cleans the cache so that we only maintain relevant blocks. This removes
        block records that have height < peak - BLOCKS_CACHE_SIZE. The remaining
        blocks are needed for calculating future difficulty adjustments.
"""
if len(self.__block_records) < self.constants.BLOCKS_CACHE_SIZE:
return None
peak = self.get_peak()
assert peak is not None
if peak.height - self.constants.BLOCKS_CACHE_SIZE < 0:
return None
self.clean_block_record(peak.height - self.constants.BLOCKS_CACHE_SIZE)
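    # Worked example (illustrative; 1500 is a hypothetical BLOCKS_CACHE_SIZE):
    # with the peak at height 10000, clean_block_records() evicts every cached
    # record below height 8500, keeping exactly the window that future
    # difficulty adjustments will read.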
async def get_block_records_in_range(self, start: int, stop: int) -> Dict[bytes32, BlockRecord]:
return await self.block_store.get_block_records_in_range(start, stop)
async def get_header_blocks_in_range(
self, start: int, stop: int, tx_filter: bool = True
) -> Dict[bytes32, HeaderBlock]:
hashes = []
for height in range(start, stop + 1):
if self.contains_height(uint32(height)):
header_hash: bytes32 = self.height_to_hash(uint32(height))
hashes.append(header_hash)
blocks: List[FullBlock] = []
for hash in hashes.copy():
block = self.block_store.block_cache.get(hash)
if block is not None:
blocks.append(block)
hashes.remove(hash)
blocks_on_disk: List[FullBlock] = await self.block_store.get_blocks_by_hash(hashes)
blocks.extend(blocks_on_disk)
header_blocks: Dict[bytes32, HeaderBlock] = {}
for block in blocks:
if self.height_to_hash(block.height) != block.header_hash:
raise ValueError(f"Block at {block.header_hash} is no longer in the blockchain (it's in a fork)")
if tx_filter is False:
header = get_block_header(block, [], [])
else:
tx_additions: List[CoinRecord] = [
c for c in (await self.coin_store.get_coins_added_at_height(block.height)) if not c.coinbase
]
removed: List[CoinRecord] = await self.coin_store.get_coins_removed_at_height(block.height)
header = get_block_header(
block, [record.coin for record in tx_additions], [record.coin.name() for record in removed]
)
header_blocks[header.header_hash] = header
return header_blocks
async def get_header_block_by_height(
self, height: int, header_hash: bytes32, tx_filter: bool = True
) -> Optional[HeaderBlock]:
header_dict: Dict[bytes32, HeaderBlock] = await self.get_header_blocks_in_range(height, height, tx_filter)
if len(header_dict) == 0:
return None
if header_hash not in header_dict:
return None
return header_dict[header_hash]
async def get_block_records_at(self, heights: List[uint32], batch_size=900) -> List[BlockRecord]:
"""
        Gets block records by height (only blocks that are part of the main chain).
"""
records: List[BlockRecord] = []
hashes = []
        assert batch_size < 999  # sqlite in Python 3.7 has a limit of 999 variables per query
for height in heights:
hashes.append(self.height_to_hash(height))
if len(hashes) > batch_size:
res = await self.block_store.get_block_records_by_hash(hashes)
records.extend(res)
hashes = []
if len(hashes) > 0:
res = await self.block_store.get_block_records_by_hash(hashes)
records.extend(res)
return records
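    # Batching sketch (illustrative): a query is issued once more than
    # batch_size hashes have accumulated, so with batch_size=900 a request for
    # 2000 heights produces two 901-hash queries and one final 198-hash query,
    # each safely under sqlite's 999-variable limit.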
async def get_block_record_from_db(self, header_hash: bytes32) -> Optional[BlockRecord]:
if header_hash in self.__block_records:
return self.__block_records[header_hash]
return await self.block_store.get_block_record(header_hash)
def remove_block_record(self, header_hash: bytes32):
sbr = self.block_record(header_hash)
del self.__block_records[header_hash]
self.__heights_in_cache[sbr.height].remove(header_hash)
def add_block_record(self, block_record: BlockRecord):
"""
Adds a block record to the cache.
"""
self.__block_records[block_record.header_hash] = block_record
if block_record.height not in self.__heights_in_cache.keys():
self.__heights_in_cache[block_record.height] = set()
self.__heights_in_cache[block_record.height].add(block_record.header_hash)
async def persist_sub_epoch_challenge_segments(
self, ses_block_hash: bytes32, segments: List[SubEpochChallengeSegment]
):
return await self.block_store.persist_sub_epoch_challenge_segments(ses_block_hash, segments)
async def get_sub_epoch_challenge_segments(
self,
ses_block_hash: bytes32,
) -> Optional[List[SubEpochChallengeSegment]]:
segments: Optional[List[SubEpochChallengeSegment]] = await self.block_store.get_sub_epoch_challenge_segments(
ses_block_hash
)
if segments is None:
return None
return segments
# Returns 'True' if the info is already in the set, otherwise returns 'False' and stores it.
def seen_compact_proofs(self, vdf_info: VDFInfo, height: uint32) -> bool:
pot_tuple = (vdf_info, height)
if pot_tuple in self._seen_compact_proofs:
return True
        # Periodically clean up to keep the size small. TODO: make this smarter, e.g. FIFO.
if len(self._seen_compact_proofs) > 10000:
self._seen_compact_proofs.clear()
self._seen_compact_proofs.add(pot_tuple)
return False
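    # Usage sketch (illustrative; `blockchain` is a hypothetical instance):
    #
    #     if blockchain.seen_compact_proofs(vdf_info, height):
    #         return  # duplicate compact proof, skip re-processing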
async def get_block_generator(
self, block: Union[FullBlock, UnfinishedBlock], additional_blocks=None
) -> Optional[BlockGenerator]:
if additional_blocks is None:
additional_blocks = {}
ref_list = block.transactions_generator_ref_list
if block.transactions_generator is None:
assert len(ref_list) == 0
return None
if len(ref_list) == 0:
return BlockGenerator(block.transactions_generator, [])
result: List[GeneratorArg] = []
previous_block_hash = block.prev_header_hash
if (
self.try_block_record(previous_block_hash)
and self.height_to_hash(self.block_record(previous_block_hash).height) == previous_block_hash
):
            # We are not in a reorg; no need to look up alternate header hashes (we can get them from height_to_hash)
for ref_height in block.transactions_generator_ref_list:
header_hash = self.height_to_hash(ref_height)
ref_block = await self.get_full_block(header_hash)
assert ref_block is not None
if ref_block.transactions_generator is None:
raise ValueError(Err.GENERATOR_REF_HAS_NO_GENERATOR)
result.append(GeneratorArg(ref_block.height, ref_block.transactions_generator))
else:
# First tries to find the blocks in additional_blocks
reorg_chain: Dict[uint32, FullBlock] = {}
curr: Union[FullBlock, UnfinishedBlock] = block
additional_height_dict = {}
while curr.prev_header_hash in additional_blocks:
prev: FullBlock = additional_blocks[curr.prev_header_hash]
additional_height_dict[prev.height] = prev
if isinstance(curr, FullBlock):
assert curr.height == prev.height + 1
reorg_chain[prev.height] = prev
curr = prev
peak: Optional[BlockRecord] = self.get_peak()
if self.contains_block(curr.prev_header_hash) and peak is not None:
# Then we look up blocks up to fork point one at a time, backtracking
previous_block_hash = curr.prev_header_hash
prev_block_record = await self.block_store.get_block_record(previous_block_hash)
prev_block = await self.block_store.get_full_block(previous_block_hash)
assert prev_block is not None
assert prev_block_record is not None
fork = find_fork_point_in_chain(self, peak, prev_block_record)
curr_2: Optional[FullBlock] = prev_block
assert curr_2 is not None and isinstance(curr_2, FullBlock)
reorg_chain[curr_2.height] = curr_2
while curr_2.height > fork and curr_2.height > 0:
curr_2 = await self.block_store.get_full_block(curr_2.prev_header_hash)
assert curr_2 is not None
reorg_chain[curr_2.height] = curr_2
for ref_height in block.transactions_generator_ref_list:
if ref_height in reorg_chain:
ref_block = reorg_chain[ref_height]
assert ref_block is not None
if ref_block.transactions_generator is None:
raise ValueError(Err.GENERATOR_REF_HAS_NO_GENERATOR)
result.append(GeneratorArg(ref_block.height, ref_block.transactions_generator))
else:
if ref_height in additional_height_dict:
ref_block = additional_height_dict[ref_height]
else:
header_hash = self.height_to_hash(ref_height)
ref_block = await self.get_full_block(header_hash)
assert ref_block is not None
if ref_block.transactions_generator is None:
raise ValueError(Err.GENERATOR_REF_HAS_NO_GENERATOR)
result.append(GeneratorArg(ref_block.height, ref_block.transactions_generator))
assert len(result) == len(ref_list)
return BlockGenerator(block.transactions_generator, result)
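# Reference-resolution summary (illustrative): for a block whose generator
# references heights [h1, h2], get_block_generator returns a BlockGenerator
# bundling the block's own program with one GeneratorArg per referenced height,
# resolved through height_to_hash on the main chain, or through the reorg chain
# and additional_blocks maps when the parent is not on the peak path.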
| 45.399784
| 119
| 0.636396
|
import asyncio
import dataclasses
import logging
import multiprocessing
from concurrent.futures.process import ProcessPoolExecutor
from enum import Enum
from typing import Dict, List, Optional, Set, Tuple, Union
from clvm.casts import int_from_bytes
from kujenga.consensus.block_body_validation import validate_block_body
from kujenga.consensus.block_header_validation import validate_finished_header_block, validate_unfinished_header_block
from kujenga.consensus.block_record import BlockRecord
from kujenga.consensus.blockchain_interface import BlockchainInterface
from kujenga.consensus.constants import ConsensusConstants
from kujenga.consensus.cost_calculator import NPCResult
from kujenga.consensus.difficulty_adjustment import get_next_sub_slot_iters_and_difficulty
from kujenga.consensus.find_fork_point import find_fork_point_in_chain
from kujenga.consensus.full_block_to_block_record import block_to_block_record
from kujenga.consensus.multiprocess_validation import PreValidationResult, pre_validate_blocks_multiprocessing
from kujenga.full_node.block_store import BlockStore
from kujenga.full_node.coin_store import CoinStore
from kujenga.full_node.hint_store import HintStore
from kujenga.full_node.mempool_check_conditions import get_name_puzzle_conditions
from kujenga.types.blockchain_format.coin import Coin
from kujenga.types.blockchain_format.sized_bytes import bytes32
from kujenga.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from kujenga.types.blockchain_format.vdf import VDFInfo
from kujenga.types.coin_record import CoinRecord
from kujenga.types.condition_opcodes import ConditionOpcode
from kujenga.types.end_of_slot_bundle import EndOfSubSlotBundle
from kujenga.types.full_block import FullBlock
from kujenga.types.generator_types import BlockGenerator, GeneratorArg
from kujenga.types.header_block import HeaderBlock
from kujenga.types.unfinished_block import UnfinishedBlock
from kujenga.types.unfinished_header_block import UnfinishedHeaderBlock
from kujenga.types.weight_proof import SubEpochChallengeSegment
from kujenga.util.errors import Err
from kujenga.util.generator_tools import get_block_header, tx_removals_and_additions
from kujenga.util.ints import uint16, uint32, uint64, uint128
from kujenga.util.streamable import recurse_jsonify
log = logging.getLogger(__name__)
class ReceiveBlockResult(Enum):
NEW_PEAK = 1
ADDED_AS_ORPHAN = 2
INVALID_BLOCK = 3
ALREADY_HAVE_BLOCK = 4
DISCONNECTED_BLOCK = 5
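# Illustrative helper (an assumption, not part of the original API): short
# log-friendly descriptions for each receive outcome.
_RECEIVE_RESULT_DESCRIPTIONS: Dict[ReceiveBlockResult, str] = {
    ReceiveBlockResult.NEW_PEAK: "block extended the heaviest chain",
    ReceiveBlockResult.ADDED_AS_ORPHAN: "block stored off the peak path",
    ReceiveBlockResult.INVALID_BLOCK: "block failed validation",
    ReceiveBlockResult.ALREADY_HAVE_BLOCK: "block was already known",
    ReceiveBlockResult.DISCONNECTED_BLOCK: "previous block is unknown",
}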
class Blockchain(BlockchainInterface):
constants: ConsensusConstants
constants_json: Dict
# peak of the blockchain
_peak_height: Optional[uint32]
    # All blocks in the peak path are guaranteed to be included; orphan blocks may be included as well
__block_records: Dict[bytes32, BlockRecord]
    # All hashes of blocks in block_records, grouped by height; used for garbage collection
__heights_in_cache: Dict[uint32, Set[bytes32]]
# Defines the path from genesis to the peak, no orphan blocks
__height_to_hash: Dict[uint32, bytes32]
# All sub-epoch summaries that have been included in the blockchain from the beginning until and including the peak
# (height_included, SubEpochSummary). Note: ONLY for the blocks in the path to the peak
__sub_epoch_summaries: Dict[uint32, SubEpochSummary] = {}
# Unspent Store
coin_store: CoinStore
# Store
block_store: BlockStore
# Used to verify blocks in parallel
pool: ProcessPoolExecutor
# Set holding seen compact proofs, in order to avoid duplicates.
_seen_compact_proofs: Set[Tuple[VDFInfo, uint32]]
# Whether blockchain is shut down or not
_shut_down: bool
# Lock to prevent simultaneous reads and writes
lock: asyncio.Lock
compact_proof_lock: asyncio.Lock
hint_store: HintStore
@staticmethod
async def create(
coin_store: CoinStore, block_store: BlockStore, consensus_constants: ConsensusConstants, hint_store: HintStore
):
self = Blockchain()
self.lock = asyncio.Lock() # External lock handled by full node
self.compact_proof_lock = asyncio.Lock()
cpu_count = multiprocessing.cpu_count()
if cpu_count > 61:
cpu_count = 61 # Windows Server 2016 has an issue https://bugs.python.org/issue26903
num_workers = max(cpu_count - 2, 1)
self.pool = ProcessPoolExecutor(max_workers=num_workers)
log.info(f"Started {num_workers} processes for block validation")
self.constants = consensus_constants
self.coin_store = coin_store
self.block_store = block_store
self.constants_json = recurse_jsonify(dataclasses.asdict(self.constants))
self._shut_down = False
await self._load_chain_from_store()
self._seen_compact_proofs = set()
self.hint_store = hint_store
return self
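    # Usage sketch (illustrative; the stores and constants are assumed to be
    # constructed elsewhere):
    #
    #     blockchain = await Blockchain.create(
    #         coin_store, block_store, consensus_constants, hint_store)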
def shut_down(self):
self._shut_down = True
self.pool.shutdown(wait=True)
async def _load_chain_from_store(self) -> None:
height_to_hash, sub_epoch_summaries = await self.block_store.get_peak_height_dicts()
self.__height_to_hash = height_to_hash
self.__sub_epoch_summaries = sub_epoch_summaries
self.__block_records = {}
self.__heights_in_cache = {}
block_records, peak = await self.block_store.get_block_records_close_to_peak(self.constants.BLOCKS_CACHE_SIZE)
for block in block_records.values():
self.add_block_record(block)
if len(block_records) == 0:
assert peak is None
self._peak_height = None
return None
assert peak is not None
self._peak_height = self.block_record(peak).height
assert len(self.__height_to_hash) == self._peak_height + 1
def get_peak(self) -> Optional[BlockRecord]:
if self._peak_height is None:
return None
return self.height_to_block_record(self._peak_height)
async def get_full_peak(self) -> Optional[FullBlock]:
if self._peak_height is None:
return None
block = await self.block_store.get_full_block(self.height_to_hash(self._peak_height))
assert block is not None
return block
async def get_full_block(self, header_hash: bytes32) -> Optional[FullBlock]:
return await self.block_store.get_full_block(header_hash)
async def receive_block(
self,
block: FullBlock,
pre_validation_result: Optional[PreValidationResult] = None,
fork_point_with_peak: Optional[uint32] = None,
) -> Tuple[
ReceiveBlockResult,
Optional[Err],
Optional[uint32],
Tuple[List[CoinRecord], Dict[bytes, Dict[bytes32, CoinRecord]]],
]:
genesis: bool = block.height == 0
if self.contains_block(block.header_hash):
return ReceiveBlockResult.ALREADY_HAVE_BLOCK, None, None, ([], {})
if not self.contains_block(block.prev_header_hash) and not genesis:
return (ReceiveBlockResult.DISCONNECTED_BLOCK, Err.INVALID_PREV_BLOCK_HASH, None, ([], {}))
if not genesis and (self.block_record(block.prev_header_hash).height + 1) != block.height:
return ReceiveBlockResult.INVALID_BLOCK, Err.INVALID_HEIGHT, None, ([], {})
npc_result: Optional[NPCResult] = None
if pre_validation_result is None:
if block.height == 0:
prev_b: Optional[BlockRecord] = None
else:
prev_b = self.block_record(block.prev_header_hash)
sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty(
self.constants, len(block.finished_sub_slots) > 0, prev_b, self
)
if block.is_transaction_block():
if block.transactions_generator is not None:
try:
block_generator: Optional[BlockGenerator] = await self.get_block_generator(block)
except ValueError:
return ReceiveBlockResult.INVALID_BLOCK, Err.GENERATOR_REF_HAS_NO_GENERATOR, None, ([], {})
assert block_generator is not None and block.transactions_info is not None
npc_result = get_name_puzzle_conditions(
block_generator,
min(self.constants.MAX_BLOCK_COST_CLVM, block.transactions_info.cost),
cost_per_byte=self.constants.COST_PER_BYTE,
safe_mode=False,
)
removals, tx_additions = tx_removals_and_additions(npc_result.npc_list)
else:
removals, tx_additions = [], []
header_block = get_block_header(block, tx_additions, removals)
else:
npc_result = None
header_block = get_block_header(block, [], [])
required_iters, error = validate_finished_header_block(
self.constants,
self,
header_block,
False,
difficulty,
sub_slot_iters,
)
if error is not None:
return ReceiveBlockResult.INVALID_BLOCK, error.code, None, ([], {})
else:
npc_result = pre_validation_result.npc_result
required_iters = pre_validation_result.required_iters
assert pre_validation_result.error is None
assert required_iters is not None
error_code, _ = await validate_block_body(
self.constants,
self,
self.block_store,
self.coin_store,
self.get_peak(),
block,
block.height,
npc_result,
fork_point_with_peak,
self.get_block_generator,
)
if error_code is not None:
return ReceiveBlockResult.INVALID_BLOCK, error_code, None, ([], {})
block_record = block_to_block_record(
self.constants,
self,
required_iters,
block,
None,
)
# Always add the block to the database
async with self.block_store.db_wrapper.lock:
try:
header_hash: bytes32 = block.header_hash
# Perform the DB operations to update the state, and rollback if something goes wrong
await self.block_store.db_wrapper.begin_transaction()
await self.block_store.add_full_block(header_hash, block, block_record)
fork_height, peak_height, records, (coin_record_change, hint_changes) = await self._reconsider_peak(
block_record, genesis, fork_point_with_peak, npc_result
)
await self.block_store.db_wrapper.commit_transaction()
# Then update the memory cache. It is important that this task is not cancelled and does not throw
self.add_block_record(block_record)
for fetched_block_record in records:
self.__height_to_hash[fetched_block_record.height] = fetched_block_record.header_hash
if fetched_block_record.sub_epoch_summary_included is not None:
self.__sub_epoch_summaries[
fetched_block_record.height
] = fetched_block_record.sub_epoch_summary_included
if peak_height is not None:
self._peak_height = peak_height
except BaseException:
self.block_store.rollback_cache_block(header_hash)
await self.block_store.db_wrapper.rollback_transaction()
raise
if fork_height is not None:
# new coin records added
assert coin_record_change is not None
return ReceiveBlockResult.NEW_PEAK, None, fork_height, (coin_record_change, hint_changes)
else:
return ReceiveBlockResult.ADDED_AS_ORPHAN, None, None, ([], {})
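    # Caller-side sketch (illustrative; `blockchain` and `block` are
    # hypothetical):
    #
    #     result, error, fork_height, (coin_changes, hint_changes) = \
    #         await blockchain.receive_block(block)
    #     if result == ReceiveBlockResult.NEW_PEAK:
    #         ...  # announce the peak; push coin_changes to subscribers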
def get_hint_list(self, npc_result: NPCResult) -> List[Tuple[bytes32, bytes]]:
h_list = []
for npc in npc_result.npc_list:
for opcode, conditions in npc.conditions:
if opcode == ConditionOpcode.CREATE_COIN:
for condition in conditions:
if len(condition.vars) > 2 and condition.vars[2] != b"":
puzzle_hash, amount_bin = condition.vars[0], condition.vars[1]
amount = int_from_bytes(amount_bin)
coin_id = Coin(npc.coin_name, puzzle_hash, amount).name()
h_list.append((coin_id, condition.vars[2]))
return h_list
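    # Hint-extraction sketch (illustrative): a CREATE_COIN condition whose vars
    # are [puzzle_hash, amount, hint] with a non-empty third var yields the
    # pair (coin_id, hint), where coin_id names the newly created coin.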
async def _reconsider_peak(
self,
block_record: BlockRecord,
genesis: bool,
fork_point_with_peak: Optional[uint32],
npc_result: Optional[NPCResult],
) -> Tuple[
Optional[uint32],
Optional[uint32],
List[BlockRecord],
Tuple[List[CoinRecord], Dict[bytes, Dict[bytes32, CoinRecord]]],
]:
peak = self.get_peak()
lastest_coin_state: Dict[bytes32, CoinRecord] = {}
hint_coin_state: Dict[bytes32, Dict[bytes32, CoinRecord]] = {}
if genesis:
if peak is None:
block: Optional[FullBlock] = await self.block_store.get_full_block(block_record.header_hash)
assert block is not None
if npc_result is not None:
tx_removals, tx_additions = tx_removals_and_additions(npc_result.npc_list)
else:
tx_removals, tx_additions = [], []
if block.is_transaction_block():
assert block.foliage_transaction_block is not None
added = await self.coin_store.new_block(
block.height,
block.foliage_transaction_block.timestamp,
block.get_included_reward_coins(),
tx_additions,
tx_removals,
)
else:
added, _ = [], []
await self.block_store.set_peak(block_record.header_hash)
return uint32(0), uint32(0), [block_record], (added, {})
return None, None, [], ([], {})
assert peak is not None
if block_record.weight > peak.weight:
            # Find the fork. If the block is just being appended, it will return the peak
# If no blocks in common, returns -1, and reverts all blocks
if block_record.prev_hash == peak.header_hash:
fork_height: int = peak.height
elif fork_point_with_peak is not None:
fork_height = fork_point_with_peak
else:
fork_height = find_fork_point_in_chain(self, block_record, peak)
if block_record.prev_hash != peak.header_hash:
roll_changes: List[CoinRecord] = await self.coin_store.rollback_to_block(fork_height)
for coin_record in roll_changes:
lastest_coin_state[coin_record.name] = coin_record
                # Roll back sub_epoch_summaries
heights_to_delete = []
for ses_included_height in self.__sub_epoch_summaries.keys():
if ses_included_height > fork_height:
heights_to_delete.append(ses_included_height)
for height in heights_to_delete:
log.info(f"delete ses at height {height}")
del self.__sub_epoch_summaries[height]
# Collect all blocks from fork point to new peak
blocks_to_add: List[Tuple[FullBlock, BlockRecord]] = []
curr = block_record.header_hash
while fork_height < 0 or curr != self.height_to_hash(uint32(fork_height)):
fetched_full_block: Optional[FullBlock] = await self.block_store.get_full_block(curr)
fetched_block_record: Optional[BlockRecord] = await self.block_store.get_block_record(curr)
assert fetched_full_block is not None
assert fetched_block_record is not None
blocks_to_add.append((fetched_full_block, fetched_block_record))
if fetched_full_block.height == 0:
# Doing a full reorg, starting at height 0
break
curr = fetched_block_record.prev_hash
records_to_add = []
for fetched_full_block, fetched_block_record in reversed(blocks_to_add):
records_to_add.append(fetched_block_record)
if fetched_full_block.is_transaction_block():
if fetched_block_record.header_hash == block_record.header_hash:
tx_removals, tx_additions, npc_res = await self.get_tx_removals_and_additions(
fetched_full_block, npc_result
)
else:
tx_removals, tx_additions, npc_res = await self.get_tx_removals_and_additions(
fetched_full_block, None
)
assert fetched_full_block.foliage_transaction_block is not None
added_rec = await self.coin_store.new_block(
fetched_full_block.height,
fetched_full_block.foliage_transaction_block.timestamp,
fetched_full_block.get_included_reward_coins(),
tx_additions,
tx_removals,
)
removed_rec: List[Optional[CoinRecord]] = [
await self.coin_store.get_coin_record(name) for name in tx_removals
]
# Set additions first, then removals in order to handle ephemeral coin state
                    # Adding in height order is also required
record: Optional[CoinRecord]
for record in added_rec:
assert record
lastest_coin_state[record.name] = record
for record in removed_rec:
assert record
lastest_coin_state[record.name] = record
if npc_res is not None:
hint_list: List[Tuple[bytes32, bytes]] = self.get_hint_list(npc_res)
await self.hint_store.add_hints(hint_list)
# There can be multiple coins for the same hint
for coin_id, hint in hint_list:
key = hint
if key not in hint_coin_state:
hint_coin_state[key] = {}
hint_coin_state[key][coin_id] = lastest_coin_state[coin_id]
# Changes the peak to be the new peak
await self.block_store.set_peak(block_record.header_hash)
return (
uint32(max(fork_height, 0)),
block_record.height,
records_to_add,
(list(lastest_coin_state.values()), hint_coin_state),
)
# This is not a heavier block than the heaviest we have seen, so we don't change the coin set
return None, None, [], ([], {})
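    # Return-shape sketch (illustrative): switching to a heavier chain yields
    # (fork_height, new_peak_height, records_to_add,
    # (changed_coin_records, hint_changes)); a lighter block yields
    # (None, None, [], ([], {})) and leaves the coin set untouched.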
async def get_tx_removals_and_additions(
self, block: FullBlock, npc_result: Optional[NPCResult] = None
) -> Tuple[List[bytes32], List[Coin], Optional[NPCResult]]:
if block.is_transaction_block():
if block.transactions_generator is not None:
if npc_result is None:
block_generator: Optional[BlockGenerator] = await self.get_block_generator(block)
assert block_generator is not None
npc_result = get_name_puzzle_conditions(
block_generator,
self.constants.MAX_BLOCK_COST_CLVM,
cost_per_byte=self.constants.COST_PER_BYTE,
safe_mode=False,
)
tx_removals, tx_additions = tx_removals_and_additions(npc_result.npc_list)
return tx_removals, tx_additions, npc_result
else:
return [], [], None
else:
return [], [], None
def get_next_difficulty(self, header_hash: bytes32, new_slot: bool) -> uint64:
assert self.contains_block(header_hash)
curr = self.block_record(header_hash)
if curr.height <= 2:
return self.constants.DIFFICULTY_STARTING
return get_next_sub_slot_iters_and_difficulty(self.constants, new_slot, curr, self)[1]
def get_next_slot_iters(self, header_hash: bytes32, new_slot: bool) -> uint64:
assert self.contains_block(header_hash)
curr = self.block_record(header_hash)
if curr.height <= 2:
return self.constants.SUB_SLOT_ITERS_STARTING
return get_next_sub_slot_iters_and_difficulty(self.constants, new_slot, curr, self)[0]
async def get_sp_and_ip_sub_slots(
self, header_hash: bytes32
) -> Optional[Tuple[Optional[EndOfSubSlotBundle], Optional[EndOfSubSlotBundle]]]:
block: Optional[FullBlock] = await self.block_store.get_full_block(header_hash)
if block is None:
return None
curr_br: BlockRecord = self.block_record(block.header_hash)
is_overflow = curr_br.overflow
curr: Optional[FullBlock] = block
assert curr is not None
while True:
if curr_br.first_in_sub_slot:
curr = await self.block_store.get_full_block(curr_br.header_hash)
assert curr is not None
break
if curr_br.height == 0:
break
curr_br = self.block_record(curr_br.prev_hash)
if len(curr.finished_sub_slots) == 0:
return None, None
ip_sub_slot = curr.finished_sub_slots[-1]
if not is_overflow:
return None, ip_sub_slot
if len(curr.finished_sub_slots) > 1:
return curr.finished_sub_slots[-2], ip_sub_slot
prev_curr: Optional[FullBlock] = await self.block_store.get_full_block(curr.prev_header_hash)
if prev_curr is None:
assert curr.height == 0
prev_curr = curr
prev_curr_br = self.block_record(curr.header_hash)
else:
prev_curr_br = self.block_record(curr.prev_header_hash)
assert prev_curr_br is not None
while prev_curr_br.height > 0:
if prev_curr_br.first_in_sub_slot:
prev_curr = await self.block_store.get_full_block(prev_curr_br.header_hash)
assert prev_curr is not None
break
prev_curr_br = self.block_record(prev_curr_br.prev_hash)
if len(prev_curr.finished_sub_slots) == 0:
return None, ip_sub_slot
return prev_curr.finished_sub_slots[-1], ip_sub_slot
def get_recent_reward_challenges(self) -> List[Tuple[bytes32, uint128]]:
peak = self.get_peak()
if peak is None:
return []
recent_rc: List[Tuple[bytes32, uint128]] = []
curr: Optional[BlockRecord] = peak
while curr is not None and len(recent_rc) < 2 * self.constants.MAX_SUB_SLOT_BLOCKS:
if curr != peak:
recent_rc.append((curr.reward_infusion_new_challenge, curr.total_iters))
if curr.first_in_sub_slot:
assert curr.finished_reward_slot_hashes is not None
sub_slot_total_iters = curr.ip_sub_slot_total_iters(self.constants)
for rc in reversed(curr.finished_reward_slot_hashes):
if sub_slot_total_iters < curr.sub_slot_iters:
break
recent_rc.append((rc, sub_slot_total_iters))
sub_slot_total_iters = uint128(sub_slot_total_iters - curr.sub_slot_iters)
curr = self.try_block_record(curr.prev_hash)
return list(reversed(recent_rc))
async def validate_unfinished_block(
self, block: UnfinishedBlock, skip_overflow_ss_validation=True
) -> PreValidationResult:
if (
not self.contains_block(block.prev_header_hash)
and not block.prev_header_hash == self.constants.GENESIS_CHALLENGE
):
return PreValidationResult(uint16(Err.INVALID_PREV_BLOCK_HASH.value), None, None)
unfinished_header_block = UnfinishedHeaderBlock(
block.finished_sub_slots,
block.reward_chain_block,
block.challenge_chain_sp_proof,
block.reward_chain_sp_proof,
block.foliage,
block.foliage_transaction_block,
b"",
)
prev_b = self.try_block_record(unfinished_header_block.prev_header_hash)
sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty(
self.constants, len(unfinished_header_block.finished_sub_slots) > 0, prev_b, self
)
required_iters, error = validate_unfinished_header_block(
self.constants,
self,
unfinished_header_block,
False,
difficulty,
sub_slot_iters,
skip_overflow_ss_validation,
)
if error is not None:
return PreValidationResult(uint16(error.code.value), None, None)
prev_height = (
-1
if block.prev_header_hash == self.constants.GENESIS_CHALLENGE
else self.block_record(block.prev_header_hash).height
)
npc_result = None
if block.transactions_generator is not None:
assert block.transactions_info is not None
try:
block_generator: Optional[BlockGenerator] = await self.get_block_generator(block)
except ValueError:
return PreValidationResult(uint16(Err.GENERATOR_REF_HAS_NO_GENERATOR.value), None, None)
if block_generator is None:
return PreValidationResult(uint16(Err.GENERATOR_REF_HAS_NO_GENERATOR.value), None, None)
npc_result = get_name_puzzle_conditions(
block_generator,
min(self.constants.MAX_BLOCK_COST_CLVM, block.transactions_info.cost),
cost_per_byte=self.constants.COST_PER_BYTE,
safe_mode=False,
)
error_code, cost_result = await validate_block_body(
self.constants,
self,
self.block_store,
self.coin_store,
self.get_peak(),
block,
uint32(prev_height + 1),
npc_result,
None,
self.get_block_generator,
)
if error_code is not None:
return PreValidationResult(uint16(error_code.value), None, None)
return PreValidationResult(None, required_iters, cost_result)
async def pre_validate_blocks_multiprocessing(
self,
blocks: List[FullBlock],
npc_results: Dict[uint32, NPCResult],
batch_size: int = 4,
wp_summaries: Optional[List[SubEpochSummary]] = None,
) -> Optional[List[PreValidationResult]]:
return await pre_validate_blocks_multiprocessing(
self.constants,
self.constants_json,
self,
blocks,
self.pool,
True,
npc_results,
self.get_block_generator,
batch_size,
wp_summaries,
)
def contains_block(self, header_hash: bytes32) -> bool:
return header_hash in self.__block_records
def block_record(self, header_hash: bytes32) -> BlockRecord:
return self.__block_records[header_hash]
def height_to_block_record(self, height: uint32) -> BlockRecord:
header_hash = self.height_to_hash(height)
return self.block_record(header_hash)
def get_ses_heights(self) -> List[uint32]:
return sorted(self.__sub_epoch_summaries.keys())
def get_ses(self, height: uint32) -> SubEpochSummary:
return self.__sub_epoch_summaries[height]
def height_to_hash(self, height: uint32) -> Optional[bytes32]:
return self.__height_to_hash[height]
def contains_height(self, height: uint32) -> bool:
return height in self.__height_to_hash
def get_peak_height(self) -> Optional[uint32]:
return self._peak_height
async def warmup(self, fork_point: uint32):
if self._peak_height is None:
return None
block_records = await self.block_store.get_block_records_in_range(
max(fork_point - self.constants.BLOCKS_CACHE_SIZE, uint32(0)), fork_point
)
for block_record in block_records.values():
self.add_block_record(block_record)
def clean_block_record(self, height: int):
if height < 0:
return None
blocks_to_remove = self.__heights_in_cache.get(uint32(height), None)
while blocks_to_remove is not None and height >= 0:
for header_hash in blocks_to_remove:
del self.__block_records[header_hash]
del self.__heights_in_cache[uint32(height)]
if height == 0:
break
height = height - 1
blocks_to_remove = self.__heights_in_cache.get(uint32(height), None)
def clean_block_records(self):
if len(self.__block_records) < self.constants.BLOCKS_CACHE_SIZE:
return None
peak = self.get_peak()
assert peak is not None
if peak.height - self.constants.BLOCKS_CACHE_SIZE < 0:
return None
self.clean_block_record(peak.height - self.constants.BLOCKS_CACHE_SIZE)
async def get_block_records_in_range(self, start: int, stop: int) -> Dict[bytes32, BlockRecord]:
return await self.block_store.get_block_records_in_range(start, stop)
async def get_header_blocks_in_range(
self, start: int, stop: int, tx_filter: bool = True
) -> Dict[bytes32, HeaderBlock]:
hashes = []
for height in range(start, stop + 1):
if self.contains_height(uint32(height)):
header_hash: bytes32 = self.height_to_hash(uint32(height))
hashes.append(header_hash)
blocks: List[FullBlock] = []
for hash in hashes.copy():
block = self.block_store.block_cache.get(hash)
if block is not None:
blocks.append(block)
hashes.remove(hash)
blocks_on_disk: List[FullBlock] = await self.block_store.get_blocks_by_hash(hashes)
blocks.extend(blocks_on_disk)
header_blocks: Dict[bytes32, HeaderBlock] = {}
for block in blocks:
if self.height_to_hash(block.height) != block.header_hash:
raise ValueError(f"Block at {block.header_hash} is no longer in the blockchain (it's in a fork)")
if tx_filter is False:
header = get_block_header(block, [], [])
else:
tx_additions: List[CoinRecord] = [
c for c in (await self.coin_store.get_coins_added_at_height(block.height)) if not c.coinbase
]
removed: List[CoinRecord] = await self.coin_store.get_coins_removed_at_height(block.height)
header = get_block_header(
block, [record.coin for record in tx_additions], [record.coin.name() for record in removed]
)
header_blocks[header.header_hash] = header
return header_blocks
async def get_header_block_by_height(
self, height: int, header_hash: bytes32, tx_filter: bool = True
) -> Optional[HeaderBlock]:
header_dict: Dict[bytes32, HeaderBlock] = await self.get_header_blocks_in_range(height, height, tx_filter)
if len(header_dict) == 0:
return None
if header_hash not in header_dict:
return None
return header_dict[header_hash]
async def get_block_records_at(self, heights: List[uint32], batch_size=900) -> List[BlockRecord]:
records: List[BlockRecord] = []
hashes = []
        assert batch_size < 999  # sqlite in Python 3.7 has a limit of 999 variables per query
for height in heights:
hashes.append(self.height_to_hash(height))
if len(hashes) > batch_size:
res = await self.block_store.get_block_records_by_hash(hashes)
records.extend(res)
hashes = []
if len(hashes) > 0:
res = await self.block_store.get_block_records_by_hash(hashes)
records.extend(res)
return records
async def get_block_record_from_db(self, header_hash: bytes32) -> Optional[BlockRecord]:
if header_hash in self.__block_records:
return self.__block_records[header_hash]
return await self.block_store.get_block_record(header_hash)
def remove_block_record(self, header_hash: bytes32):
sbr = self.block_record(header_hash)
del self.__block_records[header_hash]
self.__heights_in_cache[sbr.height].remove(header_hash)
def add_block_record(self, block_record: BlockRecord):
self.__block_records[block_record.header_hash] = block_record
if block_record.height not in self.__heights_in_cache.keys():
self.__heights_in_cache[block_record.height] = set()
self.__heights_in_cache[block_record.height].add(block_record.header_hash)
async def persist_sub_epoch_challenge_segments(
self, ses_block_hash: bytes32, segments: List[SubEpochChallengeSegment]
):
return await self.block_store.persist_sub_epoch_challenge_segments(ses_block_hash, segments)
async def get_sub_epoch_challenge_segments(
self,
ses_block_hash: bytes32,
) -> Optional[List[SubEpochChallengeSegment]]:
segments: Optional[List[SubEpochChallengeSegment]] = await self.block_store.get_sub_epoch_challenge_segments(
ses_block_hash
)
if segments is None:
return None
return segments
# Returns 'True' if the info is already in the set, otherwise returns 'False' and stores it.
def seen_compact_proofs(self, vdf_info: VDFInfo, height: uint32) -> bool:
pot_tuple = (vdf_info, height)
if pot_tuple in self._seen_compact_proofs:
return True
        # Periodically clean up to keep the size small. TODO: make this smarter, e.g. FIFO.
if len(self._seen_compact_proofs) > 10000:
self._seen_compact_proofs.clear()
self._seen_compact_proofs.add(pot_tuple)
return False
async def get_block_generator(
self, block: Union[FullBlock, UnfinishedBlock], additional_blocks=None
) -> Optional[BlockGenerator]:
if additional_blocks is None:
additional_blocks = {}
ref_list = block.transactions_generator_ref_list
if block.transactions_generator is None:
assert len(ref_list) == 0
return None
if len(ref_list) == 0:
return BlockGenerator(block.transactions_generator, [])
result: List[GeneratorArg] = []
previous_block_hash = block.prev_header_hash
if (
self.try_block_record(previous_block_hash)
and self.height_to_hash(self.block_record(previous_block_hash).height) == previous_block_hash
):
            # We are not in a reorg; no need to look up alternate header hashes (we can get them from height_to_hash)
for ref_height in block.transactions_generator_ref_list:
header_hash = self.height_to_hash(ref_height)
ref_block = await self.get_full_block(header_hash)
assert ref_block is not None
if ref_block.transactions_generator is None:
raise ValueError(Err.GENERATOR_REF_HAS_NO_GENERATOR)
result.append(GeneratorArg(ref_block.height, ref_block.transactions_generator))
else:
# First tries to find the blocks in additional_blocks
reorg_chain: Dict[uint32, FullBlock] = {}
curr: Union[FullBlock, UnfinishedBlock] = block
additional_height_dict = {}
while curr.prev_header_hash in additional_blocks:
prev: FullBlock = additional_blocks[curr.prev_header_hash]
additional_height_dict[prev.height] = prev
if isinstance(curr, FullBlock):
assert curr.height == prev.height + 1
reorg_chain[prev.height] = prev
curr = prev
peak: Optional[BlockRecord] = self.get_peak()
if self.contains_block(curr.prev_header_hash) and peak is not None:
# Then we look up blocks up to fork point one at a time, backtracking
previous_block_hash = curr.prev_header_hash
prev_block_record = await self.block_store.get_block_record(previous_block_hash)
prev_block = await self.block_store.get_full_block(previous_block_hash)
assert prev_block is not None
assert prev_block_record is not None
fork = find_fork_point_in_chain(self, peak, prev_block_record)
curr_2: Optional[FullBlock] = prev_block
assert curr_2 is not None and isinstance(curr_2, FullBlock)
reorg_chain[curr_2.height] = curr_2
while curr_2.height > fork and curr_2.height > 0:
curr_2 = await self.block_store.get_full_block(curr_2.prev_header_hash)
assert curr_2 is not None
reorg_chain[curr_2.height] = curr_2
for ref_height in block.transactions_generator_ref_list:
if ref_height in reorg_chain:
ref_block = reorg_chain[ref_height]
assert ref_block is not None
if ref_block.transactions_generator is None:
raise ValueError(Err.GENERATOR_REF_HAS_NO_GENERATOR)
result.append(GeneratorArg(ref_block.height, ref_block.transactions_generator))
else:
if ref_height in additional_height_dict:
ref_block = additional_height_dict[ref_height]
else:
header_hash = self.height_to_hash(ref_height)
ref_block = await self.get_full_block(header_hash)
assert ref_block is not None
if ref_block.transactions_generator is None:
raise ValueError(Err.GENERATOR_REF_HAS_NO_GENERATOR)
result.append(GeneratorArg(ref_block.height, ref_block.transactions_generator))
assert len(result) == len(ref_list)
return BlockGenerator(block.transactions_generator, result)
| true
| true
|
79091dc535b4c7364005a9788cfe0cf496bafba7
| 90
|
py
|
Python
|
venv/lib/python3.6/site-packages/django/apps/__init__.py
|
xiegudong45/typeidea
|
db6504a232d120d6ffa185730bd35b9b9ecffa6c
|
[
"Apache-2.0"
] | 61,676
|
2015-01-01T00:05:13.000Z
|
2022-03-31T20:37:54.000Z
|
checkerista/.env/Lib/site-packages/django/apps/__init__.py
|
LybaFatimaNasir/CS311S20PID02
|
bc29a8c4c9ee508c74d231c015a57b1ca4dfcb39
|
[
"MIT"
] | 8,884
|
2015-01-01T00:12:05.000Z
|
2022-03-31T19:53:11.000Z
|
checkerista/.env/Lib/site-packages/django/apps/__init__.py
|
LybaFatimaNasir/CS311S20PID02
|
bc29a8c4c9ee508c74d231c015a57b1ca4dfcb39
|
[
"MIT"
] | 33,143
|
2015-01-01T02:04:52.000Z
|
2022-03-31T19:42:46.000Z
|
from .config import AppConfig
from .registry import apps
__all__ = ['AppConfig', 'apps']
| 18
| 31
| 0.744444
|
from .config import AppConfig
from .registry import apps
__all__ = ['AppConfig', 'apps']
| true
| true
|
79091dd365f46ef0db6b9df02f6aac6814c69002
| 9,630
|
py
|
Python
|
mmrazor/models/architectures/components/backbones/darts_backbone.py
|
HIT-cwh/mmrazor
|
2dad24044d7f1dad88f20221f8fc071dd40fdd4f
|
[
"Apache-2.0"
] | 553
|
2021-12-23T11:43:35.000Z
|
2022-03-31T01:04:20.000Z
|
mmrazor/models/architectures/components/backbones/darts_backbone.py
|
HIT-cwh/mmrazor
|
2dad24044d7f1dad88f20221f8fc071dd40fdd4f
|
[
"Apache-2.0"
] | 113
|
2021-12-23T12:09:06.000Z
|
2022-03-30T10:13:42.000Z
|
mmrazor/models/architectures/components/backbones/darts_backbone.py
|
HIT-cwh/mmrazor
|
2dad24044d7f1dad88f20221f8fc071dd40fdd4f
|
[
"Apache-2.0"
] | 76
|
2021-12-23T11:48:39.000Z
|
2022-03-29T11:24:35.000Z
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import torch
import torch.nn as nn
from mmcls.models.builder import BACKBONES
from mmcv.cnn import build_activation_layer, build_norm_layer
from ...utils import Placeholder
class FactorizedReduce(nn.Module):
"""Reduce feature map size by factorized pointwise (stride=2)."""
def __init__(self,
in_channels,
out_channels,
act_cfg=dict(type='ReLU'),
norm_cfg=dict(type='BN')):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.act_cfg = act_cfg
self.norm_cfg = norm_cfg
self.relu = build_activation_layer(self.act_cfg)
self.conv1 = nn.Conv2d(
self.in_channels,
self.out_channels // 2,
1,
stride=2,
padding=0,
bias=False)
self.conv2 = nn.Conv2d(
self.in_channels,
self.out_channels // 2,
1,
stride=2,
padding=0,
bias=False)
self.bn = build_norm_layer(self.norm_cfg, self.out_channels)[1]
def forward(self, x):
x = self.relu(x)
out = torch.cat([self.conv1(x), self.conv2(x[:, :, 1:, 1:])], dim=1)
out = self.bn(out)
return out
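# Minimal shape check (illustrative; assumes only the definitions above and can
# be invoked manually): each 1x1/stride-2 branch contributes out_channels // 2
# channels, and the input is halved spatially.
def _check_factorized_reduce_shapes() -> None:
    module = FactorizedReduce(in_channels=16, out_channels=8)
    out = module(torch.randn(1, 16, 32, 32))
    assert out.shape == (1, 8, 16, 16)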
class StandardConv(nn.Module):
"""
Standard conv: ReLU - Conv - BN
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
act_cfg=dict(type='ReLU'),
norm_cfg=dict(type='BN')):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.act_cfg = act_cfg
self.norm_cfg = norm_cfg
self.net = nn.Sequential(
build_activation_layer(self.act_cfg),
nn.Conv2d(
self.in_channels,
self.out_channels,
self.kernel_size,
self.stride,
self.padding,
bias=False),
build_norm_layer(self.norm_cfg, self.out_channels)[1])
def forward(self, x):
return self.net(x)
class Node(nn.Module):
def __init__(self, node_id, num_prev_nodes, channels,
num_downsample_nodes):
super().__init__()
edges = nn.ModuleDict()
for i in range(num_prev_nodes):
if i < num_downsample_nodes:
stride = 2
else:
stride = 1
edge_id = '{}_p{}'.format(node_id, i)
edges.add_module(
edge_id,
nn.Sequential(
Placeholder(
group='node',
space_id=edge_id,
choice_args=dict(
stride=stride,
in_channels=channels,
out_channels=channels)), ))
self.edges = Placeholder(
group='node_edge', space_id=node_id, choices=edges)
def forward(self, prev_nodes):
return self.edges(prev_nodes)
class Cell(nn.Module):
def __init__(self,
num_nodes,
channels,
prev_channels,
prev_prev_channels,
reduction,
prev_reduction,
act_cfg=dict(type='ReLU'),
norm_cfg=dict(type='BN')):
super().__init__()
self.act_cfg = act_cfg
self.norm_cfg = norm_cfg
self.reduction = reduction
self.num_nodes = num_nodes
        # If the previous cell is a reduction cell, the current input size does
        # not match the output size of cell[k-2], so output[k-2] has to be
        # downsampled by the preprocessing step.
if prev_reduction:
self.preproc0 = FactorizedReduce(prev_prev_channels, channels,
self.act_cfg, self.norm_cfg)
else:
self.preproc0 = StandardConv(prev_prev_channels, channels, 1, 1, 0,
self.act_cfg, self.norm_cfg)
self.preproc1 = StandardConv(prev_channels, channels, 1, 1, 0,
self.act_cfg, self.norm_cfg)
        # Generate the DAG of intermediate nodes
self.nodes = nn.ModuleList()
for depth in range(2, self.num_nodes + 2):
if reduction:
node_id = f'reduce_n{depth}'
num_downsample_nodes = 2
else:
node_id = f'normal_n{depth}'
num_downsample_nodes = 0
self.nodes.append(
Node(node_id, depth, channels, num_downsample_nodes))
def forward(self, s0, s1):
        # s0 and s1 are the outputs of the previous-previous cell and the
        # previous cell, respectively.
tensors = [self.preproc0(s0), self.preproc1(s1)]
for node in self.nodes:
cur_tensor = node(tensors)
tensors.append(cur_tensor)
output = torch.cat(tensors[2:], dim=1)
return output
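# Channel bookkeeping (illustrative): a cell concatenates the outputs of its
# num_nodes intermediate nodes, so with num_nodes=4 and channels=16 a cell
# emits 4 * 16 = 64 channels; DartsBackbone below accounts for this via
# prev_channels = out_channels * num_nodes.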
class AuxiliaryModule(nn.Module):
"""Auxiliary head in 2/3 place of network to let the gradient flow well."""
def __init__(self,
in_channels,
base_channels,
out_channels,
norm_cfg=dict(type='BN')):
super().__init__()
self.norm_cfg = norm_cfg
self.net = nn.Sequential(
nn.ReLU(),
nn.AvgPool2d(5, stride=2, padding=0,
count_include_pad=False), # 2x2 out
nn.Conv2d(in_channels, base_channels, kernel_size=1, bias=False),
build_norm_layer(self.norm_cfg, base_channels)[1],
nn.ReLU(inplace=True),
nn.Conv2d(base_channels, out_channels, kernel_size=2,
bias=False), # 1x1 out
build_norm_layer(self.norm_cfg, out_channels)[1],
nn.ReLU(inplace=True))
def forward(self, x):
return self.net(x)
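# Minimal shape check (illustrative; the 8x8 input matches the "2x2 out" and
# "1x1 out" annotations above; can be invoked manually):
def _check_auxiliary_module_shapes() -> None:
    module = AuxiliaryModule(in_channels=64, base_channels=128, out_channels=768)
    out = module(torch.randn(2, 64, 8, 8))
    assert out.shape == (2, 768, 1, 1)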
@BACKBONES.register_module()
class DartsBackbone(nn.Module):
def __init__(self,
in_channels,
base_channels,
num_layers=8,
num_nodes=4,
stem_multiplier=3,
out_indices=(7, ),
auxliary=False,
aux_channels=None,
aux_out_channels=None,
act_cfg=dict(type='ReLU'),
norm_cfg=dict(type='BN')):
super().__init__()
self.in_channels = in_channels
self.base_channels = base_channels
self.num_layers = num_layers
self.num_nodes = num_nodes
self.stem_multiplier = stem_multiplier
self.out_indices = out_indices
assert self.out_indices[-1] == self.num_layers - 1
if auxliary:
assert aux_channels is not None
assert aux_out_channels is not None
self.aux_channels = aux_channels
self.aux_out_channels = aux_out_channels
self.auxliary_indice = 2 * self.num_layers // 3
else:
self.auxliary_indice = -1
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.out_channels = self.stem_multiplier * self.base_channels
stem_norm_cfg = copy.deepcopy(self.norm_cfg)
stem_norm_cfg.update(dict(affine=True))
self.stem = nn.Sequential(
nn.Conv2d(
self.in_channels, self.out_channels, 3, 1, 1, bias=False),
build_norm_layer(self.norm_cfg, self.out_channels)[1])
        # For the first cell, the stem is used for both s0 and s1.
        # [!] prev_prev_channels and prev_channels are output channel sizes,
        # while c_cur is the input channel size.
prev_prev_channels = self.out_channels
prev_channels = self.out_channels
self.out_channels = self.base_channels
self.cells = nn.ModuleList()
prev_reduction, reduction = False, False
for i in range(self.num_layers):
prev_reduction, reduction = reduction, False
            # Reduce the feature map size and double the channels at the 1/3
            # and 2/3 layers.
if i == self.num_layers // 3 or i == 2 * self.num_layers // 3:
self.out_channels *= 2
reduction = True
cell = Cell(self.num_nodes, self.out_channels, prev_channels,
prev_prev_channels, reduction, prev_reduction,
self.act_cfg, self.norm_cfg)
self.cells.append(cell)
prev_prev_channels = prev_channels
prev_channels = self.out_channels * self.num_nodes
if i == self.auxliary_indice:
self.auxliary_module = AuxiliaryModule(prev_channels,
self.aux_channels,
self.aux_out_channels,
self.norm_cfg)
def forward(self, x):
outs = []
s0 = s1 = self.stem(x)
for i, cell in enumerate(self.cells):
s0, s1 = s1, cell(s0, s1)
if i in self.out_indices:
outs.append(s1)
if i == self.auxliary_indice and self.training:
aux_feature = self.auxliary_module(s1)
outs.insert(0, aux_feature)
return tuple(outs)
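# Usage sketch (illustrative): the Placeholder edges must be materialized by a
# mutator before a forward pass can run, so construction alone looks like
#
#     backbone = DartsBackbone(in_channels=3, base_channels=16,
#                              num_layers=8, out_indices=(7, ))
#     # feats = backbone(torch.randn(1, 3, 32, 32))  # only after mutation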
| 34.028269
| 79
| 0.538733
|
import copy
import torch
import torch.nn as nn
from mmcls.models.builder import BACKBONES
from mmcv.cnn import build_activation_layer, build_norm_layer
from ...utils import Placeholder
class FactorizedReduce(nn.Module):
def __init__(self,
in_channels,
out_channels,
act_cfg=dict(type='ReLU'),
norm_cfg=dict(type='BN')):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.act_cfg = act_cfg
self.norm_cfg = norm_cfg
self.relu = build_activation_layer(self.act_cfg)
self.conv1 = nn.Conv2d(
self.in_channels,
self.out_channels // 2,
1,
stride=2,
padding=0,
bias=False)
self.conv2 = nn.Conv2d(
self.in_channels,
self.out_channels // 2,
1,
stride=2,
padding=0,
bias=False)
self.bn = build_norm_layer(self.norm_cfg, self.out_channels)[1]
def forward(self, x):
x = self.relu(x)
out = torch.cat([self.conv1(x), self.conv2(x[:, :, 1:, 1:])], dim=1)
out = self.bn(out)
return out
class StandardConv(nn.Module):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
act_cfg=dict(type='ReLU'),
norm_cfg=dict(type='BN')):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.act_cfg = act_cfg
self.norm_cfg = norm_cfg
self.net = nn.Sequential(
build_activation_layer(self.act_cfg),
nn.Conv2d(
self.in_channels,
self.out_channels,
self.kernel_size,
self.stride,
self.padding,
bias=False),
build_norm_layer(self.norm_cfg, self.out_channels)[1])
def forward(self, x):
return self.net(x)
class Node(nn.Module):
def __init__(self, node_id, num_prev_nodes, channels,
num_downsample_nodes):
super().__init__()
edges = nn.ModuleDict()
for i in range(num_prev_nodes):
if i < num_downsample_nodes:
stride = 2
else:
stride = 1
edge_id = '{}_p{}'.format(node_id, i)
edges.add_module(
edge_id,
nn.Sequential(
Placeholder(
group='node',
space_id=edge_id,
choice_args=dict(
stride=stride,
in_channels=channels,
out_channels=channels)), ))
self.edges = Placeholder(
group='node_edge', space_id=node_id, choices=edges)
def forward(self, prev_nodes):
return self.edges(prev_nodes)
class Cell(nn.Module):
def __init__(self,
num_nodes,
channels,
prev_channels,
prev_prev_channels,
reduction,
prev_reduction,
act_cfg=dict(type='ReLU'),
norm_cfg=dict(type='BN')):
super().__init__()
self.act_cfg = act_cfg
self.norm_cfg = norm_cfg
self.reduction = reduction
self.num_nodes = num_nodes
if prev_reduction:
self.preproc0 = FactorizedReduce(prev_prev_channels, channels,
self.act_cfg, self.norm_cfg)
else:
self.preproc0 = StandardConv(prev_prev_channels, channels, 1, 1, 0,
self.act_cfg, self.norm_cfg)
self.preproc1 = StandardConv(prev_channels, channels, 1, 1, 0,
self.act_cfg, self.norm_cfg)
self.nodes = nn.ModuleList()
for depth in range(2, self.num_nodes + 2):
if reduction:
node_id = f'reduce_n{depth}'
num_downsample_nodes = 2
else:
node_id = f'normal_n{depth}'
num_downsample_nodes = 0
self.nodes.append(
Node(node_id, depth, channels, num_downsample_nodes))
def forward(self, s0, s1):
tensors = [self.preproc0(s0), self.preproc1(s1)]
for node in self.nodes:
cur_tensor = node(tensors)
tensors.append(cur_tensor)
output = torch.cat(tensors[2:], dim=1)
return output
class AuxiliaryModule(nn.Module):
def __init__(self,
in_channels,
base_channels,
out_channels,
norm_cfg=dict(type='BN')):
super().__init__()
self.norm_cfg = norm_cfg
self.net = nn.Sequential(
nn.ReLU(),
nn.AvgPool2d(5, stride=2, padding=0,
count_include_pad=False),
nn.Conv2d(in_channels, base_channels, kernel_size=1, bias=False),
build_norm_layer(self.norm_cfg, base_channels)[1],
nn.ReLU(inplace=True),
nn.Conv2d(base_channels, out_channels, kernel_size=2,
bias=False),
build_norm_layer(self.norm_cfg, out_channels)[1],
nn.ReLU(inplace=True))
def forward(self, x):
return self.net(x)
@BACKBONES.register_module()
class DartsBackbone(nn.Module):
def __init__(self,
in_channels,
base_channels,
num_layers=8,
num_nodes=4,
stem_multiplier=3,
out_indices=(7, ),
auxliary=False,
aux_channels=None,
aux_out_channels=None,
act_cfg=dict(type='ReLU'),
norm_cfg=dict(type='BN')):
super().__init__()
self.in_channels = in_channels
self.base_channels = base_channels
self.num_layers = num_layers
self.num_nodes = num_nodes
self.stem_multiplier = stem_multiplier
self.out_indices = out_indices
assert self.out_indices[-1] == self.num_layers - 1
if auxliary:
assert aux_channels is not None
assert aux_out_channels is not None
self.aux_channels = aux_channels
self.aux_out_channels = aux_out_channels
self.auxliary_indice = 2 * self.num_layers // 3
else:
self.auxliary_indice = -1
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.out_channels = self.stem_multiplier * self.base_channels
stem_norm_cfg = copy.deepcopy(self.norm_cfg)
stem_norm_cfg.update(dict(affine=True))
self.stem = nn.Sequential(
nn.Conv2d(
self.in_channels, self.out_channels, 3, 1, 1, bias=False),
build_norm_layer(self.norm_cfg, self.out_channels)[1])
prev_prev_channels = self.out_channels
prev_channels = self.out_channels
self.out_channels = self.base_channels
self.cells = nn.ModuleList()
prev_reduction, reduction = False, False
for i in range(self.num_layers):
prev_reduction, reduction = reduction, False
if i == self.num_layers // 3 or i == 2 * self.num_layers // 3:
self.out_channels *= 2
reduction = True
cell = Cell(self.num_nodes, self.out_channels, prev_channels,
prev_prev_channels, reduction, prev_reduction,
self.act_cfg, self.norm_cfg)
self.cells.append(cell)
prev_prev_channels = prev_channels
prev_channels = self.out_channels * self.num_nodes
if i == self.auxliary_indice:
self.auxliary_module = AuxiliaryModule(prev_channels,
self.aux_channels,
self.aux_out_channels,
self.norm_cfg)
def forward(self, x):
outs = []
s0 = s1 = self.stem(x)
for i, cell in enumerate(self.cells):
s0, s1 = s1, cell(s0, s1)
if i in self.out_indices:
outs.append(s1)
if i == self.auxliary_indice and self.training:
aux_feature = self.auxliary_module(s1)
outs.insert(0, aux_feature)
return tuple(outs)
| true
| true
|
79091e61bc4c4e5018302b20f6b679e0854e8d2f
| 1,165
|
py
|
Python
|
setup.py
|
stiletto/bnw
|
46e38f379519689ad14d451de8e68acb7ee04405
|
[
"BSD-2-Clause"
] | 23
|
2015-01-14T13:22:37.000Z
|
2022-01-11T11:38:43.000Z
|
setup.py
|
stiletto/bnw
|
46e38f379519689ad14d451de8e68acb7ee04405
|
[
"BSD-2-Clause"
] | 31
|
2015-01-27T19:57:45.000Z
|
2018-10-04T22:35:22.000Z
|
setup.py
|
stiletto/bnw
|
46e38f379519689ad14d451de8e68acb7ee04405
|
[
"BSD-2-Clause"
] | 11
|
2015-01-02T10:29:14.000Z
|
2018-06-28T13:09:53.000Z
|
#!/usr/bin/env python
from setuptools import setup
setup(name='BnW',
version='0.1',
description='Microblogging service',
author='Stiletto',
author_email='blasux@blasux.ru',
url='http://github.com/stiletto/bnw',
packages=['bnw', 'bnw.core', 'bnw.formatting', 'bnw.handlers', 'bnw.scripts', 'bnw.search', 'bnw.web', 'bnw.xmpp'],
dependency_links=['http://github.com/mongodb/motor/tarball/master#egg=motor-0.1.2',
'http://github.com/mongodb/mongo-python-driver/tarball/master#egg=pymongo-2.6',
'https://github.com/stiletto/linkshit/archive/refs/tags/0.2.tar.gz#egg=linkshit-0.2'],
install_requires=['tornado>=2.0,<6.0', 'twisted<16.3.0', 'Pillow<7', 'PyRSS2Gen', 'python-dateutil', 'misaka<2.0.0', 'motor==0.7', 'linkshit', 'libthumbor', 'singledispatch<3.6'],
package_data={'bnw.web': ['templates/*.html','static/*.*', 'static/flot/*', 'static/web-socket-js/*']},
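      # The console_scripts entry points below expose bnw, bnw-search and bnw-admin as command-line programs.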
      entry_points={
'console_scripts': [
'bnw = bnw.scripts.entry:instance',
'bnw-search = bnw.scripts.entry:search',
'bnw-admin = bnw.scripts.admin:main',
],
}
)
| 44.807692
| 183
| 0.615451
|
from setuptools import setup
setup(name='BnW',
version='0.1',
description='Microblogging service',
author='Stiletto',
author_email='blasux@blasux.ru',
url='http://github.com/stiletto/bnw',
packages=['bnw', 'bnw.core', 'bnw.formatting', 'bnw.handlers', 'bnw.scripts', 'bnw.search', 'bnw.web', 'bnw.xmpp'],
dependency_links=['http://github.com/mongodb/motor/tarball/master#egg=motor-0.1.2',
'http://github.com/mongodb/mongo-python-driver/tarball/master#egg=pymongo-2.6',
'https://github.com/stiletto/linkshit/archive/refs/tags/0.2.tar.gz#egg=linkshit-0.2'],
install_requires=['tornado>=2.0,<6.0', 'twisted<16.3.0', 'Pillow<7', 'PyRSS2Gen', 'python-dateutil', 'misaka<2.0.0', 'motor==0.7', 'linkshit', 'libthumbor', 'singledispatch<3.6'],
package_data={'bnw.web': ['templates/*.html','static/*.*', 'static/flot/*', 'static/web-socket-js/*']},
      entry_points={
'console_scripts': [
'bnw = bnw.scripts.entry:instance',
'bnw-search = bnw.scripts.entry:search',
'bnw-admin = bnw.scripts.admin:main',
],
}
)
| true
| true
|
79091e6dad491c3eaffe0db0b1d5c615a4a3e4ef
| 1,131
|
py
|
Python
|
ClemBot.Bot/bot/__init__.py
|
glitchedcoder/ClemBot
|
5bc3f811d063f53098ed9d5bcf0194422ba3d7b3
|
[
"MIT"
] | 32
|
2021-07-10T18:51:29.000Z
|
2022-02-27T17:07:28.000Z
|
ClemBot.Bot/bot/__init__.py
|
glitchedcoder/ClemBot
|
5bc3f811d063f53098ed9d5bcf0194422ba3d7b3
|
[
"MIT"
] | 87
|
2021-06-29T05:11:35.000Z
|
2022-03-27T14:37:14.000Z
|
ClemBot.Bot/bot/__init__.py
|
glitchedcoder/ClemBot
|
5bc3f811d063f53098ed9d5bcf0194422ba3d7b3
|
[
"MIT"
] | 21
|
2021-06-23T23:46:17.000Z
|
2022-03-19T16:16:05.000Z
|
import os
import logging
import seqlog
from seqlog import StructuredRootLogger, StructuredLogger, ConsoleStructuredLogHandler
if bool(os.environ.get('PROD')):
# Production logging setup
url = os.environ.get('SEQ_URL')
key = os.environ.get('SEQ_BOT_KEY')
if not key:
raise Exception('SEQ_BOT_KEY not found but SEQ_URL was specified')
seqlog.log_to_seq(
# Initialize the seq logging url before the secrets are loaded
# this is ok because seq logging only happens in prod
server_url=url,
api_key=key,
level=logging.INFO,
batch_size=5,
auto_flush_timeout=10, # seconds
override_root_logger=False,
)
else:
# Development logging setup
logging.setLoggerClass(StructuredLogger)
logging.root = StructuredRootLogger(logging.WARNING)
logging.Logger.root = logging.root
logging.Logger.manager = logging.Manager(logging.Logger.root)
logging.basicConfig(
format='%(asctime)s %(levelname)s %(message)s',
handlers=[
ConsoleStructuredLogHandler()
],
level=logging.INFO,
)
| 27.585366
| 86
| 0.678161
|
import os
import logging
import seqlog
from seqlog import StructuredRootLogger, StructuredLogger, ConsoleStructuredLogHandler
if bool(os.environ.get('PROD')):
url = os.environ.get('SEQ_URL')
key = os.environ.get('SEQ_BOT_KEY')
if not key:
raise Exception('SEQ_BOT_KEY not found but SEQ_URL was specified')
seqlog.log_to_seq(
server_url=url,
api_key=key,
level=logging.INFO,
batch_size=5,
auto_flush_timeout=10,
override_root_logger=False,
)
else:
logging.setLoggerClass(StructuredLogger)
logging.root = StructuredRootLogger(logging.WARNING)
logging.Logger.root = logging.root
logging.Logger.manager = logging.Manager(logging.Logger.root)
logging.basicConfig(
format='%(asctime)s %(levelname)s %(message)s',
handlers=[
ConsoleStructuredLogHandler()
],
level=logging.INFO,
)
| true
| true
|
79091f7bf3e3c7224227d62b0127a0c8b5551430
| 1,584
|
py
|
Python
|
scripts/vaspy-incar.py
|
arafune/vaspy
|
36342eb9b2523fc5c878db5e269e77a51352364c
|
[
"BSD-3-Clause"
] | 10
|
2018-01-15T10:41:00.000Z
|
2021-03-31T05:53:50.000Z
|
scripts/vaspy-incar.py
|
arafune/vaspy
|
36342eb9b2523fc5c878db5e269e77a51352364c
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/vaspy-incar.py
|
arafune/vaspy
|
36342eb9b2523fc5c878db5e269e77a51352364c
|
[
"BSD-3-Clause"
] | 3
|
2019-08-13T16:34:56.000Z
|
2021-06-05T15:39:37.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Script to demonstrate vaspy.incar functionality.
"""
import argparse
import vaspy
import vaspy.incar
from logging import DEBUG, INFO, Formatter, StreamHandler, getLogger
LOGLEVEL = DEBUG
logger = getLogger(__name__)
fmt = "%(asctime)s %(levelname)s %(name)s :%(message)s"
formatter = Formatter(fmt)
handler = StreamHandler()
handler.setLevel(LOGLEVEL)
logger.setLevel(LOGLEVEL)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.propagate = False
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"-r",
help="""Show reformated INCAR (Use -i if edit in place)""",
action="store_true",
)
parser.add_argument("-i", help="""Edit the INCAR file in place""", action="store_true")
parser.add_argument(
"--lint",
help="""Tyny and private verion of code checker for vasp""",
action="store_true",
)
parser.add_argument("incar_file", metavar="INCAR_file", nargs=1)
args = parser.parse_args()
assert not (
args.lint and (args.i or args.r)
), "Lint option and re-format option (-i, -r) is exclusive."
logger.debug("args: {}".format(args))
incar: vaspy.incar.Incar = vaspy.load(args.incar_file[0])
if args.i:
with open(args.incar_file[0], mode="wt") as incar_file:
incar_file.write(incar.__str__())
if args.r:
print(incar)
if args.lint:
lint_msg = incar.lint_all()
    if lint_msg:  # with Python 3.8+, "if lint_msg := incar.lint_all():" could be used instead
print(lint_msg)
else:
print("ALL OK. Submit the job!!")
| 27.310345
| 87
| 0.703283
|
import argparse
import vaspy
import vaspy.incar
from logging import DEBUG, INFO, Formatter, StreamHandler, getLogger
LOGLEVEL = DEBUG
logger = getLogger(__name__)
fmt = "%(asctime)s %(levelname)s %(name)s :%(message)s"
formatter = Formatter(fmt)
handler = StreamHandler()
handler.setLevel(LOGLEVEL)
logger.setLevel(LOGLEVEL)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.propagate = False
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"-r",
help="""Show reformated INCAR (Use -i if edit in place)""",
action="store_true",
)
parser.add_argument("-i", help="""Edit the INCAR file in place""", action="store_true")
parser.add_argument(
"--lint",
help="""Tyny and private verion of code checker for vasp""",
action="store_true",
)
parser.add_argument("incar_file", metavar="INCAR_file", nargs=1)
args = parser.parse_args()
assert not (
args.lint and (args.i or args.r)
), "Lint option and re-format option (-i, -r) is exclusive."
logger.debug("args: {}".format(args))
incar: vaspy.incar.Incar = vaspy.load(args.incar_file[0])
if args.i:
with open(args.incar_file[0], mode="wt") as incar_file:
incar_file.write(incar.__str__())
if args.r:
print(incar)
if args.lint:
lint_msg = incar.lint_all()
if lint_msg:
print(lint_msg)
else:
print("ALL OK. Submit the job!!")
| true
| true
|
790920f5a12e9eb9b517725359bd82e7d56d9f98
| 2,897
|
py
|
Python
|
request_handler/appconfig.py
|
AsAsgard/trading_pr
|
d4290cf256504ffc3f15ede353e9e7dd19e1099f
|
[
"Apache-2.0"
] | 2
|
2019-05-04T08:23:28.000Z
|
2019-07-03T21:53:13.000Z
|
request_handler/appconfig.py
|
AsAsgard/trading_pr
|
d4290cf256504ffc3f15ede353e9e7dd19e1099f
|
[
"Apache-2.0"
] | 7
|
2019-05-01T12:28:17.000Z
|
2019-05-26T14:51:42.000Z
|
request_handler/appconfig.py
|
AsAsgard/trading_pr
|
d4290cf256504ffc3f15ede353e9e7dd19e1099f
|
[
"Apache-2.0"
] | 3
|
2019-05-01T14:01:36.000Z
|
2020-10-13T05:07:25.000Z
|
#!/usr/bin/env python
# coding: utf-8
import logging.config
import os
# Database configuration
DB_CONFIG = {
'username': 'root',
'password': os.environ.get('MYSQL_TRADING_PASS'),
'host': '127.0.0.1',
'dbname': 'trading_db',
}
# Logging configuration
LOGGING = {
'version': 1,
    'formatters': {  # Message formatting
'main': {
'format': '[%(asctime)s] %(levelname)s %(module)s %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S'
},
},
    'handlers': {  # Message handlers
'file_handler': {
'class': 'logging.FileHandler',
'filename': '/tmp/trading.log',
'formatter': 'main',
},
'streamlogger': {
'class': 'logging.StreamHandler',
'formatter': 'main',
},
},
    'loggers': {  # Loggers
'prod_logger': {
'handlers': ['file_handler', 'streamlogger'],
'level': 'INFO',
},
'devel_logger': {
'handlers': ['file_handler', 'streamlogger'],
'level': 'DEBUG',
},
},
}
logging.config.dictConfig(LOGGING)
# Base configuration
class Config(object):
DEBUG = False
CSRF_ENABLED = True
SQLALCHEMY_DATABASE_URI = f"mysql+pymysql://{DB_CONFIG['username']}:{DB_CONFIG['password']}" \
f"@{DB_CONFIG['host']}/{DB_CONFIG['dbname']}?charset=utf8"
SQLALCHEMY_TRACK_MODIFICATIONS = False
LOGGER_NAME = 'devel_logger'
MAIL_SERVER = 'smtp.yandex.com'
MAIL_PORT = 465
MAIL_USE_SSL = True
MAIL_USE_TSL = False
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
MAIL_DEFAULT_SENDER = os.environ.get('MAIL_USERNAME')
CELERY_BROKER_URL = 'redis://0.0.0.0:6379/'
CELERY_RESULT_BACKEND = 'redis://0.0.0.0:6379/'
CELERY_DEFAULT_QUEUE = 'request_handler_queue'
# Production configuration
class ProductionConfig(Config):
DEBUG = False
LOGGER_NAME = 'prod_logger'
# Development configuration
class DevelopmentConfig(Config):
DEVELOPMENT = True
DEBUG = True
LOGGER_NAME = 'devel_logger'
# Test configuration
class TestConfig(Config):
DEBUG = True
TESTING = True
WTF_CSRF_ENABLED = False
LOGGER_NAME = 'devel_logger'
test_db_name = "test_trading_db"
SQLALCHEMY_DATABASE_URI = f"mysql+pymysql://{DB_CONFIG['username']}:{DB_CONFIG['password']}" \
f"@{DB_CONFIG['host']}/{test_db_name}?charset=utf8"
# Current configuration
# --------------------------------------------------
_currentConfig = DevelopmentConfig
def getConfig():
return _currentConfig
def setConfig(config):
global _currentConfig
_currentConfig = config
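# Callers read the active configuration with getConfig() and switch it with setConfig(ProductionConfig).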
# --------------------------------------------------
# Size of the data chunk loaded into the database
chunkSize = 30000
| 25.637168
| 98
| 0.593027
|
import logging.config
import os
DB_CONFIG = {
'username': 'root',
'password': os.environ.get('MYSQL_TRADING_PASS'),
'host': '127.0.0.1',
'dbname': 'trading_db',
}
LOGGING = {
'version': 1,
'formatters': {
'main': {
'format': '[%(asctime)s] %(levelname)s %(module)s %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S'
},
},
'handlers': {
'file_handler': {
'class': 'logging.FileHandler',
'filename': '/tmp/trading.log',
'formatter': 'main',
},
'streamlogger': {
'class': 'logging.StreamHandler',
'formatter': 'main',
},
},
'loggers': {
'prod_logger': {
'handlers': ['file_handler', 'streamlogger'],
'level': 'INFO',
},
'devel_logger': {
'handlers': ['file_handler', 'streamlogger'],
'level': 'DEBUG',
},
},
}
logging.config.dictConfig(LOGGING)
class Config(object):
DEBUG = False
CSRF_ENABLED = True
SQLALCHEMY_DATABASE_URI = f"mysql+pymysql://{DB_CONFIG['username']}:{DB_CONFIG['password']}" \
f"@{DB_CONFIG['host']}/{DB_CONFIG['dbname']}?charset=utf8"
SQLALCHEMY_TRACK_MODIFICATIONS = False
LOGGER_NAME = 'devel_logger'
MAIL_SERVER = 'smtp.yandex.com'
MAIL_PORT = 465
MAIL_USE_SSL = True
MAIL_USE_TSL = False
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
MAIL_DEFAULT_SENDER = os.environ.get('MAIL_USERNAME')
CELERY_BROKER_URL = 'redis://0.0.0.0:6379/'
CELERY_RESULT_BACKEND = 'redis://0.0.0.0:6379/'
CELERY_DEFAULT_QUEUE = 'request_handler_queue'
class ProductionConfig(Config):
DEBUG = False
LOGGER_NAME = 'prod_logger'
class DevelopmentConfig(Config):
DEVELOPMENT = True
DEBUG = True
LOGGER_NAME = 'devel_logger'
class TestConfig(Config):
DEBUG = True
TESTING = True
WTF_CSRF_ENABLED = False
LOGGER_NAME = 'devel_logger'
test_db_name = "test_trading_db"
SQLALCHEMY_DATABASE_URI = f"mysql+pymysql://{DB_CONFIG['username']}:{DB_CONFIG['password']}" \
f"@{DB_CONFIG['host']}/{test_db_name}?charset=utf8"
_currentConfig = DevelopmentConfig
def getConfig():
return _currentConfig
def setConfig(config):
global _currentConfig
_currentConfig = config
chunkSize = 30000
| true
| true
|
790921904856a0105c9416489732fe217404b20f
| 13,224
|
py
|
Python
|
qa/rpc-tests/llmq-is-cl-conflicts.py
|
cryptowithacause/cryptocause-coin
|
f68f3ed504094f8780db4d78d0aef2089a2198a9
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/llmq-is-cl-conflicts.py
|
cryptowithacause/cryptocause-coin
|
f68f3ed504094f8780db4d78d0aef2089a2198a9
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/llmq-is-cl-conflicts.py
|
cryptowithacause/cryptocause-coin
|
f68f3ed504094f8780db4d78d0aef2089a2198a9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Dash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.blocktools import get_masternode_payment, create_coinbase, create_block
from test_framework.mininode import *
from test_framework.test_framework import cryptocauseTestFramework
from test_framework.util import *
from time import *
'''
llmq-is-cl-conflicts.py
Checks conflict handling between ChainLocks and InstantSend
'''
class TestNode(SingleNodeConnCB):
def __init__(self):
SingleNodeConnCB.__init__(self)
self.clsigs = {}
self.islocks = {}
def send_clsig(self, clsig):
hash = uint256_from_str(hash256(clsig.serialize()))
self.clsigs[hash] = clsig
inv = msg_inv([CInv(29, hash)])
self.send_message(inv)
def send_islock(self, islock):
hash = uint256_from_str(hash256(islock.serialize()))
self.islocks[hash] = islock
inv = msg_inv([CInv(30, hash)])
self.send_message(inv)
def on_getdata(self, conn, message):
for inv in message.inv:
if inv.hash in self.clsigs:
self.send_message(self.clsigs[inv.hash])
if inv.hash in self.islocks:
self.send_message(self.islocks[inv.hash])
class LLMQ_IS_CL_Conflicts(cryptocauseTestFramework):
def __init__(self):
super().__init__(6, 5, [], fast_dip3_enforcement=True)
#disable_mocktime()
def run_test(self):
while self.nodes[0].getblockchaininfo()["bip9_softforks"]["dip0008"]["status"] != "active":
self.nodes[0].generate(10)
sync_blocks(self.nodes, timeout=60*5)
self.test_node = TestNode()
self.test_node.add_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node))
NetworkThread().start() # Start up network handling in another thread
self.test_node.wait_for_verack()
self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
self.nodes[0].spork("SPORK_19_CHAINLOCKS_ENABLED", 0)
self.nodes[0].spork("SPORK_20_INSTANTSEND_LLMQ_BASED", 0)
self.wait_for_sporks_same()
self.mine_quorum()
# mine single block, wait for chainlock
self.nodes[0].generate(1)
self.wait_for_chainlock_tip_all_nodes()
self.test_chainlock_overrides_islock(False)
self.test_chainlock_overrides_islock(True)
self.test_islock_overrides_nonchainlock()
def test_chainlock_overrides_islock(self, test_block_conflict):
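        # Scenario: tx1 (and a chained tx4) get InstantSend locks, then a block containing the conflicting tx2
        # arrives together with a ChainLock; the ChainLock must be accepted and the conflicting ISLOCKs pruned.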
# create three raw TXs, they will conflict with each other
rawtx1 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)['hex']
rawtx2 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)['hex']
rawtx3 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)['hex']
rawtx1_obj = FromHex(CTransaction(), rawtx1)
rawtx2_obj = FromHex(CTransaction(), rawtx2)
rawtx3_obj = FromHex(CTransaction(), rawtx3)
rawtx1_txid = self.nodes[0].sendrawtransaction(rawtx1)
rawtx2_txid = encode(hash256(hex_str_to_bytes(rawtx2))[::-1], 'hex_codec').decode('ascii')
rawtx3_txid = encode(hash256(hex_str_to_bytes(rawtx3))[::-1], 'hex_codec').decode('ascii')
# Create a chained TX on top of tx1
inputs = []
n = 0
for out in rawtx1_obj.vout:
if out.nValue == 100000000:
inputs.append({"txid": rawtx1_txid, "vout": n})
n += 1
rawtx4 = self.nodes[0].createrawtransaction(inputs, {self.nodes[0].getnewaddress(): 0.999})
rawtx4 = self.nodes[0].signrawtransaction(rawtx4)['hex']
rawtx4_txid = self.nodes[0].sendrawtransaction(rawtx4)
for node in self.nodes:
self.wait_for_instantlock(rawtx1_txid, node)
self.wait_for_instantlock(rawtx4_txid, node)
block = self.create_block(self.nodes[0], [rawtx2_obj])
if test_block_conflict:
submit_result = self.nodes[0].submitblock(ToHex(block))
assert(submit_result == "conflict-tx-lock")
cl = self.create_chainlock(self.nodes[0].getblockcount() + 1, block.sha256)
self.test_node.send_clsig(cl)
# Give the CLSIG some time to propagate. We unfortunately can't check propagation here as "getblock/getblockheader"
# is required to check for CLSIGs, but this requires the block header to be propagated already
sleep(1)
# The block should get accepted now, and at the same time prune the conflicting ISLOCKs
submit_result = self.nodes[1].submitblock(ToHex(block))
if test_block_conflict:
assert(submit_result == "duplicate")
else:
assert(submit_result is None)
for node in self.nodes:
self.wait_for_chainlock(node, "%064x" % block.sha256)
# Create a chained TX on top of tx2
inputs = []
n = 0
for out in rawtx2_obj.vout:
if out.nValue == 100000000:
inputs.append({"txid": rawtx2_txid, "vout": n})
n += 1
rawtx5 = self.nodes[0].createrawtransaction(inputs, {self.nodes[0].getnewaddress(): 0.999})
rawtx5 = self.nodes[0].signrawtransaction(rawtx5)['hex']
rawtx5_txid = self.nodes[0].sendrawtransaction(rawtx5)
for node in self.nodes:
self.wait_for_instantlock(rawtx5_txid, node)
        # Let's verify that the ISLOCKs got pruned
for node in self.nodes:
assert_raises_jsonrpc(-5, "No such mempool or blockchain transaction", node.getrawtransaction, rawtx1_txid, True)
assert_raises_jsonrpc(-5, "No such mempool or blockchain transaction", node.getrawtransaction, rawtx4_txid, True)
rawtx = node.getrawtransaction(rawtx2_txid, True)
assert(rawtx['chainlock'])
assert(rawtx['instantlock'])
assert(not rawtx['instantlock_internal'])
def test_islock_overrides_nonchainlock(self):
# create two raw TXs, they will conflict with each other
rawtx1 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)['hex']
rawtx2 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)['hex']
rawtx1_txid = encode(hash256(hex_str_to_bytes(rawtx1))[::-1], 'hex_codec').decode('ascii')
rawtx2_txid = encode(hash256(hex_str_to_bytes(rawtx2))[::-1], 'hex_codec').decode('ascii')
# Create an ISLOCK but don't broadcast it yet
islock = self.create_islock(rawtx2)
# Stop enough MNs so that ChainLocks don't work anymore
for i in range(3):
self.stop_node(len(self.nodes) - 1)
self.nodes.pop(len(self.nodes) - 1)
self.mninfo.pop(len(self.mninfo) - 1)
# Send tx1, which will later conflict with the ISLOCK
self.nodes[0].sendrawtransaction(rawtx1)
# fast forward 11 minutes, so that the TX is considered safe and included in the next block
set_mocktime(get_mocktime() + int(60 * 11))
set_node_times(self.nodes, get_mocktime())
# Mine the conflicting TX into a block
good_tip = self.nodes[0].getbestblockhash()
self.nodes[0].generate(2)
self.sync_all()
# Assert that the conflicting tx got mined and the locked TX is not valid
assert(self.nodes[0].getrawtransaction(rawtx1_txid, True)['confirmations'] > 0)
assert_raises_jsonrpc(-25, "Missing inputs", self.nodes[0].sendrawtransaction, rawtx2)
# Send the ISLOCK, which should result in the last 2 blocks to be invalidated, even though the nodes don't know
# the locked transaction yet
self.test_node.send_islock(islock)
sleep(5)
assert(self.nodes[0].getbestblockhash() == good_tip)
assert(self.nodes[1].getbestblockhash() == good_tip)
# Send the actual transaction and mine it
self.nodes[0].sendrawtransaction(rawtx2)
self.nodes[0].generate(1)
self.sync_all()
assert(self.nodes[0].getrawtransaction(rawtx2_txid, True)['confirmations'] > 0)
assert(self.nodes[1].getrawtransaction(rawtx2_txid, True)['confirmations'] > 0)
assert(self.nodes[0].getrawtransaction(rawtx2_txid, True)['instantlock'])
assert(self.nodes[1].getrawtransaction(rawtx2_txid, True)['instantlock'])
assert(self.nodes[0].getbestblockhash() != good_tip)
assert(self.nodes[1].getbestblockhash() != good_tip)
def wait_for_chainlock_tip_all_nodes(self):
for node in self.nodes:
tip = node.getbestblockhash()
self.wait_for_chainlock(node, tip)
def wait_for_chainlock_tip(self, node):
tip = node.getbestblockhash()
self.wait_for_chainlock(node, tip)
def wait_for_chainlock(self, node, block_hash):
t = time()
while time() - t < 15:
try:
block = node.getblockheader(block_hash)
if block["confirmations"] > 0 and block["chainlock"]:
return
except:
# block might not be on the node yet
pass
sleep(0.1)
raise AssertionError("wait_for_chainlock timed out")
def create_block(self, node, vtx=[]):
bt = node.getblocktemplate()
height = bt['height']
tip_hash = bt['previousblockhash']
coinbasevalue = bt['coinbasevalue']
miner_address = node.getnewaddress()
mn_payee = bt['masternode'][0]['payee']
        # calculate fees that the block template included (we'll have to remove them from the coinbase,
        # as we won't include the template's transactions)
bt_fees = 0
for tx in bt['transactions']:
bt_fees += tx['fee']
new_fees = 0
for tx in vtx:
in_value = 0
out_value = 0
for txin in tx.vin:
txout = node.gettxout("%064x" % txin.prevout.hash, txin.prevout.n, False)
in_value += int(txout['value'] * COIN)
for txout in tx.vout:
out_value += txout.nValue
new_fees += in_value - out_value
# fix fees
coinbasevalue -= bt_fees
coinbasevalue += new_fees
mn_amount = get_masternode_payment(height, coinbasevalue)
miner_amount = coinbasevalue - mn_amount
outputs = {miner_address: str(Decimal(miner_amount) / COIN)}
if mn_amount > 0:
outputs[mn_payee] = str(Decimal(mn_amount) / COIN)
coinbase = FromHex(CTransaction(), node.createrawtransaction([], outputs))
coinbase.vin = create_coinbase(height).vin
# We can't really use this one as it would result in invalid merkle roots for masternode lists
if len(bt['coinbase_payload']) != 0:
cbtx = FromHex(CCbTx(version=1), bt['coinbase_payload'])
coinbase.nVersion = 3
coinbase.nType = 5 # CbTx
coinbase.vExtraPayload = cbtx.serialize()
coinbase.calc_sha256()
block = create_block(int(tip_hash, 16), coinbase, nTime=bt['curtime'])
block.vtx += vtx
# Add quorum commitments from template
for tx in bt['transactions']:
tx2 = FromHex(CTransaction(), tx['data'])
if tx2.nType == 6:
block.vtx.append(tx2)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
return block
def create_chainlock(self, height, blockHash):
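        # The signing request id is hash256 of the "clsig" tag plus the block height; each masternode in the quorum signs it.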
request_id = "%064x" % uint256_from_str(hash256(ser_string(b"clsig") + struct.pack("<I", height)))
message_hash = "%064x" % blockHash
for mn in self.mninfo:
mn.node.quorum('sign', 100, request_id, message_hash)
recSig = None
t = time()
while time() - t < 10:
try:
recSig = self.nodes[0].quorum('getrecsig', 100, request_id, message_hash)
break
except:
sleep(0.1)
assert(recSig is not None)
clsig = msg_clsig(height, blockHash, hex_str_to_bytes(recSig['sig']))
return clsig
def create_islock(self, hextx):
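        # The signing request id is hash256 of the "islock" tag, the input count and the serialized prevouts of the transaction.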
tx = FromHex(CTransaction(), hextx)
tx.rehash()
request_id_buf = ser_string(b"islock") + ser_compact_size(len(tx.vin))
inputs = []
for txin in tx.vin:
request_id_buf += txin.prevout.serialize()
inputs.append(txin.prevout)
request_id = "%064x" % uint256_from_str(hash256(request_id_buf))
message_hash = "%064x" % tx.sha256
for mn in self.mninfo:
mn.node.quorum('sign', 100, request_id, message_hash)
recSig = None
t = time()
while time() - t < 10:
try:
recSig = self.nodes[0].quorum('getrecsig', 100, request_id, message_hash)
break
except:
sleep(0.1)
assert(recSig is not None)
islock = msg_islock(inputs, tx.sha256, hex_str_to_bytes(recSig['sig']))
return islock
if __name__ == '__main__':
LLMQ_IS_CL_Conflicts().main()
| 39.00885
| 125
| 0.628176
|
from test_framework.blocktools import get_masternode_payment, create_coinbase, create_block
from test_framework.mininode import *
from test_framework.test_framework import cryptocauseTestFramework
from test_framework.util import *
from time import *
class TestNode(SingleNodeConnCB):
def __init__(self):
SingleNodeConnCB.__init__(self)
self.clsigs = {}
self.islocks = {}
def send_clsig(self, clsig):
hash = uint256_from_str(hash256(clsig.serialize()))
self.clsigs[hash] = clsig
inv = msg_inv([CInv(29, hash)])
self.send_message(inv)
def send_islock(self, islock):
hash = uint256_from_str(hash256(islock.serialize()))
self.islocks[hash] = islock
inv = msg_inv([CInv(30, hash)])
self.send_message(inv)
def on_getdata(self, conn, message):
for inv in message.inv:
if inv.hash in self.clsigs:
self.send_message(self.clsigs[inv.hash])
if inv.hash in self.islocks:
self.send_message(self.islocks[inv.hash])
class LLMQ_IS_CL_Conflicts(cryptocauseTestFramework):
def __init__(self):
super().__init__(6, 5, [], fast_dip3_enforcement=True)
def run_test(self):
while self.nodes[0].getblockchaininfo()["bip9_softforks"]["dip0008"]["status"] != "active":
self.nodes[0].generate(10)
sync_blocks(self.nodes, timeout=60*5)
self.test_node = TestNode()
self.test_node.add_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node))
NetworkThread().start()
self.test_node.wait_for_verack()
self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
self.nodes[0].spork("SPORK_19_CHAINLOCKS_ENABLED", 0)
self.nodes[0].spork("SPORK_20_INSTANTSEND_LLMQ_BASED", 0)
self.wait_for_sporks_same()
self.mine_quorum()
self.nodes[0].generate(1)
self.wait_for_chainlock_tip_all_nodes()
self.test_chainlock_overrides_islock(False)
self.test_chainlock_overrides_islock(True)
self.test_islock_overrides_nonchainlock()
def test_chainlock_overrides_islock(self, test_block_conflict):
rawtx1 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)['hex']
rawtx2 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)['hex']
rawtx3 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)['hex']
rawtx1_obj = FromHex(CTransaction(), rawtx1)
rawtx2_obj = FromHex(CTransaction(), rawtx2)
rawtx3_obj = FromHex(CTransaction(), rawtx3)
rawtx1_txid = self.nodes[0].sendrawtransaction(rawtx1)
rawtx2_txid = encode(hash256(hex_str_to_bytes(rawtx2))[::-1], 'hex_codec').decode('ascii')
rawtx3_txid = encode(hash256(hex_str_to_bytes(rawtx3))[::-1], 'hex_codec').decode('ascii')
inputs = []
n = 0
for out in rawtx1_obj.vout:
if out.nValue == 100000000:
inputs.append({"txid": rawtx1_txid, "vout": n})
n += 1
rawtx4 = self.nodes[0].createrawtransaction(inputs, {self.nodes[0].getnewaddress(): 0.999})
rawtx4 = self.nodes[0].signrawtransaction(rawtx4)['hex']
rawtx4_txid = self.nodes[0].sendrawtransaction(rawtx4)
for node in self.nodes:
self.wait_for_instantlock(rawtx1_txid, node)
self.wait_for_instantlock(rawtx4_txid, node)
block = self.create_block(self.nodes[0], [rawtx2_obj])
if test_block_conflict:
submit_result = self.nodes[0].submitblock(ToHex(block))
assert(submit_result == "conflict-tx-lock")
cl = self.create_chainlock(self.nodes[0].getblockcount() + 1, block.sha256)
self.test_node.send_clsig(cl)
        # Give the CLSIG some time to propagate. We unfortunately can't check propagation here as "getblock/getblockheader"
        # is required to check for CLSIGs, but this requires the block header to be propagated already
sleep(1)
# The block should get accepted now, and at the same time prune the conflicting ISLOCKs
submit_result = self.nodes[1].submitblock(ToHex(block))
if test_block_conflict:
assert(submit_result == "duplicate")
else:
assert(submit_result is None)
for node in self.nodes:
self.wait_for_chainlock(node, "%064x" % block.sha256)
# Create a chained TX on top of tx2
inputs = []
n = 0
for out in rawtx2_obj.vout:
if out.nValue == 100000000:
inputs.append({"txid": rawtx2_txid, "vout": n})
n += 1
rawtx5 = self.nodes[0].createrawtransaction(inputs, {self.nodes[0].getnewaddress(): 0.999})
rawtx5 = self.nodes[0].signrawtransaction(rawtx5)['hex']
rawtx5_txid = self.nodes[0].sendrawtransaction(rawtx5)
for node in self.nodes:
self.wait_for_instantlock(rawtx5_txid, node)
        # Let's verify that the ISLOCKs got pruned
for node in self.nodes:
assert_raises_jsonrpc(-5, "No such mempool or blockchain transaction", node.getrawtransaction, rawtx1_txid, True)
assert_raises_jsonrpc(-5, "No such mempool or blockchain transaction", node.getrawtransaction, rawtx4_txid, True)
rawtx = node.getrawtransaction(rawtx2_txid, True)
assert(rawtx['chainlock'])
assert(rawtx['instantlock'])
assert(not rawtx['instantlock_internal'])
def test_islock_overrides_nonchainlock(self):
# create two raw TXs, they will conflict with each other
rawtx1 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)['hex']
rawtx2 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)['hex']
rawtx1_txid = encode(hash256(hex_str_to_bytes(rawtx1))[::-1], 'hex_codec').decode('ascii')
rawtx2_txid = encode(hash256(hex_str_to_bytes(rawtx2))[::-1], 'hex_codec').decode('ascii')
# Create an ISLOCK but don't broadcast it yet
islock = self.create_islock(rawtx2)
for i in range(3):
self.stop_node(len(self.nodes) - 1)
self.nodes.pop(len(self.nodes) - 1)
self.mninfo.pop(len(self.mninfo) - 1)
# Send tx1, which will later conflict with the ISLOCK
self.nodes[0].sendrawtransaction(rawtx1)
# fast forward 11 minutes, so that the TX is considered safe and included in the next block
set_mocktime(get_mocktime() + int(60 * 11))
set_node_times(self.nodes, get_mocktime())
# Mine the conflicting TX into a block
good_tip = self.nodes[0].getbestblockhash()
self.nodes[0].generate(2)
self.sync_all()
# Assert that the conflicting tx got mined and the locked TX is not valid
assert(self.nodes[0].getrawtransaction(rawtx1_txid, True)['confirmations'] > 0)
assert_raises_jsonrpc(-25, "Missing inputs", self.nodes[0].sendrawtransaction, rawtx2)
        # Send the ISLOCK, which should result in the last 2 blocks to be invalidated, even though the nodes don't know the locked transaction yet
self.test_node.send_islock(islock)
sleep(5)
assert(self.nodes[0].getbestblockhash() == good_tip)
assert(self.nodes[1].getbestblockhash() == good_tip)
self.nodes[0].sendrawtransaction(rawtx2)
self.nodes[0].generate(1)
self.sync_all()
assert(self.nodes[0].getrawtransaction(rawtx2_txid, True)['confirmations'] > 0)
assert(self.nodes[1].getrawtransaction(rawtx2_txid, True)['confirmations'] > 0)
assert(self.nodes[0].getrawtransaction(rawtx2_txid, True)['instantlock'])
assert(self.nodes[1].getrawtransaction(rawtx2_txid, True)['instantlock'])
assert(self.nodes[0].getbestblockhash() != good_tip)
assert(self.nodes[1].getbestblockhash() != good_tip)
def wait_for_chainlock_tip_all_nodes(self):
for node in self.nodes:
tip = node.getbestblockhash()
self.wait_for_chainlock(node, tip)
def wait_for_chainlock_tip(self, node):
tip = node.getbestblockhash()
self.wait_for_chainlock(node, tip)
def wait_for_chainlock(self, node, block_hash):
t = time()
while time() - t < 15:
try:
block = node.getblockheader(block_hash)
if block["confirmations"] > 0 and block["chainlock"]:
return
except:
pass
sleep(0.1)
raise AssertionError("wait_for_chainlock timed out")
def create_block(self, node, vtx=[]):
bt = node.getblocktemplate()
height = bt['height']
tip_hash = bt['previousblockhash']
coinbasevalue = bt['coinbasevalue']
miner_address = node.getnewaddress()
mn_payee = bt['masternode'][0]['payee']
bt_fees = 0
for tx in bt['transactions']:
bt_fees += tx['fee']
new_fees = 0
for tx in vtx:
in_value = 0
out_value = 0
for txin in tx.vin:
txout = node.gettxout("%064x" % txin.prevout.hash, txin.prevout.n, False)
in_value += int(txout['value'] * COIN)
for txout in tx.vout:
out_value += txout.nValue
new_fees += in_value - out_value
# fix fees
coinbasevalue -= bt_fees
coinbasevalue += new_fees
mn_amount = get_masternode_payment(height, coinbasevalue)
miner_amount = coinbasevalue - mn_amount
outputs = {miner_address: str(Decimal(miner_amount) / COIN)}
if mn_amount > 0:
outputs[mn_payee] = str(Decimal(mn_amount) / COIN)
coinbase = FromHex(CTransaction(), node.createrawtransaction([], outputs))
coinbase.vin = create_coinbase(height).vin
# We can't really use this one as it would result in invalid merkle roots for masternode lists
if len(bt['coinbase_payload']) != 0:
cbtx = FromHex(CCbTx(version=1), bt['coinbase_payload'])
coinbase.nVersion = 3
coinbase.nType = 5
coinbase.vExtraPayload = cbtx.serialize()
coinbase.calc_sha256()
block = create_block(int(tip_hash, 16), coinbase, nTime=bt['curtime'])
block.vtx += vtx
for tx in bt['transactions']:
tx2 = FromHex(CTransaction(), tx['data'])
if tx2.nType == 6:
block.vtx.append(tx2)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
return block
def create_chainlock(self, height, blockHash):
request_id = "%064x" % uint256_from_str(hash256(ser_string(b"clsig") + struct.pack("<I", height)))
message_hash = "%064x" % blockHash
for mn in self.mninfo:
mn.node.quorum('sign', 100, request_id, message_hash)
recSig = None
t = time()
while time() - t < 10:
try:
recSig = self.nodes[0].quorum('getrecsig', 100, request_id, message_hash)
break
except:
sleep(0.1)
assert(recSig is not None)
clsig = msg_clsig(height, blockHash, hex_str_to_bytes(recSig['sig']))
return clsig
def create_islock(self, hextx):
tx = FromHex(CTransaction(), hextx)
tx.rehash()
request_id_buf = ser_string(b"islock") + ser_compact_size(len(tx.vin))
inputs = []
for txin in tx.vin:
request_id_buf += txin.prevout.serialize()
inputs.append(txin.prevout)
request_id = "%064x" % uint256_from_str(hash256(request_id_buf))
message_hash = "%064x" % tx.sha256
for mn in self.mninfo:
mn.node.quorum('sign', 100, request_id, message_hash)
recSig = None
t = time()
while time() - t < 10:
try:
recSig = self.nodes[0].quorum('getrecsig', 100, request_id, message_hash)
break
except:
sleep(0.1)
assert(recSig is not None)
islock = msg_islock(inputs, tx.sha256, hex_str_to_bytes(recSig['sig']))
return islock
if __name__ == '__main__':
LLMQ_IS_CL_Conflicts().main()
| true
| true
|
7909232e43551fb2c8aa53bac61176526dfa96ed
| 10,936
|
py
|
Python
|
Katna/image_filters/text_detector.py
|
viddik13/katna
|
12256602a5fd24368ffffe2c1a82a46a49215c15
|
[
"MIT"
] | 125
|
2019-08-22T06:53:55.000Z
|
2022-03-24T05:53:41.000Z
|
Katna/image_filters/text_detector.py
|
viddik13/katna
|
12256602a5fd24368ffffe2c1a82a46a49215c15
|
[
"MIT"
] | 19
|
2020-02-13T07:14:59.000Z
|
2021-12-01T15:13:33.000Z
|
Katna/image_filters/text_detector.py
|
viddik13/katna
|
12256602a5fd24368ffffe2c1a82a46a49215c15
|
[
"MIT"
] | 28
|
2019-09-03T07:00:29.000Z
|
2021-12-30T04:20:14.000Z
|
"""
.. module:: Katna.image_filters.text_detector
:platform: OS X
:synopsis: This module is implementation of text detector filter
"""
import os
import cv2
import numpy as np
import time
import requests
import random
from imutils.object_detection import non_max_suppression
from Katna.image_filters.filter import Filter
import Katna.config as config
class TextDetector(Filter):
"""TextDetector Class: Class for implementation of text detector filter, inherit from Filter class
"""
def __init__(self, weight=1.0):
"""Constructor for this class does following tasks, if not already downloaded\
, it first downloads text detector dnn weights file from public URL\
ands save it at USER_HOME/.katna directory, or /tmp/.katna directory.\
After this initializer code initializes internal parameter: \
min_confidence (for text detection)
"""
super().__init__(weight)
self.min_confidence = config.TextDetector.min_confidence
self.merge_threshold = config.TextDetector.merge_threshold
self.layerNames = config.TextDetector.layerNames
self.frozen_weights = config.TextDetector.frozen_weights
self.cache_subdir = config.TextDetector.cache_subdir
try:
self.network_folder_path = os.path.join(os.path.expanduser("~"), ".katna")
if not os.access(self.network_folder_path, os.W_OK):
self.network_folder_path = os.path.join("/tmp", ".katna")
self.datadir = os.path.join(self.network_folder_path, self.cache_subdir)
if not os.path.exists(self.datadir):
os.makedirs(self.datadir)
self.network_file_path = os.path.join(self.datadir, self.frozen_weights)
if not os.path.exists(self.network_file_path):
self.download_data()
self.net = cv2.dnn.readNet(self.network_file_path)
except Exception:
raise FileNotFoundError(
self.frozen_weights
+ " seems to be missing.\
Download the file and specify the full path\
while initializing TextDetector class"
)
def download_data(self):
"""Public function for downloading the network weight from the URL link, to be used for
text detection functionality.
Troubleshooting tip: If you get FileNotFound error during text detector initialization,
initialize the text detector and call this function directly to download the model file from public URL link.
"""
# create response object
link = config.TextDetector.model_download_link
r = requests.get(link, stream=True)
# download started
print("Downloading model file...")
# if not os.path.isfile(self.network_file_path) or not os.path.exists(self.network_file_path):
with open(os.path.join(self.datadir, self.frozen_weights), "wb") as f:
for chunk in r.iter_content(chunk_size=1024 * 1024):
if chunk:
f.write(chunk)
print("Model file downloaded.")
def __decode_predictions(self, scores, geometry):
"""Internal Function for getting bounding box and confidence values
from text detector dnn network output (scores, geometry)
function takes the number of rows and columns from the scores volume, then
initializes set of bounding box rectangles and corresponding confidence scores
"""
(numRows, numCols) = scores.shape[2:4]
rects = []
confidences = []
# loop over the number of rows
for y in range(0, numRows):
# extract the scores (probabilities), followed by the
# geometrical data used to derive potential bounding box
# coordinates that surround text
scoresData = scores[0, 0, y]
xData0 = geometry[0, 0, y]
xData1 = geometry[0, 1, y]
xData2 = geometry[0, 2, y]
xData3 = geometry[0, 3, y]
anglesData = geometry[0, 4, y]
# loop over the number of columns
for x in range(0, numCols):
# if our score does not have sufficient probability,
# ignore it
if scoresData[x] < self.min_confidence:
continue
# compute the offset factor as our resulting feature
# maps will be 4x smaller than the input image
(offsetX, offsetY) = (x * 4.0, y * 4.0)
# extract the rotation angle for the prediction and
# then compute the sin and cosine
angle = anglesData[x]
cos = np.cos(angle)
sin = np.sin(angle)
# use the geometry volume to derive the width and height
# of the bounding box
h = xData0[x] + xData2[x]
w = xData1[x] + xData3[x]
# compute both the starting and ending (x, y)-coordinates
# for the text prediction bounding box
endX = int(offsetX + (cos * xData1[x]) + (sin * xData2[x]))
endY = int(offsetY - (sin * xData1[x]) + (cos * xData2[x]))
startX = int(endX - w)
startY = int(endY - h)
# add the bounding box coordinates and probability score
# to our respective lists
rects.append((startX, startY, endX, endY))
confidences.append(scoresData[x])
# return a tuple of the bounding boxes and associated confidences
return (rects, confidences)
def __merge_boxes(self, rects):
"""main function to detect text boxes from image
:param rects: list of
:type rects: numpy array
:param rectsUsed: image file in numpy array/opencv format
:type rectsUsed: numpy array
:return: output image with the list of text boxes
:rtype: file, list
"""
def grouper(iterable, interval=2):
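            # Groups boxes whose top y-coordinates lie within `interval` pixels of each other, i.e. boxes on the same text line.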
prev = None
group = []
for item in iterable:
if not prev or abs(item[1] - prev[1]) <= interval:
group.append(item)
else:
yield group
group = [item]
prev = item
if group:
yield group
rects_used = []
heights = list()
for bbox in rects:
heights.append(bbox[3] - bbox[1])
heights = sorted(heights) # Sort heights
median_height = heights[len(heights) // 2] / 2 # Find half of the median height
bboxes_list = sorted(
rects, key=lambda k: k[1]
) # Sort the bounding boxes based on y1 coordinate ( y of the left-top coordinate )
combined_bboxes = grouper(
bboxes_list, median_height
) # Group the bounding boxes
for group in combined_bboxes:
x_min = min(group, key=lambda k: k[0])[0] # Find min of x1
x_max = max(group, key=lambda k: k[2])[2] # Find max of x2
y_min = min(group, key=lambda k: k[1])[1] # Find min of y1
y_max = max(group, key=lambda k: k[3])[3] # Find max of y2
rects_used.append([x_min, y_min, x_max, y_max])
return rects_used
def __detect_text(self):
"""Internal function to detect text bounding boxes from input image.
Returns list of bounding boxes of each detected text field in input image.
        :return: list of bounding boxes of the detected text fields
        :rtype: list
"""
(H, W) = self.image.shape[:2]
rW = W / 320
rH = H / 320
image = cv2.resize(self.image, (320, 320))
(H, W) = image.shape[:2]
# construct a blob from the image and then perform a forward pass of
# the model to obtain the two output layer sets
blob = cv2.dnn.blobFromImage(
self.image, 1.0, (W, H), (123.68, 116.78, 103.94), swapRB=True, crop=False
)
self.net.setInput(blob)
(scores, geometry) = self.net.forward(self.layerNames)
rects, confidences = self.__decode_predictions(scores, geometry)
# apply non-maxima suppression to suppress weak, overlapping bounding
# boxes
boxes = non_max_suppression(np.array(rects), probs=confidences)
text_rects = []
# loop over the bounding boxes
for (startX, startY, endX, endY) in boxes:
# scale the bounding box coordinates based on the respective
# ratios
startX = int(startX * rW)
startY = int(startY * rH)
endX = int(endX * rW)
endY = int(endY * rH)
cv2.rectangle(self.image, (startX, startY), (endX, endY), (0, 0, 255), 3)
text_rects.append([startX, startY, endX, endY])
text_rects = sorted(text_rects, key=lambda item: item[0])
final_rects = text_rects
if len(text_rects) > 0:
final_rects = self.__merge_boxes(text_rects)
return final_rects
def set_image(self, image):
"""Public set_image function, This will detect all text boxes in input image and
will saves them as internal list of text_rect to be used in get_filter_result
:param image: input image from which needs to be cropped
:type image: numpy array(opencv)
"""
if image is None:
return None
self.image = image
self.text_rects = self.__detect_text()
def get_filter_result(self, crop):
"""Main public function of TextDetector filter class,
this filter Returns false if crop contains no text, additionally
checks for overlap between input crop rectangle and the detected
text bounding box, returns True if No overlap (Filter will not discard input crop)
otherwise returns False (signal for discarding input crop).
:param crop: input crop rectangle to test
:type crop: crop_rect
        :return: True if the crop should be kept (text is not cut off), otherwise False
:rtype: bool
"""
# rect: xs,ys,xe,ye
# crop: x,y,w,h
if self.text_rects is None or len(self.text_rects) == 0:
return True
for rect in self.text_rects:
if not (
(rect[2]) <= (crop.x + crop.w)
and (rect[0]) >= (crop.x)
and (rect[1]) >= (crop.y)
and (rect[3]) <= (crop.y + crop.h)
):
return False
else:
return True
return True
| 40.058608
| 117
| 0.59263
|
import os
import cv2
import numpy as np
import time
import requests
import random
from imutils.object_detection import non_max_suppression
from Katna.image_filters.filter import Filter
import Katna.config as config
class TextDetector(Filter):
def __init__(self, weight=1.0):
super().__init__(weight)
self.min_confidence = config.TextDetector.min_confidence
self.merge_threshold = config.TextDetector.merge_threshold
self.layerNames = config.TextDetector.layerNames
self.frozen_weights = config.TextDetector.frozen_weights
self.cache_subdir = config.TextDetector.cache_subdir
try:
self.network_folder_path = os.path.join(os.path.expanduser("~"), ".katna")
if not os.access(self.network_folder_path, os.W_OK):
self.network_folder_path = os.path.join("/tmp", ".katna")
self.datadir = os.path.join(self.network_folder_path, self.cache_subdir)
if not os.path.exists(self.datadir):
os.makedirs(self.datadir)
self.network_file_path = os.path.join(self.datadir, self.frozen_weights)
if not os.path.exists(self.network_file_path):
self.download_data()
self.net = cv2.dnn.readNet(self.network_file_path)
except Exception:
raise FileNotFoundError(
self.frozen_weights
+ " seems to be missing.\
Download the file and specify the full path\
while initializing TextDetector class"
)
def download_data(self):
link = config.TextDetector.model_download_link
r = requests.get(link, stream=True)
print("Downloading model file...")
with open(os.path.join(self.datadir, self.frozen_weights), "wb") as f:
for chunk in r.iter_content(chunk_size=1024 * 1024):
if chunk:
f.write(chunk)
print("Model file downloaded.")
def __decode_predictions(self, scores, geometry):
(numRows, numCols) = scores.shape[2:4]
rects = []
confidences = []
for y in range(0, numRows):
scoresData = scores[0, 0, y]
xData0 = geometry[0, 0, y]
xData1 = geometry[0, 1, y]
xData2 = geometry[0, 2, y]
xData3 = geometry[0, 3, y]
anglesData = geometry[0, 4, y]
for x in range(0, numCols):
if scoresData[x] < self.min_confidence:
continue
(offsetX, offsetY) = (x * 4.0, y * 4.0)
angle = anglesData[x]
cos = np.cos(angle)
sin = np.sin(angle)
h = xData0[x] + xData2[x]
w = xData1[x] + xData3[x]
endX = int(offsetX + (cos * xData1[x]) + (sin * xData2[x]))
endY = int(offsetY - (sin * xData1[x]) + (cos * xData2[x]))
startX = int(endX - w)
startY = int(endY - h)
rects.append((startX, startY, endX, endY))
confidences.append(scoresData[x])
return (rects, confidences)
def __merge_boxes(self, rects):
def grouper(iterable, interval=2):
prev = None
group = []
for item in iterable:
if not prev or abs(item[1] - prev[1]) <= interval:
group.append(item)
else:
yield group
group = [item]
prev = item
if group:
yield group
rects_used = []
heights = list()
for bbox in rects:
heights.append(bbox[3] - bbox[1])
heights = sorted(heights)
median_height = heights[len(heights) // 2] / 2
bboxes_list = sorted(
rects, key=lambda k: k[1]
)
combined_bboxes = grouper(
bboxes_list, median_height
)
for group in combined_bboxes:
x_min = min(group, key=lambda k: k[0])[0]
x_max = max(group, key=lambda k: k[2])[2]
y_min = min(group, key=lambda k: k[1])[1]
y_max = max(group, key=lambda k: k[3])[3]
rects_used.append([x_min, y_min, x_max, y_max])
return rects_used
def __detect_text(self):
(H, W) = self.image.shape[:2]
rW = W / 320
rH = H / 320
image = cv2.resize(self.image, (320, 320))
(H, W) = image.shape[:2]
blob = cv2.dnn.blobFromImage(
self.image, 1.0, (W, H), (123.68, 116.78, 103.94), swapRB=True, crop=False
)
self.net.setInput(blob)
(scores, geometry) = self.net.forward(self.layerNames)
rects, confidences = self.__decode_predictions(scores, geometry)
boxes = non_max_suppression(np.array(rects), probs=confidences)
text_rects = []
for (startX, startY, endX, endY) in boxes:
startX = int(startX * rW)
startY = int(startY * rH)
endX = int(endX * rW)
endY = int(endY * rH)
cv2.rectangle(self.image, (startX, startY), (endX, endY), (0, 0, 255), 3)
text_rects.append([startX, startY, endX, endY])
text_rects = sorted(text_rects, key=lambda item: item[0])
final_rects = text_rects
if len(text_rects) > 0:
final_rects = self.__merge_boxes(text_rects)
return final_rects
def set_image(self, image):
if image is None:
return None
self.image = image
self.text_rects = self.__detect_text()
def get_filter_result(self, crop):
if self.text_rects is None or len(self.text_rects) == 0:
return True
for rect in self.text_rects:
if not (
(rect[2]) <= (crop.x + crop.w)
and (rect[0]) >= (crop.x)
and (rect[1]) >= (crop.y)
and (rect[3]) <= (crop.y + crop.h)
):
return False
else:
return True
return True
| true
| true
|
7909233748429a363bd6474889766b2fb68d7fbd
| 13,121
|
py
|
Python
|
client/python/unrealcv/__init__.py
|
Embracing/unrealcv
|
19305da8554c3a0e683a5e27a1e487cc2cf42776
|
[
"MIT"
] | 1,617
|
2016-09-10T04:41:33.000Z
|
2022-03-31T20:03:28.000Z
|
client/python/unrealcv/__init__.py
|
Embracing/unrealcv
|
19305da8554c3a0e683a5e27a1e487cc2cf42776
|
[
"MIT"
] | 199
|
2016-09-13T09:40:59.000Z
|
2022-03-16T02:37:23.000Z
|
client/python/unrealcv/__init__.py
|
Embracing/unrealcv
|
19305da8554c3a0e683a5e27a1e487cc2cf42776
|
[
"MIT"
] | 431
|
2016-09-10T03:20:35.000Z
|
2022-03-19T13:44:21.000Z
|
'''
UnrealCV
========
Provides functions to interact with games built using Unreal Engine.
>>> import unrealcv
>>> (HOST, PORT) = ('localhost', 9000)
>>> client = unrealcv.Client((HOST, PORT))
'''
import sys, ctypes, struct, threading, socket, re, time, logging
try:
from Queue import Queue
except:
from queue import Queue # for Python 3
_L = logging.getLogger(__name__)
# _L.addHandler(logging.NullHandler()) # Let client to decide how to do logging
_L.handlers = []
h = logging.StreamHandler()
h.setFormatter(logging.Formatter('%(levelname)s:%(module)s:%(lineno)d:%(message)s'))
_L.addHandler(h)
_L.propagate = False
_L.setLevel(logging.INFO)
fmt = 'I'
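# struct format 'I' (a 4-byte unsigned int) is used for both the magic number and the payload-size prefix.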
class SocketMessage(object):
'''
    Define the format of a message. This class is defined similarly to the class FNFSMessageHeader in UnrealEngine4, but without the CRC check.
    The magic number is from the Unreal implementation.
See https://github.com/EpicGames/UnrealEngine/blob/dff3c48be101bb9f84633a733ef79c91c38d9542/Engine/Source/Runtime/Sockets/Public/NetworkMessage.h
'''
magic = ctypes.c_uint32(0x9E2B83C1).value
def __init__(self, payload):
self.magic = SocketMessage.magic
self.payload_size = ctypes.c_uint32(len(payload)).value
@classmethod
def ReceivePayload(cls, socket):
'''
Return only payload, not the raw message, None if failed.
        socket: a blocking socket to read data from.
'''
# rbufsize = -1 # From SocketServer.py
rbufsize = 0
rfile = socket.makefile('rb', rbufsize)
_L.debug('read raw_magic %s', threading.current_thread().name)
try:
raw_magic = rfile.read(4) # socket is disconnected or invalid
except Exception as e:
_L.debug('Fail to read raw_magic, %s', e)
raw_magic = None
_L.debug('read raw_magic %s done: %s', threading.current_thread().name, repr(raw_magic))
if not raw_magic: # nothing to read
# _L.debug('socket disconnect')
return None
# print 'Receive raw magic: %d, %s' % (len(raw_magic), raw_magic)
magic = struct.unpack(fmt, raw_magic)[0] # 'I' means unsigned int
# print 'Receive magic:', magic
if magic != cls.magic:
            _L.error('Error: received a malformed message, the message should start with a four-byte uint32 magic number')
return None
# The next time it will read four bytes again
_L.debug('read payload')
raw_payload_size = rfile.read(4)
# print 'Receive raw payload size: %d, %s' % (len(raw_payload_size), raw_payload_size)
payload_size = struct.unpack('I', raw_payload_size)[0]
_L.debug('Receive payload size %d', payload_size)
# if the message is incomplete, should wait until all the data received
payload = b""
remain_size = payload_size
while remain_size > 0:
data = rfile.read(remain_size)
if not data:
return None
payload += data
bytes_read = len(data) # len(data) is its string length, but we want length of bytes
# print 'bytes_read %d, remain_size %d, read_str %s' % (bytes_read, remain_size, data)
assert(bytes_read <= remain_size)
remain_size -= bytes_read
rfile.close()
return payload
@classmethod
def WrapAndSendPayload(cls, socket, payload):
'''
Send payload, true if success, false if failed
'''
try:
# From SocketServer.py
# wbufsize = 0, flush immediately
wbufsize = -1
# Convert
socket_message = SocketMessage(payload)
wfile = socket.makefile('wb', wbufsize)
# Write the message
wfile.write(struct.pack(fmt, socket_message.magic))
# Need to send the packed version
# print 'Sent ', socket_message.magic
wfile.write(struct.pack(fmt, socket_message.payload_size))
# print 'Sent ', socket_message.payload_size
wfile.write(payload)
# print 'Sent ', payload
wfile.flush()
wfile.close() # Close file object, not close the socket
return True
except Exception as e:
_L.error('Fail to send message %s', e)
return False
class BaseClient(object):
'''
    BaseClient sends messages out and receives messages in a separate thread.
After calling the `send` function, only True or False will be returned
to indicate whether the operation was successful.
If you are trying to send a request and get a response, consider using `Client` instead.
This class adds message framing on top of TCP
'''
def __init__(self, endpoint, raw_message_handler):
'''
Parameters:
endpoint: a tuple (ip, port)
        raw_message_handler: a function defined as `def raw_message_handler(msg)` to handle an incoming message, msg is a string
'''
self.endpoint = endpoint
self.raw_message_handler = raw_message_handler
self.socket = None # if socket == None, means client is not connected
self.wait_connected = threading.Event()
# Start a thread to get data from the socket
receiving_thread = threading.Thread(target = self.__receiving)
receiving_thread.setDaemon(1)
receiving_thread.start()
def connect(self, timeout = 1):
'''
Try to connect to server, return whether connection successful
'''
if self.isconnected():
return True
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(self.endpoint)
self.socket = s
_L.debug('BaseClient: wait for connection confirm')
self.wait_connected.clear()
isset = self.wait_connected.wait(timeout)
assert(isset != None) # in python prior to 2.7 wait will return None
if isset:
return True
else:
self.socket = None
_L.error('Socket is created, but can not get connection confirm from %s, timeout after %.2f seconds', self.endpoint, timeout)
return False
# only assign self.socket to connected socket
# so it is safe to use self.socket != None to check connection status
            # This does not necessarily mean the connection was successful, it might be closed by the server
# Unless explicitly to tell the server to accept new socket
except Exception as e:
_L.error('Can not connect to %s', str(self.endpoint))
_L.error("Error %s", e)
self.socket = None
return False
def isconnected(self):
return self.socket is not None
def disconnect(self):
if self.isconnected():
_L.debug("BaseClient, request disconnect from server in %s", threading.current_thread().name)
self.socket.shutdown(socket.SHUT_RD)
# Because socket is on read in __receiving thread, need to call shutdown to force it to close
if self.socket: # This may also be set to None in the __receiving thread
self.socket.close()
self.socket = None
time.sleep(0.1) # TODO, this is tricky
def __receiving(self):
'''
Receive packages, Extract message from packages
Call self.message_handler if got a message
Also check whether client is still connected
'''
_L.debug('BaseClient start receiving in %s', threading.current_thread().name)
while True:
if self.isconnected():
# Only this thread is allowed to read from socket, otherwise need lock to avoid competing
message = SocketMessage.ReceivePayload(self.socket)
_L.debug('Got server raw message %s', message)
if not message:
_L.debug('BaseClient: remote disconnected, no more message')
self.socket = None
continue
if message.startswith(b'connected'):
_L.info('Got connection confirm: %s', repr(message))
self.wait_connected.set()
# self.wait_connected.clear()
continue
if self.raw_message_handler:
self.raw_message_handler(message) # will block this thread
else:
_L.error('No message handler for raw message %s', message)
def send(self, message):
'''
Send message out, return whether the message was successfully sent
'''
if self.isconnected():
_L.debug('BaseClient: Send message %s', self.socket)
SocketMessage.WrapAndSendPayload(self.socket, message)
return True
else:
_L.error('Fail to send message, client is not connected')
return False
class Client(object):
'''
Client can be used to send a request to a game and get the response.
Currently only one client is allowed at a time;
additional clients will be rejected.
'''
def __raw_message_handler(self, raw_message):
# print 'Waiting for message id %d' % self.message_id
match = self.raw_message_regexp.match(raw_message)
if match:
[message_id, message_body] = (int(match.group(1)), match.group(2)) # TODO: handle multiline response
message_body = raw_message[len(match.group(1))+1:]
# Convert to utf-8 if it's not a byte array (as is the case for images)
try:
message_body = message_body.decode('utf-8')
except UnicodeDecodeError:
pass
# print 'Received message id %s' % message_id
if message_id == self.message_id:
self.response = message_body
self.wait_response.set()
else:
assert(False)
else:
if self.message_handler:
def do_callback():
self.message_handler(raw_message)
self.queue.put(do_callback)
else:
# Instead of just dropping this message, give a verbose notice
_L.error('No message handler to handle message %s', raw_message)
def __init__(self, endpoint, message_handler=None):
self.raw_message_regexp = re.compile(b'(\d{1,8}):(.*)')
self.message_client = BaseClient(endpoint, self.__raw_message_handler)
self.message_handler = message_handler
self.message_id = 0
self.wait_response = threading.Event()
self.response = ''
self.isconnected = self.message_client.isconnected
self.connect = self.message_client.connect
self.disconnect = self.message_client.disconnect
self.queue = Queue()
self.main_thread = threading.Thread(target = self.worker)
self.main_thread.setDaemon(1)
self.main_thread.start()
def worker(self):
while True:
task = self.queue.get()
task()
self.queue.task_done()
def request(self, message, timeout=5):
# docstring in numpy style
"""
Send a request to the server and wait until a response arrives or the timeout expires.
Parameters
----------
message : str
command to control the game. More info can be found at http://docs.unrealcv.org/en/master/reference/commands.html
Returns
-------
str
plain text message from server
Examples
--------
>>> client = Client(('localhost', 9000))
>>> client.connect()
>>> response = client.request('vget /camera/0/view')
"""
if sys.version_info[0] == 3:
if not isinstance(message, bytes):
message = message.encode("utf-8")
def do_request():
raw_message = b'%d:%s' % (self.message_id, message)
_L.debug('Request: %s', raw_message.decode("utf-8"))
if not self.message_client.send(raw_message):
return None
# Requests can only be sent from self.main_thread; submitting requests from multiple threads at the same time is not supported
if threading.current_thread().name == self.main_thread.name:
do_request()
else:
self.queue.put(do_request)
# Timeout is required
# see: https://bugs.python.org/issue8844
self.wait_response.clear() # This is important
isset = self.wait_response.wait(timeout)
self.message_id += 1 # Increment it only after the request/response cycle finished
assert(isset != None) # only python prior to 2.7 will return None
if isset:
return self.response
else:
_L.error('Can not receive a response from server, timeout after %.2f seconds', timeout)
return None
(HOST, PORT) = ('localhost', 9000)
client = Client((HOST, PORT), None)
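A minimal usage sketch for the Client defined above, assuming a compatible server (for example an UnrealCV game instance) is listening on the HOST/PORT pair created in the last two lines; the command string is taken from the request() docstring and everything else is illustrative.

# Minimal sketch: assumes a compatible server is already listening on HOST/PORT.
if client.connect(timeout=5):
    # request() blocks until the response with the matching message id arrives
    # or the timeout expires; it returns None on failure.
    response = client.request('vget /camera/0/view')
    print('Server replied:', response)
    client.disconnect()
else:
    print('Could not connect; is the server running?')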
| 38.478006
| 149
| 0.604146
|
import sys, ctypes, struct, threading, socket, re, time, logging
try:
from Queue import Queue
except:
from queue import Queue
_L = logging.getLogger(__name__)
h = logging.StreamHandler()
h.setFormatter(logging.Formatter('%(levelname)s:%(module)s:%(lineno)d:%(message)s'))
_L.addHandler(h)
_L.propagate = False
_L.setLevel(logging.INFO)
fmt = 'I'
class SocketMessage(object):
magic = ctypes.c_uint32(0x9E2B83C1).value
def __init__(self, payload):
self.magic = SocketMessage.magic
self.payload_size = ctypes.c_uint32(len(payload)).value
@classmethod
def ReceivePayload(cls, socket):
rbufsize = -1  # rbufsize must be defined before it is used below
rfile = socket.makefile('rb', rbufsize)
_L.debug('read raw_magic %s', threading.current_thread().name)
try:
raw_magic = rfile.read(4)
except Exception as e:
_L.debug('Fail to read raw_magic, %s', e)
raw_magic = None
_L.debug('read raw_magic %s done: %s', threading.current_thread().name, repr(raw_magic))
if not raw_magic:
return None
magic = struct.unpack(fmt, raw_magic)[0]
if magic != cls.magic:
_L.error('Error: receive a malformat message, the message should start from a four bytes uint32 magic number')
return None
_L.debug('read payload')
raw_payload_size = rfile.read(4)
payload_size = struct.unpack('I', raw_payload_size)[0]
_L.debug('Receive payload size %d', payload_size)
payload = b""
remain_size = payload_size
while remain_size > 0:
data = rfile.read(remain_size)
if not data:
return None
payload += data
bytes_read = len(data)
assert(bytes_read <= remain_size)
remain_size -= bytes_read
rfile.close()
return payload
@classmethod
def WrapAndSendPayload(cls, socket, payload):
try:
wbufsize = -1
socket_message = SocketMessage(payload)
wfile = socket.makefile('wb', wbufsize)
wfile.write(struct.pack(fmt, socket_message.magic))
wfile.write(struct.pack(fmt, socket_message.payload_size))
wfile.write(payload)
wfile.flush()
wfile.close()
return True
except Exception as e:
_L.error('Fail to send message %s', e)
return False
class BaseClient(object):
def __init__(self, endpoint, raw_message_handler):
self.endpoint = endpoint
self.raw_message_handler = raw_message_handler
self.socket = None
self.wait_connected = threading.Event()
receiving_thread = threading.Thread(target = self.__receiving)
receiving_thread.setDaemon(1)
receiving_thread.start()
def connect(self, timeout = 1):
if self.isconnected():
return True
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(self.endpoint)
self.socket = s
_L.debug('BaseClient: wait for connection confirm')
self.wait_connected.clear()
isset = self.wait_connected.wait(timeout)
assert(isset != None)
if isset:
return True
else:
self.socket = None
_L.error('Socket is created, but can not get connection confirm from %s, timeout after %.2f seconds', self.endpoint, timeout)
return False
except Exception as e:
_L.error('Can not connect to %s', str(self.endpoint))
_L.error("Error %s", e)
self.socket = None
return False
def isconnected(self):
return self.socket is not None
def disconnect(self):
if self.isconnected():
_L.debug("BaseClient, request disconnect from server in %s", threading.current_thread().name)
self.socket.shutdown(socket.SHUT_RD)
if self.socket:
self.socket.close()
self.socket = None
time.sleep(0.1)
def __receiving(self):
_L.debug('BaseClient start receiving in %s', threading.current_thread().name)
while True:
if self.isconnected():
message = SocketMessage.ReceivePayload(self.socket)
_L.debug('Got server raw message %s', message)
if not message:
_L.debug('BaseClient: remote disconnected, no more message')
self.socket = None
continue
if message.startswith(b'connected'):
_L.info('Got connection confirm: %s', repr(message))
self.wait_connected.set()
continue
if self.raw_message_handler:
self.raw_message_handler(message)
else:
_L.error('No message handler for raw message %s', message)
def send(self, message):
if self.isconnected():
_L.debug('BaseClient: Send message %s', self.socket)
SocketMessage.WrapAndSendPayload(self.socket, message)
return True
else:
_L.error('Fail to send message, client is not connected')
return False
class Client(object):
def __raw_message_handler(self, raw_message):
match = self.raw_message_regexp.match(raw_message)
if match:
[message_id, message_body] = (int(match.group(1)), match.group(2))
message_body = raw_message[len(match.group(1))+1:]
try:
message_body = message_body.decode('utf-8')
except UnicodeDecodeError:
pass
# print 'Received message id %s' % message_id
if message_id == self.message_id:
self.response = message_body
self.wait_response.set()
else:
assert(False)
else:
if self.message_handler:
def do_callback():
self.message_handler(raw_message)
self.queue.put(do_callback)
else:
# Instead of just dropping this message, give a verbose notice
_L.error('No message handler to handle message %s', raw_message)
def __init__(self, endpoint, message_handler=None):
self.raw_message_regexp = re.compile(b'(\d{1,8}):(.*)')
self.message_client = BaseClient(endpoint, self.__raw_message_handler)
self.message_handler = message_handler
self.message_id = 0
self.wait_response = threading.Event()
self.response = ''
self.isconnected = self.message_client.isconnected
self.connect = self.message_client.connect
self.disconnect = self.message_client.disconnect
self.queue = Queue()
self.main_thread = threading.Thread(target = self.worker)
self.main_thread.setDaemon(1)
self.main_thread.start()
def worker(self):
while True:
task = self.queue.get()
task()
self.queue.task_done()
def request(self, message, timeout=5):
# docstring in numpy style
if sys.version_info[0] == 3:
if not isinstance(message, bytes):
message = message.encode("utf-8")
def do_request():
raw_message = b'%d:%s' % (self.message_id, message)
_L.debug('Request: %s', raw_message.decode("utf-8"))
if not self.message_client.send(raw_message):
return None
# request can only be sent in the main thread, do not support multi-thread submitting request together
if threading.current_thread().name == self.main_thread.name:
do_request()
else:
self.queue.put(do_request)
# Timeout is required
# see: https://bugs.python.org/issue8844
self.wait_response.clear() # This is important
isset = self.wait_response.wait(timeout)
self.message_id += 1 # Increment it only after the request/response cycle finished
assert(isset != None) # only python prior to 2.7 will return None
if isset:
return self.response
else:
_L.error('Can not receive a response from server, timeout after %.2f seconds', timeout)
return None
(HOST, PORT) = ('localhost', 9000)
client = Client((HOST, PORT), None)
| true
| true
|
790923c2fd21bc6db4b5c791020ec0e65b80f376
| 8,014
|
py
|
Python
|
python/ray/ray_constants.py
|
thavlik/ray
|
9b9c7f86f7e2c0723b7e14e38cd52c69cc7e1c43
|
[
"Apache-2.0"
] | 4
|
2019-10-18T17:44:58.000Z
|
2021-04-14T14:37:21.000Z
|
python/ray/ray_constants.py
|
thavlik/ray
|
9b9c7f86f7e2c0723b7e14e38cd52c69cc7e1c43
|
[
"Apache-2.0"
] | 1
|
2022-03-30T17:52:44.000Z
|
2022-03-30T17:52:44.000Z
|
python/ray/ray_constants.py
|
thavlik/ray
|
9b9c7f86f7e2c0723b7e14e38cd52c69cc7e1c43
|
[
"Apache-2.0"
] | 1
|
2020-06-26T07:54:25.000Z
|
2020-06-26T07:54:25.000Z
|
"""Ray constants used in the Python code."""
import logging
import math
import os
logger = logging.getLogger(__name__)
def env_integer(key, default):
if key in os.environ:
return int(os.environ[key])
return default
def direct_call_enabled():
return bool(int(os.environ.get("RAY_FORCE_DIRECT", "1")))
ID_SIZE = 20
# The default maximum number of bytes to allocate to the object store unless
# overridden by the user.
DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES = 20 * 10**9
# The default number of retries to call `put` when the object store is full.
DEFAULT_PUT_OBJECT_RETRIES = 5
# The default seconds for delay between calls to retry `put` when
# the object store is full. This delay is exponentially doubled up to
# DEFAULT_PUT_OBJECT_RETRIES times.
DEFAULT_PUT_OBJECT_DELAY = 1
# The smallest cap on the memory used by the object store that we allow.
# This must be greater than MEMORY_RESOURCE_UNIT_BYTES * 0.7
OBJECT_STORE_MINIMUM_MEMORY_BYTES = 75 * 1024 * 1024
# The default maximum number of bytes that the non-primary Redis shards are
# allowed to use unless overridden by the user.
DEFAULT_REDIS_MAX_MEMORY_BYTES = 10**10
# The smallest cap on the memory used by Redis that we allow.
REDIS_MINIMUM_MEMORY_BYTES = 10**7
# Default resource requirements for actors when no resource requirements are
# specified.
DEFAULT_ACTOR_METHOD_CPU_SIMPLE = 1
DEFAULT_ACTOR_CREATION_CPU_SIMPLE = 0
# Default resource requirements for actors when some resource requirements are
# specified.
DEFAULT_ACTOR_METHOD_CPU_SPECIFIED = 0
DEFAULT_ACTOR_CREATION_CPU_SPECIFIED = 1
# Default number of return values for each actor method.
DEFAULT_ACTOR_METHOD_NUM_RETURN_VALS = 1
# If a remote function or actor (or some other export) has serialized size
# greater than this quantity, print a warning.
PICKLE_OBJECT_WARNING_SIZE = 10**7
# If remote functions with the same source are imported this many times, then
# print a warning.
DUPLICATE_REMOTE_FUNCTION_THRESHOLD = 100
# The maximum resource quantity that is allowed. TODO(rkn): This could be
# relaxed, but the current implementation of the node manager will be slower
# for large resource quantities due to bookkeeping of specific resource IDs.
MAX_RESOURCE_QUANTITY = 100000
# Each memory "resource" counts as this many bytes of memory.
MEMORY_RESOURCE_UNIT_BYTES = 50 * 1024 * 1024
# Number of units 1 resource can be subdivided into.
MIN_RESOURCE_GRANULARITY = 0.0001
# Fraction of plasma memory that can be reserved. It is actually 70% but this
# is set to 69% to leave some headroom.
PLASMA_RESERVABLE_MEMORY_FRACTION = 0.69
def round_to_memory_units(memory_bytes, round_up):
"""Round bytes to the nearest memory unit."""
return from_memory_units(to_memory_units(memory_bytes, round_up))
def from_memory_units(memory_units):
"""Convert from memory units -> bytes."""
return memory_units * MEMORY_RESOURCE_UNIT_BYTES
def to_memory_units(memory_bytes, round_up):
"""Convert from bytes -> memory units."""
value = memory_bytes / MEMORY_RESOURCE_UNIT_BYTES
if value < 1:
raise ValueError(
"The minimum amount of memory that can be requested is {} bytes, "
"however {} bytes was asked.".format(MEMORY_RESOURCE_UNIT_BYTES,
memory_bytes))
if isinstance(value, float) and not value.is_integer():
# TODO(ekl) Ray currently does not support fractional resources when
# the quantity is greater than one. We should fix memory resources to
# be allocated in units of bytes and not 100MB.
if round_up:
value = int(math.ceil(value))
else:
value = int(math.floor(value))
return int(value)
# Different types of Ray errors that can be pushed to the driver.
# TODO(rkn): These should be defined in flatbuffers and must be synced with
# the existing C++ definitions.
WAIT_FOR_CLASS_PUSH_ERROR = "wait_for_class"
PICKLING_LARGE_OBJECT_PUSH_ERROR = "pickling_large_object"
WAIT_FOR_FUNCTION_PUSH_ERROR = "wait_for_function"
TASK_PUSH_ERROR = "task"
REGISTER_REMOTE_FUNCTION_PUSH_ERROR = "register_remote_function"
FUNCTION_TO_RUN_PUSH_ERROR = "function_to_run"
VERSION_MISMATCH_PUSH_ERROR = "version_mismatch"
CHECKPOINT_PUSH_ERROR = "checkpoint"
REGISTER_ACTOR_PUSH_ERROR = "register_actor"
WORKER_CRASH_PUSH_ERROR = "worker_crash"
WORKER_DIED_PUSH_ERROR = "worker_died"
WORKER_POOL_LARGE_ERROR = "worker_pool_large"
PUT_RECONSTRUCTION_PUSH_ERROR = "put_reconstruction"
INFEASIBLE_TASK_ERROR = "infeasible_task"
RESOURCE_DEADLOCK_ERROR = "resource_deadlock"
REMOVED_NODE_ERROR = "node_removed"
MONITOR_DIED_ERROR = "monitor_died"
LOG_MONITOR_DIED_ERROR = "log_monitor_died"
REPORTER_DIED_ERROR = "reporter_died"
DASHBOARD_DIED_ERROR = "dashboard_died"
RAYLET_CONNECTION_ERROR = "raylet_connection_error"
# Abort autoscaling if more than this number of errors are encountered. This
# is a safety feature to prevent e.g. runaway node launches.
AUTOSCALER_MAX_NUM_FAILURES = env_integer("AUTOSCALER_MAX_NUM_FAILURES", 5)
# The maximum number of nodes to launch in a single request.
# Multiple requests may be made for this batch size, up to
# the limit of AUTOSCALER_MAX_CONCURRENT_LAUNCHES.
AUTOSCALER_MAX_LAUNCH_BATCH = env_integer("AUTOSCALER_MAX_LAUNCH_BATCH", 5)
# Max number of nodes to launch at a time.
AUTOSCALER_MAX_CONCURRENT_LAUNCHES = env_integer(
"AUTOSCALER_MAX_CONCURRENT_LAUNCHES", 10)
# Interval at which to perform autoscaling updates.
AUTOSCALER_UPDATE_INTERVAL_S = env_integer("AUTOSCALER_UPDATE_INTERVAL_S", 5)
# The autoscaler will attempt to restart Ray on nodes it hasn't heard from
# in more than this interval.
AUTOSCALER_HEARTBEAT_TIMEOUT_S = env_integer("AUTOSCALER_HEARTBEAT_TIMEOUT_S",
30)
# The reporter will report its statistics this often (milliseconds).
REPORTER_UPDATE_INTERVAL_MS = env_integer("REPORTER_UPDATE_INTERVAL_MS", 2500)
# Max number of retries to AWS (default is 5, time increases exponentially)
BOTO_MAX_RETRIES = env_integer("BOTO_MAX_RETRIES", 12)
# Max number of retries to create an EC2 node (retry different subnet)
BOTO_CREATE_MAX_RETRIES = env_integer("BOTO_CREATE_MAX_RETRIES", 5)
LOGGER_FORMAT = (
"%(asctime)s\t%(levelname)s %(filename)s:%(lineno)s -- %(message)s")
LOGGER_FORMAT_HELP = "The logging format. default='{}'".format(LOGGER_FORMAT)
LOGGER_LEVEL = "info"
LOGGER_LEVEL_CHOICES = ["debug", "info", "warning", "error", "critical"]
LOGGER_LEVEL_HELP = ("The logging level threshold, choices=['debug', 'info',"
" 'warning', 'error', 'critical'], default='info'")
# A constant indicating that an actor doesn't need reconstructions.
NO_RECONSTRUCTION = 0
# A constant indicating that an actor should be reconstructed infinite times.
INFINITE_RECONSTRUCTION = 2**30
# Constants used to define the different process types.
PROCESS_TYPE_REAPER = "reaper"
PROCESS_TYPE_MONITOR = "monitor"
PROCESS_TYPE_RAYLET_MONITOR = "raylet_monitor"
PROCESS_TYPE_LOG_MONITOR = "log_monitor"
PROCESS_TYPE_REPORTER = "reporter"
PROCESS_TYPE_DASHBOARD = "dashboard"
PROCESS_TYPE_WORKER = "worker"
PROCESS_TYPE_RAYLET = "raylet"
PROCESS_TYPE_PLASMA_STORE = "plasma_store"
PROCESS_TYPE_REDIS_SERVER = "redis_server"
PROCESS_TYPE_WEB_UI = "web_ui"
LOG_MONITOR_MAX_OPEN_FILES = 200
# A constant used as object metadata to indicate the object is raw binary.
RAW_BUFFER_METADATA = b"RAW"
# A constant used as object metadata to indicate the object is pickled. This
# format is only ever used for Python inline task argument values.
PICKLE_BUFFER_METADATA = b"PICKLE"
# A constant used as object metadata to indicate the object is pickle5 format.
PICKLE5_BUFFER_METADATA = b"PICKLE5"
AUTOSCALER_RESOURCE_REQUEST_CHANNEL = b"autoscaler_resource_request"
# The default password to prevent redis port scanning attack.
# Hex for ray.
REDIS_DEFAULT_PASSWORD = "5241590000000000"
# The default ip address to bind to.
NODE_DEFAULT_IP = "127.0.0.1"
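A small illustrative sketch of the memory-unit helpers defined above; the byte counts are invented for demonstration and rely on the 50 MiB MEMORY_RESOURCE_UNIT_BYTES value from this file.

# 200 MiB is exactly 4 units of MEMORY_RESOURCE_UNIT_BYTES (50 MiB) each.
units = to_memory_units(200 * 1024 * 1024, round_up=False)
print(units)                      # 4
print(from_memory_units(units))   # 209715200 bytes

# A fractional number of units is rounded; round_up picks the direction.
print(to_memory_units(120 * 1024 * 1024, round_up=True))    # 3
print(to_memory_units(120 * 1024 * 1024, round_up=False))   # 2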
| 39.673267
| 78
| 0.774644
|
import logging
import math
import os
logger = logging.getLogger(__name__)
def env_integer(key, default):
if key in os.environ:
return int(os.environ[key])
return default
def direct_call_enabled():
return bool(int(os.environ.get("RAY_FORCE_DIRECT", "1")))
ID_SIZE = 20
DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES = 20 * 10**9
DEFAULT_PUT_OBJECT_RETRIES = 5
DEFAULT_PUT_OBJECT_DELAY = 1
OBJECT_STORE_MINIMUM_MEMORY_BYTES = 75 * 1024 * 1024
DEFAULT_REDIS_MAX_MEMORY_BYTES = 10**10
REDIS_MINIMUM_MEMORY_BYTES = 10**7
DEFAULT_ACTOR_METHOD_CPU_SIMPLE = 1
DEFAULT_ACTOR_CREATION_CPU_SIMPLE = 0
DEFAULT_ACTOR_METHOD_CPU_SPECIFIED = 0
DEFAULT_ACTOR_CREATION_CPU_SPECIFIED = 1
DEFAULT_ACTOR_METHOD_NUM_RETURN_VALS = 1
PICKLE_OBJECT_WARNING_SIZE = 10**7
DUPLICATE_REMOTE_FUNCTION_THRESHOLD = 100
MAX_RESOURCE_QUANTITY = 100000
MEMORY_RESOURCE_UNIT_BYTES = 50 * 1024 * 1024
MIN_RESOURCE_GRANULARITY = 0.0001
PLASMA_RESERVABLE_MEMORY_FRACTION = 0.69
def round_to_memory_units(memory_bytes, round_up):
return from_memory_units(to_memory_units(memory_bytes, round_up))
def from_memory_units(memory_units):
return memory_units * MEMORY_RESOURCE_UNIT_BYTES
def to_memory_units(memory_bytes, round_up):
value = memory_bytes / MEMORY_RESOURCE_UNIT_BYTES
if value < 1:
raise ValueError(
"The minimum amount of memory that can be requested is {} bytes, "
"however {} bytes was asked.".format(MEMORY_RESOURCE_UNIT_BYTES,
memory_bytes))
if isinstance(value, float) and not value.is_integer():
if round_up:
value = int(math.ceil(value))
else:
value = int(math.floor(value))
return int(value)
WAIT_FOR_CLASS_PUSH_ERROR = "wait_for_class"
PICKLING_LARGE_OBJECT_PUSH_ERROR = "pickling_large_object"
WAIT_FOR_FUNCTION_PUSH_ERROR = "wait_for_function"
TASK_PUSH_ERROR = "task"
REGISTER_REMOTE_FUNCTION_PUSH_ERROR = "register_remote_function"
FUNCTION_TO_RUN_PUSH_ERROR = "function_to_run"
VERSION_MISMATCH_PUSH_ERROR = "version_mismatch"
CHECKPOINT_PUSH_ERROR = "checkpoint"
REGISTER_ACTOR_PUSH_ERROR = "register_actor"
WORKER_CRASH_PUSH_ERROR = "worker_crash"
WORKER_DIED_PUSH_ERROR = "worker_died"
WORKER_POOL_LARGE_ERROR = "worker_pool_large"
PUT_RECONSTRUCTION_PUSH_ERROR = "put_reconstruction"
INFEASIBLE_TASK_ERROR = "infeasible_task"
RESOURCE_DEADLOCK_ERROR = "resource_deadlock"
REMOVED_NODE_ERROR = "node_removed"
MONITOR_DIED_ERROR = "monitor_died"
LOG_MONITOR_DIED_ERROR = "log_monitor_died"
REPORTER_DIED_ERROR = "reporter_died"
DASHBOARD_DIED_ERROR = "dashboard_died"
RAYLET_CONNECTION_ERROR = "raylet_connection_error"
AUTOSCALER_MAX_NUM_FAILURES = env_integer("AUTOSCALER_MAX_NUM_FAILURES", 5)
AUTOSCALER_MAX_LAUNCH_BATCH = env_integer("AUTOSCALER_MAX_LAUNCH_BATCH", 5)
AUTOSCALER_MAX_CONCURRENT_LAUNCHES = env_integer(
"AUTOSCALER_MAX_CONCURRENT_LAUNCHES", 10)
AUTOSCALER_UPDATE_INTERVAL_S = env_integer("AUTOSCALER_UPDATE_INTERVAL_S", 5)
# in more than this interval.
AUTOSCALER_HEARTBEAT_TIMEOUT_S = env_integer("AUTOSCALER_HEARTBEAT_TIMEOUT_S",
30)
# The reporter will report its statistics this often (milliseconds).
REPORTER_UPDATE_INTERVAL_MS = env_integer("REPORTER_UPDATE_INTERVAL_MS", 2500)
# Max number of retries to AWS (default is 5, time increases exponentially)
BOTO_MAX_RETRIES = env_integer("BOTO_MAX_RETRIES", 12)
# Max number of retries to create an EC2 node (retry different subnet)
BOTO_CREATE_MAX_RETRIES = env_integer("BOTO_CREATE_MAX_RETRIES", 5)
LOGGER_FORMAT = (
"%(asctime)s\t%(levelname)s %(filename)s:%(lineno)s -- %(message)s")
LOGGER_FORMAT_HELP = "The logging format. default='{}'".format(LOGGER_FORMAT)
LOGGER_LEVEL = "info"
LOGGER_LEVEL_CHOICES = ["debug", "info", "warning", "error", "critical"]
LOGGER_LEVEL_HELP = ("The logging level threshold, choices=['debug', 'info',"
" 'warning', 'error', 'critical'], default='info'")
# A constant indicating that an actor doesn't need reconstructions.
NO_RECONSTRUCTION = 0
INFINITE_RECONSTRUCTION = 2**30
PROCESS_TYPE_REAPER = "reaper"
PROCESS_TYPE_MONITOR = "monitor"
PROCESS_TYPE_RAYLET_MONITOR = "raylet_monitor"
PROCESS_TYPE_LOG_MONITOR = "log_monitor"
PROCESS_TYPE_REPORTER = "reporter"
PROCESS_TYPE_DASHBOARD = "dashboard"
PROCESS_TYPE_WORKER = "worker"
PROCESS_TYPE_RAYLET = "raylet"
PROCESS_TYPE_PLASMA_STORE = "plasma_store"
PROCESS_TYPE_REDIS_SERVER = "redis_server"
PROCESS_TYPE_WEB_UI = "web_ui"
LOG_MONITOR_MAX_OPEN_FILES = 200
RAW_BUFFER_METADATA = b"RAW"
PICKLE_BUFFER_METADATA = b"PICKLE"
PICKLE5_BUFFER_METADATA = b"PICKLE5"
AUTOSCALER_RESOURCE_REQUEST_CHANNEL = b"autoscaler_resource_request"
REDIS_DEFAULT_PASSWORD = "5241590000000000"
NODE_DEFAULT_IP = "127.0.0.1"
| true
| true
|
79092458d54f834a021361e4c5753435738c3bb6
| 222
|
py
|
Python
|
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/account_tax_cash_basis/models/__init__.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | 1
|
2019-12-19T01:53:13.000Z
|
2019-12-19T01:53:13.000Z
|
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/account_tax_cash_basis/models/__init__.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | null | null | null |
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/account_tax_cash_basis/models/__init__.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import account_config_settings
import account_move
import account_partial_reconcile
import account_tax
import res_company
| 24.666667
| 74
| 0.815315
|
import account_config_settings
import account_move
import account_partial_reconcile
import account_tax
import res_company
| true
| true
|
7909246c63c81c1121878c80bf533febb6897be6
| 982
|
py
|
Python
|
Level2/Lessons12951/gamjapark2.py
|
StudyForCoding/ProgrammersLevel
|
dc957b1c02cc4383a93b8cbf3d739e6c4d88aa25
|
[
"MIT"
] | null | null | null |
Level2/Lessons12951/gamjapark2.py
|
StudyForCoding/ProgrammersLevel
|
dc957b1c02cc4383a93b8cbf3d739e6c4d88aa25
|
[
"MIT"
] | null | null | null |
Level2/Lessons12951/gamjapark2.py
|
StudyForCoding/ProgrammersLevel
|
dc957b1c02cc4383a93b8cbf3d739e6c4d88aa25
|
[
"MIT"
] | 1
|
2021-04-05T07:35:59.000Z
|
2021-04-05T07:35:59.000Z
|
# Make a JadenCase string
def solution(s):
s = s.lower()
changed_words = []
print(s.split(" "))
for word in s.split(" "):
if len(word) == 0:
changed_words.append(word)
continue
elif len(word) == 1:
word = word[0].upper()
else:
word = word[0].upper() + word[1:]
changed_words.append(word)
print(changed_words)
answer = ' '.join(changed_words)
return answer
'''
Grading started.
Accuracy tests
Test 1 〉 Passed (0.02ms, 10.3MB)
Test 2 〉 Passed (0.02ms, 10.1MB)
Test 3 〉 Passed (0.02ms, 10.2MB)
Test 4 〉 Passed (0.02ms, 10.1MB)
Test 5 〉 Passed (0.03ms, 10.2MB)
Test 6 〉 Passed (0.02ms, 10.1MB)
Test 7 〉 Passed (0.03ms, 10.2MB)
Test 8 〉 Passed (0.01ms, 10.2MB)
Test 9 〉 Passed (0.02ms, 10.2MB)
Test 10 〉 Passed (0.01ms, 10.1MB)
Test 11 〉 Passed (0.03ms, 10.2MB)
Test 12 〉 Passed (0.02ms, 10.2MB)
Test 13 〉 Passed (0.02ms, 10.2MB)
Test 14 〉 Passed (0.02ms, 10.2MB)
Test 15 〉 Passed (0.03ms, 10.2MB)
Test 16 〉 Passed (0.01ms, 10.2MB)
Grading result
Accuracy: 100.0
Total: 100.0 / 100.0
'''
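Illustrative calls to the solution above; the input strings are made up for demonstration (the function also prints its intermediate word lists before returning).

# Each word is capitalized after lowercasing; empty "words" from split(" ")
# are appended unchanged, so runs of spaces are preserved.
print(solution("3people unFollowed me"))   # returns "3people Unfollowed Me"
print(solution("for the last week"))       # returns "For The Last Week"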
| 22.837209
| 45
| 0.546843
|
def solution(s):
s = s.lower()
changed_words = []
print(s.split(" "))
for word in s.split(" "):
if len(word) == 0:
changed_words.append(word)
continue
elif len(word) == 1:
word = word[0].upper()
else:
word = word[0].upper() + word[1:]
changed_words.append(word)
print(changed_words)
answer = ' '.join(changed_words)
return answer
| true
| true
|
7909246c6f87492cdfbf5a4178c7937ebf4855d0
| 1,158
|
py
|
Python
|
sigrhe_contract.py
|
ejgr-mtsiw/pw-html-parser
|
af44b1f163e02f285c2c7d86f1d838083c6546cf
|
[
"MIT"
] | null | null | null |
sigrhe_contract.py
|
ejgr-mtsiw/pw-html-parser
|
af44b1f163e02f285c2c7d86f1d838083c6546cf
|
[
"MIT"
] | null | null | null |
sigrhe_contract.py
|
ejgr-mtsiw/pw-html-parser
|
af44b1f163e02f285c2c7d86f1d838083c6546cf
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# (C)Eduardo Ribeiro - 1600820
class Contract:
id = 0
school_code = 0
school_name = ""
n_contract = 0
n_hours_per_week = 0
contract_end_date = ""
application_deadline = ""
recruitment_group = ""
county = ""
district = ""
class_project = ""
qualifications = ""
def __init__(
self,
id,
school_code,
school_name,
n_contract,
n_hours_per_week,
contract_end_date,
application_deadline,
recruitment_group,
county,
district,
class_project,
qualifications,
):
self.id = id
self.school_code = school_code
self.school_name = school_name
self.n_contract = n_contract
self.n_hours_per_week = n_hours_per_week
self.contract_end_date = contract_end_date
self.application_deadline = application_deadline
self.recruitment_group = recruitment_group
self.county = county
self.district = district
self.class_project = class_project
self.qualifications = qualifications
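A brief, hypothetical instantiation of the Contract class above; every field value below is invented purely for illustration.

# Hypothetical example values; real data would come from the parsed SIGRHE pages.
contract = Contract(
    id=1,
    school_code=401234,
    school_name="Escola Secundaria Exemplo",
    n_contract=7,
    n_hours_per_week=18,
    contract_end_date="2020-08-31",
    application_deadline="2019-09-15",
    recruitment_group="550",
    county="Lisboa",
    district="Lisboa",
    class_project="Informatica",
    qualifications="Licenciatura",
)
print(contract.school_name, contract.n_hours_per_week)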
| 24.125
| 56
| 0.611399
|
class Contract:
id = 0
school_code = 0
school_name = ""
n_contract = 0
n_hours_per_week = 0
contract_end_date = ""
application_deadline = ""
recruitment_group = ""
county = ""
district = ""
class_project = ""
qualifications = ""
def __init__(
self,
id,
school_code,
school_name,
n_contract,
n_hours_per_week,
contract_end_date,
application_deadline,
recruitment_group,
county,
district,
class_project,
qualifications,
):
self.id = id
self.school_code = school_code
self.school_name = school_name
self.n_contract = n_contract
self.n_hours_per_week = n_hours_per_week
self.contract_end_date = contract_end_date
self.application_deadline = application_deadline
self.recruitment_group = recruitment_group
self.county = county
self.district = district
self.class_project = class_project
self.qualifications = qualifications
| true
| true
|
790924e95f876a790320a1c2ef3702feb736a0f3
| 328
|
py
|
Python
|
Degree Distribution.py
|
monkee52/NCSSChallenge
|
e8849085e0578268dc5ce022b39c7d499884d810
|
[
"BSD-2-Clause"
] | null | null | null |
Degree Distribution.py
|
monkee52/NCSSChallenge
|
e8849085e0578268dc5ce022b39c7d499884d810
|
[
"BSD-2-Clause"
] | null | null | null |
Degree Distribution.py
|
monkee52/NCSSChallenge
|
e8849085e0578268dc5ce022b39c7d499884d810
|
[
"BSD-2-Clause"
] | null | null | null |
# Enter your code for "Degree Distribution" here.
import csv
degrees = []
students = []
for l in csv.DictReader(open("degrees.csv")):
degrees.append(l)
for l in csv.DictReader(open("students.csv")):
students.append(l)
students = sorted(students, key=lambda x: float(x["score"]))
students.reverse()
print(students)
| 18.222222
| 60
| 0.698171
|
import csv
degrees = []
students = []
for l in csv.DictReader(open("degrees.csv")):
degrees.append(l)
for l in csv.DictReader(open("students.csv")):
students.append(l)
students = sorted(students, key=lambda x: float(x["score"]))
students.reverse()
print(students)
| true
| true
|
790925888ab503ef5ead5db6ac4f59663ab3665c
| 5,499
|
py
|
Python
|
spacy/tests/test_displacy.py
|
xettrisomeman/spaCy
|
72f7f4e68a5076a87dd9402812bfb72e479237ed
|
[
"MIT"
] | null | null | null |
spacy/tests/test_displacy.py
|
xettrisomeman/spaCy
|
72f7f4e68a5076a87dd9402812bfb72e479237ed
|
[
"MIT"
] | null | null | null |
spacy/tests/test_displacy.py
|
xettrisomeman/spaCy
|
72f7f4e68a5076a87dd9402812bfb72e479237ed
|
[
"MIT"
] | null | null | null |
import pytest
from spacy import displacy
from spacy.displacy.render import DependencyRenderer, EntityRenderer
from spacy.lang.fa import Persian
from spacy.tokens import Span, Doc
def test_displacy_parse_ents(en_vocab):
"""Test that named entities on a Doc are converted into displaCy's format."""
doc = Doc(en_vocab, words=["But", "Google", "is", "starting", "from", "behind"])
doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings["ORG"])]
ents = displacy.parse_ents(doc)
assert isinstance(ents, dict)
assert ents["text"] == "But Google is starting from behind "
assert ents["ents"] == [
{"start": 4, "end": 10, "label": "ORG", "kb_id": "", "kb_url": "#"}
]
doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings["ORG"], kb_id="Q95")]
ents = displacy.parse_ents(doc)
assert isinstance(ents, dict)
assert ents["text"] == "But Google is starting from behind "
assert ents["ents"] == [
{"start": 4, "end": 10, "label": "ORG", "kb_id": "Q95", "kb_url": "#"}
]
def test_displacy_parse_ents_with_kb_id_options(en_vocab):
"""Test that named entities with kb_id on a Doc are converted into displaCy's format."""
doc = Doc(en_vocab, words=["But", "Google", "is", "starting", "from", "behind"])
doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings["ORG"], kb_id="Q95")]
ents = displacy.parse_ents(
doc, {"kb_url_template": "https://www.wikidata.org/wiki/{}"}
)
assert isinstance(ents, dict)
assert ents["text"] == "But Google is starting from behind "
assert ents["ents"] == [
{
"start": 4,
"end": 10,
"label": "ORG",
"kb_id": "Q95",
"kb_url": "https://www.wikidata.org/wiki/Q95",
}
]
def test_displacy_parse_deps(en_vocab):
"""Test that deps and tags on a Doc are converted into displaCy's format."""
words = ["This", "is", "a", "sentence"]
heads = [1, 1, 3, 1]
pos = ["DET", "VERB", "DET", "NOUN"]
tags = ["DT", "VBZ", "DT", "NN"]
deps = ["nsubj", "ROOT", "det", "attr"]
doc = Doc(en_vocab, words=words, heads=heads, pos=pos, tags=tags, deps=deps)
deps = displacy.parse_deps(doc)
assert isinstance(deps, dict)
assert deps["words"] == [
{"lemma": None, "text": words[0], "tag": pos[0]},
{"lemma": None, "text": words[1], "tag": pos[1]},
{"lemma": None, "text": words[2], "tag": pos[2]},
{"lemma": None, "text": words[3], "tag": pos[3]},
]
assert deps["arcs"] == [
{"start": 0, "end": 1, "label": "nsubj", "dir": "left"},
{"start": 2, "end": 3, "label": "det", "dir": "left"},
{"start": 1, "end": 3, "label": "attr", "dir": "right"},
]
def test_displacy_invalid_arcs():
renderer = DependencyRenderer()
words = [{"text": "This", "tag": "DET"}, {"text": "is", "tag": "VERB"}]
arcs = [
{"start": 0, "end": 1, "label": "nsubj", "dir": "left"},
{"start": -1, "end": 2, "label": "det", "dir": "left"},
]
with pytest.raises(ValueError):
renderer.render([{"words": words, "arcs": arcs}])
def test_displacy_spans(en_vocab):
"""Test that displaCy can render Spans."""
doc = Doc(en_vocab, words=["But", "Google", "is", "starting", "from", "behind"])
doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings["ORG"])]
html = displacy.render(doc[1:4], style="ent")
assert html.startswith("<div")
def test_displacy_raises_for_wrong_type(en_vocab):
with pytest.raises(ValueError):
displacy.render("hello world")
def test_displacy_rtl():
# Source: http://www.sobhe.ir/hazm/ – is this correct?
words = ["ما", "بسیار", "کتاب", "می\u200cخوانیم"]
# These are (likely) wrong, but it's just for testing
pos = ["PRO", "ADV", "N_PL", "V_SUB"] # needs to match lang.fa.tag_map
deps = ["foo", "bar", "foo", "baz"]
heads = [1, 0, 3, 1]
nlp = Persian()
doc = Doc(nlp.vocab, words=words, tags=pos, heads=heads, deps=deps)
doc.ents = [Span(doc, 1, 3, label="TEST")]
html = displacy.render(doc, page=True, style="dep")
assert "direction: rtl" in html
assert 'direction="rtl"' in html
assert f'lang="{nlp.lang}"' in html
html = displacy.render(doc, page=True, style="ent")
assert "direction: rtl" in html
assert f'lang="{nlp.lang}"' in html
def test_displacy_render_wrapper(en_vocab):
"""Test that displaCy accepts custom rendering wrapper."""
def wrapper(html):
return "TEST" + html + "TEST"
displacy.set_render_wrapper(wrapper)
doc = Doc(en_vocab, words=["But", "Google", "is", "starting", "from", "behind"])
doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings["ORG"])]
html = displacy.render(doc, style="ent")
assert html.startswith("TEST<div")
assert html.endswith("/div>TEST")
# Restore
displacy.set_render_wrapper(lambda html: html)
def test_displacy_options_case():
ents = ["foo", "BAR"]
colors = {"FOO": "red", "bar": "green"}
renderer = EntityRenderer({"ents": ents, "colors": colors})
text = "abcd"
labels = ["foo", "bar", "FOO", "BAR"]
spans = [{"start": i, "end": i + 1, "label": labels[i]} for i in range(len(text))]
result = renderer.render_ents("abcde", spans, None).split("\n\n")
assert "red" in result[0] and "foo" in result[0]
assert "green" in result[1] and "bar" in result[1]
assert "red" in result[2] and "FOO" in result[2]
assert "green" in result[3] and "BAR" in result[3]
| 38.1875
| 92
| 0.590653
|
import pytest
from spacy import displacy
from spacy.displacy.render import DependencyRenderer, EntityRenderer
from spacy.lang.fa import Persian
from spacy.tokens import Span, Doc
def test_displacy_parse_ents(en_vocab):
doc = Doc(en_vocab, words=["But", "Google", "is", "starting", "from", "behind"])
doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings["ORG"])]
ents = displacy.parse_ents(doc)
assert isinstance(ents, dict)
assert ents["text"] == "But Google is starting from behind "
assert ents["ents"] == [
{"start": 4, "end": 10, "label": "ORG", "kb_id": "", "kb_url": "#"}
]
doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings["ORG"], kb_id="Q95")]
ents = displacy.parse_ents(doc)
assert isinstance(ents, dict)
assert ents["text"] == "But Google is starting from behind "
assert ents["ents"] == [
{"start": 4, "end": 10, "label": "ORG", "kb_id": "Q95", "kb_url": "#"}
]
def test_displacy_parse_ents_with_kb_id_options(en_vocab):
doc = Doc(en_vocab, words=["But", "Google", "is", "starting", "from", "behind"])
doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings["ORG"], kb_id="Q95")]
ents = displacy.parse_ents(
doc, {"kb_url_template": "https://www.wikidata.org/wiki/{}"}
)
assert isinstance(ents, dict)
assert ents["text"] == "But Google is starting from behind "
assert ents["ents"] == [
{
"start": 4,
"end": 10,
"label": "ORG",
"kb_id": "Q95",
"kb_url": "https://www.wikidata.org/wiki/Q95",
}
]
def test_displacy_parse_deps(en_vocab):
words = ["This", "is", "a", "sentence"]
heads = [1, 1, 3, 1]
pos = ["DET", "VERB", "DET", "NOUN"]
tags = ["DT", "VBZ", "DT", "NN"]
deps = ["nsubj", "ROOT", "det", "attr"]
doc = Doc(en_vocab, words=words, heads=heads, pos=pos, tags=tags, deps=deps)
deps = displacy.parse_deps(doc)
assert isinstance(deps, dict)
assert deps["words"] == [
{"lemma": None, "text": words[0], "tag": pos[0]},
{"lemma": None, "text": words[1], "tag": pos[1]},
{"lemma": None, "text": words[2], "tag": pos[2]},
{"lemma": None, "text": words[3], "tag": pos[3]},
]
assert deps["arcs"] == [
{"start": 0, "end": 1, "label": "nsubj", "dir": "left"},
{"start": 2, "end": 3, "label": "det", "dir": "left"},
{"start": 1, "end": 3, "label": "attr", "dir": "right"},
]
def test_displacy_invalid_arcs():
renderer = DependencyRenderer()
words = [{"text": "This", "tag": "DET"}, {"text": "is", "tag": "VERB"}]
arcs = [
{"start": 0, "end": 1, "label": "nsubj", "dir": "left"},
{"start": -1, "end": 2, "label": "det", "dir": "left"},
]
with pytest.raises(ValueError):
renderer.render([{"words": words, "arcs": arcs}])
def test_displacy_spans(en_vocab):
doc = Doc(en_vocab, words=["But", "Google", "is", "starting", "from", "behind"])
doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings["ORG"])]
html = displacy.render(doc[1:4], style="ent")
assert html.startswith("<div")
def test_displacy_raises_for_wrong_type(en_vocab):
with pytest.raises(ValueError):
displacy.render("hello world")
def test_displacy_rtl():
words = ["ما", "بسیار", "کتاب", "می\u200cخوانیم"]
pos = ["PRO", "ADV", "N_PL", "V_SUB"] # needs to match lang.fa.tag_map
deps = ["foo", "bar", "foo", "baz"]
heads = [1, 0, 3, 1]
nlp = Persian()
doc = Doc(nlp.vocab, words=words, tags=pos, heads=heads, deps=deps)
doc.ents = [Span(doc, 1, 3, label="TEST")]
html = displacy.render(doc, page=True, style="dep")
assert "direction: rtl" in html
assert 'direction="rtl"' in html
assert f'lang="{nlp.lang}"' in html
html = displacy.render(doc, page=True, style="ent")
assert "direction: rtl" in html
assert f'lang="{nlp.lang}"' in html
def test_displacy_render_wrapper(en_vocab):
def wrapper(html):
return "TEST" + html + "TEST"
displacy.set_render_wrapper(wrapper)
doc = Doc(en_vocab, words=["But", "Google", "is", "starting", "from", "behind"])
doc.ents = [Span(doc, 1, 2, label=doc.vocab.strings["ORG"])]
html = displacy.render(doc, style="ent")
assert html.startswith("TEST<div")
assert html.endswith("/div>TEST")
# Restore
displacy.set_render_wrapper(lambda html: html)
def test_displacy_options_case():
ents = ["foo", "BAR"]
colors = {"FOO": "red", "bar": "green"}
renderer = EntityRenderer({"ents": ents, "colors": colors})
text = "abcd"
labels = ["foo", "bar", "FOO", "BAR"]
spans = [{"start": i, "end": i + 1, "label": labels[i]} for i in range(len(text))]
result = renderer.render_ents("abcde", spans, None).split("\n\n")
assert "red" in result[0] and "foo" in result[0]
assert "green" in result[1] and "bar" in result[1]
assert "red" in result[2] and "FOO" in result[2]
assert "green" in result[3] and "BAR" in result[3]
| true
| true
|
790925b5b59a83031217bbd4c92740713b911535
| 431
|
py
|
Python
|
Python/Hora da Corrida - SBC 2019.py
|
Filipe-uefs/Algoritmos
|
2443f133cd40781d0ad20ed248a53e279b0acba1
|
[
"MIT"
] | null | null | null |
Python/Hora da Corrida - SBC 2019.py
|
Filipe-uefs/Algoritmos
|
2443f133cd40781d0ad20ed248a53e279b0acba1
|
[
"MIT"
] | null | null | null |
Python/Hora da Corrida - SBC 2019.py
|
Filipe-uefs/Algoritmos
|
2443f133cd40781d0ad20ed248a53e279b0acba1
|
[
"MIT"
] | null | null | null |
# Link: https://neps.academy/problem/443
voltas,placas= input().split()
result = int(voltas) * int(placas)
numbers = []
resultado = result * float(str(0) + str('.') + str(1))
for x in range(2,11):
if int(resultado)==resultado:
numbers.append(int(resultado))
else:
numbers.append(int(resultado)+1)
resultado = result * float(str(0) + str('.') + str(x))
for x in numbers:
print(int(x), end=' ')
| 26.9375
| 58
| 0.607889
|
voltas,placas= input().split()
result = int(voltas) * int(placas)
numbers = []
resultado = result * float(str(0) + str('.') + str(1))
for x in range(2,11):
if int(resultado)==resultado:
numbers.append(int(resultado))
else:
numbers.append(int(resultado)+1)
resultado = result * float(str(0) + str('.') + str(x))
for x in numbers:
print(int(x), end=' ')
| true
| true
|
790925ec72a5e89da2f9fd5db3424ad72f8d336d
| 180
|
py
|
Python
|
example/decode_image.py
|
Eye-Remocon/Face_Recognition
|
256ba99e821b923679b85aba9a3febecb28258df
|
[
"MIT"
] | null | null | null |
example/decode_image.py
|
Eye-Remocon/Face_Recognition
|
256ba99e821b923679b85aba9a3febecb28258df
|
[
"MIT"
] | 8
|
2021-05-05T05:40:38.000Z
|
2021-06-28T13:22:19.000Z
|
example/decode_image.py
|
Eye-Remocon/Face_Recognition
|
256ba99e821b923679b85aba9a3febecb28258df
|
[
"MIT"
] | 3
|
2021-05-05T04:34:24.000Z
|
2021-05-09T03:47:03.000Z
|
import base64
def decode_img(img_string):
img_data = base64.b64decode(img_string)
filename = "temp_img.jpg"
with open(filename, "wb") as f:
f.write(img_data)
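A quick round-trip sketch for decode_img above: the sample bytes are illustrative, and the output filename is the temp_img.jpg hard-coded inside the function.

# Base64-encode a few fake JPEG-ish bytes, then let decode_img write them out.
sample = base64.b64encode(b"\xff\xd8\xff\xe0 fake jpeg bytes").decode("ascii")
decode_img(sample)

with open("temp_img.jpg", "rb") as f:
    print(f.read())   # b'\xff\xd8\xff\xe0 fake jpeg bytes'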
| 20
| 43
| 0.677778
|
import base64
def decode_img(img_string):
img_data = base64.b64decode(img_string)
filename = "temp_img.jpg"
with open(filename, "wb") as f:
f.write(img_data)
| true
| true
|
790926ac5bd77e6f1853dec33c30206d01fea08f
| 1,248
|
py
|
Python
|
multibar/core/variants/lib_info.py
|
Animatea/DiscordProgressbar
|
654807f7eddcc19b0357fb11de700e09da0379da
|
[
"Apache-2.0"
] | 12
|
2021-03-16T17:01:07.000Z
|
2021-04-26T19:16:13.000Z
|
multibar/core/variants/lib_info.py
|
Animatea/python-multibar
|
654807f7eddcc19b0357fb11de700e09da0379da
|
[
"Apache-2.0"
] | 1
|
2021-09-12T21:38:40.000Z
|
2022-02-22T20:54:15.000Z
|
multibar/core/variants/lib_info.py
|
Animatea/python-multibar
|
654807f7eddcc19b0357fb11de700e09da0379da
|
[
"Apache-2.0"
] | 5
|
2021-09-10T13:30:37.000Z
|
2021-12-31T19:26:53.000Z
|
"""
Copyright [2021] [DenyS]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import typing
__all__: typing.Sequence[str] = ("Info",)
T = typing.TypeVar("T")
class Info(typing.Generic[T]):
"""Annotation for filtering global variables.
Parameters:
-----------
value: :class:`TypeVar`
A parameter that stores the value of a certain variable.
Features:
---------
* `__repr__`: repr(Info())
Development Information.
* `__str__`: str(Info()) | Info()
Returns the stored value as a string.
"""
def __init__(self, value: T) -> None:
self.value = value
def __repr__(self) -> str:
return f"Info(value={self.value})"
def __str__(self) -> str:
return str(self.value)
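A short sketch of how the Info wrapper above can be used; the variable name and value are made up.

# Wrap a value so it can later be recognized among module globals by its type.
PROGRESS_CHAR: Info[str] = Info("#")

print(repr(PROGRESS_CHAR))   # Info(value=#)
print(str(PROGRESS_CHAR))    # #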
| 24.470588
| 72
| 0.669872
|
import typing
__all__: typing.Sequence[str] = ("Info",)
T = typing.TypeVar("T")
class Info(typing.Generic[T]):
def __init__(self, value: T) -> None:
self.value = value
def __repr__(self) -> str:
return f"Info(value={self.value})"
def __str__(self) -> str:
return str(self.value)
| true
| true
|
790926c5623b0254f0cc962a99f4ed0413033567
| 7,755
|
py
|
Python
|
blog/routes.py
|
mlewan01/flaskblog01
|
19b5035a0c99ece4c9dddaf8e0fc396a8ce0b4c3
|
[
"Apache-2.0"
] | null | null | null |
blog/routes.py
|
mlewan01/flaskblog01
|
19b5035a0c99ece4c9dddaf8e0fc396a8ce0b4c3
|
[
"Apache-2.0"
] | null | null | null |
blog/routes.py
|
mlewan01/flaskblog01
|
19b5035a0c99ece4c9dddaf8e0fc396a8ce0b4c3
|
[
"Apache-2.0"
] | null | null | null |
import secrets
import os
from PIL import Image
from flask import render_template, url_for, flash, redirect, request, abort
from blog import app, db, bcrypt, mail
from blog.forms import (RegistrationForm, LoginForm, UpdateAccountForm, PostForm, \
RequestResetForm, ResetPasswordForm)
from blog.models import User, Post
from flask_login import login_user, current_user, logout_user, login_required
from flask_mail import Message
@app.route("/")
@app.route("/home")
def home():
page = request.args.get('page', 1, type=int)
posts = Post.query.order_by(Post.date_posted.desc()).paginate(page=page, per_page=2)
return render_template('home.html', posts=posts)
@app.route("/about")
def about():
return render_template('about.html', title='About')
@app.route("/register", methods=['GET', 'POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = RegistrationForm()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
user = User(username=form.username.data, email=form.email.data, password=hashed_password)
db.session.add(user)
db.session.commit()
flash(f'Account created for {form.username.data}! You can now log in', 'success')
return redirect(url_for('login'))
return render_template('register.html', title='Register', form=form)
@app.route("/login", methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user and bcrypt.check_password_hash(user.password, form.password.data):
login_user(user, remember=form.remember.data)
next_page = request.args.get('next')
if next_page:
return redirect(next_page) if next_page else redirect(url_for('home'))
else:
return redirect(url_for('account'))
else:
flash(f'Login Unsuccessful...Please check email and password!', 'danger')
return render_template('login.html', title='Login', form=form)
@app.route("/logout")
def logout():
logout_user()
return redirect(url_for('home'))
def save_picture(form_picture):
random_hex = secrets.token_hex(8)
_, f_ext = os.path.splitext(form_picture.filename)
picture_fn = random_hex + f_ext
picture_path = os.path.join(app.root_path, 'static/profile_pics', picture_fn)
output_size = (125, 125)
i = Image.open(form_picture)
i.thumbnail(output_size)
i.save(picture_path)
return picture_fn
@app.route("/account", methods=['GET', 'POST'])
@login_required # accessible only if logged in
def account():
form = UpdateAccountForm()
if form.validate_on_submit():
if form.picture.data:
picture_file = save_picture(form.picture.data)
old_picture = current_user.image_file
old_picture_path = os.path.join(app.root_path, 'static/profile_pics', old_picture)
if os.path.exists(old_picture_path):
os.remove(old_picture_path)
else:
print("The file does not exist " + old_picture)
current_user.image_file = picture_file
current_user.username = form.username.data
current_user.email = form.email.data
db.session.commit()
flash('Your account has been updated!', 'success')
return redirect(url_for('account'))
elif request.method == 'GET':
form.username.data = current_user.username
form.email.data = current_user.email
image_file = url_for('static', filename='profile_pics/' + current_user.image_file)
return render_template('account.html', title='Account', image_file=image_file, form=form)
@app.route("/post/new", methods=['GET', 'POST'])
@login_required
def new_post():
form = PostForm()
if form.validate_on_submit():
post = Post(title=form.title.data, content=form.content.data, author=current_user)
db.session.add(post)
db.session.commit()
flash('Your post has been created!', 'success')
return redirect(url_for('home'))
return render_template('create_post.html', title='New Post', form=form, legend='New Post')
@app.route("/post/<int:post_id>")
def post(post_id):
post = Post.query.get_or_404(post_id)
return render_template('post.html', title=post.title, post=post)
@app.route("/post/<int:post_id>/update", methods=['GET', 'POST'])
@login_required
def update_post(post_id):
post = Post.query.get_or_404(post_id)
if post.author != current_user:
abort(403)
form = PostForm()
if form.validate_on_submit():
post.title = form.title.data
post.content = form.content.data
db.session.commit()
flash('Your post has been updated!', 'success')
return redirect(url_for('post', post_id=post.id))
elif request.method == 'GET':
form.title.data = post.title
form.content.data = post.content
return render_template('create_post.html', title='Update Post', form=form, legend='Update Post')
@app.route("/post/<int:post_id>/delete", methods=['POST'])
@login_required
def delete_post(post_id):
post = Post.query.get_or_404(post_id)
if post.author != current_user:
abort(403)
db.session.delete(post)
db.session.commit()
flash('Your post has been deleted!', 'success')
return redirect(url_for('home'))
@app.route("/user/<string:username>")
def user_posts(username):
page = request.args.get('page', 1, type=int)
user = User.query.filter_by(username=username).first_or_404()
posts = Post.query.filter_by(author=user) \
.order_by(Post.date_posted.desc()) \
.paginate(page=page, per_page=5)
return render_template('user_posts.html', posts=posts, user=user)
def send_reset_email(user):
token = user.get_reset_token()
msg = Message('Password Reset Request', sender='mariusz@artemlux.com',
recipients=['mariusz@artemlux.com']) # recipients=[user.email])
msg.body = f'''To reset your password, visit the following link:
{url_for('reset_token', token=token, _external=True)}
If you did not make this request then simply ignore this email and no changes will be made.
'''
mail.send(msg)
@app.route("/reset_password", methods=['GET', 'POST'])
def reset_request():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = RequestResetForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
send_reset_email(user)
flash('An email has been sent with instructions to reset your password', 'info')
return redirect(url_for('login'))
return render_template('reset_request.html', title='Reset Password', form=form)
@app.route("/reset_password/<token>", methods=['GET', 'POST'])
def reset_token(token):
if current_user.is_authenticated:
return redirect(url_for('home'))
user = User.verify_reset_token(token)
if user is None:
flash("That is an invalid or expired token", 'warning')
return redirect(url_for('reset_request'))
form = ResetPasswordForm()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
user.password = hashed_password
db.session.commit()
flash(f'Your password has been updated! You can now log in', 'success')
return redirect(url_for('login'))
return render_template('reset_token.html', title='Reset Password', form=form)
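A minimal sketch of the password hashing used by register() and login() above, exercised with flask_bcrypt directly; the Flask app and password values here are illustrative.

# Illustrative only: mirrors how register() stores and login() checks passwords.
from flask import Flask
from flask_bcrypt import Bcrypt

app = Flask(__name__)
bcrypt = Bcrypt(app)

hashed = bcrypt.generate_password_hash("hunter2").decode("utf-8")
print(bcrypt.check_password_hash(hashed, "hunter2"))   # True
print(bcrypt.check_password_hash(hashed, "wrong"))     # False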
| 37.283654
| 100
| 0.676209
|
import secrets
import os
from PIL import Image
from flask import render_template, url_for, flash, redirect, request, abort
from blog import app, db, bcrypt, mail
from blog.forms import (RegistrationForm, LoginForm, UpdateAccountForm, PostForm, \
RequestResetForm, ResetPasswordForm)
from blog.models import User, Post
from flask_login import login_user, current_user, logout_user, login_required
from flask_mail import Message
@app.route("/")
@app.route("/home")
def home():
page = request.args.get('page', 1, type=int)
posts = Post.query.order_by(Post.date_posted.desc()).paginate(page=page, per_page=2)
return render_template('home.html', posts=posts)
@app.route("/about")
def about():
return render_template('about.html', title='About')
@app.route("/register", methods=['GET', 'POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = RegistrationForm()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
user = User(username=form.username.data, email=form.email.data, password=hashed_password)
db.session.add(user)
db.session.commit()
flash(f'Account created for {form.username.data}! You can now log in', 'success')
return redirect(url_for('login'))
return render_template('register.html', title='Register', form=form)
@app.route("/login", methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user and bcrypt.check_password_hash(user.password, form.password.data):
login_user(user, remember=form.remember.data)
next_page = request.args.get('next')
if next_page:
return redirect(next_page) if next_page else redirect(url_for('home'))
else:
return redirect(url_for('account'))
else:
flash(f'Login Unsuccessful...Please check email and password!', 'danger')
return render_template('login.html', title='Login', form=form)
@app.route("/logout")
def logout():
logout_user()
return redirect(url_for('home'))
def save_picture(form_picture):
random_hex = secrets.token_hex(8)
_, f_ext = os.path.splitext(form_picture.filename)
picture_fn = random_hex + f_ext
picture_path = os.path.join(app.root_path, 'static/profile_pics', picture_fn)
output_size = (125, 125)
i = Image.open(form_picture)
i.thumbnail(output_size)
i.save(picture_path)
return picture_fn
@app.route("/account", methods=['GET', 'POST'])
@login_required
def account():
form = UpdateAccountForm()
if form.validate_on_submit():
if form.picture.data:
picture_file = save_picture(form.picture.data)
old_picture = current_user.image_file
old_picture_path = os.path.join(app.root_path, 'static/profile_pics', old_picture)
if os.path.exists(old_picture_path):
os.remove(old_picture_path)
else:
print("The file does not exist " + old_picture)
current_user.image_file = picture_file
current_user.username = form.username.data
current_user.email = form.email.data
db.session.commit()
flash('Your account has been updated!', 'success')
return redirect(url_for('account'))
elif request.method == 'GET':
form.username.data = current_user.username
form.email.data = current_user.email
image_file = url_for('static', filename='profile_pics/' + current_user.image_file)
return render_template('account.html', title='Account', image_file=image_file, form=form)
@app.route("/post/new", methods=['GET', 'POST'])
@login_required
def new_post():
form = PostForm()
if form.validate_on_submit():
post = Post(title=form.title.data, content=form.content.data, author=current_user)
db.session.add(post)
db.session.commit()
flash('Your post has been created!', 'success')
return redirect(url_for('home'))
return render_template('create_post.html', title='New Post', form=form, legend='New Post')
@app.route("/post/<int:post_id>")
def post(post_id):
post = Post.query.get_or_404(post_id)
return render_template('post.html', title=post.title, post=post)
@app.route("/post/<int:post_id>/update", methods=['GET', 'POST'])
@login_required
def update_post(post_id):
post = Post.query.get_or_404(post_id)
if post.author != current_user:
abort(403)
form = PostForm()
if form.validate_on_submit():
post.title = form.title.data
post.content = form.content.data
db.session.commit()
flash('Your post has been updated!', 'success')
return redirect(url_for('post', post_id=post.id))
elif request.method == 'GET':
form.title.data = post.title
form.content.data = post.content
return render_template('create_post.html', title='Update Post', form=form, legend='Update Post')
@app.route("/post/<int:post_id>/delete", methods=['POST'])
@login_required
def delete_post(post_id):
post = Post.query.get_or_404(post_id)
if post.author != current_user:
abort(403)
db.session.delete(post)
db.session.commit()
flash('Your post has been deleted!', 'success')
return redirect(url_for('home'))
@app.route("/user/<string:username>")
def user_posts(username):
page = request.args.get('page', 1, type=int)
user = User.query.filter_by(username=username).first_or_404()
posts = Post.query.filter_by(author=user) \
.order_by(Post.date_posted.desc()) \
.paginate(page=page, per_page=5)
return render_template('user_posts.html', posts=posts, user=user)
def send_reset_email(user):
token = user.get_reset_token()
msg = Message('Password Reset Request', sender='mariusz@artemlux.com',
recipients=['mariusz@artemlux.com'])
msg.body = f'''To reset your password, visit the following link:
{url_for('reset_token', token=token, _external=True)}
If you did not make this request then simply ignore this email and no changes will be made.
'''
mail.send(msg)
@app.route("/reset_password", methods=['GET', 'POST'])
def reset_request():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = RequestResetForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
send_reset_email(user)
flash('An email has been sent with instructions to reset your password', 'info')
return redirect(url_for('login'))
return render_template('reset_request.html', title='Reset Password', form=form)
@app.route("/reset_password/<token>", methods=['GET', 'POST'])
def reset_token(token):
if current_user.is_authenticated:
return redirect(url_for('home'))
user = User.verify_reset_token(token)
if user is None:
flash("That is an invalid or expired token", 'warning')
return redirect(url_for('reset_request'))
form = ResetPasswordForm()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
user.password = hashed_password
db.session.commit()
flash(f'Your password has been updated! You can now log in', 'success')
return redirect(url_for('login'))
return render_template('reset_token.html', title='Reset Password', form=form)
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 790926c7ee3b355a4183550658a3da6905cea595 | size: 18661 | ext: py | lang: Python
max_stars_repo_path: qa/rpc-tests/test_framework/comptool.py | max_stars_repo_name: 86b/Abosom | max_stars_repo_head_hexsha: 44dc7338b1a53b1121cb06e8aa28dca8088185af | max_stars_repo_licenses: ["MIT"] | max_stars_count: 2 | 2020-07-15T17:38:28.000Z to 2020-08-02T17:00:24.000Z
max_issues_repo_path: qa/rpc-tests/test_framework/comptool.py | max_issues_repo_name: 86b/Abosom | max_issues_repo_head_hexsha: 44dc7338b1a53b1121cb06e8aa28dca8088185af | max_issues_repo_licenses: ["MIT"] | max_issues_count: 2 | 2020-03-11T20:41:04.000Z to 2020-08-16T13:49:37.000Z
max_forks_repo_path: qa/rpc-tests/test_framework/comptool.py | max_forks_repo_name: 86b/Abosom | max_forks_repo_head_hexsha: 44dc7338b1a53b1121cb06e8aa28dca8088185af | max_forks_repo_licenses: ["MIT"] | max_forks_count: 2 | 2020-06-10T04:48:10.000Z to 2020-07-24T10:52:48.000Z
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from .mininode import *
from .blockstore import BlockStore, TxStore
from .util import p2p_port
import logging

logger = logging.getLogger("TestFramework.comptool")
'''
This is a tool for comparing two or more abosomds to each other
using a script provided.
To use, create a class that implements get_tests(), and pass it in
as the test generator to TestManager. get_tests() should be a python
generator that returns TestInstance objects. See below for definition.
'''
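# A minimal sketch (illustrative only) of a test generator as described above.
# The create_block()/create_tx() helpers and the `datadir`/`nodes` variables are
# assumed to exist in the calling test and are not part of this module:
#
#   class ExampleTestGenerator(object):
#       def get_tests(self):
#           block = create_block()
#           yield TestInstance([[block, True]])   # expect the block to be accepted
#           tx = create_tx()
#           yield TestInstance([[tx, RejectResult(16, b'bad-txns')]], sync_every_tx=True)
#
#   test = TestManager(ExampleTestGenerator(), datadir)
#   test.add_all_connections(nodes)
#   test.run()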
# TestNode behaves as follows:
# Configure with a BlockStore and TxStore
# on_inv: log the message but don't request
# on_headers: log the chain tip
# on_pong: update ping response map (for synchronization)
# on_getheaders: provide headers via BlockStore
# on_getdata: provide blocks via BlockStore
global mininode_lock
class RejectResult(object):
'''
Outcome that expects rejection of a transaction or block.
'''
def __init__(self, code, reason=b''):
self.code = code
self.reason = reason
def match(self, other):
if self.code != other.code:
return False
return other.reason.startswith(self.reason)
def __repr__(self):
return '%i:%s' % (self.code,self.reason or '*')
class TestNode(NodeConnCB):
def __init__(self, block_store, tx_store):
NodeConnCB.__init__(self)
self.conn = None
self.bestblockhash = None
self.block_store = block_store
self.block_request_map = {}
self.tx_store = tx_store
self.tx_request_map = {}
self.block_reject_map = {}
self.tx_reject_map = {}
# When the pingmap is non-empty we're waiting for
# a response
self.pingMap = {}
self.lastInv = []
self.closed = False
def on_close(self, conn):
self.closed = True
def add_connection(self, conn):
self.conn = conn
def on_headers(self, conn, message):
if len(message.headers) > 0:
best_header = message.headers[-1]
best_header.calc_sha256()
self.bestblockhash = best_header.sha256
def on_getheaders(self, conn, message):
response = self.block_store.headers_for(message.locator, message.hashstop)
if response is not None:
conn.send_message(response)
def on_getdata(self, conn, message):
[conn.send_message(r) for r in self.block_store.get_blocks(message.inv)]
[conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)]
for i in message.inv:
if i.type == 1:
self.tx_request_map[i.hash] = True
elif i.type == 2:
self.block_request_map[i.hash] = True
def on_inv(self, conn, message):
self.lastInv = [x.hash for x in message.inv]
def on_pong(self, conn, message):
try:
del self.pingMap[message.nonce]
except KeyError:
raise AssertionError("Got pong for unknown ping [%s]" % repr(message))
def on_reject(self, conn, message):
if message.message == b'tx':
self.tx_reject_map[message.data] = RejectResult(message.code, message.reason)
if message.message == b'block':
self.block_reject_map[message.data] = RejectResult(message.code, message.reason)
def send_inv(self, obj):
mtype = 2 if isinstance(obj, CBlock) else 1
self.conn.send_message(msg_inv([CInv(mtype, obj.sha256)]))
def send_getheaders(self):
# We ask for headers from their last tip.
m = msg_getheaders()
m.locator = self.block_store.get_locator(self.bestblockhash)
self.conn.send_message(m)
def send_header(self, header):
m = msg_headers()
m.headers.append(header)
self.conn.send_message(m)
# This assumes BIP31
def send_ping(self, nonce):
self.pingMap[nonce] = True
self.conn.send_message(msg_ping(nonce))
def received_ping_response(self, nonce):
return nonce not in self.pingMap
def send_mempool(self):
self.lastInv = []
self.conn.send_message(msg_mempool())
# TestInstance:
#
# Instances of these are generated by the test generator, and fed into the
# comptool.
#
# "blocks_and_transactions" should be an array of
# [obj, True/False/None, hash/None]:
# - obj is either a CBlock, CBlockHeader, or a CTransaction, and
# - the second value indicates whether the object should be accepted
# into the blockchain or mempool (for tests where we expect a certain
# answer), or "None" if we don't expect a certain answer and are just
# comparing the behavior of the nodes being tested.
# - the third value is the hash to test the tip against (if None or omitted,
# use the hash of the block)
# - NOTE: if a block header, no test is performed; instead the header is
# just added to the block_store. This is to facilitate block delivery
# when communicating with headers-first clients (when withholding an
# intermediate block).
# sync_every_block: if True, then each block will be inv'ed, synced, and
# nodes will be tested based on the outcome for the block. If False,
# then inv's accumulate until all blocks are processed (or max inv size
# is reached) and then sent out in one inv message. Then the final block
# will be synced across all connections, and the outcome of the final
# block will be tested.
# sync_every_tx: analogous to behavior for sync_every_block, except if outcome
# on the final tx is None, then contents of entire mempool are compared
# across all connections. (If outcome of final tx is specified as true
# or false, then only the last tx is tested against outcome.)
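# For example (illustrative values only):
#   TestInstance([[block_a, True], [block_b, False, block_a.sha256]])
# expects block_a to be accepted, block_b to be rejected, and the tip to still
# be block_a after block_b has been delivered.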
class TestInstance(object):
def __init__(self, objects=None, sync_every_block=True, sync_every_tx=False):
self.blocks_and_transactions = objects if objects else []
self.sync_every_block = sync_every_block
self.sync_every_tx = sync_every_tx
class TestManager(object):
def __init__(self, testgen, datadir):
self.test_generator = testgen
self.connections = []
self.test_nodes = []
self.block_store = BlockStore(datadir)
self.tx_store = TxStore(datadir)
self.ping_counter = 1
def add_all_connections(self, nodes):
for i in range(len(nodes)):
# Create a p2p connection to each node
test_node = TestNode(self.block_store, self.tx_store)
self.test_nodes.append(test_node)
self.connections.append(NodeConn('127.0.0.1', p2p_port(i), nodes[i], test_node))
# Make sure the TestNode (callback class) has a reference to its
# associated NodeConn
test_node.add_connection(self.connections[-1])
def clear_all_connections(self):
self.connections = []
self.test_nodes = []
def wait_for_disconnections(self):
def disconnected():
return all(node.closed for node in self.test_nodes)
return wait_until(disconnected, timeout=10)
def wait_for_verack(self):
def veracked():
return all(node.verack_received for node in self.test_nodes)
return wait_until(veracked, timeout=10)
def wait_for_pings(self, counter, timeout=float('inf')):
def received_pongs():
return all(node.received_ping_response(counter) for node in self.test_nodes)
return wait_until(received_pongs, timeout=timeout)
# sync_blocks: Wait for all connections to request the blockhash given
# then send get_headers to find out the tip of each node, and synchronize
# the response by using a ping (and waiting for pong with same nonce).
def sync_blocks(self, blockhash, num_blocks):
def blocks_requested():
return all(
blockhash in node.block_request_map and node.block_request_map[blockhash]
for node in self.test_nodes
)
# --> error if not requested
if not wait_until(blocks_requested, attempts=20*num_blocks, sleep=0.1):
raise AssertionError("Not all nodes requested block")
# Send getheaders message
[ c.cb.send_getheaders() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter, timeout=300)
self.ping_counter += 1
# Analogous to sync_block (see above)
def sync_transaction(self, txhash, num_events):
# Wait for nodes to request transaction (50ms sleep * 20 tries * num_events)
def transaction_requested():
return all(
txhash in node.tx_request_map and node.tx_request_map[txhash]
for node in self.test_nodes
)
# --> error if not requested
if not wait_until(transaction_requested, attempts=20*num_events):
raise AssertionError("Not all nodes requested transaction")
# Get the mempool
[ c.cb.send_mempool() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Sort inv responses from each node
with mininode_lock:
[ c.cb.lastInv.sort() for c in self.connections ]
# Verify that the tip of each connection all agree with each other, and
# with the expected outcome (if given)
def check_results(self, blockhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
if c.cb.bestblockhash != self.connections[0].cb.bestblockhash:
return False
elif isinstance(outcome, RejectResult): # Check that block was rejected w/ code
if c.cb.bestblockhash == blockhash:
return False
if blockhash not in c.cb.block_reject_map:
logger.error('Block not in reject map: %064x' % (blockhash))
return False
if not outcome.match(c.cb.block_reject_map[blockhash]):
logger.error('Block rejected with %s instead of expected %s: %064x' % (c.cb.block_reject_map[blockhash], outcome, blockhash))
return False
elif ((c.cb.bestblockhash == blockhash) != outcome):
return False
return True
# Either check that the mempools all agree with each other, or that
# txhash's presence in the mempool matches the outcome specified.
# This is somewhat of a strange comparison, in that we're either comparing
# a particular tx to an outcome, or the entire mempools altogether;
# perhaps it would be useful to add the ability to check explicitly that
# a particular tx's existence in the mempool is the same across all nodes.
def check_mempool(self, txhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
# Make sure the mempools agree with each other
if c.cb.lastInv != self.connections[0].cb.lastInv:
return False
elif isinstance(outcome, RejectResult): # Check that tx was rejected w/ code
if txhash in c.cb.lastInv:
return False
if txhash not in c.cb.tx_reject_map:
logger.error('Tx not in reject map: %064x' % (txhash))
return False
if not outcome.match(c.cb.tx_reject_map[txhash]):
logger.error('Tx rejected with %s instead of expected %s: %064x' % (c.cb.tx_reject_map[txhash], outcome, txhash))
return False
elif ((txhash in c.cb.lastInv) != outcome):
return False
return True
def run(self):
# Wait until verack is received
self.wait_for_verack()
test_number = 1
for test_instance in self.test_generator.get_tests():
# We use these variables to keep track of the last block
# and last transaction in the tests, which are used
# if we're not syncing on every block or every tx.
[ block, block_outcome, tip ] = [ None, None, None ]
[ tx, tx_outcome ] = [ None, None ]
invqueue = []
for test_obj in test_instance.blocks_and_transactions:
b_or_t = test_obj[0]
outcome = test_obj[1]
# Determine if we're dealing with a block or tx
if isinstance(b_or_t, CBlock): # Block test runner
block = b_or_t
block_outcome = outcome
tip = block.sha256
# each test_obj can have an optional third argument
# to specify the tip we should compare with
# (default is to use the block being tested)
if len(test_obj) >= 3:
tip = test_obj[2]
# Add to shared block_store, set as current block
# If there was an open getdata request for the block
# previously, and we didn't have an entry in the
# block_store, then immediately deliver, because the
# node wouldn't send another getdata request while
# the earlier one is outstanding.
first_block_with_hash = True
if self.block_store.get(block.sha256) is not None:
first_block_with_hash = False
with mininode_lock:
self.block_store.add_block(block)
for c in self.connections:
if first_block_with_hash and block.sha256 in c.cb.block_request_map and c.cb.block_request_map[block.sha256] == True:
# There was a previous request for this block hash
# Most likely, we delivered a header for this block
# but never had the block to respond to the getdata
c.send_message(msg_block(block))
else:
c.cb.block_request_map[block.sha256] = False
# Either send inv's to each node and sync, or add
# to invqueue for later inv'ing.
if (test_instance.sync_every_block):
# if we expect success, send inv and sync every block
# if we expect failure, just push the block and see what happens.
if outcome == True:
[ c.cb.send_inv(block) for c in self.connections ]
self.sync_blocks(block.sha256, 1)
else:
[ c.send_message(msg_block(block)) for c in self.connections ]
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter, timeout=300)
self.ping_counter += 1
if (not self.check_results(tip, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
block_header = CBlockHeader(block)
[ c.cb.send_header(block_header) for c in self.connections ]
elif isinstance(b_or_t, CBlockHeader):
block_header = b_or_t
self.block_store.add_header(block_header)
[ c.cb.send_header(block_header) for c in self.connections ]
else: # Tx test runner
assert(isinstance(b_or_t, CTransaction))
tx = b_or_t
tx_outcome = outcome
# Add to shared tx store and clear map entry
with mininode_lock:
self.tx_store.add_transaction(tx)
for c in self.connections:
c.cb.tx_request_map[tx.sha256] = False
# Again, either inv to all nodes or save for later
if (test_instance.sync_every_tx):
[ c.cb.send_inv(tx) for c in self.connections ]
self.sync_transaction(tx.sha256, 1)
if (not self.check_mempool(tx.sha256, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(1, tx.sha256))
# Ensure we're not overflowing the inv queue
if len(invqueue) == MAX_INV_SZ:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
# Do final sync if we weren't syncing on every block or every tx.
if (not test_instance.sync_every_block and block is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_blocks(block.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_results(tip, block_outcome)):
raise AssertionError("Block test failed at test %d" % test_number)
if (not test_instance.sync_every_tx and tx is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_transaction(tx.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_mempool(tx.sha256, tx_outcome)):
raise AssertionError("Mempool test failed at test %d" % test_number)
logger.info("Test %d: PASS" % test_number)
test_number += 1
[ c.disconnect_node() for c in self.connections ]
self.wait_for_disconnections()
self.block_store.close()
self.tx_store.close()
avg_line_length: 45.184019 | max_line_length: 149 | alphanum_fraction: 0.600825
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 7909281508b24be74f4d08069d650825511b6326 | size: 22039 | ext: py | lang: Python
max_stars_repo_path: cleanrl/experiments/dqn2_atari_visual.py | max_stars_repo_name: manabukosaka/cleanrl | max_stars_repo_head_hexsha: 31ae5f640ac7f7225375bc51759c4e8baa4880b4 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 1 | 2021-08-04T00:03:14.000Z to 2021-08-04T00:03:14.000Z
max_issues_repo_path: cleanrl/experiments/dqn2_atari_visual.py | max_issues_repo_name: manabukosaka/cleanrl | max_issues_repo_head_hexsha: 31ae5f640ac7f7225375bc51759c4e8baa4880b4 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null
max_forks_repo_path: cleanrl/experiments/dqn2_atari_visual.py | max_forks_repo_name: manabukosaka/cleanrl | max_forks_repo_head_hexsha: 31ae5f640ac7f7225375bc51759c4e8baa4880b4 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null
# https://github.com/facebookresearch/torchbeast/blob/master/torchbeast/core/environment.py
import numpy as np
from collections import deque
import gym
from gym import spaces
import cv2
cv2.ocl.setUseOpenCL(False)
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# for Qbert sometimes we stay in lives == 0 condition for a few frames
# so it's important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2: self._obs_buffer[0] = obs
if i == self._skip - 1: self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None):
"""
Warp frames to 84x84 as done in the Nature paper and later work.
If the environment uses dictionary observations, `dict_space_key` can be specified which indicates which
observation should be warped.
"""
super().__init__(env)
self._width = width
self._height = height
self._grayscale = grayscale
self._key = dict_space_key
if self._grayscale:
num_colors = 1
else:
num_colors = 3
new_space = gym.spaces.Box(
low=0,
high=255,
shape=(self._height, self._width, num_colors),
dtype=np.uint8,
)
if self._key is None:
original_space = self.observation_space
self.observation_space = new_space
else:
original_space = self.observation_space.spaces[self._key]
self.observation_space.spaces[self._key] = new_space
assert original_space.dtype == np.uint8 and len(original_space.shape) == 3
def observation(self, obs):
if self._key is None:
frame = obs
else:
frame = obs[self._key]
if self._grayscale:
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(
frame, (self._width, self._height), interpolation=cv2.INTER_AREA
)
if self._grayscale:
frame = np.expand_dims(frame, -1)
if self._key is None:
obs = frame
else:
obs = obs.copy()
obs[self._key] = frame
return obs
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
baselines.common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(low=0, high=255, shape=((shp[0] * k,)+shp[1:]), dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)
def observation(self, observation):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
return np.array(observation).astype(np.float32) / 255.0
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model.
You'd not believe how complex the previous solution was."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=0)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
def count(self):
frames = self._force()
return frames.shape[frames.ndim - 1]
def frame(self, i):
return self._force()[..., i]
def wrap_atari(env, max_episode_steps=None):
assert 'NoFrameskip' in env.spec.id
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
assert max_episode_steps is None
return env
class ImageToPyTorch(gym.ObservationWrapper):
"""
    Image shape to channels x width x height
"""
def __init__(self, env):
super(ImageToPyTorch, self).__init__(env)
old_shape = self.observation_space.shape
self.observation_space = gym.spaces.Box(
low=0,
high=255,
shape=(old_shape[-1], old_shape[0], old_shape[1]),
dtype=np.uint8,
)
def observation(self, observation):
return np.transpose(observation, axes=(2, 0, 1))
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
"""Configure environment for DeepMind-style Atari.
"""
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
env = ImageToPyTorch(env)
if frame_stack:
env = FrameStack(env, 4)
return env
# Reference: https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
import argparse
from distutils.util import strtobool
import collections
import numpy as np
import gym
from gym.wrappers import TimeLimit, Monitor
from gym.spaces import Discrete, Box, MultiBinary, MultiDiscrete, Space
import time
import random
import os
import matplotlib
matplotlib.use('Agg')
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from PIL import Image
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Double DQN Agent')
# Common arguments
parser.add_argument('--exp-name', type=str, default=os.path.basename(__file__).rstrip(".py"),
help='the name of this experiment')
parser.add_argument('--gym-id', type=str, default="BreakoutNoFrameskip-v4",
help='the id of the gym environment')
parser.add_argument('--learning-rate', type=float, default=1e-4,
help='the learning rate of the optimizer')
parser.add_argument('--seed', type=int, default=2,
help='seed of the experiment')
parser.add_argument('--total-timesteps', type=int, default=10000000,
help='total timesteps of the experiments')
parser.add_argument('--torch-deterministic', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help='if toggled, `torch.backends.cudnn.deterministic=False`')
parser.add_argument('--cuda', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help='if toggled, cuda will not be enabled by default')
parser.add_argument('--prod-mode', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
help='run the script in production mode and use wandb to log outputs')
parser.add_argument('--capture-video', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
                        help='whether to capture videos of the agent performances (check out `videos` folder)')
parser.add_argument('--wandb-project-name', type=str, default="cleanRL",
help="the wandb's project name")
parser.add_argument('--wandb-entity', type=str, default=None,
help="the entity (team) of wandb's project")
# Algorithm specific arguments
parser.add_argument('--buffer-size', type=int, default=1000000,
help='the replay memory buffer size')
parser.add_argument('--gamma', type=float, default=0.99,
help='the discount factor gamma')
parser.add_argument('--target-network-frequency', type=int, default=1000,
help="the timesteps it takes to update the target network")
parser.add_argument('--max-grad-norm', type=float, default=0.5,
help='the maximum norm for the gradient clipping')
parser.add_argument('--batch-size', type=int, default=32,
help="the batch size of sample from the reply memory")
parser.add_argument('--start-e', type=float, default=1.,
help="the starting epsilon for exploration")
parser.add_argument('--end-e', type=float, default=0.02,
help="the ending epsilon for exploration")
parser.add_argument('--exploration-fraction', type=float, default=0.10,
help="the fraction of `total-timesteps` it takes from start-e to go end-e")
parser.add_argument('--learning-starts', type=int, default=80000,
help="timestep to start learning")
parser.add_argument('--train-frequency', type=int, default=4,
help="the frequency of training")
args = parser.parse_args()
if not args.seed:
args.seed = int(time.time())
class QValueVisualizationWrapper(gym.Wrapper):
def __init__(self, env):
super().__init__(env)
self.env.reset()
self.image_shape = self.env.render(mode="rgb_array").shape
self.q_values = [[0.,0.,0.,0.]]
# self.metadata['video.frames_per_second'] = 60
def set_q_values(self, q_values):
self.q_values = q_values
def render(self, mode="human"):
if mode=="rgb_array":
env_rgb_array = super().render(mode)
fig, ax = plt.subplots(figsize=(self.image_shape[1]/100,self.image_shape[0]/100), constrained_layout=True, dpi=100)
df = pd.DataFrame(np.array(self.q_values).T)
sns.barplot(x=df.index, y=0, data=df, ax=ax)
ax.set(xlabel='actions', ylabel='q-values')
fig.canvas.draw()
X = np.array(fig.canvas.renderer.buffer_rgba())
Image.fromarray(X)
# Image.fromarray(X)
rgb_image = np.array(Image.fromarray(X).convert('RGB'))
plt.close(fig)
q_value_rgb_array = rgb_image
return np.append(env_rgb_array, q_value_rgb_array, axis=1)
else:
super().render(mode)
# TRY NOT TO MODIFY: setup the environment
experiment_name = f"{args.gym_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
writer = SummaryWriter(f"runs/{experiment_name}")
writer.add_text('hyperparameters', "|param|value|\n|-|-|\n%s" % (
'\n'.join([f"|{key}|{value}|" for key, value in vars(args).items()])))
if args.prod_mode:
import wandb
wandb.init(project=args.wandb_project_name, entity=args.wandb_entity, sync_tensorboard=True, config=vars(args), name=experiment_name, monitor_gym=True, save_code=True)
writer = SummaryWriter(f"/tmp/{experiment_name}")
# TRY NOT TO MODIFY: seeding
device = torch.device('cuda' if torch.cuda.is_available() and args.cuda else 'cpu')
env = gym.make(args.gym_id)
env = wrap_atari(env)
env = gym.wrappers.RecordEpisodeStatistics(env) # records episode reward in `info['episode']['r']`
if args.capture_video:
env = QValueVisualizationWrapper(env)
env = Monitor(env, f'videos/{experiment_name}')
env = wrap_deepmind(
env,
clip_rewards=True,
frame_stack=True,
scale=False,
)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = args.torch_deterministic
env.seed(args.seed)
env.action_space.seed(args.seed)
env.observation_space.seed(args.seed)
# respect the default timelimit
assert isinstance(env.action_space, Discrete), "only discrete action space is supported"
# modified from https://github.com/seungeunrho/minimalRL/blob/master/dqn.py#
class ReplayBuffer():
def __init__(self, buffer_limit):
self.buffer = collections.deque(maxlen=buffer_limit)
def put(self, transition):
self.buffer.append(transition)
def sample(self, n):
mini_batch = random.sample(self.buffer, n)
s_lst, a_lst, r_lst, s_prime_lst, done_mask_lst = [], [], [], [], []
for transition in mini_batch:
s, a, r, s_prime, done_mask = transition
s_lst.append(s)
a_lst.append(a)
r_lst.append(r)
s_prime_lst.append(s_prime)
done_mask_lst.append(done_mask)
return np.array(s_lst), np.array(a_lst), \
np.array(r_lst), np.array(s_prime_lst), \
np.array(done_mask_lst)
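    # Note: with the Atari wrappers above (grayscale, 84x84, channels-first,
    # 4-frame stack), rb.sample(32) yields observation arrays of shape
    # (32, 4, 84, 84) plus matching action, reward, next-obs and done arrays.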
# ALGO LOGIC: initialize agent here:
# tricks taken from https://github.com/cpnota/autonomous-learning-library/blob/6d1111afce0d1582de463326f7d078a86e850551/all/presets/atari/models/__init__.py#L16
# apparently matters
class Linear0(nn.Linear):
def reset_parameters(self):
nn.init.constant_(self.weight, 0.0)
if self.bias is not None:
nn.init.constant_(self.bias, 0.0)
class Scale(nn.Module):
def __init__(self, scale):
super().__init__()
self.scale = scale
def forward(self, x):
return x * self.scale
class QNetwork(nn.Module):
def __init__(self, frames=4):
super(QNetwork, self).__init__()
self.network = nn.Sequential(
Scale(1/255),
nn.Conv2d(frames, 32, 8, stride=4),
nn.ReLU(),
nn.Conv2d(32, 64, 4, stride=2),
nn.ReLU(),
nn.Conv2d(64, 64, 3, stride=1),
nn.ReLU(),
nn.Flatten(),
nn.Linear(3136, 512),
nn.ReLU(),
Linear0(512, env.action_space.n)
)
def forward(self, x):
x = torch.Tensor(x).to(device)
return self.network(x)
def linear_schedule(start_e: float, end_e: float, duration: int, t: int):
slope = (end_e - start_e) / duration
return max(slope * t + start_e, end_e)
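# e.g. with start_e=1.0, end_e=0.02, duration=1000:
#   t=0 -> 1.0, t=500 -> 0.51, t>=1000 -> clipped to 0.02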
rb = ReplayBuffer(args.buffer_size)
q_network = QNetwork().to(device)
target_network = QNetwork().to(device)
target_network.load_state_dict(q_network.state_dict())
optimizer = optim.Adam(q_network.parameters(), lr=args.learning_rate)
loss_fn = nn.MSELoss()
print(device.__repr__())
print(q_network)
# TRY NOT TO MODIFY: start the game
obs = env.reset()
episode_reward = 0
for global_step in range(args.total_timesteps):
# ALGO LOGIC: put action logic here
epsilon = linear_schedule(args.start_e, args.end_e, args.exploration_fraction*args.total_timesteps, global_step)
obs = np.array(obs)
logits = q_network.forward(obs.reshape((1,)+obs.shape))
if args.capture_video:
env.set_q_values(logits.tolist())
if random.random() < epsilon:
action = env.action_space.sample()
else:
action = torch.argmax(logits, dim=1).tolist()[0]
# TRY NOT TO MODIFY: execute the game and log data.
next_obs, reward, done, info = env.step(action)
episode_reward += reward
# TRY NOT TO MODIFY: record rewards for plotting purposes
if 'episode' in info.keys():
print(f"global_step={global_step}, episode_reward={info['episode']['r']}")
writer.add_scalar("charts/episode_reward", info['episode']['r'], global_step)
writer.add_scalar("charts/epsilon", epsilon, global_step)
# ALGO LOGIC: training.
rb.put((obs, action, reward, next_obs, done))
if global_step > args.learning_starts and global_step % args.train_frequency == 0:
s_obs, s_actions, s_rewards, s_next_obses, s_dones = rb.sample(args.batch_size)
with torch.no_grad():
# target_max = torch.max(target_network.forward(s_next_obses), dim=1)[0]
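                # Double DQN target (cf. the vanilla max above): the online
                # q_network selects the argmax action for s', and the frozen
                # target_network evaluates it, reducing overestimation bias.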
current_value = q_network.forward(s_next_obses)
target_value = target_network.forward(s_next_obses)
target_max = target_value.gather(1, torch.max(current_value, 1)[1].unsqueeze(1)).squeeze(1)
td_target = torch.Tensor(s_rewards).to(device) + args.gamma * target_max * (1 - torch.Tensor(s_dones).to(device))
old_val = q_network.forward(s_obs).gather(1, torch.LongTensor(s_actions).view(-1,1).to(device)).squeeze()
loss = loss_fn(td_target, old_val)
writer.add_scalar("losses/td_loss", loss, global_step)
        # optimize the model
optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(list(q_network.parameters()), args.max_grad_norm)
optimizer.step()
# update the target network
if global_step % args.target_network_frequency == 0:
target_network.load_state_dict(q_network.state_dict())
# TRY NOT TO MODIFY: CRUCIAL step easy to overlook
obs = next_obs
if done:
# important to note that because `EpisodicLifeEnv` wrapper is applied,
# the real episode reward is actually the sum of episode reward of 5 lives
# which we record through `info['episode']['r']` provided by gym.wrappers.RecordEpisodeStatistics
obs, episode_reward = env.reset(), 0
env.close()
writer.close()
avg_line_length: 38.129758 | max_line_length: 171 | alphanum_fraction: 0.632742
import numpy as np
from collections import deque
import gym
from gym import spaces
import cv2
cv2.ocl.setUseOpenCL(False)
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1)
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
self._skip = skip
def step(self, action):
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2: self._obs_buffer[0] = obs
if i == self._skip - 1: self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None):
super().__init__(env)
self._width = width
self._height = height
self._grayscale = grayscale
self._key = dict_space_key
if self._grayscale:
num_colors = 1
else:
num_colors = 3
new_space = gym.spaces.Box(
low=0,
high=255,
shape=(self._height, self._width, num_colors),
dtype=np.uint8,
)
if self._key is None:
original_space = self.observation_space
self.observation_space = new_space
else:
original_space = self.observation_space.spaces[self._key]
self.observation_space.spaces[self._key] = new_space
assert original_space.dtype == np.uint8 and len(original_space.shape) == 3
def observation(self, obs):
if self._key is None:
frame = obs
else:
frame = obs[self._key]
if self._grayscale:
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(
frame, (self._width, self._height), interpolation=cv2.INTER_AREA
)
if self._grayscale:
frame = np.expand_dims(frame, -1)
if self._key is None:
obs = frame
else:
obs = obs.copy()
obs[self._key] = frame
return obs
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(low=0, high=255, shape=((shp[0] * k,)+shp[1:]), dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)
def observation(self, observation):
return np.array(observation).astype(np.float32) / 255.0
class LazyFrames(object):
def __init__(self, frames):
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=0)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
def count(self):
frames = self._force()
return frames.shape[frames.ndim - 1]
def frame(self, i):
return self._force()[..., i]
def wrap_atari(env, max_episode_steps=None):
assert 'NoFrameskip' in env.spec.id
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
assert max_episode_steps is None
return env
class ImageToPyTorch(gym.ObservationWrapper):
def __init__(self, env):
super(ImageToPyTorch, self).__init__(env)
old_shape = self.observation_space.shape
self.observation_space = gym.spaces.Box(
low=0,
high=255,
shape=(old_shape[-1], old_shape[0], old_shape[1]),
dtype=np.uint8,
)
def observation(self, observation):
return np.transpose(observation, axes=(2, 0, 1))
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
env = ImageToPyTorch(env)
if frame_stack:
env = FrameStack(env, 4)
return env
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
import argparse
from distutils.util import strtobool
import collections
import numpy as np
import gym
from gym.wrappers import TimeLimit, Monitor
from gym.spaces import Discrete, Box, MultiBinary, MultiDiscrete, Space
import time
import random
import os
import matplotlib
matplotlib.use('Agg')
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from PIL import Image
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Double DQN Agent')
parser.add_argument('--exp-name', type=str, default=os.path.basename(__file__).rstrip(".py"),
help='the name of this experiment')
parser.add_argument('--gym-id', type=str, default="BreakoutNoFrameskip-v4",
help='the id of the gym environment')
parser.add_argument('--learning-rate', type=float, default=1e-4,
help='the learning rate of the optimizer')
parser.add_argument('--seed', type=int, default=2,
help='seed of the experiment')
parser.add_argument('--total-timesteps', type=int, default=10000000,
help='total timesteps of the experiments')
parser.add_argument('--torch-deterministic', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help='if toggled, `torch.backends.cudnn.deterministic=False`')
parser.add_argument('--cuda', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help='if toggled, cuda will not be enabled by default')
parser.add_argument('--prod-mode', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
help='run the script in production mode and use wandb to log outputs')
parser.add_argument('--capture-video', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
help='weather to capture videos of the agent performances (check out `videos` folder)')
parser.add_argument('--wandb-project-name', type=str, default="cleanRL",
help="the wandb's project name")
parser.add_argument('--wandb-entity', type=str, default=None,
help="the entity (team) of wandb's project")
parser.add_argument('--buffer-size', type=int, default=1000000,
help='the replay memory buffer size')
parser.add_argument('--gamma', type=float, default=0.99,
help='the discount factor gamma')
parser.add_argument('--target-network-frequency', type=int, default=1000,
help="the timesteps it takes to update the target network")
parser.add_argument('--max-grad-norm', type=float, default=0.5,
help='the maximum norm for the gradient clipping')
parser.add_argument('--batch-size', type=int, default=32,
help="the batch size of sample from the reply memory")
parser.add_argument('--start-e', type=float, default=1.,
help="the starting epsilon for exploration")
parser.add_argument('--end-e', type=float, default=0.02,
help="the ending epsilon for exploration")
parser.add_argument('--exploration-fraction', type=float, default=0.10,
help="the fraction of `total-timesteps` it takes from start-e to go end-e")
parser.add_argument('--learning-starts', type=int, default=80000,
help="timestep to start learning")
parser.add_argument('--train-frequency', type=int, default=4,
help="the frequency of training")
args = parser.parse_args()
if not args.seed:
args.seed = int(time.time())
class QValueVisualizationWrapper(gym.Wrapper):
def __init__(self, env):
super().__init__(env)
self.env.reset()
self.image_shape = self.env.render(mode="rgb_array").shape
self.q_values = [[0.,0.,0.,0.]]
def set_q_values(self, q_values):
self.q_values = q_values
def render(self, mode="human"):
if mode=="rgb_array":
env_rgb_array = super().render(mode)
fig, ax = plt.subplots(figsize=(self.image_shape[1]/100,self.image_shape[0]/100), constrained_layout=True, dpi=100)
df = pd.DataFrame(np.array(self.q_values).T)
sns.barplot(x=df.index, y=0, data=df, ax=ax)
ax.set(xlabel='actions', ylabel='q-values')
fig.canvas.draw()
X = np.array(fig.canvas.renderer.buffer_rgba())
Image.fromarray(X)
rgb_image = np.array(Image.fromarray(X).convert('RGB'))
plt.close(fig)
q_value_rgb_array = rgb_image
return np.append(env_rgb_array, q_value_rgb_array, axis=1)
else:
super().render(mode)
experiment_name = f"{args.gym_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
writer = SummaryWriter(f"runs/{experiment_name}")
writer.add_text('hyperparameters', "|param|value|\n|-|-|\n%s" % (
'\n'.join([f"|{key}|{value}|" for key, value in vars(args).items()])))
if args.prod_mode:
import wandb
wandb.init(project=args.wandb_project_name, entity=args.wandb_entity, sync_tensorboard=True, config=vars(args), name=experiment_name, monitor_gym=True, save_code=True)
writer = SummaryWriter(f"/tmp/{experiment_name}")
device = torch.device('cuda' if torch.cuda.is_available() and args.cuda else 'cpu')
env = gym.make(args.gym_id)
env = wrap_atari(env)
env = gym.wrappers.RecordEpisodeStatistics(env)
if args.capture_video:
env = QValueVisualizationWrapper(env)
env = Monitor(env, f'videos/{experiment_name}')
env = wrap_deepmind(
env,
clip_rewards=True,
frame_stack=True,
scale=False,
)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = args.torch_deterministic
env.seed(args.seed)
env.action_space.seed(args.seed)
env.observation_space.seed(args.seed)
assert isinstance(env.action_space, Discrete), "only discrete action space is supported"
class ReplayBuffer():
def __init__(self, buffer_limit):
self.buffer = collections.deque(maxlen=buffer_limit)
def put(self, transition):
self.buffer.append(transition)
def sample(self, n):
mini_batch = random.sample(self.buffer, n)
s_lst, a_lst, r_lst, s_prime_lst, done_mask_lst = [], [], [], [], []
for transition in mini_batch:
s, a, r, s_prime, done_mask = transition
s_lst.append(s)
a_lst.append(a)
r_lst.append(r)
s_prime_lst.append(s_prime)
done_mask_lst.append(done_mask)
return np.array(s_lst), np.array(a_lst), \
np.array(r_lst), np.array(s_prime_lst), \
np.array(done_mask_lst)
class Linear0(nn.Linear):
def reset_parameters(self):
nn.init.constant_(self.weight, 0.0)
if self.bias is not None:
nn.init.constant_(self.bias, 0.0)
class Scale(nn.Module):
def __init__(self, scale):
super().__init__()
self.scale = scale
def forward(self, x):
return x * self.scale
class QNetwork(nn.Module):
def __init__(self, frames=4):
super(QNetwork, self).__init__()
self.network = nn.Sequential(
Scale(1/255),
nn.Conv2d(frames, 32, 8, stride=4),
nn.ReLU(),
nn.Conv2d(32, 64, 4, stride=2),
nn.ReLU(),
nn.Conv2d(64, 64, 3, stride=1),
nn.ReLU(),
nn.Flatten(),
nn.Linear(3136, 512),
nn.ReLU(),
Linear0(512, env.action_space.n)
)
def forward(self, x):
x = torch.Tensor(x).to(device)
return self.network(x)
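# Note: the 3136 input features to the first Linear layer assume the standard
# 84x84x4 Atari observations produced by wrap_deepmind (84 -> 20 -> 9 -> 7 across
# the three conv layers, and 64 * 7 * 7 = 3136).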
def linear_schedule(start_e: float, end_e: float, duration: int, t: int):
slope = (end_e - start_e) / duration
return max(slope * t + start_e, end_e)
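# Rough illustration of the schedule (assuming, purely for example, total-timesteps=10_000_000
# with the defaults above, so duration = 0.10 * 10_000_000 = 1_000_000 steps):
#   linear_schedule(1.0, 0.02, 1_000_000, 0)         -> 1.0
#   linear_schedule(1.0, 0.02, 1_000_000, 500_000)   -> ~0.51
#   linear_schedule(1.0, 0.02, 1_000_000, 2_000_000) -> 0.02 (clamped at end_e)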
rb = ReplayBuffer(args.buffer_size)
q_network = QNetwork().to(device)
target_network = QNetwork().to(device)
target_network.load_state_dict(q_network.state_dict())
optimizer = optim.Adam(q_network.parameters(), lr=args.learning_rate)
loss_fn = nn.MSELoss()
print(device.__repr__())
print(q_network)
obs = env.reset()
episode_reward = 0
for global_step in range(args.total_timesteps):
epsilon = linear_schedule(args.start_e, args.end_e, args.exploration_fraction*args.total_timesteps, global_step)
obs = np.array(obs)
logits = q_network.forward(obs.reshape((1,)+obs.shape))
if args.capture_video:
env.set_q_values(logits.tolist())
if random.random() < epsilon:
action = env.action_space.sample()
else:
action = torch.argmax(logits, dim=1).tolist()[0]
next_obs, reward, done, info = env.step(action)
episode_reward += reward
if 'episode' in info.keys():
print(f"global_step={global_step}, episode_reward={info['episode']['r']}")
writer.add_scalar("charts/episode_reward", info['episode']['r'], global_step)
writer.add_scalar("charts/epsilon", epsilon, global_step)
rb.put((obs, action, reward, next_obs, done))
if global_step > args.learning_starts and global_step % args.train_frequency == 0:
s_obs, s_actions, s_rewards, s_next_obses, s_dones = rb.sample(args.batch_size)
with torch.no_grad():
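            # The two forward passes below form the Double DQN target: the online
            # q_network selects the argmax action at s', while target_network
            # evaluates that action, which reduces the overestimation bias of a
            # plain max over the target network's Q-values.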
current_value = q_network.forward(s_next_obses)
target_value = target_network.forward(s_next_obses)
target_max = target_value.gather(1, torch.max(current_value, 1)[1].unsqueeze(1)).squeeze(1)
td_target = torch.Tensor(s_rewards).to(device) + args.gamma * target_max * (1 - torch.Tensor(s_dones).to(device))
old_val = q_network.forward(s_obs).gather(1, torch.LongTensor(s_actions).view(-1,1).to(device)).squeeze()
loss = loss_fn(td_target, old_val)
writer.add_scalar("losses/td_loss", loss, global_step)
optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(list(q_network.parameters()), args.max_grad_norm)
optimizer.step()
if global_step % args.target_network_frequency == 0:
target_network.load_state_dict(q_network.state_dict())
obs = next_obs
if done:
obs, episode_reward = env.reset(), 0
env.close()
writer.close()
| true
| true
|
79092ae037401100239163d1bdc62803bb41e8eb
| 612
|
py
|
Python
|
Project Pattern/pattern_16.py
|
chandthash/nppy
|
228116d4efa6d28a9cdab245c6c8045844e96211
|
[
"MIT"
] | null | null | null |
Project Pattern/pattern_16.py
|
chandthash/nppy
|
228116d4efa6d28a9cdab245c6c8045844e96211
|
[
"MIT"
] | null | null | null |
Project Pattern/pattern_16.py
|
chandthash/nppy
|
228116d4efa6d28a9cdab245c6c8045844e96211
|
[
"MIT"
] | null | null | null |
def pattern_sixteen(steps):
''' Pattern sixteen
9
9 8
9 8 7
9 8 7 6
9 8 7 6 5
9 8 7 6 5 4
9 8 7 6 5 4 3
9 8 7 6 5 4 3 2
9 8 7 6 5 4 3 2 1
'''
    get_range = [str(i) for i in range(1, steps + 1)][::-1] # Build the range of numbers as strings and reverse it
for gr in range(1, len(get_range) + 1):
join = ' '.join(get_range[:gr]) # Slicing values
print(join)
if __name__ == '__main__':
try:
pattern_sixteen(9)
except NameError:
print('Integer was expected')
| 21.857143
| 112
| 0.48366
|
def pattern_sixteen(steps):
get_range = [str(i) for i in range(1, steps + 1)][::-1]
for gr in range(1, len(get_range) + 1):
join = ' '.join(get_range[:gr])
print(join)
if __name__ == '__main__':
try:
pattern_sixteen(9)
except NameError:
print('Integer was expected')
| true
| true
|
79092d8f23c784b5f570fd6425bf38e9e338988f
| 2,072
|
py
|
Python
|
hw_asr/metric/cer_metric.py
|
ArseniyBolotin/asr_project
|
e026286df406bed20a45a82a8c961bad5446aa9a
|
[
"MIT"
] | null | null | null |
hw_asr/metric/cer_metric.py
|
ArseniyBolotin/asr_project
|
e026286df406bed20a45a82a8c961bad5446aa9a
|
[
"MIT"
] | null | null | null |
hw_asr/metric/cer_metric.py
|
ArseniyBolotin/asr_project
|
e026286df406bed20a45a82a8c961bad5446aa9a
|
[
"MIT"
] | null | null | null |
from typing import List
import torch
from torch import Tensor
from hw_asr.base.base_metric import BaseMetric
from hw_asr.base.base_text_encoder import BaseTextEncoder
from hw_asr.metric.utils import calc_cer
class ArgmaxCERMetric(BaseMetric):
def __init__(self, text_encoder: BaseTextEncoder, *args, **kwargs):
super().__init__(*args, **kwargs)
self.text_encoder = text_encoder
def __call__(self, log_probs: Tensor, text: List[str], *args, **kwargs):
cers = []
predictions = torch.argmax(log_probs.cpu(), dim=-1)
for log_prob_vec, target_text, log_prob_length in zip(predictions, text, kwargs['log_probs_length']):
if hasattr(self.text_encoder, "ctc_decode"):
pred_text = self.text_encoder.ctc_decode(log_prob_vec[:log_prob_length.item()])
else:
pred_text = self.text_encoder.decode(log_prob_vec)
cers.append(calc_cer(target_text, pred_text))
return sum(cers) / len(cers)
class BeamSearchCERMetric(BaseMetric):
def __init__(self, text_encoder: BaseTextEncoder, *args, **kwargs):
super().__init__(*args, **kwargs)
self.text_encoder = text_encoder
def __call__(self, log_probs: Tensor, text: List[str], *args, **kwargs):
cers = []
if hasattr(self.text_encoder, "ctc_beam_search"):
predictions = log_probs.cpu()
else:
predictions = torch.argmax(log_probs.cpu(), dim=-1)
for log_prob_length, log_prob_vec, target_text in zip(kwargs['log_probs_length'], predictions, text):
if hasattr(self.text_encoder, "ctc_beam_search"):
pred_text = self.text_encoder.ctc_beam_search(log_prob_vec[:log_prob_length.item(), :].unsqueeze(0))
elif hasattr(self.text_encoder, "ctc_decode"):
pred_text = self.text_encoder.ctc_decode(log_prob_vec)
else:
pred_text = self.text_encoder.decode(log_prob_vec)
cers.append(calc_cer(target_text, pred_text))
return sum(cers) / len(cers)
| 41.44
| 116
| 0.668436
|
from typing import List
import torch
from torch import Tensor
from hw_asr.base.base_metric import BaseMetric
from hw_asr.base.base_text_encoder import BaseTextEncoder
from hw_asr.metric.utils import calc_cer
class ArgmaxCERMetric(BaseMetric):
def __init__(self, text_encoder: BaseTextEncoder, *args, **kwargs):
super().__init__(*args, **kwargs)
self.text_encoder = text_encoder
def __call__(self, log_probs: Tensor, text: List[str], *args, **kwargs):
cers = []
predictions = torch.argmax(log_probs.cpu(), dim=-1)
for log_prob_vec, target_text, log_prob_length in zip(predictions, text, kwargs['log_probs_length']):
if hasattr(self.text_encoder, "ctc_decode"):
pred_text = self.text_encoder.ctc_decode(log_prob_vec[:log_prob_length.item()])
else:
pred_text = self.text_encoder.decode(log_prob_vec)
cers.append(calc_cer(target_text, pred_text))
return sum(cers) / len(cers)
class BeamSearchCERMetric(BaseMetric):
def __init__(self, text_encoder: BaseTextEncoder, *args, **kwargs):
super().__init__(*args, **kwargs)
self.text_encoder = text_encoder
def __call__(self, log_probs: Tensor, text: List[str], *args, **kwargs):
cers = []
if hasattr(self.text_encoder, "ctc_beam_search"):
predictions = log_probs.cpu()
else:
predictions = torch.argmax(log_probs.cpu(), dim=-1)
for log_prob_length, log_prob_vec, target_text in zip(kwargs['log_probs_length'], predictions, text):
if hasattr(self.text_encoder, "ctc_beam_search"):
pred_text = self.text_encoder.ctc_beam_search(log_prob_vec[:log_prob_length.item(), :].unsqueeze(0))
elif hasattr(self.text_encoder, "ctc_decode"):
pred_text = self.text_encoder.ctc_decode(log_prob_vec)
else:
pred_text = self.text_encoder.decode(log_prob_vec)
cers.append(calc_cer(target_text, pred_text))
return sum(cers) / len(cers)
| true
| true
|
79092fb8dc4a60159a099011d0f8734a3688e434
| 2,669
|
py
|
Python
|
scripts/add_def_obstructions.py
|
grigoriy-chirkov/OpenLane
|
374211966b12e2fa0930f33c44d04347df9705f6
|
[
"Apache-2.0"
] | null | null | null |
scripts/add_def_obstructions.py
|
grigoriy-chirkov/OpenLane
|
374211966b12e2fa0930f33c44d04347df9705f6
|
[
"Apache-2.0"
] | null | null | null |
scripts/add_def_obstructions.py
|
grigoriy-chirkov/OpenLane
|
374211966b12e2fa0930f33c44d04347df9705f6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2020 Efabless Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import re
import opendb as odb
parser = argparse.ArgumentParser(
description='Creates obstructions in def files.')
parser.add_argument('--lef', '-l',
nargs='+',
type=str,
default=None,
required=True,
help='LEF file needed to have a proper view of the DEF files.')
parser.add_argument('--input-def', '-id', required=True,
help='DEF view of the design that needs to be obstructed.')
parser.add_argument('--obstructions', '-obs', required=True,
help='Format: layer llx lly urx ury, ... (in microns)')
parser.add_argument('--output', '-o', required=True,
help='Output DEF file.')
args = parser.parse_args()
input_lef_file_names = args.lef
input_def_file_name = args.input_def
obs_args = args.obstructions
output_def_file_name = args.output
RE_NUMBER = r'[\-]?[0-9]+(\.[0-9]+)?'
RE_OBS = r'(?P<layer>\S+)\s+' r'(?P<bbox>' + RE_NUMBER + r'\s+' + RE_NUMBER + r'\s+' + RE_NUMBER + r'\s+' + RE_NUMBER + r')'
obses = obs_args.split(',')
obs_list = []
for obs in obses:
obs = obs.strip()
m = re.match(RE_OBS, obs)
assert m,\
"Incorrectly formatted input (%s).\n Format: layer llx lly urx ury, ..." % (obs)
layer = m.group('layer')
bbox = [float(x) for x in m.group('bbox').split()]
obs_list.append((layer, bbox))
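# For illustration (layer name and coordinates below are hypothetical, not from any real design):
#   --obstructions "met1 0.0 0.0 100.0 200.0, met2 10.5 10.5 50.0 50.0"
# parses into [('met1', [0.0, 0.0, 100.0, 200.0]), ('met2', [10.5, 10.5, 50.0, 50.0])].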
design_db = odb.dbDatabase.create()
for lef in input_lef_file_names:
odb.read_lef(design_db, lef)
odb.read_def(design_db, input_def_file_name)
design_chip = design_db.getChip()
design_block = design_chip.getBlock()
design_insts = design_block.getInsts()
design_tech = design_db.getTech()
for obs in obs_list:
layer = obs[0]
bbox = obs[1]
dbu = design_tech.getDbUnitsPerMicron()
bbox = [int(x*dbu) for x in bbox]
print("Creating an obstruction on", layer, "at", *bbox, "(DBU)")
odb.dbObstruction_create(design_block, design_tech.findLayer(layer), *bbox)
odb.write_def(design_block, output_def_file_name)
| 33.3625
| 125
| 0.665043
|
import argparse
import re
import opendb as odb
parser = argparse.ArgumentParser(
description='Creates obstructions in def files.')
parser.add_argument('--lef', '-l',
nargs='+',
type=str,
default=None,
required=True,
help='LEF file needed to have a proper view of the DEF files.')
parser.add_argument('--input-def', '-id', required=True,
help='DEF view of the design that needs to be obstructed.')
parser.add_argument('--obstructions', '-obs', required=True,
help='Format: layer llx lly urx ury, ... (in microns)')
parser.add_argument('--output', '-o', required=True,
help='Output DEF file.')
args = parser.parse_args()
input_lef_file_names = args.lef
input_def_file_name = args.input_def
obs_args = args.obstructions
output_def_file_name = args.output
RE_NUMBER = r'[\-]?[0-9]+(\.[0-9]+)?'
RE_OBS = r'(?P<layer>\S+)\s+' r'(?P<bbox>' + RE_NUMBER + r'\s+' + RE_NUMBER + r'\s+' + RE_NUMBER + r'\s+' + RE_NUMBER + r')'
obses = obs_args.split(',')
obs_list = []
for obs in obses:
obs = obs.strip()
m = re.match(RE_OBS, obs)
assert m,\
"Incorrectly formatted input (%s).\n Format: layer llx lly urx ury, ..." % (obs)
layer = m.group('layer')
bbox = [float(x) for x in m.group('bbox').split()]
obs_list.append((layer, bbox))
design_db = odb.dbDatabase.create()
for lef in input_lef_file_names:
odb.read_lef(design_db, lef)
odb.read_def(design_db, input_def_file_name)
design_chip = design_db.getChip()
design_block = design_chip.getBlock()
design_insts = design_block.getInsts()
design_tech = design_db.getTech()
for obs in obs_list:
layer = obs[0]
bbox = obs[1]
dbu = design_tech.getDbUnitsPerMicron()
bbox = [int(x*dbu) for x in bbox]
print("Creating an obstruction on", layer, "at", *bbox, "(DBU)")
odb.dbObstruction_create(design_block, design_tech.findLayer(layer), *bbox)
odb.write_def(design_block, output_def_file_name)
| true
| true
|
79092fecf728c4382b2f448ef5583fda1dd83b5d
| 21,596
|
py
|
Python
|
tf_encrypted/convert/register.py
|
dropoutlabs/tf-encrypted
|
48c9dc7419163425e736ad05bb19980d134fc851
|
[
"Apache-2.0"
] | 1
|
2019-06-14T17:40:37.000Z
|
2019-06-14T17:40:37.000Z
|
tf_encrypted/convert/register.py
|
dropoutlabs/tf-encrypted
|
48c9dc7419163425e736ad05bb19980d134fc851
|
[
"Apache-2.0"
] | null | null | null |
tf_encrypted/convert/register.py
|
dropoutlabs/tf-encrypted
|
48c9dc7419163425e736ad05bb19980d134fc851
|
[
"Apache-2.0"
] | null | null | null |
"""Registry for the TF Encrypted Converter."""
import array
import logging
import os
from typing import Any, List
from collections import OrderedDict
import yaml
import numpy as np
import tensorflow as tf
from ..layers import Conv2D, Relu, Sigmoid, Dense, AveragePooling2D, MaxPooling2D
from ..protocol.pond import PondPrivateTensor, PondMaskedTensor
def registry():
"""Map reserved names and scopes to their conversion functions."""
reg = {
'Placeholder': _placeholder,
'Const': _constant,
'Conv2D': _conv2d,
'Relu': _relu,
'Sigmoid': _sigmoid,
'MatMul': _matmul,
'Shape': _shape,
'StridedSlice': _strided_slice,
'Add': _add,
'Sub': _sub,
'Transpose': _transpose,
'Reshape': _reshape,
'Pack': _pack,
'Rsqrt': _rsqrt,
'Mul': _mul,
'ExpandDims': _expand_dims,
'AvgPool': _avgpool,
'Squeeze': _squeeze,
'ConcatV2': _concat,
'BiasAdd': _bias_add,
'MaxPool': _maxpool,
'Pad': _pad,
'BatchToSpaceND': _batch_to_space_nd,
'SpaceToBatchND': _space_to_batch_nd,
'ArgMax': _argmax,
'required_space_to_batch_paddings': _required_space_to_batch_paddings,
'flatten': _flatten,
'conv2d': _keras_conv2d,
'Slice': _slice,
'Neg': _negative,
'Split': _split,
'Identity': _identity,
"GatherV2": _gather,
"dense": _keras_dense,
}
return reg
convert_dir = os.path.dirname(os.path.abspath(__file__))
specops_path = os.path.join(convert_dir, "specops.yaml")
with open(specops_path, "r") as stream:
loaded_yaml = yaml.load(stream, Loader=yaml.SafeLoader)
sorted_yaml = sorted(loaded_yaml.items(), key=lambda kv: kv[0])
REGISTERED_SPECOPS = OrderedDict(sorted_yaml)
# pylint: disable=unused-argument
# pylint: disable=missing-docstring
def _placeholder(converter, node: Any, inputs: List[str]) -> Any:
return tf.placeholder(node.attr["dtype"].type,
shape=node.attr["shape"].shape)
def _constant(converter, node: Any, inputs: List[str]) -> Any:
    # need to be able to access the underlying weights, so return the node itself
return node
def _identity(converter, node: Any, inputs: List[str]) -> Any:
    # identity op: simply pass through the output of the upstream node
return converter.outputs[inputs[0]]
def _matmul(converter, node: Any, inputs: List[str]) -> Any:
a = converter.outputs[inputs[0]]
b = converter.outputs[inputs[1]]
tensor = b.attr["value"].tensor
b_shape = [i.size for i in tensor.tensor_shape.dim]
transpose_a = node.attr["transpose_a"].b
transpose_b = node.attr["transpose_b"].b
layer = Dense(a.shape.as_list(),
b_shape[1],
transpose_input=transpose_a,
transpose_weight=transpose_b)
dtype = tensor.dtype
if dtype == tf.float32:
nums = array.array('f', tensor.tensor_content)
elif dtype == tf.float64:
nums = array.array('d', tensor.tensor_content)
else:
raise TypeError("Unsupported dtype for weights")
def inputter_fn():
return tf.constant(np.array(nums).reshape(b_shape))
w = converter.protocol.define_private_input(converter.model_provider,
inputter_fn)
layer.initialize(initial_weights=w)
return layer.forward(a)
def _conv2d(converter, node, inputs):
x_in = converter.outputs[inputs[0]]
kernel = converter.outputs[inputs[1]]
if isinstance(kernel, tf.NodeDef):
shape = [i.size for i in kernel.attr["value"].tensor.tensor_shape.dim]
w = _nodef_to_private_pond(converter, kernel)
else:
shape = kernel.shape.as_list()
w = kernel
fmt = node.attr["data_format"].s.decode('ascii')
layer = Conv2D(x_in.shape.as_list(),
shape,
strides=int(max(node.attr["strides"].list.i)),
padding=node.attr["padding"].s.decode('ascii'),
channels_first=fmt == "NCHW")
layer.initialize(initial_weights=w)
out = layer.forward(x_in)
return out
def _keras_conv2d(converter, interiors, inputs):
x_in = converter.outputs[inputs[0]]
conv_op = interiors["Conv2D"]
kernel = interiors["kernel"]
k = _nodef_to_private_pond(converter, kernel)
try:
bias = interiors["bias"]
b = _nodef_to_private_pond(converter, bias)
for ax in [0, -1, -1]:
b = b.expand_dims(axis=ax)
except KeyError:
b = None
input_shape = x_in.shape.as_list()
shape = [i.size for i in kernel.attr["value"].tensor.tensor_shape.dim]
fmt = conv_op.attr["data_format"].s.decode('ascii')
strides = int(max(conv_op.attr["strides"].list.i))
padding = conv_op.attr["padding"].s.decode('ascii')
layer = Conv2D(
input_shape, shape,
strides=strides,
padding=padding,
channels_first=fmt == "NCHW"
)
layer.initialize(initial_weights=k, initial_bias=b)
out = layer.forward(x_in)
return out
def _keras_dense(converter, interiors, inputs):
x_in = converter.outputs[inputs[0]]
kernel = interiors["kernel"]
k = _nodef_to_private_pond(converter, kernel)
try:
bias = interiors["bias"]
b = _nodef_to_private_pond(converter, bias)
except KeyError:
b = None
input_shape = x_in.shape.as_list()
shape = [i.size for i in kernel.attr["value"].tensor.tensor_shape.dim]
layer = Dense(input_shape,
out_features=shape[1])
layer.initialize(initial_weights=k, initial_bias=b)
out = layer.forward(x_in)
return out
def _relu(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
return Relu(x_in.shape.as_list()).forward(x_in)
def _sigmoid(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
return Sigmoid(x_in.shape.as_list()).forward(x_in)
def _strided_slice(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
if isinstance(x_in, tf.NodeDef):
input_out = _nodef_to_private_pond(converter, x_in)
else:
input_out = x_in
begin = converter.outputs[inputs[1]]
end = converter.outputs[inputs[2]]
strides = converter.outputs[inputs[3]]
begin_mask = node.attr["begin_mask"].i
end_mask = node.attr["end_mask"].i
ellipsis_mask = node.attr["ellipsis_mask"].i
new_axis_mask = node.attr["new_axis_mask"].i
shrink_axis_mask = node.attr["shrink_axis_mask"].i
begin = tf.constant(begin.attr["value"].tensor)
end = tf.constant(end.attr["value"].tensor)
strides = tf.constant(strides.attr["value"].tensor)
return converter.protocol.strided_slice(input_out, begin, end,
strides=strides,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask)
def _pack(converter, node: Any, inputs: List[str]) -> Any:
final_inputs = []
for x_in in inputs:
input_c = converter.outputs[x_in]
if isinstance(input_c, tf.NodeDef):
final_inputs.append(_nodef_to_private_pond(converter, input_c))
else:
final_inputs.append(input_c)
return converter.protocol.stack(final_inputs, axis=node.attr["axis"].i)
def _bias_add(converter, node: Any, inputs: List[str]) -> Any:
a = converter.outputs[inputs[0]]
b = converter.outputs[inputs[1]]
if isinstance(a, tf.NodeDef):
a_out = _nodef_to_private_pond(converter, a)
else:
a_out = a
if isinstance(b, tf.NodeDef):
b_out = _nodef_to_private_pond(converter, b)
else:
b_out = b
return converter.protocol.add(a_out, b_out)
def _maxpool(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
ksize = node.attr["ksize"].list.i
s = node.attr["strides"].list.i
padding = node.attr["padding"].s.decode('ascii')
pool_size = [ksize[1], ksize[2]]
strides = [s[1], s[2]]
shape = [int(i) for i in x_in.shape]
channels_first = node.attr["data_format"].s.decode('ascii') == "NCHW"
pooler = MaxPooling2D(shape, pool_size, strides, padding, channels_first)
out = pooler.forward(x_in)
return out
def _shape(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
return x_in.shape
def _reshape(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
shape = converter.outputs[inputs[1]]
tensor = shape.attr["value"].tensor
dtype = shape.attr["dtype"].type
if dtype == tf.int32:
nums = array.array('i', tensor.tensor_content)
elif dtype == tf.int64:
nums = array.array('l', tensor.tensor_content)
else:
raise TypeError("Unsupported dtype for reshape shape")
return converter.protocol.reshape(x_in, list(nums))
def _transpose(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
perm = converter.outputs[inputs[1]]
tensor = perm.attr["value"].tensor
shape = [i.size for i in tensor.tensor_shape.dim]
dtype = perm.attr["dtype"].type
if dtype == tf.int32:
nums = array.array('i', tensor.tensor_content)
elif dtype == tf.int64:
nums = array.array('l', tensor.tensor_content)
else:
raise TypeError("Unsupported dtype for transpose perm")
return converter.protocol.transpose(x_in, np.array(nums).reshape(shape))
def _expand_dims(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
if isinstance(x_in, tf.NodeDef):
input_out = _nodef_to_private_pond(converter, x_in)
else:
input_out = x_in
input_axis = converter.outputs[inputs[1]]
axis_attr = input_axis.attr["value"].tensor.int_val
axis_val = array.array('i', axis_attr)[0]
return converter.protocol.expand_dims(input_out, axis_val)
def _negative(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
if isinstance(x_in, tf.NodeDef):
input_out = _nodef_to_private_pond(converter, x_in)
else:
input_out = x_in
return converter.protocol.negative(input_out)
def _gather(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
indices = converter.outputs[inputs[1]]
axis = converter.outputs[inputs[2]]
if isinstance(x_in, tf.NodeDef):
input_out = _nodef_to_private_pond(converter, x_in)
else:
input_out = x_in
indices_out = list(_nodef_to_numpy_array(indices))
axis_val = axis.attr["value"].tensor.int_val[0]
return converter.protocol.gather(input_out, indices_out, axis_val)
def _squeeze(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
axis = node.attr["squeeze_dims"].list.i
return converter.protocol.squeeze(x_in, list(axis))
def _split(converter, node: Any, inputs: List[str]) -> Any:
axis = converter.outputs[inputs[0]]
x_in = converter.outputs[inputs[1]]
if isinstance(x_in, tf.NodeDef):
input_out = _nodef_to_private_pond(converter, x_in)
else:
input_out = x_in
num_split = node.attr["num_split"].i
axis_val = axis.attr["value"].tensor.int_val[0]
return converter.protocol.split(input_out, num_split, axis_val)[0]
def _pad(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
p = (converter.outputs[inputs[1]])
paddings_t = p.attr["value"].tensor
paddings_arr = list(array.array('I', paddings_t.tensor_content))
paddings_lst = [paddings_arr[i:i + 2]
for i in range(0, len(paddings_arr), 2)]
return converter.protocol.pad(x_in, paddings_lst)
def _rsqrt(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
if isinstance(x_in, tf.NodeDef):
tensor = x_in.attr["value"].tensor
shape = [i.size for i in tensor.tensor_shape.dim]
dtype = x_in.attr["dtype"].type
if dtype == tf.float32:
nums = array.array('f', tensor.tensor_content)
elif dtype == tf.float64:
nums = array.array('d', tensor.tensor_content)
else:
raise TypeError("Unsupported dtype for rsqrt")
def inputter_fn():
return tf.constant(1 / np.sqrt(np.array(nums).reshape(shape)))
else:
# XXX this is a little weird but the input into rsqrt is public and
# being used only for batchnorm at the moment
decoded = converter.protocol._decode(x_in.value_on_0, True) # pylint: disable=protected-access
def inputter_fn():
return tf.rsqrt(decoded)
x = converter.protocol.define_public_input(
converter.model_provider, inputter_fn)
return x
def _add(converter, node: Any, inputs: List[str]) -> Any:
a = converter.outputs[inputs[0]]
b = converter.outputs[inputs[1]]
if isinstance(a, tf.NodeDef):
a_out = _nodef_to_public_pond(converter, a)
else:
a_out = a
if isinstance(b, tf.NodeDef):
b_out = _nodef_to_public_pond(converter, b)
else:
b_out = b
return converter.protocol.add(a_out, b_out)
def _sub(converter, node: Any, inputs: List[str]) -> Any:
a = converter.outputs[inputs[0]]
b = converter.outputs[inputs[1]]
if isinstance(a, tf.NodeDef):
a_out = _nodef_to_public_pond(converter, a)
else:
a_out = a
if isinstance(b, tf.NodeDef):
b_out = _nodef_to_public_pond(converter, b)
else:
b_out = b
return converter.protocol.sub(a_out, b_out)
def _mul(converter, node: Any, inputs: List[str]) -> Any:
a = converter.outputs[inputs[0]]
b = converter.outputs[inputs[1]]
if isinstance(a, tf.NodeDef):
a_out = _nodef_to_public_pond(converter, a)
else:
a_out = a
if isinstance(b, tf.NodeDef):
b_out = _nodef_to_public_pond(converter, b)
else:
b_out = b
return converter.protocol.mul(a_out, b_out)
def _avgpool(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
ksize = node.attr["ksize"].list.i
s = node.attr["strides"].list.i
padding = node.attr["padding"].s.decode('ascii')
pool_size = [ksize[1], ksize[2]]
strides = [s[1], s[2]]
shape = [int(i) for i in x_in.shape]
channels_first = node.attr["data_format"].s.decode('ascii') == "NCHW"
avg = AveragePooling2D(shape, pool_size, strides, padding, channels_first)
out = avg.forward(x_in)
return out
def _concat(converter, node: Any, inputs: List[str]) -> Any:
input0 = converter.outputs[inputs[0]]
input1 = converter.outputs[inputs[1]]
axis = converter.outputs[inputs[2]]
axis_int = axis.attr["value"].tensor.int_val[0]
return converter.protocol.concat([input0, input1], axis_int)
def _batch_to_space_nd(converter, node, inputs):
x_in = converter.outputs[inputs[0]]
block_shape = converter.outputs[inputs[1]].attr["value"].tensor
crops = converter.outputs[inputs[2]].attr["value"].tensor
return converter.protocol.batch_to_space_nd(x_in, block_shape, crops)
def _space_to_batch_nd(converter, node, inputs):
x_in = converter.outputs[inputs[0]]
block_shape = converter.outputs[inputs[1]].attr["value"].tensor
paddings = converter.outputs[inputs[2]].attr["value"].tensor
return converter.protocol.space_to_batch_nd(x_in, block_shape, paddings)
def _flatten(converter, node, inputs):
x_in = converter.outputs[inputs[0]]
shape = x_in.shape.as_list()
non_batch = 1
for dim in shape[1:]:
non_batch *= dim
return converter.protocol.reshape(x_in, [-1, non_batch])
def _required_space_to_batch_paddings(converter, node, inputs: List[str]):
inputs_node = [converter.outputs[inputs[i]] for i in range(len(inputs))]
inputs_int32 = []
for x_in in inputs_node:
pvt_check = isinstance(x_in, PondPrivateTensor)
msk_check = isinstance(x_in, PondMaskedTensor)
if pvt_check or msk_check:
logging.warning(("Revealing private input: "
"required_space_to_batch_paddings assumes public "
"input."))
inputs_int32.append(tf.cast(x_in.reveal().decode(), tf.int32))
elif isinstance(x_in, tf.NodeDef):
inputs_int32.append(_nodef_to_numpy_array(x_in))
else:
raise TypeError("Unexpected input of type {}.".format(type(x_in)))
if len(inputs_int32) == 2:
input_shape, block_shape = inputs_int32
def inputter_pad():
pads, _ = tf.required_space_to_batch_paddings(input_shape, block_shape)
return tf.cast(pads, tf.float64)
def inputter_crop():
_, crops = tf.required_space_to_batch_paddings(input_shape, block_shape)
return tf.cast(crops, tf.float64)
else:
base_paddings, input_shape, block_shape = inputs_int32
def inputter_pad():
pads, _ = tf.required_space_to_batch_paddings(
input_shape,
block_shape,
base_paddings=base_paddings,
)
return tf.cast(pads, tf.float64)
def inputter_crop():
_, crops = tf.required_space_to_batch_paddings(
input_shape,
block_shape,
base_paddings=base_paddings,
)
return tf.cast(crops, tf.float64)
pad_private = converter.protocol.define_public_input(
converter.model_provider, inputter_pad)
crop_private = converter.protocol.define_public_input(
converter.model_provider, inputter_crop)
return (pad_private, crop_private)
def _argmax(converter, node, inputs):
x_in = converter.outputs[inputs[0]]
axis = converter.outputs[inputs[1]].attr["value"].tensor.int_val[0]
return converter.protocol.argmax(x_in, axis=axis)
def _slice(converter, node, inputs):
x_in = converter.outputs[inputs[0]]
begin = _nodef_to_numpy_array(converter.outputs[inputs[1]])
size = _nodef_to_numpy_array(converter.outputs[inputs[2]])
if isinstance(x_in, tf.NodeDef):
input_out = _nodef_to_private_pond(converter, x_in)
else:
input_out = x_in
# Slice is a special case of strided_slice. Slice takes size (the number of
# elements we want to slice) as an input. However strided_slice takes end
# (integer until which the slicing takes place) as input.
    # We can infer the end parameter with: end[i] = begin[i] + size[i].
    # If size is negative, the stepping goes towards smaller indices.
# In this case we can infer the end parameter with: end[i] = input_shape[i] - size[i] + 1
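    # For instance, with hypothetical values begin=[1, 0], size=[2, -1] and
    # input_shape=[4, 6], the loop below yields end=[3, 8] (begin + size for the
    # positive entry, input_shape - size + 1 for the negative one).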
end = np.zeros(len(begin))
input_shape = x_in.shape.as_list()
# if size is negative take the input dimension
for i in range(len(end)): # pylint: disable=consider-using-enumerate
if size[i] < 0:
end[i] = input_shape[i] - size[i] + 1
else:
end[i] = begin[i] + size[i]
return converter.protocol.strided_slice(input_out, begin, end)
# pylint: enable=unused-argument
# pylint: enable=missing-docstring
def _nodef_to_public_pond(converter, x):
"""Map a NodeDef x to a PublicPondTensor."""
dtype = x.attr["dtype"].type
x_shape = [i.size for i in x.attr["value"].tensor.tensor_shape.dim]
if not x_shape:
if dtype == tf.float32:
nums = x.attr["value"].tensor.float_val
elif dtype == tf.float64:
nums = x.attr["value"].tensor.float_val
elif dtype == tf.int32:
nums = x.attr["value"].tensor.int_val
else:
raise TypeError("Unsupported dtype")
def inputter_fn():
return tf.constant(np.array(nums).reshape(1, 1))
else:
if dtype == tf.float32:
nums = array.array('f', x.attr["value"].tensor.tensor_content)
elif dtype == tf.float64:
nums = array.array('d', x.attr["value"].tensor.tensor_content)
elif dtype == tf.int32:
nums = array.array('i', x.attr["value"].tensor.tensor_content)
else:
raise TypeError("Unsupported dtype")
def inputter_fn():
return tf.constant(np.array(nums).reshape(x_shape))
x_public = converter.protocol.define_public_input(
converter.model_provider, inputter_fn)
return x_public
def _nodef_to_private_pond(converter, x):
"""Map a NodeDef x to a PrivatePondTensor."""
dtype = x.attr["dtype"].type
warn_msg = "Unexpected dtype {} found at node {}"
err_msg = "Unsupported dtype {} found at node {}"
x_shape = [i.size for i in x.attr["value"].tensor.tensor_shape.dim]
if not x_shape:
if dtype == tf.float32:
nums = x.attr["value"].tensor.float_val
elif dtype == tf.float64:
nums = x.attr["value"].tensor.float_val
elif dtype == tf.int32:
logging.warning(warn_msg, dtype, x.name)
nums = x.attr["value"].tensor.int_val
else:
raise TypeError(err_msg.format(dtype, x.name))
def inputter_fn():
return tf.constant(np.array(nums).reshape(1, 1))
else:
if dtype == tf.float32:
nums = array.array('f', x.attr["value"].tensor.tensor_content)
elif dtype == tf.float64:
nums = array.array('d', x.attr["value"].tensor.tensor_content)
elif dtype == tf.int32:
logging.warning(warn_msg, dtype, x.name)
nums = array.array('i', x.attr["value"].tensor.tensor_content)
else:
raise TypeError(err_msg.format(dtype, x.name))
def inputter_fn():
return tf.constant(np.array(nums).reshape(x_shape))
x_private = converter.protocol.define_private_input(
converter.model_provider, inputter_fn)
return x_private
def _nodef_to_numpy_array(x):
"""Map a NodeDef x to a np.array."""
dtype = x.attr["dtype"].type
x_shape = [i.size for i in x.attr["value"].tensor.tensor_shape.dim]
if dtype == tf.float32:
nums = array.array('f', x.attr["value"].tensor.tensor_content)
elif dtype == tf.float64:
nums = array.array('d', x.attr["value"].tensor.tensor_content)
elif dtype == tf.int32:
nums = array.array('i', x.attr["value"].tensor.tensor_content)
else:
raise TypeError("Unsupported dtype")
return np.array(nums).reshape(x_shape)
| 29.144399
| 99
| 0.677348
|
import array
import logging
import os
from typing import Any, List
from collections import OrderedDict
import yaml
import numpy as np
import tensorflow as tf
from ..layers import Conv2D, Relu, Sigmoid, Dense, AveragePooling2D, MaxPooling2D
from ..protocol.pond import PondPrivateTensor, PondMaskedTensor
def registry():
reg = {
'Placeholder': _placeholder,
'Const': _constant,
'Conv2D': _conv2d,
'Relu': _relu,
'Sigmoid': _sigmoid,
'MatMul': _matmul,
'Shape': _shape,
'StridedSlice': _strided_slice,
'Add': _add,
'Sub': _sub,
'Transpose': _transpose,
'Reshape': _reshape,
'Pack': _pack,
'Rsqrt': _rsqrt,
'Mul': _mul,
'ExpandDims': _expand_dims,
'AvgPool': _avgpool,
'Squeeze': _squeeze,
'ConcatV2': _concat,
'BiasAdd': _bias_add,
'MaxPool': _maxpool,
'Pad': _pad,
'BatchToSpaceND': _batch_to_space_nd,
'SpaceToBatchND': _space_to_batch_nd,
'ArgMax': _argmax,
'required_space_to_batch_paddings': _required_space_to_batch_paddings,
'flatten': _flatten,
'conv2d': _keras_conv2d,
'Slice': _slice,
'Neg': _negative,
'Split': _split,
'Identity': _identity,
"GatherV2": _gather,
"dense": _keras_dense,
}
return reg
convert_dir = os.path.dirname(os.path.abspath(__file__))
specops_path = os.path.join(convert_dir, "specops.yaml")
with open(specops_path, "r") as stream:
loaded_yaml = yaml.load(stream, Loader=yaml.SafeLoader)
sorted_yaml = sorted(loaded_yaml.items(), key=lambda kv: kv[0])
REGISTERED_SPECOPS = OrderedDict(sorted_yaml)
def _placeholder(converter, node: Any, inputs: List[str]) -> Any:
return tf.placeholder(node.attr["dtype"].type,
shape=node.attr["shape"].shape)
def _constant(converter, node: Any, inputs: List[str]) -> Any:
return node
def _identity(converter, node: Any, inputs: List[str]) -> Any:
return converter.outputs[inputs[0]]
def _matmul(converter, node: Any, inputs: List[str]) -> Any:
a = converter.outputs[inputs[0]]
b = converter.outputs[inputs[1]]
tensor = b.attr["value"].tensor
b_shape = [i.size for i in tensor.tensor_shape.dim]
transpose_a = node.attr["transpose_a"].b
transpose_b = node.attr["transpose_b"].b
layer = Dense(a.shape.as_list(),
b_shape[1],
transpose_input=transpose_a,
transpose_weight=transpose_b)
dtype = tensor.dtype
if dtype == tf.float32:
nums = array.array('f', tensor.tensor_content)
elif dtype == tf.float64:
nums = array.array('d', tensor.tensor_content)
else:
raise TypeError("Unsupported dtype for weights")
def inputter_fn():
return tf.constant(np.array(nums).reshape(b_shape))
w = converter.protocol.define_private_input(converter.model_provider,
inputter_fn)
layer.initialize(initial_weights=w)
return layer.forward(a)
def _conv2d(converter, node, inputs):
x_in = converter.outputs[inputs[0]]
kernel = converter.outputs[inputs[1]]
if isinstance(kernel, tf.NodeDef):
shape = [i.size for i in kernel.attr["value"].tensor.tensor_shape.dim]
w = _nodef_to_private_pond(converter, kernel)
else:
shape = kernel.shape.as_list()
w = kernel
fmt = node.attr["data_format"].s.decode('ascii')
layer = Conv2D(x_in.shape.as_list(),
shape,
strides=int(max(node.attr["strides"].list.i)),
padding=node.attr["padding"].s.decode('ascii'),
channels_first=fmt == "NCHW")
layer.initialize(initial_weights=w)
out = layer.forward(x_in)
return out
def _keras_conv2d(converter, interiors, inputs):
x_in = converter.outputs[inputs[0]]
conv_op = interiors["Conv2D"]
kernel = interiors["kernel"]
k = _nodef_to_private_pond(converter, kernel)
try:
bias = interiors["bias"]
b = _nodef_to_private_pond(converter, bias)
for ax in [0, -1, -1]:
b = b.expand_dims(axis=ax)
except KeyError:
b = None
input_shape = x_in.shape.as_list()
shape = [i.size for i in kernel.attr["value"].tensor.tensor_shape.dim]
fmt = conv_op.attr["data_format"].s.decode('ascii')
strides = int(max(conv_op.attr["strides"].list.i))
padding = conv_op.attr["padding"].s.decode('ascii')
layer = Conv2D(
input_shape, shape,
strides=strides,
padding=padding,
channels_first=fmt == "NCHW"
)
layer.initialize(initial_weights=k, initial_bias=b)
out = layer.forward(x_in)
return out
def _keras_dense(converter, interiors, inputs):
x_in = converter.outputs[inputs[0]]
kernel = interiors["kernel"]
k = _nodef_to_private_pond(converter, kernel)
try:
bias = interiors["bias"]
b = _nodef_to_private_pond(converter, bias)
except KeyError:
b = None
input_shape = x_in.shape.as_list()
shape = [i.size for i in kernel.attr["value"].tensor.tensor_shape.dim]
layer = Dense(input_shape,
out_features=shape[1])
layer.initialize(initial_weights=k, initial_bias=b)
out = layer.forward(x_in)
return out
def _relu(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
return Relu(x_in.shape.as_list()).forward(x_in)
def _sigmoid(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
return Sigmoid(x_in.shape.as_list()).forward(x_in)
def _strided_slice(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
if isinstance(x_in, tf.NodeDef):
input_out = _nodef_to_private_pond(converter, x_in)
else:
input_out = x_in
begin = converter.outputs[inputs[1]]
end = converter.outputs[inputs[2]]
strides = converter.outputs[inputs[3]]
begin_mask = node.attr["begin_mask"].i
end_mask = node.attr["end_mask"].i
ellipsis_mask = node.attr["ellipsis_mask"].i
new_axis_mask = node.attr["new_axis_mask"].i
shrink_axis_mask = node.attr["shrink_axis_mask"].i
begin = tf.constant(begin.attr["value"].tensor)
end = tf.constant(end.attr["value"].tensor)
strides = tf.constant(strides.attr["value"].tensor)
return converter.protocol.strided_slice(input_out, begin, end,
strides=strides,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask)
def _pack(converter, node: Any, inputs: List[str]) -> Any:
final_inputs = []
for x_in in inputs:
input_c = converter.outputs[x_in]
if isinstance(input_c, tf.NodeDef):
final_inputs.append(_nodef_to_private_pond(converter, input_c))
else:
final_inputs.append(input_c)
return converter.protocol.stack(final_inputs, axis=node.attr["axis"].i)
def _bias_add(converter, node: Any, inputs: List[str]) -> Any:
a = converter.outputs[inputs[0]]
b = converter.outputs[inputs[1]]
if isinstance(a, tf.NodeDef):
a_out = _nodef_to_private_pond(converter, a)
else:
a_out = a
if isinstance(b, tf.NodeDef):
b_out = _nodef_to_private_pond(converter, b)
else:
b_out = b
return converter.protocol.add(a_out, b_out)
def _maxpool(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
ksize = node.attr["ksize"].list.i
s = node.attr["strides"].list.i
padding = node.attr["padding"].s.decode('ascii')
pool_size = [ksize[1], ksize[2]]
strides = [s[1], s[2]]
shape = [int(i) for i in x_in.shape]
channels_first = node.attr["data_format"].s.decode('ascii') == "NCHW"
pooler = MaxPooling2D(shape, pool_size, strides, padding, channels_first)
out = pooler.forward(x_in)
return out
def _shape(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
return x_in.shape
def _reshape(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
shape = converter.outputs[inputs[1]]
tensor = shape.attr["value"].tensor
dtype = shape.attr["dtype"].type
if dtype == tf.int32:
nums = array.array('i', tensor.tensor_content)
elif dtype == tf.int64:
nums = array.array('l', tensor.tensor_content)
else:
raise TypeError("Unsupported dtype for reshape shape")
return converter.protocol.reshape(x_in, list(nums))
def _transpose(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
perm = converter.outputs[inputs[1]]
tensor = perm.attr["value"].tensor
shape = [i.size for i in tensor.tensor_shape.dim]
dtype = perm.attr["dtype"].type
if dtype == tf.int32:
nums = array.array('i', tensor.tensor_content)
elif dtype == tf.int64:
nums = array.array('l', tensor.tensor_content)
else:
raise TypeError("Unsupported dtype for transpose perm")
return converter.protocol.transpose(x_in, np.array(nums).reshape(shape))
def _expand_dims(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
if isinstance(x_in, tf.NodeDef):
input_out = _nodef_to_private_pond(converter, x_in)
else:
input_out = x_in
input_axis = converter.outputs[inputs[1]]
axis_attr = input_axis.attr["value"].tensor.int_val
axis_val = array.array('i', axis_attr)[0]
return converter.protocol.expand_dims(input_out, axis_val)
def _negative(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
if isinstance(x_in, tf.NodeDef):
input_out = _nodef_to_private_pond(converter, x_in)
else:
input_out = x_in
return converter.protocol.negative(input_out)
def _gather(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
indices = converter.outputs[inputs[1]]
axis = converter.outputs[inputs[2]]
if isinstance(x_in, tf.NodeDef):
input_out = _nodef_to_private_pond(converter, x_in)
else:
input_out = x_in
indices_out = list(_nodef_to_numpy_array(indices))
axis_val = axis.attr["value"].tensor.int_val[0]
return converter.protocol.gather(input_out, indices_out, axis_val)
def _squeeze(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
axis = node.attr["squeeze_dims"].list.i
return converter.protocol.squeeze(x_in, list(axis))
def _split(converter, node: Any, inputs: List[str]) -> Any:
axis = converter.outputs[inputs[0]]
x_in = converter.outputs[inputs[1]]
if isinstance(x_in, tf.NodeDef):
input_out = _nodef_to_private_pond(converter, x_in)
else:
input_out = x_in
num_split = node.attr["num_split"].i
axis_val = axis.attr["value"].tensor.int_val[0]
return converter.protocol.split(input_out, num_split, axis_val)[0]
def _pad(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
p = (converter.outputs[inputs[1]])
paddings_t = p.attr["value"].tensor
paddings_arr = list(array.array('I', paddings_t.tensor_content))
paddings_lst = [paddings_arr[i:i + 2]
for i in range(0, len(paddings_arr), 2)]
return converter.protocol.pad(x_in, paddings_lst)
def _rsqrt(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
if isinstance(x_in, tf.NodeDef):
tensor = x_in.attr["value"].tensor
shape = [i.size for i in tensor.tensor_shape.dim]
dtype = x_in.attr["dtype"].type
if dtype == tf.float32:
nums = array.array('f', tensor.tensor_content)
elif dtype == tf.float64:
nums = array.array('d', tensor.tensor_content)
else:
raise TypeError("Unsupported dtype for rsqrt")
def inputter_fn():
return tf.constant(1 / np.sqrt(np.array(nums).reshape(shape)))
else:
decoded = converter.protocol._decode(x_in.value_on_0, True)
def inputter_fn():
return tf.rsqrt(decoded)
x = converter.protocol.define_public_input(
converter.model_provider, inputter_fn)
return x
def _add(converter, node: Any, inputs: List[str]) -> Any:
a = converter.outputs[inputs[0]]
b = converter.outputs[inputs[1]]
if isinstance(a, tf.NodeDef):
a_out = _nodef_to_public_pond(converter, a)
else:
a_out = a
if isinstance(b, tf.NodeDef):
b_out = _nodef_to_public_pond(converter, b)
else:
b_out = b
return converter.protocol.add(a_out, b_out)
def _sub(converter, node: Any, inputs: List[str]) -> Any:
a = converter.outputs[inputs[0]]
b = converter.outputs[inputs[1]]
if isinstance(a, tf.NodeDef):
a_out = _nodef_to_public_pond(converter, a)
else:
a_out = a
if isinstance(b, tf.NodeDef):
b_out = _nodef_to_public_pond(converter, b)
else:
b_out = b
return converter.protocol.sub(a_out, b_out)
def _mul(converter, node: Any, inputs: List[str]) -> Any:
a = converter.outputs[inputs[0]]
b = converter.outputs[inputs[1]]
if isinstance(a, tf.NodeDef):
a_out = _nodef_to_public_pond(converter, a)
else:
a_out = a
if isinstance(b, tf.NodeDef):
b_out = _nodef_to_public_pond(converter, b)
else:
b_out = b
return converter.protocol.mul(a_out, b_out)
def _avgpool(converter, node: Any, inputs: List[str]) -> Any:
x_in = converter.outputs[inputs[0]]
ksize = node.attr["ksize"].list.i
s = node.attr["strides"].list.i
padding = node.attr["padding"].s.decode('ascii')
pool_size = [ksize[1], ksize[2]]
strides = [s[1], s[2]]
shape = [int(i) for i in x_in.shape]
channels_first = node.attr["data_format"].s.decode('ascii') == "NCHW"
avg = AveragePooling2D(shape, pool_size, strides, padding, channels_first)
out = avg.forward(x_in)
return out
def _concat(converter, node: Any, inputs: List[str]) -> Any:
input0 = converter.outputs[inputs[0]]
input1 = converter.outputs[inputs[1]]
axis = converter.outputs[inputs[2]]
axis_int = axis.attr["value"].tensor.int_val[0]
return converter.protocol.concat([input0, input1], axis_int)
def _batch_to_space_nd(converter, node, inputs):
x_in = converter.outputs[inputs[0]]
block_shape = converter.outputs[inputs[1]].attr["value"].tensor
crops = converter.outputs[inputs[2]].attr["value"].tensor
return converter.protocol.batch_to_space_nd(x_in, block_shape, crops)
def _space_to_batch_nd(converter, node, inputs):
x_in = converter.outputs[inputs[0]]
block_shape = converter.outputs[inputs[1]].attr["value"].tensor
paddings = converter.outputs[inputs[2]].attr["value"].tensor
return converter.protocol.space_to_batch_nd(x_in, block_shape, paddings)
def _flatten(converter, node, inputs):
x_in = converter.outputs[inputs[0]]
shape = x_in.shape.as_list()
non_batch = 1
for dim in shape[1:]:
non_batch *= dim
return converter.protocol.reshape(x_in, [-1, non_batch])
def _required_space_to_batch_paddings(converter, node, inputs: List[str]):
inputs_node = [converter.outputs[inputs[i]] for i in range(len(inputs))]
inputs_int32 = []
for x_in in inputs_node:
pvt_check = isinstance(x_in, PondPrivateTensor)
msk_check = isinstance(x_in, PondMaskedTensor)
if pvt_check or msk_check:
logging.warning(("Revealing private input: "
"required_space_to_batch_paddings assumes public "
"input."))
inputs_int32.append(tf.cast(x_in.reveal().decode(), tf.int32))
elif isinstance(x_in, tf.NodeDef):
inputs_int32.append(_nodef_to_numpy_array(x_in))
else:
raise TypeError("Unexpected input of type {}.".format(type(x_in)))
if len(inputs_int32) == 2:
input_shape, block_shape = inputs_int32
def inputter_pad():
pads, _ = tf.required_space_to_batch_paddings(input_shape, block_shape)
return tf.cast(pads, tf.float64)
def inputter_crop():
_, crops = tf.required_space_to_batch_paddings(input_shape, block_shape)
return tf.cast(crops, tf.float64)
else:
base_paddings, input_shape, block_shape = inputs_int32
def inputter_pad():
pads, _ = tf.required_space_to_batch_paddings(
input_shape,
block_shape,
base_paddings=base_paddings,
)
return tf.cast(pads, tf.float64)
def inputter_crop():
_, crops = tf.required_space_to_batch_paddings(
input_shape,
block_shape,
base_paddings=base_paddings,
)
return tf.cast(crops, tf.float64)
pad_private = converter.protocol.define_public_input(
converter.model_provider, inputter_pad)
crop_private = converter.protocol.define_public_input(
converter.model_provider, inputter_crop)
return (pad_private, crop_private)
def _argmax(converter, node, inputs):
x_in = converter.outputs[inputs[0]]
axis = converter.outputs[inputs[1]].attr["value"].tensor.int_val[0]
return converter.protocol.argmax(x_in, axis=axis)
def _slice(converter, node, inputs):
x_in = converter.outputs[inputs[0]]
begin = _nodef_to_numpy_array(converter.outputs[inputs[1]])
size = _nodef_to_numpy_array(converter.outputs[inputs[2]])
if isinstance(x_in, tf.NodeDef):
input_out = _nodef_to_private_pond(converter, x_in)
else:
input_out = x_in
end = np.zeros(len(begin))
input_shape = x_in.shape.as_list()
for i in range(len(end)):
if size[i] < 0:
end[i] = input_shape[i] - size[i] + 1
else:
end[i] = begin[i] + size[i]
return converter.protocol.strided_slice(input_out, begin, end)
def _nodef_to_public_pond(converter, x):
dtype = x.attr["dtype"].type
x_shape = [i.size for i in x.attr["value"].tensor.tensor_shape.dim]
if not x_shape:
if dtype == tf.float32:
nums = x.attr["value"].tensor.float_val
elif dtype == tf.float64:
nums = x.attr["value"].tensor.float_val
elif dtype == tf.int32:
nums = x.attr["value"].tensor.int_val
else:
raise TypeError("Unsupported dtype")
def inputter_fn():
return tf.constant(np.array(nums).reshape(1, 1))
else:
if dtype == tf.float32:
nums = array.array('f', x.attr["value"].tensor.tensor_content)
elif dtype == tf.float64:
nums = array.array('d', x.attr["value"].tensor.tensor_content)
elif dtype == tf.int32:
nums = array.array('i', x.attr["value"].tensor.tensor_content)
else:
raise TypeError("Unsupported dtype")
def inputter_fn():
return tf.constant(np.array(nums).reshape(x_shape))
x_public = converter.protocol.define_public_input(
converter.model_provider, inputter_fn)
return x_public
def _nodef_to_private_pond(converter, x):
dtype = x.attr["dtype"].type
warn_msg = "Unexpected dtype {} found at node {}"
err_msg = "Unsupported dtype {} found at node {}"
x_shape = [i.size for i in x.attr["value"].tensor.tensor_shape.dim]
if not x_shape:
if dtype == tf.float32:
nums = x.attr["value"].tensor.float_val
elif dtype == tf.float64:
nums = x.attr["value"].tensor.float_val
elif dtype == tf.int32:
logging.warning(warn_msg, dtype, x.name)
nums = x.attr["value"].tensor.int_val
else:
raise TypeError(err_msg.format(dtype, x.name))
def inputter_fn():
return tf.constant(np.array(nums).reshape(1, 1))
else:
if dtype == tf.float32:
nums = array.array('f', x.attr["value"].tensor.tensor_content)
elif dtype == tf.float64:
nums = array.array('d', x.attr["value"].tensor.tensor_content)
elif dtype == tf.int32:
logging.warning(warn_msg, dtype, x.name)
nums = array.array('i', x.attr["value"].tensor.tensor_content)
else:
raise TypeError(err_msg.format(dtype, x.name))
def inputter_fn():
return tf.constant(np.array(nums).reshape(x_shape))
x_private = converter.protocol.define_private_input(
converter.model_provider, inputter_fn)
return x_private
def _nodef_to_numpy_array(x):
dtype = x.attr["dtype"].type
x_shape = [i.size for i in x.attr["value"].tensor.tensor_shape.dim]
if dtype == tf.float32:
nums = array.array('f', x.attr["value"].tensor.tensor_content)
elif dtype == tf.float64:
nums = array.array('d', x.attr["value"].tensor.tensor_content)
elif dtype == tf.int32:
nums = array.array('i', x.attr["value"].tensor.tensor_content)
else:
raise TypeError("Unsupported dtype")
return np.array(nums).reshape(x_shape)
| true
| true
|
7909304117b4d88707f4d39962d2e98e40792ef1
| 1,205
|
py
|
Python
|
Semester I/Design and Analysis of Algorithm/Practical 04- Hiring Problem/HiringProblem.py
|
STreK7/MSc.-CS
|
78484f5bbce9f5149da680b19626eb139cc5ca90
|
[
"Apache-2.0"
] | null | null | null |
Semester I/Design and Analysis of Algorithm/Practical 04- Hiring Problem/HiringProblem.py
|
STreK7/MSc.-CS
|
78484f5bbce9f5149da680b19626eb139cc5ca90
|
[
"Apache-2.0"
] | null | null | null |
Semester I/Design and Analysis of Algorithm/Practical 04- Hiring Problem/HiringProblem.py
|
STreK7/MSc.-CS
|
78484f5bbce9f5149da680b19626eb139cc5ca90
|
[
"Apache-2.0"
] | 2
|
2021-10-12T14:01:39.000Z
|
2022-01-23T14:28:55.000Z
|
import random
def HiringProblem(score, n):
sample_size = int(round(n / e))
print(f"\nRejecting first {sample_size} candidates as sample")
#finding best candidate in the sample set for benchmark
best_candidate = 0;
for i in range(1, sample_size):
if (score[i] > score[best_candidate]):
best_candidate = i
#finding the first best candidate outside the sample set
for i in range(sample_size, n):
if (score[i] >= score[best_candidate]):
best_candidate = i
break
if (best_candidate >= int(sample_size)):
print(f"\nThe best Candidate found is {best_candidate+1} with score {score[best_candidate]}")
else:
print("Couldn't find a best candidate")
# Driver code
if __name__ == "__main__":
e = 2.71828
n = int(input("Enter number of candidates to simulate\n")) #total number of candidate
score = []
#populating the list
for i in range(n):
score.append(random.randint(1, n))
print("Candidate\tScore\n");
for i in range(n):
print(f"{i+1}\t\t{score[i]}");
HiringProblem(score, n);
| 29.390244
| 102
| 0.591701
|
import random
def HiringProblem(score, n):
sample_size = int(round(n / e))
print(f"\nRejecting first {sample_size} candidates as sample")
best_candidate = 0;
for i in range(1, sample_size):
if (score[i] > score[best_candidate]):
best_candidate = i
for i in range(sample_size, n):
if (score[i] >= score[best_candidate]):
best_candidate = i
break
if (best_candidate >= int(sample_size)):
print(f"\nThe best Candidate found is {best_candidate+1} with score {score[best_candidate]}")
else:
print("Couldn't find a best candidate")
# Driver code
if __name__ == "__main__":
e = 2.71828
n = int(input("Enter number of candidates to simulate\n")) #total number of candidate
score = []
#populating the list
for i in range(n):
score.append(random.randint(1, n))
print("Candidate\tScore\n");
for i in range(n):
print(f"{i+1}\t\t{score[i]}");
HiringProblem(score, n);
| true
| true
|
7909310895915b039888ed90749f0aacd8c9e71b
| 1,174
|
py
|
Python
|
heart.py
|
xxninjabunnyxx/pixel_pop_heart_challenge
|
94fabffa969f4ab2374c68a3a722d975ee940001
|
[
"MIT"
] | null | null | null |
heart.py
|
xxninjabunnyxx/pixel_pop_heart_challenge
|
94fabffa969f4ab2374c68a3a722d975ee940001
|
[
"MIT"
] | 1
|
2022-02-18T15:24:57.000Z
|
2022-02-18T15:24:57.000Z
|
heart.py
|
xxninjabunnyxx/pixel-pop-heart-challenge
|
94fabffa969f4ab2374c68a3a722d975ee940001
|
[
"MIT"
] | null | null | null |
def pixel(num):
def f(s):
return s + '\033[{}m \033[0m'.format(num)
return f
def new_line(s):
return s + u"\n"
def build(*steps, string=""):
for step in steps:
string = step(string)
return string
def main():
cyan = pixel(46)
space = pixel('08')
heart = [new_line,
space, space, cyan, cyan, space, space, space, cyan, cyan, new_line,
space, cyan, cyan, cyan, cyan, space, cyan, cyan, cyan, cyan, new_line,
cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, new_line,
cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, new_line,
cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, new_line,
space, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, new_line,
space, space, cyan, cyan, cyan, cyan, cyan, cyan, cyan, new_line,
space, space, space, cyan, cyan, cyan, cyan, cyan, new_line,
space, space, space, space, cyan, cyan, cyan, new_line,
space, space, space, space, space, cyan, new_line]
print(build(*heart))
if __name__ == '__main__':
main()
| 36.6875
| 87
| 0.579216
|
def pixel(num):
def f(s):
return s + '\033[{}m \033[0m'.format(num)
return f
def new_line(s):
return s + u"\n"
def build(*steps, string=""):
for step in steps:
string = step(string)
return string
def main():
cyan = pixel(46)
space = pixel('08')
heart = [new_line,
space, space, cyan, cyan, space, space, space, cyan, cyan, new_line,
space, cyan, cyan, cyan, cyan, space, cyan, cyan, cyan, cyan, new_line,
cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, new_line,
cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, new_line,
cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, new_line,
space, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, new_line,
space, space, cyan, cyan, cyan, cyan, cyan, cyan, cyan, new_line,
space, space, space, cyan, cyan, cyan, cyan, cyan, new_line,
space, space, space, space, cyan, cyan, cyan, new_line,
space, space, space, space, space, cyan, new_line]
print(build(*heart))
if __name__ == '__main__':
main()
| true
| true
|
7909314b0b6a03e6197837bea805cb1f5b58eb77
| 310
|
py
|
Python
|
api/app/crud/crud_group.py
|
LukasPatzke/ambientHUE
|
9a66cc965c25bc93c84e423dd74c48aa6737c453
|
[
"MIT"
] | 2
|
2020-08-06T16:39:39.000Z
|
2021-05-04T18:59:11.000Z
|
api/app/crud/crud_group.py
|
LukasPatzke/ambientHUE
|
9a66cc965c25bc93c84e423dd74c48aa6737c453
|
[
"MIT"
] | null | null | null |
api/app/crud/crud_group.py
|
LukasPatzke/ambientHUE
|
9a66cc965c25bc93c84e423dd74c48aa6737c453
|
[
"MIT"
] | null | null | null |
from sqlalchemy.orm import Session
from .base import CRUDBase
from app.models import Group
from app.schemas import GroupCreate, GroupUpdate
class CRUDGroup(CRUDBase[Group, GroupCreate, GroupUpdate]):
def count(self, db: Session) -> int:
return db.query(Group).count()
group = CRUDGroup(Group)
| 22.142857
| 59
| 0.751613
|
from sqlalchemy.orm import Session
from .base import CRUDBase
from app.models import Group
from app.schemas import GroupCreate, GroupUpdate
class CRUDGroup(CRUDBase[Group, GroupCreate, GroupUpdate]):
def count(self, db: Session) -> int:
return db.query(Group).count()
group = CRUDGroup(Group)
| true
| true
|
7909321c9cd617ce693c13f3a722cffcce227512
| 2,656
|
py
|
Python
|
oct/ansible/openshift-ansible/utils/setup.py
|
staebler/origin-ci-tool
|
2cb86c3cad7a37450e711571ac75997118c899e5
|
[
"Apache-2.0"
] | 23
|
2017-01-06T21:32:09.000Z
|
2022-03-14T17:14:49.000Z
|
oct/ansible/openshift-ansible/utils/setup.py
|
staebler/origin-ci-tool
|
2cb86c3cad7a37450e711571ac75997118c899e5
|
[
"Apache-2.0"
] | 129
|
2017-01-06T18:29:51.000Z
|
2022-01-27T17:37:21.000Z
|
oct/ansible/openshift-ansible/utils/setup.py
|
staebler/origin-ci-tool
|
2cb86c3cad7a37450e711571ac75997118c899e5
|
[
"Apache-2.0"
] | 52
|
2017-01-06T16:03:49.000Z
|
2022-01-24T18:58:58.000Z
|
"""A setuptools based setup module.
"""
# Always prefer setuptools over distutils
from setuptools import setup
setup(
name='ooinstall',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version="3.0.0",
description="Ansible wrapper for OpenShift Enterprise 3 installation.",
# The project's main homepage.
url="http://github.com/openshift/openshift-extras/tree/enterprise-3.0/oo-install",
# Author details
author="openshift@redhat.com",
author_email="OpenShift",
# Choose your license
license="Apache 2.0",
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Topic :: Utilities',
],
# What does your project relate to?
keywords='oo-install setuptools development',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
#packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
packages=['ooinstall'],
package_dir={'ooinstall': 'src/ooinstall'},
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['click', 'PyYAML'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
#extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
#},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
'ooinstall': ['ansible.cfg', 'ansible-quiet.cfg', 'ansible_plugins/*'],
},
tests_require=['nose'],
test_suite='nose.collector',
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'oo-install=ooinstall.cli_installer:cli',
],
},
)
| 32.790123
| 86
| 0.670557
|
from setuptools import setup
setup(
name='ooinstall',
version="3.0.0",
description="Ansible wrapper for OpenShift Enterprise 3 installation.",
url="http://github.com/openshift/openshift-extras/tree/enterprise-3.0/oo-install",
# Author details
author="openshift@redhat.com",
author_email="OpenShift",
# Choose your license
license="Apache 2.0",
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Topic :: Utilities',
],
# What does your project relate to?
keywords='oo-install setuptools development',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
#packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
packages=['ooinstall'],
package_dir={'ooinstall': 'src/ooinstall'},
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
install_requires=['click', 'PyYAML'],
package_data={
'ooinstall': ['ansible.cfg', 'ansible-quiet.cfg', 'ansible_plugins/*'],
},
tests_require=['nose'],
test_suite='nose.collector',
entry_points={
'console_scripts': [
'oo-install=ooinstall.cli_installer:cli',
],
},
)
| true
| true
|
7909333124983a76b904ffaa880a21addb7e07ba
| 8,739
|
py
|
Python
|
qa/pull-tester/rpc-tests.py
|
LumoCash2018/LumoCash
|
5fbaa077d63a643ce484ddf4fdada1fbc65651c6
|
[
"MIT"
] | null | null | null |
qa/pull-tester/rpc-tests.py
|
LumoCash2018/LumoCash
|
5fbaa077d63a643ce484ddf4fdada1fbc65651c6
|
[
"MIT"
] | 1
|
2018-10-14T23:28:11.000Z
|
2018-10-14T23:28:11.000Z
|
qa/pull-tester/rpc-tests.py
|
LumoCash2018/LumoCash
|
5fbaa077d63a643ce484ddf4fdada1fbc65651c6
|
[
"MIT"
] | 1
|
2018-10-12T18:35:55.000Z
|
2018-10-12T18:35:55.000Z
|
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
should run the tests.
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
from tests_config import *
#If imported values are not defined then set to zero (or disabled)
if 'ENABLE_WALLET' not in vars():
ENABLE_WALLET=0
if 'ENABLE_BITCOIND' not in vars():
ENABLE_BITCOIND=0
if 'ENABLE_UTILS' not in vars():
ENABLE_UTILS=0
if 'ENABLE_ZMQ' not in vars():
ENABLE_ZMQ=0
ENABLE_COVERAGE=0
#Create a set to store arguments and create the passOn string
opts = set()
passOn = ""
p = re.compile("^--")
bold = ("","")
if (os.name == 'posix'):
bold = ('\033[0m', '\033[1m')
for arg in sys.argv[1:]:
if arg == '--coverage':
ENABLE_COVERAGE = 1
elif (p.match(arg) or arg == "-h"):
passOn += " " + arg
else:
opts.add(arg)
#Set env vars
buildDir = BUILDDIR
if "LUMOD" not in os.environ:
os.environ["LUMOD"] = buildDir + '/src/lumocashd' + EXEEXT
if "LUMOCLI" not in os.environ:
os.environ["LUMOCLI"] = buildDir + '/src/lumocash-cli' + EXEEXT
if EXEEXT == ".exe" and "-win" not in opts:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print "Win tests currently disabled by default. Use -win option to enable"
sys.exit(0)
if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1):
print "No rpc tests to run. Wallet, utils, and bitcoind must all be enabled"
sys.exit(0)
# python-zmq may not be installed. Handle this gracefully and with some helpful info
if ENABLE_ZMQ:
try:
import zmq
except ImportError as e:
print("ERROR: \"import zmq\" failed. Set ENABLE_ZMQ=0 or " \
"to run zmq tests, see dependency info in /qa/README.md.")
raise e
#Tests
testScripts = [
'bip68-112-113-p2p.py',
'wallet.py',
'listtransactions.py',
'receivedby.py',
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
'getchaintips.py',
'rawtransactions.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
'mempool_limit.py',
'httpbasics.py',
'multi_rpc.py',
'zapwallettxes.py',
'proxy_test.py',
'merkle_blocks.py',
'fundrawtransaction.py',
'signrawtransactions.py',
'walletbackup.py',
'nodehandling.py',
'reindex.py',
'addressindex.py',
'timestampindex.py',
'spentindex.py',
'decodescript.py',
'p2p-fullblocktest.py', # NOTE: needs lumocash_hash to pass
'blockchain.py',
'disablewallet.py',
'sendheaders.py', # NOTE: needs lumocash_hash to pass
'keypool.py',
'prioritise_transaction.py',
'invalidblockrequest.py', # NOTE: needs lumocash_hash to pass
'invalidtxrequest.py', # NOTE: needs lumocash_hash to pass
'abandonconflict.py',
'p2p-versionbits-warning.py',
]
if ENABLE_ZMQ:
testScripts.append('zmq_test.py')
testScriptsExt = [
'bip9-softforks.py',
'bip65-cltv.py',
'bip65-cltv-p2p.py', # NOTE: needs lumocash_hash to pass
'bip68-sequence.py',
'bipdersig-p2p.py', # NOTE: needs lumocash_hash to pass
'bipdersig.py',
'getblocktemplate_longpoll.py', # FIXME: "socket.error: [Errno 54] Connection reset by peer" on my Mac, same as https://github.com/bitcoin/bitcoin/issues/6651
'getblocktemplate_proposals.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
# 'pruning.py', # Prune mode is incompatible with -txindex.
'forknotify.py',
'invalidateblock.py',
# 'rpcbind_test.py', #temporary, bug in libevent, see #6655
'smartfees.py',
'maxblocksinflight.py',
'p2p-acceptblock.py', # NOTE: needs lumocash_hash to pass
'mempool_packages.py',
'maxuploadtarget.py',
# 'replace-by-fee.py', # RBF is disabled in LumoCash
]
def runtests():
coverage = None
if ENABLE_COVERAGE:
coverage = RPCCoverage()
print("Initializing coverage directory at %s\n" % coverage.dir)
rpcTestDir = buildDir + '/qa/rpc-tests/'
run_extended = '-extended' in opts
cov_flag = coverage.flag if coverage else ''
flags = " --srcdir %s/src %s %s" % (buildDir, cov_flag, passOn)
#Run Tests
for i in range(len(testScripts)):
if (len(opts) == 0
or (len(opts) == 1 and "-win" in opts )
or run_extended
or testScripts[i] in opts
or re.sub(".py$", "", testScripts[i]) in opts ):
print("Running testscript %s%s%s ..." % (bold[1], testScripts[i], bold[0]))
time0 = time.time()
subprocess.check_call(
rpcTestDir + testScripts[i] + flags, shell=True)
print("Duration: %s s\n" % (int(time.time() - time0)))
# exit if help is called so we print just one set of
# instructions
p = re.compile(" -h| --help")
if p.match(passOn):
sys.exit(0)
# Run Extended Tests
for i in range(len(testScriptsExt)):
if (run_extended or testScriptsExt[i] in opts
or re.sub(".py$", "", testScriptsExt[i]) in opts):
print(
"Running 2nd level testscript "
+ "%s%s%s ..." % (bold[1], testScriptsExt[i], bold[0]))
time0 = time.time()
subprocess.check_call(
rpcTestDir + testScriptsExt[i] + flags, shell=True)
print("Duration: %s s\n" % (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
print("Cleaning up coverage data")
coverage.cleanup()
class RPCCoverage(object):
"""
Coverage reporting utilities for pull-tester.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: qa/rpc-tests/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir %s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `qa/rpc-tests/test-framework/coverage.py`
REFERENCE_FILENAME = 'rpc_interface.txt'
COVERAGE_FILE_PREFIX = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, REFERENCE_FILENAME)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(COVERAGE_FILE_PREFIX):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
runtests()
| 31.663043
| 163
| 0.638174
|
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
should run the tests.
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
from tests_config import *
if 'ENABLE_WALLET' not in vars():
ENABLE_WALLET=0
if 'ENABLE_BITCOIND' not in vars():
ENABLE_BITCOIND=0
if 'ENABLE_UTILS' not in vars():
ENABLE_UTILS=0
if 'ENABLE_ZMQ' not in vars():
ENABLE_ZMQ=0
ENABLE_COVERAGE=0
opts = set()
passOn = ""
p = re.compile("^--")
bold = ("","")
if (os.name == 'posix'):
bold = ('\033[0m', '\033[1m')
for arg in sys.argv[1:]:
if arg == '--coverage':
ENABLE_COVERAGE = 1
elif (p.match(arg) or arg == "-h"):
passOn += " " + arg
else:
opts.add(arg)
buildDir = BUILDDIR
if "LUMOD" not in os.environ:
os.environ["LUMOD"] = buildDir + '/src/lumocashd' + EXEEXT
if "LUMOCLI" not in os.environ:
os.environ["LUMOCLI"] = buildDir + '/src/lumocash-cli' + EXEEXT
if EXEEXT == ".exe" and "-win" not in opts:
    print "Win tests currently disabled by default. Use -win option to enable"
sys.exit(0)
if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1):
print "No rpc tests to run. Wallet, utils, and bitcoind must all be enabled"
sys.exit(0)
if ENABLE_ZMQ:
try:
import zmq
except ImportError as e:
print("ERROR: \"import zmq\" failed. Set ENABLE_ZMQ=0 or " \
"to run zmq tests, see dependency info in /qa/README.md.")
raise e
testScripts = [
'bip68-112-113-p2p.py',
'wallet.py',
'listtransactions.py',
'receivedby.py',
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
'getchaintips.py',
'rawtransactions.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
'mempool_limit.py',
'httpbasics.py',
'multi_rpc.py',
'zapwallettxes.py',
'proxy_test.py',
'merkle_blocks.py',
'fundrawtransaction.py',
'signrawtransactions.py',
'walletbackup.py',
'nodehandling.py',
'reindex.py',
'addressindex.py',
'timestampindex.py',
'spentindex.py',
'decodescript.py',
'p2p-fullblocktest.py',
'blockchain.py',
'disablewallet.py',
'sendheaders.py',
'keypool.py',
'prioritise_transaction.py',
'invalidblockrequest.py',
'invalidtxrequest.py',
'abandonconflict.py',
'p2p-versionbits-warning.py',
]
if ENABLE_ZMQ:
testScripts.append('zmq_test.py')
testScriptsExt = [
'bip9-softforks.py',
'bip65-cltv.py',
'bip65-cltv-p2p.py',
'bip68-sequence.py',
'bipdersig-p2p.py',
'bipdersig.py',
'getblocktemplate_longpoll.py',
'getblocktemplate_proposals.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
    'forknotify.py',
    'invalidateblock.py',
    'smartfees.py',
    'maxblocksinflight.py',
'p2p-acceptblock.py',
'mempool_packages.py',
'maxuploadtarget.py',
]
def runtests():
    coverage = None
if ENABLE_COVERAGE:
coverage = RPCCoverage()
print("Initializing coverage directory at %s\n" % coverage.dir)
rpcTestDir = buildDir + '/qa/rpc-tests/'
run_extended = '-extended' in opts
cov_flag = coverage.flag if coverage else ''
flags = " --srcdir %s/src %s %s" % (buildDir, cov_flag, passOn)
for i in range(len(testScripts)):
if (len(opts) == 0
or (len(opts) == 1 and "-win" in opts )
or run_extended
or testScripts[i] in opts
or re.sub(".py$", "", testScripts[i]) in opts ):
print("Running testscript %s%s%s ..." % (bold[1], testScripts[i], bold[0]))
time0 = time.time()
subprocess.check_call(
rpcTestDir + testScripts[i] + flags, shell=True)
print("Duration: %s s\n" % (int(time.time() - time0)))
p = re.compile(" -h| --help")
if p.match(passOn):
sys.exit(0)
for i in range(len(testScriptsExt)):
if (run_extended or testScriptsExt[i] in opts
or re.sub(".py$", "", testScriptsExt[i]) in opts):
print(
"Running 2nd level testscript "
+ "%s%s%s ..." % (bold[1], testScriptsExt[i], bold[0]))
time0 = time.time()
subprocess.check_call(
rpcTestDir + testScriptsExt[i] + flags, shell=True)
print("Duration: %s s\n" % (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
print("Cleaning up coverage data")
coverage.cleanup()
class RPCCoverage(object):
"""
Coverage reporting utilities for pull-tester.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: qa/rpc-tests/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir %s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `qa/rpc-tests/test-framework/coverage.py`
REFERENCE_FILENAME = 'rpc_interface.txt'
COVERAGE_FILE_PREFIX = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, REFERENCE_FILENAME)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(COVERAGE_FILE_PREFIX):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
runtests()
| false
| true
|
790933eb734b7a0e9cc5ef4d08940e8e25bb9c30
| 378
|
py
|
Python
|
sample/basic_quotes.py
|
wangjild/mootdx
|
1471a708ba77e7d79de74f1ea763b3c8a060626f
|
[
"MIT"
] | 2
|
2020-02-29T03:25:15.000Z
|
2020-07-09T10:30:49.000Z
|
sample/basic_quotes.py
|
wangjild/mootdx
|
1471a708ba77e7d79de74f1ea763b3c8a060626f
|
[
"MIT"
] | 1
|
2020-07-14T08:46:14.000Z
|
2020-07-14T09:17:19.000Z
|
sample/basic_quotes.py
|
wangjild/mootdx
|
1471a708ba77e7d79de74f1ea763b3c8a060626f
|
[
"MIT"
] | 2
|
2021-03-10T02:54:00.000Z
|
2021-03-29T09:03:15.000Z
|
# -*- coding: utf-8 -*-
from mootdx.quotes import Quotes
client = Quotes.factory(market='std') # 标准市场
# client = Quotes.factory(market='ext', multithread=True, heartbeat=True) # 扩展市场
quote = client.bars(symbol='600036', frequency=9, offset=10)
print(quote)
quote = client.index(symbol='000001', frequency=9)
print(quote)
quote = client.minute(symbol='000001')
print(quote)
| 25.2
| 80
| 0.719577
|
from mootdx.quotes import Quotes
client = Quotes.factory(market='std')
quote = client.bars(symbol='600036', frequency=9, offset=10)
print(quote)
quote = client.index(symbol='000001', frequency=9)
print(quote)
quote = client.minute(symbol='000001')
print(quote)
| true
| true
|
7909364c19c40145e3dff583813512095cee4f88
| 4,340
|
py
|
Python
|
{{cookiecutter.project_name}}/server/settings/components/common.py
|
vovanbo/wemake-django-template
|
5e7a77e335d647eaf209db5050284bc13f3200d1
|
[
"MIT"
] | null | null | null |
{{cookiecutter.project_name}}/server/settings/components/common.py
|
vovanbo/wemake-django-template
|
5e7a77e335d647eaf209db5050284bc13f3200d1
|
[
"MIT"
] | null | null | null |
{{cookiecutter.project_name}}/server/settings/components/common.py
|
vovanbo/wemake-django-template
|
5e7a77e335d647eaf209db5050284bc13f3200d1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Django settings for server project.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their config, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
from typing import Tuple
from server.settings.components import BASE_DIR, config
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
SECRET_KEY = config('DJANGO_SECRET_KEY')
# Application definition:
INSTALLED_APPS: Tuple[str, ...] = (
# Default django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# django-admin:
'django.contrib.admin',
'django.contrib.admindocs',
# Security:
'axes',
# Your apps go here:
'server.main_app',
)
MIDDLEWARE: Tuple[str, ...] = (
# Content Security Policy:
'csp.middleware.CSPMiddleware',
# Django:
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'server.urls'
WSGI_APPLICATION = 'server.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
# Choices are: postgresql_psycopg2, mysql, sqlite3, oracle
'ENGINE': 'django.db.backends.postgresql_psycopg2',
# Database name or filepath if using 'sqlite3':
'NAME': config('POSTGRES_DB'),
# You don't need these settings if using 'sqlite3':
'USER': config('POSTGRES_USER'),
'PASSWORD': config('POSTGRES_PASSWORD'),
'HOST': config('DJANGO_DATABASE_HOST'),
'PORT': config('DJANGO_DATABASE_PORT', cast=int),
'CONN_MAX_AGE': config('CONN_MAX_AGE', cast=int, default=60),
},
}
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = True
LANGUAGES = (
('en', 'English'),
('ru', 'Russian'),
)
LOCALE_PATHS = (
'locale/',
)
USE_TZ = True
TIME_ZONE = 'UTC'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Templates
# https://docs.djangoproject.com/en/1.11/ref/templates/api
TEMPLATES = [{
'APP_DIRS': True,
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
# Contains plain text templates, like `robots.txt`:
BASE_DIR.joinpath('server', 'templates'),
],
'OPTIONS': {
'context_processors': [
# default template context processors
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request',
],
},
}]
# Media files
# Media-root is commonly changed in production
# (see development.py and production.py).
MEDIA_URL = '/media/'
MEDIA_ROOT = BASE_DIR.joinpath('media')
# Django default authentication system.
# https://docs.djangoproject.com/en/1.11/topics/auth/
# AUTH_USER_MODEL = 'auth_app.User'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
)
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.Argon2PasswordHasher',
]
| 26.30303
| 72
| 0.698387
|
from typing import Tuple
from server.settings.components import BASE_DIR, config
SECRET_KEY = config('DJANGO_SECRET_KEY')
INSTALLED_APPS: Tuple[str, ...] = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
'axes',
'server.main_app',
)
MIDDLEWARE: Tuple[str, ...] = (
'csp.middleware.CSPMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'server.urls'
WSGI_APPLICATION = 'server.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('POSTGRES_DB'),
'USER': config('POSTGRES_USER'),
'PASSWORD': config('POSTGRES_PASSWORD'),
'HOST': config('DJANGO_DATABASE_HOST'),
'PORT': config('DJANGO_DATABASE_PORT', cast=int),
'CONN_MAX_AGE': config('CONN_MAX_AGE', cast=int, default=60),
},
}
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = True
LANGUAGES = (
('en', 'English'),
('ru', 'Russian'),
)
LOCALE_PATHS = (
'locale/',
)
USE_TZ = True
TIME_ZONE = 'UTC'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Templates
# https://docs.djangoproject.com/en/1.11/ref/templates/api
TEMPLATES = [{
'APP_DIRS': True,
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
# Contains plain text templates, like `robots.txt`:
BASE_DIR.joinpath('server', 'templates'),
],
'OPTIONS': {
'context_processors': [
# default template context processors
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request',
],
},
}]
# Media files
# Media-root is commonly changed in production
# (see development.py and production.py).
MEDIA_URL = '/media/'
MEDIA_ROOT = BASE_DIR.joinpath('media')
# Django default authentication system.
# https://docs.djangoproject.com/en/1.11/topics/auth/
# AUTH_USER_MODEL = 'auth_app.User'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
)
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.Argon2PasswordHasher',
]
| true
| true
|
790936be6ee5889577f5423e7fb92cb06eff620d
| 424
|
py
|
Python
|
web/core/__init__.py
|
maximest-pierre/WebCore
|
543bfb79c0737917d1bd2a148eb61761ab6f6319
|
[
"MIT"
] | 56
|
2015-05-13T16:08:06.000Z
|
2021-12-26T22:24:46.000Z
|
web/core/__init__.py
|
maximest-pierre/WebCore
|
543bfb79c0737917d1bd2a148eb61761ab6f6319
|
[
"MIT"
] | 104
|
2015-01-20T23:55:28.000Z
|
2021-03-01T03:29:47.000Z
|
web/core/__init__.py
|
maximest-pierre/WebCore
|
543bfb79c0737917d1bd2a148eb61761ab6f6319
|
[
"MIT"
] | 12
|
2015-05-22T15:46:39.000Z
|
2021-09-16T00:38:54.000Z
|
# encoding: utf-8
# ## Imports
from threading import local as __local
# Expose these as importable from the top-level `web.core` namespace.
from .application import Application
from .util import lazy
# ## Module Globals
__all__ = ['local', 'Application', 'lazy'] # Symbols exported by this package.
# This is to support the web.ext.local extension, and allow for early importing of the variable.
local = __local()
| 21.2
| 96
| 0.735849
|
from threading import local as __local
from .application import Application
from .util import lazy
__all__ = ['local', 'Application', 'lazy']
local = __local()
| true
| true
|
790936d8b88635db54ab70587e54a7c4d151073a
| 51,129
|
py
|
Python
|
userbot/modules/memes.py
|
konsolxnxx/Petercord-Userbotilham
|
ef9e98a913f857c967fdf0528bab405d72e2426c
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/memes.py
|
konsolxnxx/Petercord-Userbotilham
|
ef9e98a913f857c967fdf0528bab405d72e2426c
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/memes.py
|
konsolxnxx/Petercord-Userbotilham
|
ef9e98a913f857c967fdf0528bab405d72e2426c
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1
|
2021-03-26T09:08:24.000Z
|
2021-03-26T09:08:24.000Z
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
""" Userbot module for having some fun with people. """
import os
import urllib.request
import requests
from re import sub
from cowpy import cow
from asyncio import sleep
from collections import deque
from random import choice, getrandbits, randint
from userbot import bot, CMD_HELP
from userbot.events import register
from userbot.modules.admin import get_user_from_event
# ================= CONSTANT =================
METOOSTR = [
"Aku Juga Terimakasih",
"Haha Iya, Aku Juga",
"Sama Haha",
"Aku Juga Gabut",
"Sama Sini",
"Haha Iya",
"Aku Juga",
]
ZALG_LIST = [[
"̖",
" ̗",
" ̘",
" ̙",
" ̜",
" ̝",
" ̞",
" ̟",
" ̠",
" ̤",
" ̥",
" ̦",
" ̩",
" ̪",
" ̫",
" ̬",
" ̭",
" ̮",
" ̯",
" ̰",
" ̱",
" ̲",
" ̳",
" ̹",
" ̺",
" ̻",
" ̼",
" ͅ",
" ͇",
" ͈",
" ͉",
" ͍",
" ͎",
" ͓",
" ͔",
" ͕",
" ͖",
" ͙",
" ͚",
" ",
],
[
" ̍",
" ̎",
" ̄",
" ̅",
" ̿",
" ̑",
" ̆",
" ̐",
" ͒",
" ͗",
" ͑",
" ̇",
" ̈",
" ̊",
" ͂",
" ̓",
" ̈́",
" ͊",
" ͋",
" ͌",
" ̃",
" ̂",
" ̌",
" ͐",
" ́",
" ̋",
" ̏",
" ̽",
" ̉",
" ͣ",
" ͤ",
" ͥ",
" ͦ",
" ͧ",
" ͨ",
" ͩ",
" ͪ",
" ͫ",
" ͬ",
" ͭ",
" ͮ",
" ͯ",
" ̾",
" ͛",
" ͆",
" ̚",
],
[
" ̕",
" ̛",
" ̀",
" ́",
" ͘",
" ̡",
" ̢",
" ̧",
" ̨",
" ̴",
" ̵",
" ̶",
" ͜",
" ͝",
" ͞",
" ͟",
" ͠",
" ͢",
" ̸",
" ̷",
" ͡",
]]
EMOJIS = [
"😂",
"😂",
"👌",
"✌",
"💞",
"👍",
"👌",
"💯",
"🎶",
"👀",
"😂",
"👓",
"👏",
"👐",
"🍕",
"💥",
"🍴",
"💦",
"💦",
"🍑",
"🍆",
"😩",
"😏",
"👉👌",
"👀",
"👅",
"😩",
"🚰",
]
INSULT_STRINGS = [
"Jangan minum dan mengetik.",
"Saya pikir Anda harus pulang atau lebih baik ke rumah sakit jiwa.",
"Perintah tidak ditemukan. Sama seperti otak Anda.",
"Apakah kamu sadar bahwa kamu membodohi dirimu sendiri? Ternyata tidak.",
"Anda bisa mengetik lebih baik dari itu.",
"Bot aturan 544 bagian 9 mencegah saya membalas orang bodoh seperti Anda.",
"Maaf, kami tidak menjual otak.",
"Percayalah kamu tidak normal.",
"Saya yakin otak Anda terasa seperti baru, mengingat Anda tidak pernah menggunakannya.",
"Jika saya ingin bunuh diri, saya akan meningkatkan ego Anda dan melompat ke IQ Anda.",
"Zombie memakan otak ... kamu aman.",
"Anda tidak berevolusi dari kera, mereka berevolusi dari Anda.",
"Kembalilah dan bicara padaku ketika IQ mu melebihi umurmu.",
"Saya tidak mengatakan Anda bodoh, saya hanya mengatakan bahwa Anda tidak beruntung dalam hal berpikir.",
"Kamu berbicara bahasa apa? Karena terdengar seperti omong kosong.",
"Kebodohan bukanlah kejahatan jadi kamu bebas pergi.",
"Anda adalah bukti bahwa evolusi BISA mundur.",
"Aku akan bertanya berapa umurmu tapi aku tahu kamu tidak bisa menghitung setinggi itu.",
"Sebagai orang luar, apa pendapat Anda tentang umat manusia?",
"Otak bukanlah segalanya. Dalam kasusmu mereka bukan apa-apa.",
"Biasanya orang hidup dan belajar. Kamu hidup saja.",
"Aku tidak tahu apa yang membuatmu begitu bodoh, tapi itu benar-benar berhasil.",
"Teruslah berbicara, suatu hari nanti kamu akan mengatakan sesuatu yang cerdas! (Meskipun aku ragu)"
"Shock saya, katakan sesuatu yang cerdas.",
"IQ Anda lebih rendah dari ukuran sepatu Anda.",
"Aduh! Neurotransmiter Anda tidak lagi bekerja.",
"Apakah kamu gila kamu bodoh.",
"Setiap orang berhak untuk menjadi bodoh tetapi Anda menyalahgunakan hak istimewa tersebut.",
"Maaf aku menyakiti perasaanmu saat menyebutmu bodoh. Kupikir kamu sudah tahu itu.",
"Anda harus mencoba mencicipi sianida.",
"Enzim Anda dimaksudkan untuk mencerna racun tikus.",
"Kamu harus mencoba tidur selamanya.",
"Ambil pistol dan tembak dirimu sendiri.",
"Anda bisa membuat rekor dunia dengan melompat dari pesawat tanpa parasut.",
"Berhenti berbicara BS dan melompat di depan kereta peluru yang sedang berjalan.",
"Cobalah mandi dengan Hydrochloric Acid daripada air.",
"Coba ini: jika Anda menahan napas di bawah air selama satu jam, Anda dapat menahannya selamanya.",
"Go Green! Berhenti menghirup Oksigen.",
"Tuhan sedang mencarimu. Kamu harus pergi untuk bertemu dengannya.",
"berikan 100% mu. Sekarang, pergi donor darah.",
"Cobalah melompat dari gedung seratus lantai tetapi Anda hanya dapat melakukannya sekali.",
"Anda harus menyumbangkan otak Anda melihat bahwa Anda tidak pernah menggunakannya.",
"Relawan untuk target dalam jarak tembak.",
"Tembak kepala itu menyenangkan. Dapatkan dirimu sendiri.",
"Anda harus mencoba berenang dengan hiu putih besar.",
"Anda harus mengecat diri Anda dengan warna merah dan berlari dalam bull marathon.",
"Anda bisa tetap di bawah air selama sisa hidup Anda tanpa harus kembali lagi.",
"Bagaimana kalau kamu berhenti bernapas selama 1 hari? Itu akan bagus.",
"Cobalah memprovokasi harimau saat kalian berdua berada di dalam sangkar.",
"Sudahkah Anda mencoba menembak diri Anda sendiri setinggi 100m menggunakan kanon.",
"Anda harus mencoba menahan TNT di mulut Anda dan menyalakannya.",
"Cobalah bermain menangkap dan melempar dengan RDX itu menyenangkan.",
"Saya dengar phogine beracun tapi saya rasa Anda tidak keberatan menghirupnya untuk bersenang-senang.",
"Luncurkan diri Anda ke luar angkasa sambil melupakan oksigen di Bumi.",
"Kamu harus mencoba bermain ular tangga, dengan ular sungguhan dan tanpa tangga.",
"Menari telanjang di beberapa kabel HT.",
"Gunung Berapi Aktif adalah kolam renang terbaik untuk Anda.",
"Anda harus mencoba mandi air panas di gunung berapi.",
"Cobalah untuk menghabiskan satu hari di peti mati dan itu akan menjadi milikmu selamanya.",
"Pukul Uranium dengan neutron yang bergerak lambat di hadapanmu. Ini akan menjadi pengalaman yang berharga.",
"Anda bisa menjadi orang pertama yang menginjak matahari. Selamat mencoba.",
]
UWUS = [
"(・`ω´・)",
";;w;;",
"owo",
"UwU",
">w<",
"^w^",
r"\(^o\) (/o^)/",
"( ^ _ ^)∠☆",
"(ô_ô)",
"~:o",
";-;",
"(*^*)",
"(>_",
"(♥_♥)",
"*(^O^)*",
"((+_+))",
]
IWIS = [
"┐(´д`)┌",
"┐(´~`)┌",
"┐(´ー`)┌",
"┐( ̄ヘ ̄)┌",
"╮(╯∀╰)╭",
"╮(╯_╰)╭",
"┐(´д`)┌",
"┐(´∀`)┌",
"ʅ(́◡◝)ʃ",
"┐(゚~゚)┌",
"┐('д')┌",
"┐(‘~`;)┌",
"ヘ(´-`;)ヘ",
"┐( -“-)┌",
"ʅ(´◔౪◔)ʃ",
"ヽ(゜~゜o)ノ",
"ヽ(~~~ )ノ",
"┐(~ー~;)┌",
"┐(-。ー;)┌",
r"¯\_(ツ)_/¯",
r"¯\_(⊙_ʖ⊙)_/¯",
r"¯\_༼ ಥ ‿ ಥ ༽_/¯",
"乁( ⁰͡ Ĺ̯ ⁰͡ ) ㄏ",
]
FACEREACTS = [
"ʘ‿ʘ",
"ヾ(-_- )ゞ",
"(っ˘ڡ˘ς)",
"(´ж`ς)",
"( ಠ ʖ̯ ಠ)",
"(° ͜ʖ͡°)╭∩╮",
"(ᵟຶ︵ ᵟຶ)",
"(งツ)ว",
"ʚ(•`",
"(っ▀¯▀)つ",
"(◠﹏◠)",
"( ͡ಠ ʖ̯ ͡ಠ)",
"( ఠ ͟ʖ ఠ)",
"(∩`-´)⊃━☆゚.*・。゚",
"(⊃。•́‿•̀。)⊃",
"(._.)",
"{•̃_•̃}",
"(ᵔᴥᵔ)",
"♨_♨",
"⥀.⥀",
"ح˚௰˚づ ",
"(҂◡_◡)",
"ƪ(ړײ)ƪ",
"(っ•́。•́)♪♬",
"◖ᵔᴥᵔ◗ ♪ ♫ ",
"(☞゚ヮ゚)☞",
"[¬º-°]¬",
"(Ծ‸ Ծ)",
"(•̀ᴗ•́)و ̑̑",
"ヾ(´〇`)ノ♪♪♪",
"(ง'̀-'́)ง",
"ლ(•́•́ლ)",
"ʕ •́؈•̀ ₎",
"♪♪ ヽ(ˇ∀ˇ )ゞ",
"щ(゚Д゚щ)",
"( ˇ෴ˇ )",
"눈_눈",
"(๑•́ ₃ •̀๑) ",
"( ˘ ³˘)♥ ",
"ԅ(≖‿≖ԅ)",
"♥‿♥",
"◔_◔",
"⁽⁽ଘ( ˊᵕˋ )ଓ⁾⁾",
"乁( ◔ ౪◔)「 ┑( ̄Д  ̄)┍",
"( ఠൠఠ )ノ",
"٩(๏_๏)۶",
"┌(ㆆ㉨ㆆ)ʃ",
"ఠ_ఠ",
"(づ。◕‿‿◕。)づ",
"(ノಠ ∩ಠ)ノ彡( \\o°o)\\",
"“ヽ(´▽`)ノ”",
"༼ ༎ຶ ෴ ༎ຶ༽",
"。゚( ゚இ‸இ゚)゚。",
"(づ ̄ ³ ̄)づ",
"(⊙.☉)7",
"ᕕ( ᐛ )ᕗ",
"t(-_-t)",
"(ಥ⌣ಥ)",
"ヽ༼ ಠ益ಠ ༽ノ",
"༼∵༽ ༼⍨༽ ༼⍢༽ ༼⍤༽",
"ミ●﹏☉ミ",
"(⊙_◎)",
"¿ⓧ_ⓧﮌ",
"ಠ_ಠ",
"(´・_・`)",
"ᕦ(ò_óˇ)ᕤ",
"⊙﹏⊙",
"(╯°□°)╯︵ ┻━┻",
r"¯\_(⊙︿⊙)_/¯",
"٩◔̯◔۶",
"°‿‿°",
"ᕙ(⇀‸↼‶)ᕗ",
"⊂(◉‿◉)つ",
"V•ᴥ•V",
"q(❂‿❂)p",
"ಥ_ಥ",
"ฅ^•ﻌ•^ฅ",
"ಥ﹏ಥ",
"( ^_^)o自自o(^_^ )",
"ಠ‿ಠ",
"ヽ(´▽`)/",
"ᵒᴥᵒ#",
"( ͡° ͜ʖ ͡°)",
"┬─┬ ノ( ゜-゜ノ)",
"ヽ(´ー`)ノ",
"☜(⌒▽⌒)☞",
"ε=ε=ε=┌(;*´Д`)ノ",
"(╬ ಠ益ಠ)",
"┬─┬⃰͡ (ᵔᵕᵔ͜ )",
"┻━┻ ︵ヽ(`Д´)ノ︵ ┻━┻",
r"¯\_(ツ)_/¯",
"ʕᵔᴥᵔʔ",
"(`・ω・´)",
"ʕ•ᴥ•ʔ",
"ლ(`ー´ლ)",
"ʕʘ̅͜ʘ̅ʔ",
"( ゚Д゚)",
r"¯\(°_o)/¯",
"(。◕‿◕。)",
]
RUNS_STR = [
"Berlari ke Thanos..",
"Berlari jauh, jauh dari bumi..",
"Berlari lebih cepat dari Bolt karena aku pengguna bot !!",
"Berlari ke Mia Khalifa..",
"Grup ini terlalu berbahaya untuk ditangani, aku harus lari.",
"`Berlari Dari Orang Yang Bau Sawi 😬`",
"Aku sangat lelah untuk berlari dan mengejarmu 💔",
"Aku pergi dulu",
"Saya hanya berjalan pergi, karena saya terlalu gemuk untuk lari.",
"Saya Cape!",
"Larii Disini Bau Sawii 😭",
"Saya lari karena saya sangat gabut.",
"Lari... \nkarena diet bukanlah pilihan.",
"Berlari Cepat Dari Orang Gila",
"Jika kamu ingin menangkapku, kamu harus cepat... \nJika kamu ingin tinggal bersamaku, kamu harus menjadi orang yang baik... \nTapi jika kamu ingin melewati aku... \nKamu pasti bercanda. ",
"Siapapun dapat berlari seratus meter, itu hitungan empat puluh dua ribu dua ratus berikutnya.",
"Mengapa semua orang ini mengikuti saya?",
"Apakah anak-anak masih mengejarku?",
"Berlari Sekencang Super Dede.. Apakah Sopan Begitu?",
]
CHASE_STR = [
"Menurutmu kemana kamu akan pergi?",
"Hah? Apa? Apakah mereka lolos?",
"ZZzzZZzz... Hah? Apa? Oh, hanya mereka lagi, lupakan.",
"Kembali kesini!",
"Tidak terlalu cepat...",
"Awas ke dinding!",
"Jangan tinggalkan aku sendiri dengan mereka !!",
"Kamu lari, kamu mati.",
"Bercanda, aku ada dimana-mana",
"Kamu akan menyesali itu ...",
"Kamu juga bisa mencoba /kickme, kudengar itu menyenangkan.",
"Ganggu orang lain, tidak ada yang peduli.",
"Kamu bisa lari, tapi kamu tidak bisa bersembunyi.",
"Apakah hanya itu yang kamu punya?",
"Saya di belakang Anda...",
"Anda punya teman!",
"Kita bisa melakukan ini dengan cara mudah, atau cara sulit.",
"Anda tidak mengerti, bukan?",
"Ya, sebaiknya kau lari!",
"Tolong, ingatkan saya apakah saya peduli?",
"Aku akan lari lebih cepat jika jadi kamu.",
"Itu pasti droid yang kami cari.",
"Semoga peluang selalu menguntungkan Anda.",
"Kata-kata terakhir yang terkenal.",
"Dan mereka menghilang selamanya, tidak pernah terlihat lagi.",
"Oh, lihat aku! Saya sangat keren, saya bisa lari dari bot orang ini",
"Ya ya, cukup ketuk /kickme.",
"Ini, ambil cincin ini dan pergilah ke Mordor saat kamu melakukannya.",
"Legenda mengatakan, mereka masih berjalan...",
"Tidak seperti Harry Potter, orang tuamu tidak bisa melindungimu dariku.",
"Ketakutan menyebabkan kemarahan. Kemarahan mengarah pada kebencian. Kebencian menyebabkan penderitaan. Jika Anda terus berlari dalam ketakutan, Anda mungkin"
"jadilah Vader berikutnya.",
"Beberapa kalkulasi nanti, saya telah memutuskan minat saya pada kejahatan Anda tepat 0.",
"Legenda mengatakan, mereka masih berjalan.",
"Teruskan, kami tidak yakin kami menginginkanmu di sini.",
"Kamu seorang penyihir- Oh. Tunggu. Kamu bukan Harry, terus bergerak.",
"JANGAN BERLARI DI SINI!",
"Hasta la vista, sayang.",
"Siapa yang membiarkan anjing keluar?",
"Ini lucu, karena tidak ada yang peduli.",
"Ah, sayang sekali, Aku suka yang itu.",
"Terus terang, sayangku, aku tidak peduli.",
"Milkshake saya membawa semua anak laki-laki ke halaman... Jadi lari lebih cepat!",
"Anda tidak bisa MENANGANI kebenaran!",
"Dahulu kala, di galaksi yang sangat jauh... Seseorang akan peduli tentang itu, Tapi sekarang tidak lagi.",
"Hei, lihat mereka! Mereka lari dari palu yang tak terelakkan... Manis.",
"Han menembak lebih dulu, Aku juga.",
"Apa yang kamu kejar, kelinci putih?",
"Seperti yang dikatakan The Doctor... LARI!",
]
HELLOSTR = [
"Hai!",
"'Ello, bro!",
"Apa itu crackin?",
"Apa kabarmu?",
"Halo, apa kabar, apa kabar!",
"Halo, siapa di sana, saya sedang berbicara.",
"Kamu tahu siapa ini.",
"Yo!",
"Wassup.",
"Salam dan salam!",
"Halo, sinar matahari!",
"Hei, apa kabar, hai!",
"Apa yang menendang, ayam kecil?",
"Ciluk ba!",
"Halo-bagus!",
"Halo, mahasiswa baru!",
"Saya datang dengan damai!",
"Ahoy, sobat!",
"Hiya!",
]
SHGS = [
"┐(´д`)┌",
"┐(´~`)┌",
"┐(´ー`)┌",
"┐( ̄ヘ ̄)┌",
"╮(╯∀╰)╭",
"╮(╯_╰)╭",
"┐(´д`)┌",
"┐(´∀`)┌",
"ʅ(́◡◝)ʃ",
"┐(゚~゚)┌",
"┐('д')┌",
"┐(‘~`;)┌",
"ヘ(´-`;)ヘ",
"┐( -“-)┌",
"ʅ(´◔౪◔)ʃ",
"ヽ(゜~゜o)ノ",
"ヽ(~~~ )ノ",
"┐(~ー~;)┌",
"┐(-。ー;)┌",
r"¯\_(ツ)_/¯",
r"¯\_(⊙_ʖ⊙)_/¯",
r"¯\_༼ ಥ ‿ ಥ ༽_/¯",
"乁( ⁰͡ Ĺ̯ ⁰͡ ) ㄏ",
]
CRI = [
"أ‿أ",
"╥﹏╥",
"(;﹏;)",
"(ToT)",
"(┳Д┳)",
"(ಥ﹏ಥ)",
"(;へ:)",
"(T_T)",
"(πーπ)",
"(T▽T)",
"(⋟﹏⋞)",
"(iДi)",
"(´Д⊂ヽ",
"(;Д;)",
"(>﹏<)",
"(TдT)",
"(つ﹏⊂)",
"༼☯﹏☯༽",
"(ノ﹏ヽ)",
"(ノAヽ)",
"(╥_╥)",
"(T⌓T)",
"(༎ຶ⌑༎ຶ)",
"(☍﹏⁰)。",
"(ಥ_ʖಥ)",
"(つд⊂)",
"(≖͞_≖̥)",
"(இ﹏இ`。)",
"༼ಢ_ಢ༽",
"༼ ༎ຶ ෴ ༎ຶ༽",
]
SLAP_TEMPLATES_EN = [
"{hits} {victim} dengan {item}.",
"{hits} {victim} di wajah dengan {item}.",
"{hits} {victim} sekitar sedikit dengan {item}.",
"{throws} {item} ke {Victim}.",
"mengambil {item} dan {throws} ke wajah {victim}.",
"Menusuk {victim} dengan tombak cinta.",
"{throws} beberapa {item} ke {victim}.",
"mengambil {item} dan {throws} ke wajah {victim}.",
"meluncurkan {item} ke arah umum {korban}.",
"duduk di wajah {victim} sambil membanting {item}.",
"mulai menampar {victim} dengan konyol dengan {item}.",
"pin {victim} ke bawah dan berulang kali {hits} mereka dengan {item}.",
"mengambil {item} dan {hits} {victim} dengannya.",
"mulai menampar {victim} dengan konyol dengan {item}.",
"menahan {victim} dan berulang kali {hits} mereka dengan {item}.",
"memukul {victim} dengan {item}.",
"mengambil {item} dan {hits} {victim} dengannya.",
"mengikat {victim} ke kursi dan {throws} {item} padanya.",
"{hits} {victim} {where} dengan {item}.",
"mengikat {victim} ke tiang dan mencambuk mereka {where} dengan {item}."
"memberikan dorongan ramah untuk membantu {victim} belajar berenang di lahar.",
"mengirim {victim} ke /laut /lahar.",
"mengirim {victim} ke lubang memori.",
"memenggal {victim}.",
"melemparkan {victim} dari sebuah gedung.",
"mengganti semua musik {victim} dengan lagu iri bilang bos.",
"spam email {victim}.",
"membuat {victim} depresi.",
"menampar {victim} tanpa apa-apa.",
"pukul {victim} dengan pesawat garuda.",
"memukul kepala {victim}.",
"taruh {victim} di tong sampah.",
"Menendang {victim} dan melemparnya ke sungai.",
"letakkan {victim} di rumah hantu.",
"menampar {victim} dengan tongkat besi!"]
ITEMS_EN = [
"Tabung Gas",
"Televisi 42 In",
"Raket",
"Raket Nyamuk",
"Kaca",
"Buku",
"Ringgis",
"Telur",
"Jarum",
"Monitor Tabung",
"Obeng",
"Almunium",
"Emas",
"Printer",
"Speaker",
"Gas Lpg",
"Tangki Bensin",
"Tandon Air",
"Bola Boling",
"Laptop",
"Hardisk Rusak",
"Wajan Panas",
"Virus Corona",
"Meja Kantor",
"Meja Arsip",
"Lemari",
"Ember Besi",
"Besi Beton",
"Timah Panas",
"Harimau",
"Batu Krikil",
"Makanan Basi",
"Pesawat AirBus",
"Roket Nasa",
"Satelit Nasa",
"Matahari",
"Meteor",
"Berkas Kantor",
"Beton panas",
"Cermin",
"Batu Giok",
"Botol",
"Nezuko",
"Kaset Pita",
"Tiang Jemuran",
"Pisau Lipat",
"Bongkahan Es ",
"Asteroid",
]
THROW_EN = [
"melempar",
"melemparkan",
]
HIT_EN = [
"memukul",
"menendang",
"menampar",
"memukul",
"melempar",
]
WHERE_EN = ["di pipi", "di kepala", "di pantat", "di badan"]
SLAP_TEMPLATES_ID = [
"{hits} {victim} dengan {item}.",
"{throws} sebuah {item} kepada {victim}.",
"mengambil {item} dan {hits} {victim} .",
"Mengambil Sebuah {item} dan {hits} {victim} Dengan itu.",
"Menjatuhkan {victim} Ke Lava.",
"Mengirimkan {victim} ke Kawah.",
"Membuang {victim} Ke Laut.",
"Mengeluarkan {victim} Dari Bumi.",
"Melempar {victim} Ke luar angkasa.",
"Menaruh {victim} di Pluto.",
"Melemparkan sebuah {item} ke {victim}.",
"Melemparkan {item} kepada {victim}.",
"Menampar {victim} menggunakan {item}.",
"Membuang {victim} Ke udara.",
"Menghapus {victim} Dari Daftar Teman.",
"Melemparkan {item} {where} {victim}.",
"Meletakan {item} {where} {victim}.",
"Menyerang {victim} menggunakan {anime}.",
"Mengehack Seluruh akun {victim}"
]
ITEMS_ID = [
"Tabung Gas",
"Televisi 42 In",
"Raket",
"Raket Nyamuk",
"Kaca",
"Buku",
"Ringgis",
"Telur",
"Jarum",
"Monitor Tabung",
"Obeng",
"Almunium",
"Emas",
"Printer",
"Speaker",
"Gas Lpg",
"Tangki Bensin",
"Tandon Air",
"Bola Boling",
"Laptop",
"Hardisk Rusak",
"Wajan Panas",
"Virus Corona",
"Meja Kantor",
"Meja Arsip",
"Lemari",
"Ember Besi",
"Besi Beton",
"Timah Panas",
"Harimau",
"Batu Krikil",
"Makanan Basi",
"Pesawat AirBus",
"Roket Nasa",
"Satelit Nasa",
"Matahari",
"Meteor",
"Berkas Kantor",
"Beton panas",
"Cermin",
"Batu Giok",
"Botol",
"Nezuko",
"Kaset Pita",
"Tiang Jemuran",
"Pisau Lipat",
"Bongkahan Es ",
"Asteroid",
]
THROW_ID = [
"Melempar",
"Melemparkan",
]
HIT_ID = [
"Memukul",
"melemparkan",
"Memukuli",
]
WHERE_ID = ["di pipi", "di kepala", "di bokong", "di badan"]
SLAP_TEMPLATES_Jutsu = [
"Menyerang {victim} Menggunakan {hits}.",
"Menyerang {victim} Menggunakan {item}.",
"Melemparkan {throws} kepada {victim} .",
"Melemparkan {throws} {where} {victim}."
]
ITEMS_Jutsu = [
"KAA MEE HAA MEE HAA",
"Chibaku Tensei",
]
THROW_Jutsu = [
"Futon Rasen Shuriken",
"Shuriken",
]
HIT_Jutsu = [
"Rasengan",
"Chidori",
]
GAMBAR_TITIT = """
😋😋
😋😋😋
😋😋😋
😋😋😋
😋😋😋
😋😋😋
😋😋😋
😋😋😋
😋😋😋
😋😋😋
😋😋😋😋
😋😋😋😋😋😋
😋😋😋 😋😋😋
😋😋 😋😋
"""
GAMBAR_OK = """
░▐▀▀▀▀▀▀▀▀▌▐▀▌▄▄▄▀▀▓▀
░▐▌▓▀▀▀▀▓▌▌▐▐▌▀▌▄▄▀░░
░▐▐▌▐▀▀▌▐▐▌▐▌▐▓▄▀░░░░
░▐▌▌▐▄▄▌▐▌▌▐▐▌▓▀▄░░░░
░▐▐▓▄▄▄▄▓▐▌▐▌▌▄▌▀▀▄░░
░▐▄▄▄▄▄▄▄▄▌▐▄▌▀▀▀▄▄▓▄
"""
GAMBAR_TENGKORAK = """
░░░░░░░░░░░░░▄▐░░░░
░░░░░░░▄▄▄░░▄██▄░░░
░░░░░░▐▀█▀▌░░░░▀█▄░
░░░░░░▐█▄█▌░░░░░░▀█▄
░░░░░░░▀▄▀░░░▄▄▄▄▄▀▀
░░░░░▄▄▄██▀▀▀▀░░░░░
░░░░█▀▄▄▄█░▀▀░░░░░░
░░░░▌░▄▄▄▐▌▀▀▀░░░░░
░▄░▐░░░▄▄░█░▀▀░░░░░
░▀█▌░░░▄░▀█▀░▀░░░░░
░░░░░░░░▄▄▐▌▄▄░░░░░
░░░░░░░░▀███▀█▄░░░░
░░░░░░░▐▌▀▄▀▄▀▐░░░░
░░░░░░░▐▀░░░░░░▐▌░░
░░░░░░░█░░░░░░░░█░░
░░░░░░▐▌░░░░░░░░░█░
"""
GAMBAR_KONTL = """
⣠⡶⠚⠛⠲⢄⡀
⣼⠁ ⠀⠀⠀ ⠳⢤⣄
⢿⠀⢧⡀⠀⠀⠀⠀⠀⢈⡇
⠈⠳⣼⡙⠒⠶⠶⠖⠚⠉⠳⣄
⠀⠀⠈⣇⠀⠀⠀⠀⠀⠀⠀⠈⠳⣄
⠀⠀⠀⠘⣆ ⠀⠀⠀⠀ ⠀⠈⠓⢦⣀
⠀⠀⠀⠀⠈⢳⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠙⠲⢤
⠀⠀⠀⠀⠀⠀⠙⢦⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⢧
⠀⠀⠀⠀⠀⠀⠀⡴⠋⠓⠦⣤⡀⠀⠀⠀⠀⠀⠀⠀⠈⣇
⠀⠀⠀⠀⠀⠀⣸⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⡄
⠀⠀⠀⠀⠀⠀⣿⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⡇
⠀⠀⠀⠀⠀⠀⢹⡄⠀⠀⡄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⠃
⠀⠀⠀⠀⠀⠀⠀⠙⢦⣀⣳⡀⠀⠀⠀⠀⠀⠀⠀⠀⣰⠏
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠙⠛⢦⣀⣀⣀⣀⣠⡴⠚⠁⠉⠉⠉
"""
WHERE_Jutsu = ["Di Pipi", "Di Kepala", "Di Bokong", "Di Badan ,Di Pantat"]
normiefont = [
'a',
'b',
'c',
'd',
'e',
'f',
'g',
'h',
'i',
'j',
'k',
'l',
'm',
'n',
'o',
'p',
'q',
'r',
's',
't',
'u',
'v',
'w',
'x',
'y',
'z']
weebyfont = [
'卂',
'乃',
'匚',
'刀',
'乇',
'下',
'厶',
'卄',
'工',
'丁',
'长',
'乚',
'从',
'𠘨',
'口',
'尸',
'㔿',
'尺',
'丂',
'丅',
'凵',
'リ',
'山',
'乂',
'丫',
'乙']
# ===========================================
@register(outgoing=True, pattern=r"^\.(\w+)say (.*)")
async def univsaye(cowmsg):
""" For .cowsay module, userbot wrapper for cow which says things. """
arg = cowmsg.pattern_match.group(1).lower()
text = cowmsg.pattern_match.group(2)
if arg == "cow":
arg = "default"
if arg not in cow.COWACTERS:
return
cheese = cow.get_cow(arg)
cheese = cheese()
await cowmsg.edit(f"`{cheese.milk(text).replace('`', '´')}`")
@register(outgoing=True, pattern=r"^\.coinflip (.*)")
async def coin(event):
r = choice(["Kepala", "Ekor"])
input_str = event.pattern_match.group(1)
if input_str:
input_str = input_str.lower()
if r == "Kepala":
if input_str == "Kepala":
await event.edit(
"Koin Itu Mendarat Di: **Kepala**.\nKamu Benar.")
elif input_str == "Ekor":
await event.edit(
"Koin Itu Mendarat Di: **Kepala**.\nKamu Salah, Coba Lagi..."
)
else:
await event.edit("Koin Itu Mendarat Di: **Kepala**.")
elif r == "Ekor":
if input_str == "Ekor":
await event.edit(
"Koin Itu Mendarat Di: **Ekor**.\nKamu Benar.")
elif input_str == "Kepala":
await event.edit(
"Koin Itu Mendarat Di: **Ekor**.\nKamu Salah, Coba Lagi..."
)
else:
await event.edit("Koin Itu Mendarat Di: **Ekor**.")
@register(pattern=r"^\.slap(?: |$)(.*)", outgoing=True)
async def who(event):
""" slaps a user, or get slapped if not a reply. """
replied_user = await get_user_from_event(event)
if replied_user:
replied_user = replied_user[0]
else:
return
caption = await slap(replied_user, event)
try:
await event.edit(caption)
except BaseException:
await event.edit(
"`Tidak bisa slap orang ini, perlu mengambil beberapa meteor dan batu!`"
)
async def slap(replied_user, event):
""" Construct a funny slap sentence !! """
user_id = replied_user.id
first_name = replied_user.first_name
username = replied_user.username
if username:
slapped = "@{}".format(username)
else:
slapped = f"[{first_name}](tg://user?id={user_id})"
slap_str = event.pattern_match.group(1)
if slap_str == "en":
temp = choice(SLAP_TEMPLATES_EN)
item = choice(ITEMS_EN)
hit = choice(HIT_EN)
throw = choice(THROW_EN)
where = choice(WHERE_EN)
elif slap_str == "id":
temp = choice(SLAP_TEMPLATES_ID)
item = choice(ITEMS_ID)
hit = choice(HIT_ID)
throw = choice(THROW_ID)
where = choice(WHERE_ID)
elif slap_str == "jutsu":
temp = choice(SLAP_TEMPLATES_Jutsu)
item = choice(ITEMS_Jutsu)
hit = choice(HIT_Jutsu)
throw = choice(THROW_Jutsu)
where = choice(WHERE_Jutsu)
else:
temp = choice(SLAP_TEMPLATES_EN)
item = choice(ITEMS_EN)
hit = choice(HIT_EN)
throw = choice(THROW_EN)
where = choice(WHERE_EN)
caption = "..." + temp.format(
victim=slapped, item=item, hits=hit, throws=throw, where=where)
return caption
@register(outgoing=True, pattern=r"^\.boobs(?: |$)(.*)")
async def boobs(e):
await e.edit("`Berdosa, Mendapatkan Gambar Boobs...`")
await sleep(3)
await e.edit("`Mengirim Gambar Boobs...`")
nsfw = requests.get(
        'http://api.oboobs.ru/noise/1').json()[0]["preview"]
urllib.request.urlretrieve(
"http://media.oboobs.ru/{}".format(nsfw), "*.jpg")
os.rename('*.jpg', 'boobs.jpg')
await e.client.send_file(e.chat_id, "boobs.jpg")
os.remove("boobs.jpg")
await e.delete()
@register(outgoing=True, pattern=r"^\.pantat(?: |$)(.*)")
async def butts(e):
await e.edit("`Berdosa, Mendapatkan Gambar Pantat Yang Indah...`")
await sleep(3)
await e.edit("`Mengirim Gambar Pantat Indah...`")
nsfw = requests.get(
        'http://api.obutts.ru/noise/1').json()[0]["preview"]
urllib.request.urlretrieve(
"http://media.obutts.ru/{}".format(nsfw), "*.jpg")
os.rename('*.jpg', 'butts.jpg')
await e.client.send_file(e.chat_id, "butts.jpg")
os.remove("butts.jpg")
await e.delete()
@register(outgoing=True, pattern=r"^\.(yes|no|maybe|decide)$")
async def decide(event):
decision = event.pattern_match.group(1).lower()
message_id = event.reply_to_msg_id if event.reply_to_msg_id else None
if decision != "decide":
r = requests.get(f"https://yesno.wtf/api?force={decision}").json()
else:
r = requests.get(f"https://yesno.wtf/api").json()
await event.delete()
await event.client.send_message(event.chat_id,
str(r["answer"]).upper(),
reply_to=message_id,
file=r["image"])
@register(outgoing=True, pattern=r"^\.fp$")
async def facepalm(e):
""" Facepalm 🤦♂ """
await e.edit("🤦♂")
@register(outgoing=True, pattern=r"^\.cry$")
async def cry(e):
""" y u du dis, i cry everytime !! """
await e.edit(choice(CRI))
@register(outgoing=True, pattern=r"^\.insult$")
async def insult(e):
""" I make you cry !! """
await e.edit(choice(INSULT_STRINGS))
@register(outgoing=True, pattern=r"^\.cp(?: |$)(.*)")
async def copypasta(cp_e):
""" Copypasta the famous meme """
textx = await cp_e.get_reply_message()
message = cp_e.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
return await cp_e.edit("`😂🅱️AhHH👐MaNtAp👅Bro👅UnTuk✌️MeMbuAT👌Ku👐TeRliHat👀LuCu💞HaHAhaA!💦`")
reply_text = choice(EMOJIS)
# choose a random character in the message to be substituted with 🅱️
b_char = choice(message).lower()
for owo in message:
if owo == " ":
reply_text += choice(EMOJIS)
elif owo in EMOJIS:
reply_text += owo
reply_text += choice(EMOJIS)
elif owo.lower() == b_char:
reply_text += "🅱️"
else:
if bool(getrandbits(1)):
reply_text += owo.upper()
else:
reply_text += owo.lower()
reply_text += choice(EMOJIS)
await cp_e.edit(reply_text)
@register(outgoing=True, pattern=r"^\.vapor(?: |$)(.*)")
async def vapor(vpr):
""" Vaporize everything! """
reply_text = list()
textx = await vpr.get_reply_message()
message = vpr.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
return await vpr.edit("`B e r i k a n S e b u a h T e k s U n t u k Vapor!`")
for charac in message:
if 0x21 <= ord(charac) <= 0x7F:
reply_text.append(chr(ord(charac) + 0xFEE0))
elif ord(charac) == 0x20:
reply_text.append(chr(0x3000))
else:
reply_text.append(charac)
await vpr.edit("".join(reply_text))
@register(outgoing=True, pattern=r"^\.str(?: |$)(.*)")
async def stretch(stret):
""" Stretch it."""
textx = await stret.get_reply_message()
message = stret.text
message = stret.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
return await stret.edit("`Beriiiiiiiiikaaannnn sebuuuuuuuuuah teeeeeeeks!`")
count = randint(3, 10)
reply_text = sub(r"([aeiouAEIOUaeiouAEIOUаеиоуюяыэё])", (r"\1" * count),
message)
await stret.edit(reply_text)
@register(outgoing=True, pattern=r"^\.zal(?: |$)(.*)")
async def zal(zgfy):
""" Invoke the feeling of chaos. """
reply_text = list()
textx = await zgfy.get_reply_message()
message = zgfy.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
return await zgfy.edit(
"`b̜́ͨe͒͜r̠͂ͬi̷̱̋k͖͒ͤa̋ͫ͑n͕͂͗ t̢͘͟e͂̽̈́k͎͂͠s̤͚ͭ m̪͔͑è͜͡n͈ͮḁ͞ͅk̲̮͛u̺͂ͩt̬̗́k͍̙̮á ̺n̨̹ͪ`"
)
for charac in message:
if not charac.isalpha():
reply_text.append(charac)
continue
for _ in range(0, 3):
rand = randint(0, 2)
if rand == 0:
charac = charac.strip() + \
choice(ZALG_LIST[0]).strip()
elif rand == 1:
charac = charac.strip() + \
choice(ZALG_LIST[1]).strip()
else:
charac = charac.strip() + \
choice(ZALG_LIST[2]).strip()
reply_text.append(charac)
await zgfy.edit("".join(reply_text))
@register(outgoing=True, pattern=r"^\.hi$")
async def hoi(hello):
""" Greet everyone! """
await hello.edit(choice(HELLOSTR))
@register(outgoing=True, pattern=r"^\.owo(?: |$)(.*)")
async def faces(owo):
""" UwU """
textx = await owo.get_reply_message()
message = owo.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
return await owo.edit("` Mohon Berikan Teks UwU! `")
reply_text = sub(r"(r|l)", "w", message)
reply_text = sub(r"(R|L)", "W", reply_text)
reply_text = sub(r"n([aeiou])", r"ny\1", reply_text)
reply_text = sub(r"N([aeiouAEIOU])", r"Ny\1", reply_text)
reply_text = sub(r"\!+", " " + choice(UWUS), reply_text)
reply_text = reply_text.replace("ove", "uv")
reply_text += " " + choice(UWUS)
await owo.edit(reply_text)
@register(outgoing=True, pattern=r"^\.react$")
async def react_meme(react):
""" Make your userbot react to everything. """
await react.edit(choice(FACEREACTS))
@register(outgoing=True, pattern=r"^\.shg$")
async def shrugger(shg):
r""" ¯\_(ツ)_/¯ """
await shg.edit(choice(SHGS))
@register(outgoing=True, pattern=r"^\.chase$")
async def police(chase):
""" Lari bro lari, aku akan segera menangkapmu !! """
await chase.edit(choice(CHASE_STR))
@register(outgoing=True, pattern=r"^\.run$")
async def runner_lol(run):
""" Lari, lari, LARIII! """
await run.edit(choice(RUNS_STR))
@register(outgoing=True, pattern=r"^\.metoo$")
async def metoo(hahayes):
""" Haha yes """
await hahayes.edit(choice(METOOSTR))
@register(outgoing=True, pattern=r"^\.oem$")
async def oem(e):
t = "Oem"
for j in range(16):
t = t[:-1] + "em"
await e.edit(t)
@register(outgoing=True, pattern=r"^\.Oem$")
async def Oem(e):
t = "Oem"
for j in range(16):
t = t[:-1] + "em"
await e.edit(t)
@register(outgoing=True, pattern=r"^\.10iq$")
async def iqless(e):
await e.edit("♿")
@register(outgoing=True, pattern="^.fuck$")
async def iqless(e):
await e.edit("🖕🖕🖕🖕🖕🖕🖕🖕\n🖕🖕🖕🖕🖕🖕🖕🖕\n🖕🖕\n🖕🖕\n🖕🖕\n🖕🖕🖕🖕🖕🖕\n🖕🖕🖕🖕🖕🖕\n🖕🖕\n🖕🖕\n🖕🖕\n🖕🖕\n🖕🖕")
@register(outgoing=True, pattern=r"^\.moon$")
async def moon(event):
deq = deque(list("🌗🌘🌑🌒🌓🌔🌕🌖"))
try:
for x in range(32):
await sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
except BaseException:
return
@register(outgoing=True, pattern=r"^\.bunga$")
async def moon(event):
deq = deque(list("🌼🌻🌺🌹🌸🌷"))
try:
for x in range(35):
await sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
except BaseException:
return
@register(outgoing=True, pattern=r"^\.waktu$")
async def moon(event):
deq = deque(list("🎑🌄🌅🌇🌆🌃🌌"))
try:
for x in range(100):
await sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
except BaseException:
return
@register(outgoing=True, pattern=r"^\.buah$")
async def moon(event):
deq = deque(list("🍉🍓🍇🍎🍍🍐🍌"))
try:
for x in range(35):
await sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
except BaseException:
return
@register(outgoing=True, pattern=r"^\.clock$")
async def clock(event):
deq = deque(list("🕙🕘🕗🕖🕕🕔🕓🕒🕑🕐🕛"))
try:
for x in range(32):
await sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
except BaseException:
return
@register(outgoing=True, pattern="^.rain$")
async def rain(event):
deq = deque(list("☀️🌤⛅️🌥☁️🌧⛈"))
try:
for x in range(32):
await sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
except BaseException:
return
@register(outgoing=True, pattern="^.love$")
async def love(event):
deq = deque(list("❤️🧡💛💚💙💜🖤💕💞💓💗💖💘💝"))
try:
for x in range(32):
await sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
except BaseException:
return
@register(outgoing=True, pattern="^.earth$")
async def earth(event):
deq = deque(list("🌏🌍🌎🌎🌍🌏🌍🌎"))
try:
for x in range(32):
await sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
except BaseException:
return
@register(outgoing=True, pattern="^.hati$")
async def earth(event):
deq = deque(list("🖤💜💙💚💛🧡❤️🤍"))
try:
for x in range(32):
await sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
except BaseException:
return
@register(outgoing=True, pattern="^.monyet$")
async def earth(event):
deq = deque(list("🙈🙉🙈🙉🙈🙉🙈🙉"))
try:
for x in range(32):
await sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
except BaseException:
return
@register(outgoing=True, pattern="^.emo$")
async def earth(event):
deq = deque(list("🙂😁😄😃😂🤣😭🐵🙊🙉🙈"))
try:
for x in range(32):
await sleep(0.1)
await event.edit("".join(deq))
deq.rotate(1)
except BaseException:
return
@register(outgoing=True, pattern=r"^\.mock(?: |$)(.*)")
async def spongemocktext(mock):
""" Do it and find the real fun. """
reply_text = list()
textx = await mock.get_reply_message()
message = mock.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
return await mock.edit("`bEriKan PeSan UnTuK MoCk!`")
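    # Randomly flip the case of each letter to mimic the "mocking
    # SpongeBob" meme text style.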
for charac in message:
if charac.isalpha() and randint(0, 1):
to_app = charac.upper() if charac.islower() else charac.lower()
reply_text.append(to_app)
else:
reply_text.append(charac)
await mock.edit("".join(reply_text))
@register(outgoing=True, pattern=r"^\.weeb(?: |$)(.*)")
async def weebify(e):
args = e.pattern_match.group(1)
if not args:
get = await e.get_reply_message()
args = get.text
if not args:
await e.edit("`Apa Yang Anda Lakukan Tuan ツ`")
return
string = ' '.join(args).lower()
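    # ' '.join spreads the characters out with spaces, then each Latin
    # letter is swapped for its CJK look-alike from weebyfont.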
for normiecharacter in string:
if normiecharacter in normiefont:
weebycharacter = weebyfont[normiefont.index(normiecharacter)]
string = string.replace(normiecharacter, weebycharacter)
await e.edit(string)
@register(outgoing=True, pattern=r"^\.clap(?: |$)(.*)")
async def claptext(memereview):
""" Praise people! """
textx = await memereview.get_reply_message()
message = memereview.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
return await memereview.edit("`Tuan, Mohon Balas Ke Pesan Orang Yang Ingin Anda Puji ツ`")
reply_text = "👏 "
reply_text += message.replace(" ", " 👏 ")
reply_text += " 👏"
await memereview.edit(reply_text)
@register(outgoing=True, pattern=r"^\.teksbiru$")
async def bluetext(bt_e):
""" Believe me, you will find this useful. """
if await bt_e.get_reply_message() and bt_e.is_group:
await bt_e.edit(
"/TEKSBIRU /APAKAH /ANDA.\n"
"/SEDANG /GABUT /KARNA /TERTARIK /MELIHAT /TEKS /BIRU /PASTI /ANDA /BOSAN?")
@register(outgoing=True, pattern=r"^\.f (.*)")
async def payf(event):
paytext = event.pattern_match.group(1)
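    # Draw a large "F" out of the given text/emoji: row widths of
    # 8, 8, 2, 2, 2, 6, 6, 2, 2, 2, 2, 2 repetitions.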
pay = "{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}".format(
paytext * 8, paytext * 8, paytext * 2, paytext * 2, paytext * 2,
paytext * 6, paytext * 6, paytext * 2, paytext * 2, paytext * 2,
paytext * 2, paytext * 2)
await event.edit(pay)
@register(outgoing=True, pattern=r"^\.lfy (.*)")
async def let_me_google_that_for_you(lmgtfy_q):
textx = await lmgtfy_q.get_reply_message()
qry = lmgtfy_q.pattern_match.group(1)
if qry:
query = str(qry)
    elif textx:
        query = textx.message
    else:
        return await lmgtfy_q.edit("`Berikan sebuah query untuk dicari!`")
query_encoded = query.replace(" ", "+")
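    # Build a lmgtfy search link and shorten it via the is.gd API, then
    # reply with a markdown link to the shortened URL.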
lfy_url = f"http://lmgtfy.com/?s=g&iie=1&q={query_encoded}"
payload = {'format': 'json', 'url': lfy_url}
r = requests.get('http://is.gd/create.php', params=payload)
await lmgtfy_q.edit("Ini Dia, Bantu Dirimu Sendiri."
f"\n[{query}]({r.json()['shorturl']})")
@register(outgoing=True, pattern=r"^\.sayhi$")
async def sayhi(e):
await e.edit(
"\n💰💰💰💰💰💰💰💰💰💰💰💰"
"\n💰🔷💰💰💰🔷💰💰🔷🔷🔷💰"
"\n💰🔷💰💰💰🔷💰💰💰🔷💰💰"
"\n💰🔷💰💰💰🔷💰💰💰🔷💰💰"
"\n💰🔷🔷🔷🔷🔷💰💰💰🔷💰💰"
"\n💰🔷💰💰💰🔷💰💰💰🔷💰💰"
"\n💰🔷💰💰💰🔷💰💰💰🔷💰💰"
"\n💰🔷💰💰💰🔷💰💰🔷🔷🔷💰"
"\n💰💰💰💰💰💰💰💰💰💰💰💰")
@register(pattern=r".scam(?: |$)(.*)", outgoing=True)
async def scam(event):
""" Just a small command to fake chat actions for fun !! """
    options = [
        'typing', 'contact', 'game', 'location', 'voice', 'round', 'video',
        'photo', 'document', 'cancel'
    ]
input_str = event.pattern_match.group(1)
args = input_str.split()
if len(args) == 0: # Let bot decide action and time
scam_action = choice(options)
scam_time = randint(30, 60)
    elif len(args) == 1:  # User decides time/action, bot decides the other.
        try:
            scam_time = int(args[0])
            scam_action = choice(options)
        except ValueError:
            scam_action = str(args[0]).lower()
            scam_time = randint(30, 60)
elif len(args) == 2: # User decides both action and time
scam_action = str(args[0]).lower()
scam_time = int(args[1])
else:
await event.edit("`Tidak Valid`")
return
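    # Delete the trigger message for long fakes (>300s), then hold the
    # chosen chat action (typing, recording, etc.) for scam_time seconds.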
try:
if (scam_time > 300):
await event.delete()
async with event.client.action(event.chat_id, scam_action):
await sleep(scam_time)
except BaseException:
return
@register(pattern=r".type(?: |$)(.*)", outgoing=True)
async def typewriter(typew):
""" Just a small command to make your keyboard become a typewriter! """
textx = await typew.get_reply_message()
message = typew.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
return await typew.edit("`Berikan Sebuah Teks Untuk Type!`")
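    # Re-edit the message one character at a time with a trailing "|"
    # cursor, pausing sleep_time between edits to imitate a typewriter.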
sleep_time = 0.03
typing_symbol = "|"
old_text = ""
await typew.edit(typing_symbol)
await sleep(sleep_time)
for character in message:
old_text = old_text + "" + character
typing_text = old_text + "" + typing_symbol
await typew.edit(typing_text)
await sleep(sleep_time)
await typew.edit(old_text)
await sleep(sleep_time)
@register(outgoing=True, pattern=r"^\.leave$")
async def leave(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`Tuan Telah Meninggalkan Grup ツ`")
@register(outgoing=True, pattern=r"^\.fail$")
async def fail(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄ `"
"`\n████▌▄▌▄▐▐▌█████ `"
"`\n████▌▄▌▄▐▐▌▀████ `"
"`\n▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀ `")
@register(outgoing=True, pattern=r"^\.lol$")
async def lol(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n╱┏┓╱╱╱╭━━━╮┏┓╱╱╱╱ `"
"`\n╱┃┃╱╱╱┃╭━╮┃┃┃╱╱╱╱ `"
"`\n╱┃┗━━┓┃╰━╯┃┃┗━━┓╱ `"
"`\n╱┗━━━┛╰━━━╯┗━━━┛╱ `")
@register(outgoing=True, pattern=r"^\.rock$")
async def lol(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n┈╭╮┈┈┈┈┈┈┈┈┈┈┈┈ `"
"`\n┈┃┃┈╭╮┈┏╮╭╮╭╮┃╭ `"
"`\n┈┃┃┈┃┃┈┣┫┃┃┃┈┣┫ `"
"`\n┈┃┣┳┫┃┈┃╰╰╯╰╯┃╰ `"
"`\n╭┻┻┻┫┃┈┈╭╮┃┃━┳━ `"
"`\n┃╱╭━╯┃┈┈┃┃┃┃┈┃┈ `"
"`\n╰╮╱╱╱┃┈┈╰╯╰╯┈┃┈ `")
@register(outgoing=True, pattern=r"^\.lool$")
async def lool(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n╭╭━━━╮╮┈┈┈┈┈┈┈┈┈┈\n┈┃╭━━╯┈┈┈┈▕╲▂▂╱▏┈\n┈┃┃╱▔▔▔▔▔▔▔▏╱▋▋╮┈`"
"`\n┈┃╰▏┃╱╭╮┃╱╱▏╱╱▆┃┈\n┈╰━▏┗━╰╯┗━╱╱╱╰┻┫┈\n┈┈┈▏┏┳━━━━▏┏┳━━╯┈`"
"`\n┈┈┈▏┃┃┈┈┈┈▏┃┃┈┈┈┈ `")
@register(outgoing=True, pattern=r"^\.stfu$")
async def stfu(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n██████████████████████████████`"
"`\n██▀▀▀▀████▀▀▀▀████▀▀▀▀▀███▀▀██▀▀█`"
"`\n█──────██──────██───────██──██──█`"
"`\n█──██▄▄████──████──███▄▄██──██──█`"
"`\n█▄────▀████──████────█████──██──█`"
"`\n█▀▀██──████──████──███████──██──█`"
"`\n█──────████──████──███████──────█`"
"`\n██▄▄▄▄█████▄▄████▄▄████████▄▄▄▄██`"
"`\n█████████████████████████████████`")
@register(outgoing=True, pattern=r"^\.gtfo$")
async def gtfo(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n███████████████████████████████ `"
"`\n█▀▀▀▀▀▀▀█▀▀▀▀▀▀█▀▀▀▀▀▀▀█▀▀▀▀▀▀█ `"
"`\n█───────█──────█───────█──────█ `"
"`\n█──███──███──███──███▄▄█──██──█ `"
"`\n█──███▄▄███──███─────███──██──█ `"
"`\n█──██───███──███──██████──██──█ `"
"`\n█──▀▀▀──███──███──██████──────█ `"
"`\n█▄▄▄▄▄▄▄███▄▄███▄▄██████▄▄▄▄▄▄█ `"
"`\n███████████████████████████████ `")
@register(outgoing=True, pattern=r"^\.nih$")
async def nih(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n(\\_/)`"
"`\n(●_●)`"
"`\n />💖 *Ini Buat Kamu`"
"\n \n"
r"`(\_/)`"
"`\n(●_●)`"
"`\n💖<\\ *Tapi Bo'ong`")
@register(outgoing=True, pattern=r"^\.fag$")
async def gtfo(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n█████████`"
"`\n█▄█████▄█`"
"`\n█▼▼▼▼▼`"
"`\n█ STFU FAGGOT'S`"
"`\n█▲▲▲▲▲`"
"`\n█████████`"
"`\n ██ ██`")
@register(outgoing=True, pattern=r"^\.tai$")
async def taco(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("\n{\\__/}"
"\n(●_●)"
"\n( >💩 Mau Tai Ku?")
@register(outgoing=True, pattern=r"^\.paw$")
async def paw(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`(=ↀωↀ=)")
@register(outgoing=True, pattern=r"^\.tf$")
async def tf(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("(̿▀̿ ̿Ĺ̯̿̿▀̿ ̿)̄ ")
@register(outgoing=True, pattern=r"^\.gey$")
async def gey(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n┈┈┈╭━━━━━╮┈┈┈┈┈\n┈┈┈┃┊┊┊┊┊┃┈┈┈┈┈`"
"`\n┈┈┈┃┊┊╭━╮┻╮┈┈┈┈\n┈┈┈╱╲┊┃▋┃▋┃┈┈┈┈\n┈┈╭┻┊┊╰━┻━╮┈┈┈┈`"
"`\n┈┈╰┳┊╭━━━┳╯┈┈┈┈\n┈┈┈┃┊┃╰━━┫┈Lu Bau Hehe`"
"\n┈┈┈┈┈┈┏━┓┈┈┈┈┈┈")
@register(outgoing=True, pattern=r"^\.gay$")
async def gey(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n┈┈┈╭━━━━━╮┈┈┈┈┈\n┈┈┈┃┊┊┊┊┊┃┈┈┈┈┈`"
"`\n┈┈┈┃┊┊╭━╮┻╮┈┈┈┈\n┈┈┈╱╲┊┃▋┃▋┃┈┈┈┈\n┈┈╭┻┊┊╰━┻━╮┈┈┈┈`"
"`\n┈┈╰┳┊╭━━━┳╯┈┈┈┈\n┈┈┈┃┊┃╰━━┫┈ANDA GAY`"
"\n┈┈┈┈┈┈┏━┓┈┈┈┈┈┈")
@register(outgoing=True, pattern=r"^\.bot$")
async def bot(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("` \n ╲╲╭━━━━╮ \n╭╮┃▆┈┈▆┃╭╮ \n┃╰┫▽▽▽┣╯┃ \n╰━┫△△△┣━╯`"
"`\n╲╲┃┈┈┈┈┃ \n╲╲┃┈┏┓┈┃ `")
@register(outgoing=True, pattern=r"^\.hey$")
async def hey(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("\n┈┈┈╱▔▔▔▔╲┈╭━━━━━\n┈┈▕▂▂▂▂▂▂▏┃HEY!┊😀`"
"`\n┈┈▕▔▇▔▔┳▔▏╰┳╮HEY!┊\n┈┈▕╭━╰╯━╮▏━╯╰━━━\n╱▔▔▏▅▅▅▅▕▔▔╲┈┈┈┈`"
"`\n▏┈┈╲▂▂▂▂╱┈┈┈▏┈┈┈`")
@register(outgoing=True, pattern=r"^\.nou$")
async def nou(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("`\n┈╭╮╭╮\n┈┃┃┃┃\n╭┻┗┻┗╮`"
"`\n┃┈▋┈▋┃\n┃┈╭▋━╮━╮\n┃┈┈╭╰╯╰╯╮`"
"`\n┫┈┈ NoU\n┃┈╰╰━━━━╯`"
"`\n┗━━┻━┛`")
@register(outgoing=True, pattern=r"^\.iwi(?: |$)(.*)")
async def faces(siwis):
""" IwI """
textx = await siwis.get_reply_message()
message = siwis.pattern_match.group(1)
if message:
pass
elif textx:
message = textx.text
else:
await siwis.edit("` Anda Harus Memberikan Teks Ke IwI `")
return
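    # Replace every vowel with "i" and exclamation marks with a random
    # shrug kaomoji from IWIS.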
reply_text = sub(r"(a|i|u|e|o)", "i", message)
reply_text = sub(r"(A|I|U|E|O)", "I", reply_text)
reply_text = sub(r"\!+", " " + choice(IWIS), reply_text)
reply_text += " " + choice(IWIS)
await siwis.edit(reply_text)
@register(outgoing=True, pattern="^.koc$")
async def koc(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("8✊===D")
await e.edit("8=✊==D")
await e.edit("8==✊=D")
await e.edit("8===✊D")
await e.edit("8==✊=D")
await e.edit("8=✊==D")
await e.edit("8✊===D")
await e.edit("8=✊==D")
await e.edit("8==✊=D")
await e.edit("8===✊D")
await e.edit("8==✊=D")
await e.edit("8=✊==D")
await e.edit("8✊===D")
await e.edit("8=✊==D")
await e.edit("8==✊=D")
await e.edit("8===✊D")
await e.edit("8==✊=D")
await e.edit("8=✊==D")
await e.edit("8===✊D💦")
await e.edit("8==✊=D💦💦")
await e.edit("8=✊==D💦💦💦")
await e.edit("8✊===D💦💦💦💦")
await e.edit("8===✊D💦💦💦💦💦")
await e.edit("8==✊=D💦💦💦💦💦💦")
await e.edit("8=✊==D💦💦💦💦💦💦💦")
await e.edit("8✊===D💦💦💦💦💦💦💦💦")
await e.edit("8===✊D💦💦💦💦💦💦💦💦💦")
await e.edit("8==✊=D💦💦💦💦💦💦💦💦💦💦")
await e.edit("8=✊==D Lah Kok Habis?")
await e.edit("😭😭😭😭")
@register(outgoing=True, pattern="^.gas$")
async def gas(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
await e.edit("___________________🚑")
await e.edit("________________🚑___")
await e.edit("______________🚑_____")
await e.edit("___________🚑________")
await e.edit("________🚑___________")
await e.edit("_____🚑______________")
await e.edit("__🚑_________________")
await e.edit("🚑___________________")
await e.edit("_____________________")
await e.edit(choice(FACEREACTS))
@register(outgoing=True, pattern=r"^\.shg$")
async def shrugger(shg):
r""" ¯\_(ツ)_/¯ """
await shg.edit(choice(SHGS))
@register(outgoing=True, pattern=r"^\.(?:penis|dick)\s?(.)?")
async def emoji_penis(e):
emoji = e.pattern_match.group(1)
titid = GAMBAR_TITIT
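    # An optional trailing emoji replaces the default glyph used to draw
    # the ASCII-art picture; the handlers below use the same scheme.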
if emoji:
titid = titid.replace('😋', emoji)
await e.edit(titid)
@register(outgoing=True, pattern=r"^\.(?:kon|kontl)\s?(.)?")
async def emoji_kontl(e):
emoji = e.pattern_match.group(1)
kontl = GAMBAR_KONTL
if emoji:
kontl = kontl.replace('😂', emoji)
await e.edit(kontl)
@register(outgoing=True, pattern=r"^\.ok$")
async def emoji_oke(e):
emoji = e.pattern_match.group(1)
oke = GAMBAR_OK
if emoji:
oke = oke.replace('😂', emoji)
await e.edit(oke)
@register(outgoing=True, pattern=r"^\.skull$")
async def emoji_tengkorak(e):
emoji = e.pattern_match.group(1)
tengkorak = GAMBAR_TENGKORAK
if emoji:
tengkorak = tengkorak.replace('😂', emoji)
await e.edit(tengkorak)
CMD_HELP.update({
"memes":
">`.cowsay`"
"\nUsage: sapi yang mengatakan sesuatu."
"\n\n> .cp"
"\nUsage: Copy paste meme terkenal"
"\n\n>`.vapor`"
"\nUsage: Menguapkan semuanya!"
"\n\n>`.str`"
"\nUsage: Regangkan."
"\n\n>`.10iq`"
"\nUsage: Kamu mundur !!"
"\n\n>`.zal`"
"\nUsage: Munculkan perasaan kacau."
"\n\n>`.Oem`"
"\nPenggunaan: Oeeeem"
"\n\n>`.fp`"
"\nUsage: Telapak Tangan:P"
"\n\n>`.moon`"
"\nUsage: animasi bulan."
"\n\n>`.clock`"
"\nUsage: animasi jam."
"\n\n>`.hi`"
"\nUsage: Sapa semuanya!"
"\n\n>`.coinflip` <Kepala/Ekor>"
"\nUsage: Melempar koin !!"
"\n\n>`.owo`"
"\nUsage: UwU"
"\n\n>`.react`"
"\nUsage: Buat Userbot Anda bereaksi terhadap semuanya."
"\n\n>`.slap`"
"\nUsage: balas tampar mereka dengan benda acak !!"
"\n\n>`.cry`"
"\nUsage: jika kamu melakukan ini, aku akan menangis."
"\n\n>`.shg`"
"\nUsage: Angkat bahu!"
"\n\n>`.run`"
"\nUsage: Biarkan Aku Lari, Lari, LARI!"
"\n\n>`.chase`"
"\nUsage: Sebaiknya Anda mulai berlari"
"\n\n>`.metoo`"
"\nUsage: Haha ya"
"\n\n>`.mock`"
"\nUsage: Lakukan dan temukan kesenangan yang sesungguhnya."
"\n\n>`.clap`"
"\nUsage: Puji orang!"
"\n\n>`.f` <emoji/karakter>"
"\nUsage: F."
"\n\n>`.bt`"
"\nUsage: Percayalah, Anda akan menemukan ini berguna."
"\n\n>`.weeb`"
"\nUsage: Untuk Mengubah Teks Menjadi Weeb-ify."
"\n\n>`.type` <teks>"
"\nUsage: Hanya perintah kecil untuk membuat keyboard Anda menjadi mesin tik!"
"\n\n>`.lfy` <query>"
"\nUsage: Biar saya Google itu untuk Anda dengan cepat!"
"\n\n>`.decide` [Alternatif: (.yes, .no, .maybe)]"
"\nUsage: Buat keputusan cepat."
"\n\n> `.nou` `.bot` `.rock` `.gey` `.tf` `.paw` `.tai` `.nih`"
"\n> `.fag` `.gtfo`; `.stfu` `.lol` `.lool` `.fail` `.leave`"
"\n> `.iwi` `.sayhi` `.koc` `.gas` `.earth` `.love` `.rain`"
"\n> `.penis` `.emo` `.fuck` `.skull` `.monyet`\nUsage: Cobain aja"
"\n\n\n**Semoga Harimu Menyenangkan**\n➥ `Alvin`"
})
| 27.239744
| 193
| 0.500264
|
| true
| true
|
790937151c22417254dfaa34148a0640c7540f05
| 2,679
|
py
|
Python
|
restclients/dao_implementation/catalyst.py
|
uw-it-cte/uw-restclients
|
2b09348bf066e5508304401f93f281805e965af5
|
[
"Apache-2.0"
] | null | null | null |
restclients/dao_implementation/catalyst.py
|
uw-it-cte/uw-restclients
|
2b09348bf066e5508304401f93f281805e965af5
|
[
"Apache-2.0"
] | null | null | null |
restclients/dao_implementation/catalyst.py
|
uw-it-cte/uw-restclients
|
2b09348bf066e5508304401f93f281805e965af5
|
[
"Apache-2.0"
] | null | null | null |
"""
Contains Catalyst DAO implementations.
"""
from django.conf import settings
from restclients.mock_http import MockHTTP
from restclients.dao_implementation import get_timeout
from restclients.dao_implementation.live import get_con_pool, get_live_url
from restclients.dao_implementation.mock import get_mockdata_url
import datetime
import hashlib
import pytz
class File(object):
"""
The File DAO implementation returns generally static content. Use this
DAO with this configuration:
    RESTCLIENTS_CATALYST_DAO_CLASS =
'restclients.dao_implementation.catalyst.File'
"""
def getURL(self, url, headers):
return get_mockdata_url("catalyst", "file", url, headers)
class Live(object):
"""
This DAO provides real data. It requires further configuration, e.g.
For cert auth:
RESTCLIENTS_CATALYST_CERT_FILE='/path/to/an/authorized/cert.cert',
RESTCLIENTS_CATALYST_KEY_FILE='/path/to/the/certs_key.key',
SolAuth Authentication (personal only):
RESTCLIENTS_CATALYST_SOL_AUTH_PUBLIC_KEY='public_key'
RESTCLIENTS_CATALYST_SOL_AUTH_PRIVATE_KEY='12345'
SolAuth tokens are available at https://catalyst.uw.edu/rest_user
For an alternate host:
RESTCLIENTS_CATALYST_HOST = 'https://my-dev-server/'
"""
pool = None
def getURL(self, url, headers):
host = settings.RESTCLIENTS_CATALYST_HOST
if hasattr(settings, "RESTCLIENTS_CATALYST_CERT_FILE"):
Live.pool = get_con_pool(host,
settings.RESTCLIENTS_CATALYST_KEY_FILE,
settings.RESTCLIENTS_CATALYST_CERT_FILE,
socket_timeout=get_timeout("catalyst"))
else:
Live.pool = get_con_pool(host,
socket_timeout=get_timeout("catalyst"))
if hasattr(settings, "RESTCLIENTS_CATALYST_SOL_AUTH_PRIVATE_KEY"):
# Use js_rest instead of rest, to avoid certificate issues
url = url.replace("/rest/", "/js_rest/")
now_with_tz = datetime.datetime.now(pytz.utc).strftime(
"%a %b %d %H:%M:%S %Z %Y")
header_base = "%s\nGET\n%s\n%s\n" % (
settings.RESTCLIENTS_CATALYST_SOL_AUTH_PRIVATE_KEY,
url,
now_with_tz
)
public_key = settings.RESTCLIENTS_CATALYST_SOL_AUTH_PUBLIC_KEY
hashed = hashlib.sha1(header_base).hexdigest()
headers["Authorization"] = "SolAuth %s:%s" % (public_key, hashed)
headers["Date"] = now_with_tz
return get_live_url(Live.pool, "GET", host, url, headers=headers)
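# Editor's note (configuration sketch only; the setting values below are the
# placeholder examples from the docstrings above, not real credentials):
#   RESTCLIENTS_CATALYST_HOST = 'https://my-dev-server/'
#   RESTCLIENTS_CATALYST_CERT_FILE = '/path/to/an/authorized/cert.cert'
#   RESTCLIENTS_CATALYST_KEY_FILE = '/path/to/the/certs_key.key'
# or, for personal SolAuth tokens (from https://catalyst.uw.edu/rest_user):
#   RESTCLIENTS_CATALYST_SOL_AUTH_PUBLIC_KEY = 'public_key'
#   RESTCLIENTS_CATALYST_SOL_AUTH_PRIVATE_KEY = '12345'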
| 34.792208
| 77
| 0.658081
|
from django.conf import settings
from restclients.mock_http import MockHTTP
from restclients.dao_implementation import get_timeout
from restclients.dao_implementation.live import get_con_pool, get_live_url
from restclients.dao_implementation.mock import get_mockdata_url
import datetime
import hashlib
import pytz
class File(object):
def getURL(self, url, headers):
return get_mockdata_url("catalyst", "file", url, headers)
class Live(object):
pool = None
def getURL(self, url, headers):
host = settings.RESTCLIENTS_CATALYST_HOST
if hasattr(settings, "RESTCLIENTS_CATALYST_CERT_FILE"):
Live.pool = get_con_pool(host,
settings.RESTCLIENTS_CATALYST_KEY_FILE,
settings.RESTCLIENTS_CATALYST_CERT_FILE,
socket_timeout=get_timeout("catalyst"))
else:
Live.pool = get_con_pool(host,
socket_timeout=get_timeout("catalyst"))
if hasattr(settings, "RESTCLIENTS_CATALYST_SOL_AUTH_PRIVATE_KEY"):
url = url.replace("/rest/", "/js_rest/")
now_with_tz = datetime.datetime.now(pytz.utc).strftime(
"%a %b %d %H:%M:%S %Z %Y")
header_base = "%s\nGET\n%s\n%s\n" % (
settings.RESTCLIENTS_CATALYST_SOL_AUTH_PRIVATE_KEY,
url,
now_with_tz
)
public_key = settings.RESTCLIENTS_CATALYST_SOL_AUTH_PUBLIC_KEY
hashed = hashlib.sha1(header_base).hexdigest()
headers["Authorization"] = "SolAuth %s:%s" % (public_key, hashed)
headers["Date"] = now_with_tz
return get_live_url(Live.pool, "GET", host, url, headers=headers)
| true
| true
|
790938f4ade6cf753a74a4cd0631f47cbb2b2b20
| 1,029
|
py
|
Python
|
scraper/storage_spiders/azoravn.py
|
chongiadung/choinho
|
d2a216fe7a5064d73cdee3e928a7beef7f511fd1
|
[
"MIT"
] | null | null | null |
scraper/storage_spiders/azoravn.py
|
chongiadung/choinho
|
d2a216fe7a5064d73cdee3e928a7beef7f511fd1
|
[
"MIT"
] | 10
|
2020-02-11T23:34:28.000Z
|
2022-03-11T23:16:12.000Z
|
scraper/storage_spiders/azoravn.py
|
chongiadung/choinho
|
d2a216fe7a5064d73cdee3e928a7beef7f511fd1
|
[
"MIT"
] | 3
|
2018-08-05T14:54:25.000Z
|
2021-06-07T01:49:59.000Z
|
# Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//div[@class='product-name']/h1",
'price' : "//p[@class='special-price']/span[@class='price']|//span[@class='regular-price']/span[@class='price']",
'category' : "//div[@class='breadcrumbs']/ul/li/a",
'description' : "//div[@class='box-collateral box-description']/div[@id='details-area']",
'images' : "//p[@class='product-image']/a/@href",
'canonical' : "",
'base_url' : "",
'brand' : "",
'in_stock' : "",
'guarantee' : "",
'promotion' : ""
}
name = 'azora.vn'
allowed_domains = ['azora.vn']
start_urls = ['http://azora.vn/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = ['']
rules = [
#Rule(LinkExtractor(), 'parse_item'),
#Rule(LinkExtractor(), 'parse'),
    Rule(LinkExtractor(allow=[r'/[a-zA-Z0-9-]+\.html($|\?p=\d+$)']), 'parse_item_and_links'),
]
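# Editor's note (illustration only; the URLs are hypothetical): the allow
# pattern above matches product-style pages such as
#   http://azora.vn/ten-san-pham.html
#   http://azora.vn/ten-san-pham.html?p=2
# and routes them to parse_item_and_links.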
| 34.3
| 117
| 0.609329
|
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//div[@class='product-name']/h1",
'price' : "//p[@class='special-price']/span[@class='price']|//span[@class='regular-price']/span[@class='price']",
'category' : "//div[@class='breadcrumbs']/ul/li/a",
'description' : "//div[@class='box-collateral box-description']/div[@id='details-area']",
'images' : "//p[@class='product-image']/a/@href",
'canonical' : "",
'base_url' : "",
'brand' : "",
'in_stock' : "",
'guarantee' : "",
'promotion' : ""
}
name = 'azora.vn'
allowed_domains = ['azora.vn']
start_urls = ['http://azora.vn/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = ['']
rules = [
    Rule(LinkExtractor(allow=[r'/[a-zA-Z0-9-]+\.html($|\?p=\d+$)']), 'parse_item_and_links'),
]
| true
| true
|
790938f7e1357ee27418bae0c541ca6e8a26f23a
| 4,883
|
py
|
Python
|
shub_workflow/clone_job.py
|
curita/shub-workflow
|
5450da1502f8c300be242609dc6ae67bd3702079
|
[
"BSD-3-Clause"
] | null | null | null |
shub_workflow/clone_job.py
|
curita/shub-workflow
|
5450da1502f8c300be242609dc6ae67bd3702079
|
[
"BSD-3-Clause"
] | null | null | null |
shub_workflow/clone_job.py
|
curita/shub-workflow
|
5450da1502f8c300be242609dc6ae67bd3702079
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Utility for cloning ScrapyCloud jobs
Features tagging of cloned from/to jobs (both source and destination) and avoids cloning source jobs that were already cloned.
By default cloned jobs are scheduled in the same project as the source job. If --project-id is given, the target project
is overridden.
"""
import logging
from shub_workflow.script import BaseScript
from shub_workflow.utils import dash_retry_decorator
_LOG = logging.getLogger(__name__)
_LOG.setLevel(logging.INFO)
def _transform_cmd(job_cmd):
if isinstance(job_cmd, list):
return " ".join(["'%s'" % cmd for cmd in job_cmd[1:]])
return job_cmd
_COPIED_FROM_META = {
"job_cmd": ("cmd_args", _transform_cmd),
"units": (None, None),
"spider_args": ("job_args", None),
"tags": ("add_tag", None),
"job_settings": (None, None),
}
class BaseClonner(BaseScript):
@staticmethod
def is_cloned(job):
for tag in job.metadata.get("tags") or []:
if tag.startswith("ClonedTo="):
_LOG.warning(f"Job {job.key} already cloned. Skipped.")
return True
return False
@dash_retry_decorator
def is_cloned_by_jobkey(self, jobkey):
job = self.client.get_job(jobkey)
return self.is_cloned(job)
def job_params_hook(self, job_params):
pass
def clone_job(self, job_key, units=None, extra_tags=None):
extra_tags = extra_tags or []
job = self.client.get_job(job_key)
spider = job.metadata.get("spider")
job_params = dict()
for key, (target_key, _) in _COPIED_FROM_META.items():
if target_key is None:
target_key = key
job_params[target_key] = job.metadata.get(key)
add_tag = job_params.setdefault("add_tag", [])
add_tag = list(filter(lambda x: not x.startswith("ClonedFrom="), add_tag))
add_tag.append(f"ClonedFrom={job_key}")
add_tag.extend(extra_tags)
job_params["add_tag"] = add_tag
if units is not None:
job_params["units"] = units
self.job_params_hook(job_params)
for key, (target_key, transform) in _COPIED_FROM_META.items():
target_key = target_key or key
if transform is None:
def transform(x):
return x
job_params[target_key] = transform(job_params[target_key])
project_id, _, _ = job_key.split("/")
project = self.get_project(self.project_id or project_id)
new_job = self.schedule_generic(project, spider, **job_params)
_LOG.info("Cloned %s to %s", job_key, new_job.key)
jobtags = job.metadata.get("tags")
jobtags.append(f"ClonedTo={new_job.key}")
job.metadata.update({"tags": jobtags})
return job, new_job
@dash_retry_decorator
def schedule_generic(self, project, spider, **job_params):
return project.jobs.run(spider, **job_params)
class CloneJobScript(BaseClonner):
flow_id_required = False
@property
def description(self):
return __doc__
def parse_project_id(self, args):
project_id = super().parse_project_id(args)
if project_id:
return project_id
if args.key:
return args.key[0].split("/")[0]
if args.tag_spider:
return args.tag_spider.split("/")[0]
def add_argparser_options(self):
super().add_argparser_options()
self.argparser.add_argument(
"--key",
type=str,
action="append",
default=[],
help="Target job key. Can be given multiple times. All must be in same project.",
)
self.argparser.add_argument(
"--tag-spider",
help="In format <project_id>/<tag>/<spider name>," "clone given spider from given project id, by tag",
)
self.argparser.add_argument("--units", help="Set number of units. Default is the same as cloned job.", type=int)
def run(self):
if self.args.key:
keys = filter(lambda x: not self.is_cloned_by_jobkey(x), self.args.key)
elif self.args.tag_spider:
keys = []
project_id, tag, spider = self.args.tag_spider.split("/")
for job in self.get_project(project_id).jobs.iter(spider=spider, state=["finished"], has_tag=tag):
if not self.is_cloned_by_jobkey(job["key"]):
keys.append(job["key"])
else:
self.argparser.error("You must provide either --key or --tag-spider.")
for job_key in keys:
try:
self.clone_job(job_key, self.args.units, self.args.tag)
except Exception as e:
_LOG.error("Could not restart job %s: %s", job_key, e)
if __name__ == "__main__":
script = CloneJobScript()
script.run()
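# Editor's note (usage sketch; the job keys and names are hypothetical):
#   python -m shub_workflow.clone_job --key 123/45/678 --key 123/45/679
#   python -m shub_workflow.clone_job --tag-spider 123/sometag/myspider --units 2
# Source jobs that already carry a ClonedTo= tag are skipped, per is_cloned().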
| 32.125
| 120
| 0.612124
|
import logging
from shub_workflow.script import BaseScript
from shub_workflow.utils import dash_retry_decorator
_LOG = logging.getLogger(__name__)
_LOG.setLevel(logging.INFO)
def _transform_cmd(job_cmd):
if isinstance(job_cmd, list):
return " ".join(["'%s'" % cmd for cmd in job_cmd[1:]])
return job_cmd
_COPIED_FROM_META = {
"job_cmd": ("cmd_args", _transform_cmd),
"units": (None, None),
"spider_args": ("job_args", None),
"tags": ("add_tag", None),
"job_settings": (None, None),
}
class BaseClonner(BaseScript):
@staticmethod
def is_cloned(job):
for tag in job.metadata.get("tags") or []:
if tag.startswith("ClonedTo="):
_LOG.warning(f"Job {job.key} already cloned. Skipped.")
return True
return False
@dash_retry_decorator
def is_cloned_by_jobkey(self, jobkey):
job = self.client.get_job(jobkey)
return self.is_cloned(job)
def job_params_hook(self, job_params):
pass
def clone_job(self, job_key, units=None, extra_tags=None):
extra_tags = extra_tags or []
job = self.client.get_job(job_key)
spider = job.metadata.get("spider")
job_params = dict()
for key, (target_key, _) in _COPIED_FROM_META.items():
if target_key is None:
target_key = key
job_params[target_key] = job.metadata.get(key)
add_tag = job_params.setdefault("add_tag", [])
add_tag = list(filter(lambda x: not x.startswith("ClonedFrom="), add_tag))
add_tag.append(f"ClonedFrom={job_key}")
add_tag.extend(extra_tags)
job_params["add_tag"] = add_tag
if units is not None:
job_params["units"] = units
self.job_params_hook(job_params)
for key, (target_key, transform) in _COPIED_FROM_META.items():
target_key = target_key or key
if transform is None:
def transform(x):
return x
job_params[target_key] = transform(job_params[target_key])
project_id, _, _ = job_key.split("/")
project = self.get_project(self.project_id or project_id)
new_job = self.schedule_generic(project, spider, **job_params)
_LOG.info("Cloned %s to %s", job_key, new_job.key)
jobtags = job.metadata.get("tags")
jobtags.append(f"ClonedTo={new_job.key}")
job.metadata.update({"tags": jobtags})
return job, new_job
@dash_retry_decorator
def schedule_generic(self, project, spider, **job_params):
return project.jobs.run(spider, **job_params)
class CloneJobScript(BaseClonner):
flow_id_required = False
@property
def description(self):
return __doc__
def parse_project_id(self, args):
project_id = super().parse_project_id(args)
if project_id:
return project_id
if args.key:
return args.key[0].split("/")[0]
if args.tag_spider:
return args.tag_spider.split("/")[0]
def add_argparser_options(self):
super().add_argparser_options()
self.argparser.add_argument(
"--key",
type=str,
action="append",
default=[],
help="Target job key. Can be given multiple times. All must be in same project.",
)
self.argparser.add_argument(
"--tag-spider",
help="In format <project_id>/<tag>/<spider name>," "clone given spider from given project id, by tag",
)
self.argparser.add_argument("--units", help="Set number of units. Default is the same as cloned job.", type=int)
def run(self):
if self.args.key:
keys = filter(lambda x: not self.is_cloned_by_jobkey(x), self.args.key)
elif self.args.tag_spider:
keys = []
project_id, tag, spider = self.args.tag_spider.split("/")
for job in self.get_project(project_id).jobs.iter(spider=spider, state=["finished"], has_tag=tag):
if not self.is_cloned_by_jobkey(job["key"]):
keys.append(job["key"])
else:
self.argparser.error("You must provide either --key or --tag-spider.")
for job_key in keys:
try:
self.clone_job(job_key, self.args.units, self.args.tag)
except Exception as e:
_LOG.error("Could not restart job %s: %s", job_key, e)
if __name__ == "__main__":
script = CloneJobScript()
script.run()
| true
| true
|
79093ae44bacb9494b8349f6098239d9b14a8d37
| 567
|
py
|
Python
|
Glyph-Builders/lowercase_from_upper.py
|
m4rc1e/mf-glyphs-scripts
|
c5ed026e5b72a886f1e574f85659cdcae041e66a
|
[
"MIT"
] | 27
|
2015-09-01T00:19:34.000Z
|
2021-12-05T01:59:01.000Z
|
Glyph-Builders/lowercase_from_upper.py
|
m4rc1e/mf-glyphs-scripts
|
c5ed026e5b72a886f1e574f85659cdcae041e66a
|
[
"MIT"
] | 26
|
2016-01-03T09:31:39.000Z
|
2018-06-01T18:05:58.000Z
|
Glyph-Builders/lowercase_from_upper.py
|
m4rc1e/mf-glyphs-scripts
|
c5ed026e5b72a886f1e574f85659cdcae041e66a
|
[
"MIT"
] | 7
|
2016-01-03T07:09:04.000Z
|
2018-04-06T00:24:14.000Z
|
#MenuTitle: Generate lowercase from uppercase
"""
Generate lowercase a-z from uppercase A-Z
TODO (M Foley) Generate all lowercase glyphs, not just a-z
"""
font = Glyphs.font
glyphs = list('abcdefghijklmnopqrstuvwxyz')
masters = font.masters
for glyph_name in glyphs:
glyph = GSGlyph(glyph_name)
glyph.updateGlyphInfo()
font.glyphs.append(glyph)
for idx,layer in enumerate(masters):
comp_name = glyph_name.upper()
component = GSComponent(comp_name, (0,0))
glyph.layers[idx].components.append(component)
Glyphs.redraw()
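# Editor's note: for every master, each new lowercase glyph is created as a
# single component that references its uppercase counterpart at offset (0, 0),
# so 'a' initially renders with the outlines of 'A' until it is redrawn.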
| 24.652174
| 58
| 0.714286
|
font = Glyphs.font
glyphs = list('abcdefghijklmnopqrstuvwxyz')
masters = font.masters
for glyph_name in glyphs:
glyph = GSGlyph(glyph_name)
glyph.updateGlyphInfo()
font.glyphs.append(glyph)
for idx,layer in enumerate(masters):
comp_name = glyph_name.upper()
component = GSComponent(comp_name, (0,0))
glyph.layers[idx].components.append(component)
Glyphs.redraw()
| true
| true
|
79093b7e069f398d82b0a766f8ff00f20d754159
| 8,281
|
py
|
Python
|
pyoomph/__main__.py
|
Akuli/oomph
|
508d64e8eae69a904aab21ef49e0c75ec4a2cad0
|
[
"MIT"
] | 2
|
2021-03-07T03:12:32.000Z
|
2021-04-08T20:44:02.000Z
|
pyoomph/__main__.py
|
Akuli/oomph
|
508d64e8eae69a904aab21ef49e0c75ec4a2cad0
|
[
"MIT"
] | 167
|
2021-03-03T10:30:06.000Z
|
2021-04-27T10:06:37.000Z
|
pyoomph/__main__.py
|
Akuli/oomph
|
508d64e8eae69a904aab21ef49e0c75ec4a2cad0
|
[
"MIT"
] | 1
|
2021-04-04T17:12:39.000Z
|
2021-04-04T17:12:39.000Z
|
from __future__ import annotations
import argparse
import atexit
import itertools
import shlex
import shutil
import signal
import subprocess
import sys
import traceback
from pathlib import Path
from typing import Dict, List, Optional, Tuple
from pyoomph import ast, ast2ir, ast_transformer, c_output, ir, parser
python_code_dir = Path(__file__).absolute().parent
project_root = python_code_dir.parent
class CompilationUnit:
ast: List[ast.ToplevelDeclaration]
def __init__(self, source_path: Path, session: c_output.Session):
self.source_path = source_path
self.session = session
def _handle_error(self) -> None:
traceback.print_exc()
print(f"\nThis happened while compiling {self.source_path}", file=sys.stderr)
sys.exit(1)
def create_untyped_ast(self) -> None:
try:
source_code = self.source_path.read_text(encoding="utf-8")
self.ast = ast_transformer.transform_file(
parser.parse_file(
source_code, self.source_path, project_root / "stdlib"
)
)
except Exception:
self._handle_error()
def create_c_code(self, exports: List[ir.Symbol]) -> None:
try:
the_ir = ast2ir.convert_program(self.ast, self.source_path, exports)
self.session.create_c_code(the_ir, self.source_path)
except Exception:
self._handle_error()
def get_c_compiler_command(c_paths: List[Path], exepath: Path) -> Tuple[List[str], str]:
compile_info = {}
with (project_root / "obj" / "compile_info.txt").open() as file:
for line in file:
key, value = line.rstrip("\n").split("=", maxsplit=1)
compile_info[key] = value
before_files = (
[compile_info["cc"]]
+ shlex.split(compile_info["cflags"])
+ [str(path) for path in project_root.glob("obj/*.o")]
)
after_files = (
["-o", str(exepath)]
+ shlex.split(compile_info["ldflags"])
+ ["-I", str(project_root)]
)
return (
before_files + [str(path) for path in c_paths] + after_files,
" ".join(
[shlex.quote(arg) for arg in before_files]
+ [f"<{len(c_paths)} files>"]
+ [shlex.quote(arg) for arg in after_files]
),
)
def run(command: List[str], verbose: bool, human_readable: Optional[str] = None) -> int:
if verbose:
if human_readable is None:
human_readable = " ".join(map(shlex.quote, command))
print("Running:", human_readable, file=sys.stderr)
return subprocess.run(command).returncode
def get_compilation_dir(parent_dir: Path, name_hint: str) -> Path:
for i in itertools.count():
path = parent_dir / (name_hint + str(i))
path.mkdir(parents=True, exist_ok=True)
try:
(path / "compiling").touch(exist_ok=False)
except FileExistsError:
# Another instance of oomph compiler running in parallel
continue
else:
atexit.register((path / "compiling").unlink)
return path
assert False # make mypy feel good
def compute_dependency_graph(
session: c_output.Session,
infile: Path,
verbose: bool,
) -> Dict[CompilationUnit, List[Path]]:
dependency_graph: Dict[CompilationUnit, List[Path]] = {}
queue = [infile]
while queue:
# Pop the next source file to parse
source_path = queue.pop()
if source_path in (unit.source_path for unit in dependency_graph.keys()):
continue
if verbose:
print("Parsing", source_path)
# Create a compilation unit out of it and parse it into an untyped ast
candidate_unit = CompilationUnit(source_path, session)
candidate_unit.create_untyped_ast()
# Calculate its dependencies and add them to the dependencies dictionary,
# including builtins if necessary, and add those dependencies to the queue
current_dependencies = [
top_declaration.path
for top_declaration in candidate_unit.ast
if isinstance(top_declaration, ast.Import)
]
if source_path != project_root / "builtins.oomph":
current_dependencies.append(project_root / "builtins.oomph")
dependency_graph[candidate_unit] = current_dependencies
queue.extend(current_dependencies)
return dependency_graph
def compute_compilation_order(
verbose: bool,
dependency_graph: Dict[CompilationUnit, List[Path]],
) -> List[CompilationUnit]:
compilation_order: List[CompilationUnit] = []
while len(compilation_order) < len(dependency_graph):
candidate_unit = next(
u for u in dependency_graph.keys() if u not in compilation_order
)
breadcrumbs = [candidate_unit]
while True:
uncompiled_dependencies = [
u
for u in dependency_graph.keys()
if u not in compilation_order
and u.source_path in dependency_graph[candidate_unit]
]
if not uncompiled_dependencies:
break
candidate_unit = uncompiled_dependencies[0]
if candidate_unit in breadcrumbs:
message = (
" --> ".join(d.source_path.name for d in breadcrumbs)
+ " --> "
+ candidate_unit.source_path.name
)
raise RuntimeError("cyclic imports: " + message)
breadcrumbs.append(candidate_unit)
compilation_order.append(candidate_unit)
return compilation_order
def main() -> None:
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("infile", type=Path)
arg_parser.add_argument("-o", "--outfile", type=Path)
arg_parser.add_argument("--valgrind", default="")
arg_parser.add_argument("-v", "--verbose", action="store_true")
compiler_args, program_args = arg_parser.parse_known_args()
try:
cache_dir = compiler_args.infile.parent / ".oomph-cache"
cache_dir.mkdir(exist_ok=True)
except OSError:
cache_dir = Path.cwd() / ".oomph-cache"
cache_dir.mkdir(exist_ok=True)
# Create a compiler session
session = c_output.Session(
get_compilation_dir(cache_dir, compiler_args.infile.stem + "_compilation")
)
# Calculate the dependency graph
dependency_graph = compute_dependency_graph(
session, compiler_args.infile.absolute(), compiler_args.verbose
)
# Calculate in which order we need to compile our units
compilation_order = compute_compilation_order(
compiler_args.verbose, dependency_graph
)
# Compile in the calculated order
for unit in compilation_order:
if compiler_args.verbose:
print("Creating C code:", unit.source_path)
unit.create_c_code(session.symbols)
# Write out everything and compile it
c_paths = session.write_everything(project_root / "builtins.oomph")
exe_path = session.compilation_dir / compiler_args.infile.stem
command, human_readable_command = get_c_compiler_command(c_paths, exe_path)
result = run(command, compiler_args.verbose, human_readable_command)
if result != 0:
sys.exit(result)
# If we have an outfile path, move the resulting executable to it and bail
if compiler_args.outfile is not None:
assert not compiler_args.outfile.is_dir() # shutil.move is weird for dirs
shutil.move(str(exe_path), str(compiler_args.outfile))
if compiler_args.verbose:
print("Moved executable to", compiler_args.outfile)
return
# Otherwise, run it directly
command = shlex.split(compiler_args.valgrind) + [str(exe_path)] + program_args
result = run(command, compiler_args.verbose)
if result < 0: # killed by signal
message = f"Program killed by signal {abs(result)}"
try:
message += f" ({signal.Signals(abs(result)).name})"
except ValueError: # e.g. SIGRTMIN + 1
pass
print(message, file=sys.stderr)
elif result > 0:
print(f"Program exited with status {result}", file=sys.stderr)
sys.exit(result)
main()
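# Editor's note (usage sketch; file names are placeholders). Per the argparser
# above:
#   python -m pyoomph program.oomph              -> compile and run
#   python -m pyoomph -o program program.oomph   -> compile only, move the exe
#   python -m pyoomph --valgrind "valgrind -q" program.oomph
# Unrecognised arguments are passed through to the compiled program
# (parse_known_args above).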
| 35.088983
| 88
| 0.643521
|
from __future__ import annotations
import argparse
import atexit
import itertools
import shlex
import shutil
import signal
import subprocess
import sys
import traceback
from pathlib import Path
from typing import Dict, List, Optional, Tuple
from pyoomph import ast, ast2ir, ast_transformer, c_output, ir, parser
python_code_dir = Path(__file__).absolute().parent
project_root = python_code_dir.parent
class CompilationUnit:
ast: List[ast.ToplevelDeclaration]
def __init__(self, source_path: Path, session: c_output.Session):
self.source_path = source_path
self.session = session
def _handle_error(self) -> None:
traceback.print_exc()
print(f"\nThis happened while compiling {self.source_path}", file=sys.stderr)
sys.exit(1)
def create_untyped_ast(self) -> None:
try:
source_code = self.source_path.read_text(encoding="utf-8")
self.ast = ast_transformer.transform_file(
parser.parse_file(
source_code, self.source_path, project_root / "stdlib"
)
)
except Exception:
self._handle_error()
def create_c_code(self, exports: List[ir.Symbol]) -> None:
try:
the_ir = ast2ir.convert_program(self.ast, self.source_path, exports)
self.session.create_c_code(the_ir, self.source_path)
except Exception:
self._handle_error()
def get_c_compiler_command(c_paths: List[Path], exepath: Path) -> Tuple[List[str], str]:
compile_info = {}
with (project_root / "obj" / "compile_info.txt").open() as file:
for line in file:
key, value = line.rstrip("\n").split("=", maxsplit=1)
compile_info[key] = value
before_files = (
[compile_info["cc"]]
+ shlex.split(compile_info["cflags"])
+ [str(path) for path in project_root.glob("obj/*.o")]
)
after_files = (
["-o", str(exepath)]
+ shlex.split(compile_info["ldflags"])
+ ["-I", str(project_root)]
)
return (
before_files + [str(path) for path in c_paths] + after_files,
" ".join(
[shlex.quote(arg) for arg in before_files]
+ [f"<{len(c_paths)} files>"]
+ [shlex.quote(arg) for arg in after_files]
),
)
def run(command: List[str], verbose: bool, human_readable: Optional[str] = None) -> int:
if verbose:
if human_readable is None:
human_readable = " ".join(map(shlex.quote, command))
print("Running:", human_readable, file=sys.stderr)
return subprocess.run(command).returncode
def get_compilation_dir(parent_dir: Path, name_hint: str) -> Path:
for i in itertools.count():
path = parent_dir / (name_hint + str(i))
path.mkdir(parents=True, exist_ok=True)
try:
(path / "compiling").touch(exist_ok=False)
except FileExistsError:
continue
else:
atexit.register((path / "compiling").unlink)
return path
assert False
def compute_dependency_graph(
session: c_output.Session,
infile: Path,
verbose: bool,
) -> Dict[CompilationUnit, List[Path]]:
dependency_graph: Dict[CompilationUnit, List[Path]] = {}
queue = [infile]
while queue:
source_path = queue.pop()
if source_path in (unit.source_path for unit in dependency_graph.keys()):
continue
if verbose:
print("Parsing", source_path)
candidate_unit = CompilationUnit(source_path, session)
candidate_unit.create_untyped_ast()
current_dependencies = [
top_declaration.path
for top_declaration in candidate_unit.ast
if isinstance(top_declaration, ast.Import)
]
if source_path != project_root / "builtins.oomph":
current_dependencies.append(project_root / "builtins.oomph")
dependency_graph[candidate_unit] = current_dependencies
queue.extend(current_dependencies)
return dependency_graph
def compute_compilation_order(
verbose: bool,
dependency_graph: Dict[CompilationUnit, List[Path]],
) -> List[CompilationUnit]:
compilation_order: List[CompilationUnit] = []
while len(compilation_order) < len(dependency_graph):
candidate_unit = next(
u for u in dependency_graph.keys() if u not in compilation_order
)
breadcrumbs = [candidate_unit]
while True:
uncompiled_dependencies = [
u
for u in dependency_graph.keys()
if u not in compilation_order
and u.source_path in dependency_graph[candidate_unit]
]
if not uncompiled_dependencies:
break
candidate_unit = uncompiled_dependencies[0]
if candidate_unit in breadcrumbs:
message = (
" --> ".join(d.source_path.name for d in breadcrumbs)
+ " --> "
+ candidate_unit.source_path.name
)
raise RuntimeError("cyclic imports: " + message)
breadcrumbs.append(candidate_unit)
compilation_order.append(candidate_unit)
return compilation_order
def main() -> None:
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("infile", type=Path)
arg_parser.add_argument("-o", "--outfile", type=Path)
arg_parser.add_argument("--valgrind", default="")
arg_parser.add_argument("-v", "--verbose", action="store_true")
compiler_args, program_args = arg_parser.parse_known_args()
try:
cache_dir = compiler_args.infile.parent / ".oomph-cache"
cache_dir.mkdir(exist_ok=True)
except OSError:
cache_dir = Path.cwd() / ".oomph-cache"
cache_dir.mkdir(exist_ok=True)
session = c_output.Session(
get_compilation_dir(cache_dir, compiler_args.infile.stem + "_compilation")
)
dependency_graph = compute_dependency_graph(
session, compiler_args.infile.absolute(), compiler_args.verbose
)
compilation_order = compute_compilation_order(
compiler_args.verbose, dependency_graph
)
for unit in compilation_order:
if compiler_args.verbose:
print("Creating C code:", unit.source_path)
unit.create_c_code(session.symbols)
c_paths = session.write_everything(project_root / "builtins.oomph")
exe_path = session.compilation_dir / compiler_args.infile.stem
command, human_readable_command = get_c_compiler_command(c_paths, exe_path)
result = run(command, compiler_args.verbose, human_readable_command)
if result != 0:
sys.exit(result)
if compiler_args.outfile is not None:
assert not compiler_args.outfile.is_dir()
shutil.move(str(exe_path), str(compiler_args.outfile))
if compiler_args.verbose:
print("Moved executable to", compiler_args.outfile)
return
command = shlex.split(compiler_args.valgrind) + [str(exe_path)] + program_args
result = run(command, compiler_args.verbose)
if result < 0:
message = f"Program killed by signal {abs(result)}"
try:
message += f" ({signal.Signals(abs(result)).name})"
except ValueError:
pass
print(message, file=sys.stderr)
elif result > 0:
print(f"Program exited with status {result}", file=sys.stderr)
sys.exit(result)
main()
| true
| true
|
79093cfb5e4feea5a49a917795cc271cb23118c8
| 373
|
py
|
Python
|
records/migrations/0007_rename_date_records_created_date.py
|
Glucemy/Glucemy-back
|
c9fcf7996b3f13c67697aadd449e3e32afb1fa1b
|
[
"MIT"
] | null | null | null |
records/migrations/0007_rename_date_records_created_date.py
|
Glucemy/Glucemy-back
|
c9fcf7996b3f13c67697aadd449e3e32afb1fa1b
|
[
"MIT"
] | null | null | null |
records/migrations/0007_rename_date_records_created_date.py
|
Glucemy/Glucemy-back
|
c9fcf7996b3f13c67697aadd449e3e32afb1fa1b
|
[
"MIT"
] | null | null | null |
# Generated by Django 4.0.3 on 2022-04-06 17:40
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('records', '0006_alter_records_phasesday'),
]
operations = [
migrations.RenameField(
model_name='records',
old_name='date',
new_name='created_date',
),
]
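# Editor's note: RenameField renames the model field (and its column) from
# date to created_date; Django derives the reverse operation automatically
# when this migration is unapplied.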
| 19.631579
| 52
| 0.595174
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('records', '0006_alter_records_phasesday'),
]
operations = [
migrations.RenameField(
model_name='records',
old_name='date',
new_name='created_date',
),
]
| true
| true
|
79093db4565f6064592c3384dbdbc12088b803e4
| 12,886
|
bzl
|
Python
|
rules_daml/daml.bzl
|
FlashSheridan/daml
|
d7eb4580665d1ad07071b4eaa1814fd41251714a
|
[
"Apache-2.0"
] | null | null | null |
rules_daml/daml.bzl
|
FlashSheridan/daml
|
d7eb4580665d1ad07071b4eaa1814fd41251714a
|
[
"Apache-2.0"
] | null | null | null |
rules_daml/daml.bzl
|
FlashSheridan/daml
|
d7eb4580665d1ad07071b4eaa1814fd41251714a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
load("@build_environment//:configuration.bzl", "ghc_version", "sdk_version")
_damlc = attr.label(
allow_single_file = True,
default = Label("//compiler/damlc"),
executable = True,
cfg = "host",
doc = "The DAML compiler.",
)
_zipper = attr.label(
allow_single_file = True,
default = Label("@bazel_tools//tools/zip:zipper"),
executable = True,
cfg = "host",
)
def _daml_configure_impl(ctx):
project_name = ctx.attr.project_name
project_version = ctx.attr.project_version
daml_yaml = ctx.outputs.daml_yaml
target = ctx.attr.target
ctx.actions.write(
output = daml_yaml,
content = """
sdk-version: {sdk}
name: {name}
version: {version}
source: .
dependencies: []
build-options: [{target}]
""".format(
sdk = sdk_version,
name = project_name,
version = project_version,
target = "--target=" + target if (target) else "",
),
)
_daml_configure = rule(
implementation = _daml_configure_impl,
attrs = {
"project_name": attr.string(
mandatory = True,
doc = "Name of the DAML project.",
),
"project_version": attr.string(
mandatory = True,
doc = "Version of the DAML project.",
),
"daml_yaml": attr.output(
mandatory = True,
doc = "The generated daml.yaml config file.",
),
"target": attr.string(
doc = "DAML-LF version to output.",
),
},
)
def file_of_target(k):
[file] = k.files.to_list()
return file
def make_cp_command(src, dest):
return "mkdir -p $(dirname {dest}); cp -f {src} {dest}".format(
src = src,
dest = dest,
)
def _daml_build_impl(ctx):
name = ctx.label.name
daml_yaml = ctx.file.daml_yaml
srcs = ctx.files.srcs
dar_dict = ctx.attr.dar_dict
damlc = ctx.file._damlc
input_dars = [file_of_target(k) for k in dar_dict.keys()]
output_dar = ctx.outputs.dar
posix = ctx.toolchains["@rules_sh//sh/posix:toolchain_type"]
ctx.actions.run_shell(
tools = [damlc],
inputs = [daml_yaml] + srcs + input_dars,
outputs = [output_dar],
progress_message = "Building DAML project %s" % name,
command = """
set -eou pipefail
tmpdir=$(mktemp -d)
trap "rm -rf $tmpdir" EXIT
cp -f {config} $tmpdir/daml.yaml
# Having to produce all the daml.yaml files via a genrule is annoying
# so we allow hardcoded version numbers and patch them here.
{sed} -i 's/^sdk-version:.*$/sdk-version: {sdk_version}/' $tmpdir/daml.yaml
{cp_srcs}
{cp_dars}
{damlc} build --project-root $tmpdir -o $PWD/{output_dar}
""".format(
config = daml_yaml.path,
cp_srcs = "\n".join([
make_cp_command(
src = src.path,
dest = "$tmpdir/" + src.path,
)
for src in srcs
]),
cp_dars = "\n".join([
make_cp_command(
src = file_of_target(k).path,
dest = "$tmpdir/" + v,
)
for k, v in dar_dict.items()
]),
sed = posix.commands["sed"],
damlc = damlc.path,
output_dar = output_dar.path,
sdk_version = sdk_version,
),
)
_daml_build = rule(
implementation = _daml_build_impl,
attrs = {
"daml_yaml": attr.label(
allow_single_file = True,
mandatory = True,
doc = "The daml.yaml config file.",
),
"srcs": attr.label_list(
allow_files = [".daml"],
mandatory = True,
doc = "DAML files in this DAML project.",
),
"dar_dict": attr.label_keyed_string_dict(
mandatory = True,
allow_files = True,
doc = "Other DAML projects referenced by this DAML project.",
),
"dar": attr.output(
mandatory = True,
doc = "The generated DAR file.",
),
"_damlc": _damlc,
},
toolchains = ["@rules_sh//sh/posix:toolchain_type"],
)
def _extract_main_dalf_impl(ctx):
project_name = ctx.attr.project_name
project_version = ctx.attr.project_version
input_dar = ctx.file.dar
output_dalf = ctx.outputs.dalf
zipper = ctx.file._zipper
posix = ctx.toolchains["@rules_sh//sh/posix:toolchain_type"]
ctx.actions.run_shell(
tools = [zipper],
inputs = [input_dar],
outputs = [output_dalf],
progress_message = "Extract DALF from DAR (%s)" % project_name,
command = """
set -eoux pipefail
TMPDIR=$(mktemp -d)
trap "rm -rf $TMPDIR" EXIT
# While zipper has a -d option, it insists on it
# being a relative path so we don't use it.
ZIPPER=$PWD/{zipper}
DAR=$PWD/{input_dar}
(cd $TMPDIR && $ZIPPER x $DAR)
main_dalf=$({find} $TMPDIR/ -name '{project_name}-{project_version}-[a-z0-9]*.dalf')
cp $main_dalf {output_dalf}
""".format(
zipper = zipper.path,
find = posix.commands["find"],
project_name = project_name,
project_version = project_version,
input_dar = input_dar.path,
output_dalf = output_dalf.path,
),
)
_extract_main_dalf = rule(
implementation = _extract_main_dalf_impl,
attrs = {
"project_name": attr.string(
mandatory = True,
doc = "Name of the DAML project.",
),
"project_version": attr.string(
mandatory = True,
doc = "Version of the DAML project.",
),
"dar": attr.label(
allow_single_file = [".dar"],
mandatory = True,
doc = "The DAR from which the DALF will be extracted.",
),
"dalf": attr.output(
mandatory = True,
doc = "The extracted DALF.",
),
"_zipper": _zipper,
},
toolchains = ["@rules_sh//sh/posix:toolchain_type"],
)
def _daml_validate_test_impl(ctx):
name = ctx.label.name
dar = ctx.file.dar
script = ctx.actions.declare_file(name + ".sh")
damlc = ctx.file._damlc
script_content = """
set -eou pipefail
DAMLC=$(rlocation $TEST_WORKSPACE/{damlc})
DAR=$(rlocation $TEST_WORKSPACE/{dar})
$DAMLC validate-dar $DAR
""".format(
damlc = damlc.short_path,
dar = dar.short_path,
)
ctx.actions.write(
output = script,
content = script_content,
)
runfiles = ctx.runfiles(files = [dar, damlc])
return [DefaultInfo(executable = script, runfiles = runfiles)]
_daml_validate_test = rule(
implementation = _daml_validate_test_impl,
attrs = {
"dar": attr.label(
allow_single_file = True,
mandatory = True,
doc = "The DAR to validate.",
),
"_damlc": _damlc,
},
test = True,
)
def _inspect_dar(base):
name = base + "-inspect"
dar = base + ".dar"
pp = base + ".dar.pp"
native.genrule(
name = name,
srcs = [
dar,
"//compiler/damlc",
],
outs = [pp],
cmd = "$(location //compiler/damlc) inspect $(location :" + dar + ") > $@",
)
_default_project_version = "1.0.0"
def daml_compile(
name,
srcs,
version = _default_project_version,
target = None,
**kwargs):
"Build a DAML project, with a generated daml.yaml."
if len(srcs) == 0:
fail("daml_compile: Expected `srcs' to be non-empty.")
daml_yaml = name + ".yaml"
_daml_configure(
name = name + ".configure",
project_name = name,
project_version = version,
daml_yaml = daml_yaml,
target = target,
**kwargs
)
_daml_build(
name = name + ".build",
daml_yaml = daml_yaml,
srcs = srcs,
dar_dict = {},
dar = name + ".dar",
**kwargs
)
_inspect_dar(
base = name,
)
def daml_compile_with_dalf(
name,
version = _default_project_version,
**kwargs):
"Build a DAML project, with a generated daml.yaml, and extract the main DALF."
daml_compile(
name = name,
version = version,
**kwargs
)
_extract_main_dalf(
name = name + ".extract",
project_name = name,
project_version = version,
dar = name + ".dar",
dalf = name + ".dalf",
)
def daml_build_test(
name,
project_dir,
daml_config_basename = "daml.yaml",
daml_subdir_basename = "daml",
dar_dict = {},
**kwargs):
"Build a DAML project and validate the resulting .dar file."
daml_yaml = project_dir + "/" + daml_config_basename
srcs = native.glob([project_dir + "/" + daml_subdir_basename + "/**/*.daml"])
_daml_build(
name = name,
daml_yaml = daml_yaml,
srcs = srcs,
dar_dict = dar_dict,
dar = name + ".dar",
**kwargs
)
_daml_validate_test(
name = name + ".test",
dar = name + ".dar",
)
def _daml_test_impl(ctx):
script = """
set -eou pipefail
DAMLC=$(rlocation $TEST_WORKSPACE/{damlc})
rlocations () {{ for i in $@; do echo $(rlocation $TEST_WORKSPACE/$i); done; }}
$DAMLC test --files $(rlocations "{files}")
""".format(
damlc = ctx.executable.damlc.short_path,
files = " ".join([f.short_path for f in ctx.files.srcs]),
)
ctx.actions.write(
output = ctx.outputs.executable,
content = script,
)
damlc_runfiles = ctx.attr.damlc[DefaultInfo].data_runfiles
runfiles = ctx.runfiles(
collect_data = True,
files = ctx.files.srcs,
).merge(damlc_runfiles)
return [DefaultInfo(runfiles = runfiles)]
daml_test = rule(
implementation = _daml_test_impl,
attrs = {
"srcs": attr.label_list(
allow_files = [".daml"],
default = [],
doc = "DAML source files to test.",
),
"damlc": attr.label(
executable = True,
cfg = "host",
allow_files = True,
default = Label("//compiler/damlc"),
),
},
test = True,
)
def _daml_doctest_impl(ctx):
script = """
set -eou pipefail
DAMLC=$(rlocation $TEST_WORKSPACE/{damlc})
CPP=$(rlocation $TEST_WORKSPACE/{cpp})
rlocations () {{ for i in $@; do echo $(rlocation $TEST_WORKSPACE/$i); done; }}
$DAMLC doctest {flags} --cpp $CPP --package-name {package_name}-{version} $(rlocations "{files}")
""".format(
damlc = ctx.executable.damlc.short_path,
# we end up with "../hpp/hpp" while we want "external/hpp/hpp"
# so we just do the replacement ourselves.
cpp = ctx.executable.cpp.short_path.replace("..", "external"),
package_name = ctx.attr.package_name,
flags = " ".join(ctx.attr.flags),
version = ghc_version,
files = " ".join([
f.short_path
for f in ctx.files.srcs
if all([not f.short_path.endswith(ignore) for ignore in ctx.attr.ignored_srcs])
]),
)
ctx.actions.write(
output = ctx.outputs.executable,
content = script,
)
damlc_runfiles = ctx.attr.damlc[DefaultInfo].data_runfiles
cpp_runfiles = ctx.attr.cpp[DefaultInfo].data_runfiles
runfiles = ctx.runfiles(
collect_data = True,
files = ctx.files.srcs,
).merge(damlc_runfiles).merge(cpp_runfiles)
return [DefaultInfo(runfiles = runfiles)]
daml_doc_test = rule(
implementation = _daml_doctest_impl,
attrs = {
"srcs": attr.label_list(
allow_files = [".daml"],
default = [],
doc = "DAML source files that should be tested.",
),
"ignored_srcs": attr.string_list(
default = [],
doc = "DAML source files that should be ignored.",
),
"damlc": attr.label(
executable = True,
cfg = "host",
allow_files = True,
default = Label("//compiler/damlc"),
),
"cpp": attr.label(
executable = True,
cfg = "host",
allow_files = True,
default = Label("@hpp//:hpp"),
),
"flags": attr.string_list(
default = [],
doc = "Flags for damlc invokation.",
),
"package_name": attr.string(),
},
test = True,
)
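# Editor's note (BUILD-file sketch; the target name and glob are hypothetical):
#   load("//rules_daml:daml.bzl", "daml_compile")
#   daml_compile(
#       name = "my-model",
#       srcs = glob(["daml/**/*.daml"]),
#       version = "1.0.0",
#   )
# This generates the daml.yaml, builds my-model.dar and registers the
# my-model-inspect genrule, which writes my-model.dar.pp.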
| 29.622989
| 103
| 0.547649
|
load("@build_environment//:configuration.bzl", "ghc_version", "sdk_version")
_damlc = attr.label(
allow_single_file = True,
default = Label("//compiler/damlc"),
executable = True,
cfg = "host",
doc = "The DAML compiler.",
)
_zipper = attr.label(
allow_single_file = True,
default = Label("@bazel_tools//tools/zip:zipper"),
executable = True,
cfg = "host",
)
def _daml_configure_impl(ctx):
project_name = ctx.attr.project_name
project_version = ctx.attr.project_version
daml_yaml = ctx.outputs.daml_yaml
target = ctx.attr.target
ctx.actions.write(
output = daml_yaml,
content = """
sdk-version: {sdk}
name: {name}
version: {version}
source: .
dependencies: []
build-options: [{target}]
""".format(
sdk = sdk_version,
name = project_name,
version = project_version,
target = "--target=" + target if (target) else "",
),
)
_daml_configure = rule(
implementation = _daml_configure_impl,
attrs = {
"project_name": attr.string(
mandatory = True,
doc = "Name of the DAML project.",
),
"project_version": attr.string(
mandatory = True,
doc = "Version of the DAML project.",
),
"daml_yaml": attr.output(
mandatory = True,
doc = "The generated daml.yaml config file.",
),
"target": attr.string(
doc = "DAML-LF version to output.",
),
},
)
def file_of_target(k):
[file] = k.files.to_list()
return file
def make_cp_command(src, dest):
return "mkdir -p $(dirname {dest}); cp -f {src} {dest}".format(
src = src,
dest = dest,
)
def _daml_build_impl(ctx):
name = ctx.label.name
daml_yaml = ctx.file.daml_yaml
srcs = ctx.files.srcs
dar_dict = ctx.attr.dar_dict
damlc = ctx.file._damlc
input_dars = [file_of_target(k) for k in dar_dict.keys()]
output_dar = ctx.outputs.dar
posix = ctx.toolchains["@rules_sh//sh/posix:toolchain_type"]
ctx.actions.run_shell(
tools = [damlc],
inputs = [daml_yaml] + srcs + input_dars,
outputs = [output_dar],
progress_message = "Building DAML project %s" % name,
command = """
set -eou pipefail
tmpdir=$(mktemp -d)
trap "rm -rf $tmpdir" EXIT
cp -f {config} $tmpdir/daml.yaml
# Having to produce all the daml.yaml files via a genrule is annoying
# so we allow hardcoded version numbers and patch them here.
{sed} -i 's/^sdk-version:.*$/sdk-version: {sdk_version}/' $tmpdir/daml.yaml
{cp_srcs}
{cp_dars}
{damlc} build --project-root $tmpdir -o $PWD/{output_dar}
""".format(
config = daml_yaml.path,
cp_srcs = "\n".join([
make_cp_command(
src = src.path,
dest = "$tmpdir/" + src.path,
)
for src in srcs
]),
cp_dars = "\n".join([
make_cp_command(
src = file_of_target(k).path,
dest = "$tmpdir/" + v,
)
for k, v in dar_dict.items()
]),
sed = posix.commands["sed"],
damlc = damlc.path,
output_dar = output_dar.path,
sdk_version = sdk_version,
),
)
_daml_build = rule(
implementation = _daml_build_impl,
attrs = {
"daml_yaml": attr.label(
allow_single_file = True,
mandatory = True,
doc = "The daml.yaml config file.",
),
"srcs": attr.label_list(
allow_files = [".daml"],
mandatory = True,
doc = "DAML files in this DAML project.",
),
"dar_dict": attr.label_keyed_string_dict(
mandatory = True,
allow_files = True,
doc = "Other DAML projects referenced by this DAML project.",
),
"dar": attr.output(
mandatory = True,
doc = "The generated DAR file.",
),
"_damlc": _damlc,
},
toolchains = ["@rules_sh//sh/posix:toolchain_type"],
)
def _extract_main_dalf_impl(ctx):
project_name = ctx.attr.project_name
project_version = ctx.attr.project_version
input_dar = ctx.file.dar
output_dalf = ctx.outputs.dalf
zipper = ctx.file._zipper
posix = ctx.toolchains["@rules_sh//sh/posix:toolchain_type"]
ctx.actions.run_shell(
tools = [zipper],
inputs = [input_dar],
outputs = [output_dalf],
progress_message = "Extract DALF from DAR (%s)" % project_name,
command = """
set -eoux pipefail
TMPDIR=$(mktemp -d)
trap "rm -rf $TMPDIR" EXIT
# While zipper has a -d option, it insists on it
# being a relative path so we don't use it.
ZIPPER=$PWD/{zipper}
DAR=$PWD/{input_dar}
(cd $TMPDIR && $ZIPPER x $DAR)
main_dalf=$({find} $TMPDIR/ -name '{project_name}-{project_version}-[a-z0-9]*.dalf')
cp $main_dalf {output_dalf}
""".format(
zipper = zipper.path,
find = posix.commands["find"],
project_name = project_name,
project_version = project_version,
input_dar = input_dar.path,
output_dalf = output_dalf.path,
),
)
_extract_main_dalf = rule(
implementation = _extract_main_dalf_impl,
attrs = {
"project_name": attr.string(
mandatory = True,
doc = "Name of the DAML project.",
),
"project_version": attr.string(
mandatory = True,
doc = "Version of the DAML project.",
),
"dar": attr.label(
allow_single_file = [".dar"],
mandatory = True,
doc = "The DAR from which the DALF will be extracted.",
),
"dalf": attr.output(
mandatory = True,
doc = "The extracted DALF.",
),
"_zipper": _zipper,
},
toolchains = ["@rules_sh//sh/posix:toolchain_type"],
)
def _daml_validate_test_impl(ctx):
name = ctx.label.name
dar = ctx.file.dar
script = ctx.actions.declare_file(name + ".sh")
damlc = ctx.file._damlc
script_content = """
set -eou pipefail
DAMLC=$(rlocation $TEST_WORKSPACE/{damlc})
DAR=$(rlocation $TEST_WORKSPACE/{dar})
$DAMLC validate-dar $DAR
""".format(
damlc = damlc.short_path,
dar = dar.short_path,
)
ctx.actions.write(
output = script,
content = script_content,
)
runfiles = ctx.runfiles(files = [dar, damlc])
return [DefaultInfo(executable = script, runfiles = runfiles)]
_daml_validate_test = rule(
implementation = _daml_validate_test_impl,
attrs = {
"dar": attr.label(
allow_single_file = True,
mandatory = True,
doc = "The DAR to validate.",
),
"_damlc": _damlc,
},
test = True,
)
def _inspect_dar(base):
name = base + "-inspect"
dar = base + ".dar"
pp = base + ".dar.pp"
native.genrule(
name = name,
srcs = [
dar,
"//compiler/damlc",
],
outs = [pp],
cmd = "$(location //compiler/damlc) inspect $(location :" + dar + ") > $@",
)
_default_project_version = "1.0.0"
def daml_compile(
name,
srcs,
version = _default_project_version,
target = None,
**kwargs):
if len(srcs) == 0:
fail("daml_compile: Expected `srcs' to be non-empty.")
daml_yaml = name + ".yaml"
_daml_configure(
name = name + ".configure",
project_name = name,
project_version = version,
daml_yaml = daml_yaml,
target = target,
**kwargs
)
_daml_build(
name = name + ".build",
daml_yaml = daml_yaml,
srcs = srcs,
dar_dict = {},
dar = name + ".dar",
**kwargs
)
_inspect_dar(
base = name,
)
def daml_compile_with_dalf(
name,
version = _default_project_version,
**kwargs):
daml_compile(
name = name,
version = version,
**kwargs
)
_extract_main_dalf(
name = name + ".extract",
project_name = name,
project_version = version,
dar = name + ".dar",
dalf = name + ".dalf",
)
def daml_build_test(
name,
project_dir,
daml_config_basename = "daml.yaml",
daml_subdir_basename = "daml",
dar_dict = {},
**kwargs):
daml_yaml = project_dir + "/" + daml_config_basename
srcs = native.glob([project_dir + "/" + daml_subdir_basename + "/**/*.daml"])
_daml_build(
name = name,
daml_yaml = daml_yaml,
srcs = srcs,
dar_dict = dar_dict,
dar = name + ".dar",
**kwargs
)
_daml_validate_test(
name = name + ".test",
dar = name + ".dar",
)
def _daml_test_impl(ctx):
script = """
set -eou pipefail
DAMLC=$(rlocation $TEST_WORKSPACE/{damlc})
rlocations () {{ for i in $@; do echo $(rlocation $TEST_WORKSPACE/$i); done; }}
$DAMLC test --files $(rlocations "{files}")
""".format(
damlc = ctx.executable.damlc.short_path,
files = " ".join([f.short_path for f in ctx.files.srcs]),
)
ctx.actions.write(
output = ctx.outputs.executable,
content = script,
)
damlc_runfiles = ctx.attr.damlc[DefaultInfo].data_runfiles
runfiles = ctx.runfiles(
collect_data = True,
files = ctx.files.srcs,
).merge(damlc_runfiles)
return [DefaultInfo(runfiles = runfiles)]
daml_test = rule(
implementation = _daml_test_impl,
attrs = {
"srcs": attr.label_list(
allow_files = [".daml"],
default = [],
doc = "DAML source files to test.",
),
"damlc": attr.label(
executable = True,
cfg = "host",
allow_files = True,
default = Label("//compiler/damlc"),
),
},
test = True,
)
def _daml_doctest_impl(ctx):
script = """
set -eou pipefail
DAMLC=$(rlocation $TEST_WORKSPACE/{damlc})
CPP=$(rlocation $TEST_WORKSPACE/{cpp})
rlocations () {{ for i in $@; do echo $(rlocation $TEST_WORKSPACE/$i); done; }}
$DAMLC doctest {flags} --cpp $CPP --package-name {package_name}-{version} $(rlocations "{files}")
""".format(
damlc = ctx.executable.damlc.short_path,
cpp = ctx.executable.cpp.short_path.replace("..", "external"),
package_name = ctx.attr.package_name,
flags = " ".join(ctx.attr.flags),
version = ghc_version,
files = " ".join([
f.short_path
for f in ctx.files.srcs
if all([not f.short_path.endswith(ignore) for ignore in ctx.attr.ignored_srcs])
]),
)
ctx.actions.write(
output = ctx.outputs.executable,
content = script,
)
damlc_runfiles = ctx.attr.damlc[DefaultInfo].data_runfiles
cpp_runfiles = ctx.attr.cpp[DefaultInfo].data_runfiles
runfiles = ctx.runfiles(
collect_data = True,
files = ctx.files.srcs,
).merge(damlc_runfiles).merge(cpp_runfiles)
return [DefaultInfo(runfiles = runfiles)]
daml_doc_test = rule(
implementation = _daml_doctest_impl,
attrs = {
"srcs": attr.label_list(
allow_files = [".daml"],
default = [],
doc = "DAML source files that should be tested.",
),
"ignored_srcs": attr.string_list(
default = [],
doc = "DAML source files that should be ignored.",
),
"damlc": attr.label(
executable = True,
cfg = "host",
allow_files = True,
default = Label("//compiler/damlc"),
),
"cpp": attr.label(
executable = True,
cfg = "host",
allow_files = True,
default = Label("@hpp//:hpp"),
),
"flags": attr.string_list(
default = [],
doc = "Flags for damlc invokation.",
),
"package_name": attr.string(),
},
test = True,
)
| true
| true
|
79093e2e65a2feba08c341fc369ebf687e9c2a11
| 5,588
|
py
|
Python
|
pyxform/tests_v1/test_sheet_columns.py
|
medic/pyxform
|
cd76ce3ee43e3748656ff6e73cd119d238343113
|
[
"BSD-2-Clause"
] | 3
|
2016-01-31T23:04:57.000Z
|
2021-01-23T14:07:26.000Z
|
pyxform/tests_v1/test_sheet_columns.py
|
medic/pyxform
|
cd76ce3ee43e3748656ff6e73cd119d238343113
|
[
"BSD-2-Clause"
] | 5
|
2017-08-22T13:43:16.000Z
|
2021-05-13T02:52:40.000Z
|
pyxform/tests_v1/test_sheet_columns.py
|
medic/pyxform
|
cd76ce3ee43e3748656ff6e73cd119d238343113
|
[
"BSD-2-Clause"
] | 3
|
2016-03-17T08:45:25.000Z
|
2019-05-02T09:42:07.000Z
|
from pyxform.tests_v1.pyxform_test_case import PyxformTestCase
class InvalidSurveyColumnsTests(PyxformTestCase):
def test_missing_name(self):
"""
every question needs a name (or alias of name)
"""
self.assertPyxformXform(
name='invalidcols',
ss_structure={'survey': [{'type': 'text',
'label': 'label'}]},
errored=True,
error__contains=['no name'],
)
def test_missing_name_but_has_alias_of_name(self):
self.assertPyxformXform(
name='invalidcols',
ss_structure={'survey': [{'value': 'q1',
'type': 'text',
'label': 'label'}]},
errored=False,
)
def test_missing_label(self):
self.assertPyxformXform(
name="invalidcols",
ss_structure={'survey': [{'type': 'text',
'name': 'q1'}]},
errored=True,
error__contains=['no label or hint'],
)
def test_column_case(self):
"""
Ensure that column name is case insensitive
"""
self.assertPyxformXform(
name="mixedcasecolumns",
md="""
| Survey | | | |
| | Type | name | Label |
| | text | Name | the name |
| | integer | age | the age |
| | text | gender | the gender |
""",
errored=False,
debug=True
)
class InvalidChoiceSheetColumnsTests(PyxformTestCase):
def _simple_choice_ss(self, choice_sheet=None):
if choice_sheet is None:
choice_sheet = []
return {'survey': [{'type': 'select_one l1',
'name': 'l1choice',
'label': 'select one from list l1'}],
'choices': choice_sheet}
def test_valid_choices_sheet_passes(self):
self.assertPyxformXform(
name='valid_choices',
ss_structure=self._simple_choice_ss([
{'list_name': 'l1',
'name': 'c1',
'label': 'choice 1'},
{'list_name': 'l1',
'name': 'c2',
'label': 'choice 2'}]),
errored=False,
)
def test_invalid_choices_sheet_fails(self):
self.assertPyxformXform(
name='missing_name',
ss_structure=self._simple_choice_ss([
{'list_name': 'l1',
'label': 'choice 1'},
{'list_name': 'l1',
'label': 'choice 2'},
]),
errored=True,
error__contains=['option with no name'],
)
def test_missing_list_name(self):
self.assertPyxformXform(
name='missing_list_name',
ss_structure=self._simple_choice_ss([
{'bad_column': 'l1',
'name': 'l1c1',
'label': 'choice 1'},
{'bad_column': 'l1',
'name': 'l1c1',
'label': 'choice 2'},
]),
debug=True,
errored=True,
# some basic keywords that should be in the error:
error__contains=[
'choices',
'name',
'list name',
])
class AliasesTests(PyxformTestCase):
def test_value_and_name(self):
'''
confirm that both 'name' and 'value' columns of choice list work
'''
for name_alias in ['name', 'value']:
self.assertPyxformXform(
name="aliases",
md="""
| survey | | | |
| | type | name | label |
| | select_one yn | q1 | Question 1 |
| choices | | | |
| | list name | %(name_alias)s | label |
| | yn | yes | Yes |
| | yn | no | No |
""" % ({
u'name_alias': name_alias
}),
instance__contains=[
'<q1/>',
],
model__contains=[
'<bind nodeset="/aliases/q1" type="select1"/>',
],
xml__contains=[
'<select1 ref="/aliases/q1">',
'<value>yes</value>',
'<value>no</value>',
'</select1>',
])
''' # uncomment when re-implemented
# TODO: test that this fails for the correct reason
def test_conflicting_aliased_values_raises_error(self):
# example:
# an xlsform has {"name": "q_name", "value": "q_value"}
# should not compile because "name" and "value" columns are aliases
self.assertPyxformXform(
# debug=True,
name="aliases",
md="""
| survey | | | | |
| | type | name | value | label |
| | text | q_name | q_value | Question 1 |
""",
errored=True,
)
'''
| 35.367089
| 75
| 0.411059
|
79093e67e33c743f6ce430e95c32e02548597b3c
| 5,417
|
py
|
Python
|
content/actions/what-is-ec2-role/test_ec2_role_handler.py
|
varunsh-coder/dassana
|
fae1de20c7acdd10c9940f6cff3785943ba7f1f1
|
[
"Apache-2.0"
] | 45
|
2021-08-03T00:35:10.000Z
|
2022-03-31T05:51:49.000Z
|
content/actions/what-is-ec2-role/test_ec2_role_handler.py
|
varunsh-coder/dassana
|
fae1de20c7acdd10c9940f6cff3785943ba7f1f1
|
[
"Apache-2.0"
] | 454
|
2021-08-02T22:56:48.000Z
|
2021-12-20T21:09:44.000Z
|
content/actions/what-is-ec2-role/test_ec2_role_handler.py
|
varunsh-coder/dassana
|
fae1de20c7acdd10c9940f6cff3785943ba7f1f1
|
[
"Apache-2.0"
] | 9
|
2021-09-02T04:52:19.000Z
|
2021-12-22T18:11:52.000Z
|
import datetime
from typing import Dict, Tuple, Any
import boto3
from botocore.stub import Stubber
from dateutil.tz import tzutc
from dassana.common.aws_client import LambdaTestContext
from json import dumps
import pytest
@pytest.fixture()
def input_s3_with_website(s3_public_bucket_with_website, region):
return {
'bucketName': s3_public_bucket_with_website,
'region': region
}
@pytest.fixture()
def iam_policy():
return {
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor0",
"Effect": "Allow",
"Action": [
"ec2:GetDefaultCreditSpecification",
"ec2:GetEbsEncryptionByDefault",
"ec2:ExportClientVpnClientConfiguration",
"ec2:GetCapacityReservationUsage",
"ec2:DescribeVolumesModifications",
"ec2:GetHostReservationPurchasePreview",
"ec2:GetSubnetCidrReservations",
"ec2:GetConsoleScreenshot",
"ec2:GetConsoleOutput",
"ec2:ExportClientVpnClientCertificateRevocationList",
"ec2:GetLaunchTemplateData",
"ec2:GetSerialConsoleAccessStatus",
"ec2:GetFlowLogsIntegrationTemplate",
"ec2:DescribeScheduledInstanceAvailability",
"ec2:GetEbsDefaultKmsKeyId",
"ec2:GetManagedPrefixListEntries",
"ec2:DescribeVpnConnections",
"ec2:DescribeTags",
"ec2:GetCoipPoolUsage",
"ec2:DescribeFastSnapshotRestores",
"ec2:GetReservedInstancesExchangeQuote",
"ec2:GetAssociatedEnclaveCertificateIamRoles",
"ec2:GetPasswordData",
"ec2:GetAssociatedIpv6PoolCidrs",
"ec2:DescribeScheduledInstances",
"ec2:GetManagedPrefixListAssociations",
"ec2:DescribeElasticGpus"
],
"Resource": "*"
}
]
}
@pytest.fixture()
def iam_role_name():
return 'ec2-iam-role'
@pytest.fixture()
def instance_profile_name():
return 'ec2-instance-profile-role'
@pytest.fixture()
def iam_role_arn(iam_client, iam_policy, iam_role_name, instance_profile_name) -> Tuple[Any, Dict[str, Any]]:
resp = iam_client.create_role(RoleName=iam_role_name, AssumeRolePolicyDocument=dumps(iam_policy))
instance_profile_resp = iam_client.create_instance_profile(
InstanceProfileName=instance_profile_name
)
iam_client.add_role_to_instance_profile(
InstanceProfileName=instance_profile_name,
RoleName=iam_role_name
)
instance_profile_resp = instance_profile_resp.get('InstanceProfile')
return resp['Role']['Arn'], {
'Name': instance_profile_resp.get('InstanceProfileName'),
'Arn': instance_profile_resp.get('Arn')
}
@pytest.fixture()
def ec2_instance_with_role(ec2_client, iam_role_arn, instance_profile_name):
instances = ec2_client.run_instances(ImageId='ami-1234',
MinCount=1,
MaxCount=1,
InstanceType='t2.micro',
IamInstanceProfile=iam_role_arn[1])
instance_id = instances.get('Instances')[0].get('InstanceId')
assoc_resp = ec2_client.associate_iam_instance_profile(IamInstanceProfile=iam_role_arn[1], InstanceId=instance_id)
return instance_id
@pytest.fixture()
def ec2_instance_without_role(ec2_client):
ec2_client.run_instances(ImageId='ami-1234-foobar',
MinCount=1,
MaxCount=1)
instances = ec2_client.describe_instances(
Filters=[
{
'Name': 'image-id',
'Values': ['ami-1234-foobar']
}
]
)['Reservations'][0]['Instances']
return instances[0]['InstanceId']
def test_ec2_instance_with_role(ec2_instance_with_role, iam_role_arn, region):
from handler_ec2_role import handle
result: Dict = handle({'instanceId': ec2_instance_with_role, 'region': region},
LambdaTestContext('foobar', env={},
custom={}))
assert result.get('result').get('roleName') == iam_role_arn[1].get('Name')
assert str.split(result.get('result').get('roleArn'), ':role/') == str.split(iam_role_arn[1].get(
'Arn'), ':instance-profile/')
def test_ec2_instance_without_role(ec2_instance_without_role, region):
from handler_ec2_role import handle
result: Dict = handle({'instanceId': ec2_instance_without_role, 'region': region},
LambdaTestContext('foobar', env={}, custom={}))
assert result.get('result').get('roleArn') == ''
assert result.get('result').get('roleName') == ''
def test_ec2_instance_does_not_exist(ec2_instance_without_role, region):
from handler_ec2_role import handle
result: Dict = handle({'instanceId': 'i-abcd', 'region': region},
LambdaTestContext('foobar', env={}, custom={}))
assert result.get('result').get('roleArn') == ''
assert result.get('result').get('roleName') == ''
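The module under test, handler_ec2_role, is not part of this file. As a rough sketch of the interface the tests assume (an event dict with instanceId and region, and a return value of the form {'result': {'roleName': ..., 'roleArn': ...}} with empty strings when nothing is attached), something like the following would fit; the boto3 calls and field mapping here are illustrative, not the actual implementation:
import boto3

def handle(event, context):
    # Illustrative sketch only: look up the instance profile attached to the
    # instance and report its name/ARN, or empty strings if none is found.
    ec2 = boto3.client('ec2', region_name=event['region'])
    role_name, role_arn = '', ''
    try:
        assocs = ec2.describe_iam_instance_profile_associations(
            Filters=[{'Name': 'instance-id', 'Values': [event['instanceId']]}]
        )['IamInstanceProfileAssociations']
        if assocs:
            profile_arn = assocs[0]['IamInstanceProfile']['Arn']
            role_arn = profile_arn
            role_name = profile_arn.split('/')[-1]
    except ec2.exceptions.ClientError:
        pass
    return {'result': {'roleName': role_name, 'roleArn': role_arn}}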
| 37.358621
| 118
| 0.603101
|
79093ea59eb4164a1713612ea65f0dbf5cab5488
| 9,165
|
py
|
Python
|
src/signals.py
|
delos/dm-pta-mc
|
bce9ce815a518e1b47d1894fce3e003c5e649113
|
[
"MIT"
] | null | null | null |
src/signals.py
|
delos/dm-pta-mc
|
bce9ce815a518e1b47d1894fce3e003c5e649113
|
[
"MIT"
] | null | null | null |
src/signals.py
|
delos/dm-pta-mc
|
bce9ce815a518e1b47d1894fce3e003c5e649113
|
[
"MIT"
] | null | null | null |
"""
Functions computing the signal shapes
"""
import numpy as np
from time import time
import src.constants as const
def subtract_signal(t, signal, fit_params=3):
"""
Returns the subtracted signal
"""
# fit dphi(t) to polynomials and subtract the contribution from n=0, 1 and 2
coef = np.polynomial.polynomial.polyfit(t, signal, fit_params - 1) # (3)
delta_signal = np.einsum(
"n,nj->j", coef, np.asarray([np.power(t, n) for n in range(fit_params)])
) # (Nt)
# compute the subtracted signal
ht = signal - delta_signal # (Nt), unit = s
return ht
def dphi_dop_chunked(
t,
profile,
r0_vec,
v_vec,
d_hat,
use_form=False,
use_chunk=False,
chunk_size=10000,
verbose=False,
form_fun=None,
interp_table=None,
time_end=np.inf,
):
"""
    Compute dphi in chunks over the subhalos; use this when an Nt x N array is
    too large to hold in memory.
"""
num_objects = len(list(profile.items())[0][1]) # number of elements of 1st dict entry
dphi = np.zeros(len(t))
if use_chunk == True:
if num_objects % chunk_size == 0:
num_chunks = num_objects // chunk_size
else:
num_chunks = num_objects // chunk_size + 1
if verbose:
print(" Chunking data (%d chunks) ... "%num_chunks)
print()
for i in range(num_chunks):
if time() > time_end: raise TimeoutError
r0_c = r0_vec[i * chunk_size : (i + 1) * chunk_size]
v_c = v_vec[i * chunk_size : (i + 1) * chunk_size]
profile_c = {}
for key in list(profile):
profile_c[key] = profile[key][i * chunk_size : (i + 1) * chunk_size]
dphi += dphi_dop(
t, profile_c, r0_c, v_c, d_hat, use_form=use_form, form_fun=form_fun, interp_table=interp_table
)
else:
dphi += dphi_dop(t, profile, r0_vec, v_vec, d_hat, use_form=use_form, form_fun=form_fun, interp_table=interp_table)
return dphi
def dphi_dop_chunked_vec(
t,
profile,
r0_vec,
v_vec,
use_form=False,
use_chunk=False,
chunk_size=10000,
verbose=False,
form_fun=None,
interp_table=None,
time_end=np.inf,
):
"""
    Compute dphi in chunks over the subhalos; use this when an Nt x N array is
    too large to hold in memory.
"""
num_objects = len(list(profile.items())[0][1]) # number of elements of 1st dict entry
dphi_vec = np.zeros((len(t), 3))
if use_chunk == True:
if verbose:
print(" Chunking data ... ")
print()
if num_objects % chunk_size == 0:
num_chunks = num_objects // chunk_size
else:
num_chunks = num_objects // chunk_size + 1
for i in range(num_chunks):
if time() > time_end: raise TimeoutError
r0_c = r0_vec[i * chunk_size : (i + 1) * chunk_size]
v_c = v_vec[i * chunk_size : (i + 1) * chunk_size]
profile_c = {}
for key in list(profile):
profile_c[key] = profile[key][i * chunk_size : (i + 1) * chunk_size]
dphi_vec += dphi_dop_vec(
t, profile_c, r0_c, v_c, use_form=use_form, form_fun=form_fun, interp_table=interp_table
)
else:
dphi_vec += dphi_dop_vec(t, profile, r0_vec, v_vec, use_form=use_form, form_fun=form_fun, interp_table=interp_table)
return dphi_vec
def dphi_dop_vec(t, profile, r0_vec, v_vec, use_form=False, form_fun=None,
interp_table=None):
"""
    Returns the vector phase shift due to the Doppler delay for the given subhalo profile.
    Dot with d_hat to get dphi_I.
TODO: add use_closest option
"""
v_mag = np.linalg.norm(v_vec, axis=1)
r0_v = np.einsum("ij, ij -> i", r0_vec, v_vec)
t0 = -r0_v / np.square(v_mag) # year
b_vec = r0_vec + v_vec * t0[:, np.newaxis] # (N, 3)
b_mag = np.linalg.norm(b_vec, axis=1) # (N)
tau = b_mag / v_mag
b_hat = b_vec / b_mag[:, np.newaxis] # (N, 3)
v_hat = v_vec / v_mag[:, np.newaxis]
x = np.subtract.outer(t, t0) / tau
x0 = -t0 / tau
prefactor = (
const.yr_to_s
* const.GN
/ (const.km_s_to_kpc_yr * const.c_light * np.square(v_mag))
)
if interp_table is None:
bd_term = (np.sqrt(1 + x ** 2) + x) - (np.sqrt(1 + x0 ** 2) + x0) # (Nt, N)
vd_term = np.arcsinh(x) - np.arcsinh(x0)
if 'M' in list(profile):
prefactor *= profile['M']
if use_form:
t_cl = np.maximum(np.minimum(t0, t[-1]), 0)
x_cl = (t_cl - t0) / tau
r_cl = tau * v_mag * np.sqrt(1 + x_cl ** 2)
rv = ((3 * profile['M'] / (4 * np.pi)) * (1 / 200) * (1 / const.rho_crit)) ** (1 / 3)
form_func = np.where(r_cl<rv, form(r_cl / rv, profile['c']), 1) # (N)
bd_term *= prefactor * form_func
vd_term *= prefactor * form_func
else:
bd_term = prefactor * bd_term
vd_term = prefactor * vd_term
else:
if form_fun is not None:
t_cl = np.maximum(np.minimum(t0, t[-1]), 0)
x_cl = (t_cl - t0) / tau
r_cl = tau * v_mag * np.sqrt(1 + x_cl ** 2)
form_func = form_fun(r_cl, profile['rs'], profile['rhos'])
bd_term *= prefactor * form_func
vd_term *= prefactor * form_func
else:
raise ValueError('rho_s, r_s halo description currently requires custom density profile ("USE_FORMTAB")')
else:
y = b_mag / profile['rs']
bd_term0, vd_term0 = interp_table.bd_vd_terms(x0, y)
y.shape = (1,-1)
y = np.broadcast_to(y,x.shape)
bd_term, vd_term = interp_table.bd_vd_terms(x, y)
bd_term -= bd_term0
vd_term -= vd_term0
bd_term *= prefactor * profile['rhos'] * profile['rs']**3
vd_term *= prefactor * profile['rhos'] * profile['rs']**3
# sum the signal over all the events
sig = np.einsum("to, oi -> ti", bd_term, b_hat) - np.einsum(
"to, oi -> ti", vd_term, v_hat
)
return sig
def dphi_dop(t, profile, r0_vec, v_vec, d_hat, use_form=False, form_fun=None,
interp_table=None):
"""
    Returns the phase shift due to the Doppler delay for the given subhalo profile.
TODO: add use_closest option
"""
v_mag = np.linalg.norm(v_vec, axis=1)
r0_v = np.einsum("ij, ij -> i", r0_vec, v_vec) # kpc^2/yr
t0 = -r0_v / np.square(v_mag) # year
b_vec = r0_vec + v_vec * t0[:, np.newaxis] # (N, 3), kpc
b_mag = np.linalg.norm(b_vec, axis=1) # (N)
tau = b_mag / v_mag # year
b_hat = b_vec / b_mag[:, np.newaxis]
v_hat = v_vec / v_mag[:, np.newaxis]
b_d = np.dot(b_hat, d_hat)
v_d = np.dot(v_hat, d_hat)
x = np.subtract.outer(t, t0) / tau
x0 = -t0 / tau
prefactor = (
const.yr_to_s
* const.GN
/ (const.km_s_to_kpc_yr * const.c_light * np.square(v_mag))
)
if interp_table is None:
bd_term = (np.sqrt(1 + x ** 2) + x) - (np.sqrt(1 + x0 ** 2) + x0)
vd_term = np.arcsinh(x) - np.arcsinh(x0)
sig = bd_term * b_d - vd_term * v_d
if 'M' in list(profile):
prefactor *= profile['M']
if use_form:
t_cl = np.maximum(np.minimum(t0, t[-1]), 0)
x_cl = (t_cl - t0) / tau
r_cl = tau * v_mag * np.sqrt(1 + x_cl ** 2)
rv = ((3 * profile['M'] / (4 * np.pi)) * (1 / 200) * (1 / const.rho_crit)) ** (1 / 3)
form_func = np.where(r_cl<rv, form(r_cl / rv, profile['c']), 1) # (N)
sig = form_func * sig
else:
if form_fun is not None:
t_cl = np.maximum(np.minimum(t0, t[-1]), 0)
x_cl = (t_cl - t0) / tau
r_cl = tau * v_mag * np.sqrt(1 + x_cl ** 2)
form_func = form_fun(r_cl, profile['rs'], profile['rhos'])
sig = form_func * sig
else:
raise ValueError('rho_s, r_s halo description currently requires custom density profile ("USE_FORMTAB")')
else:
y = b_mag / profile['rs']
bd_term0, vd_term0 = interp_table.bd_vd_terms(x0, y)
y.shape = (1,-1)
y = np.broadcast_to(y,x.shape)
bd_term, vd_term = interp_table.bd_vd_terms(x, y)
bd_term -= bd_term0
vd_term -= vd_term0
sig = profile['rhos'] * profile['rs']**3 * (bd_term * b_d + vd_term * v_d)
sig = prefactor * sig
# sum the signal over all the events
return np.sum(sig, axis=-1)
def form(s, c):
return (np.log(1 + c * s) - c * s / (1 + c * s)) / (np.log(1 + c) - c / (1 + c))
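As a usage sketch (not part of this module): assuming the repository layout implied by the imports above (src.signals, src.constants) and the units noted in the comments (kpc, kpc/yr, years), the chunked Doppler signal for a synthetic subhalo population could be evaluated as below; the mass, concentration, and velocity scales are placeholder values.
import numpy as np
from src.signals import dphi_dop_chunked, subtract_signal

rng = np.random.default_rng(0)
N = 100                                        # number of subhalos
t = np.linspace(0.0, 20.0, 500)                # observation times [yr]
profile = {'M': np.full(N, 1e-9),              # subhalo masses (placeholder units)
           'c': np.full(N, 100.0)}             # NFW concentrations (placeholder)
r0_vec = rng.normal(scale=1.0, size=(N, 3))    # initial positions [kpc]
v_vec = rng.normal(scale=1e-4, size=(N, 3))    # velocities [kpc/yr]
d_hat = np.array([0.0, 0.0, 1.0])              # unit vector toward the pulsar

dphi = dphi_dop_chunked(t, profile, r0_vec, v_vec, d_hat,
                        use_form=True, use_chunk=True, chunk_size=50)
ht = subtract_signal(t, dphi)                  # remove the fitted low-order polynomial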
| 27.522523
| 124
| 0.529078
|
79093f267726acc004aaf28a603df903376f96e9
| 2,887
|
py
|
Python
|
test/functional/api/cas/installer.py
|
andreatomassetti/open-cas-linux
|
6a6a0267d76dca86de8695a959991ecefdc0ddf8
|
[
"BSD-3-Clause"
] | 1
|
2022-01-23T23:50:23.000Z
|
2022-01-23T23:50:23.000Z
|
test/functional/api/cas/installer.py
|
andreatomassetti/open-cas-linux
|
6a6a0267d76dca86de8695a959991ecefdc0ddf8
|
[
"BSD-3-Clause"
] | 1
|
2022-03-21T22:05:26.000Z
|
2022-03-21T22:05:26.000Z
|
test/functional/api/cas/installer.py
|
andreatomassetti/open-cas-linux
|
6a6a0267d76dca86de8695a959991ecefdc0ddf8
|
[
"BSD-3-Clause"
] | null | null | null |
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import logging
from tests import conftest
from core.test_run import TestRun
from api.cas import git
from api.cas import cas_module
from test_utils import os_utils
from test_utils.output import CmdException
def rsync_opencas_sources():
TestRun.LOGGER.info("Copying Open CAS repository to DUT")
TestRun.executor.rsync_to(
f"{TestRun.usr.repo_dir}/",
f"{TestRun.usr.working_dir}/",
exclude_list=["test/functional/results/"],
delete=True)
def _clean_opencas_repo():
TestRun.LOGGER.info("Cleaning Open CAS repo")
output = TestRun.executor.run(
f"cd {TestRun.usr.working_dir} && "
"make distclean")
if output.exit_code != 0:
raise CmdException("make distclean command executed with nonzero status", output)
def build_opencas():
TestRun.LOGGER.info("Building Open CAS")
output = TestRun.executor.run(
f"cd {TestRun.usr.working_dir} && "
"./configure && "
"make -j")
if output.exit_code != 0:
raise CmdException("Make command executed with nonzero status", output)
def install_opencas():
TestRun.LOGGER.info("Installing Open CAS")
output = TestRun.executor.run(
f"cd {TestRun.usr.working_dir} && "
f"make install")
if output.exit_code != 0:
raise CmdException("Error while installing Open CAS", output)
TestRun.LOGGER.info("Check if casadm is properly installed.")
output = TestRun.executor.run("casadm -V")
if output.exit_code != 0:
raise CmdException("'casadm -V' command returned an error", output)
else:
TestRun.LOGGER.info(output.stdout)
def set_up_opencas(version=None):
_clean_opencas_repo()
if version:
git.checkout_cas_version(version)
build_opencas()
install_opencas()
def uninstall_opencas():
TestRun.LOGGER.info("Uninstalling Open CAS")
output = TestRun.executor.run("casadm -V")
if output.exit_code != 0:
raise CmdException("Open CAS is not properly installed", output)
else:
        # capture the `make uninstall` result so the status check below reflects it
        output = TestRun.executor.run(
f"cd {TestRun.usr.working_dir} && "
f"make uninstall")
if output.exit_code != 0:
raise CmdException("There was an error during uninstall process", output)
def reinstall_opencas(version=None):
if check_if_installed():
uninstall_opencas()
set_up_opencas(version)
def check_if_installed():
TestRun.LOGGER.info("Check if Open-CAS-Linux is installed")
output = TestRun.executor.run("which casadm")
modules_loaded = os_utils.is_kernel_module_loaded(cas_module.CasModule.cache.value)
if output.exit_code == 0 and modules_loaded:
TestRun.LOGGER.info("CAS is installed")
return True
TestRun.LOGGER.info("CAS not installed")
return False
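A typical call order from a functional test, assuming the TestRun executor and user paths have already been configured by the surrounding framework (module path inferred from the imports above), would be roughly:
from api.cas import installer

installer.rsync_opencas_sources()        # copy the checked-out repo to the DUT
if installer.check_if_installed():
    installer.uninstall_opencas()
installer.set_up_opencas(version=None)   # distclean, optional checkout, build, install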
| 28.303922
| 89
| 0.68133
|
79093f5ff026604709ef317195ff407c082bacd8
| 2,876
|
py
|
Python
|
tests/components/demo/test_fan.py
|
alindeman/home-assistant
|
b274b10f3874c196f0db8f9cfa5f47eb756d1f8e
|
[
"Apache-2.0"
] | 4
|
2019-07-03T22:36:57.000Z
|
2019-08-10T15:33:25.000Z
|
tests/components/demo/test_fan.py
|
alindeman/home-assistant
|
b274b10f3874c196f0db8f9cfa5f47eb756d1f8e
|
[
"Apache-2.0"
] | 7
|
2019-08-23T05:26:02.000Z
|
2022-03-11T23:57:18.000Z
|
tests/components/demo/test_fan.py
|
alindeman/home-assistant
|
b274b10f3874c196f0db8f9cfa5f47eb756d1f8e
|
[
"Apache-2.0"
] | 3
|
2019-04-28T16:35:45.000Z
|
2020-05-28T15:21:59.000Z
|
"""Test cases around the demo fan platform."""
import pytest
from homeassistant.setup import async_setup_component
from homeassistant.components import fan
from homeassistant.const import STATE_OFF, STATE_ON
from tests.components.fan import common
FAN_ENTITY_ID = 'fan.living_room_fan'
def get_entity(hass):
"""Get the fan entity."""
return hass.states.get(FAN_ENTITY_ID)
@pytest.fixture(autouse=True)
def setup_comp(hass):
"""Initialize components."""
hass.loop.run_until_complete(async_setup_component(hass, fan.DOMAIN, {
'fan': {
'platform': 'demo',
}
}))
async def test_turn_on(hass):
"""Test turning on the device."""
assert STATE_OFF == get_entity(hass).state
await common.async_turn_on(hass, FAN_ENTITY_ID)
assert STATE_OFF != get_entity(hass).state
await common.async_turn_on(hass, FAN_ENTITY_ID, fan.SPEED_HIGH)
assert STATE_ON == get_entity(hass).state
assert fan.SPEED_HIGH == \
get_entity(hass).attributes[fan.ATTR_SPEED]
async def test_turn_off(hass):
"""Test turning off the device."""
assert STATE_OFF == get_entity(hass).state
await common.async_turn_on(hass, FAN_ENTITY_ID)
assert STATE_OFF != get_entity(hass).state
await common.async_turn_off(hass, FAN_ENTITY_ID)
assert STATE_OFF == get_entity(hass).state
async def test_turn_off_without_entity_id(hass):
"""Test turning off all fans."""
assert STATE_OFF == get_entity(hass).state
await common.async_turn_on(hass, FAN_ENTITY_ID)
assert STATE_OFF != get_entity(hass).state
await common.async_turn_off(hass)
assert STATE_OFF == get_entity(hass).state
async def test_set_direction(hass):
"""Test setting the direction of the device."""
assert STATE_OFF == get_entity(hass).state
await common.async_set_direction(hass, FAN_ENTITY_ID,
fan.DIRECTION_REVERSE)
assert fan.DIRECTION_REVERSE == \
get_entity(hass).attributes.get('direction')
async def test_set_speed(hass):
"""Test setting the speed of the device."""
assert STATE_OFF == get_entity(hass).state
await common.async_set_speed(hass, FAN_ENTITY_ID, fan.SPEED_LOW)
assert fan.SPEED_LOW == \
get_entity(hass).attributes.get('speed')
async def test_oscillate(hass):
"""Test oscillating the fan."""
assert not get_entity(hass).attributes.get('oscillating')
await common.async_oscillate(hass, FAN_ENTITY_ID, True)
assert get_entity(hass).attributes.get('oscillating')
await common.async_oscillate(hass, FAN_ENTITY_ID, False)
assert not get_entity(hass).attributes.get('oscillating')
async def test_is_on(hass):
"""Test is on service call."""
assert not fan.is_on(hass, FAN_ENTITY_ID)
await common.async_turn_on(hass, FAN_ENTITY_ID)
assert fan.is_on(hass, FAN_ENTITY_ID)
| 29.050505
| 74
| 0.714882
|
790940487406f760a0d61fe422f1afa8e6bc2856
| 14,212
|
py
|
Python
|
appengine/monorail/services/chart_svc.py
|
xinghun61/infra
|
b5d4783f99461438ca9e6a477535617fadab6ba3
|
[
"BSD-3-Clause"
] | 2
|
2021-04-13T21:22:18.000Z
|
2021-09-07T02:11:57.000Z
|
appengine/monorail/services/chart_svc.py
|
xinghun61/infra
|
b5d4783f99461438ca9e6a477535617fadab6ba3
|
[
"BSD-3-Clause"
] | 16
|
2020-09-07T11:55:09.000Z
|
2022-03-02T05:47:58.000Z
|
appengine/monorail/services/chart_svc.py
|
xinghun61/infra
|
b5d4783f99461438ca9e6a477535617fadab6ba3
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""A service for querying data for charts.
Functions for querying the IssueSnapshot table and associated join tables.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import logging
import settings
import time
from framework import framework_helpers
from framework import sql
from search import search_helpers
from tracker import tracker_bizobj
from tracker import tracker_helpers
from search import query2ast
from search import ast2select
from search import ast2ast
ISSUESNAPSHOT_TABLE_NAME = 'IssueSnapshot'
ISSUESNAPSHOT2CC_TABLE_NAME = 'IssueSnapshot2Cc'
ISSUESNAPSHOT2COMPONENT_TABLE_NAME = 'IssueSnapshot2Component'
ISSUESNAPSHOT2LABEL_TABLE_NAME = 'IssueSnapshot2Label'
ISSUESNAPSHOT_COLS = ['id', 'issue_id', 'shard', 'project_id', 'local_id',
'reporter_id', 'owner_id', 'status_id', 'period_start', 'period_end',
'is_open']
ISSUESNAPSHOT2CC_COLS = ['issuesnapshot_id', 'cc_id']
ISSUESNAPSHOT2COMPONENT_COLS = ['issuesnapshot_id', 'component_id']
ISSUESNAPSHOT2LABEL_COLS = ['issuesnapshot_id', 'label_id']
class ChartService(object):
"""Class for querying chart data."""
def __init__(self, config_service):
"""Constructor for ChartService.
Args:
config_service (ConfigService): An instance of ConfigService.
"""
self.config_service = config_service
# Set up SQL table objects.
self.issuesnapshot_tbl = sql.SQLTableManager(ISSUESNAPSHOT_TABLE_NAME)
self.issuesnapshot2cc_tbl = sql.SQLTableManager(
ISSUESNAPSHOT2CC_TABLE_NAME)
self.issuesnapshot2component_tbl = sql.SQLTableManager(
ISSUESNAPSHOT2COMPONENT_TABLE_NAME)
self.issuesnapshot2label_tbl = sql.SQLTableManager(
ISSUESNAPSHOT2LABEL_TABLE_NAME)
def QueryIssueSnapshots(self, cnxn, services, unixtime, effective_ids,
project, perms, group_by=None, label_prefix=None,
query=None, canned_query=None):
"""Queries historical issue counts grouped by label or component.
Args:
cnxn: A MonorailConnection instance.
services: A Services instance.
unixtime: An integer representing the Unix time in seconds.
effective_ids: The effective User IDs associated with the current user.
project: A project object representing the current project.
perms: A permissions object associated with the current user.
group_by (str, optional): Which dimension to group by. Values can
be 'label', 'component', or None, in which case no grouping will
be applied.
label_prefix: Required when group_by is 'label.' Will limit the query to
only labels with the specified prefix (for example 'Pri').
query (str, optional): A query string from the request to apply to
the snapshot query.
canned_query (str, optional): Parsed canned query applied to the query
scope.
Returns:
      1. A dict of {'2nd dimension or "total"': number of occurrences}.
2. A list of any unsupported query conditions in query.
3. A boolean that is true if any results were capped.
"""
project_config = services.config.GetProjectConfig(cnxn,
project.project_id)
try:
query_left_joins, query_where, unsupported_conds = self._QueryToWhere(
cnxn, services, project_config, query, canned_query, project)
except ast2select.NoPossibleResults:
return {}, ['Invalid query.'], False
restricted_label_ids = search_helpers.GetPersonalAtRiskLabelIDs(
cnxn, None, self.config_service, effective_ids, project, perms)
left_joins = [
('Issue ON IssueSnapshot.issue_id = Issue.id', []),
]
if restricted_label_ids:
left_joins.append(
(('Issue2Label AS Forbidden_label'
' ON Issue.id = Forbidden_label.issue_id'
' AND Forbidden_label.label_id IN (%s)' % (
sql.PlaceHolders(restricted_label_ids)
)), restricted_label_ids))
if effective_ids:
left_joins.append(
('Issue2Cc AS I2cc'
' ON Issue.id = I2cc.issue_id'
' AND I2cc.cc_id IN (%s)' % sql.PlaceHolders(effective_ids),
effective_ids))
# TODO(jeffcarp): Handle case where there are issues with no labels.
where = [
('IssueSnapshot.period_start <= %s', [unixtime]),
('IssueSnapshot.period_end > %s', [unixtime]),
('IssueSnapshot.project_id = %s', [project.project_id]),
('Issue.is_spam = %s', [False]),
('Issue.deleted = %s', [False]),
]
forbidden_label_clause = 'Forbidden_label.label_id IS NULL'
if effective_ids:
if restricted_label_ids:
forbidden_label_clause = ' OR %s' % forbidden_label_clause
else:
forbidden_label_clause = ''
where.append(
((
'(Issue.reporter_id IN (%s)'
' OR Issue.owner_id IN (%s)'
' OR I2cc.cc_id IS NOT NULL'
'%s)'
) % (
sql.PlaceHolders(effective_ids), sql.PlaceHolders(effective_ids),
forbidden_label_clause
),
list(effective_ids) + list(effective_ids)
))
else:
where.append((forbidden_label_clause, []))
if group_by == 'component':
cols = ['Comp.path', 'COUNT(IssueSnapshot.issue_id)']
left_joins.extend([
(('IssueSnapshot2Component AS Is2c ON'
' Is2c.issuesnapshot_id = IssueSnapshot.id'), []),
('ComponentDef AS Comp ON Comp.id = Is2c.component_id', []),
])
group_by = ['Comp.path']
elif group_by == 'label':
cols = ['Lab.label', 'COUNT(IssueSnapshot.issue_id)']
left_joins.extend([
(('IssueSnapshot2Label AS Is2l'
' ON Is2l.issuesnapshot_id = IssueSnapshot.id'), []),
('LabelDef AS Lab ON Lab.id = Is2l.label_id', []),
])
if not label_prefix:
raise ValueError('`label_prefix` required when grouping by label.')
# TODO(jeffcarp): If LookupIDsOfLabelsMatching() is called on output,
# ensure regex is case-insensitive.
where.append(('LOWER(Lab.label) LIKE %s', [label_prefix.lower() + '-%']))
group_by = ['Lab.label']
elif group_by == 'open':
cols = ['IssueSnapshot.is_open',
'COUNT(IssueSnapshot.issue_id) AS issue_count']
group_by = ['IssueSnapshot.is_open']
elif group_by == 'status':
left_joins.append(('StatusDef AS Stats ON ' \
'Stats.id = IssueSnapshot.status_id', []))
cols = ['Stats.status', 'COUNT(IssueSnapshot.issue_id)']
group_by = ['Stats.status']
elif group_by == 'owner':
cols = ['IssueSnapshot.owner_id', 'COUNT(IssueSnapshot.issue_id)']
group_by = ['IssueSnapshot.owner_id']
elif not group_by:
cols = ['IssueSnapshot.issue_id']
else:
raise ValueError('`group_by` must be label, component, ' \
'open, status, owner or None.')
if query_left_joins:
left_joins.extend(query_left_joins)
if query_where:
where.extend(query_where)
promises = []
for shard_id in range(settings.num_logical_shards):
count_stmt, stmt_args = self._BuildSnapshotQuery(cols=cols,
where=where, joins=left_joins, group_by=group_by,
shard_id=shard_id)
promises.append(framework_helpers.Promise(cnxn.Execute,
count_stmt, stmt_args, shard_id=shard_id))
shard_values_dict = {}
search_limit_reached = False
for promise in promises:
# Wait for each query to complete and add it to the dict.
shard_values = list(promise.WaitAndGetValue())
if not shard_values:
continue
if group_by:
for name, count in shard_values:
if count >= settings.chart_query_max_rows:
search_limit_reached = True
shard_values_dict.setdefault(name, 0)
shard_values_dict[name] += count
else:
if shard_values[0][0] >= settings.chart_query_max_rows:
search_limit_reached = True
shard_values_dict.setdefault('total', 0)
shard_values_dict['total'] += shard_values[0][0]
unsupported_field_names = list(set([
field.field_name
for cond in unsupported_conds
for field in cond.field_defs
]))
return shard_values_dict, unsupported_field_names, search_limit_reached
def StoreIssueSnapshots(self, cnxn, issues, commit=True):
"""Adds an IssueSnapshot and updates the previous one for each issue."""
for issue in issues:
right_now = self._currentTime()
# Update previous snapshot of current issue's end time to right now.
self.issuesnapshot_tbl.Update(cnxn,
delta={'period_end': right_now},
where=[('IssueSnapshot.issue_id = %s', [issue.issue_id]),
('IssueSnapshot.period_end = %s',
[settings.maximum_snapshot_period_end])],
commit=commit)
config = self.config_service.GetProjectConfig(cnxn, issue.project_id)
period_end = settings.maximum_snapshot_period_end
is_open = tracker_helpers.MeansOpenInProject(
tracker_bizobj.GetStatus(issue), config)
shard = issue.issue_id % settings.num_logical_shards
status = tracker_bizobj.GetStatus(issue)
status_id = self.config_service.LookupStatusID(
cnxn, issue.project_id, status) or None
owner_id = tracker_bizobj.GetOwnerId(issue) or None
issuesnapshot_rows = [(issue.issue_id, shard, issue.project_id,
issue.local_id, issue.reporter_id, owner_id, status_id, right_now,
period_end, is_open)]
ids = self.issuesnapshot_tbl.InsertRows(
cnxn, ISSUESNAPSHOT_COLS[1:],
issuesnapshot_rows,
replace=True, commit=commit,
return_generated_ids=True)
issuesnapshot_id = ids[0]
# Add all labels to IssueSnapshot2Label.
label_rows = [
(issuesnapshot_id,
self.config_service.LookupLabelID(cnxn, issue.project_id, label))
for label in tracker_bizobj.GetLabels(issue)
]
self.issuesnapshot2label_tbl.InsertRows(
cnxn, ISSUESNAPSHOT2LABEL_COLS,
label_rows, replace=True, commit=commit)
# Add all CCs to IssueSnapshot2Cc.
cc_rows = [
(issuesnapshot_id, cc_id)
for cc_id in tracker_bizobj.GetCcIds(issue)
]
self.issuesnapshot2cc_tbl.InsertRows(
cnxn, ISSUESNAPSHOT2CC_COLS,
cc_rows,
replace=True, commit=commit)
# Add all components to IssueSnapshot2Component.
component_rows = [
(issuesnapshot_id, component_id)
for component_id in issue.component_ids
]
self.issuesnapshot2component_tbl.InsertRows(
cnxn, ISSUESNAPSHOT2COMPONENT_COLS,
component_rows,
replace=True, commit=commit)
# Add all components to IssueSnapshot2Hotlist.
# This is raw SQL to obviate passing FeaturesService down through
# the call stack wherever this function is called.
# TODO(jrobbins): sort out dependencies between service classes.
cnxn.Execute('''
INSERT INTO IssueSnapshot2Hotlist (issuesnapshot_id, hotlist_id)
SELECT %s, hotlist_id FROM Hotlist2Issue WHERE issue_id = %s
''', [issuesnapshot_id, issue.issue_id])
def ExpungeHotlistsFromIssueSnapshots(self, cnxn, hotlist_ids):
"""Expunge the existence of hotlists from issue snapshots.
This method will not commit the operation. This method will not make
changes to in-memory data.
Args:
cnxn: connection to SQL database.
hotlist_ids: list of hotlist_ids for hotlists we want to delete.
"""
vals_ph = sql.PlaceHolders(hotlist_ids)
cnxn.Execute(
'DELETE FROM IssueSnapshot2Hotlist '
'WHERE hotlist_id IN ({vals_ph})'.format(vals_ph=vals_ph),
hotlist_ids,
commit=False)
def _currentTime(self):
"""This is a separate method so it can be mocked by tests."""
return time.time()
def _QueryToWhere(self, cnxn, services, project_config, query, canned_query,
project):
"""Parses a query string into LEFT JOIN and WHERE conditions.
Args:
cnxn: A MonorailConnection instance.
services: A Services instance.
project_config: The configuration for the given project.
query (string): The query to parse.
canned_query (string): The supplied canned query.
project: The current project.
Returns:
1. A list of LEFT JOIN clauses for the SQL query.
      2. A list of WHERE clauses for the SQL query.
3. A list of query conditions that are unsupported with snapshots.
"""
if not (query or canned_query):
return [], [], []
query = query or ''
scope = canned_query or ''
query_ast = query2ast.ParseUserQuery(query, scope,
query2ast.BUILTIN_ISSUE_FIELDS, project_config)
query_ast = ast2ast.PreprocessAST(cnxn, query_ast, [project.project_id],
services, project_config)
left_joins, where, unsupported = ast2select.BuildSQLQuery(query_ast,
snapshot_mode=True)
return left_joins, where, unsupported
def _BuildSnapshotQuery(self, cols, where, joins, group_by, shard_id):
"""Given SQL arguments, executes a snapshot COUNT query."""
stmt = sql.Statement.MakeSelect('IssueSnapshot', cols, distinct=True)
stmt.AddJoinClauses(joins, left=True)
stmt.AddWhereTerms(where + [('IssueSnapshot.shard = %s', [shard_id])])
if group_by:
stmt.AddGroupByTerms(group_by)
stmt.SetLimitAndOffset(limit=settings.chart_query_max_rows, offset=0)
stmt_str, stmt_args = stmt.Generate()
if group_by:
if group_by[0] == 'IssueSnapshot.is_open':
count_stmt = ('SELECT IF(results.is_open = 1, "Opened", "Closed") ' \
'AS bool_open, results.issue_count ' \
'FROM (%s) AS results' % stmt_str)
else:
count_stmt = stmt_str
else:
count_stmt = 'SELECT COUNT(results.issue_id) FROM (%s) AS results' % (
stmt_str)
return count_stmt, stmt_args
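For orientation, here is a caller-side sketch of how a chart endpoint might invoke QueryIssueSnapshots; the request-context attribute names (mr.auth.effective_ids, mr.project, mr.perms) and the services.chart handle are illustrative assumptions about the surrounding Monorail framework, not code from this module.
import time

def issue_snapshot_counts(mr, services):
  chart = services.chart  # assumed to be a ChartService instance
  counts, unsupported, capped = chart.QueryIssueSnapshots(
      mr.cnxn, services,
      unixtime=int(time.time()),
      effective_ids=mr.auth.effective_ids,
      project=mr.project,
      perms=mr.perms,
      group_by='label',
      label_prefix='Pri',      # required whenever group_by == 'label'
      query='is:open',
      canned_query=None)
  return counts, unsupported, capped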
| 37.10705
| 79
| 0.675837
|
79094171d897f57a2a4f0df5c1978a00d2070601
| 720
|
py
|
Python
|
iiits/migrations/0060_auto_20160717_0614.py
|
IIITS/iiits.ac.in
|
fd1bcd656a2f1a038d331b005224c546998a23a6
|
[
"MIT"
] | 6
|
2016-02-27T04:35:28.000Z
|
2020-06-09T04:18:38.000Z
|
iiits/migrations/0060_auto_20160717_0614.py
|
IIITS/iiits.ac.in
|
fd1bcd656a2f1a038d331b005224c546998a23a6
|
[
"MIT"
] | null | null | null |
iiits/migrations/0060_auto_20160717_0614.py
|
IIITS/iiits.ac.in
|
fd1bcd656a2f1a038d331b005224c546998a23a6
|
[
"MIT"
] | 5
|
2016-03-01T07:28:20.000Z
|
2021-01-19T10:51:58.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-07-17 06:14
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('iiits', '0059_auto_20160717_0609'),
]
operations = [
migrations.AlterField(
model_name='notice',
name='valid_until',
field=models.DateTimeField(default=datetime.datetime(2016, 7, 24, 6, 14, 48, 161315, tzinfo=utc)),
),
migrations.AlterField(
model_name='topstory',
name='title',
field=models.CharField(max_length=255),
),
]
| 25.714286
| 110
| 0.620833
|
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('iiits', '0059_auto_20160717_0609'),
]
operations = [
migrations.AlterField(
model_name='notice',
name='valid_until',
field=models.DateTimeField(default=datetime.datetime(2016, 7, 24, 6, 14, 48, 161315, tzinfo=utc)),
),
migrations.AlterField(
model_name='topstory',
name='title',
field=models.CharField(max_length=255),
),
]
| true
| true
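For context on the migration above: the two AlterField operations correspond to a DateTimeField default on Notice.valid_until and a plain CharField on TopStory.title. A hypothetical sketch of model definitions that would yield these operations follows; only the two altered fields are taken from the migration, the surrounding model bodies are assumptions.

import datetime
from django.db import models
from django.utils.timezone import utc

class Notice(models.Model):
    # Default copied from the migration; a callable such as timezone.now is usually
    # preferable, since a literal datetime is frozen at makemigrations time.
    valid_until = models.DateTimeField(
        default=datetime.datetime(2016, 7, 24, 6, 14, 48, 161315, tzinfo=utc))

class TopStory(models.Model):
    title = models.CharField(max_length=255)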
|
7909432811788a47dbfd11ff0adf09a38108980d
| 5,966
|
py
|
Python
|
valhalla/src/valhalla/core.py
|
DEMON1A/connectors
|
86a1133735510154318030bcb971564e812e3ce0
|
[
"Apache-2.0"
] | null | null | null |
valhalla/src/valhalla/core.py
|
DEMON1A/connectors
|
86a1133735510154318030bcb971564e812e3ce0
|
[
"Apache-2.0"
] | null | null | null |
valhalla/src/valhalla/core.py
|
DEMON1A/connectors
|
86a1133735510154318030bcb971564e812e3ce0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""OpenCTI valhalla connector core module."""
import os
import yaml
import time
from typing import Any, Dict, Mapping, Optional
from datetime import datetime
from .knowledge import KnowledgeImporter
from .models import Status
from pycti import OpenCTIConnectorHelper, get_config_variable
from stix2 import TLP_WHITE, TLP_AMBER
from valhallaAPI.valhalla import ValhallaAPI
class Valhalla:
"""OpenCTI valhalla main class"""
_DEMO_API_KEY = "1111111111111111111111111111111111111111111111111111111111111111"
_STATE_LAST_RUN = "last_run"
_VALHALLA_LAST_VERSION = "valhalla_last_version"
def __init__(self):
# Instantiate the connector helper from config
config_file_path = os.path.dirname(os.path.abspath(__file__)) + "/../config.yml"
config = (
yaml.load(open(config_file_path), Loader=yaml.SafeLoader)
if os.path.isfile(config_file_path)
else {}
)
# Extra config
self.confidence_level = get_config_variable(
"CONNECTOR_CONFIDENCE_LEVEL",
["connector", "confidence_level"],
config,
isNumber=True,
)
self.update_existing_data = get_config_variable(
"CONNECTOR_UPDATE_EXISTING_DATA",
["connector", "update_existing_data"],
config,
)
self.API_KEY = get_config_variable(
"VALHALLA_API_KEY", ["valhalla", "api_key"], config
)
self.INTERVAL_SEC = get_config_variable(
"VALHALLA_INTERVAL_SEC", ["valhalla", "interval_sec"], config, isNumber=True
)
self.helper = OpenCTIConnectorHelper(config)
self.helper.log_info(f"loaded valhalla config: {config}")
# If we run without API key we can assume all data is TLP:WHITE else we
# default to TLP:AMBER to be safe.
if self.API_KEY == "" or self.API_KEY is None:
self.default_marking = self.helper.api.marking_definition.read(
id=TLP_WHITE["id"]
)
self.valhalla_client = ValhallaAPI()
else:
self.default_marking = self.helper.api.marking_definition.read(
id=TLP_AMBER["id"]
)
self.valhalla_client = ValhallaAPI(api_key=self.API_KEY)
self.knowledge_importer = KnowledgeImporter(
self.helper,
self.confidence_level,
self.update_existing_data,
self.default_marking,
self.valhalla_client,
)
def run(self):
self.helper.log_info("starting valhalla connector...")
while True:
try:
status_data = self.valhalla_client.get_status()
api_status = Status.parse_obj(status_data)
self.helper.log_info(f"current valhalla status: {api_status}")
current_time = int(datetime.utcnow().timestamp())
current_state = self._load_state()
self.helper.log_info(f"loaded state: {current_state}")
last_run = self._get_state_value(current_state, self._STATE_LAST_RUN)
last_valhalla_version = self._get_state_value(
current_state, self._VALHALLA_LAST_VERSION
)
if self._is_scheduled(last_run, current_time) and self._check_version(
last_valhalla_version, api_status.version
):
self.helper.log_info("running importers")
knowledge_importer_state = self._run_knowledge_importer(
current_state
)
self.helper.log_info("done with running importers")
new_state = current_state.copy()
new_state.update(knowledge_importer_state)
new_state[self._STATE_LAST_RUN] = int(datetime.utcnow().timestamp())
new_state[self._VALHALLA_LAST_VERSION] = api_status.version
self.helper.log_info(f"storing new state: {new_state}")
self.helper.set_state(new_state)
self.helper.log_info(
f"state stored, next run in: {self._get_interval()} seconds"
)
else:
new_interval = self._get_interval() - (current_time - last_run)
self.helper.log_info(
f"connector will not run, next run in: {new_interval} seconds"
)
# After a successful run pause at least 60sec
time.sleep(60)
except (KeyboardInterrupt, SystemExit):
self.helper.log_info("connector stop")
exit(0)
except Exception as e:
self.helper.log_error(str(e))
exit(0)
def _run_knowledge_importer(
self, current_state: Mapping[str, Any]
) -> Mapping[str, Any]:
return self.knowledge_importer.run(current_state)
def _get_interval(self) -> int:
return int(self.INTERVAL_SEC)
def _load_state(self) -> Dict[str, Any]:
current_state = self.helper.get_state()
if not current_state:
return {}
return current_state
@staticmethod
def _get_state_value(
state: Optional[Mapping[str, Any]], key: str, default: Optional[Any] = None
) -> Any:
if state is not None:
return state.get(key, default)
return default
def _is_scheduled(self, last_run: Optional[int], current_time: int) -> bool:
if last_run is None:
return True
time_diff = current_time - last_run
return time_diff >= self._get_interval()
def _check_version(self, last_version: Optional[int], current_version: int) -> bool:
if last_version is None:
return True
return current_version > last_version
| 36.157576
| 88
| 0.599397
|
import os
import yaml
import time
from typing import Any, Dict, Mapping, Optional
from datetime import datetime
from .knowledge import KnowledgeImporter
from .models import Status
from pycti import OpenCTIConnectorHelper, get_config_variable
from stix2 import TLP_WHITE, TLP_AMBER
from valhallaAPI.valhalla import ValhallaAPI
class Valhalla:
_DEMO_API_KEY = "1111111111111111111111111111111111111111111111111111111111111111"
_STATE_LAST_RUN = "last_run"
_VALHALLA_LAST_VERSION = "valhalla_last_version"
def __init__(self):
config_file_path = os.path.dirname(os.path.abspath(__file__)) + "/../config.yml"
config = (
yaml.load(open(config_file_path), Loader=yaml.SafeLoader)
if os.path.isfile(config_file_path)
else {}
)
self.confidence_level = get_config_variable(
"CONNECTOR_CONFIDENCE_LEVEL",
["connector", "confidence_level"],
config,
isNumber=True,
)
self.update_existing_data = get_config_variable(
"CONNECTOR_UPDATE_EXISTING_DATA",
["connector", "update_existing_data"],
config,
)
self.API_KEY = get_config_variable(
"VALHALLA_API_KEY", ["valhalla", "api_key"], config
)
self.INTERVAL_SEC = get_config_variable(
"VALHALLA_INTERVAL_SEC", ["valhalla", "interval_sec"], config, isNumber=True
)
self.helper = OpenCTIConnectorHelper(config)
self.helper.log_info(f"loaded valhalla config: {config}")
if self.API_KEY == "" or self.API_KEY is None:
self.default_marking = self.helper.api.marking_definition.read(
id=TLP_WHITE["id"]
)
self.valhalla_client = ValhallaAPI()
else:
self.default_marking = self.helper.api.marking_definition.read(
id=TLP_AMBER["id"]
)
self.valhalla_client = ValhallaAPI(api_key=self.API_KEY)
self.knowledge_importer = KnowledgeImporter(
self.helper,
self.confidence_level,
self.update_existing_data,
self.default_marking,
self.valhalla_client,
)
def run(self):
self.helper.log_info("starting valhalla connector...")
while True:
try:
status_data = self.valhalla_client.get_status()
api_status = Status.parse_obj(status_data)
self.helper.log_info(f"current valhalla status: {api_status}")
current_time = int(datetime.utcnow().timestamp())
current_state = self._load_state()
self.helper.log_info(f"loaded state: {current_state}")
last_run = self._get_state_value(current_state, self._STATE_LAST_RUN)
last_valhalla_version = self._get_state_value(
current_state, self._VALHALLA_LAST_VERSION
)
if self._is_scheduled(last_run, current_time) and self._check_version(
last_valhalla_version, api_status.version
):
self.helper.log_info("running importers")
knowledge_importer_state = self._run_knowledge_importer(
current_state
)
self.helper.log_info("done with running importers")
new_state = current_state.copy()
new_state.update(knowledge_importer_state)
new_state[self._STATE_LAST_RUN] = int(datetime.utcnow().timestamp())
new_state[self._VALHALLA_LAST_VERSION] = api_status.version
self.helper.log_info(f"storing new state: {new_state}")
self.helper.set_state(new_state)
self.helper.log_info(
f"state stored, next run in: {self._get_interval()} seconds"
)
else:
new_interval = self._get_interval() - (current_time - last_run)
self.helper.log_info(
f"connector will not run, next run in: {new_interval} seconds"
)
time.sleep(60)
except (KeyboardInterrupt, SystemExit):
self.helper.log_info("connector stop")
exit(0)
except Exception as e:
self.helper.log_error(str(e))
exit(0)
def _run_knowledge_importer(
self, current_state: Mapping[str, Any]
) -> Mapping[str, Any]:
return self.knowledge_importer.run(current_state)
def _get_interval(self) -> int:
return int(self.INTERVAL_SEC)
def _load_state(self) -> Dict[str, Any]:
current_state = self.helper.get_state()
if not current_state:
return {}
return current_state
@staticmethod
def _get_state_value(
state: Optional[Mapping[str, Any]], key: str, default: Optional[Any] = None
) -> Any:
if state is not None:
return state.get(key, default)
return default
def _is_scheduled(self, last_run: Optional[int], current_time: int) -> bool:
if last_run is None:
return True
time_diff = current_time - last_run
return time_diff >= self._get_interval()
def _check_version(self, last_version: Optional[int], current_version: int) -> bool:
if last_version is None:
return True
return current_version > last_version
| true
| true
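The run loop in the connector above only does work when both gates pass: the configured interval has elapsed since last_run and the Valhalla rule-set version has increased. Below is a standalone sketch of that gating logic with no pycti or valhallaAPI dependency; the interval value, state layout, and feed version are assumptions modelled on the code above.

from datetime import datetime

INTERVAL_SEC = 86400  # assumed daily interval

def is_scheduled(last_run, now):
    # Mirrors Valhalla._is_scheduled: always run on first start, then rate-limit.
    return last_run is None or (now - last_run) >= INTERVAL_SEC

def version_changed(last_version, current_version):
    # Mirrors Valhalla._check_version: only re-import when the feed moved forward.
    return last_version is None or current_version > last_version

state = {"last_run": None, "valhalla_last_version": None}
now = int(datetime.utcnow().timestamp())
current_version = 20210901  # hypothetical feed version

if is_scheduled(state["last_run"], now) and version_changed(
        state["valhalla_last_version"], current_version):
    state["last_run"] = now
    state["valhalla_last_version"] = current_version
print(state)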
|
79094328aad1e56498476572436d9a0a20e931b5
| 118
|
py
|
Python
|
airbyte-integrations/connectors/source-orb/source_orb/__init__.py
|
OTRI-Unipd/OTRI-airbyte
|
50eeeb773f75246e86c6e167b0cd7d2dda6efe0d
|
[
"MIT"
] | 22
|
2020-08-27T00:47:20.000Z
|
2020-09-17T15:39:39.000Z
|
airbyte-integrations/connectors/source-orb/source_orb/__init__.py
|
OTRI-Unipd/OTRI-airbyte
|
50eeeb773f75246e86c6e167b0cd7d2dda6efe0d
|
[
"MIT"
] | 116
|
2020-08-27T01:11:27.000Z
|
2020-09-19T02:47:52.000Z
|
airbyte-integrations/connectors/source-orb/source_orb/__init__.py
|
OTRI-Unipd/OTRI-airbyte
|
50eeeb773f75246e86c6e167b0cd7d2dda6efe0d
|
[
"MIT"
] | 1
|
2022-03-11T06:21:24.000Z
|
2022-03-11T06:21:24.000Z
|
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
from .source import SourceOrb
__all__ = ["SourceOrb"]
| 13.111111
| 56
| 0.694915
|
from .source import SourceOrb
__all__ = ["SourceOrb"]
| true
| true
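The __init__.py above only re-exports the connector class; its __all__ entry is what a star import picks up. A tiny illustration, assuming the package is importable as source_orb:

import source_orb

print(source_orb.__all__)   # ['SourceOrb']

# Star imports expose only the names listed in __all__.
from source_orb import *
print(SourceOrb)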
|
7909440cc60247dfcd02ffa434cb072d166ba8f9
| 1,376
|
py
|
Python
|
qurator/sbb_ned/models/evaluation.py
|
qurator-spk/sbb_ned
|
d4cfe249f72e48913f254a58fbe0dbe6e47bd168
|
[
"Apache-2.0"
] | 6
|
2020-09-05T16:08:59.000Z
|
2022-03-05T00:54:47.000Z
|
qurator/sbb_ned/models/evaluation.py
|
qurator-spk/sbb_ned
|
d4cfe249f72e48913f254a58fbe0dbe6e47bd168
|
[
"Apache-2.0"
] | 6
|
2020-09-23T17:58:37.000Z
|
2022-03-10T14:02:09.000Z
|
qurator/sbb_ned/models/evaluation.py
|
qurator-spk/sbb_ned
|
d4cfe249f72e48913f254a58fbe0dbe6e47bd168
|
[
"Apache-2.0"
] | 2
|
2021-03-22T00:12:51.000Z
|
2022-01-31T10:04:08.000Z
|
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def compute_lr(target_lr, n_epochs, train_set_size, batch_size, warmup):
total = (n_epochs - 1) * int(np.ceil(train_set_size / batch_size))
progress = [float(t) / total for t in range(0, total)]
factor = [p / warmup if p < warmup else max((p - 1.) / (warmup - 1.), 0.) for p in progress]
lr = [f * target_lr for f in factor]
return lr
def load_train_log(directories, num_epochs, target_lr, **kwargs):
parts = []
for d, ep, t_lr in zip(directories, num_epochs, target_lr):
files = ['{}/loss_ep{}.pkl'.format(d, i) for i in range(1, ep)]
files = [f for f in files if os.path.exists(f)]
part = pd.concat([pd.read_pickle(f) for f in files])
part['lr'] = compute_lr(target_lr=t_lr, n_epochs=ep, **kwargs)[0:len(part)]
parts.append(part)
return pd.concat(parts).reset_index(drop=True)
def plot_loss_against_lr(loss, wnd_size=6000):
fig = plt.figure(figsize=(11.69, 8.27))
ax1 = fig.add_subplot(111)
ax1.set_xlabel('time')
ax1.set_ylabel('loss', color='b')
ax1.plot(loss.loss.rolling(wnd_size).mean(), color='b')
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
ax2.set_ylabel('learning rate', color='r')
ax2.plot(loss.lr.rolling(wnd_size).mean(), 'r')
| 28.666667
| 96
| 0.648983
|
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def compute_lr(target_lr, n_epochs, train_set_size, batch_size, warmup):
total = (n_epochs - 1) * int(np.ceil(train_set_size / batch_size))
progress = [float(t) / total for t in range(0, total)]
factor = [p / warmup if p < warmup else max((p - 1.) / (warmup - 1.), 0.) for p in progress]
lr = [f * target_lr for f in factor]
return lr
def load_train_log(directories, num_epochs, target_lr, **kwargs):
parts = []
for d, ep, t_lr in zip(directories, num_epochs, target_lr):
files = ['{}/loss_ep{}.pkl'.format(d, i) for i in range(1, ep)]
files = [f for f in files if os.path.exists(f)]
part = pd.concat([pd.read_pickle(f) for f in files])
part['lr'] = compute_lr(target_lr=t_lr, n_epochs=ep, **kwargs)[0:len(part)]
parts.append(part)
return pd.concat(parts).reset_index(drop=True)
def plot_loss_against_lr(loss, wnd_size=6000):
fig = plt.figure(figsize=(11.69, 8.27))
ax1 = fig.add_subplot(111)
ax1.set_xlabel('time')
ax1.set_ylabel('loss', color='b')
ax1.plot(loss.loss.rolling(wnd_size).mean(), color='b')
ax2 = ax1.twinx()
ax2.set_ylabel('learning rate', color='r')
ax2.plot(loss.lr.rolling(wnd_size).mean(), 'r')
| true
| true
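compute_lr above builds a triangular schedule: a linear ramp up to target_lr over the warmup fraction of all steps, then a linear decay back towards zero, with (n_epochs - 1) epochs' worth of steps in total. A short usage sketch follows; the import path is inferred from the repository layout and is an assumption.

from qurator.sbb_ned.models.evaluation import compute_lr  # assumed import path

lrs = compute_lr(target_lr=3e-5, n_epochs=4, train_set_size=9600,
                 batch_size=32, warmup=0.1)

steps = (4 - 1) * (9600 // 32)      # (n_epochs - 1) * steps_per_epoch = 900
assert len(lrs) == steps
assert abs(max(lrs) - 3e-5) < 1e-9  # peak hit at the end of warmup
print(lrs[0], max(lrs), lrs[-1])    # ramps from 0 to 3e-5, then decays towards 0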
|
79094469e978444c608401df0f39d020f53a771e
| 1,847
|
py
|
Python
|
euca2ools/commands/cloudformation/liststackresources.py
|
salewski/euca2ools
|
6b3f62f2cb1c54f14d3bfa5fd92dab3c0ecafecb
|
[
"BSD-2-Clause"
] | 30
|
2015-02-10T05:47:38.000Z
|
2022-01-20T08:48:43.000Z
|
euca2ools/commands/cloudformation/liststackresources.py
|
salewski/euca2ools
|
6b3f62f2cb1c54f14d3bfa5fd92dab3c0ecafecb
|
[
"BSD-2-Clause"
] | 16
|
2015-01-08T23:24:34.000Z
|
2018-07-18T07:15:40.000Z
|
euca2ools/commands/cloudformation/liststackresources.py
|
salewski/euca2ools
|
6b3f62f2cb1c54f14d3bfa5fd92dab3c0ecafecb
|
[
"BSD-2-Clause"
] | 19
|
2015-05-07T05:34:42.000Z
|
2020-12-13T10:50:14.000Z
|
# Copyright 2014 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from requestbuilder import Arg
from euca2ools.commands.cloudformation import CloudFormationRequest
class ListStackResources(CloudFormationRequest):
DESCRIPTION = 'List all resources for a stack'
ARGS = [Arg('StackName', metavar='STACK',
help='name of the stack to list resources from (required)')]
LIST_TAGS = ['StackResourceSummaries']
def print_result(self, result):
for resource in result['StackResourceSummaries']:
self.print_resource(resource)
| 46.175
| 76
| 0.769356
|
from requestbuilder import Arg
from euca2ools.commands.cloudformation import CloudFormationRequest
class ListStackResources(CloudFormationRequest):
DESCRIPTION = 'List all resources for a stack'
ARGS = [Arg('StackName', metavar='STACK',
help='name of the stack to list resources from (required)')]
LIST_TAGS = ['StackResourceSummaries']
def print_result(self, result):
for resource in result['StackResourceSummaries']:
self.print_resource(resource)
| true
| true
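ListStackResources above is declarative: ARGS adds the required STACK positional, LIST_TAGS tells the response parser which element is a list, and print_result walks the parsed summaries. For illustration, here is a hypothetical parsed response in the shape that loop expects; field names follow the CloudFormation ListStackResources API, and print() stands in for the inherited print_resource helper.

result = {
    'StackResourceSummaries': [
        {'LogicalResourceId': 'WebServer',
         'PhysicalResourceId': 'i-0abc1234def567890',
         'ResourceType': 'AWS::EC2::Instance',
         'ResourceStatus': 'CREATE_COMPLETE'},
    ]
}

# print() used here as a stand-in for the inherited print_resource helper.
for resource in result['StackResourceSummaries']:
    print(resource)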
|
790944dc37a4bce839bb96f93e2441e9f57866e1
| 136
|
py
|
Python
|
simple_toolbelt/path.py
|
mbiemann/python-simple-toolbelt
|
ee8a52938078347f42d31d2a99fdcc60ca2cce9f
|
[
"MIT"
] | null | null | null |
simple_toolbelt/path.py
|
mbiemann/python-simple-toolbelt
|
ee8a52938078347f42d31d2a99fdcc60ca2cce9f
|
[
"MIT"
] | null | null | null |
simple_toolbelt/path.py
|
mbiemann/python-simple-toolbelt
|
ee8a52938078347f42d31d2a99fdcc60ca2cce9f
|
[
"MIT"
] | null | null | null |
import os
def ensure_dir(path: str) -> str:
dirname = os.path.dirname(path)
os.makedirs(dirname, exist_ok=True)
return path
| 22.666667
| 39
| 0.691176
|
import os
def ensure_dir(path: str) -> str:
dirname = os.path.dirname(path)
os.makedirs(dirname, exist_ok=True)
return path
| true
| true
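ensure_dir above creates the parent directory of the given path if needed and hands the same path back, so it can be wrapped directly around open(). A small usage sketch; the import path is inferred from the repository layout and is an assumption.

from simple_toolbelt.path import ensure_dir  # assumed import path

# "out/logs/" is created on demand, then the file is written as usual.
with open(ensure_dir("out/logs/run.txt"), "w") as fh:
    fh.write("hello\n")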
|
790945f2c63c6d135a8af9d3cd69f61a073b26b5
| 1,218
|
py
|
Python
|
TransformerNet/layers/Decoder_test.py
|
TeaKatz/Models_Corpus
|
6d9e91eb97829e73d88ecfc4754492f6324ef383
|
[
"MIT"
] | null | null | null |
TransformerNet/layers/Decoder_test.py
|
TeaKatz/Models_Corpus
|
6d9e91eb97829e73d88ecfc4754492f6324ef383
|
[
"MIT"
] | null | null | null |
TransformerNet/layers/Decoder_test.py
|
TeaKatz/Models_Corpus
|
6d9e91eb97829e73d88ecfc4754492f6324ef383
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from TransformerNet.layers import Encoder, Decoder
def Decoder_test(*args, **kwargs):
inputs = tf.random.uniform((64, 62), dtype=tf.int64, minval=0, maxval=200) # (batch_size, input_seq_len)
enc_output = Encoder(num_layers=2, d_model=512, num_heads=8,
d_ff=2048, input_vocab_size=8500,
maximum_position_encoding=10000)(inputs, False, None)
target = tf.random.uniform((64, 26), dtype=tf.int64, minval=0, maxval=200) # (batch_size, target_seq_len)
sample_decoder = Decoder(*args, **kwargs)
output, attn = sample_decoder(target,
enc_output=enc_output,
training=False,
look_ahead_mask=None,
padding_mask=None)
print(output.shape) # (batch_size, target_seq_len, d_model)
print(attn['decoder_layer2_attention2'].shape) # (batch_size, target_seq_len, input_seq_len)
if __name__ == "__main__":
Decoder_test(num_layers=2, d_model=512, num_heads=8,
d_ff=2048, target_vocab_size=8000,
maximum_position_encoding=5000)
| 45.111111
| 111
| 0.602627
|
import tensorflow as tf
from TransformerNet.layers import Encoder, Decoder
def Decoder_test(*args, **kwargs):
inputs = tf.random.uniform((64, 62), dtype=tf.int64, minval=0, maxval=200)
enc_output = Encoder(num_layers=2, d_model=512, num_heads=8,
d_ff=2048, input_vocab_size=8500,
maximum_position_encoding=10000)(inputs, False, None)
target = tf.random.uniform((64, 26), dtype=tf.int64, minval=0, maxval=200)
sample_decoder = Decoder(*args, **kwargs)
output, attn = sample_decoder(target,
enc_output=enc_output,
training=False,
look_ahead_mask=None,
padding_mask=None)
print(output.shape)
print(attn['decoder_layer2_attention2'].shape)
if __name__ == "__main__":
Decoder_test(num_layers=2, d_model=512, num_heads=8,
d_ff=2048, target_vocab_size=8000,
maximum_position_encoding=5000)
| true
| true
|
79094657a029d6ecdd5652c70d7c9bffca65391c
| 14,414
|
py
|
Python
|
scipy/io/wavfile.py
|
jeremiedbb/scipy
|
2bea64c334b18fd445a7945b350d7ace2dc22913
|
[
"BSD-3-Clause"
] | 2
|
2020-06-20T14:11:14.000Z
|
2020-10-12T07:11:36.000Z
|
scipy/io/wavfile.py
|
jeremiedbb/scipy
|
2bea64c334b18fd445a7945b350d7ace2dc22913
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/io/wavfile.py
|
jeremiedbb/scipy
|
2bea64c334b18fd445a7945b350d7ace2dc22913
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Module to read / write wav files using NumPy arrays
Functions
---------
`read`: Return the sample rate (in samples/sec) and data from a WAV file.
`write`: Write a NumPy array as a WAV file.
"""
from __future__ import division, print_function, absolute_import
import sys
import numpy
import struct
import warnings
__all__ = [
'WavFileWarning',
'read',
'write'
]
class WavFileWarning(UserWarning):
pass
WAVE_FORMAT_PCM = 0x0001
WAVE_FORMAT_IEEE_FLOAT = 0x0003
WAVE_FORMAT_EXTENSIBLE = 0xfffe
KNOWN_WAVE_FORMATS = (WAVE_FORMAT_PCM, WAVE_FORMAT_IEEE_FLOAT)
# assumes file pointer is immediately
# after the 'fmt ' id
def _read_fmt_chunk(fid, is_big_endian):
"""
Returns
-------
size : int
size of format subchunk in bytes (minus 8 for "fmt " and itself)
format_tag : int
PCM, float, or compressed format
channels : int
number of channels
fs : int
sampling frequency in samples per second
bytes_per_second : int
overall byte rate for the file
block_align : int
bytes per sample, including all channels
bit_depth : int
bits per sample
"""
if is_big_endian:
fmt = '>'
else:
fmt = '<'
size = res = struct.unpack(fmt+'I', fid.read(4))[0]
bytes_read = 0
if size < 16:
raise ValueError("Binary structure of wave file is not compliant")
res = struct.unpack(fmt+'HHIIHH', fid.read(16))
bytes_read += 16
format_tag, channels, fs, bytes_per_second, block_align, bit_depth = res
if format_tag == WAVE_FORMAT_EXTENSIBLE and size >= (16+2):
ext_chunk_size = struct.unpack(fmt+'H', fid.read(2))[0]
bytes_read += 2
if ext_chunk_size >= 22:
extensible_chunk_data = fid.read(22)
bytes_read += 22
raw_guid = extensible_chunk_data[2+4:2+4+16]
# GUID template {XXXXXXXX-0000-0010-8000-00AA00389B71} (RFC-2361)
# MS GUID byte order: first three groups are native byte order,
# rest is Big Endian
if is_big_endian:
tail = b'\x00\x00\x00\x10\x80\x00\x00\xAA\x00\x38\x9B\x71'
else:
tail = b'\x00\x00\x10\x00\x80\x00\x00\xAA\x00\x38\x9B\x71'
if raw_guid.endswith(tail):
format_tag = struct.unpack(fmt+'I', raw_guid[:4])[0]
else:
raise ValueError("Binary structure of wave file is not compliant")
if format_tag not in KNOWN_WAVE_FORMATS:
raise ValueError("Unknown wave file format")
# move file pointer to next chunk
if size > (bytes_read):
fid.read(size - bytes_read)
return (size, format_tag, channels, fs, bytes_per_second, block_align,
bit_depth)
# assumes file pointer is immediately after the 'data' id
def _read_data_chunk(fid, format_tag, channels, bit_depth, is_big_endian,
mmap=False):
if is_big_endian:
fmt = '>I'
else:
fmt = '<I'
# Size of the data subchunk in bytes
size = struct.unpack(fmt, fid.read(4))[0]
# Number of bytes per sample
bytes_per_sample = bit_depth//8
if bit_depth == 8:
dtype = 'u1'
else:
if is_big_endian:
dtype = '>'
else:
dtype = '<'
if format_tag == WAVE_FORMAT_PCM:
dtype += 'i%d' % bytes_per_sample
else:
dtype += 'f%d' % bytes_per_sample
if not mmap:
data = numpy.frombuffer(fid.read(size), dtype=dtype)
else:
start = fid.tell()
data = numpy.memmap(fid, dtype=dtype, mode='c', offset=start,
shape=(size//bytes_per_sample,))
fid.seek(start + size)
if channels > 1:
data = data.reshape(-1, channels)
return data
def _skip_unknown_chunk(fid, is_big_endian):
if is_big_endian:
fmt = '>I'
else:
fmt = '<I'
data = fid.read(4)
# call unpack() and seek() only if we have really read data from file
# otherwise empty read at the end of the file would trigger
# unnecessary exception at unpack() call
# in case data equals somehow to 0, there is no need for seek() anyway
if data:
size = struct.unpack(fmt, data)[0]
fid.seek(size, 1)
def _read_riff_chunk(fid):
str1 = fid.read(4) # File signature
if str1 == b'RIFF':
is_big_endian = False
fmt = '<I'
elif str1 == b'RIFX':
is_big_endian = True
fmt = '>I'
else:
# There are also .wav files with "FFIR" or "XFIR" signatures?
raise ValueError("File format {}... not "
"understood.".format(repr(str1)))
# Size of entire file
file_size = struct.unpack(fmt, fid.read(4))[0] + 8
str2 = fid.read(4)
if str2 != b'WAVE':
raise ValueError("Not a WAV file.")
return file_size, is_big_endian
def read(filename, mmap=False):
"""
Open a WAV file
Return the sample rate (in samples/sec) and data from a WAV file.
Parameters
----------
filename : string or open file handle
Input wav file.
mmap : bool, optional
Whether to read data as memory-mapped.
Only to be used on real files (Default: False).
.. versionadded:: 0.12.0
Returns
-------
rate : int
Sample rate of wav file.
data : numpy array
Data read from wav file. Data-type is determined from the file;
see Notes.
Notes
-----
This function cannot read wav files with 24-bit data.
Common data types: [1]_
===================== =========== =========== =============
WAV format Min Max NumPy dtype
===================== =========== =========== =============
32-bit floating-point -1.0 +1.0 float32
32-bit PCM -2147483648 +2147483647 int32
16-bit PCM -32768 +32767 int16
8-bit PCM 0 255 uint8
===================== =========== =========== =============
Note that 8-bit PCM is unsigned.
References
----------
.. [1] IBM Corporation and Microsoft Corporation, "Multimedia Programming
Interface and Data Specifications 1.0", section "Data Format of the
Samples", August 1991
http://www.tactilemedia.com/info/MCI_Control_Info.html
Examples
--------
>>> from os.path import dirname, join as pjoin
>>> import scipy.io as sio
Get the filename for an example .wav file from the tests/data directory.
>>> data_dir = pjoin(dirname(sio.__file__), 'tests', 'data')
>>> wav_fname = pjoin(data_dir, 'test-44100Hz-2ch-32bit-float-be.wav')
Load the .wav file contents.
>>> samplerate, data = sio.wavfile.read(wav_fname)
>>> print(f"number of channels = {data.shape[1]}")
number of channels = 2
>>> length = data.shape[0] / samplerate
>>> print(f"length = {length}s")
length = 0.01s
Plot the waveform.
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> time = np.linspace(0., length, data.shape[0])
>>> plt.plot(time, data[:, 0], label="Left channel")
>>> plt.plot(time, data[:, 1], label="Right channel")
>>> plt.legend()
>>> plt.xlabel("Time [s]")
>>> plt.ylabel("Amplitude")
>>> plt.show()
"""
if hasattr(filename, 'read'):
fid = filename
mmap = False
else:
fid = open(filename, 'rb')
try:
file_size, is_big_endian = _read_riff_chunk(fid)
fmt_chunk_received = False
data_chunk_received = False
channels = 1
bit_depth = 8
format_tag = WAVE_FORMAT_PCM
while fid.tell() < file_size:
# read the next chunk
chunk_id = fid.read(4)
if not chunk_id:
if data_chunk_received:
# End of file but data successfully read
warnings.warn(
"Reached EOF prematurely; finished at {:d} bytes, "
"expected {:d} bytes from header."
.format(fid.tell(), file_size),
WavFileWarning, stacklevel=2)
break
else:
raise ValueError("Unexpected end of file.")
elif len(chunk_id) < 4:
raise ValueError("Incomplete wav chunk.")
if chunk_id == b'fmt ':
fmt_chunk_received = True
fmt_chunk = _read_fmt_chunk(fid, is_big_endian)
format_tag, channels, fs = fmt_chunk[1:4]
bit_depth = fmt_chunk[6]
if bit_depth not in (8, 16, 32, 64, 96, 128):
raise ValueError("Unsupported bit depth: the wav file "
"has {}-bit data.".format(bit_depth))
elif chunk_id == b'fact':
_skip_unknown_chunk(fid, is_big_endian)
elif chunk_id == b'data':
data_chunk_received = True
if not fmt_chunk_received:
raise ValueError("No fmt chunk before data")
data = _read_data_chunk(fid, format_tag, channels, bit_depth,
is_big_endian, mmap)
elif chunk_id == b'LIST':
# Someday this could be handled properly but for now skip it
_skip_unknown_chunk(fid, is_big_endian)
elif chunk_id in (b'JUNK', b'Fake'):
# Skip alignment chunks without warning
_skip_unknown_chunk(fid, is_big_endian)
else:
warnings.warn("Chunk (non-data) not understood, skipping it.",
WavFileWarning, stacklevel=2)
_skip_unknown_chunk(fid, is_big_endian)
finally:
if not hasattr(filename, 'read'):
fid.close()
else:
fid.seek(0)
return fs, data
def write(filename, rate, data):
"""
Write a NumPy array as a WAV file.
Parameters
----------
filename : string or open file handle
Output wav file.
rate : int
The sample rate (in samples/sec).
data : ndarray
A 1-D or 2-D NumPy array of either integer or float data-type.
Notes
-----
* Writes a simple uncompressed WAV file.
    * To write multiple channels, use a 2-D array of shape
(Nsamples, Nchannels).
* The bits-per-sample and PCM/float will be determined by the data-type.
Common data types: [1]_
===================== =========== =========== =============
WAV format Min Max NumPy dtype
===================== =========== =========== =============
32-bit floating-point -1.0 +1.0 float32
32-bit PCM -2147483648 +2147483647 int32
16-bit PCM -32768 +32767 int16
8-bit PCM 0 255 uint8
===================== =========== =========== =============
Note that 8-bit PCM is unsigned.
References
----------
.. [1] IBM Corporation and Microsoft Corporation, "Multimedia Programming
Interface and Data Specifications 1.0", section "Data Format of the
Samples", August 1991
http://www.tactilemedia.com/info/MCI_Control_Info.html
Examples
--------
Create a 100Hz sine wave, sampled at 44100Hz.
Write to 16-bit PCM, Mono.
>>> from scipy.io.wavfile import write
    >>> import numpy as np
    >>> samplerate = 44100; fs = 100
    >>> t = np.linspace(0., 1., samplerate)
    >>> amplitude = np.iinfo(np.int16).max
    >>> data = (amplitude * np.sin(2. * np.pi * fs * t)).astype(np.int16)
>>> write("example.wav", samplerate, data)
"""
if hasattr(filename, 'write'):
fid = filename
else:
fid = open(filename, 'wb')
fs = rate
try:
dkind = data.dtype.kind
if not (dkind == 'i' or dkind == 'f' or (dkind == 'u' and
data.dtype.itemsize == 1)):
raise ValueError("Unsupported data type '%s'" % data.dtype)
header_data = b''
header_data += b'RIFF'
header_data += b'\x00\x00\x00\x00'
header_data += b'WAVE'
# fmt chunk
header_data += b'fmt '
if dkind == 'f':
format_tag = WAVE_FORMAT_IEEE_FLOAT
else:
format_tag = WAVE_FORMAT_PCM
if data.ndim == 1:
channels = 1
else:
channels = data.shape[1]
bit_depth = data.dtype.itemsize * 8
bytes_per_second = fs*(bit_depth // 8)*channels
block_align = channels * (bit_depth // 8)
fmt_chunk_data = struct.pack('<HHIIHH', format_tag, channels, fs,
bytes_per_second, block_align, bit_depth)
if not (dkind == 'i' or dkind == 'u'):
# add cbSize field for non-PCM files
fmt_chunk_data += b'\x00\x00'
header_data += struct.pack('<I', len(fmt_chunk_data))
header_data += fmt_chunk_data
# fact chunk (non-PCM files)
if not (dkind == 'i' or dkind == 'u'):
header_data += b'fact'
header_data += struct.pack('<II', 4, data.shape[0])
# check data size (needs to be immediately before the data chunk)
if ((len(header_data)-4-4) + (4+4+data.nbytes)) > 0xFFFFFFFF:
raise ValueError("Data exceeds wave file size limit")
fid.write(header_data)
# data chunk
fid.write(b'data')
fid.write(struct.pack('<I', data.nbytes))
if data.dtype.byteorder == '>' or (data.dtype.byteorder == '=' and
sys.byteorder == 'big'):
data = data.byteswap()
_array_tofile(fid, data)
# Determine file size and place it in correct
# position at start of the file.
size = fid.tell()
fid.seek(4)
fid.write(struct.pack('<I', size-8))
finally:
if not hasattr(filename, 'write'):
fid.close()
else:
fid.seek(0)
if sys.version_info[0] >= 3:
def _array_tofile(fid, data):
# ravel gives a c-contiguous buffer
fid.write(data.ravel().view('b').data)
else:
def _array_tofile(fid, data):
fid.write(data.tostring())
| 31.334783
| 78
| 0.548009
|
from __future__ import division, print_function, absolute_import
import sys
import numpy
import struct
import warnings
__all__ = [
'WavFileWarning',
'read',
'write'
]
class WavFileWarning(UserWarning):
pass
WAVE_FORMAT_PCM = 0x0001
WAVE_FORMAT_IEEE_FLOAT = 0x0003
WAVE_FORMAT_EXTENSIBLE = 0xfffe
KNOWN_WAVE_FORMATS = (WAVE_FORMAT_PCM, WAVE_FORMAT_IEEE_FLOAT)
def _read_fmt_chunk(fid, is_big_endian):
if is_big_endian:
fmt = '>'
else:
fmt = '<'
size = res = struct.unpack(fmt+'I', fid.read(4))[0]
bytes_read = 0
if size < 16:
raise ValueError("Binary structure of wave file is not compliant")
res = struct.unpack(fmt+'HHIIHH', fid.read(16))
bytes_read += 16
format_tag, channels, fs, bytes_per_second, block_align, bit_depth = res
if format_tag == WAVE_FORMAT_EXTENSIBLE and size >= (16+2):
ext_chunk_size = struct.unpack(fmt+'H', fid.read(2))[0]
bytes_read += 2
if ext_chunk_size >= 22:
extensible_chunk_data = fid.read(22)
bytes_read += 22
raw_guid = extensible_chunk_data[2+4:2+4+16]
if is_big_endian:
tail = b'\x00\x00\x00\x10\x80\x00\x00\xAA\x00\x38\x9B\x71'
else:
tail = b'\x00\x00\x10\x00\x80\x00\x00\xAA\x00\x38\x9B\x71'
if raw_guid.endswith(tail):
format_tag = struct.unpack(fmt+'I', raw_guid[:4])[0]
else:
raise ValueError("Binary structure of wave file is not compliant")
if format_tag not in KNOWN_WAVE_FORMATS:
raise ValueError("Unknown wave file format")
if size > (bytes_read):
fid.read(size - bytes_read)
return (size, format_tag, channels, fs, bytes_per_second, block_align,
bit_depth)
def _read_data_chunk(fid, format_tag, channels, bit_depth, is_big_endian,
mmap=False):
if is_big_endian:
fmt = '>I'
else:
fmt = '<I'
size = struct.unpack(fmt, fid.read(4))[0]
bytes_per_sample = bit_depth//8
if bit_depth == 8:
dtype = 'u1'
else:
if is_big_endian:
dtype = '>'
else:
dtype = '<'
if format_tag == WAVE_FORMAT_PCM:
dtype += 'i%d' % bytes_per_sample
else:
dtype += 'f%d' % bytes_per_sample
if not mmap:
data = numpy.frombuffer(fid.read(size), dtype=dtype)
else:
start = fid.tell()
data = numpy.memmap(fid, dtype=dtype, mode='c', offset=start,
shape=(size//bytes_per_sample,))
fid.seek(start + size)
if channels > 1:
data = data.reshape(-1, channels)
return data
def _skip_unknown_chunk(fid, is_big_endian):
if is_big_endian:
fmt = '>I'
else:
fmt = '<I'
data = fid.read(4)
if data:
size = struct.unpack(fmt, data)[0]
fid.seek(size, 1)
def _read_riff_chunk(fid):
str1 = fid.read(4)
if str1 == b'RIFF':
is_big_endian = False
fmt = '<I'
elif str1 == b'RIFX':
is_big_endian = True
fmt = '>I'
else:
raise ValueError("File format {}... not "
"understood.".format(repr(str1)))
file_size = struct.unpack(fmt, fid.read(4))[0] + 8
str2 = fid.read(4)
if str2 != b'WAVE':
raise ValueError("Not a WAV file.")
return file_size, is_big_endian
def read(filename, mmap=False):
if hasattr(filename, 'read'):
fid = filename
mmap = False
else:
fid = open(filename, 'rb')
try:
file_size, is_big_endian = _read_riff_chunk(fid)
fmt_chunk_received = False
data_chunk_received = False
channels = 1
bit_depth = 8
format_tag = WAVE_FORMAT_PCM
while fid.tell() < file_size:
chunk_id = fid.read(4)
if not chunk_id:
if data_chunk_received:
warnings.warn(
"Reached EOF prematurely; finished at {:d} bytes, "
"expected {:d} bytes from header."
.format(fid.tell(), file_size),
WavFileWarning, stacklevel=2)
break
else:
raise ValueError("Unexpected end of file.")
elif len(chunk_id) < 4:
raise ValueError("Incomplete wav chunk.")
if chunk_id == b'fmt ':
fmt_chunk_received = True
fmt_chunk = _read_fmt_chunk(fid, is_big_endian)
format_tag, channels, fs = fmt_chunk[1:4]
bit_depth = fmt_chunk[6]
if bit_depth not in (8, 16, 32, 64, 96, 128):
raise ValueError("Unsupported bit depth: the wav file "
"has {}-bit data.".format(bit_depth))
elif chunk_id == b'fact':
_skip_unknown_chunk(fid, is_big_endian)
elif chunk_id == b'data':
data_chunk_received = True
if not fmt_chunk_received:
raise ValueError("No fmt chunk before data")
data = _read_data_chunk(fid, format_tag, channels, bit_depth,
is_big_endian, mmap)
elif chunk_id == b'LIST':
_skip_unknown_chunk(fid, is_big_endian)
elif chunk_id in (b'JUNK', b'Fake'):
_skip_unknown_chunk(fid, is_big_endian)
else:
warnings.warn("Chunk (non-data) not understood, skipping it.",
WavFileWarning, stacklevel=2)
_skip_unknown_chunk(fid, is_big_endian)
finally:
if not hasattr(filename, 'read'):
fid.close()
else:
fid.seek(0)
return fs, data
def write(filename, rate, data):
if hasattr(filename, 'write'):
fid = filename
else:
fid = open(filename, 'wb')
fs = rate
try:
dkind = data.dtype.kind
if not (dkind == 'i' or dkind == 'f' or (dkind == 'u' and
data.dtype.itemsize == 1)):
raise ValueError("Unsupported data type '%s'" % data.dtype)
header_data = b''
header_data += b'RIFF'
header_data += b'\x00\x00\x00\x00'
header_data += b'WAVE'
header_data += b'fmt '
if dkind == 'f':
format_tag = WAVE_FORMAT_IEEE_FLOAT
else:
format_tag = WAVE_FORMAT_PCM
if data.ndim == 1:
channels = 1
else:
channels = data.shape[1]
bit_depth = data.dtype.itemsize * 8
bytes_per_second = fs*(bit_depth // 8)*channels
block_align = channels * (bit_depth // 8)
fmt_chunk_data = struct.pack('<HHIIHH', format_tag, channels, fs,
bytes_per_second, block_align, bit_depth)
if not (dkind == 'i' or dkind == 'u'):
fmt_chunk_data += b'\x00\x00'
header_data += struct.pack('<I', len(fmt_chunk_data))
header_data += fmt_chunk_data
if not (dkind == 'i' or dkind == 'u'):
header_data += b'fact'
header_data += struct.pack('<II', 4, data.shape[0])
if ((len(header_data)-4-4) + (4+4+data.nbytes)) > 0xFFFFFFFF:
raise ValueError("Data exceeds wave file size limit")
fid.write(header_data)
fid.write(b'data')
fid.write(struct.pack('<I', data.nbytes))
if data.dtype.byteorder == '>' or (data.dtype.byteorder == '=' and
sys.byteorder == 'big'):
data = data.byteswap()
_array_tofile(fid, data)
size = fid.tell()
fid.seek(4)
fid.write(struct.pack('<I', size-8))
finally:
if not hasattr(filename, 'write'):
fid.close()
else:
fid.seek(0)
if sys.version_info[0] >= 3:
def _array_tofile(fid, data):
fid.write(data.ravel().view('b').data)
else:
def _array_tofile(fid, data):
fid.write(data.tostring())
| true
| true
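The module above exposes read and write as its whole public surface. A small round-trip sketch against that public API (the file name is arbitrary):

import numpy as np
from scipy.io import wavfile

samplerate = 8000
t = np.linspace(0., 1., samplerate, endpoint=False)
tone = (0.5 * np.iinfo(np.int16).max * np.sin(2 * np.pi * 440 * t)).astype(np.int16)

wavfile.write("tone.wav", samplerate, tone)   # 16-bit PCM, mono
rate, data = wavfile.read("tone.wav")
assert rate == samplerate
assert data.dtype == np.int16 and data.shape == tone.shape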
|
790946a5b7cd0becad3e0944422e311ff385860d
| 6,662
|
py
|
Python
|
tests/nnet3/nnet-compute-test.py
|
mxmpl/pykaldi
|
0570307138c5391cc47b019450d08bcb9686dd98
|
[
"Apache-2.0"
] | 916
|
2017-11-22T19:33:36.000Z
|
2022-03-31T11:51:58.000Z
|
tests/nnet3/nnet-compute-test.py
|
mxmpl/pykaldi
|
0570307138c5391cc47b019450d08bcb9686dd98
|
[
"Apache-2.0"
] | 268
|
2018-01-16T22:06:45.000Z
|
2022-03-29T03:24:41.000Z
|
tests/nnet3/nnet-compute-test.py
|
mxmpl/pykaldi
|
0570307138c5391cc47b019450d08bcb9686dd98
|
[
"Apache-2.0"
] | 260
|
2018-01-23T18:39:40.000Z
|
2022-03-24T08:17:39.000Z
|
#!/usr/bin/env python
import random
import unittest
from kaldi.base.io import istringstream, ostringstream
from kaldi.cudamatrix import cuda_available, approx_equal_cu_matrix, CuMatrix
from kaldi.matrix import Matrix, Vector
from kaldi.matrix.functions import approx_equal
from kaldi.nnet3 import *
class TestNnetCompute(unittest.TestCase):
def test_nnet_compute(self):
gen_config = NnetGenerationOptions()
test_collapse_model = random.choice([True, False])
configs = generate_config_sequence(gen_config)
nnet = Nnet()
for j, config in enumerate(configs):
# print("Input config[{}]:".format(j))
# print(config)
istrm = istringstream.from_str(config)
nnet.read_config(istrm)
request = ComputationRequest()
inputs = compute_example_computation_request_simple(nnet, request)
if test_collapse_model:
set_batchnorm_test_mode(True, nnet)
set_dropout_test_mode(True, nnet)
compiler = Compiler(request, nnet)
opts = CompilerOptions()
computation = compiler.create_computation(opts)
nnet_collapsed = Nnet.from_other(nnet)
if test_collapse_model:
collapse_config = CollapseModelConfig()
collapse_model(collapse_config, nnet_collapsed)
compiler_collapsed = Compiler(request, nnet_collapsed)
computation_collapsed = compiler_collapsed.create_computation(opts)
computation_collapsed.compute_cuda_indexes()
ostrm = ostringstream()
computation.print_computation(ostrm, nnet)
# print("Generated computation:")
# print(ostrm.to_str())
check_config = CheckComputationOptions()
check_config.check_rewrite = True
checker = ComputationChecker(check_config, nnet, computation)
checker.check()
if random.choice([True, False]):
opt_config = NnetOptimizeOptions()
optimize(opt_config, nnet, max_output_time_in_request(request),
computation)
ostrm = ostringstream()
computation.print_computation(ostrm, nnet)
# print("Optimized computation:")
# print(ostrm.to_str())
compute_opts = NnetComputeOptions()
compute_opts.debug = random.choice([True, False])
computation.compute_cuda_indexes()
computer = NnetComputer(compute_opts, computation, nnet, nnet)
for i, ispec in enumerate(request.inputs):
temp = CuMatrix.from_matrix(inputs[i])
print("Input sum:", temp.sum())
computer.accept_input(ispec.name, temp)
computer.run()
output = computer.get_output_destructive("output")
print("Output sum:", output.sum())
if test_collapse_model:
computer_collapsed = NnetComputer(compute_opts,
computation_collapsed,
nnet_collapsed, nnet_collapsed)
for i, ispec in enumerate(request.inputs):
temp = CuMatrix.from_matrix(inputs[i])
computer_collapsed.accept_input(ispec.name, temp)
computer_collapsed.run()
output_collapsed = computer_collapsed.get_output_destructive("output")
print("Output sum [collapsed]:", output_collapsed.sum())
self.assertTrue(approx_equal_cu_matrix(output, output_collapsed),
"Regular and collapsed computation outputs differ.")
output_deriv = CuMatrix.from_size(output.num_rows(), output.num_cols())
output_deriv.set_randn()
if request.outputs[0].has_deriv:
computer.accept_input("output", output_deriv)
computer.run()
for i, ispec in enumerate(request.inputs):
if ispec.has_deriv:
in_deriv = computer.get_output_destructive(ispec.name)
print("Input-deriv sum for input {} is:".format(ispec.name),
in_deriv.sum())
def test_nnet_decodable(self):
gen_config = NnetGenerationOptions()
configs = generate_config_sequence(gen_config)
nnet = Nnet()
for j, config in enumerate(configs):
# print("Input config[{}]:".format(j))
# print(config)
istrm = istringstream.from_str(config)
nnet.read_config(istrm)
num_frames = 5 + random.randint(1, 100)
input_dim = nnet.input_dim("input")
output_dim = nnet.output_dim("output")
ivector_dim = max(0, nnet.input_dim("ivector"))
input = Matrix(num_frames, input_dim)
set_batchnorm_test_mode(True, nnet)
set_dropout_test_mode(True, nnet)
input.set_randn_()
ivector = Vector(ivector_dim)
ivector.set_randn_()
priors = Vector(output_dim if random.choice([True, False]) else 0)
if len(priors) != 0:
priors.set_randn_()
priors.apply_exp_()
output1 = Matrix(num_frames, output_dim)
output2 = Matrix(num_frames, output_dim)
opts = NnetSimpleComputationOptions()
opts.frames_per_chunk = random.randint(5, 25)
compiler = CachingOptimizingCompiler(nnet)
decodable = DecodableNnetSimple(opts, nnet, priors, input, compiler,
ivector if ivector_dim else None)
for t in range(num_frames):
decodable.get_output_for_frame(t, output1[t])
opts = NnetSimpleLoopedComputationOptions()
info = DecodableNnetSimpleLoopedInfo.from_priors(opts, priors, nnet)
decodable = DecodableNnetSimpleLooped(info, input,
ivector if ivector_dim else None)
for t in range(num_frames):
decodable.get_output_for_frame(t, output2[t])
if (not nnet_is_recurrent(nnet)
and nnet.info().find("statistics-extraction") == -1
and nnet.info().find("TimeHeightConvolutionComponent") == -1
and nnet.info().find("RestrictedAttentionComponent") == -1):
for t in range(num_frames):
self.assertTrue(approx_equal(output1[t], output2[t]))
if __name__ == '__main__':
for i in range(2):
if cuda_available():
from kaldi.cudamatrix import CuDevice
CuDevice.instantiate().set_debug_stride_mode(True)
if i == 0:
CuDevice.instantiate().select_gpu_id("no")
else:
CuDevice.instantiate().select_gpu_id("yes")
unittest.main(exit=False)
| 40.375758
| 82
| 0.624287
|
import random
import unittest
from kaldi.base.io import istringstream, ostringstream
from kaldi.cudamatrix import cuda_available, approx_equal_cu_matrix, CuMatrix
from kaldi.matrix import Matrix, Vector
from kaldi.matrix.functions import approx_equal
from kaldi.nnet3 import *
class TestNnetCompute(unittest.TestCase):
def test_nnet_compute(self):
gen_config = NnetGenerationOptions()
test_collapse_model = random.choice([True, False])
configs = generate_config_sequence(gen_config)
nnet = Nnet()
for j, config in enumerate(configs):
istrm = istringstream.from_str(config)
nnet.read_config(istrm)
request = ComputationRequest()
inputs = compute_example_computation_request_simple(nnet, request)
if test_collapse_model:
set_batchnorm_test_mode(True, nnet)
set_dropout_test_mode(True, nnet)
compiler = Compiler(request, nnet)
opts = CompilerOptions()
computation = compiler.create_computation(opts)
nnet_collapsed = Nnet.from_other(nnet)
if test_collapse_model:
collapse_config = CollapseModelConfig()
collapse_model(collapse_config, nnet_collapsed)
compiler_collapsed = Compiler(request, nnet_collapsed)
computation_collapsed = compiler_collapsed.create_computation(opts)
computation_collapsed.compute_cuda_indexes()
ostrm = ostringstream()
computation.print_computation(ostrm, nnet)
check_config = CheckComputationOptions()
check_config.check_rewrite = True
checker = ComputationChecker(check_config, nnet, computation)
checker.check()
if random.choice([True, False]):
opt_config = NnetOptimizeOptions()
optimize(opt_config, nnet, max_output_time_in_request(request),
computation)
ostrm = ostringstream()
computation.print_computation(ostrm, nnet)
compute_opts = NnetComputeOptions()
compute_opts.debug = random.choice([True, False])
computation.compute_cuda_indexes()
computer = NnetComputer(compute_opts, computation, nnet, nnet)
for i, ispec in enumerate(request.inputs):
temp = CuMatrix.from_matrix(inputs[i])
print("Input sum:", temp.sum())
computer.accept_input(ispec.name, temp)
computer.run()
output = computer.get_output_destructive("output")
print("Output sum:", output.sum())
if test_collapse_model:
computer_collapsed = NnetComputer(compute_opts,
computation_collapsed,
nnet_collapsed, nnet_collapsed)
for i, ispec in enumerate(request.inputs):
temp = CuMatrix.from_matrix(inputs[i])
computer_collapsed.accept_input(ispec.name, temp)
computer_collapsed.run()
output_collapsed = computer_collapsed.get_output_destructive("output")
print("Output sum [collapsed]:", output_collapsed.sum())
self.assertTrue(approx_equal_cu_matrix(output, output_collapsed),
"Regular and collapsed computation outputs differ.")
output_deriv = CuMatrix.from_size(output.num_rows(), output.num_cols())
output_deriv.set_randn()
if request.outputs[0].has_deriv:
computer.accept_input("output", output_deriv)
computer.run()
for i, ispec in enumerate(request.inputs):
if ispec.has_deriv:
in_deriv = computer.get_output_destructive(ispec.name)
print("Input-deriv sum for input {} is:".format(ispec.name),
in_deriv.sum())
def test_nnet_decodable(self):
gen_config = NnetGenerationOptions()
configs = generate_config_sequence(gen_config)
nnet = Nnet()
for j, config in enumerate(configs):
istrm = istringstream.from_str(config)
nnet.read_config(istrm)
num_frames = 5 + random.randint(1, 100)
input_dim = nnet.input_dim("input")
output_dim = nnet.output_dim("output")
ivector_dim = max(0, nnet.input_dim("ivector"))
input = Matrix(num_frames, input_dim)
set_batchnorm_test_mode(True, nnet)
set_dropout_test_mode(True, nnet)
input.set_randn_()
ivector = Vector(ivector_dim)
ivector.set_randn_()
priors = Vector(output_dim if random.choice([True, False]) else 0)
if len(priors) != 0:
priors.set_randn_()
priors.apply_exp_()
output1 = Matrix(num_frames, output_dim)
output2 = Matrix(num_frames, output_dim)
opts = NnetSimpleComputationOptions()
opts.frames_per_chunk = random.randint(5, 25)
compiler = CachingOptimizingCompiler(nnet)
decodable = DecodableNnetSimple(opts, nnet, priors, input, compiler,
ivector if ivector_dim else None)
for t in range(num_frames):
decodable.get_output_for_frame(t, output1[t])
opts = NnetSimpleLoopedComputationOptions()
info = DecodableNnetSimpleLoopedInfo.from_priors(opts, priors, nnet)
decodable = DecodableNnetSimpleLooped(info, input,
ivector if ivector_dim else None)
for t in range(num_frames):
decodable.get_output_for_frame(t, output2[t])
if (not nnet_is_recurrent(nnet)
and nnet.info().find("statistics-extraction") == -1
and nnet.info().find("TimeHeightConvolutionComponent") == -1
and nnet.info().find("RestrictedAttentionComponent") == -1):
for t in range(num_frames):
self.assertTrue(approx_equal(output1[t], output2[t]))
if __name__ == '__main__':
for i in range(2):
if cuda_available():
from kaldi.cudamatrix import CuDevice
CuDevice.instantiate().set_debug_stride_mode(True)
if i == 0:
CuDevice.instantiate().select_gpu_id("no")
else:
CuDevice.instantiate().select_gpu_id("yes")
unittest.main(exit=False)
| true
| true
|
7909475aadf75da5fbcd516584081dbb378cca5c
| 3,210
|
py
|
Python
|
applauncher/configuration.py
|
maxpowel/applauncher
|
31d51b68f08c7f9595b3b610a7b52f9ed657d851
|
[
"Apache-2.0"
] | 3
|
2018-05-06T19:00:55.000Z
|
2018-06-05T09:03:34.000Z
|
applauncher/configuration.py
|
maxpowel/applauncher
|
31d51b68f08c7f9595b3b610a7b52f9ed657d851
|
[
"Apache-2.0"
] | 10
|
2018-03-15T13:14:59.000Z
|
2021-09-21T13:26:10.000Z
|
applauncher/configuration.py
|
maxpowel/applauncher
|
31d51b68f08c7f9595b3b610a7b52f9ed657d851
|
[
"Apache-2.0"
] | 2
|
2018-05-24T17:30:20.000Z
|
2021-09-06T22:03:31.000Z
|
"""Configuration format loaders"""
import locale
import os
from abc import ABC, abstractmethod
import yaml
from pydantic import create_model
def load_configuration(configuration_file_path, parameters_file_path, bundles):
"""Combines the configuration and parameters and build the configuration object"""
mappings = {}
for bundle in bundles:
if hasattr(bundle, "config_mapping"):
mappings.update(bundle.config_mapping)
loader = YmlLoader()
return loader.build_config(mappings, config_source=configuration_file_path, parameters_source=parameters_file_path)
def is_string(value):
"""Check if the value is actually a string or not"""
try:
float(value)
return False
except ValueError:
if value.lower() in ["true", "false"]:
return False
return True
class ConfigurationLoader(ABC):
"""Base configuration loader"""
@abstractmethod
def load_parameters(self, source):
"""Convert the source into a dictionary"""
@abstractmethod
def load_config(self, config_source, parameters_source):
"""Prase the config file and build a dictionary"""
def build_config(self, config_mappings, config_source, parameters_source):
"""By using the loaded parameters and loaded config, build the final configuration object"""
configuration_class = create_model('Configuration', **{k: (v, ...) for k, v in config_mappings.items()})
return configuration_class(**self.load_config(config_source, parameters_source))
class YmlLoader(ConfigurationLoader):
"""YML Format parser and config loader"""
def load_parameters(self, source):
"""For YML, the source it the file path"""
with open(source, encoding=locale.getpreferredencoding(False)) as parameters_source:
loaded = yaml.safe_load(parameters_source.read())
if loaded:
for key, value in loaded.items():
if isinstance(value, str):
loaded[key] = "'" + value + "'"
return loaded
return {}
def load_config(self, config_source, parameters_source):
"""For YML, the source it the file path"""
with open(config_source, encoding=locale.getpreferredencoding(False)) as config_source_file:
config_raw = config_source_file.read()
parameters = {}
# Parameters from file
if os.path.isfile(parameters_source):
params = self.load_parameters(parameters_source)
if params is not None:
parameters.update(params)
# Overwrite parameters with the environment variables
env_params = {}
env_params.update(os.environ)
for key, value in env_params.items():
if is_string(value):
env_params[key] = "'" + value + "'"
parameters.update(env_params)
# Replace the parameters
final_configuration = config_raw.format(**parameters)
final_configuration = yaml.safe_load(final_configuration)
return final_configuration if final_configuration is not None else {}
| 38.674699
| 119
| 0.65109
|
import locale
import os
from abc import ABC, abstractmethod
import yaml
from pydantic import create_model
def load_configuration(configuration_file_path, parameters_file_path, bundles):
mappings = {}
for bundle in bundles:
if hasattr(bundle, "config_mapping"):
mappings.update(bundle.config_mapping)
loader = YmlLoader()
return loader.build_config(mappings, config_source=configuration_file_path, parameters_source=parameters_file_path)
def is_string(value):
try:
float(value)
return False
except ValueError:
if value.lower() in ["true", "false"]:
return False
return True
class ConfigurationLoader(ABC):
    @abstractmethod
    def load_parameters(self, source):
        ...
    @abstractmethod
    def load_config(self, config_source, parameters_source):
        ...
def build_config(self, config_mappings, config_source, parameters_source):
configuration_class = create_model('Configuration', **{k: (v, ...) for k, v in config_mappings.items()})
return configuration_class(**self.load_config(config_source, parameters_source))
class YmlLoader(ConfigurationLoader):
def load_parameters(self, source):
with open(source, encoding=locale.getpreferredencoding(False)) as parameters_source:
loaded = yaml.safe_load(parameters_source.read())
if loaded:
for key, value in loaded.items():
if isinstance(value, str):
loaded[key] = "'" + value + "'"
return loaded
return {}
def load_config(self, config_source, parameters_source):
with open(config_source, encoding=locale.getpreferredencoding(False)) as config_source_file:
config_raw = config_source_file.read()
parameters = {}
if os.path.isfile(parameters_source):
params = self.load_parameters(parameters_source)
if params is not None:
parameters.update(params)
env_params = {}
env_params.update(os.environ)
for key, value in env_params.items():
if is_string(value):
env_params[key] = "'" + value + "'"
parameters.update(env_params)
final_configuration = config_raw.format(**parameters)
final_configuration = yaml.safe_load(final_configuration)
return final_configuration if final_configuration is not None else {}
| true
| true
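YmlLoader.load_config above works by quoting every string parameter, merging in environment variables, substituting them into the raw YAML with str.format, and only then parsing. A standalone sketch of that substitution step (no applauncher import; the parameter names and config snippet are made up):

import yaml

config_raw = "database:\n  host: {DB_HOST}\n  port: {DB_PORT}\n"
parameters = {"DB_HOST": "'localhost'", "DB_PORT": 5432}  # strings are pre-quoted, as above

final_configuration = yaml.safe_load(config_raw.format(**parameters))
print(final_configuration)  # {'database': {'host': 'localhost', 'port': 5432}}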
|
7909480692fe4406dc5d81e5ca4b3456bb280fcb
| 5,899
|
py
|
Python
|
src/shut/model/version.py
|
NiklasRosenstein/shut
|
517bded2ff54306257d5622a08a1ba1ec967ffe5
|
[
"MIT"
] | 5
|
2020-11-30T04:06:27.000Z
|
2022-01-06T17:14:33.000Z
|
src/shut/model/version.py
|
NiklasRosenstein/shut
|
517bded2ff54306257d5622a08a1ba1ec967ffe5
|
[
"MIT"
] | 33
|
2020-09-07T16:58:14.000Z
|
2022-02-13T00:59:28.000Z
|
src/shut/model/version.py
|
NiklasRosenstein/shut
|
517bded2ff54306257d5622a08a1ba1ec967ffe5
|
[
"MIT"
] | 2
|
2020-12-12T10:02:12.000Z
|
2021-06-06T05:41:12.000Z
|
# -*- coding: utf8 -*-
# Copyright (c) 2021 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import logging
import re
from typing import Optional, Union
from databind.core import Converter, Context, Direction
from databind.core.mapper.objectmapper import ObjectMapper
from nr.utils.git import Git
from packaging.version import Version as _Version
logger = logging.getLogger(__name__)
class Version(_Version):
""" An extension of #packageing.version.Version which supports a
commit-distance and commit SHA suffix in the format of `-X-gY` (where
X is the distance and Y is the lowercase 7-character SHA sum). """
commit_distance: Optional[int]
def __init__(self, s: Union['Version', str]):
if isinstance(s, Version):
s = str(s)
elif not isinstance(s, str):
raise TypeError('expected Version or str, got {}'.format(type(s).__name__))
commit_distance: Optional[int]
sha: Optional[str]
match = re.match(r'(.*)-(\d+)-g([0-9a-f]{7})', s)
if match:
s = match.group(1)
commit_distance = int(match.group(2))
sha = match.group(3)
else:
commit_distance = None
sha = None
super().__init__(s)
self.commit_distance = commit_distance
self.sha = sha
def __str__(self):
s = super().__str__()
if self.commit_distance and self.sha:
s += '-{}-g{}'.format(self.commit_distance, self.sha)
return s
def __lt__(self, other):
if super().__lt__(other):
return True
if super().__eq__(other):
return (self.commit_distance or 0) < (other.commit_distance or 0)
return False
def __gt__(self, other):
return other < self and other != self
def __eq__(self, other):
if super().__eq__(other) is True:
return (self.commit_distance, self.sha) == (other.commit_distance, other.sha)
return False
def __ne__(self, other):
return not (self == other)
@property
def pep440_compliant(self):
return self.sha is None
def parse_version(version_string: str) -> Version:
return Version(version_string)
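# Illustrative parses (version strings assumed): the '-X-gY' suffix described in the
# Version docstring is recognised by the regex in __init__ and stripped before the base
# version is handed to packaging.
#
#     parse_version('1.0.0')             -> commit_distance=None, sha=None
#     parse_version('1.0.0-3-gabc1234')  -> commit_distance=3, sha='abc1234'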
def bump_version(version: Version, kind: str) -> Version:
major, minor, patch, post = version.major, version.minor, version.micro, version.post
if kind == 'post':
if post is None:
post = ('post', 1)
else:
post = (post[0], post[1] + 1)
elif kind == 'patch':
post = None
patch += 1
elif kind == 'minor':
post = None
patch = 0
minor += 1
elif kind == 'major':
post = None
patch = minor = 0
major += 1
else:
raise ValueError('invalid kind: {!r}'.format(kind))
string = '%s.%s.%s' % (major, minor, patch)
if post:
string += '.post' + str(post)
return Version(string)
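# Worked examples (inputs assumed): bump_version drops any post-release and resets the
# components below the chosen kind.
#
#     bump_version(Version('1.2.3'), 'patch')  -> 1.2.4
#     bump_version(Version('1.2.3'), 'minor')  -> 1.3.0
#     bump_version(Version('1.2.3'), 'major')  -> 2.0.0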
def get_commit_distance_version(repo_dir: str, version: Version, latest_tag: str) -> Optional[Version]:
"""
This function creates a string which describes the version of the
monorepo or package that includes the commit distance and SHA revision
number.
For a mono repository, the full commit distance is used. The same is true
for a single package. For a package inside a mono repository that does not
  apply mono versioning, the package's local commit distance is used.
This is close to what `git describe --tags` does. An example version number
generated by this function is: `0.1.0+24.gd9ade3f`. If the working state is
dirty, `.dirty` will be appended to the local version.
Notes:
- If there is no commit distance from the *latest_tag* to the current
state of the repository, this function returns None.
- The version returned by this function is a PEP440 local version that
cannot be used for packages when submitting them to PyPI.
- If the tag for the version of *subject* does not exist on the repository,
it will fall back to 0.0.0 as the version number which is treated as
"the beginning of the repository", even if no tag for this version exists.
Todo: We could try to find the previous tag for this subject and use that.
"""
git = Git(repo_dir)
dirty = git.has_diff()
if git.rev_parse(latest_tag):
distance = len(git.rev_list(latest_tag + '..HEAD'))
else:
logger.warning('tag "%s" does not exist', latest_tag)
version = Version('0.0.0')
distance = len(git.rev_list('HEAD'))
if distance == 0:
if dirty:
return parse_version(str(version) + '+dirty')
return None
rev = git.rev_parse('HEAD')
assert rev, git
local = '+{}.g{}{}'.format(distance, rev[:7], '.dirty' if dirty else '')
return parse_version(str(version) + local)
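# Illustrative outcome (repository state assumed): with version 0.1.0, 24 commits since
# the latest tag, HEAD at d9ade3f and a clean working tree, this returns the PEP 440
# local version '0.1.0+24.gd9ade3f'; a dirty tree would append '.dirty'.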
class VersionConverter(Converter):
def convert(self, ctx: Context) -> object:
if ctx.direction == Direction.serialize:
return str(ctx.value)
else:
return parse_version(ctx.value)
from .utils import StringConverter
from . import mapper
mapper.add_converter_for_type(Version, StringConverter(parse_version)) # type: ignore
| 32.955307
| 103
| 0.698423
|
import logging
import re
from typing import Optional, Union
from databind.core import Converter, Context, Direction
from databind.core.mapper.objectmapper import ObjectMapper
from nr.utils.git import Git
from packaging.version import Version as _Version
logger = logging.getLogger(__name__)
class Version(_Version):
commit_distance: Optional[int]
def __init__(self, s: Union['Version', str]):
if isinstance(s, Version):
s = str(s)
elif not isinstance(s, str):
raise TypeError('expected Version or str, got {}'.format(type(s).__name__))
commit_distance: Optional[int]
sha: Optional[str]
match = re.match(r'(.*)-(\d+)-g([0-9a-f]{7})', s)
if match:
s = match.group(1)
commit_distance = int(match.group(2))
sha = match.group(3)
else:
commit_distance = None
sha = None
super().__init__(s)
self.commit_distance = commit_distance
self.sha = sha
def __str__(self):
s = super().__str__()
if self.commit_distance and self.sha:
s += '-{}-g{}'.format(self.commit_distance, self.sha)
return s
def __lt__(self, other):
if super().__lt__(other):
return True
if super().__eq__(other):
return (self.commit_distance or 0) < (other.commit_distance or 0)
return False
def __gt__(self, other):
return other < self and other != self
def __eq__(self, other):
if super().__eq__(other) is True:
return (self.commit_distance, self.sha) == (other.commit_distance, other.sha)
return False
def __ne__(self, other):
return not (self == other)
@property
def pep440_compliant(self):
return self.sha is None
def parse_version(version_string: str) -> Version:
return Version(version_string)
def bump_version(version: Version, kind: str) -> Version:
major, minor, patch, post = version.major, version.minor, version.micro, version.post
if kind == 'post':
if post is None:
post = ('post', 1)
else:
post = (post[0], post[1] + 1)
elif kind == 'patch':
post = None
patch += 1
elif kind == 'minor':
post = None
patch = 0
minor += 1
elif kind == 'major':
post = None
patch = minor = 0
major += 1
else:
raise ValueError('invalid kind: {!r}'.format(kind))
string = '%s.%s.%s' % (major, minor, patch)
if post:
string += '.post' + str(post)
return Version(string)
def get_commit_distance_version(repo_dir: str, version: Version, latest_tag: str) -> Optional[Version]:
git = Git(repo_dir)
dirty = git.has_diff()
if git.rev_parse(latest_tag):
distance = len(git.rev_list(latest_tag + '..HEAD'))
else:
logger.warning('tag "%s" does not exist', latest_tag)
version = Version('0.0.0')
distance = len(git.rev_list('HEAD'))
if distance == 0:
if dirty:
return parse_version(str(version) + '+dirty')
return None
rev = git.rev_parse('HEAD')
assert rev, git
local = '+{}.g{}{}'.format(distance, rev[:7], '.dirty' if dirty else '')
return parse_version(str(version) + local)
class VersionConverter(Converter):
def convert(self, ctx: Context) -> object:
if ctx.direction == Direction.serialize:
return str(ctx.value)
else:
return parse_version(ctx.value)
from .utils import StringConverter
from . import mapper
mapper.add_converter_for_type(Version, StringConverter(parse_version))
| true
| true
|
79094888287f3474ad5629d99bfe9b593a29fedb
| 16,392
|
py
|
Python
|
cogs/music.py
|
ROYAL-HarsH/HexBot
|
572e3b26291f295f89bd0f7ad8ad5c03c4147470
|
[
"MIT"
] | 123
|
2020-09-10T06:52:00.000Z
|
2022-03-31T09:52:53.000Z
|
cogs/music.py
|
ROYAL-HarsH/HexBot
|
572e3b26291f295f89bd0f7ad8ad5c03c4147470
|
[
"MIT"
] | 6
|
2020-10-28T18:07:18.000Z
|
2021-09-14T17:22:03.000Z
|
cogs/music.py
|
ROYAL-HarsH/HexBot
|
572e3b26291f295f89bd0f7ad8ad5c03c4147470
|
[
"MIT"
] | 29
|
2020-09-05T12:23:34.000Z
|
2022-02-08T16:23:48.000Z
|
import math
import lavalink
import ksoftapi
import discord
from discord.ext import commands
class Music(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.kclient = bot.kclient
if not hasattr(bot, 'lavalink'):
bot.lavalink = lavalink.Client(bot.user.id)
bot.lavalink.add_node('localhost', 1616, 'proto', 'in', 'default-node') # Host, Port, Password, Region, Name
bot.add_listener(bot.lavalink.voice_update_handler, 'on_socket_response')
lavalink.add_event_hook(self.track_hook)
def cog_unload(self):
""" Cog unload handler. This removes any event hooks that were registered. """
self.bot.lavalink._event_hooks.clear()
async def cog_command_error(self, ctx, error):
if isinstance(error, commands.CommandInvokeError):
await ctx.send(error.original)
async def track_hook(self, event):
if isinstance(event, lavalink.events.QueueEndEvent):
guild_id = int(event.player.guild_id)
await self.connect_to(guild_id, None)
await self.bot.change_presence(status=discord.Status.idle, activity=discord.Game(name="Nothing"))
async def cog_before_invoke(self, ctx):
""" Command before-invoke handler. """
guild_check = ctx.guild is not None
if guild_check:
await self.ensure_voice(ctx)
            # Ensure that the bot and command author share a mutual voice channel.
return guild_check
async def ensure_voice(self, ctx):
""" This check ensures that the bot and command author are in the same voicechannel. """
player = self.bot.lavalink.player_manager.create(ctx.guild.id, endpoint=str(ctx.guild.region))
should_connect = ctx.command.name in ('play',)
if not ctx.author.voice or not ctx.author.voice.channel:
raise commands.CommandInvokeError('Join a voice channel first :loud_sound:')
if not player.is_connected:
if not should_connect:
raise commands.CommandInvokeError('Not connected :mute:')
permissions = ctx.author.voice.channel.permissions_for(ctx.me)
if not permissions.connect or not permissions.speak: # Check user limit too?
raise commands.CommandInvokeError('I need the `CONNECT` and `SPEAK` permissions. :disappointed_relieved:')
player.store('channel', ctx.channel.id)
await self.connect_to(ctx.guild.id, str(ctx.author.voice.channel.id))
else:
if int(player.channel_id) != ctx.author.voice.channel.id:
raise commands.CommandInvokeError('You need to be in my voice channel :loud_sound:')
async def connect_to(self, guild_id: int, channel_id: str):
""" Connects to the given voicechannel ID. A channel_id of `None` means disconnect. """
ws = self.bot._connection._get_websocket(guild_id)
await ws.voice_state(str(guild_id), channel_id)
@commands.command(name='play', aliases=['p', 'sing'])
async def play(self, ctx, *, query):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
query = query.strip('<>')
if not query.startswith('http'):
query = f'ytsearch:{query}'
results = await player.node.get_tracks(query)
if not results or not results['tracks']:
return await ctx.send('Song not found :x: Please try again :mag_right:')
em = discord.Embed(colour=discord.Colour(0x59FFC8))
if results['loadType'] == 'PLAYLIST_LOADED':
tracks = results['tracks']
for track in tracks:
# Add all of the tracks from the playlist to the queue.
player.add(requester=ctx.author.id, track=track)
em.title = 'Playlist Enqueued!'
em.description = f'{results["playlistInfo"]["name"]} - {len(tracks)} tracks'
else:
track = results['tracks'][0]
em.title = 'Track Enqueued'
em.description = f'[{track["info"]["title"]}]({track["info"]["uri"]})'
em.set_thumbnail(url=f"http://i.ytimg.com/vi/{track['info']['identifier']}/hqdefault.jpg")
em.add_field(name='Channel', value=track['info']['author'])
if track['info']['isStream']:
duration = 'Live'
else:
duration = lavalink.format_time(track['info']['length']).lstrip('00:')
em.add_field(name='Duration', value=duration)
track = lavalink.models.AudioTrack(track, ctx.author.id, recommended=True)
player.add(requester=ctx.author.id, track=track)
msg = await ctx.send(embed=em)
if not player.is_playing:
await player.play()
await player.reset_equalizer()
await msg.delete(delay=1)
await self.now(ctx)
await self.bot.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=player.current.title))
@commands.command(name='seek')
async def seek(self, ctx, seconds=None):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('Not playing anything :mute:')
if not seconds:
return await ctx.send('You need to specify the amount of seconds to seek :fast_forward:')
try:
track_time = player.position + int(seconds) * 1000
await player.seek(track_time)
except ValueError:
return await ctx.send('Specify valid amount of seconds :clock3:')
await ctx.send(f'Moved track to **{lavalink.format_time(track_time)}**')
@commands.command(name='skip', aliases=['forceskip', 'fs', 'next'])
async def skip(self, ctx):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('Not playing anything :mute:')
await ctx.send('⏭ | Skipped.')
await player.skip()
@commands.command(name='now', aliases=['current', 'currentsong', 'playing', 'np'])
async def now(self, ctx):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
song = 'Nothing'
if player.current:
if player.current.stream:
dur = 'LIVE'
pos = ''
count = total = 1
else:
count = player.position
pos = lavalink.format_time(count)
total = player.current.duration
dur = lavalink.format_time(total)
                if pos == dur:  # When called immediately after enqueue
count = 0
pos = '00:00:00'
dur = dur.lstrip('00:')
pos = pos[-len(dur):]
bar_len = 30 # bar length
filled_len = int(bar_len * count // float(total))
bar = '═' * filled_len + '◈' + '─' * (bar_len - filled_len)
song = f'[{player.current.title}]({player.current.uri})\n`{pos} {bar} {dur}`'
em = discord.Embed(colour=discord.Colour(0x59FFC8), description=song)
em.set_author(name="Now Playing 🎵", icon_url="https://i.ibb.co/DGsmTvh/star.gif")
em.set_thumbnail(url=f"http://i.ytimg.com/vi/{player.current.identifier}/hqdefault.jpg")
requester = ctx.guild.get_member(player.current.requester)
em.set_footer(text=f"Requested by: {requester}", icon_url=requester.avatar_url)
await ctx.send(embed=em)
await self.bot.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=player.current.title))
else:
await ctx.send('Not playing anything :mute:')
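    # Worked example of the progress-bar arithmetic above (values assumed): at
    # count=30000 ms of total=120000 ms with bar_len=30, filled_len = int(30 * 30000
    # // 120000.0) = 7, i.e. seven filled segments, the marker, then 23 empty segments.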
@commands.command(name='save', aliases=['star'])
async def savetodm(self, ctx):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if player.current:
if player.current.stream:
dur = 'Live'
else:
dur = lavalink.format_time(player.current.duration).lstrip('00:')
song = f'[{player.current.title}]({player.current.uri})'
em = discord.Embed(colour=discord.Colour(0x59FFC8), description=song)
em.set_author(name="Now Playing 🎵", icon_url="https://i.ibb.co/DGsmTvh/star.gif")
em.set_thumbnail(url=f"http://i.ytimg.com/vi/{player.current.identifier}/hqdefault.jpg")
em.add_field(name='Channel', value=player.current.author)
em.add_field(name='Duration', value=dur)
user = ctx.author
await user.send(embed=em)
await ctx.send(f"Current song has been sent to you {ctx.author.mention} :floppy_disk:")
else:
await ctx.send('Not playing anything :mute:')
@commands.command(name='queue', aliases=['q', 'playlist'])
async def queue(self, ctx, page: int=1):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.queue:
return await ctx.send('Queue empty! Why not queue something? :cd:')
items_per_page = 10
pages = math.ceil(len(player.queue) / items_per_page)
start = (page - 1) * items_per_page
end = start + items_per_page
queue_list = ''
for i, track in enumerate(player.queue[start:end], start=start):
queue_list += f'`{i + 1}.` [**{track.title}**]({track.uri})\n'
embed = discord.Embed(colour=ctx.guild.me.top_role.colour,
description=f'**{len(player.queue)} tracks**\n\n{queue_list}')
embed.set_footer(text=f'Viewing page {page}/{pages}')
await ctx.send(embed=embed)
@commands.command(name='pause', aliases=['resume'])
async def pause(self, ctx):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('Not playing anything :mute:')
if player.paused:
await player.set_pause(False)
await ctx.message.add_reaction('▶')
else:
await player.set_pause(True)
await ctx.message.add_reaction('⏸')
@commands.command(name='volume', aliases=['vol'])
async def volume(self, ctx, volume: int=None):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not volume:
return await ctx.send(f'🔈 | {player.volume}%')
await player.set_volume(volume)
await ctx.send(f'🔈 | Set to {player.volume}%')
@commands.command(name='shuffle')
async def shuffle(self, ctx):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('Not playing anything :mute:')
player.shuffle = not player.shuffle
await ctx.send('🔀 | Shuffle ' + ('enabled' if player.shuffle else 'disabled'))
@commands.command(name='repeat')
async def repeat(self, ctx):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('Not playing anything :mute:')
player.repeat = not player.repeat
await ctx.send('🔁 | Repeat ' + ('enabled' if player.repeat else 'disabled'))
@commands.command(name='remove', aliases=['dequeue', 'pop'])
async def remove(self, ctx, index: int):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.queue:
return await ctx.send('Nothing queued :cd:')
if index > len(player.queue) or index < 1:
return await ctx.send('Index has to be >=1 and <=queue size')
index = index - 1
removed = player.queue.pop(index)
await ctx.send('Removed **' + removed.title + '** from the queue.')
@commands.command(name='disconnect', aliases=['dis', 'stop', 'leave'])
async def disconnect(self, ctx):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not ctx.author.voice or (player.is_connected and ctx.author.voice.channel.id != int(player.channel_id)):
return await ctx.send('You\'re not in my voice channel :loud_sound:')
if not player.is_connected:
return await ctx.send('Not connected :mute:')
player.queue.clear()
# Stop the current track so Lavalink consumes less resources.
await player.stop()
# Disconnect from the voice channel.
await self.connect_to(ctx.guild.id, None)
await ctx.send('Disconnected :mute:')
await self.bot.change_presence(status=discord.Status.idle, activity=discord.Game(name="Nothing"))
@commands.command(name='lyrics', aliases=['ly'])
async def get_lyrics(self, ctx, *, query: str=""):
"""Get lyrics of current song"""
if not query:
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('I\'m not currently playing anything :warning:')
query = player.current.title
try:
async with ctx.typing():
results = await self.kclient.music.lyrics(query, limit=1)
except ksoftapi.NoResults:
await ctx.send(f'No lyrics found for `{query}`')
else:
lyrics = results[0].lyrics
result = results[0]
embed = discord.Embed(title=f'{result.name} - {result.artist}', color=discord.Color(0xCCFF00), description=lyrics[:2048])
embed.set_thumbnail(url=result.album_art)
embed.set_author(name="Lyrics:")
lyrics = lyrics[2048:]
            embeds = [embed]  # build a list of embeds for long lyrics
while len(lyrics) > 0 and len(embeds) < 10: # limiting embeds to 10
embed = discord.Embed(color=discord.Color(0xCCFF00), description=lyrics[:2048])
lyrics = lyrics[len(embeds)*2048:]
embeds.append(embed)
embeds[-1].set_footer(text="Source: KSoft.Si") # set footer for last embed
for embed in embeds:
await ctx.send(embed=embed)
@commands.command(name='equalizer', aliases=['eq'])
async def equalizer(self, ctx, *args):
"""Equalizer"""
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if len(args) == 0:
await ctx.send('Specify `band gain` or `preset` to change frequencies :control_knobs:')
elif len(args) == 1:
presets ={
'reset': 'Default',
'bassboost': [0.08, 0.12, 0.2, 0.18, 0.15, 0.1, 0.05, 0.0, 0.02, -0.04, -0.06, -0.08, -0.10, -0.12, -0.14],
'jazz': [-0.13, -0.11, -0.1, -0.1, 0.14, 0.2, -0.18, 0.0, 0.24, 0.22, 0.2, 0.0, 0.0, 0.0, 0.0],
'pop': [-0.02, -0.01, 0.08, 0.1, 0.15, 0.1, 0.03, -0.02, -0.035, -0.05, -0.05, -0.05, -0.05, -0.05, -0.05],
'treble': [-0.1, -0.12, -0.12, -0.12, -0.08, -0.04, 0.0, 0.3, 0.34, 0.4, 0.35, 0.3, 0.3, 0.3, 0.3]
}
preset = args[0].lower()
if preset in ['reset', 'default']:
await player.reset_equalizer()
elif preset in presets:
gain_list = enumerate(presets[preset])
await player.set_gains(*gain_list)
elif preset == '--list':
em = discord.Embed(title=':control_knobs: EQ presets:', color=discord.Color(0xFF6EFF), description='\n'.join(presets.keys()))
return await ctx.send(embed=em)
else:
return await ctx.send('Invalid preset specified :control_knobs:\nType `~eq --list` for all presets')
elif len(args) == 2:
try:
band = int(args[0])
gain = float(args[1])
await player.set_gain(band, gain)
except ValueError:
return await ctx.send('Specify valid `band gain` values :control_knobs:')
else:
return await ctx.send('Specify `band gain` or `preset` :control_knobs:')
# Print final EQ settings
eq_frequencies = [f"`{gain}`" for gain in player.equalizer]
await ctx.send(":level_slider: Current Values:\n" + ' '.join(eq_frequencies))
def setup(bot):
bot.add_cog(Music(bot))
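# Usage sketch (module path assumed): with discord.py extensions, this cog is typically
# wired in from the main bot file via bot.load_extension('cogs.music'), which invokes
# setup(bot) above; a Lavalink node must be reachable at the host/port configured in
# Music.__init__ for playback to work.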
| 42.798956
| 141
| 0.596694
|
import math
import lavalink
import ksoftapi
import discord
from discord.ext import commands
class Music(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.kclient = bot.kclient
if not hasattr(bot, 'lavalink'):
bot.lavalink = lavalink.Client(bot.user.id)
bot.lavalink.add_node('localhost', 1616, 'proto', 'in', 'default-node')
bot.add_listener(bot.lavalink.voice_update_handler, 'on_socket_response')
lavalink.add_event_hook(self.track_hook)
def cog_unload(self):
self.bot.lavalink._event_hooks.clear()
async def cog_command_error(self, ctx, error):
if isinstance(error, commands.CommandInvokeError):
await ctx.send(error.original)
async def track_hook(self, event):
if isinstance(event, lavalink.events.QueueEndEvent):
guild_id = int(event.player.guild_id)
await self.connect_to(guild_id, None)
await self.bot.change_presence(status=discord.Status.idle, activity=discord.Game(name="Nothing"))
async def cog_before_invoke(self, ctx):
guild_check = ctx.guild is not None
if guild_check:
await self.ensure_voice(ctx)
return guild_check
async def ensure_voice(self, ctx):
player = self.bot.lavalink.player_manager.create(ctx.guild.id, endpoint=str(ctx.guild.region))
should_connect = ctx.command.name in ('play',)
if not ctx.author.voice or not ctx.author.voice.channel:
raise commands.CommandInvokeError('Join a voice channel first :loud_sound:')
if not player.is_connected:
if not should_connect:
raise commands.CommandInvokeError('Not connected :mute:')
permissions = ctx.author.voice.channel.permissions_for(ctx.me)
if not permissions.connect or not permissions.speak:
raise commands.CommandInvokeError('I need the `CONNECT` and `SPEAK` permissions. :disappointed_relieved:')
player.store('channel', ctx.channel.id)
await self.connect_to(ctx.guild.id, str(ctx.author.voice.channel.id))
else:
if int(player.channel_id) != ctx.author.voice.channel.id:
raise commands.CommandInvokeError('You need to be in my voice channel :loud_sound:')
async def connect_to(self, guild_id: int, channel_id: str):
ws = self.bot._connection._get_websocket(guild_id)
await ws.voice_state(str(guild_id), channel_id)
@commands.command(name='play', aliases=['p', 'sing'])
async def play(self, ctx, *, query):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
query = query.strip('<>')
if not query.startswith('http'):
query = f'ytsearch:{query}'
results = await player.node.get_tracks(query)
if not results or not results['tracks']:
return await ctx.send('Song not found :x: Please try again :mag_right:')
em = discord.Embed(colour=discord.Colour(0x59FFC8))
if results['loadType'] == 'PLAYLIST_LOADED':
tracks = results['tracks']
for track in tracks:
player.add(requester=ctx.author.id, track=track)
em.title = 'Playlist Enqueued!'
em.description = f'{results["playlistInfo"]["name"]} - {len(tracks)} tracks'
else:
track = results['tracks'][0]
em.title = 'Track Enqueued'
em.description = f'[{track["info"]["title"]}]({track["info"]["uri"]})'
em.set_thumbnail(url=f"http://i.ytimg.com/vi/{track['info']['identifier']}/hqdefault.jpg")
em.add_field(name='Channel', value=track['info']['author'])
if track['info']['isStream']:
duration = 'Live'
else:
duration = lavalink.format_time(track['info']['length']).lstrip('00:')
em.add_field(name='Duration', value=duration)
track = lavalink.models.AudioTrack(track, ctx.author.id, recommended=True)
player.add(requester=ctx.author.id, track=track)
msg = await ctx.send(embed=em)
if not player.is_playing:
await player.play()
await player.reset_equalizer()
await msg.delete(delay=1)
await self.now(ctx)
await self.bot.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=player.current.title))
@commands.command(name='seek')
async def seek(self, ctx, seconds=None):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('Not playing anything :mute:')
if not seconds:
return await ctx.send('You need to specify the amount of seconds to seek :fast_forward:')
try:
track_time = player.position + int(seconds) * 1000
await player.seek(track_time)
except ValueError:
return await ctx.send('Specify valid amount of seconds :clock3:')
await ctx.send(f'Moved track to **{lavalink.format_time(track_time)}**')
@commands.command(name='skip', aliases=['forceskip', 'fs', 'next'])
async def skip(self, ctx):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('Not playing anything :mute:')
await ctx.send('⏭ | Skipped.')
await player.skip()
@commands.command(name='now', aliases=['current', 'currentsong', 'playing', 'np'])
async def now(self, ctx):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
song = 'Nothing'
if player.current:
if player.current.stream:
dur = 'LIVE'
pos = ''
count = total = 1
else:
count = player.position
pos = lavalink.format_time(count)
total = player.current.duration
dur = lavalink.format_time(total)
if pos == dur:
count = 0
pos = '00:00:00'
dur = dur.lstrip('00:')
pos = pos[-len(dur):]
bar_len = 30
filled_len = int(bar_len * count // float(total))
bar = '═' * filled_len + '◈' + '─' * (bar_len - filled_len)
song = f'[{player.current.title}]({player.current.uri})\n`{pos} {bar} {dur}`'
em = discord.Embed(colour=discord.Colour(0x59FFC8), description=song)
em.set_author(name="Now Playing 🎵", icon_url="https://i.ibb.co/DGsmTvh/star.gif")
em.set_thumbnail(url=f"http://i.ytimg.com/vi/{player.current.identifier}/hqdefault.jpg")
requester = ctx.guild.get_member(player.current.requester)
em.set_footer(text=f"Requested by: {requester}", icon_url=requester.avatar_url)
await ctx.send(embed=em)
await self.bot.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=player.current.title))
else:
await ctx.send('Not playing anything :mute:')
@commands.command(name='save', aliases=['star'])
async def savetodm(self, ctx):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if player.current:
if player.current.stream:
dur = 'Live'
else:
dur = lavalink.format_time(player.current.duration).lstrip('00:')
song = f'[{player.current.title}]({player.current.uri})'
em = discord.Embed(colour=discord.Colour(0x59FFC8), description=song)
em.set_author(name="Now Playing 🎵", icon_url="https://i.ibb.co/DGsmTvh/star.gif")
em.set_thumbnail(url=f"http://i.ytimg.com/vi/{player.current.identifier}/hqdefault.jpg")
em.add_field(name='Channel', value=player.current.author)
em.add_field(name='Duration', value=dur)
user = ctx.author
await user.send(embed=em)
await ctx.send(f"Current song has been sent to you {ctx.author.mention} :floppy_disk:")
else:
await ctx.send('Not playing anything :mute:')
@commands.command(name='queue', aliases=['q', 'playlist'])
async def queue(self, ctx, page: int=1):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.queue:
return await ctx.send('Queue empty! Why not queue something? :cd:')
items_per_page = 10
pages = math.ceil(len(player.queue) / items_per_page)
start = (page - 1) * items_per_page
end = start + items_per_page
queue_list = ''
for i, track in enumerate(player.queue[start:end], start=start):
queue_list += f'`{i + 1}.` [**{track.title}**]({track.uri})\n'
embed = discord.Embed(colour=ctx.guild.me.top_role.colour,
description=f'**{len(player.queue)} tracks**\n\n{queue_list}')
embed.set_footer(text=f'Viewing page {page}/{pages}')
await ctx.send(embed=embed)
@commands.command(name='pause', aliases=['resume'])
async def pause(self, ctx):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('Not playing anything :mute:')
if player.paused:
await player.set_pause(False)
await ctx.message.add_reaction('▶')
else:
await player.set_pause(True)
await ctx.message.add_reaction('⏸')
@commands.command(name='volume', aliases=['vol'])
async def volume(self, ctx, volume: int=None):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not volume:
return await ctx.send(f'🔈 | {player.volume}%')
await player.set_volume(volume)
await ctx.send(f'🔈 | Set to {player.volume}%')
@commands.command(name='shuffle')
async def shuffle(self, ctx):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('Not playing anything :mute:')
player.shuffle = not player.shuffle
await ctx.send('🔀 | Shuffle ' + ('enabled' if player.shuffle else 'disabled'))
@commands.command(name='repeat')
async def repeat(self, ctx):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('Not playing anything :mute:')
player.repeat = not player.repeat
await ctx.send('🔁 | Repeat ' + ('enabled' if player.repeat else 'disabled'))
@commands.command(name='remove', aliases=['dequeue', 'pop'])
async def remove(self, ctx, index: int):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.queue:
return await ctx.send('Nothing queued :cd:')
if index > len(player.queue) or index < 1:
return await ctx.send('Index has to be >=1 and <=queue size')
index = index - 1
removed = player.queue.pop(index)
await ctx.send('Removed **' + removed.title + '** from the queue.')
@commands.command(name='disconnect', aliases=['dis', 'stop', 'leave'])
async def disconnect(self, ctx):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not ctx.author.voice or (player.is_connected and ctx.author.voice.channel.id != int(player.channel_id)):
return await ctx.send('You\'re not in my voice channel :loud_sound:')
if not player.is_connected:
return await ctx.send('Not connected :mute:')
player.queue.clear()
# Stop the current track so Lavalink consumes less resources.
await player.stop()
# Disconnect from the voice channel.
await self.connect_to(ctx.guild.id, None)
await ctx.send('Disconnected :mute:')
await self.bot.change_presence(status=discord.Status.idle, activity=discord.Game(name="Nothing"))
@commands.command(name='lyrics', aliases=['ly'])
async def get_lyrics(self, ctx, *, query: str=""):
if not query:
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('I\'m not currently playing anything :warning:')
query = player.current.title
try:
async with ctx.typing():
results = await self.kclient.music.lyrics(query, limit=1)
except ksoftapi.NoResults:
await ctx.send(f'No lyrics found for `{query}`')
else:
lyrics = results[0].lyrics
result = results[0]
embed = discord.Embed(title=f'{result.name} - {result.artist}', color=discord.Color(0xCCFF00), description=lyrics[:2048])
embed.set_thumbnail(url=result.album_art)
embed.set_author(name="Lyrics:")
lyrics = lyrics[2048:]
embeds = [embed]
while len(lyrics) > 0 and len(embeds) < 10: # limiting embeds to 10
embed = discord.Embed(color=discord.Color(0xCCFF00), description=lyrics[:2048])
lyrics = lyrics[len(embeds)*2048:]
embeds.append(embed)
embeds[-1].set_footer(text="Source: KSoft.Si") # set footer for last embed
for embed in embeds:
await ctx.send(embed=embed)
@commands.command(name='equalizer', aliases=['eq'])
async def equalizer(self, ctx, *args):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if len(args) == 0:
await ctx.send('Specify `band gain` or `preset` to change frequencies :control_knobs:')
elif len(args) == 1:
presets ={
'reset': 'Default',
'bassboost': [0.08, 0.12, 0.2, 0.18, 0.15, 0.1, 0.05, 0.0, 0.02, -0.04, -0.06, -0.08, -0.10, -0.12, -0.14],
'jazz': [-0.13, -0.11, -0.1, -0.1, 0.14, 0.2, -0.18, 0.0, 0.24, 0.22, 0.2, 0.0, 0.0, 0.0, 0.0],
'pop': [-0.02, -0.01, 0.08, 0.1, 0.15, 0.1, 0.03, -0.02, -0.035, -0.05, -0.05, -0.05, -0.05, -0.05, -0.05],
'treble': [-0.1, -0.12, -0.12, -0.12, -0.08, -0.04, 0.0, 0.3, 0.34, 0.4, 0.35, 0.3, 0.3, 0.3, 0.3]
}
preset = args[0].lower()
if preset in ['reset', 'default']:
await player.reset_equalizer()
elif preset in presets:
gain_list = enumerate(presets[preset])
await player.set_gains(*gain_list)
elif preset == '--list':
em = discord.Embed(title=':control_knobs: EQ presets:', color=discord.Color(0xFF6EFF), description='\n'.join(presets.keys()))
return await ctx.send(embed=em)
else:
return await ctx.send('Invalid preset specified :control_knobs:\nType `~eq --list` for all presets')
elif len(args) == 2:
try:
band = int(args[0])
gain = float(args[1])
await player.set_gain(band, gain)
except ValueError:
return await ctx.send('Specify valid `band gain` values :control_knobs:')
else:
return await ctx.send('Specify `band gain` or `preset` :control_knobs:')
# Print final EQ settings
eq_frequencies = [f"`{gain}`" for gain in player.equalizer]
await ctx.send(":level_slider: Current Values:\n" + ' '.join(eq_frequencies))
def setup(bot):
bot.add_cog(Music(bot))
| true
| true
|
790948b2ebce8cfa83196475fbc1ece91c2f3a2b
| 2,529
|
py
|
Python
|
django_pages/dashboard.py
|
lunemec/django-pages
|
caed40f9275919b81417924550e7bcfdc7c5ffbf
|
[
"BSD-3-Clause"
] | 3
|
2015-11-24T02:30:48.000Z
|
2018-11-01T10:10:24.000Z
|
django_pages/dashboard.py
|
lunemec/django-pages
|
caed40f9275919b81417924550e7bcfdc7c5ffbf
|
[
"BSD-3-Clause"
] | 1
|
2015-04-18T16:37:36.000Z
|
2015-04-18T16:37:36.000Z
|
django_pages/dashboard.py
|
lunemec/django-pages
|
caed40f9275919b81417924550e7bcfdc7c5ffbf
|
[
"BSD-3-Clause"
] | 2
|
2015-11-24T02:01:00.000Z
|
2019-04-09T15:33:56.000Z
|
# -*- encoding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from grappelli.dashboard import modules, Dashboard
from grappelli.dashboard.utils import get_admin_site_name
class DjangoPagesDashboard(Dashboard):
"""
Custom index dashboard for Django-pages
"""
def init_with_context(self, context):
site_name = get_admin_site_name(context)
self.children.append(
modules.ModelList(
_('General'),
column=1,
collapsible=True,
models=(
'django_pages.site.models.Site',
'django_pages.site.models.Script',
'django_pages.language.models.*',
'django_pages.looks.models.*',
'django_pages.feed.models.*'
),
)
)
self.children.append(
modules.ModelList(
_('Pages'),
column=1,
collapsible=True,
models=('django_pages.pages.models.*', )
)
)
self.children.append(
modules.ModelList(
_('Menu'),
column=2,
collapsible=True,
models=('django_pages.menu.models.*', )
)
)
self.children.append(
modules.ModelList(
_('Comments'),
column=2,
collapsible=True,
models=('django_pages.comments.models.*', )
)
)
self.children.append(
modules.ModelList(
_('SEO'),
column=2,
collapsible=True,
models=('django_pages.metadata.models.*', )
)
)
self.children.append(
modules.AppList(
_('Administration'),
column=1,
collapsible=False,
models=('django.contrib.*', )
)
)
self.children.append(modules.LinkList(
_('File Management'),
column=3,
children=[
{
'title': _('File Browser'),
'url': '/admin/filebrowser/browse/',
'external': False,
},
]
))
self.children.append(modules.RecentActions(
_('Recent Actions'),
limit=5,
collapsible=False,
column=3,
))
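# Activation sketch (project path assumed): Grappelli picks this dashboard up when
# settings.py points at it, e.g.
#
#     GRAPPELLI_INDEX_DASHBOARD = 'django_pages.dashboard.DjangoPagesDashboard'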
| 26.621053
| 59
| 0.451562
|
from django.utils.translation import ugettext_lazy as _
from grappelli.dashboard import modules, Dashboard
from grappelli.dashboard.utils import get_admin_site_name
class DjangoPagesDashboard(Dashboard):
def init_with_context(self, context):
site_name = get_admin_site_name(context)
self.children.append(
modules.ModelList(
_('General'),
column=1,
collapsible=True,
models=(
'django_pages.site.models.Site',
'django_pages.site.models.Script',
'django_pages.language.models.*',
'django_pages.looks.models.*',
'django_pages.feed.models.*'
),
)
)
self.children.append(
modules.ModelList(
_('Pages'),
column=1,
collapsible=True,
models=('django_pages.pages.models.*', )
)
)
self.children.append(
modules.ModelList(
_('Menu'),
column=2,
collapsible=True,
models=('django_pages.menu.models.*', )
)
)
self.children.append(
modules.ModelList(
_('Comments'),
column=2,
collapsible=True,
models=('django_pages.comments.models.*', )
)
)
self.children.append(
modules.ModelList(
_('SEO'),
column=2,
collapsible=True,
models=('django_pages.metadata.models.*', )
)
)
self.children.append(
modules.AppList(
_('Administration'),
column=1,
collapsible=False,
models=('django.contrib.*', )
)
)
self.children.append(modules.LinkList(
_('File Management'),
column=3,
children=[
{
'title': _('File Browser'),
'url': '/admin/filebrowser/browse/',
'external': False,
},
]
))
self.children.append(modules.RecentActions(
_('Recent Actions'),
limit=5,
collapsible=False,
column=3,
))
| true
| true
|
790948f12fb2c097c5f715da10b669a2a2549d97
| 1,377
|
py
|
Python
|
encoding/functions/rectify.py
|
Womcos/SCARF
|
b90251bc23410cb810a7082ca75147a7aae21dec
|
[
"MIT"
] | 1
|
2021-04-06T11:29:04.000Z
|
2021-04-06T11:29:04.000Z
|
encoding/functions/rectify.py
|
Womcos/SCARF
|
b90251bc23410cb810a7082ca75147a7aae21dec
|
[
"MIT"
] | null | null | null |
encoding/functions/rectify.py
|
Womcos/SCARF
|
b90251bc23410cb810a7082ca75147a7aae21dec
|
[
"MIT"
] | 1
|
2021-04-06T08:41:12.000Z
|
2021-04-06T08:41:12.000Z
|
"""Rectify function"""
import torch
from torch.autograd import Function
from encoding import cpu
if torch.cuda.device_count() > 0:
from encoding import gpu
__all__ = ['rectify']
class _rectify(Function):
@staticmethod
def forward(ctx, y, x, kernel_size, stride, padding, dilation, average):
ctx.save_for_backward(x)
# assuming kernel_size is 3
kernel_size = [k + 2 * (d - 1) for k,d in zip(kernel_size, dilation)]
ctx.kernel_size = kernel_size
ctx.stride = stride
ctx.padding = padding
ctx.dilation = dilation
ctx.average = average
if x.is_cuda:
gpu.conv_rectify(y, x, kernel_size, stride, padding, dilation, average)
else:
cpu.conv_rectify(y, x, kernel_size, stride, padding, dilation, average)
ctx.mark_dirty(y)
return y
@staticmethod
def backward(ctx, grad_y):
x, = ctx.saved_variables
if x.is_cuda:
gpu.conv_rectify(grad_y, x, ctx.kernel_size, ctx.stride,
ctx.padding, ctx.dilation, ctx.average)
else:
cpu.conv_rectify(grad_y, x, ctx.kernel_size, ctx.stride,
ctx.padding, ctx.dilation, ctx.average)
ctx.mark_dirty(grad_y)
return grad_y, None, None, None, None, None, None
rectify = _rectify.apply
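# Note on the kernel-size adjustment in forward(): the effective extent of a dilated
# kernel is k + 2 * (d - 1), so (values assumed for illustration) kernel_size=[3, 3]
# with dilation=[2, 2] is rectified as an effective [5, 5] window.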
| 32.023256
| 83
| 0.6122
|
import torch
from torch.autograd import Function
from encoding import cpu
if torch.cuda.device_count() > 0:
from encoding import gpu
__all__ = ['rectify']
class _rectify(Function):
@staticmethod
def forward(ctx, y, x, kernel_size, stride, padding, dilation, average):
ctx.save_for_backward(x)
kernel_size = [k + 2 * (d - 1) for k,d in zip(kernel_size, dilation)]
ctx.kernel_size = kernel_size
ctx.stride = stride
ctx.padding = padding
ctx.dilation = dilation
ctx.average = average
if x.is_cuda:
gpu.conv_rectify(y, x, kernel_size, stride, padding, dilation, average)
else:
cpu.conv_rectify(y, x, kernel_size, stride, padding, dilation, average)
ctx.mark_dirty(y)
return y
@staticmethod
def backward(ctx, grad_y):
x, = ctx.saved_variables
if x.is_cuda:
gpu.conv_rectify(grad_y, x, ctx.kernel_size, ctx.stride,
ctx.padding, ctx.dilation, ctx.average)
else:
cpu.conv_rectify(grad_y, x, ctx.kernel_size, ctx.stride,
ctx.padding, ctx.dilation, ctx.average)
ctx.mark_dirty(grad_y)
return grad_y, None, None, None, None, None, None
rectify = _rectify.apply
| true
| true
|
79094900117045887ff35ab105c9d4f49981c999
| 221
|
py
|
Python
|
primeiros-exercicios/lpc002.py
|
miguelsndc/PythonFirstLooks
|
1b4a1b4feaf3638fb4304ca0c42d332a64cab478
|
[
"MIT"
] | 1
|
2020-10-30T12:57:38.000Z
|
2020-10-30T12:57:38.000Z
|
primeiros-exercicios/lpc002.py
|
miguelsndc/python
|
1b4a1b4feaf3638fb4304ca0c42d332a64cab478
|
[
"MIT"
] | null | null | null |
primeiros-exercicios/lpc002.py
|
miguelsndc/python
|
1b4a1b4feaf3638fb4304ca0c42d332a64cab478
|
[
"MIT"
] | null | null | null |
h = input('Digite algo: ')
print(type(h))
print('É alfanumérico?',h.isalnum())
print('É decimal?',h.isdecimal())
print('É maiúsculo?',h.isupper())
print('É minúsculo?',h.islower())
print('É imprimível?',h.isprintable())
| 24.555556
| 38
| 0.674208
|
h = input('Digite algo: ')
print(type(h))
print('É alfanumérico?',h.isalnum())
print('É decimal?',h.isdecimal())
print('É maiúsculo?',h.isupper())
print('É minúsculo?',h.islower())
print('É imprimível?',h.isprintable())
| true
| true
|
790949c1413a85ef6f95a0a35c479129b871ae5c
| 13,031
|
py
|
Python
|
scripts/ComunityDesign/taxonomicprofile.py
|
fplaza/CAMISIM
|
4f2ab5e94773a355210568be946e732df7437cb6
|
[
"Apache-2.0"
] | null | null | null |
scripts/ComunityDesign/taxonomicprofile.py
|
fplaza/CAMISIM
|
4f2ab5e94773a355210568be946e732df7437cb6
|
[
"Apache-2.0"
] | null | null | null |
scripts/ComunityDesign/taxonomicprofile.py
|
fplaza/CAMISIM
|
4f2ab5e94773a355210568be946e732df7437cb6
|
[
"Apache-2.0"
] | null | null | null |
__author__ = 'hofmann'
__version__ = '0.0.2.1'
import os
from scripts.MetaDataTable.metadatatable import MetadataTable
from scripts.NcbiTaxonomy.ncbitaxonomy import NcbiTaxonomy
from scripts.Validator.validator import Validator
class TaxonomicProfile(Validator):
"""
Constructing taxonomic profiles from files with relative abundances.
"""
_taxonomic_profile_version = "0.9.1"
def __init__(self, taxonomy, logfile=None, verbose=True, debug=False):
"""
@param taxonomy: taxonomy handler
@type taxonomy: NcbiTaxonomy
@param logfile: file handler or file path to a log file
@type logfile: file | FileIO | StringIO | str
        @param verbose: Not verbose means that only warnings and errors will be passed to the stream
@type verbose: bool
@param debug: Display debug messages
@type debug: bool
"""
super(TaxonomicProfile, self).__init__(label="TaxonomicProfile", logfile=logfile, verbose=verbose, debug=debug)
self._ranks = ['superkingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species', 'strain']
assert isinstance(taxonomy, NcbiTaxonomy)
self._taxonomy = taxonomy
self._filename_taxonomic_profile = "taxonomic_profile_{sample_index}.txt"
def write_taxonomic_profile_from_abundance_files(
self, metadata_table, list_of_file_paths, directory_output, sample_id=""):
"""
Write a taxonomic profile file for each relative abundance file
@param metadata_table: Contains metadata of all communities
@type metadata_table: MetadataTable
@param list_of_file_paths: List of abundance file paths
@type list_of_file_paths: list[str | unicode]
@param directory_output: Profiles are written in this directory
@type directory_output: str | unicode
@param sample_id: Identifier of a sample
@type sample_id: str | unicode
"""
metadata_table_tmp = MetadataTable(logfile=self._logfile, verbose=self._verbose)
for index_abundance, file_path in enumerate(list_of_file_paths):
community_abundance = metadata_table_tmp.parse_file(file_path, column_names=False)
file_path_output = os.path.join(directory_output, self._filename_taxonomic_profile.format(
sample_index=index_abundance))
with open(file_path_output, 'w') as stream_output:
self.write_taxonomic_profile(
community_abundance,
stream_output,
metadata_table,
sample_id)
def write_taxonomic_profile(self, community_abundance, stream_output, metadata_table, sample_id=""):
"""
Stream a taxonomic profile by list of relative abundances
@param community_abundance: list of relative abundances
@type community_abundance: generator[ dict[int|long|str|unicode, str|unicode] ]
@param stream_output: Output of taxonomic profile
@type stream_output: file | FileIO | StringIO
@param metadata_table: Contains metadata of all communities
@type metadata_table: MetadataTable
@param sample_id: Identifier of a sample
@type sample_id: str | unicode
"""
assert isinstance(metadata_table, MetadataTable)
genome_abundance = {}
total_abundance = 0.0
# for community in community_abundance:
# all_communities += community
for genome_id, abundance in community_abundance:
if genome_id in genome_abundance:
raise IOError("genome id '{}' is not unique!".format(genome_id))
genome_abundance[genome_id] = float(abundance) # *float(total_length)
total_abundance += genome_abundance[genome_id]
for key, value in genome_abundance.items():
genome_abundance[key] = value / total_abundance
self._stream_taxonomic_profile(stream_output, genome_abundance, metadata_table, sample_id)
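    # Normalization sketch (abundances assumed): write_taxonomic_profile rescales the
    # parsed relative abundances so they sum to 1 before streaming, e.g.
    #     {'g1': 2.0, 'g2': 6.0}  ->  {'g1': 0.25, 'g2': 0.75}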
def _stream_taxonomic_profile(self, stream_output, genome_id_to_percent, metadata_table, sample_id=""):
"""
Stream a taxonomic profile by list of percentages by genome id
@param stream_output: Output of taxonomic profile
@type stream_output: file | FileIO | StringIO
@param genome_id_to_percent: Percentage for each genome id
@type genome_id_to_percent: dict[str|unicode, float]
@param metadata_table: Contains metadata of all communities
@type metadata_table: MetadataTable
@param sample_id: Identifier of a sample
@type sample_id: str | unicode
"""
strain_id_to_genome_id = {}
genome_id_to_strain_id = {}
genome_id_to_taxid = metadata_table.get_map(key_column_name="genome_ID", value_column_name="NCBI_ID")
genome_id_to_otu = metadata_table.get_map(key_column_name="genome_ID", value_column_name="OTU")
column_genome_id = metadata_table.get_column("genome_ID")
if not metadata_table.has_column("strain_id"):
column_strain_id = metadata_table.get_empty_column()
else:
column_strain_id = metadata_table.get_column("strain_id")
genome_id_to_strain_id = metadata_table.get_map(key_column_name="genome_ID", value_column_name="strain_id")
genome_id_to_lineage = self._get_genome_id_to_lineage(
genome_id_to_percent.keys(), genome_id_to_taxid, strain_id_to_genome_id, genome_id_to_strain_id)
percent_by_rank_by_taxid = self._get_percent_by_rank_by_taxid(genome_id_to_lineage, genome_id_to_percent)
# add strain_id to metadata
#for row_index, genome_id in enumerate(column_genome_id):
# column_strain_id[row_index] = genome_id_to_strain_id[genome_id]
#assert len(column_strain_id) == len(set(column_strain_id))
#metadata_table.insert_column(column_strain_id, "strain_id")
# stream taxonomic profile
self._stream_tp_header(stream_output, sample_id)
self._stream_tp_rows(stream_output, percent_by_rank_by_taxid, strain_id_to_genome_id, genome_id_to_otu)
def _get_genome_id_to_lineage(
self, list_of_genome_id, genome_id_to_taxid, strain_id_to_genome_id, genome_id_to_strain_id):
"""
        Returns the lineage for each genome id, assigning a new strain id if not available
@param list_of_genome_id: List of identifier of genomes
@type list_of_genome_id: list[str|unicode]
@param genome_id_to_taxid: Assigned taxid for each genome id
@type genome_id_to_taxid: dict[str|unicode, str|unicode]
@param strain_id_to_genome_id: Mapping from strain id to genome id
@type strain_id_to_genome_id: dict[str|unicode, str|unicode]
@param genome_id_to_strain_id: Mapping from genome id to strain id
@type genome_id_to_strain_id: dict[str|unicode, str|unicode]
@return: lineage for each genome id using genome id as key
@rtype: dict[str|unicode, list[None|str|unicode]]
"""
strains_by_taxid = {}
genome_id_to_lineage = {}
for genome_id in list_of_genome_id:
tax_id = genome_id_to_taxid[genome_id]
if tax_id == "":
raise KeyError("genome_ID '{}' has no taxid!".format(genome_id))
tax_id = self._taxonomy.get_updated_taxid(tax_id)
genome_id_to_lineage[genome_id] = self._taxonomy.get_lineage_of_legal_ranks(
tax_id, ranks=self._ranks, default_value=None)
if genome_id_to_lineage[genome_id][-1] is not None:
continue
if tax_id not in strains_by_taxid:
strains_by_taxid[tax_id] = 0
strains_by_taxid[tax_id] += 1
if genome_id in genome_id_to_strain_id and genome_id_to_strain_id[genome_id]:
strain_id = genome_id_to_strain_id[genome_id]
else:
strain_id = "{}.{}".format(tax_id, strains_by_taxid[tax_id])
                # make sure assigned strain ids are unique, in case of previously assigned ids
while strain_id in genome_id_to_strain_id.values():
strains_by_taxid[tax_id] += 1
strain_id = "{}.{}".format(tax_id, strains_by_taxid[tax_id])
genome_id_to_strain_id[genome_id] = strain_id
genome_id_to_lineage[genome_id][-1] = strain_id
strain_id_to_genome_id[strain_id] = genome_id
return genome_id_to_lineage
def _get_percent_by_rank_by_taxid(self, genome_id_to_lineage, genome_id_to_percent):
"""
Return the percentage for each taxid of a list of default ranks
@param genome_id_to_lineage: Mapping from genome id to a lineage (list)
@type genome_id_to_lineage: dict[str|unicode, list[None|str|unicode]]
@param genome_id_to_percent: Mapping from genome id to percentage
@type genome_id_to_percent: dict[str|unicode, float]
@return: Percentage for each taxid of a list of default ranks as dictionary of dictionaries
@rtype: dict[str|unicode, dict[str|unicode, float]]
"""
percent_by_rank_by_taxid = {}
for rank in self._ranks:
percent_by_rank_by_taxid[rank] = dict()
for rank_index, rank in enumerate(self._ranks):
# rank = ranks[rank_index]
for genome_id in genome_id_to_lineage:
tax_id = genome_id_to_lineage[genome_id][rank_index]
if tax_id is None:
continue
percent = genome_id_to_percent[genome_id]
if tax_id not in percent_by_rank_by_taxid[rank]:
percent_by_rank_by_taxid[rank][tax_id] = 0
percent_by_rank_by_taxid[rank][tax_id] += percent
return percent_by_rank_by_taxid
def _stream_tp_rows(self, stream_output, percent_by_rank_by_taxid, strain_id_to_genome_id, genome_id_to_otu):
"""
Stream the rows of the taxonomic profile.
@param stream_output: Output of taxonomic profile
@type stream_output: file | FileIO | StringIO
@param percent_by_rank_by_taxid: Percentage for each taxid of a list of default ranks as dictionary of dictionaries
@type percent_by_rank_by_taxid: dict[str|unicode, dict[str|unicode, float]]
@param strain_id_to_genome_id: Map from strain id to a genome identifier
@type strain_id_to_genome_id: dict[str|unicode, str|unicode]
@param genome_id_to_otu: Map from genome id to an otu identifier
@type genome_id_to_otu: dict[str|unicode, str|unicode]
"""
row_format = "{taxid}\t{rank}\t{taxpath}\t{taxpath_sn}\t{abp:.4f}\t{gid}\t{otu}\n"
for rank_index, rank in enumerate(self._ranks):
for tax_id in percent_by_rank_by_taxid[rank]:
if tax_id == '':
self._logger.warning("Missing rank %s for a genome" % rank)
continue
if '.' in tax_id:
genome_id = strain_id_to_genome_id[tax_id]
otu = genome_id_to_otu[genome_id]
lineage = self._taxonomy.get_lineage_of_legal_ranks(tax_id.split('.')[0], ranks=self._ranks, default_value="")
lineage[-1] = tax_id
else:
genome_id = ""
otu = ""
lineage = self._taxonomy.get_lineage_of_legal_ranks(tax_id, ranks=self._ranks, default_value="")
lineage = lineage[:rank_index+1]
lineage_sn = [self._taxonomy.get_scientific_name(tid) if tid != "" and '.' not in tid else "" for tid in lineage]
if '.' in tax_id:
lineage_sn[-1] = self._taxonomy.get_scientific_name(tax_id.split('.')[0]) + " strain" # ""
if percent_by_rank_by_taxid[rank][tax_id] != 0:
stream_output.write(row_format.format(
taxid=tax_id,
rank=rank,
taxpath="|".join(lineage),
taxpath_sn="|".join(lineage_sn),
abp=percent_by_rank_by_taxid[rank][tax_id]*100,
gid=genome_id,
otu=otu
))
def _stream_tp_header(self, output_stream, identifier):
"""
Stream the header of the taxonomic profile.
@param output_stream: Output of taxonomic profile
@type output_stream: file | FileIO | StringIO
@param identifier: Identifier of a sample
@type identifier: str | unicode
"""
output_stream.write("@SampleID:{}\n".format(identifier))
output_stream.write("@Version:{}\n".format(self._taxonomic_profile_version))
output_stream.write("@Ranks:{ranks}\n\n".format(ranks="|".join(self._ranks)))
output_stream.write("@@TAXID\tRANK\tTAXPATH\tTAXPATHSN\tPERCENTAGE\t_CAMI_genomeID\t_CAMI_OTU\n")
| 49.359848
| 130
| 0.659197
|
__author__ = 'hofmann'
__version__ = '0.0.2.1'
import os
from scripts.MetaDataTable.metadatatable import MetadataTable
from scripts.NcbiTaxonomy.ncbitaxonomy import NcbiTaxonomy
from scripts.Validator.validator import Validator
class TaxonomicProfile(Validator):
_taxonomic_profile_version = "0.9.1"
def __init__(self, taxonomy, logfile=None, verbose=True, debug=False):
super(TaxonomicProfile, self).__init__(label="TaxonomicProfile", logfile=logfile, verbose=verbose, debug=debug)
self._ranks = ['superkingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species', 'strain']
assert isinstance(taxonomy, NcbiTaxonomy)
self._taxonomy = taxonomy
self._filename_taxonomic_profile = "taxonomic_profile_{sample_index}.txt"
def write_taxonomic_profile_from_abundance_files(
self, metadata_table, list_of_file_paths, directory_output, sample_id=""):
metadata_table_tmp = MetadataTable(logfile=self._logfile, verbose=self._verbose)
for index_abundance, file_path in enumerate(list_of_file_paths):
community_abundance = metadata_table_tmp.parse_file(file_path, column_names=False)
file_path_output = os.path.join(directory_output, self._filename_taxonomic_profile.format(
sample_index=index_abundance))
with open(file_path_output, 'w') as stream_output:
self.write_taxonomic_profile(
community_abundance,
stream_output,
metadata_table,
sample_id)
def write_taxonomic_profile(self, community_abundance, stream_output, metadata_table, sample_id=""):
assert isinstance(metadata_table, MetadataTable)
genome_abundance = {}
total_abundance = 0.0
for genome_id, abundance in community_abundance:
if genome_id in genome_abundance:
raise IOError("genome id '{}' is not unique!".format(genome_id))
genome_abundance[genome_id] = float(abundance)
total_abundance += genome_abundance[genome_id]
for key, value in genome_abundance.items():
genome_abundance[key] = value / total_abundance
self._stream_taxonomic_profile(stream_output, genome_abundance, metadata_table, sample_id)
def _stream_taxonomic_profile(self, stream_output, genome_id_to_percent, metadata_table, sample_id=""):
strain_id_to_genome_id = {}
genome_id_to_strain_id = {}
genome_id_to_taxid = metadata_table.get_map(key_column_name="genome_ID", value_column_name="NCBI_ID")
genome_id_to_otu = metadata_table.get_map(key_column_name="genome_ID", value_column_name="OTU")
column_genome_id = metadata_table.get_column("genome_ID")
if not metadata_table.has_column("strain_id"):
column_strain_id = metadata_table.get_empty_column()
else:
column_strain_id = metadata_table.get_column("strain_id")
genome_id_to_strain_id = metadata_table.get_map(key_column_name="genome_ID", value_column_name="strain_id")
genome_id_to_lineage = self._get_genome_id_to_lineage(
genome_id_to_percent.keys(), genome_id_to_taxid, strain_id_to_genome_id, genome_id_to_strain_id)
percent_by_rank_by_taxid = self._get_percent_by_rank_by_taxid(genome_id_to_lineage, genome_id_to_percent)
self._stream_tp_header(stream_output, sample_id)
self._stream_tp_rows(stream_output, percent_by_rank_by_taxid, strain_id_to_genome_id, genome_id_to_otu)
def _get_genome_id_to_lineage(
self, list_of_genome_id, genome_id_to_taxid, strain_id_to_genome_id, genome_id_to_strain_id):
strains_by_taxid = {}
genome_id_to_lineage = {}
for genome_id in list_of_genome_id:
tax_id = genome_id_to_taxid[genome_id]
if tax_id == "":
raise KeyError("genome_ID '{}' has no taxid!".format(genome_id))
tax_id = self._taxonomy.get_updated_taxid(tax_id)
genome_id_to_lineage[genome_id] = self._taxonomy.get_lineage_of_legal_ranks(
tax_id, ranks=self._ranks, default_value=None)
if genome_id_to_lineage[genome_id][-1] is not None:
continue
if tax_id not in strains_by_taxid:
strains_by_taxid[tax_id] = 0
strains_by_taxid[tax_id] += 1
if genome_id in genome_id_to_strain_id and genome_id_to_strain_id[genome_id]:
strain_id = genome_id_to_strain_id[genome_id]
else:
strain_id = "{}.{}".format(tax_id, strains_by_taxid[tax_id])
while strain_id in genome_id_to_strain_id.values():
strains_by_taxid[tax_id] += 1
strain_id = "{}.{}".format(tax_id, strains_by_taxid[tax_id])
genome_id_to_strain_id[genome_id] = strain_id
genome_id_to_lineage[genome_id][-1] = strain_id
strain_id_to_genome_id[strain_id] = genome_id
return genome_id_to_lineage
def _get_percent_by_rank_by_taxid(self, genome_id_to_lineage, genome_id_to_percent):
percent_by_rank_by_taxid = {}
for rank in self._ranks:
percent_by_rank_by_taxid[rank] = dict()
for rank_index, rank in enumerate(self._ranks):
for genome_id in genome_id_to_lineage:
tax_id = genome_id_to_lineage[genome_id][rank_index]
if tax_id is None:
continue
percent = genome_id_to_percent[genome_id]
if tax_id not in percent_by_rank_by_taxid[rank]:
percent_by_rank_by_taxid[rank][tax_id] = 0
percent_by_rank_by_taxid[rank][tax_id] += percent
return percent_by_rank_by_taxid
def _stream_tp_rows(self, stream_output, percent_by_rank_by_taxid, strain_id_to_genome_id, genome_id_to_otu):
row_format = "{taxid}\t{rank}\t{taxpath}\t{taxpath_sn}\t{abp:.4f}\t{gid}\t{otu}\n"
for rank_index, rank in enumerate(self._ranks):
for tax_id in percent_by_rank_by_taxid[rank]:
if tax_id == '':
self._logger.warning("Missing rank %s for a genome" % rank)
continue
if '.' in tax_id:
genome_id = strain_id_to_genome_id[tax_id]
otu = genome_id_to_otu[genome_id]
lineage = self._taxonomy.get_lineage_of_legal_ranks(tax_id.split('.')[0], ranks=self._ranks, default_value="")
lineage[-1] = tax_id
else:
genome_id = ""
otu = ""
lineage = self._taxonomy.get_lineage_of_legal_ranks(tax_id, ranks=self._ranks, default_value="")
lineage = lineage[:rank_index+1]
lineage_sn = [self._taxonomy.get_scientific_name(tid) if tid != "" and '.' not in tid else "" for tid in lineage]
if '.' in tax_id:
lineage_sn[-1] = self._taxonomy.get_scientific_name(tax_id.split('.')[0]) + " strain"
if percent_by_rank_by_taxid[rank][tax_id] != 0:
stream_output.write(row_format.format(
taxid=tax_id,
rank=rank,
taxpath="|".join(lineage),
taxpath_sn="|".join(lineage_sn),
abp=percent_by_rank_by_taxid[rank][tax_id]*100,
gid=genome_id,
otu=otu
))
def _stream_tp_header(self, output_stream, identifier):
output_stream.write("@SampleID:{}\n".format(identifier))
output_stream.write("@Version:{}\n".format(self._taxonomic_profile_version))
output_stream.write("@Ranks:{ranks}\n\n".format(ranks="|".join(self._ranks)))
output_stream.write("@@TAXID\tRANK\tTAXPATH\tTAXPATHSN\tPERCENTAGE\t_CAMI_genomeID\t_CAMI_OTU\n")
| true
| true
|
79094a47130fbebad4d532fd2b26acc79d338c9c
| 1,878
|
py
|
Python
|
locations/spiders/aldi_uk.py
|
bealbrown/allhours
|
f750ee7644246a97bd16879f14115d7845f76b89
|
[
"MIT"
] | null | null | null |
locations/spiders/aldi_uk.py
|
bealbrown/allhours
|
f750ee7644246a97bd16879f14115d7845f76b89
|
[
"MIT"
] | null | null | null |
locations/spiders/aldi_uk.py
|
bealbrown/allhours
|
f750ee7644246a97bd16879f14115d7845f76b89
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import scrapy
import re
import json
from locations.hourstudy import inputoutput
class AldiUKSpider(scrapy.Spider):
name = "aldiuk"
allowed_domains = ['www.aldi.co.uk']
start_urls = (
'https://www.aldi.co.uk/sitemap/store',
)
def parse(self, response):
response.selector.remove_namespaces()
city_urls = response.xpath('//url/loc/text()').extract()
for path in city_urls:
yield scrapy.Request(
path.strip(),
callback=self.parse_store,
)
def parse_store(self, response):
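        # The store page embeds its data twice: a JavaScript `.push(...)` call (parsed
        # below into `data`) and a JSON <script> tag used by the store finder (parsed
        # into `geodata`). Only the opening hours from `data` are emitted further down.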
json_data = response.xpath('//script[@type="text/javascript"]/text()').extract_first().replace('\n','').replace('\t','').split('.push(')[1].rstrip(')')
data = json.loads(json_data)
geojson_data = response.xpath('//script[@class="js-store-finder-initial-state"][@type="application/json"]/text()').extract_first()
geodata = json.loads(geojson_data)
# properties = {
# 'name': data['seoData']['name'],
# 'ref': data['seoData']['name'],
# 'addr_full': data['seoData']['address']['streetAddress'],
# 'city': data['seoData']['address']['addressLocality'],
# 'postcode': data['seoData']['address']['postalCode'],
# 'country': data['seoData']['address']['addressCountry'],
# 'website': response.request.url,
# 'opening_hours': str(data['seoData']['openingHours']).replace('[','').replace(']','').replace("'",''),
# 'lat': float(geodata['store']['latlng']['lat']),
# 'lon': float(geodata['store']['latlng']['lng']),
# }
raw = str(data['seoData']['openingHours'])
formatted = str(data['seoData']['openingHours']).replace('[','').replace(']','').replace("'",'')
yield inputoutput(raw,formatted)
| 39.125
| 159
| 0.571353
|
import scrapy
import re
import json
from locations.hourstudy import inputoutput
class AldiUKSpider(scrapy.Spider):
name = "aldiuk"
allowed_domains = ['www.aldi.co.uk']
start_urls = (
'https://www.aldi.co.uk/sitemap/store',
)
def parse(self, response):
response.selector.remove_namespaces()
city_urls = response.xpath('//url/loc/text()').extract()
for path in city_urls:
yield scrapy.Request(
path.strip(),
callback=self.parse_store,
)
def parse_store(self, response):
json_data = response.xpath('//script[@type="text/javascript"]/text()').extract_first().replace('\n','').replace('\t','').split('.push(')[1].rstrip(')')
data = json.loads(json_data)
geojson_data = response.xpath('//script[@class="js-store-finder-initial-state"][@type="application/json"]/text()').extract_first()
geodata = json.loads(geojson_data)
# 'lat': float(geodata['store']['latlng']['lat']),
# 'lon': float(geodata['store']['latlng']['lng']),
# }
raw = str(data['seoData']['openingHours'])
formatted = str(data['seoData']['openingHours']).replace('[','').replace(']','').replace("'",'')
yield inputoutput(raw,formatted)
| true
| true
|
79094a923bcf0bd351933af93ab2684f6143ebc1
| 1,975
|
py
|
Python
|
train_joint.py
|
locdoan12121997/Indoor_Segmentation
|
7e90fceb92e1be035a5eedec6ee53bf343bcdab6
|
[
"Apache-2.0"
] | 2
|
2020-03-27T14:50:12.000Z
|
2022-03-30T02:40:21.000Z
|
train_joint.py
|
locdoan12121997/Indoor_Segmentation
|
7e90fceb92e1be035a5eedec6ee53bf343bcdab6
|
[
"Apache-2.0"
] | null | null | null |
train_joint.py
|
locdoan12121997/Indoor_Segmentation
|
7e90fceb92e1be035a5eedec6ee53bf343bcdab6
|
[
"Apache-2.0"
] | null | null | null |
from models.joint_fpn import JointFpn
from trainers.segmentation_trainer import SegmentationTrainer
from data_generators.joint_data_generator import JointDataGenerator
from data_generators.scenenet_rgbd_data_generator import ScenenetRGBDDataGenerator
from utils.config import process_config
from utils.dirs import create_dirs
from utils.utils import get_args
import tensorflow as tf
from utils import factory
from tensorflow.keras.mixed_precision import experimental as mixed_precision
def main():
# capture the config path from the run arguments
# then process the json configuration file
try:
args = get_args()
config = process_config(args.config)
except:
print("missing or invalid arguments")
exit(0)
# use mixed precision for training
if config.exp.mixed_precision:
print('Use mixed precision training')
policy = mixed_precision.Policy('mixed_float16')
mixed_precision.set_policy(policy)
if config.exp.jpa_optimization:
tf.config.optimizer.set_jit(True)
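        # set_jit(True) enables TensorFlow's XLA JIT compilation (auto-clustering) globally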
# create the experiments dirs
create_dirs([config.callbacks.tensorboard_log_dir,
config.callbacks.checkpoint_dir])
print('Create the training data generator.')
if config.generator.is_scenenet == True:
train_data = ScenenetRGBDDataGenerator(config)
else:
train_data = JointDataGenerator(config)
validation_data = None
if type(config.validation.img_dir) == str:
print('Create the validation data generator.')
validation_data = JointDataGenerator(
config, is_training_set=False)
print('Create the model.')
model = factory.create(config.model.class_name)(config, train_data)
print('Create the trainer')
trainer = SegmentationTrainer(
model, train_data, config, validation_generator=validation_data)
print('Start training the model.')
trainer.train()
if __name__ == '__main__':
main()
| 32.377049
| 82
| 0.729114
|
from models.joint_fpn import JointFpn
from trainers.segmentation_trainer import SegmentationTrainer
from data_generators.joint_data_generator import JointDataGenerator
from data_generators.scenenet_rgbd_data_generator import ScenenetRGBDDataGenerator
from utils.config import process_config
from utils.dirs import create_dirs
from utils.utils import get_args
import tensorflow as tf
from utils import factory
from tensorflow.keras.mixed_precision import experimental as mixed_precision
def main():
try:
args = get_args()
config = process_config(args.config)
except:
print("missing or invalid arguments")
exit(0)
if config.exp.mixed_precision:
print('Use mixed precision training')
policy = mixed_precision.Policy('mixed_float16')
mixed_precision.set_policy(policy)
if config.exp.jpa_optimization:
tf.config.optimizer.set_jit(True)
create_dirs([config.callbacks.tensorboard_log_dir,
config.callbacks.checkpoint_dir])
print('Create the training data generator.')
if config.generator.is_scenenet == True:
train_data = ScenenetRGBDDataGenerator(config)
else:
train_data = JointDataGenerator(config)
validation_data = None
if type(config.validation.img_dir) == str:
print('Create the validation data generator.')
validation_data = JointDataGenerator(
config, is_training_set=False)
print('Create the model.')
model = factory.create(config.model.class_name)(config, train_data)
print('Create the trainer')
trainer = SegmentationTrainer(
model, train_data, config, validation_generator=validation_data)
print('Start training the model.')
trainer.train()
if __name__ == '__main__':
main()
| true
| true
|
79094b1325f9f2bc6884b041da0987bd432b2b90
| 3,520
|
py
|
Python
|
src/edubot/client.py
|
wendlers/edubot-snap
|
09c471ef8738a3fc2aae6772a1e02ef8e15d5737
|
[
"MIT"
] | null | null | null |
src/edubot/client.py
|
wendlers/edubot-snap
|
09c471ef8738a3fc2aae6772a1e02ef8e15d5737
|
[
"MIT"
] | null | null | null |
src/edubot/client.py
|
wendlers/edubot-snap
|
09c471ef8738a3fc2aae6772a1e02ef8e15d5737
|
[
"MIT"
] | null | null | null |
##
# The MIT License (MIT)
#
# Copyright (c) 2016 Stefan Wendler
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
##
import os
import subprocess
class AbstractBrowser:
_binary = None
def __init__(self, url, user_data_dir):
self.user_data_dir = os.path.join(user_data_dir, self._binary)
self.url = url
if not os.path.exists(self.user_data_dir):
os.makedirs(self.user_data_dir)
@staticmethod
def _available(binary):
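        # Scan every directory on PATH for the given binary name; PATHEXT covers
        # Windows, where executables carry extensions such as .exe.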
extensions = os.environ.get("PATHEXT", "").split(os.pathsep)
for directory in os.environ.get("PATH", "").split(os.pathsep):
base = os.path.join(directory, binary)
options = [base] + [(base + ext) for ext in extensions]
for filename in options:
if os.path.exists(filename):
return True
return False
def _start(self, args):
print("running: " + self._binary)
try:
subprocess.check_output([self._binary] + args)
except subprocess.CalledProcessError as e:
print(e.output)
return e.returncode
except Exception as e:
print(e)
return -1
return 0
def start(self):
return -1
@staticmethod
def available():
return False
class Chrome(AbstractBrowser):
_binary = "google-chrome"
@staticmethod
def available():
return AbstractBrowser._available(Chrome._binary)
def start(self):
args = ["--app=%s" % self.url]
args += ["--user-data-dir=%s" % self.user_data_dir]
return self._start(args)
class Chromium(Chrome):
_binary = "xchromium"
@staticmethod
def available():
return AbstractBrowser._available(Chromium._binary)
class Firefox(AbstractBrowser):
_binary = "firefox"
@staticmethod
def available():
return AbstractBrowser._available(Firefox._binary)
def start(self):
args = ["--profile", self.user_data_dir]
args += ["--no-remote"]
args += [self.url]
return self._start(args)
class Browser:
def __init__(self, url, user_data_dir=None):
self.client = None
for cls in [Chrome, Chromium, Firefox]:
if cls.available():
self.client = cls(url, user_data_dir)
break
if self.client is None:
raise Exception("No suitable client found!")
def start(self):
return self.client.start()
| 26.074074
| 79
| 0.646591
|
import os
import subprocess
class AbstractBrowser:
_binary = None
def __init__(self, url, user_data_dir):
self.user_data_dir = os.path.join(user_data_dir, self._binary)
self.url = url
if not os.path.exists(self.user_data_dir):
os.makedirs(self.user_data_dir)
@staticmethod
def _available(binary):
extensions = os.environ.get("PATHEXT", "").split(os.pathsep)
for directory in os.environ.get("PATH", "").split(os.pathsep):
base = os.path.join(directory, binary)
options = [base] + [(base + ext) for ext in extensions]
for filename in options:
if os.path.exists(filename):
return True
return False
def _start(self, args):
print("running: " + self._binary)
try:
subprocess.check_output([self._binary] + args)
except subprocess.CalledProcessError as e:
print(e.output)
return e.returncode
except Exception as e:
print(e)
return -1
return 0
def start(self):
return -1
@staticmethod
def available():
return False
class Chrome(AbstractBrowser):
_binary = "google-chrome"
@staticmethod
def available():
return AbstractBrowser._available(Chrome._binary)
def start(self):
args = ["--app=%s" % self.url]
args += ["--user-data-dir=%s" % self.user_data_dir]
return self._start(args)
class Chromium(Chrome):
_binary = "xchromium"
@staticmethod
def available():
return AbstractBrowser._available(Chromium._binary)
class Firefox(AbstractBrowser):
_binary = "firefox"
@staticmethod
def available():
return AbstractBrowser._available(Firefox._binary)
def start(self):
args = ["--profile", self.user_data_dir]
args += ["--no-remote"]
args += [self.url]
return self._start(args)
class Browser:
def __init__(self, url, user_data_dir=None):
self.client = None
for cls in [Chrome, Chromium, Firefox]:
if cls.available():
self.client = cls(url, user_data_dir)
break
if self.client is None:
raise Exception("No suitable client found!")
def start(self):
return self.client.start()
| true
| true
|
79094b8da361144427c8e1c416c4560ff428ffa4
| 3,337
|
py
|
Python
|
src/main/resources/arxan/UploadApplication.py
|
xebialabs-community/xlr-essential-app-protection-plugin
|
f68df83cca35bfe3f70c7de1f33ebafc98752bde
|
[
"MIT"
] | null | null | null |
src/main/resources/arxan/UploadApplication.py
|
xebialabs-community/xlr-essential-app-protection-plugin
|
f68df83cca35bfe3f70c7de1f33ebafc98752bde
|
[
"MIT"
] | null | null | null |
src/main/resources/arxan/UploadApplication.py
|
xebialabs-community/xlr-essential-app-protection-plugin
|
f68df83cca35bfe3f70c7de1f33ebafc98752bde
|
[
"MIT"
] | null | null | null |
#
# Copyright 2021 XEBIALABS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import json
import requests
import org.slf4j.LoggerFactory as LoggerFactory
logger = LoggerFactory.getLogger("Arxan")
# New ARXAN logic
# setup the request url
api_token_endpoint = "/v2/apaas/apps"
url = server.get('url') + "%s" % api_token_endpoint
headers = {
'Content-Type': "application/x-www-form-urlencoded"
}
with open(file_path, 'rb') as app_file:
logger.info('Filepath: %s' % file_path)
files = {'appFile': app_file}
headers = {
'Authorization': auth_string,
}
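    # Content-Type is deliberately not set here: passing 'files' makes requests build
    # the multipart/form-data header (including the boundary) automatically.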
data = {
'productId' : 'Essential Protection',
'protection': {
'appAware': {
'applicationToken': server.get('app_token'),
'endpoint': server.get('app_endpoint')
}
}
}
logger.info('Uploading file...')
logger.info('URL: %s' % url)
logger.info('Headers: %s' % json.dumps(headers))
logger.info('JSON: %s' % json.dumps(data))
response = requests.post(url, files = files, data = {'data': json.dumps(data)}, headers = headers, verify = False)
logger.info('Uploading app response status code: %s.' % response.status_code)
logger.info(response.json()['message'])
# output = response.json().get('protectionId')
if response.status_code == 200:
logger.info('App uploaded')
json_response = response.json()
logger.debug('App upload response: %s', json_response)
if 'protectionId' not in json_response:
logger.error('There was a problem uploading the app. Missing protectionId in the response')
else:
protection_id = json_response['protectionId']
logger.debug('App protection id is %s', protection_id)
output = protection_id
elif response.status_code == 400:
error_message = response.json()['message']
logger.error('There was a problem protecting %s', error_message)
elif response.status_code == 401 or response.status_code == 403:
raise AuthorizationError()
elif response.status_code == 404:
logger.error('Cannot reach server %s', server)
else:
logger.error('An unexpected error has occurred. (%d)', response.status_code)
        raise Exception('Incorrect response code for upload app: (%s)' % response.status_code)
| 48.362319
| 462
| 0.69104
|
import json
import requests
import org.slf4j.LoggerFactory as LoggerFactory
logger = LoggerFactory.getLogger("Arxan")
api_token_endpoint = "/v2/apaas/apps"
url = server.get('url') + "%s" % api_token_endpoint
headers = {
'Content-Type': "application/x-www-form-urlencoded"
}
with open(file_path, 'rb') as app_file:
logger.info('Filepath: %s' % file_path)
files = {'appFile': app_file}
headers = {
'Authorization': auth_string,
}
data = {
'productId' : 'Essential Protection',
'protection': {
'appAware': {
'applicationToken': server.get('app_token'),
'endpoint': server.get('app_endpoint')
}
}
}
logger.info('Uploading file...')
logger.info('URL: %s' % url)
logger.info('Headers: %s' % json.dumps(headers))
logger.info('JSON: %s' % json.dumps(data))
response = requests.post(url, files = files, data = {'data': json.dumps(data)}, headers = headers, verify = False)
logger.info('Uploading app response status code: %s.' % response.status_code)
logger.info(response.json()['message'])
if response.status_code == 200:
logger.info('App uploaded')
json_response = response.json()
logger.debug('App upload response: %s', json_response)
if 'protectionId' not in json_response:
logger.error('There was a problem uploading the app. Missing protectionId in the response')
else:
protection_id = json_response['protectionId']
logger.debug('App protection id is %s', protection_id)
output = protection_id
elif response.status_code == 400:
error_message = response.json()['message']
logger.error('There was a problem protecting %s', error_message)
elif response.status_code == 401 or response.status_code == 403:
raise AuthorizationError()
elif response.status_code == 404:
logger.error('Cannot reach server %s', server)
else:
logger.error('An unexpected error has occurred. (%d)', response.status_code)
        raise Exception('Incorrect response code for upload app: (%s)' % response.status_code)
| true
| true
|
79094ba4c931ead41bcd45004879c5fa37426b73
| 822
|
py
|
Python
|
ml_project/entities/train_pipeline_params.py
|
made-ml-in-prod-2021/marina-zav
|
7b4b6e5f333707001e36dfb014dcd36bf975d969
|
[
"FTL"
] | null | null | null |
ml_project/entities/train_pipeline_params.py
|
made-ml-in-prod-2021/marina-zav
|
7b4b6e5f333707001e36dfb014dcd36bf975d969
|
[
"FTL"
] | null | null | null |
ml_project/entities/train_pipeline_params.py
|
made-ml-in-prod-2021/marina-zav
|
7b4b6e5f333707001e36dfb014dcd36bf975d969
|
[
"FTL"
] | null | null | null |
from dataclasses import dataclass
import yaml
from marshmallow_dataclass import class_schema
from .feature_params import FeatureParams
from .preprocess_params import PreprocessParams
from .split_params import SplittingParams
from .train_params import TrainingParams
@dataclass()
class TrainingPipelineParams:
input_data_path: str
output_model_path: str
metric_path: str
splitting_params: SplittingParams
preprocess_params: PreprocessParams
feature_params: FeatureParams
train_params: TrainingParams
TrainingPipelineParamsSchema = class_schema(TrainingPipelineParams)
def read_training_pipeline_params(path: str) -> TrainingPipelineParams:
with open(path, "r") as input_stream:
schema = TrainingPipelineParamsSchema()
return schema.load(yaml.safe_load(input_stream))
| 27.4
| 71
| 0.810219
|
from dataclasses import dataclass
import yaml
from marshmallow_dataclass import class_schema
from .feature_params import FeatureParams
from .preprocess_params import PreprocessParams
from .split_params import SplittingParams
from .train_params import TrainingParams
@dataclass()
class TrainingPipelineParams:
input_data_path: str
output_model_path: str
metric_path: str
splitting_params: SplittingParams
preprocess_params: PreprocessParams
feature_params: FeatureParams
train_params: TrainingParams
TrainingPipelineParamsSchema = class_schema(TrainingPipelineParams)
def read_training_pipeline_params(path: str) -> TrainingPipelineParams:
with open(path, "r") as input_stream:
schema = TrainingPipelineParamsSchema()
return schema.load(yaml.safe_load(input_stream))
| true
| true
|
79094bb47bef7847079dcc4eb501ab993a68de94
| 344
|
py
|
Python
|
mppsolar/outputs/baseoutput.py
|
20after4/mpp-solar
|
31181f69abd18137c8a7f2c088691d464fb75acb
|
[
"MIT"
] | null | null | null |
mppsolar/outputs/baseoutput.py
|
20after4/mpp-solar
|
31181f69abd18137c8a7f2c088691d464fb75acb
|
[
"MIT"
] | null | null | null |
mppsolar/outputs/baseoutput.py
|
20after4/mpp-solar
|
31181f69abd18137c8a7f2c088691d464fb75acb
|
[
"MIT"
] | null | null | null |
import logging
log = logging.getLogger("MPP-Solar")
class baseoutput:
def __str__(self):
return "baseoutput - the base class for the output processors, not used directly"
def get_kwargs(self, kwargs, key, default=None):
        if key not in kwargs or not kwargs[key]:
return default
return kwargs[key]
| 22.933333
| 89
| 0.665698
|
import logging
log = logging.getLogger("MPP-Solar")
class baseoutput:
def __str__(self):
return "baseoutput - the base class for the output processors, not used directly"
def get_kwargs(self, kwargs, key, default=None):
        if key not in kwargs or not kwargs[key]:
return default
return kwargs[key]
| true
| true
|
79094be2c1880506031c887e98b8bf683679f18d
| 203
|
py
|
Python
|
upto7-12-2020/amstrongnospl.py
|
nikhilsamninan/python-files
|
15198459081097058a939b40b5e8ef754e578fe0
|
[
"Apache-2.0"
] | null | null | null |
upto7-12-2020/amstrongnospl.py
|
nikhilsamninan/python-files
|
15198459081097058a939b40b5e8ef754e578fe0
|
[
"Apache-2.0"
] | null | null | null |
upto7-12-2020/amstrongnospl.py
|
nikhilsamninan/python-files
|
15198459081097058a939b40b5e8ef754e578fe0
|
[
"Apache-2.0"
] | null | null | null |
n=(input("Enter a number"))
a=len(n)
s=int(n)
sum=0
p=s
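# An Armstrong number equals the sum of its digits, each raised to the power of the
# digit count (a); the loop below accumulates that sum digit by digit.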
while s>0:
b=s%10
sum=sum+b**a
s=s//10
if sum==p:
print("It is an Amstrong Number")
else:
print("It is Not an Amstrong Number")
| 15.615385
| 41
| 0.591133
|
n=(input("Enter a number"))
a=len(n)
s=int(n)
sum=0
p=s
while s>0:
b=s%10
sum=sum+b**a
s=s//10
if sum==p:
print("It is an Amstrong Number")
else:
print("It is Not an Amstrong Number")
| true
| true
|
79094c50813a8f89ddfeab75de69df4aacf7b75a
| 17,699
|
py
|
Python
|
fastinference/tabular/pd.py
|
floleuerer/fastinference
|
bab8251385416140cf2611016ea1b40c8f9032ff
|
[
"Apache-2.0"
] | 79
|
2020-06-08T02:08:06.000Z
|
2022-02-07T11:01:59.000Z
|
fastinference/tabular/pd.py
|
floleuerer/fastinference
|
bab8251385416140cf2611016ea1b40c8f9032ff
|
[
"Apache-2.0"
] | 41
|
2020-06-20T17:00:29.000Z
|
2022-02-03T12:43:58.000Z
|
fastinference/tabular/pd.py
|
floleuerer/fastinference
|
bab8251385416140cf2611016ea1b40c8f9032ff
|
[
"Apache-2.0"
] | 19
|
2020-06-14T19:39:37.000Z
|
2021-05-30T14:33:26.000Z
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01_tabular.pd.ipynb (unless otherwise specified).
__all__ = ['PartDep']
# Cell
from fastai.tabular.all import *
from .core import *
# Cell
from plotnine import *
# Cell
from IPython.display import clear_output
# Cell
class PartDep(Interpret):
"""
    Calculate Partial Dependence. Continuous vars are divided into buckets and are analyzed as well.
    Fields is a list of lists of what columns we want to test. The inner items are treated as connected fields.
    For ex. fields = [['Store','StoreType']] means that Store and StoreType are treated as one entity
    (their values are substituted as a pair, not as separate values).
    coef is useful when we don't want to deal with all the variants, but only with the most common ones.
    In short, if coef is for ex. 0.9, then the function outputs the number of occurrences that covers all but the
    10% least used variants.
    If coef is more than 1.0, then 'coef' itself is used as the threshold (as the min number of occurrences).
    use_log=True is needed if we have transformed the dependent variable into log.
    use_int=True is needed if we want the log-detransformed (exponentiated) var to be an integer, not a float.
    is_continue=True helps with long calculations; it continues the last calculation from the saved file.
    is_use_cache=True loads the last fully calculated result. It can distinguish caches that were made with
    different fields and coef.
    no_precalc=True -- don't calculate PartDep (useful if you want to use `plot_raw` and `plot_model` only)
"""
def __init__(self, learn, df, model_name: str, fields: list = (), coef: float = 1.0,
is_sorted: bool = True, use_log=False, use_int=False,
cache_path=None, is_use_cache=True, is_continue=False, no_precalc=False):
super().__init__(learn, df)
self.use_log = use_log
self.use_int = use_int
self.coef = coef
self.is_sorted = is_sorted
if (fields is None) or (len(fields) == 0):
self.fields = self._get_all_columns()
else:
self.fields = listify(fields)
self.part_dep_df = None
self.cache_path = ifnone(cache_path, learn.path / 'cache')
self.save_name = f"{model_name}_part_dep"
self.is_use_cache = is_use_cache
self.is_continue = is_continue
self.dep_var = self._get_dep_var()
self.is_biclassification = True if (learn.dls.c == 2) else False
if (no_precalc==False):
self._load_or_calculate()
@classmethod
def what_cached(self, model_name: str, path=None, learn=None):
"""
Shows what keys are cached
"""
if isNone(path) and isNone(learn):
print("path and learn cannot be None at the same time")
return
elif isNone(path):
path = learn.path
name = f"{model_name}_part_dep"
folder = 'cache'
path = path / folder
if not (Path(f"{path / name}.pkl").exists()):
print(f"No chache file")
else:
f = open(path / f"{name}.pkl", "rb")
var = load(f)
f.close()
for k in var.keys():
print(k)
@classmethod
def empty_cache(self, model_name: str, path=None, learn=None):
"""
deletes the cache file
"""
if isNone(path) and isNone(learn):
print("path and learn cannot be None at the same time")
return
elif isNone(path):
path = learn.path
name = f"{model_name}_part_dep"
folder = 'cache'
path = path / folder
files = (Path(f"{path / name}.pkl"), Path(path / 'pd_interm.pkl'))
for file in files:
if not (file.exists()):
print(f"No chache file {file}")
else:
file.unlink()
def _cont_into_buckets(self, df_init, CONT_COLS):
"""
        Categorical values can be easily distinguished from one another,
        but that doesn't work with continuous values: we have to divide their
        values into buckets and then use all values in a bucket as a single value
        that averages the bucket. This way we convert a cont feature into a pseudo-categorical
        one and are able to apply partial dependence analysis to it
"""
fields = self.fields
df = df_init.copy()
if is_in_list(values=fields, in_list=CONT_COLS):
for col in which_elms(values=fields, in_list=CONT_COLS):
edges = np.histogram_bin_edges(a=df[col].dropna(), bins='auto')
for x, y in zip(edges[::], edges[1::]):
df.loc[(df[col] > x) & (df[col] < y), col] = (x + y) / 2
return df
def _get_field_uniq_x_coef(self, df: pd.DataFrame, fields: list, coef: float) -> list:
'''
        This function outputs a threshold on the number of occurrences of the different variants of the list of columns (fields).
        In short, if coef is for ex. 0.9, then the function outputs the occurrence count that keeps all but the
        10% least used variants.
        If coef is more than 1.0, then 'coef' itself is used as the threshold
'''
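        # e.g. coef=0.9 returns the occurrence count that keeps roughly the 90% most used
        # value combinations and drops the 10% least used; coef=25 (any value > 1) simply
        # means "seen at least 25 times".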
if (coef > 1):
return math.ceil(coef)
coef = 0. if (coef < 0) else coef
occs = df.groupby(fields).size().reset_index(name="Times").sort_values(['Times'], ascending=False)
num = math.ceil(coef * len(occs))
if (num <= 0):
            # number of occurrences is now = max_occs+1 (so there will be no items with this filter)
return occs.iloc[0]['Times'] + 1
else:
return occs.iloc[num - 1]['Times']
def _get_part_dep_one(self, fields: list, masterbar=None) -> pd.DataFrame:
'''
        Calculates partial dependence for the columns in fields.
        Fields is a list of lists of what columns we want to test. The inner items are treated as connected fields.
        For ex. fields = [['Store','StoreType']] means that Store and StoreType are treated as one entity
        (their values are substituted as a pair, not as separate values).
        coef is useful when we don't want to deal with all the variants, but only with the most common ones
'''
NAN_SUBST = '###na###'
cont_vars = self._get_cont_columns()
fields = listify(fields)
coef, is_sorted, use_log, use_int = self.coef, self.is_sorted, self.use_log, self.use_int
dep_name = self._get_dep_var()
df = self._cont_into_buckets(df_init=self.df, CONT_COLS=cont_vars)
        # here we prepare data to eliminate pairs that occur too rarely
        # and make NaN a separate value so that it appears in the occurrences
field_min_occ = self._get_field_uniq_x_coef(df=df, fields=fields, coef=coef)
df[fields] = df[fields].fillna(NAN_SUBST) # to treat None as a separate field
occs = df.groupby(fields).size().reset_index(name="Times").sort_values(['Times'], ascending=False)
occs[fields] = occs[fields].replace(to_replace=NAN_SUBST, value=np.nan) # get back Nones from NAN_SUBST
df[fields] = df[fields].replace(to_replace=NAN_SUBST, value=np.nan) # get back Nones from NAN_SUBST
occs = occs[occs['Times'] >= field_min_occ]
df_copy = df.merge(occs[fields]).copy()
        # here for every pair of values of fields we substitute its values in the original df
        # with the current one and calculate predictions.
        # So we predict the mean dep_var for every pair of values of fields on the whole dataset
frame = []
ln = len(occs)
if (ln > 0):
for _, row in progress_bar(occs.iterrows(), total=ln, parent=masterbar):
# We don't need to do df_copy = df.merge(occs[field]).copy() every time
# as every time we change the same column (set of columns)
record = []
for fld in fields:
df_copy[fld] = row[fld]
preds = self._predict_df(df=df_copy)
preds = np.exp(np.mean(preds)) if (use_log == True) else np.mean(preds)
preds = int(preds) if (use_int == True) else preds
for fld in fields:
record.append(row[fld])
record.append(preds)
record.append(row['Times'])
frame.append(record)
        # Here for every pair of fields we calculate the mean dep_var deviation.
        # This deviation is the score that shows how and where this particular pair of fields
        # moves the dependent variable.
        # Times is added to make the data easier to judge (more times -- more sure we are)
out = pd.DataFrame(frame, columns=fields + [dep_name, 'times'])
median = out[dep_name].median()
out[dep_name] /= median
if (is_sorted == True):
out = out.sort_values(by=dep_name, ascending=False)
return out
def _get_part_dep(self):
'''
        Makes a dataframe with partial dependencies for every pair of columns in fields
'''
fields = self.fields
learn = self.learn
cache_path = self.cache_path
dep_name = self._get_dep_var()
is_continue = self.is_continue
l2k = self._list_to_key
result = []
to_save = {}
from_saved = {}
# Load from cache
if (is_continue == True):
if Path(cache_path / 'pd_interm.pkl').exists():
from_saved = ld_var(name='pd_interm', path=cache_path)
else:
is_continue = False
elapsed = []
left = []
if (is_continue == True):
for field in fields:
if (l2k(field) in from_saved):
elapsed.append(field)
new_df = from_saved[l2k(field)]
result.append(new_df)
to_save[l2k(field)] = new_df
for field in fields:
if (l2k(field) not in from_saved):
left.append(field)
# Calculate
pbar = master_bar(left)
cache_path.mkdir(parents=True, exist_ok=True)
sv_var(var=to_save, name='pd_interm', path=cache_path)
for field in pbar:
new_df = self._get_part_dep_one(fields=field, masterbar=pbar)
new_df['feature'] = self._list_to_key(field)
if is_listy(field):
new_df['value'] = new_df[field].values.tolist()
new_df.drop(columns=field, inplace=True)
else:
new_df = new_df.rename(index=str, columns={str(field): "value"})
result.append(new_df)
to_save[l2k(field)] = new_df
sv_var(var=to_save, name='pd_interm', path=cache_path)
clear_output()
if Path(cache_path / 'pd_interm.pkl').exists():
Path(cache_path / 'pd_interm.pkl').unlink() # delete intermediate file
result = pd.concat(result, ignore_index=True, sort=True)
result = result[['feature', 'value', dep_name, 'times']]
clear_output()
self.part_dep_df = result
def _load_dict(self, name, path):
if not (Path(f"{path / name}.pkl").exists()):
return None
return self._ld_var(name=name, path=path)
def _save_cached(self):
"""
        Saves the calculated PartDep df into path.
        More than one can be saved, as a dict with fields as the key
"""
path = self.cache_path
path.mkdir(parents=True, exist_ok=True)
name = self.save_name
sv_dict = self._load_dict(name=name, path=path)
key = self._list_to_key(self.fields + [self.coef])
if isNone(sv_dict):
sv_dict = {key: self.part_dep_df}
else:
sv_dict[key] = self.part_dep_df
self._sv_var(var=sv_dict, name=name, path=path)
def _load_cached(self):
"""
        Load the calculated PartDep df if its hash exists.
"""
name = self.save_name
path = self.cache_path
if not (Path(f"{path / name}.pkl").exists()):
return None
ld_dict = self._ld_var(name=name, path=path)
key = self._list_to_key(self.fields + [self.coef])
if (key not in ld_dict):
return None
return ld_dict[key]
def _load_or_calculate(self):
"""
        Calculates part dep or loads it from cache if possible
"""
if (self.is_use_cache == False) or isNone(self._load_cached()):
self._get_part_dep()
return self._save_cached()
else:
self.part_dep_df = self._load_cached()
def _general2partial(self, df):
if (len(df) == 0):
return None
copy_df = df.copy()
feature = copy_df['feature'].iloc[0]
copy_df.drop(columns='feature', inplace=True)
copy_df.rename(columns={"value": feature}, inplace=True)
return copy_df
def plot_raw(self, field, sample=1.0):
"""
        Plot the dependency graph from the data itself.
        field must be a list of exactly one feature.
        sample is a coef to len(df). Lower it if the kernel tends to shut down on that
"""
df = self.df
df = df.sample(int(len(df)*sample))
field = field[0]
dep_var = f"{self._get_dep_var()}_orig" if (self.use_log == True) else self._get_dep_var()
return ggplot(df, aes(field, dep_var)) + stat_smooth(se=True, method='loess');
def plot_model(self, field, strict_recalc=False, sample=1.0):
'''
Plot dependency graph from the model.
        It also takes times into account, so the plot becomes much more resilient, because not every value is treated as equal
        (more occurrences means more weight).
        field must be a list of exactly one feature.
        strict_recalc=True ignores the precalculated `part_dep_df` and recalculates it anyway.
        sample is a coef to len(df). Lower it if the kernel tends to shut down on that
'''
cached = self.get_pd(feature=self._list_to_key(field))
if (strict_recalc == False) and isNotNone(cached):
pd_table = cached
else:
pd_table = self._get_part_dep_one(fields=field)
clear_output()
field = field[0]
dep_var = f"{self._get_dep_var()}"
rearr = []
for var, fee, times in zip(pd_table[field], pd_table[dep_var], pd_table['times']):
for i in range(int(times)):
rearr.append([var, fee])
rearr = pd.DataFrame(rearr, columns=[field, dep_var])
rearr = rearr.sample(int(len(rearr)*sample))
return ggplot(rearr, aes(field, dep_var)) + stat_smooth(se=True, method='loess');
def get_pd(self, feature, min_tm=1):
"""
        Gets a particular feature's subtable from the whole one (min times is an optional parameter)
"""
if isNone(self.part_dep_df):
return None
df = self.part_dep_df.query(f"""(feature == "{feature}") and (times > {min_tm})""")
return self._general2partial(df=df)
def get_pd_main_chained_feat(self, main_feat_idx=0, show_min=1):
"""
        Transforms the whole features table into the get_part_dep_one output table format
"""
def get_xth_el(str_list: str, indexes: list):
lst = str_list if is_listy(str_list) else ast.literal_eval(str_list)
lst = listify(lst)
if (len(lst) == 1):
return lst[0]
elif (len(lst) > 1):
if (len(indexes) == 1):
return lst[indexes[0]]
else:
return [lst[idx] for idx in indexes]
else:
return None
feat_table = self.part_dep_df
main_feat_idx = listify(main_feat_idx)
feat_table_copy = feat_table.copy()
func = functools.partial(get_xth_el, indexes=main_feat_idx)
feat_table_copy['value'] = feat_table_copy['value'].apply(func)
feat_table_copy.drop(columns='feature', inplace=True)
return feat_table_copy.query(f'times > {show_min}')
def plot_part_dep(self, fields, limit=20, asc=False):
"""
        Plots a partial dependence plot for a sublist of connected `fields`.
        `fields` must be a sublist of the `fields` given at initialization
"""
def prepare_colors(df_pd: pd.DataFrame):
heat_min = df_pd['times'].min()
heat_max = df_pd['times'].max()
dif = heat_max - heat_min
colors = [((times - heat_min) / (dif), (times - heat_min) / (4 * dif), 0.75) for times in df_pd['times']]
return colors
df = self.part_dep_df.query(f"feature == '{self._list_to_key(fields)}'")
dep_var = self.dep_var
df_copy = df.copy()
df_copy['feature'] = df_copy['feature'].str.slice(0, 45)
df_copy = df_copy.sort_values(by=dep_var, ascending=asc)[:limit].sort_values(by=dep_var, ascending=not (asc))
colors = prepare_colors(df_pd=df_copy)
ax = df_copy.plot.barh(x="value", y=dep_var, sort_columns=True, figsize=(10, 10),
color=colors, title=self._list_to_key(fields))
ax.set_ylabel(fields)
if (self.is_biclassification):
txt = f"According to probability of {self._get_dep_var()} is '{learn.dls.vocab[0]}'"
ax.annotate(txt, (0,0), (0, -30),
xycoords='axes fraction', textcoords='offset points',
va='top')
for (p, t) in zip(ax.patches, df_copy['times']):
ax.annotate(f'{p.get_width():.4f}', ((p.get_width() * 1.005), p.get_y() * 1.005))
ax.annotate(f'{int(t)}', ((p.get_width() * .45), p.get_y() + 0.1), color='white', weight='bold')
| 41.449649
| 117
| 0.598339
|
__all__ = ['PartDep']
from fastai.tabular.all import *
from .core import *
from plotnine import *
from IPython.display import clear_output
class PartDep(Interpret):
def __init__(self, learn, df, model_name: str, fields: list = (), coef: float = 1.0,
is_sorted: bool = True, use_log=False, use_int=False,
cache_path=None, is_use_cache=True, is_continue=False, no_precalc=False):
super().__init__(learn, df)
self.use_log = use_log
self.use_int = use_int
self.coef = coef
self.is_sorted = is_sorted
if (fields is None) or (len(fields) == 0):
self.fields = self._get_all_columns()
else:
self.fields = listify(fields)
self.part_dep_df = None
self.cache_path = ifnone(cache_path, learn.path / 'cache')
self.save_name = f"{model_name}_part_dep"
self.is_use_cache = is_use_cache
self.is_continue = is_continue
self.dep_var = self._get_dep_var()
self.is_biclassification = True if (learn.dls.c == 2) else False
if (no_precalc==False):
self._load_or_calculate()
@classmethod
def what_cached(self, model_name: str, path=None, learn=None):
if isNone(path) and isNone(learn):
print("path and learn cannot be None at the same time")
return
elif isNone(path):
path = learn.path
name = f"{model_name}_part_dep"
folder = 'cache'
path = path / folder
if not (Path(f"{path / name}.pkl").exists()):
print(f"No chache file")
else:
f = open(path / f"{name}.pkl", "rb")
var = load(f)
f.close()
for k in var.keys():
print(k)
@classmethod
def empty_cache(self, model_name: str, path=None, learn=None):
if isNone(path) and isNone(learn):
print("path and learn cannot be None at the same time")
return
elif isNone(path):
path = learn.path
name = f"{model_name}_part_dep"
folder = 'cache'
path = path / folder
files = (Path(f"{path / name}.pkl"), Path(path / 'pd_interm.pkl'))
for file in files:
if not (file.exists()):
print(f"No chache file {file}")
else:
file.unlink()
def _cont_into_buckets(self, df_init, CONT_COLS):
fields = self.fields
df = df_init.copy()
if is_in_list(values=fields, in_list=CONT_COLS):
for col in which_elms(values=fields, in_list=CONT_COLS):
edges = np.histogram_bin_edges(a=df[col].dropna(), bins='auto')
for x, y in zip(edges[::], edges[1::]):
df.loc[(df[col] > x) & (df[col] < y), col] = (x + y) / 2
return df
def _get_field_uniq_x_coef(self, df: pd.DataFrame, fields: list, coef: float) -> list:
if (coef > 1):
return math.ceil(coef)
coef = 0. if (coef < 0) else coef
occs = df.groupby(fields).size().reset_index(name="Times").sort_values(['Times'], ascending=False)
num = math.ceil(coef * len(occs))
if (num <= 0):
return occs.iloc[0]['Times'] + 1
else:
return occs.iloc[num - 1]['Times']
def _get_part_dep_one(self, fields: list, masterbar=None) -> pd.DataFrame:
NAN_SUBST = '###na###'
cont_vars = self._get_cont_columns()
fields = listify(fields)
coef, is_sorted, use_log, use_int = self.coef, self.is_sorted, self.use_log, self.use_int
dep_name = self._get_dep_var()
df = self._cont_into_buckets(df_init=self.df, CONT_COLS=cont_vars)
field_min_occ = self._get_field_uniq_x_coef(df=df, fields=fields, coef=coef)
df[fields] = df[fields].fillna(NAN_SUBST)
occs = df.groupby(fields).size().reset_index(name="Times").sort_values(['Times'], ascending=False)
occs[fields] = occs[fields].replace(to_replace=NAN_SUBST, value=np.nan)
df[fields] = df[fields].replace(to_replace=NAN_SUBST, value=np.nan)
occs = occs[occs['Times'] >= field_min_occ]
df_copy = df.merge(occs[fields]).copy()
# with the current one and calculate predictions
        # So we predict mean dep_var for every pair of values of fields on the whole dataset
frame = []
ln = len(occs)
if (ln > 0):
for _, row in progress_bar(occs.iterrows(), total=ln, parent=masterbar):
# We don't need to do df_copy = df.merge(occs[field]).copy() every time
record = []
for fld in fields:
df_copy[fld] = row[fld]
preds = self._predict_df(df=df_copy)
preds = np.exp(np.mean(preds)) if (use_log == True) else np.mean(preds)
preds = int(preds) if (use_int == True) else preds
for fld in fields:
record.append(row[fld])
record.append(preds)
record.append(row['Times'])
frame.append(record)
out = pd.DataFrame(frame, columns=fields + [dep_name, 'times'])
median = out[dep_name].median()
out[dep_name] /= median
if (is_sorted == True):
out = out.sort_values(by=dep_name, ascending=False)
return out
def _get_part_dep(self):
fields = self.fields
learn = self.learn
cache_path = self.cache_path
dep_name = self._get_dep_var()
is_continue = self.is_continue
l2k = self._list_to_key
result = []
to_save = {}
from_saved = {}
if (is_continue == True):
if Path(cache_path / 'pd_interm.pkl').exists():
from_saved = ld_var(name='pd_interm', path=cache_path)
else:
is_continue = False
elapsed = []
left = []
if (is_continue == True):
for field in fields:
if (l2k(field) in from_saved):
elapsed.append(field)
new_df = from_saved[l2k(field)]
result.append(new_df)
to_save[l2k(field)] = new_df
for field in fields:
if (l2k(field) not in from_saved):
left.append(field)
pbar = master_bar(left)
cache_path.mkdir(parents=True, exist_ok=True)
sv_var(var=to_save, name='pd_interm', path=cache_path)
for field in pbar:
new_df = self._get_part_dep_one(fields=field, masterbar=pbar)
new_df['feature'] = self._list_to_key(field)
if is_listy(field):
new_df['value'] = new_df[field].values.tolist()
new_df.drop(columns=field, inplace=True)
else:
new_df = new_df.rename(index=str, columns={str(field): "value"})
result.append(new_df)
to_save[l2k(field)] = new_df
sv_var(var=to_save, name='pd_interm', path=cache_path)
clear_output()
if Path(cache_path / 'pd_interm.pkl').exists():
Path(cache_path / 'pd_interm.pkl').unlink()
result = pd.concat(result, ignore_index=True, sort=True)
result = result[['feature', 'value', dep_name, 'times']]
clear_output()
self.part_dep_df = result
def _load_dict(self, name, path):
if not (Path(f"{path / name}.pkl").exists()):
return None
return self._ld_var(name=name, path=path)
def _save_cached(self):
path = self.cache_path
path.mkdir(parents=True, exist_ok=True)
name = self.save_name
sv_dict = self._load_dict(name=name, path=path)
key = self._list_to_key(self.fields + [self.coef])
if isNone(sv_dict):
sv_dict = {key: self.part_dep_df}
else:
sv_dict[key] = self.part_dep_df
self._sv_var(var=sv_dict, name=name, path=path)
def _load_cached(self):
name = self.save_name
path = self.cache_path
if not (Path(f"{path / name}.pkl").exists()):
return None
ld_dict = self._ld_var(name=name, path=path)
key = self._list_to_key(self.fields + [self.coef])
if (key not in ld_dict):
return None
return ld_dict[key]
def _load_or_calculate(self):
if (self.is_use_cache == False) or isNone(self._load_cached()):
self._get_part_dep()
return self._save_cached()
else:
self.part_dep_df = self._load_cached()
def _general2partial(self, df):
if (len(df) == 0):
return None
copy_df = df.copy()
feature = copy_df['feature'].iloc[0]
copy_df.drop(columns='feature', inplace=True)
copy_df.rename(columns={"value": feature}, inplace=True)
return copy_df
def plot_raw(self, field, sample=1.0):
df = self.df
df = df.sample(int(len(df)*sample))
field = field[0]
dep_var = f"{self._get_dep_var()}_orig" if (self.use_log == True) else self._get_dep_var()
return ggplot(df, aes(field, dep_var)) + stat_smooth(se=True, method='loess');
def plot_model(self, field, strict_recalc=False, sample=1.0):
cached = self.get_pd(feature=self._list_to_key(field))
if (strict_recalc == False) and isNotNone(cached):
pd_table = cached
else:
pd_table = self._get_part_dep_one(fields=field)
clear_output()
field = field[0]
dep_var = f"{self._get_dep_var()}"
rearr = []
for var, fee, times in zip(pd_table[field], pd_table[dep_var], pd_table['times']):
for i in range(int(times)):
rearr.append([var, fee])
rearr = pd.DataFrame(rearr, columns=[field, dep_var])
rearr = rearr.sample(int(len(rearr)*sample))
return ggplot(rearr, aes(field, dep_var)) + stat_smooth(se=True, method='loess');
def get_pd(self, feature, min_tm=1):
if isNone(self.part_dep_df):
return None
df = self.part_dep_df.query(f"""(feature == "{feature}") and (times > {min_tm})""")
return self._general2partial(df=df)
def get_pd_main_chained_feat(self, main_feat_idx=0, show_min=1):
def get_xth_el(str_list: str, indexes: list):
lst = str_list if is_listy(str_list) else ast.literal_eval(str_list)
lst = listify(lst)
if (len(lst) == 1):
return lst[0]
elif (len(lst) > 1):
if (len(indexes) == 1):
return lst[indexes[0]]
else:
return [lst[idx] for idx in indexes]
else:
return None
feat_table = self.part_dep_df
main_feat_idx = listify(main_feat_idx)
feat_table_copy = feat_table.copy()
func = functools.partial(get_xth_el, indexes=main_feat_idx)
feat_table_copy['value'] = feat_table_copy['value'].apply(func)
feat_table_copy.drop(columns='feature', inplace=True)
return feat_table_copy.query(f'times > {show_min}')
def plot_part_dep(self, fields, limit=20, asc=False):
def prepare_colors(df_pd: pd.DataFrame):
heat_min = df_pd['times'].min()
heat_max = df_pd['times'].max()
dif = heat_max - heat_min
colors = [((times - heat_min) / (dif), (times - heat_min) / (4 * dif), 0.75) for times in df_pd['times']]
return colors
df = self.part_dep_df.query(f"feature == '{self._list_to_key(fields)}'")
dep_var = self.dep_var
df_copy = df.copy()
df_copy['feature'] = df_copy['feature'].str.slice(0, 45)
df_copy = df_copy.sort_values(by=dep_var, ascending=asc)[:limit].sort_values(by=dep_var, ascending=not (asc))
colors = prepare_colors(df_pd=df_copy)
ax = df_copy.plot.barh(x="value", y=dep_var, sort_columns=True, figsize=(10, 10),
color=colors, title=self._list_to_key(fields))
ax.set_ylabel(fields)
if (self.is_biclassification):
txt = f"According to probability of {self._get_dep_var()} is '{learn.dls.vocab[0]}'"
ax.annotate(txt, (0,0), (0, -30),
xycoords='axes fraction', textcoords='offset points',
va='top')
for (p, t) in zip(ax.patches, df_copy['times']):
ax.annotate(f'{p.get_width():.4f}', ((p.get_width() * 1.005), p.get_y() * 1.005))
ax.annotate(f'{int(t)}', ((p.get_width() * .45), p.get_y() + 0.1), color='white', weight='bold')
| true
| true
|
79094cde17adfcba18f15fdad7aac1cd83f9949d
| 3,235
|
py
|
Python
|
src/ggrc/snapshotter/rules.py
|
MikalaiMikalalai/ggrc-core
|
f0f83b3638574bb64de474f3b70ed27436ca812a
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2019-01-12T23:46:00.000Z
|
2019-01-12T23:46:00.000Z
|
src/ggrc/snapshotter/rules.py
|
MikalaiMikalalai/ggrc-core
|
f0f83b3638574bb64de474f3b70ed27436ca812a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/ggrc/snapshotter/rules.py
|
MikalaiMikalalai/ggrc-core
|
f0f83b3638574bb64de474f3b70ed27436ca812a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Generate rules for snapshoting"""
from ggrc.snapshotter.datastructures import Attr
class Types(object):
"""Get default types for snapshotting"""
# pylint: disable=too-few-public-methods
all = {
"AccessGroup",
"AccountBalance",
"Contract",
"Control",
"DataAsset",
"Facility",
"Market",
"Objective",
"OrgGroup",
"Policy",
"Process",
"Product",
"Project",
"Regulation",
"Requirement",
"Standard",
"System",
"Vendor",
"Risk",
"TechnologyEnvironment",
"Threat",
"Metric",
"ProductGroup",
"KeyReport",
}
parents = {
"Audit",
}
scoped = {
"Assessment",
}
trans_scope = {
"Issue",
}
ignore = {
"Assessment",
"AssessmentTemplate",
"Issue",
"Workflow",
"Audit",
"Person"
}
external = {
"AccessGroup",
"AccountBalance",
"DataAsset",
"Facility",
"KeyReport",
"Market",
"Metric",
"OrgGroup",
"Process",
"Product",
"ProductGroup",
"Project",
"System",
"Vendor",
"TechnologyEnvironment",
"Control",
"Risk",
}
@classmethod
def internal_types(cls):
"""Return set of internal type names."""
return cls.all - cls.external
@classmethod
def external_types(cls):
"""Return set of external type names."""
return cls.external
class Rules(object):
"""Returns a dictionary of rules
Expected format of rule_list is the following:
[
{"master_object_type", ...},
{"first degree object types"},
{"second degree object types"}
]
  For all master objects of type master_object_type, it will gather all
  related objects from the first degree object types (which can be related via
  the relationships table or via direct mapping, in which case you should wrap
  the attribute name in Attr), and then gather all of the first degree objects'
  related objects of the types listed in the second degree object types.
Example:
[
{"object_type_1", ["object_type_2", ...]},
{"type_of_related_object_or_attribute", ["second..."]},
{"type_of_object_to_snapshot_1", ["type_2", ...]}
]
From it, it will build a dictionary of format:
{
"parent_type": {
"fst": {"type_of_related_object_or_attribute_1", ...},
"snd": {"type_1", "type_2", ...}
},
...
}
"""
# pylint: disable=too-few-public-methods
def __init__(self, rule_list):
self.rules = dict()
for parents, fstdeg, snddeg in rule_list:
for parent in parents:
self.rules[parent] = {
"fst": fstdeg,
"snd": snddeg
}
DEFAULT_RULE_LIST = [
[
{"Audit"},
{Attr("program")},
Types.all - Types.ignore
]
]
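# With DEFAULT_RULE_LIST, Rules(DEFAULT_RULE_LIST).rules resolves to a single entry:
#   {"Audit": {"fst": {Attr("program")}, "snd": Types.all - Types.ignore}}
# i.e. for every Audit, follow its "program" attribute and snapshot the related objects
# of every snapshottable type.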
def get_rules(rule_list=None):
"""Get the rules governing the snapshot creation
Args:
rule_list: List of rules
Returns:
Rules object with attribute `rules`. See Rules object for detailed doc.
"""
if not rule_list:
rule_list = DEFAULT_RULE_LIST
return Rules(rule_list)
| 20.093168
| 78
| 0.590108
|
from ggrc.snapshotter.datastructures import Attr
class Types(object):
all = {
"AccessGroup",
"AccountBalance",
"Contract",
"Control",
"DataAsset",
"Facility",
"Market",
"Objective",
"OrgGroup",
"Policy",
"Process",
"Product",
"Project",
"Regulation",
"Requirement",
"Standard",
"System",
"Vendor",
"Risk",
"TechnologyEnvironment",
"Threat",
"Metric",
"ProductGroup",
"KeyReport",
}
parents = {
"Audit",
}
scoped = {
"Assessment",
}
trans_scope = {
"Issue",
}
ignore = {
"Assessment",
"AssessmentTemplate",
"Issue",
"Workflow",
"Audit",
"Person"
}
external = {
"AccessGroup",
"AccountBalance",
"DataAsset",
"Facility",
"KeyReport",
"Market",
"Metric",
"OrgGroup",
"Process",
"Product",
"ProductGroup",
"Project",
"System",
"Vendor",
"TechnologyEnvironment",
"Control",
"Risk",
}
@classmethod
def internal_types(cls):
return cls.all - cls.external
@classmethod
def external_types(cls):
return cls.external
class Rules(object):
def __init__(self, rule_list):
self.rules = dict()
for parents, fstdeg, snddeg in rule_list:
for parent in parents:
self.rules[parent] = {
"fst": fstdeg,
"snd": snddeg
}
DEFAULT_RULE_LIST = [
[
{"Audit"},
{Attr("program")},
Types.all - Types.ignore
]
]
def get_rules(rule_list=None):
if not rule_list:
rule_list = DEFAULT_RULE_LIST
return Rules(rule_list)
| true
| true
|
79094d19015e9b63c663e0d024f66a89371ff799
| 3,754
|
py
|
Python
|
github/get/zup-insights/src/formula/formula.py
|
GuillaumeFalourd/formulas-insights
|
c43f8f96e28343ab0919e10d7dc26b2dfeb0792b
|
[
"Apache-2.0"
] | 5
|
2020-09-30T19:20:42.000Z
|
2022-02-25T22:20:30.000Z
|
github/get/zup-insights/src/formula/formula.py
|
GuillaumeFalourd/formulas-insights
|
c43f8f96e28343ab0919e10d7dc26b2dfeb0792b
|
[
"Apache-2.0"
] | 5
|
2020-09-28T21:53:07.000Z
|
2021-05-06T14:58:10.000Z
|
github/get/zup-insights/src/formula/formula.py
|
GuillaumeFalourd/formulas-insights
|
c43f8f96e28343ab0919e10d7dc26b2dfeb0792b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
import datetime
import inquirer
import requests
import re
import csv
import os
import json
repositories = [
"beagle",
"beagle-web-react",
"beagle-web-core",
"beagle-web-angular",
"charlescd",
"charlescd-docs",
"horusec",
"horusec-engine-docs",
"ritchie-cli",
"ritchie-formulas",
"ritchie-formulas-demo"
]
def run(token):
insights = []
authorization = f"token {token}"
headers = {
"Accept": "application/vnd.github.v3+json",
"Authorization" : authorization,
}
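    # Note: the /traffic endpoints queried below generally require the token to have
    # push access to each repository; the other calls work with read access.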
for repository in repositories:
repo_url = f"https://api.github.com/repos/ZupIT/{repository}"
print(f"🐙 Getting insights for ZupIT's \033[36m{repository}\033[0m repository.")
traffic = requests.get(
url = repo_url + "/traffic/views",
headers = headers,
).json()
clones = requests.get(
url = repo_url + "/traffic/clones",
headers = headers,
).json()
contributors = requests.get(
url = repo_url + "/contributors",
headers = headers,
).json()
repo_stats = requests.get(
url = repo_url,
headers = headers,
).json()
try:
clones = clones["count"]
except (IndexError, KeyError) :
clones = "-"
try:
forks = repo_stats["forks_count"]
except (IndexError, KeyError):
forks = "-"
try:
stars = repo_stats["stargazers_count"]
except (IndexError, KeyError):
stars = "-"
try:
watchers = repo_stats["subscribers_count"]
except (IndexError, KeyError):
watchers = "-"
try:
views = traffic["count"]
except (IndexError, KeyError):
views = "-"
try:
uniques = traffic["uniques"]
except (IndexError, KeyError):
uniques = "-"
insights.append(
{
"repo": repository,
"views": views,
"uniques": uniques,
"clones": clones,
"contributors": len(contributors),
"contributors_list": contributors,
"forks": forks,
"stars": stars,
"watchers": watchers,
}
)
create_csv_file(insights)
def get_repositories(url, headers):
result = []
r = requests.get(
url = url,
headers = headers
)
if "next" in r.links :
result += get_repositories(r.links["next"]["url"], headers)
for data in r.json():
result.append(data["name"])
return result
def create_csv_file(insights):
current_date = datetime.datetime.now()
current_date_format = current_date.strftime("%m-%d-%Y-%Hh%M")
current_date_format_string = str(current_date_format)
filename = "zup-insights-" + current_date_format_string + ".csv"
file = open(filename, 'w+', newline ='')
with file:
header = ["Repository", "Views (14d)", "Uniques (14d)", "Clones (14d)", "Contributors", "Forks", "Stars", "Watchers"]
writer = csv.DictWriter(file, fieldnames = header)
writer.writeheader()
file = open(filename, 'a+', newline ='')
with file:
for insight in insights:
data = [[insight["repo"], insight["views"], insight["uniques"], insight["clones"], insight["contributors"], insight["forks"], insight["stars"], insight["watchers"]]]
write = csv.writer(file)
write.writerows(data)
print(f"\n\033[1m✅ Successfully generated \033[4m{filename}\033[0m\033[1m file for ZupIT's repositories\033[0m")
| 26.43662
| 177
| 0.546883
|
import datetime
import inquirer
import requests
import re
import csv
import os
import json
repositories = [
"beagle",
"beagle-web-react",
"beagle-web-core",
"beagle-web-angular",
"charlescd",
"charlescd-docs",
"horusec",
"horusec-engine-docs",
"ritchie-cli",
"ritchie-formulas",
"ritchie-formulas-demo"
]
def run(token):
insights = []
authorization = f"token {token}"
headers = {
"Accept": "application/vnd.github.v3+json",
"Authorization" : authorization,
}
for repository in repositories:
repo_url = f"https://api.github.com/repos/ZupIT/{repository}"
print(f"🐙 Getting insights for ZupIT's \033[36m{repository}\033[0m repository.")
traffic = requests.get(
url = repo_url + "/traffic/views",
headers = headers,
).json()
clones = requests.get(
url = repo_url + "/traffic/clones",
headers = headers,
).json()
contributors = requests.get(
url = repo_url + "/contributors",
headers = headers,
).json()
repo_stats = requests.get(
url = repo_url,
headers = headers,
).json()
try:
clones = clones["count"]
except (IndexError, KeyError) :
clones = "-"
try:
forks = repo_stats["forks_count"]
except (IndexError, KeyError):
forks = "-"
try:
stars = repo_stats["stargazers_count"]
except (IndexError, KeyError):
stars = "-"
try:
watchers = repo_stats["subscribers_count"]
except (IndexError, KeyError):
watchers = "-"
try:
views = traffic["count"]
except (IndexError, KeyError):
views = "-"
try:
uniques = traffic["uniques"]
except (IndexError, KeyError):
uniques = "-"
insights.append(
{
"repo": repository,
"views": views,
"uniques": uniques,
"clones": clones,
"contributors": len(contributors),
"contributors_list": contributors,
"forks": forks,
"stars": stars,
"watchers": watchers,
}
)
create_csv_file(insights)
def get_repositories(url, headers):
result = []
r = requests.get(
url = url,
headers = headers
)
if "next" in r.links :
result += get_repositories(r.links["next"]["url"], headers)
for data in r.json():
result.append(data["name"])
return result
def create_csv_file(insights):
current_date = datetime.datetime.now()
current_date_format = current_date.strftime("%m-%d-%Y-%Hh%M")
current_date_format_string = str(current_date_format)
filename = "zup-insights-" + current_date_format_string + ".csv"
file = open(filename, 'w+', newline ='')
with file:
header = ["Repository", "Views (14d)", "Uniques (14d)", "Clones (14d)", "Contributors", "Forks", "Stars", "Watchers"]
writer = csv.DictWriter(file, fieldnames = header)
writer.writeheader()
file = open(filename, 'a+', newline ='')
with file:
for insight in insights:
data = [[insight["repo"], insight["views"], insight["uniques"], insight["clones"], insight["contributors"], insight["forks"], insight["stars"], insight["watchers"]]]
write = csv.writer(file)
write.writerows(data)
print(f"\n\033[1m✅ Successfully generated \033[4m{filename}\033[0m\033[1m file for ZupIT's repositories\033[0m")
| true
| true
|
79094d564680b1cedceb022e0d8bc957fd07bd8d
| 486
|
py
|
Python
|
Python/Sort_Visualizer/bubbleSort.py
|
HarshOza36/hacktoberfest2021
|
c8e115815beb2d2372d0646b1c2c8eda3dac2972
|
[
"CC0-1.0"
] | null | null | null |
Python/Sort_Visualizer/bubbleSort.py
|
HarshOza36/hacktoberfest2021
|
c8e115815beb2d2372d0646b1c2c8eda3dac2972
|
[
"CC0-1.0"
] | null | null | null |
Python/Sort_Visualizer/bubbleSort.py
|
HarshOza36/hacktoberfest2021
|
c8e115815beb2d2372d0646b1c2c8eda3dac2972
|
[
"CC0-1.0"
] | null | null | null |
import time
def bubblesort_Alg(arr, drawData, timeSpeed):
for i in range(len(arr)-1):
for j in range(len(arr)-1):
if(arr[j] > arr[j+1]):
arr[j], arr[j+1] = arr[j+1], arr[j]
# To draw the bars
drawData(arr, ['red' if x == j or x == j +
1 else 'blue' for x in range(len(arr))])
time.sleep(timeSpeed)
drawData(arr, ['red' for i in range(len(arr))])
return arr
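# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# bubblesort_Alg expects a drawData callback supplied by the Tkinter visualizer.
# A no-op callback (a name assumed here only for this example) lets the routine
# be exercised without any GUI.
if __name__ == "__main__":
    def _no_draw(data, colors):
        pass  # the real visualizer would redraw the bars here
    print(bubblesort_Alg([5, 1, 4, 2, 3], _no_draw, 0))  # expected: [1, 2, 3, 4, 5]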
| 32.4
| 71
| 0.475309
|
import time
def bubblesort_Alg(arr, drawData, timeSpeed):
for i in range(len(arr)-1):
for j in range(len(arr)-1):
if(arr[j] > arr[j+1]):
arr[j], arr[j+1] = arr[j+1], arr[j]
drawData(arr, ['red' if x == j or x == j +
1 else 'blue' for x in range(len(arr))])
time.sleep(timeSpeed)
drawData(arr, ['red' for i in range(len(arr))])
return arr
| true
| true
|
79094e0a91a0c10b1f15f7ca1f4ae92f88de0dcc
| 1,679
|
py
|
Python
|
LeetCode/LeetCode_Python-master/LeetCode_Python-master/Algorithm-Easy/155_Min_Stack.py
|
Sycamore-City-passerby/ML
|
605cfc70bdda2c99e5f1c16b25812b59c98a72ad
|
[
"MIT"
] | null | null | null |
LeetCode/LeetCode_Python-master/LeetCode_Python-master/Algorithm-Easy/155_Min_Stack.py
|
Sycamore-City-passerby/ML
|
605cfc70bdda2c99e5f1c16b25812b59c98a72ad
|
[
"MIT"
] | null | null | null |
LeetCode/LeetCode_Python-master/LeetCode_Python-master/Algorithm-Easy/155_Min_Stack.py
|
Sycamore-City-passerby/ML
|
605cfc70bdda2c99e5f1c16b25812b59c98a72ad
|
[
"MIT"
] | null | null | null |
class MinStack(object):
def __init__(self):
"""
initialize your data structure here.
"""
self.stack1 = []
self.stack2 = []
def push(self, x):
"""
:type x: int
:rtype: void
"""
self.stack1.append(x)
if len(self.stack2) == 0 or x <= self.stack2[-1]:
self.stack2.append(x)
def pop(self):
"""
:rtype: void
"""
top = self.stack1[-1]
self.stack1.pop()
if top == self.stack2[-1]:
self.stack2.pop()
def top(self):
"""
:rtype: int
"""
return self.stack1[-1]
def getMin(self):
"""
:rtype: int
"""
return self.stack2[-1]
# Your MinStack object will be instantiated and called as such:
# obj = MinStack()
# obj.push(x)
# obj.pop()
# param_3 = obj.top()
# param_4 = obj.getMin()
"""
Time Complexity = O(1) per operation
Space Complexity = O(n)
Design a stack that supports push, pop, top, and retrieving the minimum element in constant time.
push(x) -- Push element x onto stack.
pop() -- Removes the element on top of the stack.
top() -- Get the top element.
getMin() -- Retrieve the minimum element in the stack.
Example:
MinStack minStack = new MinStack();
minStack.push(-2);
minStack.push(0);
minStack.push(-3);
minStack.getMin(); --> Returns -3.
minStack.pop();
minStack.top(); --> Returns 0.
minStack.getMin(); --> Returns -2.
"""
| 22.689189
| 109
| 0.48243
|
class MinStack(object):
def __init__(self):
self.stack1 = []
self.stack2 = []
def push(self, x):
self.stack1.append(x)
if len(self.stack2) == 0 or x <= self.stack2[-1]:
self.stack2.append(x)
def pop(self):
top = self.stack1[-1]
self.stack1.pop()
if top == self.stack2[-1]:
self.stack2.pop()
def top(self):
return self.stack1[-1]
def getMin(self):
return self.stack2[-1]
| true
| true
|
79094f65da54a8db48158acbd24fb4d77acac004
| 22,392
|
py
|
Python
|
src/sage/combinat/designs/latin_squares.py
|
bopopescu/Sage-8
|
71be00ad5f25ca95381fae7cce96421ffdd43425
|
[
"BSL-1.0"
] | null | null | null |
src/sage/combinat/designs/latin_squares.py
|
bopopescu/Sage-8
|
71be00ad5f25ca95381fae7cce96421ffdd43425
|
[
"BSL-1.0"
] | null | null | null |
src/sage/combinat/designs/latin_squares.py
|
bopopescu/Sage-8
|
71be00ad5f25ca95381fae7cce96421ffdd43425
|
[
"BSL-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
r"""
Mutually Orthogonal Latin Squares (MOLS)
The main function of this module is :func:`mutually_orthogonal_latin_squares`
and can be can be used to generate MOLS (or check that they exist)::
sage: MOLS = designs.mutually_orthogonal_latin_squares(4,8)
For more information on MOLS, see the :wikipedia:`Wikipedia entry on MOLS
<Graeco-Latin_square#Mutually_orthogonal_Latin_squares>`. If you are only
interested by latin squares, see :mod:`~sage.combinat.matrices.latin`.
The functions defined here are
.. csv-table::
:class: contentstable
:widths: 30, 70
:delim: |
:meth:`mutually_orthogonal_latin_squares` | Return `k` Mutually Orthogonal `n\times n` Latin Squares.
    :meth:`are_mutually_orthogonal_latin_squares` | Check that the list ``l`` of matrices are MOLS.
:meth:`latin_square_product` | Return the product of two (or more) latin squares.
:meth:`MOLS_table` | Prints the MOLS table.
**Table of MOLS**
Sage can produce a table of MOLS similar to the one from the Handbook of
Combinatorial Designs [DesignHandbook]_ (`available here
<http://books.google.fr/books?id=S9FA9rq1BgoC&dq=handbook%20combinatorial%20designs%20MOLS%2010000&pg=PA176>`_).
::
sage: from sage.combinat.designs.latin_squares import MOLS_table
sage: MOLS_table(600) # long time
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
________________________________________________________________________________
0| +oo +oo 1 2 3 4 1 6 7 8 2 10 5 12 4 4 15 16 5 18
20| 4 5 3 22 7 24 4 26 5 28 4 30 31 5 4 5 8 36 4 5
40| 7 40 5 42 5 6 4 46 8 48 6 5 5 52 5 6 7 7 5 58
60| 5 60 5 6 63 7 5 66 5 6 6 70 7 72 5 7 6 6 6 78
80| 9 80 8 82 6 6 6 6 7 88 6 7 6 6 6 6 7 96 6 8
100| 8 100 6 102 7 7 6 106 6 108 6 6 13 112 6 7 6 8 6 6
120| 7 120 6 6 6 124 6 126 127 7 6 130 6 7 6 7 7 136 6 138
140| 6 7 6 10 10 7 6 7 6 148 6 150 7 8 8 7 6 156 7 6
160| 9 7 6 162 6 7 6 166 7 168 6 8 6 172 6 6 14 9 6 178
180| 6 180 6 6 7 9 6 10 6 8 6 190 7 192 6 7 6 196 6 198
200| 7 7 6 7 6 8 6 8 14 11 10 210 6 7 6 7 7 8 6 10
220| 6 12 6 222 13 8 6 226 6 228 6 7 7 232 6 7 6 7 6 238
240| 7 240 6 242 6 7 6 12 7 7 6 250 6 12 9 7 255 256 6 12
260| 6 8 8 262 7 8 7 10 7 268 7 270 15 16 6 13 10 276 6 9
280| 7 280 6 282 6 12 6 7 15 288 6 6 6 292 6 6 7 10 10 12
300| 7 7 7 7 15 15 6 306 7 7 7 310 7 312 7 10 7 316 7 10
320| 15 15 6 16 8 12 6 7 7 9 6 330 7 8 7 6 7 336 6 7
340| 6 10 10 342 7 7 6 346 6 348 8 12 18 352 6 9 7 9 6 358
360| 7 360 6 7 7 7 6 366 15 15 7 15 7 372 7 15 7 13 7 378
380| 7 12 7 382 15 15 7 15 7 388 7 16 7 7 7 7 8 396 7 7
400| 15 400 7 15 11 8 7 15 8 408 7 13 8 12 10 9 18 15 7 418
420| 7 420 7 15 7 16 6 7 7 7 6 430 15 432 6 15 6 18 7 438
440| 7 15 7 442 7 13 7 11 15 448 7 15 7 7 7 15 7 456 7 16
460| 7 460 7 462 15 15 7 466 8 8 7 15 7 15 10 18 7 15 6 478
480| 15 15 6 15 8 7 6 486 7 15 6 490 6 16 6 7 15 15 6 498
500| 7 8 9 502 7 15 6 15 7 508 6 15 511 18 7 15 8 12 8 15
520| 8 520 10 522 12 15 8 16 15 528 7 15 8 12 7 15 8 15 10 15
540| 12 540 7 15 18 7 7 546 7 8 7 18 7 7 7 7 7 556 7 12
560| 15 7 7 562 7 7 6 7 7 568 6 570 7 7 15 22 8 576 7 7
580| 7 8 7 10 7 8 7 586 7 18 17 7 15 592 8 15 7 7 8 598
Comparison with the results from the Handbook of Combinatorial Designs (2ed)
[DesignHandbook]_::
sage: MOLS_table(600,compare=True) # long time
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
________________________________________________________________________________
0| + +
20|
40|
60| +
80|
100|
120|
140|
160|
180|
200| -
220|
240|
260|
280|
300|
320| -
340|
360| - -
380| -
400|
420| -
440|
460|
480|
500| -
520|
540|
560|
580|
TODO:
* Look at [ColDin01]_.
REFERENCES:
.. [Stinson2004] Douglas R. Stinson,
Combinatorial designs: construction and analysis,
Springer, 2004.
.. [ColDin01] Charles Colbourn, Jeffrey Dinitz,
Mutually orthogonal latin squares: a brief survey of constructions,
Volume 95, Issues 1-2, Pages 9-48,
Journal of Statistical Planning and Inference,
Springer, 1 May 2001.
Functions
---------
"""
from sage.categories.sets_cat import EmptySetError
from sage.misc.unknown import Unknown
def are_mutually_orthogonal_latin_squares(l, verbose=False):
r"""
    Check whether the list of matrices in ``l`` form mutually orthogonal latin
squares.
INPUT:
- ``verbose`` - if ``True`` then print why the list of matrices provided are
not mutually orthogonal latin squares
EXAMPLES::
sage: from sage.combinat.designs.latin_squares import are_mutually_orthogonal_latin_squares
sage: m1 = matrix([[0,1,2],[2,0,1],[1,2,0]])
sage: m2 = matrix([[0,1,2],[1,2,0],[2,0,1]])
sage: m3 = matrix([[0,1,2],[2,0,1],[1,2,0]])
sage: are_mutually_orthogonal_latin_squares([m1,m2])
True
sage: are_mutually_orthogonal_latin_squares([m1,m3])
False
sage: are_mutually_orthogonal_latin_squares([m2,m3])
True
sage: are_mutually_orthogonal_latin_squares([m1,m2,m3], verbose=True)
Squares 0 and 2 are not orthogonal
False
sage: m = designs.mutually_orthogonal_latin_squares(7,8)
sage: are_mutually_orthogonal_latin_squares(m)
True
TESTS:
Not a latin square::
sage: m1 = matrix([[0,1,0],[2,0,1],[1,2,0]])
sage: m2 = matrix([[0,1,2],[1,2,0],[2,0,1]])
sage: are_mutually_orthogonal_latin_squares([m1,m2], verbose=True)
Matrix 0 is not row latin
False
sage: m1 = matrix([[0,1,2],[1,0,2],[1,2,0]])
sage: are_mutually_orthogonal_latin_squares([m1,m2], verbose=True)
Matrix 0 is not column latin
False
sage: m1 = matrix([[0,0,0],[1,1,1],[2,2,2]])
sage: m2 = matrix([[0,1,2],[0,1,2],[0,1,2]])
sage: are_mutually_orthogonal_latin_squares([m1,m2])
False
"""
if not l:
raise ValueError("the list must be non empty")
n = l[0].ncols()
k = len(l)
if any(M.ncols() != n or M.nrows() != n for M in l):
if verbose:
print "Not all matrices are square matrices of the same dimensions"
return False
# Check that all matrices are latin squares
for i,M in enumerate(l):
if any(len(set(R)) != n for R in M):
if verbose:
print "Matrix {} is not row latin".format(i)
return False
if any(len(set(R)) != n for R in zip(*M)):
if verbose:
print "Matrix {} is not column latin".format(i)
return False
from designs_pyx import is_orthogonal_array
return is_orthogonal_array(zip(*[[x for R in M for x in R] for M in l]),k,n, verbose=verbose, terminology="MOLS")
def mutually_orthogonal_latin_squares(k,n, partitions = False, check = True, existence=False):
r"""
Return `k` Mutually Orthogonal `n\times n` Latin Squares (MOLS).
For more information on Mutually Orthogonal Latin Squares, see
:mod:`~sage.combinat.designs.latin_squares`.
INPUT:
- ``k`` (integer) -- number of MOLS. If ``k=None`` it is set to the largest
value available.
- ``n`` (integer) -- size of the latin square.
- ``partition`` (boolean) -- a Latin Square can be seen as 3 partitions of
the `n^2` cells of the array into `n` sets of size `n`, respectively :
* The partition of rows
* The partition of columns
* The partition of number (cells numbered with 0, cells numbered with 1,
...)
These partitions have the additional property that any two sets from
different partitions intersect on exactly one element.
When ``partition`` is set to ``True``, this function returns a list of `k+2`
partitions satisfying this intersection property instead of the `k+2` MOLS
(though the data is exactly the same in both cases).
- ``existence`` (boolean) -- instead of building the design, return:
- ``True`` -- meaning that Sage knows how to build the design
- ``Unknown`` -- meaning that Sage does not know how to build the
design, but that the design may exist (see :mod:`sage.misc.unknown`).
- ``False`` -- meaning that the design does not exist.
.. NOTE::
When ``k=None`` and ``existence=True`` the function returns an
integer, i.e. the largest `k` such that we can build a `k` MOLS of
order `n`.
- ``check`` -- (boolean) Whether to check that output is correct before
returning it. As this is expected to be useless (but we are cautious
guys), you may want to disable it whenever you want speed. Set to
``True`` by default.
EXAMPLES::
sage: designs.mutually_orthogonal_latin_squares(4,5)
[
[0 2 4 1 3] [0 3 1 4 2] [0 4 3 2 1] [0 1 2 3 4]
[4 1 3 0 2] [3 1 4 2 0] [2 1 0 4 3] [4 0 1 2 3]
[3 0 2 4 1] [1 4 2 0 3] [4 3 2 1 0] [3 4 0 1 2]
[2 4 1 3 0] [4 2 0 3 1] [1 0 4 3 2] [2 3 4 0 1]
[1 3 0 2 4], [2 0 3 1 4], [3 2 1 0 4], [1 2 3 4 0]
]
sage: designs.mutually_orthogonal_latin_squares(3,7)
[
[0 2 4 6 1 3 5] [0 3 6 2 5 1 4] [0 4 1 5 2 6 3]
[6 1 3 5 0 2 4] [5 1 4 0 3 6 2] [4 1 5 2 6 3 0]
[5 0 2 4 6 1 3] [3 6 2 5 1 4 0] [1 5 2 6 3 0 4]
[4 6 1 3 5 0 2] [1 4 0 3 6 2 5] [5 2 6 3 0 4 1]
[3 5 0 2 4 6 1] [6 2 5 1 4 0 3] [2 6 3 0 4 1 5]
[2 4 6 1 3 5 0] [4 0 3 6 2 5 1] [6 3 0 4 1 5 2]
[1 3 5 0 2 4 6], [2 5 1 4 0 3 6], [3 0 4 1 5 2 6]
]
sage: designs.mutually_orthogonal_latin_squares(2,5,partitions=True)
[[[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]],
[[0, 5, 10, 15, 20],
[1, 6, 11, 16, 21],
[2, 7, 12, 17, 22],
[3, 8, 13, 18, 23],
[4, 9, 14, 19, 24]],
[[0, 8, 11, 19, 22],
[3, 6, 14, 17, 20],
[1, 9, 12, 15, 23],
[4, 7, 10, 18, 21],
[2, 5, 13, 16, 24]],
[[0, 9, 13, 17, 21],
[2, 6, 10, 19, 23],
[4, 8, 12, 16, 20],
[1, 5, 14, 18, 22],
[3, 7, 11, 15, 24]]]
What is the maximum number of MOLS of size 8 that Sage knows how to build?::
sage: designs.orthogonal_arrays.largest_available_k(8)-2
7
If you only want to know if Sage is able to build a given set of
MOLS, query the ``orthogonal_arrays.*`` functions::
sage: designs.orthogonal_arrays.is_available(5+2, 5) # 5 MOLS of order 5
False
sage: designs.orthogonal_arrays.is_available(4+2,6) # 4 MOLS of order 6
False
Sage, however, is not able to prove that the second MOLS do not exist::
sage: designs.orthogonal_arrays.exists(4+2,6) # 4 MOLS of order 6
Unknown
    If you ask for such a MOLS then you will respectively get an informative
``EmptySetError`` or ``NotImplementedError``::
sage: designs.mutually_orthogonal_latin_squares(5, 5)
Traceback (most recent call last):
...
EmptySetError: There exist at most n-1 MOLS of size n if n>=2.
sage: designs.mutually_orthogonal_latin_squares(4,6)
Traceback (most recent call last):
...
NotImplementedError: I don't know how to build 4 MOLS of order 6
TESTS:
The special case `n=1`::
sage: designs.mutually_orthogonal_latin_squares(3, 1)
[[0], [0], [0]]
sage: designs.mutually_orthogonal_latin_squares(None, 1)
Traceback (most recent call last):
...
ValueError: there are no bound on k when 0<=n<=1
sage: designs.mutually_orthogonal_latin_squares(2,10)
[
[1 8 9 0 2 4 6 3 5 7] [1 7 6 5 0 9 8 2 3 4]
[7 2 8 9 0 3 5 4 6 1] [8 2 1 7 6 0 9 3 4 5]
[6 1 3 8 9 0 4 5 7 2] [9 8 3 2 1 7 0 4 5 6]
[5 7 2 4 8 9 0 6 1 3] [0 9 8 4 3 2 1 5 6 7]
[0 6 1 3 5 8 9 7 2 4] [2 0 9 8 5 4 3 6 7 1]
[9 0 7 2 4 6 8 1 3 5] [4 3 0 9 8 6 5 7 1 2]
[8 9 0 1 3 5 7 2 4 6] [6 5 4 0 9 8 7 1 2 3]
[2 3 4 5 6 7 1 8 9 0] [3 4 5 6 7 1 2 8 0 9]
[3 4 5 6 7 1 2 0 8 9] [5 6 7 1 2 3 4 0 9 8]
[4 5 6 7 1 2 3 9 0 8], [7 1 2 3 4 5 6 9 8 0]
]
"""
from sage.combinat.designs.orthogonal_arrays import orthogonal_array
from sage.matrix.constructor import Matrix
from sage.rings.arith import factor
from database import MOLS_constructions
    # If k is None, we find the largest available
if k is None:
from sage.misc.superseded import deprecation
deprecation(17034,"please use designs.orthogonal_arrays.largest_available_k instead of k=None")
if n == 0 or n == 1:
if existence:
from sage.rings.infinity import Infinity
return Infinity
raise ValueError("there are no bound on k when 0<=n<=1")
k = orthogonal_array(None,n,existence=True) - 2
if existence:
return k
if existence:
from sage.misc.superseded import deprecation
deprecation(17034,"please use designs.orthogonal_arrays.is_available/exists instead of existence=True")
if n == 1:
if existence:
return True
matrices = [Matrix([[0]])]*k
elif k >= n:
if existence:
return False
raise EmptySetError("There exist at most n-1 MOLS of size n if n>=2.")
elif n in MOLS_constructions and k <= MOLS_constructions[n][0]:
if existence:
return True
_, construction = MOLS_constructions[n]
matrices = construction()[:k]
elif orthogonal_array(k+2,n,existence=True) is not Unknown:
# Forwarding non-existence results
if orthogonal_array(k+2,n,existence=True):
if existence:
return True
else:
if existence:
return False
raise EmptySetError("There does not exist {} MOLS of order {}!".format(k,n))
OA = orthogonal_array(k+2,n,check=False)
OA.sort() # make sure that the first two columns are "11, 12, ..., 1n, 21, 22, ..."
# We first define matrices as lists of n^2 values
matrices = [[] for _ in range(k)]
for L in OA:
for i in range(2,k+2):
matrices[i-2].append(L[i])
# The real matrices
matrices = [[M[i*n:(i+1)*n] for i in range(n)] for M in matrices]
matrices = [Matrix(M) for M in matrices]
else:
if existence:
return Unknown
raise NotImplementedError("I don't know how to build {} MOLS of order {}".format(k,n))
if check:
assert are_mutually_orthogonal_latin_squares(matrices)
# partitions have been requested but have not been computed yet
if partitions is True:
partitions = [[[i*n+j for j in range(n)] for i in range(n)],
[[j*n+i for j in range(n)] for i in range(n)]]
for m in matrices:
partition = [[] for i in range(n)]
for i in range(n):
for j in range(n):
partition[m[i,j]].append(i*n+j)
partitions.append(partition)
if partitions:
return partitions
else:
return matrices
def latin_square_product(M,N,*others):
r"""
Return the product of two (or more) latin squares.
Given two Latin Squares `M,N` of respective sizes `m,n`, the direct product
`M\times N` of size `mn` is defined by `(M\times
N)((i_1,i_2),(j_1,j_2))=(M(i_1,j_1),N(i_2,j_2))` where `i_1,j_1\in [m],
i_2,j_2\in [n]`
Each pair of values `(i,j)\in [m]\times [n]` is then relabeled to `in+j`.
This is Lemma 6.25 of [Stinson2004]_.
INPUT:
An arbitrary number of latin squares (greater than 2).
EXAMPLES::
sage: from sage.combinat.designs.latin_squares import latin_square_product
sage: m=designs.mutually_orthogonal_latin_squares(3,4)[0]
sage: latin_square_product(m,m,m)
64 x 64 sparse matrix over Integer Ring (use the '.str()' method to see the entries)
"""
from sage.matrix.constructor import Matrix
m = M.nrows()
n = N.nrows()
D = {((i,j),(ii,jj)):(M[i,ii],N[j,jj])
for i in range(m)
for ii in range(m)
for j in range(n)
for jj in range(n)}
L = lambda i_j: i_j[0] * n + i_j[1]
D = {(L(c[0]),L(c[1])): L(v) for c,v in D.iteritems()}
P = Matrix(D)
if others:
return latin_square_product(P, others[0],*others[1:])
else:
return P
def MOLS_table(start,stop=None,compare=False,width=None):
r"""
Prints the MOLS table that Sage can produce.
INPUT:
- ``start,stop`` (integers) -- print the table of MOLS for value of `n` such
that ``start<=n<stop``. If only one integer is given as input, it is
interpreted as the value of ``stop`` with ``start=0`` (same behaviour as
``range``).
    - ``compare`` (boolean) -- if set to ``True`` the MOLS table displays
with `+` and `-` entries its difference with the table from the
Handbook of Combinatorial Designs (2ed).
- ``width`` (integer) -- the width of each column of the table. By default,
it is computed from range of values determined by the parameters ``start``
and ``stop``.
EXAMPLES::
sage: from sage.combinat.designs.latin_squares import MOLS_table
sage: MOLS_table(100)
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
________________________________________________________________________________
0| +oo +oo 1 2 3 4 1 6 7 8 2 10 5 12 4 4 15 16 5 18
20| 4 5 3 22 7 24 4 26 5 28 4 30 31 5 4 5 8 36 4 5
40| 7 40 5 42 5 6 4 46 8 48 6 5 5 52 5 6 7 7 5 58
60| 5 60 5 6 63 7 5 66 5 6 6 70 7 72 5 7 6 6 6 78
80| 9 80 8 82 6 6 6 6 7 88 6 7 6 6 6 6 7 96 6 8
sage: MOLS_table(100, width=4)
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
____________________________________________________________________________________________________
0| +oo +oo 1 2 3 4 1 6 7 8 2 10 5 12 4 4 15 16 5 18
20| 4 5 3 22 7 24 4 26 5 28 4 30 31 5 4 5 8 36 4 5
40| 7 40 5 42 5 6 4 46 8 48 6 5 5 52 5 6 7 7 5 58
60| 5 60 5 6 63 7 5 66 5 6 6 70 7 72 5 7 6 6 6 78
80| 9 80 8 82 6 6 6 6 7 88 6 7 6 6 6 6 7 96 6 8
sage: MOLS_table(100, compare=True)
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
________________________________________________________________________________
0| + +
20|
40|
60| +
80|
sage: MOLS_table(50, 100, compare=True)
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
________________________________________________________________________________
40|
60| +
80|
"""
from orthogonal_arrays import largest_available_k
if stop is None:
start,stop = 0,start
# make start and stop be congruent to 0 mod 20
start = start - (start%20)
stop = stop-1
stop = stop + (20-(stop%20))
assert start%20 == 0 and stop%20 == 0
if stop <= start:
return
if compare:
from sage.env import SAGE_SHARE
handbook_file = open(SAGE_SHARE+"/combinatorial_designs/MOLS_table.txt",'r')
hb = [int(_) for _ in handbook_file.readlines()[9].split(',')]
handbook_file.close()
# choose an appropriate width (needs to be >= 3 because "+oo" should fit)
if width is None:
from sage.rings.integer import Integer
width = max(3,Integer(stop-1).ndigits(10))
print " "*(width+2) + "".join("{i:>{width}}".format(i=i,width=width) for i in range(20))
print " "*(width+1) + "_"*((width+1)*20),
for i in range(start,stop):
if i%20==0:
print "\n{:>{width}}|".format(i,width=width),
k = largest_available_k(i)-2
if compare:
if i < 2 or hb[i] == k:
c = ""
elif hb[i] < k:
c = "+"
else:
c = "-"
else:
if i < 2:
c = "+oo"
else:
c = k
print '{:>{width}}'.format(c,width=width),
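# --- Hedged illustration (added; not part of the original Sage module) --------
# A tiny, dependency-free sketch of the orthogonality property used throughout
# this module: two n x n latin squares are orthogonal exactly when superimposing
# them yields every ordered pair once. The helper name is an assumption made
# only for this example and it works on plain lists of lists.
def _pairs_are_all_distinct(M, N):
    n = len(M)
    pairs = set()
    for i in range(n):
        for j in range(n):
            pairs.add((M[i][j], N[i][j]))
    return len(pairs) == n * n
# Example (the two orthogonal 3x3 squares from the docstring of
# are_mutually_orthogonal_latin_squares):
#   _pairs_are_all_distinct([[0,1,2],[2,0,1],[1,2,0]],
#                           [[0,1,2],[1,2,0],[2,0,1]])   # -> True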
| 39.078534
| 117
| 0.535013
|
r"""
Mutually Orthogonal Latin Squares (MOLS)
The main function of this module is :func:`mutually_orthogonal_latin_squares`
and can be can be used to generate MOLS (or check that they exist)::
sage: MOLS = designs.mutually_orthogonal_latin_squares(4,8)
For more information on MOLS, see the :wikipedia:`Wikipedia entry on MOLS
<Graeco-Latin_square#Mutually_orthogonal_Latin_squares>`. If you are only
interested by latin squares, see :mod:`~sage.combinat.matrices.latin`.
The functions defined here are
.. csv-table::
:class: contentstable
:widths: 30, 70
:delim: |
:meth:`mutually_orthogonal_latin_squares` | Return `k` Mutually Orthogonal `n\times n` Latin Squares.
    :meth:`are_mutually_orthogonal_latin_squares` | Check that the list ``l`` of matrices are MOLS.
:meth:`latin_square_product` | Return the product of two (or more) latin squares.
:meth:`MOLS_table` | Prints the MOLS table.
**Table of MOLS**
Sage can produce a table of MOLS similar to the one from the Handbook of
Combinatorial Designs [DesignHandbook]_ (`available here
<http://books.google.fr/books?id=S9FA9rq1BgoC&dq=handbook%20combinatorial%20designs%20MOLS%2010000&pg=PA176>`_).
::
sage: from sage.combinat.designs.latin_squares import MOLS_table
sage: MOLS_table(600) # long time
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
________________________________________________________________________________
0| +oo +oo 1 2 3 4 1 6 7 8 2 10 5 12 4 4 15 16 5 18
20| 4 5 3 22 7 24 4 26 5 28 4 30 31 5 4 5 8 36 4 5
40| 7 40 5 42 5 6 4 46 8 48 6 5 5 52 5 6 7 7 5 58
60| 5 60 5 6 63 7 5 66 5 6 6 70 7 72 5 7 6 6 6 78
80| 9 80 8 82 6 6 6 6 7 88 6 7 6 6 6 6 7 96 6 8
100| 8 100 6 102 7 7 6 106 6 108 6 6 13 112 6 7 6 8 6 6
120| 7 120 6 6 6 124 6 126 127 7 6 130 6 7 6 7 7 136 6 138
140| 6 7 6 10 10 7 6 7 6 148 6 150 7 8 8 7 6 156 7 6
160| 9 7 6 162 6 7 6 166 7 168 6 8 6 172 6 6 14 9 6 178
180| 6 180 6 6 7 9 6 10 6 8 6 190 7 192 6 7 6 196 6 198
200| 7 7 6 7 6 8 6 8 14 11 10 210 6 7 6 7 7 8 6 10
220| 6 12 6 222 13 8 6 226 6 228 6 7 7 232 6 7 6 7 6 238
240| 7 240 6 242 6 7 6 12 7 7 6 250 6 12 9 7 255 256 6 12
260| 6 8 8 262 7 8 7 10 7 268 7 270 15 16 6 13 10 276 6 9
280| 7 280 6 282 6 12 6 7 15 288 6 6 6 292 6 6 7 10 10 12
300| 7 7 7 7 15 15 6 306 7 7 7 310 7 312 7 10 7 316 7 10
320| 15 15 6 16 8 12 6 7 7 9 6 330 7 8 7 6 7 336 6 7
340| 6 10 10 342 7 7 6 346 6 348 8 12 18 352 6 9 7 9 6 358
360| 7 360 6 7 7 7 6 366 15 15 7 15 7 372 7 15 7 13 7 378
380| 7 12 7 382 15 15 7 15 7 388 7 16 7 7 7 7 8 396 7 7
400| 15 400 7 15 11 8 7 15 8 408 7 13 8 12 10 9 18 15 7 418
420| 7 420 7 15 7 16 6 7 7 7 6 430 15 432 6 15 6 18 7 438
440| 7 15 7 442 7 13 7 11 15 448 7 15 7 7 7 15 7 456 7 16
460| 7 460 7 462 15 15 7 466 8 8 7 15 7 15 10 18 7 15 6 478
480| 15 15 6 15 8 7 6 486 7 15 6 490 6 16 6 7 15 15 6 498
500| 7 8 9 502 7 15 6 15 7 508 6 15 511 18 7 15 8 12 8 15
520| 8 520 10 522 12 15 8 16 15 528 7 15 8 12 7 15 8 15 10 15
540| 12 540 7 15 18 7 7 546 7 8 7 18 7 7 7 7 7 556 7 12
560| 15 7 7 562 7 7 6 7 7 568 6 570 7 7 15 22 8 576 7 7
580| 7 8 7 10 7 8 7 586 7 18 17 7 15 592 8 15 7 7 8 598
Comparison with the results from the Handbook of Combinatorial Designs (2ed)
[DesignHandbook]_::
sage: MOLS_table(600,compare=True) # long time
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
________________________________________________________________________________
0| + +
20|
40|
60| +
80|
100|
120|
140|
160|
180|
200| -
220|
240|
260|
280|
300|
320| -
340|
360| - -
380| -
400|
420| -
440|
460|
480|
500| -
520|
540|
560|
580|
TODO:
* Look at [ColDin01]_.
REFERENCES:
.. [Stinson2004] Douglas R. Stinson,
Combinatorial designs: construction and analysis,
Springer, 2004.
.. [ColDin01] Charles Colbourn, Jeffrey Dinitz,
Mutually orthogonal latin squares: a brief survey of constructions,
Volume 95, Issues 1-2, Pages 9-48,
Journal of Statistical Planning and Inference,
Springer, 1 May 2001.
Functions
---------
"""
from sage.categories.sets_cat import EmptySetError
from sage.misc.unknown import Unknown
def are_mutually_orthogonal_latin_squares(l, verbose=False):
r"""
    Check whether the list of matrices in ``l`` form mutually orthogonal latin
squares.
INPUT:
- ``verbose`` - if ``True`` then print why the list of matrices provided are
not mutually orthogonal latin squares
EXAMPLES::
sage: from sage.combinat.designs.latin_squares import are_mutually_orthogonal_latin_squares
sage: m1 = matrix([[0,1,2],[2,0,1],[1,2,0]])
sage: m2 = matrix([[0,1,2],[1,2,0],[2,0,1]])
sage: m3 = matrix([[0,1,2],[2,0,1],[1,2,0]])
sage: are_mutually_orthogonal_latin_squares([m1,m2])
True
sage: are_mutually_orthogonal_latin_squares([m1,m3])
False
sage: are_mutually_orthogonal_latin_squares([m2,m3])
True
sage: are_mutually_orthogonal_latin_squares([m1,m2,m3], verbose=True)
Squares 0 and 2 are not orthogonal
False
sage: m = designs.mutually_orthogonal_latin_squares(7,8)
sage: are_mutually_orthogonal_latin_squares(m)
True
TESTS:
Not a latin square::
sage: m1 = matrix([[0,1,0],[2,0,1],[1,2,0]])
sage: m2 = matrix([[0,1,2],[1,2,0],[2,0,1]])
sage: are_mutually_orthogonal_latin_squares([m1,m2], verbose=True)
Matrix 0 is not row latin
False
sage: m1 = matrix([[0,1,2],[1,0,2],[1,2,0]])
sage: are_mutually_orthogonal_latin_squares([m1,m2], verbose=True)
Matrix 0 is not column latin
False
sage: m1 = matrix([[0,0,0],[1,1,1],[2,2,2]])
sage: m2 = matrix([[0,1,2],[0,1,2],[0,1,2]])
sage: are_mutually_orthogonal_latin_squares([m1,m2])
False
"""
if not l:
raise ValueError("the list must be non empty")
n = l[0].ncols()
k = len(l)
if any(M.ncols() != n or M.nrows() != n for M in l):
if verbose:
print "Not all matrices are square matrices of the same dimensions"
return False
for i,M in enumerate(l):
if any(len(set(R)) != n for R in M):
if verbose:
print "Matrix {} is not row latin".format(i)
return False
if any(len(set(R)) != n for R in zip(*M)):
if verbose:
print "Matrix {} is not column latin".format(i)
return False
from designs_pyx import is_orthogonal_array
return is_orthogonal_array(zip(*[[x for R in M for x in R] for M in l]),k,n, verbose=verbose, terminology="MOLS")
def mutually_orthogonal_latin_squares(k,n, partitions = False, check = True, existence=False):
r"""
Return `k` Mutually Orthogonal `n\times n` Latin Squares (MOLS).
For more information on Mutually Orthogonal Latin Squares, see
:mod:`~sage.combinat.designs.latin_squares`.
INPUT:
- ``k`` (integer) -- number of MOLS. If ``k=None`` it is set to the largest
value available.
- ``n`` (integer) -- size of the latin square.
- ``partition`` (boolean) -- a Latin Square can be seen as 3 partitions of
the `n^2` cells of the array into `n` sets of size `n`, respectively :
* The partition of rows
* The partition of columns
* The partition of number (cells numbered with 0, cells numbered with 1,
...)
These partitions have the additional property that any two sets from
different partitions intersect on exactly one element.
When ``partition`` is set to ``True``, this function returns a list of `k+2`
partitions satisfying this intersection property instead of the `k+2` MOLS
(though the data is exactly the same in both cases).
- ``existence`` (boolean) -- instead of building the design, return:
- ``True`` -- meaning that Sage knows how to build the design
- ``Unknown`` -- meaning that Sage does not know how to build the
design, but that the design may exist (see :mod:`sage.misc.unknown`).
- ``False`` -- meaning that the design does not exist.
.. NOTE::
When ``k=None`` and ``existence=True`` the function returns an
integer, i.e. the largest `k` such that we can build a `k` MOLS of
order `n`.
- ``check`` -- (boolean) Whether to check that output is correct before
returning it. As this is expected to be useless (but we are cautious
guys), you may want to disable it whenever you want speed. Set to
``True`` by default.
EXAMPLES::
sage: designs.mutually_orthogonal_latin_squares(4,5)
[
[0 2 4 1 3] [0 3 1 4 2] [0 4 3 2 1] [0 1 2 3 4]
[4 1 3 0 2] [3 1 4 2 0] [2 1 0 4 3] [4 0 1 2 3]
[3 0 2 4 1] [1 4 2 0 3] [4 3 2 1 0] [3 4 0 1 2]
[2 4 1 3 0] [4 2 0 3 1] [1 0 4 3 2] [2 3 4 0 1]
[1 3 0 2 4], [2 0 3 1 4], [3 2 1 0 4], [1 2 3 4 0]
]
sage: designs.mutually_orthogonal_latin_squares(3,7)
[
[0 2 4 6 1 3 5] [0 3 6 2 5 1 4] [0 4 1 5 2 6 3]
[6 1 3 5 0 2 4] [5 1 4 0 3 6 2] [4 1 5 2 6 3 0]
[5 0 2 4 6 1 3] [3 6 2 5 1 4 0] [1 5 2 6 3 0 4]
[4 6 1 3 5 0 2] [1 4 0 3 6 2 5] [5 2 6 3 0 4 1]
[3 5 0 2 4 6 1] [6 2 5 1 4 0 3] [2 6 3 0 4 1 5]
[2 4 6 1 3 5 0] [4 0 3 6 2 5 1] [6 3 0 4 1 5 2]
[1 3 5 0 2 4 6], [2 5 1 4 0 3 6], [3 0 4 1 5 2 6]
]
sage: designs.mutually_orthogonal_latin_squares(2,5,partitions=True)
[[[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]],
[[0, 5, 10, 15, 20],
[1, 6, 11, 16, 21],
[2, 7, 12, 17, 22],
[3, 8, 13, 18, 23],
[4, 9, 14, 19, 24]],
[[0, 8, 11, 19, 22],
[3, 6, 14, 17, 20],
[1, 9, 12, 15, 23],
[4, 7, 10, 18, 21],
[2, 5, 13, 16, 24]],
[[0, 9, 13, 17, 21],
[2, 6, 10, 19, 23],
[4, 8, 12, 16, 20],
[1, 5, 14, 18, 22],
[3, 7, 11, 15, 24]]]
What is the maximum number of MOLS of size 8 that Sage knows how to build?::
sage: designs.orthogonal_arrays.largest_available_k(8)-2
7
If you only want to know if Sage is able to build a given set of
MOLS, query the ``orthogonal_arrays.*`` functions::
sage: designs.orthogonal_arrays.is_available(5+2, 5) # 5 MOLS of order 5
False
sage: designs.orthogonal_arrays.is_available(4+2,6) # 4 MOLS of order 6
False
Sage, however, is not able to prove that the second MOLS do not exist::
sage: designs.orthogonal_arrays.exists(4+2,6) # 4 MOLS of order 6
Unknown
    If you ask for such a MOLS then you will respectively get an informative
``EmptySetError`` or ``NotImplementedError``::
sage: designs.mutually_orthogonal_latin_squares(5, 5)
Traceback (most recent call last):
...
EmptySetError: There exist at most n-1 MOLS of size n if n>=2.
sage: designs.mutually_orthogonal_latin_squares(4,6)
Traceback (most recent call last):
...
NotImplementedError: I don't know how to build 4 MOLS of order 6
TESTS:
The special case `n=1`::
sage: designs.mutually_orthogonal_latin_squares(3, 1)
[[0], [0], [0]]
sage: designs.mutually_orthogonal_latin_squares(None, 1)
Traceback (most recent call last):
...
ValueError: there are no bound on k when 0<=n<=1
sage: designs.mutually_orthogonal_latin_squares(2,10)
[
[1 8 9 0 2 4 6 3 5 7] [1 7 6 5 0 9 8 2 3 4]
[7 2 8 9 0 3 5 4 6 1] [8 2 1 7 6 0 9 3 4 5]
[6 1 3 8 9 0 4 5 7 2] [9 8 3 2 1 7 0 4 5 6]
[5 7 2 4 8 9 0 6 1 3] [0 9 8 4 3 2 1 5 6 7]
[0 6 1 3 5 8 9 7 2 4] [2 0 9 8 5 4 3 6 7 1]
[9 0 7 2 4 6 8 1 3 5] [4 3 0 9 8 6 5 7 1 2]
[8 9 0 1 3 5 7 2 4 6] [6 5 4 0 9 8 7 1 2 3]
[2 3 4 5 6 7 1 8 9 0] [3 4 5 6 7 1 2 8 0 9]
[3 4 5 6 7 1 2 0 8 9] [5 6 7 1 2 3 4 0 9 8]
[4 5 6 7 1 2 3 9 0 8], [7 1 2 3 4 5 6 9 8 0]
]
"""
from sage.combinat.designs.orthogonal_arrays import orthogonal_array
from sage.matrix.constructor import Matrix
from sage.rings.arith import factor
from database import MOLS_constructions
    # If k is None, we find the largest available
if k is None:
from sage.misc.superseded import deprecation
deprecation(17034,"please use designs.orthogonal_arrays.largest_available_k instead of k=None")
if n == 0 or n == 1:
if existence:
from sage.rings.infinity import Infinity
return Infinity
raise ValueError("there are no bound on k when 0<=n<=1")
k = orthogonal_array(None,n,existence=True) - 2
if existence:
return k
if existence:
from sage.misc.superseded import deprecation
deprecation(17034,"please use designs.orthogonal_arrays.is_available/exists instead of existence=True")
if n == 1:
if existence:
return True
matrices = [Matrix([[0]])]*k
elif k >= n:
if existence:
return False
raise EmptySetError("There exist at most n-1 MOLS of size n if n>=2.")
elif n in MOLS_constructions and k <= MOLS_constructions[n][0]:
if existence:
return True
_, construction = MOLS_constructions[n]
matrices = construction()[:k]
elif orthogonal_array(k+2,n,existence=True) is not Unknown:
# Forwarding non-existence results
if orthogonal_array(k+2,n,existence=True):
if existence:
return True
else:
if existence:
return False
raise EmptySetError("There does not exist {} MOLS of order {}!".format(k,n))
OA = orthogonal_array(k+2,n,check=False)
OA.sort() # make sure that the first two columns are "11, 12, ..., 1n, 21, 22, ..."
# We first define matrices as lists of n^2 values
matrices = [[] for _ in range(k)]
for L in OA:
for i in range(2,k+2):
matrices[i-2].append(L[i])
# The real matrices
matrices = [[M[i*n:(i+1)*n] for i in range(n)] for M in matrices]
matrices = [Matrix(M) for M in matrices]
else:
if existence:
return Unknown
raise NotImplementedError("I don't know how to build {} MOLS of order {}".format(k,n))
if check:
assert are_mutually_orthogonal_latin_squares(matrices)
if partitions is True:
partitions = [[[i*n+j for j in range(n)] for i in range(n)],
[[j*n+i for j in range(n)] for i in range(n)]]
for m in matrices:
partition = [[] for i in range(n)]
for i in range(n):
for j in range(n):
partition[m[i,j]].append(i*n+j)
partitions.append(partition)
if partitions:
return partitions
else:
return matrices
def latin_square_product(M,N,*others):
r"""
Return the product of two (or more) latin squares.
Given two Latin Squares `M,N` of respective sizes `m,n`, the direct product
`M\times N` of size `mn` is defined by `(M\times
N)((i_1,i_2),(j_1,j_2))=(M(i_1,j_1),N(i_2,j_2))` where `i_1,j_1\in [m],
i_2,j_2\in [n]`
Each pair of values `(i,j)\in [m]\times [n]` is then relabeled to `in+j`.
This is Lemma 6.25 of [Stinson2004]_.
INPUT:
An arbitrary number of latin squares (greater than 2).
EXAMPLES::
sage: from sage.combinat.designs.latin_squares import latin_square_product
sage: m=designs.mutually_orthogonal_latin_squares(3,4)[0]
sage: latin_square_product(m,m,m)
64 x 64 sparse matrix over Integer Ring (use the '.str()' method to see the entries)
"""
from sage.matrix.constructor import Matrix
m = M.nrows()
n = N.nrows()
D = {((i,j),(ii,jj)):(M[i,ii],N[j,jj])
for i in range(m)
for ii in range(m)
for j in range(n)
for jj in range(n)}
L = lambda i_j: i_j[0] * n + i_j[1]
D = {(L(c[0]),L(c[1])): L(v) for c,v in D.iteritems()}
P = Matrix(D)
if others:
return latin_square_product(P, others[0],*others[1:])
else:
return P
def MOLS_table(start,stop=None,compare=False,width=None):
r"""
Prints the MOLS table that Sage can produce.
INPUT:
- ``start,stop`` (integers) -- print the table of MOLS for value of `n` such
that ``start<=n<stop``. If only one integer is given as input, it is
interpreted as the value of ``stop`` with ``start=0`` (same behaviour as
``range``).
    - ``compare`` (boolean) -- if set to ``True`` the MOLS table displays
with `+` and `-` entries its difference with the table from the
Handbook of Combinatorial Designs (2ed).
- ``width`` (integer) -- the width of each column of the table. By default,
it is computed from range of values determined by the parameters ``start``
and ``stop``.
EXAMPLES::
sage: from sage.combinat.designs.latin_squares import MOLS_table
sage: MOLS_table(100)
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
________________________________________________________________________________
0| +oo +oo 1 2 3 4 1 6 7 8 2 10 5 12 4 4 15 16 5 18
20| 4 5 3 22 7 24 4 26 5 28 4 30 31 5 4 5 8 36 4 5
40| 7 40 5 42 5 6 4 46 8 48 6 5 5 52 5 6 7 7 5 58
60| 5 60 5 6 63 7 5 66 5 6 6 70 7 72 5 7 6 6 6 78
80| 9 80 8 82 6 6 6 6 7 88 6 7 6 6 6 6 7 96 6 8
sage: MOLS_table(100, width=4)
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
____________________________________________________________________________________________________
0| +oo +oo 1 2 3 4 1 6 7 8 2 10 5 12 4 4 15 16 5 18
20| 4 5 3 22 7 24 4 26 5 28 4 30 31 5 4 5 8 36 4 5
40| 7 40 5 42 5 6 4 46 8 48 6 5 5 52 5 6 7 7 5 58
60| 5 60 5 6 63 7 5 66 5 6 6 70 7 72 5 7 6 6 6 78
80| 9 80 8 82 6 6 6 6 7 88 6 7 6 6 6 6 7 96 6 8
sage: MOLS_table(100, compare=True)
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
________________________________________________________________________________
0| + +
20|
40|
60| +
80|
sage: MOLS_table(50, 100, compare=True)
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
________________________________________________________________________________
40|
60| +
80|
"""
from orthogonal_arrays import largest_available_k
if stop is None:
start,stop = 0,start
start = start - (start%20)
stop = stop-1
stop = stop + (20-(stop%20))
assert start%20 == 0 and stop%20 == 0
if stop <= start:
return
if compare:
from sage.env import SAGE_SHARE
handbook_file = open(SAGE_SHARE+"/combinatorial_designs/MOLS_table.txt",'r')
hb = [int(_) for _ in handbook_file.readlines()[9].split(',')]
handbook_file.close()
if width is None:
from sage.rings.integer import Integer
width = max(3,Integer(stop-1).ndigits(10))
print " "*(width+2) + "".join("{i:>{width}}".format(i=i,width=width) for i in range(20))
print " "*(width+1) + "_"*((width+1)*20),
for i in range(start,stop):
if i%20==0:
print "\n{:>{width}}|".format(i,width=width),
k = largest_available_k(i)-2
if compare:
if i < 2 or hb[i] == k:
c = ""
elif hb[i] < k:
c = "+"
else:
c = "-"
else:
if i < 2:
c = "+oo"
else:
c = k
print '{:>{width}}'.format(c,width=width),
| false
| true
|
79094fa405e3f0bf9830f8c74721ee170ec3c33a
| 9,137
|
py
|
Python
|
qa/pull-tester/pull-tester.py
|
StarShares/StarShares
|
523869ed7b391664d1b8cac61ad500b9ee8663a3
|
[
"MIT"
] | 4
|
2017-02-10T06:48:28.000Z
|
2021-03-06T02:58:33.000Z
|
qa/pull-tester/pull-tester.py
|
StarShares/StarShares
|
523869ed7b391664d1b8cac61ad500b9ee8663a3
|
[
"MIT"
] | null | null | null |
qa/pull-tester/pull-tester.py
|
StarShares/StarShares
|
523869ed7b391664d1b8cac61ad500b9ee8663a3
|
[
"MIT"
] | 3
|
2017-02-10T06:48:29.000Z
|
2020-10-26T03:27:50.000Z
|
#!/usr/bin/python
# Copyright (c) 2013 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import json
from urllib import urlopen
import requests
import getpass
from string import Template
import sys
import os
import subprocess
class RunError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def run(command, **kwargs):
fail_hard = kwargs.pop("fail_hard", True)
# output to /dev/null by default:
kwargs.setdefault("stdout", open('/dev/null', 'w'))
kwargs.setdefault("stderr", open('/dev/null', 'w'))
command = Template(command).substitute(os.environ)
if "TRACE" in os.environ:
if 'cwd' in kwargs:
print("[cwd=%s] %s"%(kwargs['cwd'], command))
else: print(command)
try:
process = subprocess.Popen(command.split(' '), **kwargs)
process.wait()
except KeyboardInterrupt:
process.terminate()
raise
if process.returncode != 0 and fail_hard:
raise RunError("Failed: "+command)
return process.returncode
def checkout_pull(clone_url, commit, out):
# Init
build_dir=os.environ["BUILD_DIR"]
run("umount ${CHROOT_COPY}/proc", fail_hard=False)
run("rsync --delete -apv ${CHROOT_MASTER}/ ${CHROOT_COPY}")
run("rm -rf ${CHROOT_COPY}${SCRIPTS_DIR}")
run("cp -a ${SCRIPTS_DIR} ${CHROOT_COPY}${SCRIPTS_DIR}")
# Merge onto upstream/master
run("rm -rf ${BUILD_DIR}")
run("mkdir -p ${BUILD_DIR}")
run("git clone ${CLONE_URL} ${BUILD_DIR}")
run("git remote add pull "+clone_url, cwd=build_dir, stdout=out, stderr=out)
run("git fetch pull", cwd=build_dir, stdout=out, stderr=out)
if run("git merge "+ commit, fail_hard=False, cwd=build_dir, stdout=out, stderr=out) != 0:
return False
run("chown -R ${BUILD_USER}:${BUILD_GROUP} ${BUILD_DIR}", stdout=out, stderr=out)
run("mount --bind /proc ${CHROOT_COPY}/proc")
return True
def commentOn(commentUrl, success, inMerge, needTests, linkUrl):
common_message = """
This test script verifies pulls every time they are updated. It, however, dies sometimes and fails to test properly. If you are waiting on a test, please check timestamps to verify that the test.log is moving at http://jenkins.bluematt.me/pull-tester/current/
Contact BlueMatt on freenode if something looks broken."""
# Remove old BitcoinPullTester comments (I'm being lazy and not paginating here)
recentcomments = requests.get(commentUrl+"?sort=created&direction=desc",
auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"])).json
for comment in recentcomments:
if comment["user"]["login"] == os.environ["GITHUB_USER"] and common_message in comment["body"]:
requests.delete(comment["url"],
auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"]))
if success == True:
if needTests:
message = "Automatic sanity-testing: PLEASE ADD TEST-CASES, though technically passed. See " + linkUrl + " for binaries and test log."
else:
message = "Automatic sanity-testing: PASSED, see " + linkUrl + " for binaries and test log."
post_data = { "body" : message + common_message}
elif inMerge:
post_data = { "body" : "Automatic sanity-testing: FAILED MERGE, see " + linkUrl + " for test log." + """
This pull does not merge cleanly onto current master""" + common_message}
else:
post_data = { "body" : "Automatic sanity-testing: FAILED BUILD/TEST, see " + linkUrl + " for binaries and test log." + """
This could happen for one of several reasons:
1. It changes build scripts in a way that made them incompatible with the automated testing scripts (please tweak those patches in qa/pull-tester)
2. It adds/modifies tests which test network rules (thanks for doing that), which conflicts with a patch applied at test time
3. It does not build on either Linux i386 or Win32 (via MinGW cross compile)
4. The test suite fails on either Linux i386 or Win32
5. The block test-cases failed (lookup the first bNN identifier which failed in https://github.com/TheBlueMatt/test-scripts/blob/master/FullBlockTestGenerator.java)
If you believe this to be in error, please ping BlueMatt on freenode or TheBlueMatt here.
""" + common_message}
resp = requests.post(commentUrl, json.dumps(post_data), auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"]))
def testpull(number, comment_url, clone_url, commit):
print("Testing pull %d: %s : %s"%(number, clone_url,commit))
dir = os.environ["RESULTS_DIR"] + "/" + commit + "/"
print(" ouput to %s"%dir)
if os.path.exists(dir):
os.system("rm -r " + dir)
os.makedirs(dir)
currentdir = os.environ["RESULTS_DIR"] + "/current"
os.system("rm -r "+currentdir)
os.system("ln -s " + dir + " " + currentdir)
out = open(dir + "test.log", 'w+')
resultsurl = os.environ["RESULTS_URL"] + commit
checkedout = checkout_pull(clone_url, commit, out)
if checkedout != True:
print("Failed to test pull - sending comment to: " + comment_url)
commentOn(comment_url, False, True, False, resultsurl)
open(os.environ["TESTED_DB"], "a").write(commit + "\n")
return
run("rm -rf ${CHROOT_COPY}/${OUT_DIR}", fail_hard=False);
run("mkdir -p ${CHROOT_COPY}/${OUT_DIR}", fail_hard=False);
run("chown -R ${BUILD_USER}:${BUILD_GROUP} ${CHROOT_COPY}/${OUT_DIR}", fail_hard=False)
script = os.environ["BUILD_PATH"]+"/qa/pull-tester/pull-tester.sh"
script += " ${BUILD_PATH} ${MINGW_DEPS_DIR} ${SCRIPTS_DIR}/BitcoindComparisonTool_jar/BitcoindComparisonTool.jar 0 6 ${OUT_DIR}"
returncode = run("chroot ${CHROOT_COPY} sudo -u ${BUILD_USER} -H timeout ${TEST_TIMEOUT} "+script,
fail_hard=False, stdout=out, stderr=out)
run("mv ${CHROOT_COPY}/${OUT_DIR} " + dir)
run("mv ${BUILD_DIR} " + dir)
if returncode == 42:
print("Successfully tested pull (needs tests) - sending comment to: " + comment_url)
commentOn(comment_url, True, False, True, resultsurl)
elif returncode != 0:
print("Failed to test pull - sending comment to: " + comment_url)
commentOn(comment_url, False, False, False, resultsurl)
else:
print("Successfully tested pull - sending comment to: " + comment_url)
commentOn(comment_url, True, False, False, resultsurl)
open(os.environ["TESTED_DB"], "a").write(commit + "\n")
def environ_default(setting, value):
if not setting in os.environ:
os.environ[setting] = value
if getpass.getuser() != "root":
print("Run me as root!")
sys.exit(1)
if "GITHUB_USER" not in os.environ or "GITHUB_AUTH_TOKEN" not in os.environ:
print("GITHUB_USER and/or GITHUB_AUTH_TOKEN environment variables not set")
sys.exit(1)
environ_default("CLONE_URL", "https://github.com/bitcoin/bitcoin.git")
environ_default("MINGW_DEPS_DIR", "/mnt/w32deps")
environ_default("SCRIPTS_DIR", "/mnt/test-scripts")
environ_default("CHROOT_COPY", "/mnt/chroot-tmp")
environ_default("CHROOT_MASTER", "/mnt/chroot")
environ_default("OUT_DIR", "/mnt/out")
environ_default("BUILD_PATH", "/mnt/bitcoin")
os.environ["BUILD_DIR"] = os.environ["CHROOT_COPY"] + os.environ["BUILD_PATH"]
environ_default("RESULTS_DIR", "/mnt/www/pull-tester")
environ_default("RESULTS_URL", "http://jenkins.bluematt.me/pull-tester/")
environ_default("GITHUB_REPO", "bitcoin/bitcoin")
environ_default("TESTED_DB", "/mnt/commits-tested.txt")
environ_default("BUILD_USER", "matt")
environ_default("BUILD_GROUP", "matt")
environ_default("TEST_TIMEOUT", str(60*60*2))
print("Optional usage: pull-tester.py 2112")
f = open(os.environ["TESTED_DB"])
tested = set( line.rstrip() for line in f.readlines() )
f.close()
if len(sys.argv) > 1:
pull = requests.get("https://api.github.com/repos/"+os.environ["GITHUB_REPO"]+"/pulls/"+sys.argv[1],
auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"])).json
testpull(pull["number"], pull["_links"]["comments"]["href"],
pull["head"]["repo"]["clone_url"], pull["head"]["sha"])
else:
for page in range(1,100):
result = requests.get("https://api.github.com/repos/"+os.environ["GITHUB_REPO"]+"/pulls?state=open&page=%d"%(page,),
auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"])).json
if len(result) == 0: break;
for pull in result:
if pull["head"]["sha"] in tested:
print("Pull %d already tested"%(pull["number"],))
continue
testpull(pull["number"], pull["_links"]["comments"]["href"],
pull["head"]["repo"]["clone_url"], pull["head"]["sha"])
| 47.097938
| 261
| 0.652731
|
import json
from urllib import urlopen
import requests
import getpass
from string import Template
import sys
import os
import subprocess
class RunError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def run(command, **kwargs):
fail_hard = kwargs.pop("fail_hard", True)
kwargs.setdefault("stdout", open('/dev/null', 'w'))
kwargs.setdefault("stderr", open('/dev/null', 'w'))
command = Template(command).substitute(os.environ)
if "TRACE" in os.environ:
if 'cwd' in kwargs:
print("[cwd=%s] %s"%(kwargs['cwd'], command))
else: print(command)
try:
process = subprocess.Popen(command.split(' '), **kwargs)
process.wait()
except KeyboardInterrupt:
process.terminate()
raise
if process.returncode != 0 and fail_hard:
raise RunError("Failed: "+command)
return process.returncode
def checkout_pull(clone_url, commit, out):
build_dir=os.environ["BUILD_DIR"]
run("umount ${CHROOT_COPY}/proc", fail_hard=False)
run("rsync --delete -apv ${CHROOT_MASTER}/ ${CHROOT_COPY}")
run("rm -rf ${CHROOT_COPY}${SCRIPTS_DIR}")
run("cp -a ${SCRIPTS_DIR} ${CHROOT_COPY}${SCRIPTS_DIR}")
run("rm -rf ${BUILD_DIR}")
run("mkdir -p ${BUILD_DIR}")
run("git clone ${CLONE_URL} ${BUILD_DIR}")
run("git remote add pull "+clone_url, cwd=build_dir, stdout=out, stderr=out)
run("git fetch pull", cwd=build_dir, stdout=out, stderr=out)
if run("git merge "+ commit, fail_hard=False, cwd=build_dir, stdout=out, stderr=out) != 0:
return False
run("chown -R ${BUILD_USER}:${BUILD_GROUP} ${BUILD_DIR}", stdout=out, stderr=out)
run("mount --bind /proc ${CHROOT_COPY}/proc")
return True
def commentOn(commentUrl, success, inMerge, needTests, linkUrl):
common_message = """
This test script verifies pulls every time they are updated. It, however, dies sometimes and fails to test properly. If you are waiting on a test, please check timestamps to verify that the test.log is moving at http://jenkins.bluematt.me/pull-tester/current/
Contact BlueMatt on freenode if something looks broken."""
recentcomments = requests.get(commentUrl+"?sort=created&direction=desc",
auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"])).json
for comment in recentcomments:
if comment["user"]["login"] == os.environ["GITHUB_USER"] and common_message in comment["body"]:
requests.delete(comment["url"],
auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"]))
if success == True:
if needTests:
message = "Automatic sanity-testing: PLEASE ADD TEST-CASES, though technically passed. See " + linkUrl + " for binaries and test log."
else:
message = "Automatic sanity-testing: PASSED, see " + linkUrl + " for binaries and test log."
post_data = { "body" : message + common_message}
elif inMerge:
post_data = { "body" : "Automatic sanity-testing: FAILED MERGE, see " + linkUrl + " for test log." + """
This pull does not merge cleanly onto current master""" + common_message}
else:
post_data = { "body" : "Automatic sanity-testing: FAILED BUILD/TEST, see " + linkUrl + " for binaries and test log." + """
This could happen for one of several reasons:
1. It changes build scripts in a way that made them incompatible with the automated testing scripts (please tweak those patches in qa/pull-tester)
2. It adds/modifies tests which test network rules (thanks for doing that), which conflicts with a patch applied at test time
3. It does not build on either Linux i386 or Win32 (via MinGW cross compile)
4. The test suite fails on either Linux i386 or Win32
5. The block test-cases failed (lookup the first bNN identifier which failed in https://github.com/TheBlueMatt/test-scripts/blob/master/FullBlockTestGenerator.java)
If you believe this to be in error, please ping BlueMatt on freenode or TheBlueMatt here.
""" + common_message}
resp = requests.post(commentUrl, json.dumps(post_data), auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"]))
def testpull(number, comment_url, clone_url, commit):
print("Testing pull %d: %s : %s"%(number, clone_url,commit))
dir = os.environ["RESULTS_DIR"] + "/" + commit + "/"
print(" ouput to %s"%dir)
if os.path.exists(dir):
os.system("rm -r " + dir)
os.makedirs(dir)
currentdir = os.environ["RESULTS_DIR"] + "/current"
os.system("rm -r "+currentdir)
os.system("ln -s " + dir + " " + currentdir)
out = open(dir + "test.log", 'w+')
resultsurl = os.environ["RESULTS_URL"] + commit
checkedout = checkout_pull(clone_url, commit, out)
if checkedout != True:
print("Failed to test pull - sending comment to: " + comment_url)
commentOn(comment_url, False, True, False, resultsurl)
open(os.environ["TESTED_DB"], "a").write(commit + "\n")
return
run("rm -rf ${CHROOT_COPY}/${OUT_DIR}", fail_hard=False);
run("mkdir -p ${CHROOT_COPY}/${OUT_DIR}", fail_hard=False);
run("chown -R ${BUILD_USER}:${BUILD_GROUP} ${CHROOT_COPY}/${OUT_DIR}", fail_hard=False)
script = os.environ["BUILD_PATH"]+"/qa/pull-tester/pull-tester.sh"
script += " ${BUILD_PATH} ${MINGW_DEPS_DIR} ${SCRIPTS_DIR}/BitcoindComparisonTool_jar/BitcoindComparisonTool.jar 0 6 ${OUT_DIR}"
returncode = run("chroot ${CHROOT_COPY} sudo -u ${BUILD_USER} -H timeout ${TEST_TIMEOUT} "+script,
fail_hard=False, stdout=out, stderr=out)
run("mv ${CHROOT_COPY}/${OUT_DIR} " + dir)
run("mv ${BUILD_DIR} " + dir)
if returncode == 42:
print("Successfully tested pull (needs tests) - sending comment to: " + comment_url)
commentOn(comment_url, True, False, True, resultsurl)
elif returncode != 0:
print("Failed to test pull - sending comment to: " + comment_url)
commentOn(comment_url, False, False, False, resultsurl)
else:
print("Successfully tested pull - sending comment to: " + comment_url)
commentOn(comment_url, True, False, False, resultsurl)
open(os.environ["TESTED_DB"], "a").write(commit + "\n")
def environ_default(setting, value):
if not setting in os.environ:
os.environ[setting] = value
if getpass.getuser() != "root":
print("Run me as root!")
sys.exit(1)
if "GITHUB_USER" not in os.environ or "GITHUB_AUTH_TOKEN" not in os.environ:
print("GITHUB_USER and/or GITHUB_AUTH_TOKEN environment variables not set")
sys.exit(1)
environ_default("CLONE_URL", "https://github.com/bitcoin/bitcoin.git")
environ_default("MINGW_DEPS_DIR", "/mnt/w32deps")
environ_default("SCRIPTS_DIR", "/mnt/test-scripts")
environ_default("CHROOT_COPY", "/mnt/chroot-tmp")
environ_default("CHROOT_MASTER", "/mnt/chroot")
environ_default("OUT_DIR", "/mnt/out")
environ_default("BUILD_PATH", "/mnt/bitcoin")
os.environ["BUILD_DIR"] = os.environ["CHROOT_COPY"] + os.environ["BUILD_PATH"]
environ_default("RESULTS_DIR", "/mnt/www/pull-tester")
environ_default("RESULTS_URL", "http://jenkins.bluematt.me/pull-tester/")
environ_default("GITHUB_REPO", "bitcoin/bitcoin")
environ_default("TESTED_DB", "/mnt/commits-tested.txt")
environ_default("BUILD_USER", "matt")
environ_default("BUILD_GROUP", "matt")
environ_default("TEST_TIMEOUT", str(60*60*2))
print("Optional usage: pull-tester.py 2112")
f = open(os.environ["TESTED_DB"])
tested = set( line.rstrip() for line in f.readlines() )
f.close()
if len(sys.argv) > 1:
pull = requests.get("https://api.github.com/repos/"+os.environ["GITHUB_REPO"]+"/pulls/"+sys.argv[1],
auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"])).json
testpull(pull["number"], pull["_links"]["comments"]["href"],
pull["head"]["repo"]["clone_url"], pull["head"]["sha"])
else:
for page in range(1,100):
result = requests.get("https://api.github.com/repos/"+os.environ["GITHUB_REPO"]+"/pulls?state=open&page=%d"%(page,),
auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"])).json
if len(result) == 0: break;
for pull in result:
if pull["head"]["sha"] in tested:
print("Pull %d already tested"%(pull["number"],))
continue
testpull(pull["number"], pull["_links"]["comments"]["href"],
pull["head"]["repo"]["clone_url"], pull["head"]["sha"])
| true
| true
|
7909507130ecd4d26a35b2ea1fbaf5e56d26ab85
| 4,928
|
py
|
Python
|
Lab6/Lab6.py
|
natalievolk/ESC190Labs
|
4b254e9247d5ac44e378606bc604ee2c7f67a02c
|
[
"CNRI-Python"
] | null | null | null |
Lab6/Lab6.py
|
natalievolk/ESC190Labs
|
4b254e9247d5ac44e378606bc604ee2c7f67a02c
|
[
"CNRI-Python"
] | null | null | null |
Lab6/Lab6.py
|
natalievolk/ESC190Labs
|
4b254e9247d5ac44e378606bc604ee2c7f67a02c
|
[
"CNRI-Python"
] | null | null | null |
# Lab 6
#
# We'll define a node of a binary tree and introduce some features of Python
# classes along the way
import random
class BST:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
def insert(self, value):
'''
node.insert(5) is the same as BST.insert(node, 5)
We use this when recursively calling, e.g. self.left.insert
'''
if value < self.value:
if self.left == None:
self.left = BST(value)
else:
self.left.insert(value)
else:
if self.right == None:
self.right = BST(value)
else:
self.right.insert(value)
def __repr__(self):
'''The string representation of a node.
Here, we convert the value of the node to a string and make that
the representation.
We can now use
a = Node(4)
print(a) # prints 4
'''
return str(self.value)
a = BST(4)
a.insert(2)
a.insert(5)
a.insert(10)
a.insert(3)
a.insert(15)
b = BST(5)
b.insert(2)
b.insert(10)
b.insert(1)
b.insert(3)
b.insert(7)
b.insert(14)
# Problem 1
# Draw (manually) the binary tree rooted in a.
'''
4
/ \
2 5
\ \
3 10
\
15
'''
# Problem 2
# Write code to find the height of a Binary Search Tree
def find_height(bst):
cur_height = 0
max_height = 0
s = [[bst, 0]]
explored = []
cur = bst
count = 0
while len(s) > 0 and count < 20:
if cur.left != None and cur.right != None: #and cur not in explored:
s.append([bst, cur_height])
cur = cur.left
cur_height += 1
elif cur.left != None: #and cur not in explored:
cur_height += 1
cur = cur.left
elif cur.right != None:
cur_height += 1
cur = cur.right
else:
if cur_height > max_height:
max_height = cur_height
temp = (s.pop(-1))
cur = temp[0]
cur_height = temp[1]
explored.append(cur)
count += 1
return max_height
def find_height_rec(bst):
if bst.left == None and bst.right == None:
return 0
elif bst.left == None:
return 1+find_height_rec(bst.right)
elif bst.right == None:
return 1+find_height_rec(bst.left)
return max(1+find_height_rec(bst.left), 1+find_height_rec(bst.right))
print(find_height(a))
print(find_height_rec(a))
print(find_height_rec(b))
# Problem 3
# Write code to print out the nodes of the BST using
# Breadth-First Search. How would you get the Breadth-First Traversal
# from the tree you've drawn?
# (Modify the BFS function from lecture for this problem)
def BFS_tree(node):
# NOTE: commented out the explored list and checks because not necessary
# think about why it's not necessary ...
q = [node]
cur = node
while len(q) > 0:
cur = q.pop(0)
print(cur)
if cur.left != None and cur.right != None:
q.extend([cur.left, cur.right])
elif cur.left != None:
q.append(cur.left)
cur = cur.left
elif cur.right != None:
q.append(cur.right)
cur = cur.right
#don't need explored list
BFS_tree(a)
print("\n")
BFS_tree(b)
# Problem 4
# Empirically investigate the relationship between the number of nodes in the
# tree and the height of the tree when inserting nodes with values generated
# using random.random()
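# (For values inserted uniformly at random, the height of a BST is expected to
# grow roughly proportionally to log(n_nodes), so the n-vs-height data should
# look logarithmic rather than linear.)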
def make_random_tree(n_nodes):
'''Make a tree with n_nodes nodes by inserting nodes with values
drawn using random.random()
'''
a = BST(random.random())
for i in range(n_nodes-1):
a.insert(random.random())
return a
def height_random_tree(n_nodes):
'''Generate a random tree with n_nodes nodes, and return its height'''
a = make_random_tree(n_nodes)
return find_height_rec(a)
def make_data(max_nodes):
'''Make two lists representing the empirical relationship between
the number of nodes in a random tree and the height of the tree.
Generate N_TREES = 40 trees with each of
n_nodes = 5, int(1.2*5), int(1.2^2*5), .....
return n (a list of values of n_nodes) and h (a list of heights)
'''
N_TREES = 40
n_nodes = [5]
heights = [0]
while n_nodes[-1]*1.2 <= max_nodes:
n_nodes.append(int(n_nodes[-1]*1.2))
heights.append(0)
for k in range(len(n_nodes)):
cur_heights = 0
for i in range(N_TREES):
cur_heights += height_random_tree(n_nodes[k])
heights[k] = cur_heights / N_TREES
return n_nodes, heights
'''
n, h = make_data(100000)
import matplotlib.pyplot as plt
plt.scatter(n, h)
plt.show()
'''
# plt.savefig("trees.png") can save the data to disk
| 23.578947
| 77
| 0.582589
|
# classes along the way
import random
class BST:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
def insert(self, value):
if value < self.value:
if self.left == None:
self.left = BST(value)
else:
self.left.insert(value)
else:
if self.right == None:
self.right = BST(value)
else:
self.right.insert(value)
def __repr__(self):
return str(self.value)
a = BST(4)
a.insert(2)
a.insert(5)
a.insert(10)
a.insert(3)
a.insert(15)
b = BST(5)
b.insert(2)
b.insert(10)
b.insert(1)
b.insert(3)
b.insert(7)
b.insert(14)
# Problem 1
# Draw (manually) the binary tree rooted in a.
# Problem 2
# Write code to find the height of a Binary Search Tree
def find_height(bst):
cur_height = 0
max_height = 0
s = [[bst, 0]]
explored = []
cur = bst
count = 0
while len(s) > 0 and count < 20:
if cur.left != None and cur.right != None: #and cur not in explored:
s.append([bst, cur_height])
cur = cur.left
cur_height += 1
elif cur.left != None: #and cur not in explored:
cur_height += 1
cur = cur.left
elif cur.right != None:
cur_height += 1
cur = cur.right
else:
if cur_height > max_height:
max_height = cur_height
temp = (s.pop(-1))
cur = temp[0]
cur_height = temp[1]
explored.append(cur)
count += 1
return max_height
def find_height_rec(bst):
if bst.left == None and bst.right == None:
return 0
elif bst.left == None:
return 1+find_height_rec(bst.right)
elif bst.right == None:
return 1+find_height_rec(bst.left)
return max(1+find_height_rec(bst.left), 1+find_height_rec(bst.right))
print(find_height(a))
print(find_height_rec(a))
print(find_height_rec(b))
# Problem 3
# Write code to print out the nodes of the BST using
# Breadth-First Search. How would you get the Breadth-First Traversal
# from the tree you've drawn?
def BFS_tree(node):
q = [node]
cur = node
while len(q) > 0:
cur = q.pop(0)
print(cur)
if cur.left != None and cur.right != None:
q.extend([cur.left, cur.right])
elif cur.left != None:
q.append(cur.left)
cur = cur.left
elif cur.right != None:
q.append(cur.right)
cur = cur.right
#don't need explored list
BFS_tree(a)
print("\n")
BFS_tree(b)
def make_random_tree(n_nodes):
a = BST(random.random())
for i in range(n_nodes-1):
a.insert(random.random())
return a
def height_random_tree(n_nodes):
a = make_random_tree(n_nodes)
return find_height_rec(a)
def make_data(max_nodes):
N_TREES = 40
n_nodes = [5]
heights = [0]
while n_nodes[-1]*1.2 <= max_nodes:
n_nodes.append(int(n_nodes[-1]*1.2))
heights.append(0)
for k in range(len(n_nodes)):
cur_heights = 0
for i in range(N_TREES):
cur_heights += height_random_tree(n_nodes[k])
heights[k] = cur_heights / N_TREES
return n_nodes, heights
| true
| true
|
7909509d4f39b3e67495046107e53a704636ebbe
| 3,137
|
py
|
Python
|
scripts/test.py
|
dumpram/stm32_real_time_test
|
59b3e6bbd11498df032a180e06144c8046b14bbe
|
[
"MIT"
] | null | null | null |
scripts/test.py
|
dumpram/stm32_real_time_test
|
59b3e6bbd11498df032a180e06144c8046b14bbe
|
[
"MIT"
] | null | null | null |
scripts/test.py
|
dumpram/stm32_real_time_test
|
59b3e6bbd11498df032a180e06144c8046b14bbe
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# System imports
import argparse
import sys
import serial
# Data processing imports
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import seaborn as sns
def checkparams(pwm_freq, pwm_duty, num_samples):
check_ok = True
if pwm_freq < 20 or pwm_freq > 100:
print("Allowed PWM freq is between in [20, 100] kHz interval.")
check_ok = False
if pwm_duty < 5 or pwm_duty > 80:
print("Allowed PWM duty is between in [5, 80] percent interval.")
check_ok = False
if num_samples < 1 or num_samples > 20000:
print("Allowed samples num is between in [1, 8192] interval.")
check_ok = False
if check_ok == False:
sys.exit(1);
def main(baudrate, pwm_freq, pwm_duty, num_samples, delays_file):
ser = serial.Serial(
port='/dev/ttyUSB0',
baudrate=baudrate,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
rtscts=0
)
if not ser.is_open:
print("Error opening serial port device.")
sys.exit(1)
checkparams(pwm_freq, pwm_duty, num_samples)
print("Params OK!")
delays = np.empty(num_samples)
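    # Handshake with the board, as implied by the reads/writes below: send
    # "pwm_freq,pwm_duty,num_samples\r\n", read back the timer frequency (MHz),
    # then send a newline to start the measurement and read one tick count per
    # sample.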
ser.write(str.encode('{},{},{}\r\n'.format(
pwm_freq,
pwm_duty,
num_samples)))
timer_frequency = int(ser.readline().strip()) # MHz
ser.write(str.encode('\n')); # start measurement
for i in range(num_samples):
delays[i] = int(ser.readline().strip())
ser.close()
delays *= (1e-6 / timer_frequency);
delays = np.delete(delays, 0);
delays = np.delete(delays, 0);
print("min: {}, avg: {}, max = {}".format(
np.min(delays),
np.mean(delays),
np.max(delays)));
print("std: ", np.std(delays))
LOG_FILE = open(delays_file, 'w')
np.save(delays_file, delays);
# mean = np.mean(delays);
# maxi = np.max(delays);
# mini = np.min(delays);
# # sns.distplot(delays, norm_hist=True);
# # plt.show();
# #
# delays *= 1e6;
# plt.plot(delays)
    # plt.ylabel('Delay time (${\mu}s$)')
    # plt.xlabel('Samples (falling edge of the response)')
# plt.show()
# plt.figure(0)
# n, bins, patches = plt.hist(delays, 50, normed=True,
# histtype='step');
# y = mlab.normpdf(bins,
# np.mean(delays),
# np.std(delays))
# plt.show()
# plt.figure(1)
# plt.plot(bins, y)
    # plt.xlabel('Delay time (${\mu}s$)')
    # plt.ylabel('Probability density function')
# plt.show();
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--baudrate', type=int, default=115200)
parser.add_argument('--pwm_freq', type=int, default=20)
parser.add_argument('--pwm_duty', type=int, default=50)
parser.add_argument('--num_samples', type=int, default=20000)
parser.add_argument('--delays_file', type=str, default='novo.npy')
ARGS, other = parser.parse_known_args()
main(ARGS.baudrate, ARGS.pwm_freq, ARGS.pwm_duty, ARGS.num_samples,
ARGS.delays_file);
| 25.504065
| 73
| 0.613006
|
import argparse
import sys
import serial
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import seaborn as sns
def checkparams(pwm_freq, pwm_duty, num_samples):
check_ok = True
if pwm_freq < 20 or pwm_freq > 100:
print("Allowed PWM freq is between in [20, 100] kHz interval.")
check_ok = False
if pwm_duty < 5 or pwm_duty > 80:
print("Allowed PWM duty is between in [5, 80] percent interval.")
check_ok = False
if num_samples < 1 or num_samples > 20000:
print("Allowed samples num is between in [1, 8192] interval.")
check_ok = False
if check_ok == False:
sys.exit(1);
def main(baudrate, pwm_freq, pwm_duty, num_samples, delays_file):
ser = serial.Serial(
port='/dev/ttyUSB0',
baudrate=baudrate,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
rtscts=0
)
if not ser.is_open:
print("Error opening serial port device.")
sys.exit(1)
checkparams(pwm_freq, pwm_duty, num_samples)
print("Params OK!")
delays = np.empty(num_samples)
ser.write(str.encode('{},{},{}\r\n'.format(
pwm_freq,
pwm_duty,
num_samples)))
timer_frequency = int(ser.readline().strip())
ser.write(str.encode('\n'));
for i in range(num_samples):
delays[i] = int(ser.readline().strip())
ser.close()
delays *= (1e-6 / timer_frequency);
delays = np.delete(delays, 0);
delays = np.delete(delays, 0);
print("min: {}, avg: {}, max = {}".format(
np.min(delays),
np.mean(delays),
np.max(delays)));
print("std: ", np.std(delays))
LOG_FILE = open(delays_file, 'w')
np.save(delays_file, delays);
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--baudrate', type=int, default=115200)
parser.add_argument('--pwm_freq', type=int, default=20)
parser.add_argument('--pwm_duty', type=int, default=50)
parser.add_argument('--num_samples', type=int, default=20000)
parser.add_argument('--delays_file', type=str, default='novo.npy')
ARGS, other = parser.parse_known_args()
main(ARGS.baudrate, ARGS.pwm_freq, ARGS.pwm_duty, ARGS.num_samples,
ARGS.delays_file);
| true
| true
|
7909526a408024180a4b4fccfa17d62f682c0aad
| 8,149
|
py
|
Python
|
contrib/devtools/update-translations.py
|
ALLMINER/elli
|
9a21aaff9968ee023ba2017cc485d787ff24b038
|
[
"MIT"
] | 4
|
2018-05-19T16:47:15.000Z
|
2019-11-13T08:59:50.000Z
|
contrib/devtools/update-translations.py
|
ALLMINER/elli
|
9a21aaff9968ee023ba2017cc485d787ff24b038
|
[
"MIT"
] | 3
|
2018-05-09T14:39:32.000Z
|
2018-08-23T22:07:09.000Z
|
contrib/devtools/update-translations.py
|
ALLMINER/elli
|
9a21aaff9968ee023ba2017cc485d787ff24b038
|
[
"MIT"
] | 19
|
2018-04-01T18:17:04.000Z
|
2019-01-20T22:34:03.000Z
|
#!/usr/bin/env python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'elli_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f', '-a']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
try:
specifiers.append(s[percent+1])
except:
print('Failed to get specifier')
pos = percent+2
return specifiers
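# Illustrative example (not part of the original script):
#   find_format_specifiers('%1 of %2 (%s)') -> ['1', '2', 's']
#   split_format_specifiers(['1', '2', 's']) -> ({'1', '2'}, [])   (numeric wins)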
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# If both numeric format specifiers and "others" are used, assume we're dealing
# with a Qt-formatted message. In the case of Qt formatting (see https://doc.qt.io/qt-5/qstring.html#arg)
# only numeric formats are replaced at all. This means "(percentage: %1%)" is valid, without needing
# any kind of escaping that would be necessary for strprintf. Without this, this function
# would wrongly detect '%)' as a printf format specifier.
if numeric:
other = []
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
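# FIX_RE matches ASCII control characters other than LF (0x0a) and CR (0x0d);
# note that the range also includes the tab character (0x09).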
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", ''')
text = text.replace('"', '"')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
        parser = ET.XMLParser(encoding='utf-8') # need to override the encoding because 'utf8' is not understood, only 'utf-8' is
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
fetch_all_translations()
postprocess_translations()
| 38.620853
| 124
| 0.63382
|
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
TX = 'tx'
SOURCE_LANG = 'elli_en.ts'
LOCALE_DIR = 'src/qt/locale'
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f', '-a']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
try:
specifiers.append(s[percent+1])
except:
print('Failed to get specifier')
pos = percent+2
return specifiers
def split_format_specifiers(specifiers):
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# with a Qt-formatted message. In the case of Qt formatting (see https://doc.qt.io/qt-5/qstring.html#arg)
# only numeric formats are replaced at all. This means "(percentage: %1%)" is valid, without needing
# any kind of escaping that would be necessary for strprintf. Without this, this function
# would wrongly detect '%)' as a printf format specifier.
if numeric:
other = []
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
def sanitize_string(s):
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
return FIX_RE.sub(b'', s)
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", ''')
text = text.replace('"', '"')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
        parser = ET.XMLParser(encoding='utf-8') # need to override the encoding because 'utf8' is not understood, only 'utf-8' is
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
fetch_all_translations()
postprocess_translations()
| true
| true
|
790952910036afe78590addee199c1ac59adeab4
| 4,580
|
py
|
Python
|
tests/riscv/vector/vector_wide_operand_conflict_force.py
|
Imperas/force-riscv
|
c15bc18e4d70e6c2f50bad1e9176e13575de6081
|
[
"Apache-2.0"
] | null | null | null |
tests/riscv/vector/vector_wide_operand_conflict_force.py
|
Imperas/force-riscv
|
c15bc18e4d70e6c2f50bad1e9176e13575de6081
|
[
"Apache-2.0"
] | null | null | null |
tests/riscv/vector/vector_wide_operand_conflict_force.py
|
Imperas/force-riscv
|
c15bc18e4d70e6c2f50bad1e9176e13575de6081
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
# FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from riscv.EnvRISCV import EnvRISCV
from riscv.GenThreadRISCV import GenThreadRISCV
from VectorTestSequence import VectorTestSequence
from base.ChoicesModifier import ChoicesModifier
## This test verifies that vector register operands with different layouts don't overlap.
class MainSequence(VectorTestSequence):
def __init__(self, aGenThread, aName=None):
super().__init__(aGenThread, aName)
self._mInstrList = (
'VNSRA.WI##RISCV',
'VNSRA.WV##RISCV',
'VNSRA.WX##RISCV',
'VNSRL.WI##RISCV',
'VNSRL.WV##RISCV',
'VNSRL.WX##RISCV',
'VWADD.VV##RISCV',
'VWADD.VX##RISCV',
'VWADD.WV##RISCV',
'VWADD.WX##RISCV',
'VWADDU.VV##RISCV',
'VWADDU.VX##RISCV',
'VWADDU.WV##RISCV',
'VWADDU.WX##RISCV',
'VWMACC.VV##RISCV',
'VWMACC.VX##RISCV',
'VWMACCSU.VV##RISCV',
'VWMACCSU.VX##RISCV',
'VWMACCU.VV##RISCV',
'VWMACCU.VX##RISCV',
'VWMACCUS.VX##RISCV',
'VWMUL.VV##RISCV',
'VWMUL.VX##RISCV',
'VWMULSU.VV##RISCV',
'VWMULSU.VX##RISCV',
'VWMULU.VV##RISCV',
'VWMULU.VX##RISCV',
'VWSUB.VV##RISCV',
'VWSUB.VX##RISCV',
'VWSUB.WV##RISCV',
'VWSUB.WX##RISCV',
'VWSUBU.VV##RISCV',
'VWSUBU.VX##RISCV',
'VWSUBU.WV##RISCV',
'VWSUBU.WX##RISCV',
)
## Set up the environment prior to generating the test instructions.
def _setUpTest(self):
choices_mod = ChoicesModifier(self.genThread)
# TODO(Noah): Remove the restriction on SEW when a mechanism to skip instructions with
# illegal vector layouts is implemented. For now, ensure vector element width is set to no
# more than 32 bits.
choice_weights = {'0x0': 10, '0x1': 10, '0x2': 10, '0x3': 0, '0x4': 0, '0x5': 0, '0x6': 0, '0x7': 0}
choices_mod.modifyRegisterFieldValueChoices('vtype.VSEW', choice_weights)
# Ensure vector register group size is no more than 4, as larger values are not legal for
# widening and narrowing instructions
vlmul_choice_weights = {'0x0': 10, '0x1': 10, '0x2': 10, '0x3': 0, '0x4': 0, '0x5': 10, '0x6': 10, '0x7': 10}
choices_mod.modifyRegisterFieldValueChoices('vtype.VLMUL', vlmul_choice_weights)
choices_mod.commitSet()
## Return the maximum number of test instructions to generate.
def _getMaxInstructionCount(self):
return 1000
## Return a list of test instructions to randomly choose from.
def _getInstructionList(self):
return self._mInstrList
## Verify additional aspects of the instruction generation and execution.
#
# @param aInstr The name of the instruction.
# @param aInstrRecord A record of the generated instruction.
def _performAdditionalVerification(self, aInstr, aInstrRecord):
vd_val = aInstrRecord['Dests']['vd']
vs1_val = aInstrRecord['Srcs'].get('vs1')
vs2_val = aInstrRecord['Srcs']['vs2']
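        # Widening (VW*) instructions write a destination register group that is
        # wider than their single-width sources, and narrowing (VN*) instructions
        # read a wide vs2 source, so overlap between the destination and a
        # differently-sized operand group indicates a generation error. The
        # "& 0x1F" keeps only the 5-bit vector register index.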
if aInstr.startswith('VW'):
if vs1_val and (vd_val == (vs1_val & 0x1F)):
self.error('Instruction %s used overlapping source and destination registers of different formats' % aInstr)
if ('.W' not in aInstr) and (vd_val == (vs2_val & 0x1F)):
self.error('Instruction %s used overlapping source and destination registers of different formats' % aInstr)
elif aInstr.startswith('VN'):
if (vd_val & 0x1F) == vs2_val:
self.error('Instruction %s used overlapping source and destination registers of different formats' % aInstr)
else:
self.error('Unexpected instruction %s' % aInstr)
MainSequenceClass = MainSequence
GenThreadClass = GenThreadRISCV
EnvClass = EnvRISCV
| 40.175439
| 124
| 0.624672
|
from riscv.EnvRISCV import EnvRISCV
from riscv.GenThreadRISCV import GenThreadRISCV
from VectorTestSequence import VectorTestSequence
from base.ChoicesModifier import ChoicesModifier
class MainSequence(VectorTestSequence):
    def __init__(self, aGenThread, aName=None):
        super().__init__(aGenThread, aName)
        self._mInstrList = (
            'VNSRA.WI##RISCV', 'VNSRA.WV##RISCV', 'VNSRA.WX##RISCV',
            'VNSRL.WI##RISCV', 'VNSRL.WV##RISCV', 'VNSRL.WX##RISCV',
            'VWADD.VV##RISCV', 'VWADD.VX##RISCV', 'VWADD.WV##RISCV', 'VWADD.WX##RISCV',
            'VWADDU.VV##RISCV', 'VWADDU.VX##RISCV', 'VWADDU.WV##RISCV', 'VWADDU.WX##RISCV',
            'VWMACC.VV##RISCV', 'VWMACC.VX##RISCV',
            'VWMACCSU.VV##RISCV', 'VWMACCSU.VX##RISCV',
            'VWMACCU.VV##RISCV', 'VWMACCU.VX##RISCV', 'VWMACCUS.VX##RISCV',
            'VWMUL.VV##RISCV', 'VWMUL.VX##RISCV',
            'VWMULSU.VV##RISCV', 'VWMULSU.VX##RISCV',
            'VWMULU.VV##RISCV', 'VWMULU.VX##RISCV',
            'VWSUB.VV##RISCV', 'VWSUB.VX##RISCV', 'VWSUB.WV##RISCV', 'VWSUB.WX##RISCV',
            'VWSUBU.VV##RISCV', 'VWSUBU.VX##RISCV', 'VWSUBU.WV##RISCV', 'VWSUBU.WX##RISCV',
        )
## Set up the environment prior to generating the test instructions.
def _setUpTest(self):
choices_mod = ChoicesModifier(self.genThread)
# TODO(Noah): Remove the restriction on SEW when a mechanism to skip instructions with
# illegal vector layouts is implemented. For now, ensure vector element width is set to no
# more than 32 bits.
choice_weights = {'0x0': 10, '0x1': 10, '0x2': 10, '0x3': 0, '0x4': 0, '0x5': 0, '0x6': 0, '0x7': 0}
choices_mod.modifyRegisterFieldValueChoices('vtype.VSEW', choice_weights)
# Ensure vector register group size is no more than 4, as larger values are not legal for
# widening and narrowing instructions
vlmul_choice_weights = {'0x0': 10, '0x1': 10, '0x2': 10, '0x3': 0, '0x4': 0, '0x5': 10, '0x6': 10, '0x7': 10}
choices_mod.modifyRegisterFieldValueChoices('vtype.VLMUL', vlmul_choice_weights)
choices_mod.commitSet()
## Return the maximum number of test instructions to generate.
def _getMaxInstructionCount(self):
return 1000
## Return a list of test instructions to randomly choose from.
def _getInstructionList(self):
return self._mInstrList
## Verify additional aspects of the instruction generation and execution.
#
# @param aInstr The name of the instruction.
# @param aInstrRecord A record of the generated instruction.
def _performAdditionalVerification(self, aInstr, aInstrRecord):
vd_val = aInstrRecord['Dests']['vd']
vs1_val = aInstrRecord['Srcs'].get('vs1')
vs2_val = aInstrRecord['Srcs']['vs2']
if aInstr.startswith('VW'):
if vs1_val and (vd_val == (vs1_val & 0x1F)):
self.error('Instruction %s used overlapping source and destination registers of different formats' % aInstr)
if ('.W' not in aInstr) and (vd_val == (vs2_val & 0x1F)):
self.error('Instruction %s used overlapping source and destination registers of different formats' % aInstr)
elif aInstr.startswith('VN'):
if (vd_val & 0x1F) == vs2_val:
self.error('Instruction %s used overlapping source and destination registers of different formats' % aInstr)
else:
self.error('Unexpected instruction %s' % aInstr)
MainSequenceClass = MainSequence
GenThreadClass = GenThreadRISCV
EnvClass = EnvRISCV
| true
| true
|
7909535b9bf702e013d466744bbc0102744b4643
| 922
|
py
|
Python
|
server/server.py
|
alimfeld/codenames
|
dcb8a09a031b953f68b5789ab7bcff3a01ea9e2e
|
[
"MIT"
] | null | null | null |
server/server.py
|
alimfeld/codenames
|
dcb8a09a031b953f68b5789ab7bcff3a01ea9e2e
|
[
"MIT"
] | null | null | null |
server/server.py
|
alimfeld/codenames
|
dcb8a09a031b953f68b5789ab7bcff3a01ea9e2e
|
[
"MIT"
] | null | null | null |
from flask import Flask, request, jsonify, send_from_directory
import engine
app = Flask(__name__)
@app.route('/api/texts')
def texts():
return send_from_directory('i18n', 'ui.de.json');
@app.route('/api/codenames')
def codenames():
return jsonify(engine.codenames())
@app.route('/api/ready')
def ready():
return jsonify(engine.ready())
@app.route('/api/clue', methods=['POST'])
def clue():
content = request.json
return jsonify(engine.clue(
our_agents=content['ourAgents'],
assassin=content['assassin'],
previous_clues=content['previousClues'],
min_related=content['minRelated'],
max_related=content['maxRelated']
))
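# Illustrative request body for /api/clue (field names taken from the handler
# above; the values shown are made up):
#   {"ourAgents": [...], "assassin": "...", "previousClues": [],
#    "minRelated": 2, "maxRelated": 3}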
@app.route('/api/guess', methods=['POST'])
def guess():
content = request.json
return jsonify(engine.guess(
codenames=content['codenames'],
word=content['word'],
number=content['number']
))
| 24.918919
| 62
| 0.649675
|
from flask import Flask, request, jsonify, send_from_directory
import engine
app = Flask(__name__)
@app.route('/api/texts')
def texts():
return send_from_directory('i18n', 'ui.de.json');
@app.route('/api/codenames')
def codenames():
return jsonify(engine.codenames())
@app.route('/api/ready')
def ready():
return jsonify(engine.ready())
@app.route('/api/clue', methods=['POST'])
def clue():
content = request.json
return jsonify(engine.clue(
our_agents=content['ourAgents'],
assassin=content['assassin'],
previous_clues=content['previousClues'],
min_related=content['minRelated'],
max_related=content['maxRelated']
))
@app.route('/api/guess', methods=['POST'])
def guess():
content = request.json
return jsonify(engine.guess(
codenames=content['codenames'],
word=content['word'],
number=content['number']
))
| true
| true
|
7909544692f2d29f705fe1f598e59f74529bf29e
| 6,183
|
py
|
Python
|
mparser.py
|
marco-aziz/mPulse
|
0722da3c01a3a086e2d474840bbae829d9821438
|
[
"MIT"
] | null | null | null |
mparser.py
|
marco-aziz/mPulse
|
0722da3c01a3a086e2d474840bbae829d9821438
|
[
"MIT"
] | 4
|
2021-03-30T13:53:49.000Z
|
2021-09-22T19:22:15.000Z
|
mparser.py
|
marco-aziz/mPulse
|
0722da3c01a3a086e2d474840bbae829d9821438
|
[
"MIT"
] | null | null | null |
"""
Based on the Django REST Framework parsers, optimized for CSV file uploads.
Parsers are used to parse the content of incoming HTTP requests.
They give us a generic way of being able to handle various media types
on the request, such as form content or json encoded data.
"""
import codecs
from urllib import parse
from django.conf import settings
from django.core.files.uploadhandler import StopFutureHandlers
from django.http import QueryDict
from django.http.multipartparser import ChunkIter
from django.http.multipartparser import \
MultiPartParser as DjangoMultiPartParser
from django.http.multipartparser import MultiPartParserError, parse_header
from django.utils.encoding import force_str
from rest_framework import renderers
from rest_framework.exceptions import ParseError
from rest_framework.settings import api_settings
from rest_framework.utils import json
class DataAndFiles:
def __init__(self, data, files):
self.data = data
self.files = files
class BaseParser:
"""
All parsers should extend `BaseParser`, specifying a `media_type`
attribute, and overriding the `.parse()` method.
"""
media_type = None
def parse(self, stream, media_type=None, parser_context=None):
"""
Given a stream to read from, return the parsed representation.
Should return parsed data, or a `DataAndFiles` object consisting of the
parsed data and files.
"""
raise NotImplementedError(".parse() must be overridden.")
class MParser(BaseParser):
"""
Parser for file upload data.
"""
media_type = '*/*'
errors = {
'unhandled': 'FileUpload parse error - none of upload handlers can handle the stream',
'no_filename': 'Missing filename. Request should include a Content-Disposition header with a filename parameter.',
}
def parse(self, stream, media_type=None, parser_context=None):
"""
Treats the incoming bytestream as a raw file upload and returns
a `DataAndFiles` object.
`.data` will be None (we expect request body to be a file content).
`.files` will be a `QueryDict` containing one 'file' element.
"""
parser_context = parser_context or {}
request = parser_context['request']
encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
meta = request.META
upload_handlers = request.upload_handlers
filename = self.get_filename(stream, media_type, parser_context)
# Note that this code is extracted from Django's handling of
# file uploads in MultiPartParser.
content_type = meta.get('HTTP_CONTENT_TYPE',
meta.get('CONTENT_TYPE', ''))
try:
content_length = int(meta.get('HTTP_CONTENT_LENGTH',
meta.get('CONTENT_LENGTH', 0)))
except (ValueError, TypeError):
content_length = None
# See if the handler will want to take care of the parsing.
for handler in upload_handlers:
result = handler.handle_raw_input(stream,
meta,
content_length,
None,
encoding)
if result is not None:
return DataAndFiles({}, {'file': result[1]})
# This is the standard case.
possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
chunk_size = min([2 ** 31 - 4] + possible_sizes)
chunks = ChunkIter(stream, chunk_size)
counters = [0] * len(upload_handlers)
for index, handler in enumerate(upload_handlers):
try:
handler.new_file(None, filename, content_type,
content_length, encoding)
except StopFutureHandlers:
upload_handlers = upload_handlers[:index + 1]
break
for chunk in chunks:
for index, handler in enumerate(upload_handlers):
"""
Trimming HttpResponse encapsulation from parsed file stream
"""
chunk_length = len(chunk)
start = chunk.find(bytes('\n\r\n','utf-8')) + 3
end = chunk.rfind(bytes('\r\n','utf-8'))
end = chunk[:end].rfind(bytes('\r\n','utf-8')) + 2
chunk = handler.receive_data_chunk(chunk[start:end], counters[index])
counters[index] += chunk_length
if chunk is None:
break
for index, handler in enumerate(upload_handlers):
file_obj = handler.file_complete(counters[index])
if file_obj is not None:
return DataAndFiles({}, {'file': file_obj})
raise ParseError(self.errors['unhandled'])
def get_filename(self, stream, media_type, parser_context):
"""
Detects the uploaded file name. First searches a 'filename' url kwarg.
Then tries to parse Content-Disposition header.
"""
try:
return parser_context['kwargs']['filename']
except KeyError:
pass
try:
meta = parser_context['request'].META
disposition = parse_header(meta['HTTP_CONTENT_DISPOSITION'].encode())
filename_parm = disposition[1]
if 'filename*' in filename_parm:
return self.get_encoded_filename(filename_parm)
return force_str(filename_parm['filename'])
except (AttributeError, KeyError, ValueError):
pass
def get_encoded_filename(self, filename_parm):
"""
Handle encoded filenames per RFC6266. See also:
https://tools.ietf.org/html/rfc2231#section-4
"""
encoded_filename = force_str(filename_parm['filename*'])
try:
charset, lang, filename = encoded_filename.split('\'', 2)
filename = parse.unquote(filename)
except (ValueError, LookupError):
filename = force_str(filename_parm['filename'])
return filename
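# Illustrative usage sketch (the view name is hypothetical; like DRF's
# FileUploadParser, the parsed file ends up in request.data['file']):
#
#   from rest_framework.views import APIView
#
#   class CSVUploadView(APIView):
#       parser_classes = [MParser]
#
#       def put(self, request, filename=None):
#           csv_file = request.data['file']
#           ...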
| 38.64375
| 122
| 0.613456
|
import codecs
from urllib import parse
from django.conf import settings
from django.core.files.uploadhandler import StopFutureHandlers
from django.http import QueryDict
from django.http.multipartparser import ChunkIter
from django.http.multipartparser import \
MultiPartParser as DjangoMultiPartParser
from django.http.multipartparser import MultiPartParserError, parse_header
from django.utils.encoding import force_str
from rest_framework import renderers
from rest_framework.exceptions import ParseError
from rest_framework.settings import api_settings
from rest_framework.utils import json
class DataAndFiles:
def __init__(self, data, files):
self.data = data
self.files = files
class BaseParser:
media_type = None
def parse(self, stream, media_type=None, parser_context=None):
raise NotImplementedError(".parse() must be overridden.")
class MParser(BaseParser):
media_type = '*/*'
errors = {
'unhandled': 'FileUpload parse error - none of upload handlers can handle the stream',
'no_filename': 'Missing filename. Request should include a Content-Disposition header with a filename parameter.',
}
def parse(self, stream, media_type=None, parser_context=None):
parser_context = parser_context or {}
request = parser_context['request']
encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
meta = request.META
upload_handlers = request.upload_handlers
filename = self.get_filename(stream, media_type, parser_context)
# file uploads in MultiPartParser.
content_type = meta.get('HTTP_CONTENT_TYPE',
meta.get('CONTENT_TYPE', ''))
try:
content_length = int(meta.get('HTTP_CONTENT_LENGTH',
meta.get('CONTENT_LENGTH', 0)))
except (ValueError, TypeError):
content_length = None
# See if the handler will want to take care of the parsing.
for handler in upload_handlers:
result = handler.handle_raw_input(stream,
meta,
content_length,
None,
encoding)
if result is not None:
return DataAndFiles({}, {'file': result[1]})
# This is the standard case.
possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
chunk_size = min([2 ** 31 - 4] + possible_sizes)
chunks = ChunkIter(stream, chunk_size)
counters = [0] * len(upload_handlers)
for index, handler in enumerate(upload_handlers):
try:
handler.new_file(None, filename, content_type,
content_length, encoding)
except StopFutureHandlers:
upload_handlers = upload_handlers[:index + 1]
break
for chunk in chunks:
for index, handler in enumerate(upload_handlers):
chunk_length = len(chunk)
start = chunk.find(bytes('\n\r\n','utf-8')) + 3
end = chunk.rfind(bytes('\r\n','utf-8'))
end = chunk[:end].rfind(bytes('\r\n','utf-8')) + 2
chunk = handler.receive_data_chunk(chunk[start:end], counters[index])
counters[index] += chunk_length
if chunk is None:
break
for index, handler in enumerate(upload_handlers):
file_obj = handler.file_complete(counters[index])
if file_obj is not None:
return DataAndFiles({}, {'file': file_obj})
raise ParseError(self.errors['unhandled'])
def get_filename(self, stream, media_type, parser_context):
try:
return parser_context['kwargs']['filename']
except KeyError:
pass
try:
meta = parser_context['request'].META
disposition = parse_header(meta['HTTP_CONTENT_DISPOSITION'].encode())
filename_parm = disposition[1]
if 'filename*' in filename_parm:
return self.get_encoded_filename(filename_parm)
return force_str(filename_parm['filename'])
except (AttributeError, KeyError, ValueError):
pass
def get_encoded_filename(self, filename_parm):
encoded_filename = force_str(filename_parm['filename*'])
try:
charset, lang, filename = encoded_filename.split('\'', 2)
filename = parse.unquote(filename)
except (ValueError, LookupError):
filename = force_str(filename_parm['filename'])
return filename
| true
| true
|
790954bc550462219b4cd2a72db6d5bd531ed27f
| 1,515
|
py
|
Python
|
halo_flask/models.py
|
yoramk2/halo_flask
|
d3daddb19b1236f50332c18c8a34ca129746549c
|
[
"MIT"
] | 1
|
2020-07-14T12:49:22.000Z
|
2020-07-14T12:49:22.000Z
|
halo_flask/models.py
|
yoramk2/halo_flask
|
d3daddb19b1236f50332c18c8a34ca129746549c
|
[
"MIT"
] | null | null | null |
halo_flask/models.py
|
yoramk2/halo_flask
|
d3daddb19b1236f50332c18c8a34ca129746549c
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import datetime
import hashlib
import logging
from abc import ABCMeta
from halo_flask.classes import AbsBaseClass
from halo_flask.logs import log_json
from halo_flask.const import SYSTEMChoice,LOGChoice
from .settingsx import settingsx
settings = settingsx()
logger = logging.getLogger(__name__)
ver = settings.DB_VER
uri = settings.DB_URL
tbl = False
page_size = settings.PAGE_SIZE
class AbsDbMixin(AbsBaseClass):
__metaclass__ = ABCMeta
# intercept db calls
halo_context = None
def __init__(self, halo_context):
self.halo_context = halo_context
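    # Every callable attribute looked up on a subclass is wrapped below so the
    # call is timed and the elapsed milliseconds are logged as performance data
    # together with the function name.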
def __getattribute__(self, name):
attr = object.__getattribute__(self, name)
if hasattr(attr, '__call__'):
def newfunc(*args, **kwargs):
now = datetime.datetime.now()
result = attr(*args, **kwargs)
total = datetime.datetime.now() - now
logger.info(LOGChoice.performance_data.value, extra=log_json(self.halo_context,
{LOGChoice.type.value: SYSTEMChoice.dbaccess.value,
LOGChoice.milliseconds.value: int(total.total_seconds() * 1000),
LOGChoice.function.value: str(attr.__name__)}))
return result
return newfunc
else:
return attr
class AbsModel(AbsBaseClass):
pass
| 28.584906
| 123
| 0.611881
|
from __future__ import print_function
import datetime
import hashlib
import logging
from abc import ABCMeta
from halo_flask.classes import AbsBaseClass
from halo_flask.logs import log_json
from halo_flask.const import SYSTEMChoice,LOGChoice
from .settingsx import settingsx
settings = settingsx()
logger = logging.getLogger(__name__)
ver = settings.DB_VER
uri = settings.DB_URL
tbl = False
page_size = settings.PAGE_SIZE
class AbsDbMixin(AbsBaseClass):
__metaclass__ = ABCMeta
halo_context = None
def __init__(self, halo_context):
self.halo_context = halo_context
def __getattribute__(self, name):
attr = object.__getattribute__(self, name)
if hasattr(attr, '__call__'):
def newfunc(*args, **kwargs):
now = datetime.datetime.now()
result = attr(*args, **kwargs)
total = datetime.datetime.now() - now
logger.info(LOGChoice.performance_data.value, extra=log_json(self.halo_context,
{LOGChoice.type.value: SYSTEMChoice.dbaccess.value,
LOGChoice.milliseconds.value: int(total.total_seconds() * 1000),
LOGChoice.function.value: str(attr.__name__)}))
return result
return newfunc
else:
return attr
class AbsModel(AbsBaseClass):
pass
| true
| true
|
7909551686d22dfe6e2f43c4c937014fd62945d1
| 21,563
|
py
|
Python
|
src/ecn.py
|
mnoukhov/ecn
|
f1b838cfe2e27f7cc30cdf2e711b9a474b27a158
|
[
"MIT"
] | 1
|
2021-05-05T18:28:13.000Z
|
2021-05-05T18:28:13.000Z
|
src/ecn.py
|
mnoukhov/ecn
|
f1b838cfe2e27f7cc30cdf2e711b9a474b27a158
|
[
"MIT"
] | null | null | null |
src/ecn.py
|
mnoukhov/ecn
|
f1b838cfe2e27f7cc30cdf2e711b9a474b27a158
|
[
"MIT"
] | null | null | null |
import argparse
import datetime
import json
import os
import time
from os import path
import numpy as np
import torch
from absl import flags
from torch import optim
from pprint import pprint
import wandb
from src.alive_sieve import AliveSieve, SievePlayback
from src.nets import AgentModel
from src.rewards_lib import calc_rewards
from src.sampling import (generate_test_batches,
generate_training_batch,
hash_batches)
FLAGS = flags.FLAGS
def render_action(t, s, prop, term):
agent = t % 2
speaker = 'A' if agent == 0 else 'B'
utility = s.utilities[:, agent]
print(' ', end='')
if speaker == 'B':
print(' ', end='')
print(' ' + ''.join([str(v) for v in s.m_prev[0].view(-1).tolist()]), end='')
print(' %s/%s %s/%s %s/%s' % (
prop[0][0].item(), s.pool[0][0].item(),
prop[0][1].item(), s.pool[0][1].item(),
prop[0][2].item(), s.pool[0][2].item(),
), end='')
print('')
if t + 1 == s.N[0]:
print(' [out of time]')
elif term[0][0]:
print(' ACC')
def save_model(model_file, agent_models, agent_opts, start_time, episode):
state = {}
for i in range(2):
state['agent%s' % i] = {}
state['agent%s' % i]['model_state'] = agent_models[i].state_dict()
state['agent%s' % i]['opt_state'] = agent_opts[i].state_dict()
state['episode'] = episode
state['elapsed_time'] = time.time() - start_time
with open(model_file + '.tmp', 'wb') as f:
torch.save(state, f)
os.rename(model_file + '.tmp', model_file)
def load_model(model_file, agent_models, agent_opts):
with open(model_file, 'rb') as f:
state = torch.load(f)
for i in range(2):
agent_models[i].load_state_dict(state['agent%s' % i]['model_state'])
agent_opts[i].load_state_dict(state['agent%s' % i]['opt_state'])
episode = state['episode']
# create a kind of 'virtual' start_time
start_time = time.time() - state['elapsed_time']
return episode, start_time
class State(object):
def __init__(self, N, pool, utilities):
batch_size = N.size()[0]
self.N = N
self.pool = pool
self.utilities = torch.zeros(batch_size, 2, 3, dtype=torch.int64, device=FLAGS.device)
self.utilities[:, 0] = utilities[0]
self.utilities[:, 1] = utilities[1]
self.last_proposal = torch.zeros(batch_size, 3, dtype=torch.int64, device=FLAGS.device)
self.m_prev = torch.zeros(batch_size, FLAGS.utt_max_length, dtype=torch.int64, device=FLAGS.device)
def sieve_(self, still_alive_idxes):
self.N = self.N[still_alive_idxes]
self.pool = self.pool[still_alive_idxes]
self.utilities = self.utilities[still_alive_idxes]
self.last_proposal = self.last_proposal[still_alive_idxes]
self.m_prev = self.m_prev[still_alive_idxes]
def run_episode(
batch,
agent_models,
batch_size,
testing,
render=False,
initial_agent=0):
"""
    turning testing on means we disable stochasticity: always pick the argmax
"""
s = State(**batch)
sieve = AliveSieve(batch_size=batch_size)
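    # The sieve tracks which games in the batch are still being negotiated:
    # finished games are marked dead and removed from the working tensors each
    # timestep, and alive_masks records that pattern so per-timestep actions can
    # be matched back to the right batch rows during learning.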
actions_by_timestep = []
alive_masks = []
    # the next two tensors won't be sieved; they will stay the same size throughout
# entire batch, we will update them using sieve.out_idxes[...]
rewards = torch.zeros(batch_size, 3, device=FLAGS.device)
num_steps = torch.full((batch_size,), FLAGS.max_timesteps, dtype=torch.int64, device=FLAGS.device)
term_matches_argmax_count = 0
utt_matches_argmax_count = 0
utt_stochastic_draws = 0
num_policy_runs = 0
prop_matches_argmax_count = 0
prop_stochastic_draws = 0
utt_mask = torch.zeros(2, batch_size, 3, dtype=torch.int64, device=FLAGS.device)
prop_mask = torch.zeros(2, batch_size, 3, dtype=torch.int64, device=FLAGS.device)
entropy_loss_by_agent = [
torch.zeros(1, device=FLAGS.device),
torch.zeros(1, device=FLAGS.device)
]
if render:
print(' ')
print(' ',
'{} {} {}'.format(*s.utilities[0][0].tolist()),
' ',
'{} {} {}'.format(*s.pool[0].tolist()),
' ',
'{} {} {}'.format(*s.utilities[0][1].tolist()))
current_A_proposal = torch.zeros(sieve.batch_size, 3, dtype=torch.int64, device=FLAGS.device)
prev_A_proposal = torch.zeros(sieve.batch_size, 3, dtype=torch.int64, device=FLAGS.device)
current_A_message = torch.zeros(sieve.batch_size, FLAGS.utt_max_length, dtype=torch.int64, device=FLAGS.device)
prev_A_message = torch.zeros(sieve.batch_size, FLAGS.utt_max_length, dtype=torch.int64, device=FLAGS.device)
current_A_term = torch.zeros(sieve.batch_size, 1, dtype=torch.uint8)
for t in range(FLAGS.max_timesteps):
if FLAGS.linguistic:
if FLAGS.normal_form and t % 2 == 1:
_prev_message = prev_A_message
else:
_prev_message = s.m_prev
else:
_prev_message = torch.zeros(sieve.batch_size, 6, dtype=torch.int64, device=FLAGS.device)
if FLAGS.proposal:
if FLAGS.normal_form and t % 2 == 1:
_prev_proposal = prev_A_proposal
else:
_prev_proposal = s.last_proposal
else:
_prev_proposal = torch.zeros(sieve.batch_size, 3, dtype=torch.int64, device=FLAGS.device)
# agent = t % 2
agent = (initial_agent + t) % 2
agent_model = agent_models[agent]
(nodes, term_a, s.m_prev, this_proposal, _entropy_loss,
_term_matches_argmax_count, _utt_matches_argmax_count, _utt_stochastic_draws,
_prop_matches_argmax_count, _prop_stochastic_draws, _utt_mask, _prop_mask) = agent_model(
pool=s.pool,
utility=s.utilities[:, agent],
m_prev=_prev_message,
prev_proposal=_prev_proposal,
testing=testing,
)
entropy_loss_by_agent[agent] += _entropy_loss
actions_by_timestep.append(nodes)
term_matches_argmax_count += _term_matches_argmax_count
num_policy_runs += sieve.batch_size
utt_matches_argmax_count += _utt_matches_argmax_count
utt_stochastic_draws += _utt_stochastic_draws
prop_matches_argmax_count += _prop_matches_argmax_count
prop_stochastic_draws += _prop_stochastic_draws
if FLAGS.force_masking_comm:
utt_mask[agent][sieve.out_idxes] |= _utt_mask
prop_mask[agent][sieve.out_idxes] |= _prop_mask
if FLAGS.proposal_termination and not FLAGS.normal_form:
term_a = torch.prod(this_proposal == _prev_proposal,
dim=1,
keepdim=True)
elif not FLAGS.proposal_termination and FLAGS.normal_form:
#TODO which proposal to use here?
if t % 2 == 1:
term_a = (term_a * current_A_term)
else:
current_A_term = term_a
term_a = torch.zeros((sieve.batch_size,1), dtype=torch.uint8, device=FLAGS.device)
elif FLAGS.proposal_termination and FLAGS.normal_form:
if t % 2 == 1:
term_a = torch.prod(this_proposal == current_A_proposal,
dim=1,
keepdim=True)
else:
term_a = torch.zeros((sieve.batch_size,1), dtype=torch.uint8, device=FLAGS.device)
if render and sieve.out_idxes[0] == 0:
render_action(
t=t,
s=s,
term=term_a,
prop=this_proposal
)
new_rewards = calc_rewards(
t=t,
s=s,
term=term_a,
agent=agent,
)
rewards[sieve.out_idxes] = new_rewards
s.last_proposal = this_proposal
if FLAGS.normal_form and t % 2 == 0:
prev_A_proposal = current_A_proposal
current_A_proposal = this_proposal
prev_A_message = current_A_message
current_A_message = s.m_prev
sieve.mark_dead(term_a)
sieve.mark_dead(t + 1 >= s.N)
alive_masks.append(sieve.alive_mask.clone())
sieve.set_dead_global(num_steps, t + 1)
if sieve.all_dead():
break
s.sieve_(sieve.alive_idxes)
if FLAGS.normal_form:
current_A_proposal = current_A_proposal[sieve.alive_idxes]
prev_A_proposal = prev_A_proposal[sieve.alive_idxes]
current_A_message = current_A_message[sieve.alive_idxes]
prev_A_message = prev_A_message[sieve.alive_idxes]
sieve.self_sieve_()
if render:
print(' rewards: {:2.2f} {:2.2f} {:2.2f}'.format(*rewards[0].tolist()))
print(' ')
utt_mask_count = utt_mask.sum(dim=[1,2]).cpu().numpy()
prop_mask_count = prop_mask.sum(dim=[1,2]).cpu().numpy()
return (actions_by_timestep, rewards, num_steps, alive_masks, entropy_loss_by_agent,
term_matches_argmax_count, num_policy_runs, utt_matches_argmax_count, utt_stochastic_draws,
prop_matches_argmax_count, prop_stochastic_draws, utt_mask_count, prop_mask_count)
def safe_div(a, b):
"""
returns a / b, unless b is zero, in which case returns 0
    this is primarily for use in cases where b might be systematically zero, e.g. because comms are disabled or similar
also accounts for a or b being tensors
"""
if isinstance(a, torch.Tensor):
a = a.item()
if isinstance(b, torch.Tensor):
b = b.item()
return 0 if b == 0 else a / b
def run(args):
"""
testing option will:
- use argmax, ie disable stochastic draws
- not run optimizers
- not save model
"""
if args.wandb:
if args.wandb_offline:
os.environ["WANDB_MODE"] = "dryrun"
wandb.init(project='ecn',
name=args.name,
dir=f'{args.savedir}',
group=args.wandb_group)
wandb.config.update(args)
wandb.config.update(FLAGS)
flags_dict = {flag.name: flag.value for flag in FLAGS.flags_by_module_dict()['main.py']}
args_dict = args.__dict__
pprint(args_dict)
pprint(flags_dict)
os.makedirs(args.model_dir, exist_ok=True)
os.makedirs(args.logdir, exist_ok=True)
if args.seed is not None:
np.random.seed(args.seed)
torch.manual_seed(args.seed)
train_r = np.random.RandomState(args.seed)
else:
train_r = np.random
test_r = np.random.RandomState(args.test_seed)
test_batches = generate_test_batches(batch_size=args.batch_size,
num_batches=5,
random_state=test_r)
test_hashes = hash_batches(test_batches)
episode = 0
start_time = time.time()
agent_models = []
agent_opts = []
agent_name = ['A', 'B']
for i in range(2):
model = AgentModel(
name=agent_name[i],
term_entropy_reg=args.term_entropy_reg,
utterance_entropy_reg=args.utterance_entropy_reg,
proposal_entropy_reg=args.proposal_entropy_reg
).to(FLAGS.device)
agent_models.append(model)
agent_opts.append(optim.Adam(params=agent_models[i].parameters()))
if args.wandb:
wandb.watch(agent_models)
if path.isfile(args.model_file) and not args.no_load:
episode, start_time = load_model(
model_file=args.model_file,
agent_models=agent_models,
agent_opts=agent_opts)
print('loaded model')
elif args.testing:
print('')
print('ERROR: must have loadable model to use --testing option')
print('')
return
last_print = time.time()
rewards_sum = torch.zeros(3, device=FLAGS.device)
steps_sum = 0
count_sum = 0
f_log = open(args.log_file, 'w')
all_args = {**args_dict, **flags_dict}
f_log.write('meta: %s\n' % json.dumps(all_args))
last_save = time.time()
baseline = torch.zeros(3, device=FLAGS.device)
term_matches_argmax_count = 0
num_policy_runs = 0
utt_matches_argmax_count = 0
utt_stochastic_draws = 0
prop_matches_argmax_count = 0
prop_stochastic_draws = 0
utt_mask_count = np.array([0,0])
prop_mask_count = np.array([0,0])
while episode < args.episodes:
render = (episode % args.render_every_episode == 0)
split = 2 if FLAGS.randomize_first else 1
agent_losses = [0,0]
both_rewards = []
for i in range(2):
agent_opts[i].zero_grad()
for initial_agent in range(split):
batch = generate_training_batch(batch_size=args.batch_size // split,
test_hashes=test_hashes,
random_state=train_r)
(actions, rewards, steps, alive_masks, entropy_loss_by_agent,
_term_matches_argmax_count, _num_policy_runs, _utt_matches_argmax_count, _utt_stochastic_draws,
_prop_matches_argmax_count, _prop_stochastic_draws,
_utt_mask_count, _prop_mask_count) = run_episode(
batch=batch,
agent_models=agent_models,
batch_size=args.batch_size // split,
render=render,
initial_agent=initial_agent,
testing=args.testing)
term_matches_argmax_count += _term_matches_argmax_count
utt_matches_argmax_count += _utt_matches_argmax_count
utt_stochastic_draws += _utt_stochastic_draws
num_policy_runs += _num_policy_runs
prop_matches_argmax_count += _prop_matches_argmax_count
prop_stochastic_draws += _prop_stochastic_draws
utt_mask_count += _utt_mask_count
prop_mask_count += _prop_mask_count
if not args.testing:
reward_loss_by_agent = [0, 0]
baselined_rewards = rewards - baseline
rewards_by_agent = []
for i in range(2):
if FLAGS.prosocial:
rewards_by_agent.append(baselined_rewards[:, 2])
else:
rewards_by_agent.append(baselined_rewards[:, i])
sieve_playback = SievePlayback(alive_masks)
for t, global_idxes in sieve_playback:
agent = (initial_agent + t) % 2
if len(actions[t]) > 0:
for action in actions[t]:
_rewards = rewards_by_agent[agent]
_reward = _rewards[global_idxes].float().contiguous().view(
sieve_playback.batch_size, 1)
_reward_loss = - (action * _reward)
_reward_loss = _reward_loss.sum()
reward_loss_by_agent[agent] += _reward_loss
for i in range(2):
loss = entropy_loss_by_agent[i] + reward_loss_by_agent[i]
loss.backward()
rewards_sum += rewards.detach().sum(0)
steps_sum += steps.sum()
count_sum += args.batch_size // split
both_rewards.append(rewards)
for i in range(2):
agent_opts[i].step()
rewards = torch.cat(both_rewards).detach()
baseline = 0.7 * baseline + 0.3 * rewards.mean(0).detach()
if render:
"""
run the test batches, print the results
"""
test_rewards_sum = np.zeros(3)
test_count_sum = len(test_batches) * args.batch_size
test_num_policy_runs = 0
test_utt_mask_count = [0,0]
test_prop_mask_count = [0,0]
test_utt_mask_count = np.array([0,0])
test_prop_mask_count = np.array([0,0])
for test_batch in test_batches:
(actions, test_rewards, steps, alive_masks, entropy_loss_by_agent,
_term_matches_argmax_count, _test_num_policy_runs, _utt_matches_argmax_count, _utt_stochastic_draws,
_prop_matches_argmax_count, _prop_stochastic_draws,
_test_utt_mask_count, _test_prop_mask_count) = run_episode(
batch=test_batch,
agent_models=agent_models,
batch_size=args.batch_size,
render=True,
testing=True)
test_rewards_sum += test_rewards.sum(0).cpu().numpy()
test_num_policy_runs += _test_num_policy_runs
test_utt_mask_count += _test_utt_mask_count
test_prop_mask_count += _test_prop_mask_count
time_since_last = time.time() - last_print
rewards_str = '%.2f,%.2f,%.2f' % (rewards_sum[0] / count_sum,
rewards_sum[1] / count_sum,
rewards_sum[2] / count_sum)
test_rewards_str = '%.2f,%.2f,%.2f' % (test_rewards_sum[0] / test_count_sum,
test_rewards_sum[1] / test_count_sum,
test_rewards_sum[2] / test_count_sum)
baseline_str = '%.2f,%.2f,%.2f' % (baseline[0], baseline[1], baseline[2])
utt_mask_pct = utt_mask_count / (3 * count_sum)
test_utt_mask_pct = test_utt_mask_count / (3 * test_count_sum)
prop_mask_pct = prop_mask_count / (3 * count_sum)
test_prop_mask_pct = test_prop_mask_count / (3 * test_count_sum)
print('test {}'.format(test_rewards_str))
print('train {}'.format(rewards_str))
print('base {}'.format(baseline_str))
print('ep {}, {} games/sec, {:2.2f} avg steps'.format(
episode,
int(count_sum / time_since_last),
steps_sum.item() / count_sum
))
print('argmaxp term={:4.4f} utt={:4.4f} prop={:4.4f}'.format(
term_matches_argmax_count / num_policy_runs,
safe_div(utt_matches_argmax_count, utt_stochastic_draws),
prop_matches_argmax_count / prop_stochastic_draws
))
if FLAGS.force_masking_comm:
print('utt mask % {:2.2f},{:2.2f} test % {:2.2f},{:2.2f}'.format(
*utt_mask_pct, *test_utt_mask_pct,
))
print('prop mask % {:2.2f},{:2.2f} test % {:2.2f},{:2.2f}'.format(
*prop_mask_pct, *test_prop_mask_pct,
))
episode_log = {
'episode': episode,
'avg_reward_A': (rewards_sum[0] / count_sum).item(),
'avg_reward_B': (rewards_sum[1] / count_sum).item(),
'avg_reward_0': (rewards_sum[2] / count_sum).item(),
'test_reward_A': (test_rewards_sum[0] / test_count_sum).item(),
'test_reward_B': (test_rewards_sum[1] / test_count_sum).item(),
'test_reward': (test_rewards_sum[2] / test_count_sum).item(),
'avg_steps': torch.true_divide(steps_sum, count_sum).item(),
'games_sec': (count_sum / time_since_last),
'elapsed': time.time() - start_time,
'argmaxp_term': term_matches_argmax_count / num_policy_runs,
'argmaxp_utt': safe_div(utt_matches_argmax_count, utt_stochastic_draws),
'argmaxp_prop': prop_matches_argmax_count / prop_stochastic_draws,
'utt_unmasked_A': utt_mask_pct[0],
'utt_unmasked_B': utt_mask_pct[1],
'prop_unmasked_A': prop_mask_pct[0],
'prop_unmasked_B': prop_mask_pct[1],
'test_utt_unmasked_A': test_utt_mask_pct[0],
'test_utt_unmasked_B': test_utt_mask_pct[1],
'test_prop_unmasked_A': test_prop_mask_pct[0],
'test_prop_unmasked_B': test_prop_mask_pct[1],
}
f_log.write(json.dumps(episode_log) + '\n')
f_log.flush()
if args.wandb:
wandb.log(episode_log)
last_print = time.time()
steps_sum = 0
rewards_sum.fill_(0)
term_matches_argmax_count = 0
num_policy_runs = 0
utt_matches_argmax_count = 0
utt_stochastic_draws = 0
prop_matches_argmax_count = 0
prop_stochastic_draws = 0
count_sum = 0
utt_mask_count.fill(0)
prop_mask_count.fill(0)
if (not args.testing
and not args.no_save
and episode > 0
and episode % args.save_every_episode == 0):
save_model(model_file=args.model_file,
agent_models=agent_models,
agent_opts=agent_opts,
start_time=start_time,
episode=episode)
print('saved model')
episode += 1
if (not args.no_save and
not args.testing):
save_model(
model_file=args.model_file,
agent_models=agent_models,
agent_opts=agent_opts,
start_time=start_time,
episode=episode)
print('saved model')
f_log.close()
| 39.565138
| 118
| 0.58554
|
import argparse
import datetime
import json
import os
import time
from os import path
import numpy as np
import torch
from absl import flags
from torch import optim
from pprint import pprint
import wandb
from src.alive_sieve import AliveSieve, SievePlayback
from src.nets import AgentModel
from src.rewards_lib import calc_rewards
from src.sampling import (generate_test_batches,
generate_training_batch,
hash_batches)
FLAGS = flags.FLAGS
def render_action(t, s, prop, term):
agent = t % 2
speaker = 'A' if agent == 0 else 'B'
utility = s.utilities[:, agent]
print(' ', end='')
if speaker == 'B':
print(' ', end='')
print(' ' + ''.join([str(v) for v in s.m_prev[0].view(-1).tolist()]), end='')
print(' %s/%s %s/%s %s/%s' % (
prop[0][0].item(), s.pool[0][0].item(),
prop[0][1].item(), s.pool[0][1].item(),
prop[0][2].item(), s.pool[0][2].item(),
), end='')
print('')
if t + 1 == s.N[0]:
print(' [out of time]')
elif term[0][0]:
print(' ACC')
def save_model(model_file, agent_models, agent_opts, start_time, episode):
state = {}
for i in range(2):
state['agent%s' % i] = {}
state['agent%s' % i]['model_state'] = agent_models[i].state_dict()
state['agent%s' % i]['opt_state'] = agent_opts[i].state_dict()
state['episode'] = episode
state['elapsed_time'] = time.time() - start_time
with open(model_file + '.tmp', 'wb') as f:
torch.save(state, f)
os.rename(model_file + '.tmp', model_file)
def load_model(model_file, agent_models, agent_opts):
with open(model_file, 'rb') as f:
state = torch.load(f)
for i in range(2):
agent_models[i].load_state_dict(state['agent%s' % i]['model_state'])
agent_opts[i].load_state_dict(state['agent%s' % i]['opt_state'])
episode = state['episode']
start_time = time.time() - state['elapsed_time']
return episode, start_time
class State(object):
def __init__(self, N, pool, utilities):
batch_size = N.size()[0]
self.N = N
self.pool = pool
self.utilities = torch.zeros(batch_size, 2, 3, dtype=torch.int64, device=FLAGS.device)
self.utilities[:, 0] = utilities[0]
self.utilities[:, 1] = utilities[1]
self.last_proposal = torch.zeros(batch_size, 3, dtype=torch.int64, device=FLAGS.device)
self.m_prev = torch.zeros(batch_size, FLAGS.utt_max_length, dtype=torch.int64, device=FLAGS.device)
def sieve_(self, still_alive_idxes):
self.N = self.N[still_alive_idxes]
self.pool = self.pool[still_alive_idxes]
self.utilities = self.utilities[still_alive_idxes]
self.last_proposal = self.last_proposal[still_alive_idxes]
self.m_prev = self.m_prev[still_alive_idxes]
def run_episode(
batch,
agent_models,
batch_size,
testing,
render=False,
initial_agent=0):
s = State(**batch)
sieve = AliveSieve(batch_size=batch_size)
actions_by_timestep = []
alive_masks = []
rewards = torch.zeros(batch_size, 3, device=FLAGS.device)
num_steps = torch.full((batch_size,), FLAGS.max_timesteps, dtype=torch.int64, device=FLAGS.device)
term_matches_argmax_count = 0
utt_matches_argmax_count = 0
utt_stochastic_draws = 0
num_policy_runs = 0
prop_matches_argmax_count = 0
prop_stochastic_draws = 0
utt_mask = torch.zeros(2, batch_size, 3, dtype=torch.int64, device=FLAGS.device)
prop_mask = torch.zeros(2, batch_size, 3, dtype=torch.int64, device=FLAGS.device)
entropy_loss_by_agent = [
torch.zeros(1, device=FLAGS.device),
torch.zeros(1, device=FLAGS.device)
]
if render:
print(' ')
print(' ',
'{} {} {}'.format(*s.utilities[0][0].tolist()),
' ',
'{} {} {}'.format(*s.pool[0].tolist()),
' ',
'{} {} {}'.format(*s.utilities[0][1].tolist()))
current_A_proposal = torch.zeros(sieve.batch_size, 3, dtype=torch.int64, device=FLAGS.device)
prev_A_proposal = torch.zeros(sieve.batch_size, 3, dtype=torch.int64, device=FLAGS.device)
current_A_message = torch.zeros(sieve.batch_size, FLAGS.utt_max_length, dtype=torch.int64, device=FLAGS.device)
prev_A_message = torch.zeros(sieve.batch_size, FLAGS.utt_max_length, dtype=torch.int64, device=FLAGS.device)
current_A_term = torch.zeros(sieve.batch_size, 1, dtype=torch.uint8)
for t in range(FLAGS.max_timesteps):
if FLAGS.linguistic:
if FLAGS.normal_form and t % 2 == 1:
_prev_message = prev_A_message
else:
_prev_message = s.m_prev
else:
_prev_message = torch.zeros(sieve.batch_size, 6, dtype=torch.int64, device=FLAGS.device)
if FLAGS.proposal:
if FLAGS.normal_form and t % 2 == 1:
_prev_proposal = prev_A_proposal
else:
_prev_proposal = s.last_proposal
else:
_prev_proposal = torch.zeros(sieve.batch_size, 3, dtype=torch.int64, device=FLAGS.device)
agent = (initial_agent + t) % 2
agent_model = agent_models[agent]
(nodes, term_a, s.m_prev, this_proposal, _entropy_loss,
_term_matches_argmax_count, _utt_matches_argmax_count, _utt_stochastic_draws,
_prop_matches_argmax_count, _prop_stochastic_draws, _utt_mask, _prop_mask) = agent_model(
pool=s.pool,
utility=s.utilities[:, agent],
m_prev=_prev_message,
prev_proposal=_prev_proposal,
testing=testing,
)
entropy_loss_by_agent[agent] += _entropy_loss
actions_by_timestep.append(nodes)
term_matches_argmax_count += _term_matches_argmax_count
num_policy_runs += sieve.batch_size
utt_matches_argmax_count += _utt_matches_argmax_count
utt_stochastic_draws += _utt_stochastic_draws
prop_matches_argmax_count += _prop_matches_argmax_count
prop_stochastic_draws += _prop_stochastic_draws
if FLAGS.force_masking_comm:
utt_mask[agent][sieve.out_idxes] |= _utt_mask
prop_mask[agent][sieve.out_idxes] |= _prop_mask
if FLAGS.proposal_termination and not FLAGS.normal_form:
term_a = torch.prod(this_proposal == _prev_proposal,
dim=1,
keepdim=True)
elif not FLAGS.proposal_termination and FLAGS.normal_form:
if t % 2 == 1:
term_a = (term_a * current_A_term)
else:
current_A_term = term_a
term_a = torch.zeros((sieve.batch_size,1), dtype=torch.uint8, device=FLAGS.device)
elif FLAGS.proposal_termination and FLAGS.normal_form:
if t % 2 == 1:
term_a = torch.prod(this_proposal == current_A_proposal,
dim=1,
keepdim=True)
else:
term_a = torch.zeros((sieve.batch_size,1), dtype=torch.uint8, device=FLAGS.device)
if render and sieve.out_idxes[0] == 0:
render_action(
t=t,
s=s,
term=term_a,
prop=this_proposal
)
new_rewards = calc_rewards(
t=t,
s=s,
term=term_a,
agent=agent,
)
rewards[sieve.out_idxes] = new_rewards
s.last_proposal = this_proposal
if FLAGS.normal_form and t % 2 == 0:
prev_A_proposal = current_A_proposal
current_A_proposal = this_proposal
prev_A_message = current_A_message
current_A_message = s.m_prev
sieve.mark_dead(term_a)
sieve.mark_dead(t + 1 >= s.N)
alive_masks.append(sieve.alive_mask.clone())
sieve.set_dead_global(num_steps, t + 1)
if sieve.all_dead():
break
s.sieve_(sieve.alive_idxes)
if FLAGS.normal_form:
current_A_proposal = current_A_proposal[sieve.alive_idxes]
prev_A_proposal = prev_A_proposal[sieve.alive_idxes]
current_A_message = current_A_message[sieve.alive_idxes]
prev_A_message = prev_A_message[sieve.alive_idxes]
sieve.self_sieve_()
if render:
print(' rewards: {:2.2f} {:2.2f} {:2.2f}'.format(*rewards[0].tolist()))
print(' ')
utt_mask_count = utt_mask.sum(dim=[1,2]).cpu().numpy()
prop_mask_count = prop_mask.sum(dim=[1,2]).cpu().numpy()
return (actions_by_timestep, rewards, num_steps, alive_masks, entropy_loss_by_agent,
term_matches_argmax_count, num_policy_runs, utt_matches_argmax_count, utt_stochastic_draws,
prop_matches_argmax_count, prop_stochastic_draws, utt_mask_count, prop_mask_count)
def safe_div(a, b):
if isinstance(a, torch.Tensor):
a = a.item()
if isinstance(b, torch.Tensor):
b = b.item()
return 0 if b == 0 else a / b
def run(args):
if args.wandb:
if args.wandb_offline:
os.environ["WANDB_MODE"] = "dryrun"
wandb.init(project='ecn',
name=args.name,
dir=f'{args.savedir}',
group=args.wandb_group)
wandb.config.update(args)
wandb.config.update(FLAGS)
flags_dict = {flag.name: flag.value for flag in FLAGS.flags_by_module_dict()['main.py']}
args_dict = args.__dict__
pprint(args_dict)
pprint(flags_dict)
os.makedirs(args.model_dir, exist_ok=True)
os.makedirs(args.logdir, exist_ok=True)
if args.seed is not None:
np.random.seed(args.seed)
torch.manual_seed(args.seed)
train_r = np.random.RandomState(args.seed)
else:
train_r = np.random
test_r = np.random.RandomState(args.test_seed)
test_batches = generate_test_batches(batch_size=args.batch_size,
num_batches=5,
random_state=test_r)
test_hashes = hash_batches(test_batches)
episode = 0
start_time = time.time()
agent_models = []
agent_opts = []
agent_name = ['A', 'B']
for i in range(2):
model = AgentModel(
name=agent_name[i],
term_entropy_reg=args.term_entropy_reg,
utterance_entropy_reg=args.utterance_entropy_reg,
proposal_entropy_reg=args.proposal_entropy_reg
).to(FLAGS.device)
agent_models.append(model)
agent_opts.append(optim.Adam(params=agent_models[i].parameters()))
if args.wandb:
wandb.watch(agent_models)
if path.isfile(args.model_file) and not args.no_load:
episode, start_time = load_model(
model_file=args.model_file,
agent_models=agent_models,
agent_opts=agent_opts)
print('loaded model')
elif args.testing:
print('')
print('ERROR: must have loadable model to use --testing option')
print('')
return
last_print = time.time()
rewards_sum = torch.zeros(3, device=FLAGS.device)
steps_sum = 0
count_sum = 0
f_log = open(args.log_file, 'w')
all_args = {**args_dict, **flags_dict}
f_log.write('meta: %s\n' % json.dumps(all_args))
last_save = time.time()
baseline = torch.zeros(3, device=FLAGS.device)
term_matches_argmax_count = 0
num_policy_runs = 0
utt_matches_argmax_count = 0
utt_stochastic_draws = 0
prop_matches_argmax_count = 0
prop_stochastic_draws = 0
utt_mask_count = np.array([0,0])
prop_mask_count = np.array([0,0])
while episode < args.episodes:
render = (episode % args.render_every_episode == 0)
split = 2 if FLAGS.randomize_first else 1
agent_losses = [0,0]
both_rewards = []
for i in range(2):
agent_opts[i].zero_grad()
for initial_agent in range(split):
batch = generate_training_batch(batch_size=args.batch_size // split,
test_hashes=test_hashes,
random_state=train_r)
(actions, rewards, steps, alive_masks, entropy_loss_by_agent,
_term_matches_argmax_count, _num_policy_runs, _utt_matches_argmax_count, _utt_stochastic_draws,
_prop_matches_argmax_count, _prop_stochastic_draws,
_utt_mask_count, _prop_mask_count) = run_episode(
batch=batch,
agent_models=agent_models,
batch_size=args.batch_size // split,
render=render,
initial_agent=initial_agent,
testing=args.testing)
term_matches_argmax_count += _term_matches_argmax_count
utt_matches_argmax_count += _utt_matches_argmax_count
utt_stochastic_draws += _utt_stochastic_draws
num_policy_runs += _num_policy_runs
prop_matches_argmax_count += _prop_matches_argmax_count
prop_stochastic_draws += _prop_stochastic_draws
utt_mask_count += _utt_mask_count
prop_mask_count += _prop_mask_count
if not args.testing:
reward_loss_by_agent = [0, 0]
baselined_rewards = rewards - baseline
rewards_by_agent = []
for i in range(2):
if FLAGS.prosocial:
rewards_by_agent.append(baselined_rewards[:, 2])
else:
rewards_by_agent.append(baselined_rewards[:, i])
sieve_playback = SievePlayback(alive_masks)
for t, global_idxes in sieve_playback:
agent = (initial_agent + t) % 2
if len(actions[t]) > 0:
for action in actions[t]:
_rewards = rewards_by_agent[agent]
_reward = _rewards[global_idxes].float().contiguous().view(
sieve_playback.batch_size, 1)
_reward_loss = - (action * _reward)
_reward_loss = _reward_loss.sum()
reward_loss_by_agent[agent] += _reward_loss
for i in range(2):
loss = entropy_loss_by_agent[i] + reward_loss_by_agent[i]
loss.backward()
rewards_sum += rewards.detach().sum(0)
steps_sum += steps.sum()
count_sum += args.batch_size // split
both_rewards.append(rewards)
for i in range(2):
agent_opts[i].step()
rewards = torch.cat(both_rewards).detach()
baseline = 0.7 * baseline + 0.3 * rewards.mean(0).detach()
if render:
test_rewards_sum = np.zeros(3)
test_count_sum = len(test_batches) * args.batch_size
test_num_policy_runs = 0
test_utt_mask_count = [0,0]
test_prop_mask_count = [0,0]
test_utt_mask_count = np.array([0,0])
test_prop_mask_count = np.array([0,0])
for test_batch in test_batches:
(actions, test_rewards, steps, alive_masks, entropy_loss_by_agent,
_term_matches_argmax_count, _test_num_policy_runs, _utt_matches_argmax_count, _utt_stochastic_draws,
_prop_matches_argmax_count, _prop_stochastic_draws,
_test_utt_mask_count, _test_prop_mask_count) = run_episode(
batch=test_batch,
agent_models=agent_models,
batch_size=args.batch_size,
render=True,
testing=True)
test_rewards_sum += test_rewards.sum(0).cpu().numpy()
test_num_policy_runs += _test_num_policy_runs
test_utt_mask_count += _test_utt_mask_count
test_prop_mask_count += _test_prop_mask_count
time_since_last = time.time() - last_print
rewards_str = '%.2f,%.2f,%.2f' % (rewards_sum[0] / count_sum,
rewards_sum[1] / count_sum,
rewards_sum[2] / count_sum)
test_rewards_str = '%.2f,%.2f,%.2f' % (test_rewards_sum[0] / test_count_sum,
test_rewards_sum[1] / test_count_sum,
test_rewards_sum[2] / test_count_sum)
baseline_str = '%.2f,%.2f,%.2f' % (baseline[0], baseline[1], baseline[2])
utt_mask_pct = utt_mask_count / (3 * count_sum)
test_utt_mask_pct = test_utt_mask_count / (3 * test_count_sum)
prop_mask_pct = prop_mask_count / (3 * count_sum)
test_prop_mask_pct = test_prop_mask_count / (3 * test_count_sum)
print('test {}'.format(test_rewards_str))
print('train {}'.format(rewards_str))
print('base {}'.format(baseline_str))
print('ep {}, {} games/sec, {:2.2f} avg steps'.format(
episode,
int(count_sum / time_since_last),
steps_sum.item() / count_sum
))
print('argmaxp term={:4.4f} utt={:4.4f} prop={:4.4f}'.format(
term_matches_argmax_count / num_policy_runs,
safe_div(utt_matches_argmax_count, utt_stochastic_draws),
prop_matches_argmax_count / prop_stochastic_draws
))
if FLAGS.force_masking_comm:
print('utt mask % {:2.2f},{:2.2f} test % {:2.2f},{:2.2f}'.format(
*utt_mask_pct, *test_utt_mask_pct,
))
print('prop mask % {:2.2f},{:2.2f} test % {:2.2f},{:2.2f}'.format(
*prop_mask_pct, *test_prop_mask_pct,
))
episode_log = {
'episode': episode,
'avg_reward_A': (rewards_sum[0] / count_sum).item(),
'avg_reward_B': (rewards_sum[1] / count_sum).item(),
'avg_reward_0': (rewards_sum[2] / count_sum).item(),
'test_reward_A': (test_rewards_sum[0] / test_count_sum).item(),
'test_reward_B': (test_rewards_sum[1] / test_count_sum).item(),
'test_reward': (test_rewards_sum[2] / test_count_sum).item(),
'avg_steps': torch.true_divide(steps_sum, count_sum).item(),
'games_sec': (count_sum / time_since_last),
'elapsed': time.time() - start_time,
'argmaxp_term': term_matches_argmax_count / num_policy_runs,
'argmaxp_utt': safe_div(utt_matches_argmax_count, utt_stochastic_draws),
'argmaxp_prop': prop_matches_argmax_count / prop_stochastic_draws,
'utt_unmasked_A': utt_mask_pct[0],
'utt_unmasked_B': utt_mask_pct[1],
'prop_unmasked_A': prop_mask_pct[0],
'prop_unmasked_B': prop_mask_pct[1],
'test_utt_unmasked_A': test_utt_mask_pct[0],
'test_utt_unmasked_B': test_utt_mask_pct[1],
'test_prop_unmasked_A': test_prop_mask_pct[0],
'test_prop_unmasked_B': test_prop_mask_pct[1],
}
f_log.write(json.dumps(episode_log) + '\n')
f_log.flush()
if args.wandb:
wandb.log(episode_log)
last_print = time.time()
steps_sum = 0
rewards_sum.fill_(0)
term_matches_argmax_count = 0
num_policy_runs = 0
utt_matches_argmax_count = 0
utt_stochastic_draws = 0
prop_matches_argmax_count = 0
prop_stochastic_draws = 0
count_sum = 0
utt_mask_count.fill(0)
prop_mask_count.fill(0)
if (not args.testing
and not args.no_save
and episode > 0
and episode % args.save_every_episode == 0):
save_model(model_file=args.model_file,
agent_models=agent_models,
agent_opts=agent_opts,
start_time=start_time,
episode=episode)
print('saved model')
episode += 1
if (not args.no_save and
not args.testing):
save_model(
model_file=args.model_file,
agent_models=agent_models,
agent_opts=agent_opts,
start_time=start_time,
episode=episode)
print('saved model')
f_log.close()
| true
| true
|
79095552abce67db07c09775dc986a97974c551c
| 2,858
|
py
|
Python
|
myProject/settings.py
|
anthonyc1/django-materialize-boilerplate
|
ba1ae43bf153647d7a26f665a13596f2b0217d0f
|
[
"MIT"
] | null | null | null |
myProject/settings.py
|
anthonyc1/django-materialize-boilerplate
|
ba1ae43bf153647d7a26f665a13596f2b0217d0f
|
[
"MIT"
] | null | null | null |
myProject/settings.py
|
anthonyc1/django-materialize-boilerplate
|
ba1ae43bf153647d7a26f665a13596f2b0217d0f
|
[
"MIT"
] | null | null | null |
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'aa!b!ug6opqr*_f60k&%orwoqus_ecvlgjtsn0y)c)1o7-_at&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
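# Illustrative production-style overrides (assumptions, not part of this
# boilerplate); secrets and hosts would normally come from the environment:
#   SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)
#   DEBUG = os.environ.get('DJANGO_DEBUG', '') == '1'
#   ALLOWED_HOSTS = ['example.com']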
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myApp'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myProject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myProject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
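# A hedged alternative to the SQLite default (all values below are assumed),
# e.g. PostgreSQL:
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.postgresql',
#         'NAME': 'mydb',
#         'USER': 'myuser',
#         'PASSWORD': os.environ.get('DB_PASSWORD', ''),
#         'HOST': 'localhost',
#         'PORT': '5432',
#     }
# }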
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
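# For `collectstatic` in production one would typically also set (path assumed):
# STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')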
| 25.517857
| 91
| 0.689293
|
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'aa!b!ug6opqr*_f60k&%orwoqus_ecvlgjtsn0y)c)1o7-_at&'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myApp'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myProject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myProject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
| true
| true
|
790955e7b71fae23bcfcee0cd2a34e07e6164e1f
| 14,067
|
py
|
Python
|
Lib/multiprocessing/util.py
|
foxleoly/cpython
|
7866690fc7fc1115553fa401a19dc6c95de54d82
|
[
"0BSD"
] | null | null | null |
Lib/multiprocessing/util.py
|
foxleoly/cpython
|
7866690fc7fc1115553fa401a19dc6c95de54d82
|
[
"0BSD"
] | 4
|
2021-12-01T00:05:19.000Z
|
2022-03-27T04:53:40.000Z
|
Lib/multiprocessing/util.py
|
foxleoly/cpython
|
7866690fc7fc1115553fa401a19dc6c95de54d82
|
[
"0BSD"
] | null | null | null |
#
# Module providing various facilities to other parts of the package
#
# multiprocessing/util.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
import os
import itertools
import sys
import weakref
import atexit
import threading # we want threading to install its
# cleanup function before multiprocessing does
from subprocess import _args_from_interpreter_flags
from . import process
__all__ = [
'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
'log_to_stderr', 'get_temp_dir', 'register_after_fork',
'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal',
'close_all_fds_except', 'SUBDEBUG', 'SUBWARNING',
]
#
# Logging
#
NOTSET = 0
SUBDEBUG = 5
DEBUG = 10
INFO = 20
SUBWARNING = 25
LOGGER_NAME = 'multiprocessing'
DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'
_logger = None
_log_to_stderr = False
def sub_debug(msg, *args):
if _logger:
_logger.log(SUBDEBUG, msg, *args)
def debug(msg, *args):
if _logger:
_logger.log(DEBUG, msg, *args)
def info(msg, *args):
if _logger:
_logger.log(INFO, msg, *args)
def sub_warning(msg, *args):
if _logger:
_logger.log(SUBWARNING, msg, *args)
def get_logger():
'''
Returns logger used by multiprocessing
'''
global _logger
import logging
logging._acquireLock()
try:
if not _logger:
_logger = logging.getLogger(LOGGER_NAME)
_logger.propagate = 0
# XXX multiprocessing should cleanup before logging
if hasattr(atexit, 'unregister'):
atexit.unregister(_exit_function)
atexit.register(_exit_function)
else:
atexit._exithandlers.remove((_exit_function, (), {}))
atexit._exithandlers.append((_exit_function, (), {}))
finally:
logging._releaseLock()
return _logger
def log_to_stderr(level=None):
'''
Turn on logging and add a handler which prints to stderr
'''
global _log_to_stderr
import logging
logger = get_logger()
formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
if level:
logger.setLevel(level)
_log_to_stderr = True
return _logger
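# Illustrative use of the two logging helpers above (not part of the module;
# the INFO level is an arbitrary choice):
#   import logging
#   logger = log_to_stderr(logging.INFO)
#   logger.info('pool started')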
# Abstract socket support
def _platform_supports_abstract_sockets():
if sys.platform == "linux":
return True
if hasattr(sys, 'getandroidapilevel'):
return True
return False
def is_abstract_socket_namespace(address):
if not address:
return False
if isinstance(address, bytes):
return address[0] == 0
elif isinstance(address, str):
return address[0] == "\0"
    raise TypeError(f'address type of {address!r} unrecognized')
abstract_sockets_supported = _platform_supports_abstract_sockets()
#
# Function returning a temp directory which will be removed on exit
#
def _remove_temp_dir(rmtree, tempdir):
rmtree(tempdir)
current_process = process.current_process()
# current_process() can be None if the finalizer is called
# late during Python finalization
if current_process is not None:
current_process._config['tempdir'] = None
def get_temp_dir():
# get name of a temp directory which will be automatically cleaned up
tempdir = process.current_process()._config.get('tempdir')
if tempdir is None:
import shutil, tempfile
tempdir = tempfile.mkdtemp(prefix='pymp-')
info('created temp directory %s', tempdir)
# keep a strong reference to shutil.rmtree(), since the finalizer
# can be called late during Python shutdown
Finalize(None, _remove_temp_dir, args=(shutil.rmtree, tempdir),
exitpriority=-100)
process.current_process()._config['tempdir'] = tempdir
return tempdir
#
# Support for reinitialization of objects when bootstrapping a child process
#
_afterfork_registry = weakref.WeakValueDictionary()
_afterfork_counter = itertools.count()
def _run_after_forkers():
items = list(_afterfork_registry.items())
items.sort()
for (index, ident, func), obj in items:
try:
func(obj)
except Exception as e:
info('after forker raised exception %s', e)
def register_after_fork(obj, func):
_afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj
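# Sketch of a typical register_after_fork use (hypothetical object): clear a
# per-process cache in the child after a fork, mirroring ForkAwareLocal below.
#   class Cache:
#       def __init__(self):
#           self._store = {}
#           register_after_fork(self, lambda obj: obj._store.clear())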
#
# Finalization using weakrefs
#
_finalizer_registry = {}
_finalizer_counter = itertools.count()
class Finalize(object):
'''
Class which supports object finalization using weakrefs
'''
def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
if (exitpriority is not None) and not isinstance(exitpriority,int):
raise TypeError(
"Exitpriority ({0!r}) must be None or int, not {1!s}".format(
exitpriority, type(exitpriority)))
if obj is not None:
self._weakref = weakref.ref(obj, self)
elif exitpriority is None:
raise ValueError("Without object, exitpriority cannot be None")
self._callback = callback
self._args = args
self._kwargs = kwargs or {}
self._key = (exitpriority, next(_finalizer_counter))
self._pid = os.getpid()
_finalizer_registry[self._key] = self
def __call__(self, wr=None,
# Need to bind these locally because the globals can have
# been cleared at shutdown
_finalizer_registry=_finalizer_registry,
sub_debug=sub_debug, getpid=os.getpid):
'''
Run the callback unless it has already been called or cancelled
'''
try:
del _finalizer_registry[self._key]
except KeyError:
sub_debug('finalizer no longer registered')
else:
if self._pid != getpid():
sub_debug('finalizer ignored because different process')
res = None
else:
sub_debug('finalizer calling %s with args %s and kwargs %s',
self._callback, self._args, self._kwargs)
res = self._callback(*self._args, **self._kwargs)
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
return res
def cancel(self):
'''
Cancel finalization of the object
'''
try:
del _finalizer_registry[self._key]
except KeyError:
pass
else:
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
def still_active(self):
'''
Return whether this finalizer is still waiting to invoke callback
'''
return self._key in _finalizer_registry
def __repr__(self):
try:
obj = self._weakref()
except (AttributeError, TypeError):
obj = None
if obj is None:
return '<%s object, dead>' % self.__class__.__name__
x = '<%s object, callback=%s' % (
self.__class__.__name__,
getattr(self._callback, '__name__', self._callback))
if self._args:
x += ', args=' + str(self._args)
if self._kwargs:
x += ', kwargs=' + str(self._kwargs)
if self._key[0] is not None:
x += ', exitpriority=' + str(self._key[0])
return x + '>'
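# Minimal illustration of Finalize (names assumed): register a cleanup that
# runs when `conn` is garbage collected or at interpreter exit, whichever
# comes first.
#   finalizer = Finalize(conn, conn.close, exitpriority=10)
#   finalizer.still_active()  # -> True until the callback has run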
def _run_finalizers(minpriority=None):
'''
Run all finalizers whose exit priority is not None and at least minpriority
Finalizers with highest priority are called first; finalizers with
the same priority will be called in reverse order of creation.
'''
if _finalizer_registry is None:
# This function may be called after this module's globals are
# destroyed. See the _exit_function function in this module for more
# notes.
return
if minpriority is None:
f = lambda p : p[0] is not None
else:
f = lambda p : p[0] is not None and p[0] >= minpriority
# Careful: _finalizer_registry may be mutated while this function
# is running (either by a GC run or by another thread).
# list(_finalizer_registry) should be atomic, while
# list(_finalizer_registry.items()) is not.
keys = [key for key in list(_finalizer_registry) if f(key)]
keys.sort(reverse=True)
for key in keys:
finalizer = _finalizer_registry.get(key)
# key may have been removed from the registry
if finalizer is not None:
sub_debug('calling %s', finalizer)
try:
finalizer()
except Exception:
import traceback
traceback.print_exc()
if minpriority is None:
_finalizer_registry.clear()
#
# Clean up on exit
#
def is_exiting():
'''
Returns true if the process is shutting down
'''
return _exiting or _exiting is None
_exiting = False
def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers,
active_children=process.active_children,
current_process=process.current_process):
# We hold on to references to functions in the arglist due to the
# situation described below, where this function is called after this
# module's globals are destroyed.
global _exiting
if not _exiting:
_exiting = True
info('process shutting down')
debug('running all "atexit" finalizers with priority >= 0')
_run_finalizers(0)
if current_process() is not None:
# We check if the current process is None here because if
# it's None, any call to ``active_children()`` will raise
# an AttributeError (active_children winds up trying to
# get attributes from util._current_process). One
# situation where this can happen is if someone has
# manipulated sys.modules, causing this module to be
# garbage collected. The destructor for the module type
# then replaces all values in the module dict with None.
# For instance, after setuptools runs a test it replaces
# sys.modules with a copy created earlier. See issues
# #9775 and #15881. Also related: #4106, #9205, and
# #9207.
for p in active_children():
if p.daemon:
info('calling terminate() for daemon %s', p.name)
p._popen.terminate()
for p in active_children():
info('calling join() for process %s', p.name)
p.join()
debug('running the remaining "atexit" finalizers')
_run_finalizers()
atexit.register(_exit_function)
#
# Some fork aware types
#
class ForkAwareThreadLock(object):
def __init__(self):
self._lock = threading.Lock()
self.acquire = self._lock.acquire
self.release = self._lock.release
register_after_fork(self, ForkAwareThreadLock._at_fork_reinit)
def _at_fork_reinit(self):
self._lock._at_fork_reinit()
def __enter__(self):
return self._lock.__enter__()
def __exit__(self, *args):
return self._lock.__exit__(*args)
class ForkAwareLocal(threading.local):
def __init__(self):
register_after_fork(self, lambda obj : obj.__dict__.clear())
def __reduce__(self):
return type(self), ()
#
# Close fds except those specified
#
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except Exception:
MAXFD = 256
def close_all_fds_except(fds):
fds = list(fds) + [-1, MAXFD]
fds.sort()
assert fds[-1] == MAXFD, 'fd too large'
for i in range(len(fds) - 1):
os.closerange(fds[i]+1, fds[i+1])
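# Example intent (fd numbers assumed): keep stdin/stdout/stderr and fd 7 open,
# close everything else up to MAXFD.
#   close_all_fds_except([0, 1, 2, 7])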
#
# Close sys.stdin and replace stdin with os.devnull
#
def _close_stdin():
if sys.stdin is None:
return
try:
sys.stdin.close()
except (OSError, ValueError):
pass
try:
fd = os.open(os.devnull, os.O_RDONLY)
try:
sys.stdin = open(fd, encoding="utf-8", closefd=False)
except:
os.close(fd)
raise
except (OSError, ValueError):
pass
#
# Flush standard streams, if any
#
def _flush_std_streams():
try:
sys.stdout.flush()
except (AttributeError, ValueError):
pass
try:
sys.stderr.flush()
except (AttributeError, ValueError):
pass
#
# Start a program with only specified fds kept open
#
def spawnv_passfds(path, args, passfds):
import _posixsubprocess
import subprocess
passfds = tuple(sorted(map(int, passfds)))
errpipe_read, errpipe_write = os.pipe()
try:
return _posixsubprocess.fork_exec(
args, [path], True, passfds, None, None,
-1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write,
False, False, None, None, None, -1, None,
subprocess._USE_VFORK)
finally:
os.close(errpipe_read)
os.close(errpipe_write)
def close_fds(*fds):
"""Close each file descriptor given as an argument"""
for fd in fds:
os.close(fd)
def _cleanup_tests():
"""Cleanup multiprocessing resources when multiprocessing tests
completed."""
from test import support
# cleanup multiprocessing
process._cleanup()
# Stop the ForkServer process if it's running
from multiprocessing import forkserver
forkserver._forkserver._stop()
# Stop the ResourceTracker process if it's running
from multiprocessing import resource_tracker
resource_tracker._resource_tracker._stop()
# bpo-37421: Explicitly call _run_finalizers() to remove immediately
# temporary directories created by multiprocessing.util.get_temp_dir().
_run_finalizers()
support.gc_collect()
support.reap_children()
| 28.591463
| 79
| 0.629843
|
import os
import itertools
import sys
import weakref
import atexit
import threading
# cleanup function before multiprocessing does
from subprocess import _args_from_interpreter_flags
from . import process
__all__ = [
'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
'log_to_stderr', 'get_temp_dir', 'register_after_fork',
'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal',
'close_all_fds_except', 'SUBDEBUG', 'SUBWARNING',
]
#
# Logging
#
NOTSET = 0
SUBDEBUG = 5
DEBUG = 10
INFO = 20
SUBWARNING = 25
LOGGER_NAME = 'multiprocessing'
DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'
_logger = None
_log_to_stderr = False
def sub_debug(msg, *args):
if _logger:
_logger.log(SUBDEBUG, msg, *args)
def debug(msg, *args):
if _logger:
_logger.log(DEBUG, msg, *args)
def info(msg, *args):
if _logger:
_logger.log(INFO, msg, *args)
def sub_warning(msg, *args):
if _logger:
_logger.log(SUBWARNING, msg, *args)
def get_logger():
global _logger
import logging
logging._acquireLock()
try:
if not _logger:
_logger = logging.getLogger(LOGGER_NAME)
_logger.propagate = 0
# XXX multiprocessing should cleanup before logging
if hasattr(atexit, 'unregister'):
atexit.unregister(_exit_function)
atexit.register(_exit_function)
else:
atexit._exithandlers.remove((_exit_function, (), {}))
atexit._exithandlers.append((_exit_function, (), {}))
finally:
logging._releaseLock()
return _logger
def log_to_stderr(level=None):
global _log_to_stderr
import logging
logger = get_logger()
formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
if level:
logger.setLevel(level)
_log_to_stderr = True
return _logger
# Abstract socket support
def _platform_supports_abstract_sockets():
if sys.platform == "linux":
return True
if hasattr(sys, 'getandroidapilevel'):
return True
return False
def is_abstract_socket_namespace(address):
if not address:
return False
if isinstance(address, bytes):
return address[0] == 0
elif isinstance(address, str):
return address[0] == "\0"
    raise TypeError(f'address type of {address!r} unrecognized')
abstract_sockets_supported = _platform_supports_abstract_sockets()
#
# Function returning a temp directory which will be removed on exit
#
def _remove_temp_dir(rmtree, tempdir):
rmtree(tempdir)
current_process = process.current_process()
# current_process() can be None if the finalizer is called
# late during Python finalization
if current_process is not None:
current_process._config['tempdir'] = None
def get_temp_dir():
# get name of a temp directory which will be automatically cleaned up
tempdir = process.current_process()._config.get('tempdir')
if tempdir is None:
import shutil, tempfile
tempdir = tempfile.mkdtemp(prefix='pymp-')
info('created temp directory %s', tempdir)
# keep a strong reference to shutil.rmtree(), since the finalizer
# can be called late during Python shutdown
Finalize(None, _remove_temp_dir, args=(shutil.rmtree, tempdir),
exitpriority=-100)
process.current_process()._config['tempdir'] = tempdir
return tempdir
#
# Support for reinitialization of objects when bootstrapping a child process
#
_afterfork_registry = weakref.WeakValueDictionary()
_afterfork_counter = itertools.count()
def _run_after_forkers():
items = list(_afterfork_registry.items())
items.sort()
for (index, ident, func), obj in items:
try:
func(obj)
except Exception as e:
info('after forker raised exception %s', e)
def register_after_fork(obj, func):
_afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj
#
# Finalization using weakrefs
#
_finalizer_registry = {}
_finalizer_counter = itertools.count()
class Finalize(object):
def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
if (exitpriority is not None) and not isinstance(exitpriority,int):
raise TypeError(
"Exitpriority ({0!r}) must be None or int, not {1!s}".format(
exitpriority, type(exitpriority)))
if obj is not None:
self._weakref = weakref.ref(obj, self)
elif exitpriority is None:
raise ValueError("Without object, exitpriority cannot be None")
self._callback = callback
self._args = args
self._kwargs = kwargs or {}
self._key = (exitpriority, next(_finalizer_counter))
self._pid = os.getpid()
_finalizer_registry[self._key] = self
def __call__(self, wr=None,
# Need to bind these locally because the globals can have
# been cleared at shutdown
_finalizer_registry=_finalizer_registry,
sub_debug=sub_debug, getpid=os.getpid):
try:
del _finalizer_registry[self._key]
except KeyError:
sub_debug('finalizer no longer registered')
else:
if self._pid != getpid():
sub_debug('finalizer ignored because different process')
res = None
else:
sub_debug('finalizer calling %s with args %s and kwargs %s',
self._callback, self._args, self._kwargs)
res = self._callback(*self._args, **self._kwargs)
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
return res
def cancel(self):
try:
del _finalizer_registry[self._key]
except KeyError:
pass
else:
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
def still_active(self):
return self._key in _finalizer_registry
def __repr__(self):
try:
obj = self._weakref()
except (AttributeError, TypeError):
obj = None
if obj is None:
return '<%s object, dead>' % self.__class__.__name__
x = '<%s object, callback=%s' % (
self.__class__.__name__,
getattr(self._callback, '__name__', self._callback))
if self._args:
x += ', args=' + str(self._args)
if self._kwargs:
x += ', kwargs=' + str(self._kwargs)
if self._key[0] is not None:
x += ', exitpriority=' + str(self._key[0])
return x + '>'
def _run_finalizers(minpriority=None):
if _finalizer_registry is None:
# This function may be called after this module's globals are
return
if minpriority is None:
f = lambda p : p[0] is not None
else:
f = lambda p : p[0] is not None and p[0] >= minpriority
keys = [key for key in list(_finalizer_registry) if f(key)]
keys.sort(reverse=True)
for key in keys:
finalizer = _finalizer_registry.get(key)
if finalizer is not None:
sub_debug('calling %s', finalizer)
try:
finalizer()
except Exception:
import traceback
traceback.print_exc()
if minpriority is None:
_finalizer_registry.clear()
def is_exiting():
return _exiting or _exiting is None
_exiting = False
def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers,
active_children=process.active_children,
current_process=process.current_process):
global _exiting
if not _exiting:
_exiting = True
info('process shutting down')
debug('running all "atexit" finalizers with priority >= 0')
_run_finalizers(0)
if current_process() is not None:
            # We check if the current process is None here because if
            # it's None, any call to ``active_children()`` will raise
            # an AttributeError.
            for p in active_children():
                if p.daemon:
                    info('calling terminate() for daemon %s', p.name)
                    p._popen.terminate()
for p in active_children():
info('calling join() for process %s', p.name)
p.join()
debug('running the remaining "atexit" finalizers')
_run_finalizers()
atexit.register(_exit_function)
class ForkAwareThreadLock(object):
def __init__(self):
self._lock = threading.Lock()
self.acquire = self._lock.acquire
self.release = self._lock.release
register_after_fork(self, ForkAwareThreadLock._at_fork_reinit)
def _at_fork_reinit(self):
self._lock._at_fork_reinit()
def __enter__(self):
return self._lock.__enter__()
def __exit__(self, *args):
return self._lock.__exit__(*args)
class ForkAwareLocal(threading.local):
def __init__(self):
register_after_fork(self, lambda obj : obj.__dict__.clear())
def __reduce__(self):
return type(self), ()
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except Exception:
MAXFD = 256
def close_all_fds_except(fds):
fds = list(fds) + [-1, MAXFD]
fds.sort()
assert fds[-1] == MAXFD, 'fd too large'
for i in range(len(fds) - 1):
os.closerange(fds[i]+1, fds[i+1])
def _close_stdin():
if sys.stdin is None:
return
try:
sys.stdin.close()
except (OSError, ValueError):
pass
try:
fd = os.open(os.devnull, os.O_RDONLY)
try:
sys.stdin = open(fd, encoding="utf-8", closefd=False)
except:
os.close(fd)
raise
except (OSError, ValueError):
pass
def _flush_std_streams():
try:
sys.stdout.flush()
except (AttributeError, ValueError):
pass
try:
sys.stderr.flush()
except (AttributeError, ValueError):
pass
def spawnv_passfds(path, args, passfds):
import _posixsubprocess
import subprocess
passfds = tuple(sorted(map(int, passfds)))
errpipe_read, errpipe_write = os.pipe()
try:
return _posixsubprocess.fork_exec(
args, [path], True, passfds, None, None,
-1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write,
False, False, None, None, None, -1, None,
subprocess._USE_VFORK)
finally:
os.close(errpipe_read)
os.close(errpipe_write)
def close_fds(*fds):
for fd in fds:
os.close(fd)
def _cleanup_tests():
from test import support
process._cleanup()
from multiprocessing import forkserver
forkserver._forkserver._stop()
# Stop the ResourceTracker process if it's running
from multiprocessing import resource_tracker
resource_tracker._resource_tracker._stop()
_run_finalizers()
support.gc_collect()
support.reap_children()
| true
| true
|
7909563a78a4e905ad64bf54c9107f01802bfe93
| 7,828
|
py
|
Python
|
app.py
|
N1ght-Owls/reposi
|
bf26fe668d1ae5faf4559550aedd1e149a0bf51e
|
[
"MIT"
] | 16
|
2020-04-12T12:06:30.000Z
|
2022-02-04T03:55:46.000Z
|
app.py
|
N1ght-Owls/hackathon
|
bf26fe668d1ae5faf4559550aedd1e149a0bf51e
|
[
"MIT"
] | 6
|
2020-04-14T17:53:28.000Z
|
2021-02-07T18:30:23.000Z
|
app.py
|
N1ght-Owls/hackathon
|
bf26fe668d1ae5faf4559550aedd1e149a0bf51e
|
[
"MIT"
] | 1
|
2020-04-14T17:30:33.000Z
|
2020-04-14T17:30:33.000Z
|
from werkzeug.wrappers import Request
from flask import Flask, redirect, url_for, request, flash
from flask_sqlalchemy import SQLAlchemy
import os
import requests
import random
from contact_form import ContactForm
from flask_dance.contrib.github import make_github_blueprint, github
from flask_dance.contrib.gitlab import make_gitlab_blueprint, gitlab
from discord_webhook import DiscordWebhook
import flask
from os import path
from flask_dance.consumer import oauth_authorized
app = Flask(__name__, template_folder="templates", static_folder='static')
# Various environmental variables
app.secret_key = os.environ.get("FLASK_SECRET")
discord_url = os.environ.get("WEBHOOK")
FLASK_HOST = os.environ.get("FLASK_HOST")
app.config["GITHUB_OAUTH_CLIENT_ID"] = os.environ.get(
"REPOSI_GITHUB_CLIENT_ID")
app.config["GITHUB_OAUTH_CLIENT_SECRET"] = os.environ.get(
"REPOSI_GITHUB_SECRET")
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True
# Github blueprint
github_bp = make_github_blueprint()
github_bp.redirect_url = FLASK_HOST+"/docs"
app.register_blueprint(github_bp, url_prefix="/login")
app.config["GITLAB_OAUTH_CLIENT_ID"] = os.environ.get(
"REPOSI_GITLAB_ID")
app.config["GITLAB_OAUTH_CLIENT_SECRET"] = os.environ.get(
"REPOSI_GITLAB_SECRET")
gitlab_bp = make_gitlab_blueprint()
app.register_blueprint(gitlab_bp, url_prefix="/login")
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True
# Database model & connection
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///db.sqlite"
db = SQLAlchemy(app)
git_token = os.environ.get("GITHUB_TOKEN")
print(git_token)
@oauth_authorized.connect
def redirect_to_docs(blueprint, token):
blueprint.token = token
user = []
git_hash = []
resp = github.get("/user")
user = User.query.filter_by(username=resp.json()['login']).first()
if not user:
user = User(username=resp.json()['login'],
github_hash=str(random.getrandbits(128)))
db.session.add(user)
db.session.commit()
DiscordWebhook(url=discord_url, content=f"New user: {resp.json()['login']}. Check out profile at https://github.com/{resp.json()['login']}").execute()
git_hash = user.github_hash
return redirect(f"/docs?username={resp.json()['login']}&token={git_hash}")
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), unique=True, nullable=False)
github_hash = db.Column(db.String(80), unique=True, nullable=True)
# gitlab_hash = db.Column(db.String(80), unique=True, nullable=True)
def __repr__(self):
return '<User %r>' % self.username
if path.exists("db.sqlite") == True:
print("Database exists")
else:
print("Creating database")
db.create_all()
# Routing and repository parsing
@app.route("/signup")
def signup():
resp = github.get("/user")
if not github.authorized:
return redirect(url_for("github.login"))
print(resp)
assert resp.ok
user = User.query.filter_by(username=resp.json()['login']).first()
username = resp.json()['login']
github_hash = user.github_hash
return redirect(f"/docs?username={username}&token={github_hash}")
def parseGithubRepos(repos):
parsedRepos = []
displayForks = request.args.get('forks')
for repo in repos:
parsedRepo = {
'name': repo['full_name'],
'description': repo['description'],
'issues': repo['open_issues'],
'owner': repo['owner']['login'],
'stars': repo['stargazers_count'],
'forks': repo['forks_count'],
'url': repo['html_url'],
'size': repo['size'],
'language': repo['language']
}
if parsedRepo['description'] == None:
parsedRepo['description'] = "No description provided"
if displayForks == 'hidden':
if repo['fork'] == False:
parsedRepos.append(parsedRepo)
else:
parsedRepos.append(parsedRepo)
# if repo['fork'] == False: parsedRepos.append(parsedRepo)
parsedRepos.sort(key=lambda repo: repo["stars"], reverse=True)
return parsedRepos
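# Rough shape of the data flowing through parseGithubRepos (field values are
# assumptions trimmed from the GitHub REST API response; the function must be
# called inside a Flask request context because it reads request.args):
#   repos = [{'full_name': 'octocat/Hello-World', 'description': None,
#             'open_issues': 0, 'owner': {'login': 'octocat'},
#             'stargazers_count': 80, 'forks_count': 9, 'fork': False,
#             'html_url': 'https://github.com/octocat/Hello-World',
#             'size': 108, 'language': 'C'}]
#   parseGithubRepos(repos)[0]['description']  # -> 'No description provided'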
@app.route("/widget/<username>")
def thing(username):
token = request.args.get('token')
db.session.commit()
user = User.query.filter_by(username=username).first()
resp = {}
theme = request.args.get('theme')
if theme != 'dark': theme = 'light'
if user == None:
return "User not found"
else:
repos = []
if user.github_hash == token:
page = 1
resp = requests.get(
f"https://api.github.com/users/{username}/repos?per_page=100&page=1", auth=("Uzay-G", git_token)).json()
while resp != []:
print(resp, "\n\n\n")
repos += parseGithubRepos(resp)
page += 1
resp = requests.get(
f"https://api.github.com/users/{username}/repos?per_page=100&page={page}", auth=("Uzay-G", git_token)).json()
if type(resp) is dict:
return f'ERROR: {resp["message"]}'
return flask.render_template('widget.html', repos=repos, theme=theme)
else:
return "You do not have a valid api token"
@app.route("/")
def serveMain():
form = ContactForm()
return flask.render_template('index.html', form=form)
@app.route("/docs")
def docs():
form = ContactForm()
return flask.render_template('docs.html', username=request.args.get('username'), token=request.args.get("token"), hostname=FLASK_HOST, form=form)
@app.route("/contact", methods=['POST'])
def contact():
form = ContactForm()
if form.validate_on_submit():
flash('Your message was received')
DiscordWebhook(url=discord_url, content=f"Contact @hackathon: name: {form.name.data}, email: {form.email.data}, message: {form.message.data}").execute()
else:
flash('Your message was not transferred correctly.')
return redirect('/')
if __name__ == '__main__':
app.run(debug=True)
# @app.route("/signup_gitlab")
# def signup_gitlab():
# resp = gitlab.get("/user")
# if not gitlab.authorized:
# return redirect(url_for("gitlab.login"))
# print(resp)
# assert resp.ok
# user = User.query.filter_by(username=resp.json()['login']).first()
# username = resp.json()['login']
# gitlab_hash = user.gitlab_hash
# return redirect(f"/docs?username={username}&token={gitlab_hash}")
# def getGitlabRepoLanguage(repo):
# resp = requests.get(f"https://gitlab.com/api/v4/projects/{repo['id']}/languages").json()
# return next(iter(resp))
# def parseGitlabRepos(repos):
# parsedRepos = []
# for repo in repos:
# parsedRepo = {}
# parsedRepo['name'] = repo['name']
# if repo['description'] == None:
# parsedRepo['description'] = "No description provided"
# else:
# parsedRepo['description'] = repo['description']
# try:
# parsedRepo['issues'] = repo['open_issues_count']
# except:
# parsedRepo['issues'] = 0
# parsedRepo['owner'] = repo['namespace']['name']
# parsedRepo['stars'] = repo['star_count']
# parsedRepo['forks'] = repo['forks_count']
# parsedRepo['url'] = repo['web_url']
# try:
# parsedRepo['size'] = repo['statistics']['repository_size'],
# except:
# parsedRepo['size'] = None
# parsedRepo['language'] = getGitlabRepoLanguage(repo)
# parsedRepos.append(parsedRepo)
# return parsedRepos
| 35.908257
| 161
| 0.624681
|
from werkzeug.wrappers import Request
from flask import Flask, redirect, url_for, request, flash
from flask_sqlalchemy import SQLAlchemy
import os
import requests
import random
from contact_form import ContactForm
from flask_dance.contrib.github import make_github_blueprint, github
from flask_dance.contrib.gitlab import make_gitlab_blueprint, gitlab
from discord_webhook import DiscordWebhook
import flask
from os import path
from flask_dance.consumer import oauth_authorized
app = Flask(__name__, template_folder="templates", static_folder='static')
app.secret_key = os.environ.get("FLASK_SECRET")
discord_url = os.environ.get("WEBHOOK")
FLASK_HOST = os.environ.get("FLASK_HOST")
app.config["GITHUB_OAUTH_CLIENT_ID"] = os.environ.get(
"REPOSI_GITHUB_CLIENT_ID")
app.config["GITHUB_OAUTH_CLIENT_SECRET"] = os.environ.get(
"REPOSI_GITHUB_SECRET")
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True
github_bp = make_github_blueprint()
github_bp.redirect_url = FLASK_HOST+"/docs"
app.register_blueprint(github_bp, url_prefix="/login")
app.config["GITLAB_OAUTH_CLIENT_ID"] = os.environ.get(
"REPOSI_GITLAB_ID")
app.config["GITLAB_OAUTH_CLIENT_SECRET"] = os.environ.get(
"REPOSI_GITLAB_SECRET")
gitlab_bp = make_gitlab_blueprint()
app.register_blueprint(gitlab_bp, url_prefix="/login")
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///db.sqlite"
db = SQLAlchemy(app)
git_token = os.environ.get("GITHUB_TOKEN")
print(git_token)
@oauth_authorized.connect
def redirect_to_docs(blueprint, token):
blueprint.token = token
user = []
git_hash = []
resp = github.get("/user")
user = User.query.filter_by(username=resp.json()['login']).first()
if not user:
user = User(username=resp.json()['login'],
github_hash=str(random.getrandbits(128)))
db.session.add(user)
db.session.commit()
DiscordWebhook(url=discord_url, content=f"New user: {resp.json()['login']}. Check out profile at https://github.com/{resp.json()['login']}").execute()
git_hash = user.github_hash
return redirect(f"/docs?username={resp.json()['login']}&token={git_hash}")
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), unique=True, nullable=False)
github_hash = db.Column(db.String(80), unique=True, nullable=True)
def __repr__(self):
return '<User %r>' % self.username
if path.exists("db.sqlite"):
print("Database exists")
else:
print("Creating database")
db.create_all()
@app.route("/signup")
def signup():
resp = github.get("/user")
if not github.authorized:
return redirect(url_for("github.login"))
print(resp)
assert resp.ok
user = User.query.filter_by(username=resp.json()['login']).first()
username = resp.json()['login']
github_hash = user.github_hash
return redirect(f"/docs?username={username}&token={github_hash}")
def parseGithubRepos(repos):
parsedRepos = []
displayForks = request.args.get('forks')
for repo in repos:
parsedRepo = {
'name': repo['full_name'],
'description': repo['description'],
'issues': repo['open_issues'],
'owner': repo['owner']['login'],
'stars': repo['stargazers_count'],
'forks': repo['forks_count'],
'url': repo['html_url'],
'size': repo['size'],
'language': repo['language']
}
        if parsedRepo['description'] is None:
parsedRepo['description'] = "No description provided"
if displayForks == 'hidden':
            if not repo['fork']:
parsedRepos.append(parsedRepo)
else:
parsedRepos.append(parsedRepo)
parsedRepos.sort(key=lambda repo: repo["stars"], reverse=True)
return parsedRepos
@app.route("/widget/<username>")
def thing(username):
token = request.args.get('token')
db.session.commit()
user = User.query.filter_by(username=username).first()
resp = {}
theme = request.args.get('theme')
if theme != 'dark': theme = 'light'
    if user is None:
return "User not found"
else:
repos = []
if user.github_hash == token:
page = 1
resp = requests.get(
f"https://api.github.com/users/{username}/repos?per_page=100&page=1", auth=("Uzay-G", git_token)).json()
            while resp != []:
                if isinstance(resp, dict):
                    return f'ERROR: {resp["message"]}'
                repos += parseGithubRepos(resp)
                page += 1
                resp = requests.get(
                    f"https://api.github.com/users/{username}/repos?per_page=100&page={page}", auth=("Uzay-G", git_token)).json()
return flask.render_template('widget.html', repos=repos, theme=theme)
else:
return "You do not have a valid api token"
@app.route("/")
def serveMain():
form = ContactForm()
return flask.render_template('index.html', form=form)
@app.route("/docs")
def docs():
form = ContactForm()
return flask.render_template('docs.html', username=request.args.get('username'), token=request.args.get("token"), hostname=FLASK_HOST, form=form)
@app.route("/contact", methods=['POST'])
def contact():
form = ContactForm()
if form.validate_on_submit():
flash('Your message was received')
DiscordWebhook(url=discord_url, content=f"Contact @hackathon: name: {form.name.data}, email: {form.email.data}, message: {form.message.data}").execute()
else:
flash('Your message was not transferred correctly.')
return redirect('/')
if __name__ == '__main__':
app.run(debug=True)
| true
| true
|
7909563e27e264288c90f1e9cecfc98806506423
| 4,093
|
py
|
Python
|
robogym/randomization/tests/test_randomization.py
|
0xflotus/robogym
|
5ec2fcbda9828941fe3072792dd25fb5a915bbbb
|
[
"MIT"
] | 288
|
2020-11-12T21:39:34.000Z
|
2022-03-19T23:27:50.000Z
|
robogym/randomization/tests/test_randomization.py
|
0xflotus/robogym
|
5ec2fcbda9828941fe3072792dd25fb5a915bbbb
|
[
"MIT"
] | 3
|
2020-12-12T19:19:30.000Z
|
2022-03-24T05:21:39.000Z
|
robogym/randomization/tests/test_randomization.py
|
0xflotus/robogym
|
5ec2fcbda9828941fe3072792dd25fb5a915bbbb
|
[
"MIT"
] | 31
|
2020-11-12T22:31:01.000Z
|
2022-02-28T20:34:48.000Z
|
import unittest
import attr
import numpy as np
from robogym.randomization.env import (
EnvActionRandomizer,
EnvObservationRandomizer,
EnvParameterRandomizer,
EnvRandomization,
EnvSimulationRandomizer,
build_randomizable_param,
)
from robogym.randomization.observation import ObservationRandomizer
from robogym.randomization.parameters import FloatRandomizerParameter
class DummyRandomizerParameter(FloatRandomizerParameter):
def __init__(self, name, val):
super().__init__(
name, val, value_range=(-1.0, 1.0), delta=1.0,
)
@attr.s(auto_attribs=True)
class DummyNestedEnvParameter:
c: int = build_randomizable_param(1, low=-3, high=3)
@attr.s(auto_attribs=True)
class DummyEnvParameter:
a: int = build_randomizable_param(0, low=-5, high=5)
b: float = build_randomizable_param(0.0, low=-1.0, high=1.0)
x: int = 0 # Non randomizable parameter.
nested: DummyNestedEnvParameter = DummyNestedEnvParameter()
class DummyObservationRandomizer(ObservationRandomizer):
def __init__(self, name, val):
super().__init__(name)
self.val = self.register_parameter(val)
def _randomize(self, target, random_state):
target[self.val.name] = self.val.get_value()
return target
class TestRandomization(unittest.TestCase):
def setUp(self):
super().setUp()
self.random_state = np.random.RandomState()
def test_randomizer_parameters(self):
parameter = DummyRandomizerParameter("foo", 0.0)
assert parameter.get_value() == 0.0
assert parameter.get_range() == (-1.0, 1.0)
assert parameter.get_delta() == 1.0
parameter.set_value(1.0)
assert parameter.get_value() == 1.0
def test_randomizer_basic(self):
"""
Test functionality of basic randomizer.
"""
randomizer = EnvParameterRandomizer(DummyEnvParameter())
assert len(randomizer.get_parameters()) == 3
# Make sure register duplicate parameter is not allowed.
with self.assertRaises(AssertionError):
randomizer.register_parameter(DummyRandomizerParameter("a", 1))
randomizer.register_parameter(DummyRandomizerParameter("d", 1))
assert len(randomizer.get_parameters()) == 4
randomizer.get_parameter("a").set_value(1)
randomizer.get_parameter("b").set_value(0.5)
randomizer.get_parameter("c").set_value(2)
parameters = randomizer.randomize(DummyEnvParameter(), self.random_state)
assert parameters.a == 1
assert parameters.b == 0.5
assert parameters.nested.c == 2
randomizer.disable()
parameters = randomizer.randomize(DummyEnvParameter(), self.random_state)
randomizer.get_parameter("a").set_value(1)
assert parameters.a == 0
def test_observation_randomizer(self):
randomizer = EnvObservationRandomizer(
[
DummyObservationRandomizer("r1", DummyRandomizerParameter("foo", 0.0)),
DummyObservationRandomizer("r2", DummyRandomizerParameter("bar", 1.0)),
]
)
assert len(randomizer.get_randomizers()) == 2
assert len(randomizer.get_parameters()) == 2
obs = randomizer.randomize({}, self.random_state)
assert obs["foo"] == 0.0
assert obs["bar"] == 1.0
def test_env_randomization(self):
randomization = EnvRandomization(
parameter_randomizer=EnvParameterRandomizer(DummyEnvParameter()),
observation_randomizer=EnvObservationRandomizer(
[
DummyObservationRandomizer(
"r1", DummyRandomizerParameter("foo", 0.0)
),
]
),
action_randomizer=EnvActionRandomizer([]),
simulation_randomizer=EnvSimulationRandomizer([]),
)
randomization.update_parameter("observation.r1:foo", 0.5)
parameter = randomization.get_parameter("observation.r1:foo")
assert parameter.get_value() == 0.5
| 31.976563
| 87
| 0.658441
|
import unittest
import attr
import numpy as np
from robogym.randomization.env import (
EnvActionRandomizer,
EnvObservationRandomizer,
EnvParameterRandomizer,
EnvRandomization,
EnvSimulationRandomizer,
build_randomizable_param,
)
from robogym.randomization.observation import ObservationRandomizer
from robogym.randomization.parameters import FloatRandomizerParameter
class DummyRandomizerParameter(FloatRandomizerParameter):
def __init__(self, name, val):
super().__init__(
name, val, value_range=(-1.0, 1.0), delta=1.0,
)
@attr.s(auto_attribs=True)
class DummyNestedEnvParameter:
c: int = build_randomizable_param(1, low=-3, high=3)
@attr.s(auto_attribs=True)
class DummyEnvParameter:
a: int = build_randomizable_param(0, low=-5, high=5)
b: float = build_randomizable_param(0.0, low=-1.0, high=1.0)
x: int = 0
nested: DummyNestedEnvParameter = DummyNestedEnvParameter()
class DummyObservationRandomizer(ObservationRandomizer):
def __init__(self, name, val):
super().__init__(name)
self.val = self.register_parameter(val)
def _randomize(self, target, random_state):
target[self.val.name] = self.val.get_value()
return target
class TestRandomization(unittest.TestCase):
def setUp(self):
super().setUp()
self.random_state = np.random.RandomState()
def test_randomizer_parameters(self):
parameter = DummyRandomizerParameter("foo", 0.0)
assert parameter.get_value() == 0.0
assert parameter.get_range() == (-1.0, 1.0)
assert parameter.get_delta() == 1.0
parameter.set_value(1.0)
assert parameter.get_value() == 1.0
def test_randomizer_basic(self):
randomizer = EnvParameterRandomizer(DummyEnvParameter())
assert len(randomizer.get_parameters()) == 3
with self.assertRaises(AssertionError):
randomizer.register_parameter(DummyRandomizerParameter("a", 1))
randomizer.register_parameter(DummyRandomizerParameter("d", 1))
assert len(randomizer.get_parameters()) == 4
randomizer.get_parameter("a").set_value(1)
randomizer.get_parameter("b").set_value(0.5)
randomizer.get_parameter("c").set_value(2)
parameters = randomizer.randomize(DummyEnvParameter(), self.random_state)
assert parameters.a == 1
assert parameters.b == 0.5
assert parameters.nested.c == 2
randomizer.disable()
parameters = randomizer.randomize(DummyEnvParameter(), self.random_state)
randomizer.get_parameter("a").set_value(1)
assert parameters.a == 0
def test_observation_randomizer(self):
randomizer = EnvObservationRandomizer(
[
DummyObservationRandomizer("r1", DummyRandomizerParameter("foo", 0.0)),
DummyObservationRandomizer("r2", DummyRandomizerParameter("bar", 1.0)),
]
)
assert len(randomizer.get_randomizers()) == 2
assert len(randomizer.get_parameters()) == 2
obs = randomizer.randomize({}, self.random_state)
assert obs["foo"] == 0.0
assert obs["bar"] == 1.0
def test_env_randomization(self):
randomization = EnvRandomization(
parameter_randomizer=EnvParameterRandomizer(DummyEnvParameter()),
observation_randomizer=EnvObservationRandomizer(
[
DummyObservationRandomizer(
"r1", DummyRandomizerParameter("foo", 0.0)
),
]
),
action_randomizer=EnvActionRandomizer([]),
simulation_randomizer=EnvSimulationRandomizer([]),
)
randomization.update_parameter("observation.r1:foo", 0.5)
parameter = randomization.get_parameter("observation.r1:foo")
assert parameter.get_value() == 0.5
| true
| true
|
7909564ae5e3998adfa59559eb47e2f30fa28f49
| 157
|
py
|
Python
|
feed/views.py
|
njokuifeanyigerald/django-social-media-app
|
dc27873fd518b1dc79e179c359470f9a1a10478f
|
[
"MIT"
] | null | null | null |
feed/views.py
|
njokuifeanyigerald/django-social-media-app
|
dc27873fd518b1dc79e179c359470f9a1a10478f
|
[
"MIT"
] | null | null | null |
feed/views.py
|
njokuifeanyigerald/django-social-media-app
|
dc27873fd518b1dc79e179c359470f9a1a10478f
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
def feedHome(request):
return HttpResponse('<p>Welcome To My Social App</p>')
| 26.166667
| 59
| 0.770701
|
from django.shortcuts import render
from django.http import HttpResponse
def feedHome(request):
return HttpResponse('<p>Welcome To My Social App</p>')
| true
| true
|
7909564d55bc80ff0d4e5ca9e58c97157c95b9fe
| 875
|
py
|
Python
|
plugins/salesforce/komand_salesforce/actions/create_record/action.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 46
|
2019-06-05T20:47:58.000Z
|
2022-03-29T10:18:01.000Z
|
plugins/salesforce/komand_salesforce/actions/create_record/action.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 386
|
2019-06-07T20:20:39.000Z
|
2022-03-30T17:35:01.000Z
|
plugins/salesforce/komand_salesforce/actions/create_record/action.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 43
|
2019-07-09T14:13:58.000Z
|
2022-03-28T12:04:46.000Z
|
import komand
from .schema import CreateRecordInput, CreateRecordOutput
# Custom imports below
class CreateRecord(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="create_record",
description="Create a new SObject record",
input=CreateRecordInput(),
output=CreateRecordOutput(),
)
def run(self, params={}):
object_name = params.get("object_name", "Account")
object_data = params.get("object_data")
record = self.connection.api.create_record(object_name, object_data)
try:
id_ = record["id"]
except KeyError:
self.logger.error("Error: id key is missing from record.")
id_ = "Not available"
if record.get("success"):
return {"id": id_}
else:
return {}
| 27.34375
| 76
| 0.595429
|
import komand
from .schema import CreateRecordInput, CreateRecordOutput
class CreateRecord(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="create_record",
description="Create a new SObject record",
input=CreateRecordInput(),
output=CreateRecordOutput(),
)
def run(self, params={}):
object_name = params.get("object_name", "Account")
object_data = params.get("object_data")
record = self.connection.api.create_record(object_name, object_data)
try:
id_ = record["id"]
except KeyError:
self.logger.error("Error: id key is missing from record.")
id_ = "Not available"
if record.get("success"):
return {"id": id_}
else:
return {}
| true
| true
|
790956666b7ca4e00adb3f774cbf243ad7a6ef3c
| 377
|
py
|
Python
|
tdda/testtdda.py
|
Daniel-Mietchen/tdda
|
98718ec3b4b253bba3b575d4b10a14a6d70576b8
|
[
"MIT"
] | null | null | null |
tdda/testtdda.py
|
Daniel-Mietchen/tdda
|
98718ec3b4b253bba3b575d4b10a14a6d70576b8
|
[
"MIT"
] | null | null | null |
tdda/testtdda.py
|
Daniel-Mietchen/tdda
|
98718ec3b4b253bba3b575d4b10a14a6d70576b8
|
[
"MIT"
] | null | null | null |
"""
Run all TDDA tests
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from tdda.referencetest import ReferenceTestCase
from tdda.constraints.testconstraints import *
from tdda.rexpy.testrexpy import *
from tdda.referencetest.tests.alltests import *
if __name__ == '__main__':
ReferenceTestCase.main()
| 19.842105
| 48
| 0.803714
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from tdda.referencetest import ReferenceTestCase
from tdda.constraints.testconstraints import *
from tdda.rexpy.testrexpy import *
from tdda.referencetest.tests.alltests import *
if __name__ == '__main__':
ReferenceTestCase.main()
| true
| true
|
790957106313de045f6ffacbf06a1dab51905223
| 100
|
py
|
Python
|
programacion_avanzada/06.paquetes_y_modulos/package/sub_package2/mod4.py
|
soytupadrrre/Master_Python_Eip
|
c4774209d7dd15584233fe5d4cc01b1434c9316b
|
[
"MIT"
] | null | null | null |
programacion_avanzada/06.paquetes_y_modulos/package/sub_package2/mod4.py
|
soytupadrrre/Master_Python_Eip
|
c4774209d7dd15584233fe5d4cc01b1434c9316b
|
[
"MIT"
] | null | null | null |
programacion_avanzada/06.paquetes_y_modulos/package/sub_package2/mod4.py
|
soytupadrrre/Master_Python_Eip
|
c4774209d7dd15584233fe5d4cc01b1434c9316b
|
[
"MIT"
] | null | null | null |
def module_name():
print("Soy el módulo 4")
if __name__ == "__main__":
    module_name()
| 20
| 28
| 0.65
|
def module_name():
print("Soy el módulo 4")
if __name__ == "__main__":
    module_name()
| true
| true
|