Dataset schema (one row per source file; ⌀ marks a nullable column):

| column | dtype | values / range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 10 to 805k |
| ext | string | 6 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 176 |
| max_stars_repo_name | string | length 7 to 114 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable (⌀) |
| max_issues_repo_path | string | length 4 to 176 |
| max_issues_repo_name | string | length 7 to 114 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 48.5k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable (⌀) |
| max_forks_repo_path | string | length 4 to 176 |
| max_forks_repo_name | string | length 7 to 114 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable (⌀) |
| content | string | length 10 to 805k |
| avg_line_length | float64 | 5.53 to 11k |
| max_line_length | int64 | 10 to 129k |
| alphanum_fraction | float64 | 0.13 to 0.93 |
| content_no_comment | string | length 0 to 449k |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |
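The rows below follow this schema. As a rough illustration of how such a table could be inspected, here is a minimal sketch assuming the rows have been exported to a local Parquet file; the file name `code_files.parquet` and the chosen filters are placeholders, not part of the dataset.

```python
import pandas as pd

# Load the exported table (the path is a placeholder assumption).
df = pd.read_parquet("code_files.parquet")

# Peek at a few of the metadata columns defined in the schema above.
print(df[["hexsha", "size", "max_stars_repo_name", "max_stars_count"]].head())

# Example filter: rows with a known star count and mostly-alphanumeric content.
subset = df[df["max_stars_count"].notna() & (df["alphanum_fraction"] > 0.6)]
print(len(subset), "rows kept")
```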
hexsha: f718af4f6cc559aadfef9f792ebdb4072e29d55f | size: 299 | ext: py | lang: Python
max_stars: repo_path=SoccerDataCrawler/pipelines.py | repo_name=saadchoukry/Ultimate-Manager-Assistant | head_hexsha=33584e83953dcf59970c9d7b5ec8e686e9aed89f | licenses=["MIT"] | count=null | event_min_datetime=null | event_max_datetime=null
max_issues: repo_path=SoccerDataCrawler/pipelines.py | repo_name=saadchoukry/Ultimate-Manager-Assistant | head_hexsha=33584e83953dcf59970c9d7b5ec8e686e9aed89f | licenses=["MIT"] | count=null | event_min_datetime=null | event_max_datetime=null
max_forks: repo_path=SoccerDataCrawler/pipelines.py | repo_name=saadchoukry/Ultimate-Manager-Assistant | head_hexsha=33584e83953dcf59970c9d7b5ec8e686e9aed89f | licenses=["MIT"] | count=null | event_min_datetime=null | event_max_datetime=null
content:
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
class SoccerdatacrawlerPipeline(object):
def process_item(self, item, spider):
return item
avg_line_length: 24.916667 | max_line_length: 66 | alphanum_fraction: 0.722408
content_no_comment:
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
class SoccerdatacrawlerPipeline(object):
def process_item(self, item, spider):
return item
is_comment_constant_removed: true | is_sharp_comment_removed: true
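The avg_line_length, max_line_length, and alphanum_fraction fields attached to each row look like simple statistics over content. A minimal sketch of how such values could be computed is shown below; the exact definitions used when building this table (for example, how blank lines or trailing newlines are counted) are assumptions.

```python
def content_stats(content: str) -> dict:
    """Rough per-file statistics in the spirit of the schema's derived columns."""
    lines = content.splitlines() or [""]
    # Average and maximum line length over the file's lines.
    avg_line_length = sum(len(line) for line in lines) / len(lines)
    max_line_length = max(len(line) for line in lines)
    # Fraction of characters that are alphanumeric.
    alphanum_fraction = (
        sum(ch.isalnum() for ch in content) / len(content) if content else 0.0
    )
    return {
        "avg_line_length": avg_line_length,
        "max_line_length": max_line_length,
        "alphanum_fraction": alphanum_fraction,
    }
```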
hexsha: f718b1b460678aee54372783e495ef607dfe5f64 | size: 1,587 | ext: py | lang: Python
max_stars: repo_path=evaluate/norvig_spell.py | repo_name=aiainui/JamSpell | head_hexsha=e6f2f28ab46049096bf8292f611d02f873f75d22 | licenses=["MIT"] | count=null | event_min_datetime=null | event_max_datetime=null
max_issues: repo_path=evaluate/norvig_spell.py | repo_name=aiainui/JamSpell | head_hexsha=e6f2f28ab46049096bf8292f611d02f873f75d22 | licenses=["MIT"] | count=null | event_min_datetime=null | event_max_datetime=null
max_forks: repo_path=evaluate/norvig_spell.py | repo_name=aiainui/JamSpell | head_hexsha=e6f2f28ab46049096bf8292f611d02f873f75d22 | licenses=["MIT"] | count=null | event_min_datetime=null | event_max_datetime=null
content:
import re
from collections import Counter
def words(text): return re.findall(r'\w+', text.lower())
WORDS = Counter()
TOTAL_WORDS = 0
def init(filename = 'big.txt'):
global WORDS
global TOTAL_WORDS
# Count word frequencies and store them in a dictionary
WORDS = Counter(words(open(filename).read()))
# Count the total number of words
TOTAL_WORDS=sum(WORDS.values())
# Compute each word's share of the total word count
def P(word, N=None):
"Probability of `word`."
N = N or TOTAL_WORDS
return WORDS[word] / N
def correction(word):
if known([word]):
return word
cands = known(edits1(word)) or known(edits2(word))
if not cands:
return word
cands = sorted(cands, key=P, reverse=True)
if cands[0] == word:
return word
return sorted(cands, key=P, reverse=True)
# Go through each word and keep only those present in the dictionary
def known(words):
"The subset of `words` that appear in the dictionary of WORDS."
return set(w for w in words if w in WORDS)
# Words at edit distance 1
def edits1(word):
"All edits that are one edit away from `word`."
letters = 'abcdefghijklmnopqrstuvwxyz'
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R)>1]
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
return set(deletes + transposes + replaces + inserts)
# Words at edit distance 2
def edits2(word):
"All edits that are two edits away from `word`."
return (e2 for e1 in edits1(word) for e2 in edits1(e1))
avg_line_length: 31.117647 | max_line_length: 83 | alphanum_fraction: 0.617517
content_no_comment:
import re
from collections import Counter
def words(text): return re.findall(r'\w+', text.lower())
WORDS = Counter()
TOTAL_WORDS = 0
def init(filename = 'big.txt'):
global WORDS
global TOTAL_WORDS
WORDS = Counter(words(open(filename).read()))
TOTAL_WORDS=sum(WORDS.values())
def P(word, N=None):
N = N or TOTAL_WORDS
return WORDS[word] / N
def correction(word):
if known([word]):
return word
cands = known(edits1(word)) or known(edits2(word))
if not cands:
return word
cands = sorted(cands, key=P, reverse=True)
if cands[0] == word:
return word
return sorted(cands, key=P, reverse=True)
def known(words):
return set(w for w in words if w in WORDS)
def edits1(word):
letters = 'abcdefghijklmnopqrstuvwxyz'
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R)>1]
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
return set(deletes + transposes + replaces + inserts)
def edits2(word):
return (e2 for e1 in edits1(word) for e2 in edits1(e1))
is_comment_constant_removed: true | is_sharp_comment_removed: true
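Comparing content with content_no_comment in the row above (the '#' comments and the docstrings are gone), together with the is_sharp_comment_removed and is_comment_constant_removed flags, suggests a comment-stripping preprocessing step. Below is a minimal sketch of one way to drop '#' comments using Python's standard tokenize module; it is an illustration only, not the pipeline actually used to produce content_no_comment, and removing standalone string constants (docstrings) would additionally need an AST pass.

```python
import io
import tokenize

def strip_sharp_comments(source: str) -> str:
    """Drop '#' comments from Python source while keeping code and strings."""
    tokens = tokenize.generate_tokens(io.StringIO(source).readline)
    # untokenize() rebuilds the source from the remaining tokens using their
    # recorded positions, so the code layout survives; where a comment used to
    # be, only whitespace remains.
    kept = [tok for tok in tokens if tok.type != tokenize.COMMENT]
    return tokenize.untokenize(kept)
```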
hexsha: f718b204d55fcadab130f4070f6f7ffbad544568 | size: 4,581 | ext: py | lang: Python
max_stars: repo_path=docs/conf.py | repo_name=fslds/carbon-black-cloud-sdk-python | head_hexsha=248a3c63d6b36d6fcdbcb3f51fb7751f062ed372 | licenses=["MIT"] | count=24 | event_min_datetime=2020-10-16T22:07:38.000Z | event_max_datetime=2022-03-24T14:58:03.000Z
max_issues: repo_path=docs/conf.py | repo_name=fslds/carbon-black-cloud-sdk-python | head_hexsha=248a3c63d6b36d6fcdbcb3f51fb7751f062ed372 | licenses=["MIT"] | count=63 | event_min_datetime=2020-10-26T18:26:15.000Z | event_max_datetime=2022-03-31T17:31:02.000Z
max_forks: repo_path=docs/conf.py | repo_name=fslds/carbon-black-cloud-sdk-python | head_hexsha=248a3c63d6b36d6fcdbcb3f51fb7751f062ed372 | licenses=["MIT"] | count=10 | event_min_datetime=2020-11-09T11:54:23.000Z | event_max_datetime=2022-03-24T20:44:00.000Z
content:
"""Sphinx configuration file"""
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Carbon Black Cloud Python SDK'
copyright = '2020-2021, Developer Relations'
author = 'Developer Relations'
# The full version, including alpha/beta/rc tags
release = '1.3.4'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.napoleon', 'sphinx.ext.autodoc', 'sphinx.ext.autosectionlabel']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'tango'
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'logo_only': True,
'display_version': False,
'style_external_links': True,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/cbc-sdk-thumbnail.png"
# Output file base name for HTML help builder.
htmlhelp_basename = 'CarbonBlackAPI-PythonBindingsdoc'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'CarbonBlackCloud-PythonBindings.tex', u'Carbon Black Cloud Python API Documentation',
u'Carbon Black Developer Network', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'carbonblackcloud-pythonbindings', u'Carbon Black Cloud Python API Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'CarbonBlackCloud-PythonBindings', u'Carbon Black Cloud Python API Documentation',
author, 'CarbonBlackCloud-PythonBindings', 'Python bindings for the Carbon Black Cloud API',
'Miscellaneous'),
]
latex_elements = {
# Additional stuff for the LaTeX preamble.
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
'preamble': "".join((
'\\DeclareUnicodeCharacter{25A0}{=}', # Solid box
)),
}
autoclass_content = 'both'
def setup(app):
"""Setup Sphinx."""
app.add_css_file('css/custom.css')
avg_line_length: 33.437956 | max_line_length: 103 | alphanum_fraction: 0.679546
content_no_comment:
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
project = 'Carbon Black Cloud Python SDK'
copyright = '2020-2021, Developer Relations'
author = 'Developer Relations'
release = '1.3.4'
extensions = ['sphinx.ext.napoleon', 'sphinx.ext.autodoc', 'sphinx.ext.autosectionlabel']
templates_path = ['_templates']
master_doc = 'index'
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
pygments_style = 'tango'
add_module_names = False
html_theme = 'sphinx_rtd_theme'
html_theme_options = {
'logo_only': True,
'display_version': False,
'style_external_links': True,
}
html_static_path = ['_static']
html_logo = "_static/cbc-sdk-thumbnail.png"
htmlhelp_basename = 'CarbonBlackAPI-PythonBindingsdoc'
latex_documents = [
(master_doc, 'CarbonBlackCloud-PythonBindings.tex', u'Carbon Black Cloud Python API Documentation',
u'Carbon Black Developer Network', 'manual'),
]
man_pages = [
(master_doc, 'carbonblackcloud-pythonbindings', u'Carbon Black Cloud Python API Documentation',
[author], 1)
]
texinfo_documents = [
(master_doc, 'CarbonBlackCloud-PythonBindings', u'Carbon Black Cloud Python API Documentation',
author, 'CarbonBlackCloud-PythonBindings', 'Python bindings for the Carbon Black Cloud API',
'Miscellaneous'),
]
latex_elements = {
'papersize': 'letterpaper',
'preamble': "".join((
'\\DeclareUnicodeCharacter{25A0}{=}',
)),
}
autoclass_content = 'both'
def setup(app):
app.add_css_file('css/custom.css')
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f718b2d2011ebac2f31533e27863f7f07bb850cd | size: 5,032 | ext: py | lang: Python
max_stars: repo_path=perfkitbenchmarker/linux_packages/hbase.py | repo_name=Nowasky/PerfKitBenchmarker | head_hexsha=cfa88e269eb373780910896ed4bdc8db09469753 | licenses=["Apache-2.0"] | count=3 | event_min_datetime=2018-04-28T13:06:14.000Z | event_max_datetime=2020-06-09T02:39:44.000Z
max_issues: repo_path=perfkitbenchmarker/linux_packages/hbase.py | repo_name=Nowasky/PerfKitBenchmarker | head_hexsha=cfa88e269eb373780910896ed4bdc8db09469753 | licenses=["Apache-2.0"] | count=1 | event_min_datetime=2021-09-09T07:43:25.000Z | event_max_datetime=2021-09-09T10:47:56.000Z
max_forks: repo_path=perfkitbenchmarker/linux_packages/hbase.py | repo_name=Nowasky/PerfKitBenchmarker | head_hexsha=cfa88e269eb373780910896ed4bdc8db09469753 | licenses=["Apache-2.0"] | count=6 | event_min_datetime=2019-06-11T18:59:57.000Z | event_max_datetime=2021-03-02T19:14:42.000Z
content:
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing HBase installation and cleanup functions.
HBase is a scalable NoSQL database built on Hadoop.
https://hbase.apache.org/
"""
import functools
import logging
import os
import posixpath
import re
from absl import flags
from perfkitbenchmarker import data
from perfkitbenchmarker import linux_packages
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import hadoop
FLAGS = flags.FLAGS
flags.DEFINE_string('hbase_version', '1.3.5', 'HBase version.')
flags.DEFINE_string('hbase_bin_url', None,
'Specify to override url from HBASE_URL_BASE.')
HBASE_URL_BASE = 'https://www-us.apache.org/dist/hbase'
HBASE_PATTERN = r'>(hbase-([\d\.]+)-bin.tar.gz)<'
HBASE_VERSION_PATTERN = re.compile('HBase (.*)$', re.IGNORECASE | re.MULTILINE)
DATA_FILES = ['hbase/hbase-site.xml.j2', 'hbase/regionservers.j2',
'hbase/hbase-env.sh.j2']
HBASE_DIR = posixpath.join(linux_packages.INSTALL_DIR, 'hbase')
HBASE_BIN = posixpath.join(HBASE_DIR, 'bin')
HBASE_CONF_DIR = posixpath.join(HBASE_DIR, 'conf')
def _GetHBaseURL():
"""Gets the HBase download url based on flags.
The default is to look for the version `--hbase_version` to download.
If `--hbase_use_stable` is set will look for the latest stable version.
Returns:
The HBase download url.
"""
return '{0}/{1}/hbase-{1}-bin.tar.gz'.format(
HBASE_URL_BASE, FLAGS.hbase_version)
def GetHBaseVersion(vm):
txt, _ = vm.RemoteCommand(posixpath.join(HBASE_BIN, 'hbase') + ' version')
m = HBASE_VERSION_PATTERN.search(txt)
if m:
return m.group(1)
else:
# Log as a warning; don't throw an exception, so processing can continue
logging.warn('Could not find HBase version from %s', txt)
return None
def CheckPrerequisites():
"""Verifies that the required resources are present.
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
"""
for resource in DATA_FILES:
data.ResourcePath(resource)
def _Install(vm):
vm.Install('hadoop')
vm.Install('curl')
hbase_url = FLAGS.hbase_bin_url or _GetHBaseURL()
vm.RemoteCommand(('mkdir {0} && curl -L {1} | '
'tar -C {0} --strip-components=1 -xzf -').format(
HBASE_DIR, hbase_url))
def YumInstall(vm):
"""Installs HBase on the VM."""
_Install(vm)
def AptInstall(vm):
"""Installs HBase on the VM."""
_Install(vm)
def _RenderConfig(vm, master_ip, zk_ips, regionserver_ips):
# Use the same heap configuration as Cassandra
memory_mb = vm.total_memory_kb // 1024
hbase_memory_mb = max(min(memory_mb // 2, 1024),
min(memory_mb // 4, 8192))
context = {
'master_ip': master_ip,
'worker_ips': regionserver_ips,
'zk_quorum_ips': zk_ips,
'hadoop_private_key': hadoop.HADOOP_PRIVATE_KEY,
'hbase_memory_mb': hbase_memory_mb,
'scratch_dir': vm.GetScratchDir(),
}
for file_name in DATA_FILES:
file_path = data.ResourcePath(file_name)
remote_path = posixpath.join(HBASE_CONF_DIR,
os.path.basename(file_name))
if file_name.endswith('.j2'):
vm.RenderTemplate(file_path, os.path.splitext(remote_path)[0], context)
else:
vm.RemoteCopy(file_path, remote_path)
def ConfigureAndStart(master, regionservers, zk_nodes):
"""Configure HBase on a cluster.
Args:
master: VM. Master VM.
regionservers: List of VMs.
"""
vms = [master] + regionservers
def LinkNativeLibraries(vm):
vm.RemoteCommand(('mkdir {0}/lib/native && '
'ln -s {1} {0}/lib/native/Linux-amd64-64').format(
HBASE_DIR,
posixpath.join(hadoop.HADOOP_DIR, 'lib', 'native')))
vm_util.RunThreaded(LinkNativeLibraries, vms)
fn = functools.partial(_RenderConfig, master_ip=master.internal_ip,
zk_ips=[vm.internal_ip for vm in zk_nodes],
regionserver_ips=[regionserver.internal_ip
for regionserver in regionservers])
vm_util.RunThreaded(fn, vms)
master.RemoteCommand('{0} dfs -mkdir /hbase'.format(
posixpath.join(hadoop.HADOOP_BIN, 'hdfs')))
master.RemoteCommand(posixpath.join(HBASE_BIN, 'start-hbase.sh'))
def Stop(master):
"""Stop HBase.
Args:
master: VM. Master VM.
"""
master.RemoteCommand(posixpath.join(HBASE_BIN, 'stop-hbase.sh'))
avg_line_length: 31.254658 | max_line_length: 79 | alphanum_fraction: 0.684221
content_no_comment:
import functools
import logging
import os
import posixpath
import re
from absl import flags
from perfkitbenchmarker import data
from perfkitbenchmarker import linux_packages
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import hadoop
FLAGS = flags.FLAGS
flags.DEFINE_string('hbase_version', '1.3.5', 'HBase version.')
flags.DEFINE_string('hbase_bin_url', None,
'Specify to override url from HBASE_URL_BASE.')
HBASE_URL_BASE = 'https://www-us.apache.org/dist/hbase'
HBASE_PATTERN = r'>(hbase-([\d\.]+)-bin.tar.gz)<'
HBASE_VERSION_PATTERN = re.compile('HBase (.*)$', re.IGNORECASE | re.MULTILINE)
DATA_FILES = ['hbase/hbase-site.xml.j2', 'hbase/regionservers.j2',
'hbase/hbase-env.sh.j2']
HBASE_DIR = posixpath.join(linux_packages.INSTALL_DIR, 'hbase')
HBASE_BIN = posixpath.join(HBASE_DIR, 'bin')
HBASE_CONF_DIR = posixpath.join(HBASE_DIR, 'conf')
def _GetHBaseURL():
return '{0}/{1}/hbase-{1}-bin.tar.gz'.format(
HBASE_URL_BASE, FLAGS.hbase_version)
def GetHBaseVersion(vm):
txt, _ = vm.RemoteCommand(posixpath.join(HBASE_BIN, 'hbase') + ' version')
m = HBASE_VERSION_PATTERN.search(txt)
if m:
return m.group(1)
else:
logging.warn('Could not find HBase version from %s', txt)
return None
def CheckPrerequisites():
for resource in DATA_FILES:
data.ResourcePath(resource)
def _Install(vm):
vm.Install('hadoop')
vm.Install('curl')
hbase_url = FLAGS.hbase_bin_url or _GetHBaseURL()
vm.RemoteCommand(('mkdir {0} && curl -L {1} | '
'tar -C {0} --strip-components=1 -xzf -').format(
HBASE_DIR, hbase_url))
def YumInstall(vm):
_Install(vm)
def AptInstall(vm):
_Install(vm)
def _RenderConfig(vm, master_ip, zk_ips, regionserver_ips):
# Use the same heap configuration as Cassandra
memory_mb = vm.total_memory_kb // 1024
hbase_memory_mb = max(min(memory_mb // 2, 1024),
min(memory_mb // 4, 8192))
context = {
'master_ip': master_ip,
'worker_ips': regionserver_ips,
'zk_quorum_ips': zk_ips,
'hadoop_private_key': hadoop.HADOOP_PRIVATE_KEY,
'hbase_memory_mb': hbase_memory_mb,
'scratch_dir': vm.GetScratchDir(),
}
for file_name in DATA_FILES:
file_path = data.ResourcePath(file_name)
remote_path = posixpath.join(HBASE_CONF_DIR,
os.path.basename(file_name))
if file_name.endswith('.j2'):
vm.RenderTemplate(file_path, os.path.splitext(remote_path)[0], context)
else:
vm.RemoteCopy(file_path, remote_path)
def ConfigureAndStart(master, regionservers, zk_nodes):
vms = [master] + regionservers
def LinkNativeLibraries(vm):
vm.RemoteCommand(('mkdir {0}/lib/native && '
'ln -s {1} {0}/lib/native/Linux-amd64-64').format(
HBASE_DIR,
posixpath.join(hadoop.HADOOP_DIR, 'lib', 'native')))
vm_util.RunThreaded(LinkNativeLibraries, vms)
fn = functools.partial(_RenderConfig, master_ip=master.internal_ip,
zk_ips=[vm.internal_ip for vm in zk_nodes],
regionserver_ips=[regionserver.internal_ip
for regionserver in regionservers])
vm_util.RunThreaded(fn, vms)
master.RemoteCommand('{0} dfs -mkdir /hbase'.format(
posixpath.join(hadoop.HADOOP_BIN, 'hdfs')))
master.RemoteCommand(posixpath.join(HBASE_BIN, 'start-hbase.sh'))
def Stop(master):
master.RemoteCommand(posixpath.join(HBASE_BIN, 'stop-hbase.sh'))
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f718b37e8693d0dc75af4742f9e4e99f3f98e05d | size: 15,690 | ext: py | lang: Python
max_stars: repo_path=src/tests/model_helper_test.py | repo_name=fejesd/script-server | head_hexsha=b46c0c25acea4fabe88d3206a404dc1c04e71e37 | licenses=["Apache-2.0", "CC0-1.0"] | count=null | event_min_datetime=null | event_max_datetime=null
max_issues: repo_path=src/tests/model_helper_test.py | repo_name=fejesd/script-server | head_hexsha=b46c0c25acea4fabe88d3206a404dc1c04e71e37 | licenses=["Apache-2.0", "CC0-1.0"] | count=null | event_min_datetime=null | event_max_datetime=null
max_forks: repo_path=src/tests/model_helper_test.py | repo_name=fejesd/script-server | head_hexsha=b46c0c25acea4fabe88d3206a404dc1c04e71e37 | licenses=["Apache-2.0", "CC0-1.0"] | count=null | event_min_datetime=null | event_max_datetime=null
content:
import os
import unittest
from config.constants import FILE_TYPE_FILE, FILE_TYPE_DIR
from model import model_helper
from model.model_helper import read_list, read_dict, fill_parameter_values, resolve_env_vars, \
InvalidFileException, read_bool_from_config, InvalidValueException, InvalidValueTypeException, read_str_from_config
from tests import test_utils
from tests.test_utils import create_parameter_model, set_env_value
class TestReadList(unittest.TestCase):
def test_simple_list(self):
values_dict = {'list_key': [1, 2, 3]}
list_value = read_list(values_dict, 'list_key')
self.assertEqual(list_value, [1, 2, 3])
def test_single_value(self):
values_dict = {'list_key': 'hello'}
list_value = read_list(values_dict, 'list_key')
self.assertEqual(list_value, ['hello'])
def test_empty_single_value(self):
values_dict = {'list_key': ''}
list_value = read_list(values_dict, 'list_key')
self.assertEqual(list_value, [''])
def test_default_value_when_missing(self):
values_dict = {'another_key': 'hello'}
list_value = read_list(values_dict, 'list_key')
self.assertEqual(list_value, [])
def test_default_value_when_specified(self):
values_dict = {'another_key': 'hello'}
list_value = read_list(values_dict, 'list_key', [True, False])
self.assertEqual(list_value, [True, False])
def test_dict_not_allowed_value(self):
values_dict = {'list_key': {'key1': 'value1'}}
self.assertRaises(Exception, read_list, values_dict, 'list_key')
class TestReadDict(unittest.TestCase):
def test_simple_dict(self):
values_dict = {'dict_key': {'key1': 'value1', 'key2': 'value2'}}
dict_value = read_dict(values_dict, 'dict_key')
self.assertEqual(dict_value, {'key1': 'value1', 'key2': 'value2'})
def test_list_value_not_allowed(self):
values_dict = {'dict_key': [1, 2]}
self.assertRaises(Exception, read_dict, values_dict, 'dict_key')
def test_single_value_not_allowed(self):
values_dict = {'dict_key': 'hello'}
self.assertRaises(Exception, read_dict, values_dict, 'dict_key')
def test_empty_value_not_allowed(self):
values_dict = {'dict_key': ''}
self.assertRaises(Exception, read_dict, values_dict, 'dict_key')
def test_empty_dict(self):
values_dict = {'dict_key': {}}
dict_value = read_dict(values_dict, 'dict_key', {'key1': 'value1'})
self.assertEqual(dict_value, {})
def test_default_when_missing(self):
values_dict = {'another_key': {'key1': 'value1'}}
dict_value = read_dict(values_dict, 'dict_key')
self.assertEqual(dict_value, {})
def test_default_when_specified(self):
values_dict = {'another_key': {'key1': 'value1'}}
dict_value = read_dict(values_dict, 'dict_key', {'key2': 'value2'})
self.assertEqual(dict_value, {'key2': 'value2'})
class TestReadBoolFromConfig(unittest.TestCase):
def test_bool_true(self):
value = read_bool_from_config('my_bool', {'my_bool': True})
self.assertEqual(True, value)
def test_bool_false(self):
value = read_bool_from_config('my_bool', {'my_bool': False})
self.assertEqual(False, value)
def test_str_true(self):
value = read_bool_from_config('my_bool', {'my_bool': 'true'})
self.assertEqual(True, value)
def test_str_false(self):
value = read_bool_from_config('my_bool', {'my_bool': 'false'})
self.assertEqual(False, value)
def test_str_true_ignore_case(self):
value = read_bool_from_config('my_bool', {'my_bool': 'TRUE'})
self.assertEqual(True, value)
def test_str_false_ignore_case(self):
value = read_bool_from_config('my_bool', {'my_bool': 'False'})
self.assertEqual(False, value)
def test_missing_value_without_default(self):
value = read_bool_from_config('my_bool', {'text': '123'})
self.assertIsNone(value)
def test_missing_value_with_default(self):
value = read_bool_from_config('my_bool', {'text': '123'}, default=True)
self.assertEqual(True, value)
def test_unsupported_type(self):
self.assertRaisesRegex(
Exception, '"my_bool" field should be true or false',
read_bool_from_config,
'my_bool',
{'my_bool': 1})
class TestFillParameterValues(unittest.TestCase):
def test_fill_single_parameter(self):
result = fill_parameter_values(self.create_parameters('p1'), 'Hello, ${p1}!', {'p1': 'world'})
self.assertEqual('Hello, world!', result)
def test_fill_single_parameter_multiple_times(self):
result = fill_parameter_values(self.create_parameters('p1'), 'Ho${p1}-${p1}${p1}!', {'p1': 'ho'})
self.assertEqual('Hoho-hoho!', result)
def test_fill_multiple_parameters(self):
result = fill_parameter_values(self.create_parameters('p1', 'p2', 'p3'),
'Some ${p2} text, which is ${p3} by ${p1}.',
{'p1': 'script-server', 'p2': 'small', 'p3': 'generated'})
self.assertEqual('Some small text, which is generated by script-server.', result)
def test_fill_multiple_parameters_when_one_without_value(self):
result = fill_parameter_values(self.create_parameters('p1', 'p2'),
'${p1} vs ${p2}',
{'p1': 'ABC'})
self.assertEqual('ABC vs ', result)
def test_fill_multiple_parameters_when_one_secure(self):
parameters = self.create_parameters('p1', 'p2')
parameters[1].secure = True
result = fill_parameter_values(parameters,
'${p1} vs ${p2}',
{'p1': 'ABC', 'p2': 'XYZ'})
self.assertEqual('ABC vs ${p2}', result)
def test_fill_non_string_value(self):
result = fill_parameter_values(self.create_parameters('p1'), 'Value = ${p1}', {'p1': 5})
self.assertEqual('Value = 5', result)
def test_fill_when_no_parameter_for_pattern(self):
result = fill_parameter_values(self.create_parameters('p1'), 'Value = ${xyz}', {'p1': '12345'})
self.assertEqual('Value = ${xyz}', result)
def test_fill_when_server_file_recursive_and_one_level(self):
parameters = [create_parameter_model(
'p1',
type='server_file',
file_dir=test_utils.temp_folder,
file_recursive=True)]
result = fill_parameter_values(parameters, 'Value = ${p1}', {'p1': ['folder']})
expected_value = os.path.join(test_utils.temp_folder, 'folder')
self.assertEqual('Value = ' + expected_value, result)
def test_fill_when_server_file_recursive_and_multiple_levels(self):
parameters = [create_parameter_model(
'p1',
type='server_file',
file_dir=test_utils.temp_folder,
file_recursive=True)]
result = fill_parameter_values(parameters, 'Value = ${p1}', {'p1': ['folder', 'sub', 'log.txt']})
expected_value = os.path.join(test_utils.temp_folder, 'folder', 'sub', 'log.txt')
self.assertEqual('Value = ' + expected_value, result)
def test_fill_when_server_file_plain(self):
parameters = [create_parameter_model(
'p1',
type='server_file',
file_dir=test_utils.temp_folder,
file_recursive=True)]
result = fill_parameter_values(parameters, 'Value = ${p1}', {'p1': 'folder'})
self.assertEqual('Value = folder', result)
def create_parameters(self, *names):
result = []
for name in names:
parameter = create_parameter_model(name, all_parameters=result)
result.append(parameter)
return result
def setUp(self) -> None:
super().setUp()
test_utils.setup()
def tearDown(self) -> None:
super().tearDown()
test_utils.cleanup()
class TestResolveEnvVars(unittest.TestCase):
def test_replace_full_match(self):
set_env_value('my_key', 'my_password')
resolved_val = resolve_env_vars('$$my_key', full_match=True)
self.assertEqual('my_password', resolved_val)
def test_missing_env_full_match(self):
self.assertRaises(Exception, resolve_env_vars, '$$my_key', True)
def test_no_replace_full_match(self):
value = 'abc!@#$%^&*,?$xyz'
resolved_val = resolve_env_vars(value, full_match=True)
self.assertEqual(value, resolved_val)
def test_no_replace_in_middle_full_match(self):
value = 'abc$$HOME.123'
resolved_val = resolve_env_vars(value, full_match=True)
self.assertEqual(value, resolved_val)
def test_replace_any_when_exact(self):
set_env_value('my_key', 'my_password')
resolved_val = resolve_env_vars('$$my_key')
self.assertEqual('my_password', resolved_val)
def test_replace_any_when_single_in_middle(self):
set_env_value('my_key', 'my_password')
resolved_val = resolve_env_vars('start/$$my_key/end')
self.assertEqual('start/my_password/end', resolved_val)
def test_replace_any_when_repeating(self):
set_env_value('my_key', 'abc')
resolved_val = resolve_env_vars('$$my_key,$$my_key.$$my_key')
self.assertEqual('abc,abc.abc', resolved_val)
def test_replace_any_when_multiple(self):
set_env_value('key1', 'Hello')
set_env_value('key2', 'world')
set_env_value('key3', '!')
resolved_val = resolve_env_vars('$$key1 $$key2!$$key3')
self.assertEqual('Hello world!!', resolved_val)
def test_replace_any_when_no_env(self):
resolved_val = resolve_env_vars('Hello $$key1!')
self.assertEqual('Hello $$key1!', resolved_val)
def test_resolve_when_empty(self):
resolved_val = resolve_env_vars('')
self.assertEqual('', resolved_val)
def test_resolve_when_int(self):
resolved_val = resolve_env_vars(123)
self.assertEqual(123, resolved_val)
def tearDown(self):
super().tearDown()
test_utils.cleanup()
class ListFilesTest(unittest.TestCase):
def test_single_file(self):
test_utils.create_file('my.txt')
files = model_helper.list_files(test_utils.temp_folder)
self.assertEqual(['my.txt'], files)
def test_multiple_files(self):
test_utils.create_files(['My.txt', 'file.dat', 'test.sh'])
test_utils.create_dir('documents')
files = model_helper.list_files(test_utils.temp_folder)
self.assertEqual(['documents', 'file.dat', 'My.txt', 'test.sh'], files)
def test_multiple_files_non_recursive(self):
for dir in [None, 'documents', 'smth']:
for file in ['my.txt', 'file.dat']:
if dir:
test_utils.create_file(os.path.join(dir, dir + '_' + file))
else:
test_utils.create_file(file)
files = model_helper.list_files(test_utils.temp_folder)
self.assertEqual(['documents', 'file.dat', 'my.txt', 'smth'], files)
def test_file_type_file(self):
files = ['file1', 'file2']
test_utils.create_files(files)
test_utils.create_dir('my_dir')
actual_files = model_helper.list_files(test_utils.temp_folder, file_type=FILE_TYPE_FILE)
self.assertEqual(files, actual_files)
def test_file_type_dir(self):
files = ['file1', 'file2']
test_utils.create_files(files)
test_utils.create_dir('my_dir')
actual_files = model_helper.list_files(test_utils.temp_folder, file_type=FILE_TYPE_DIR)
self.assertEqual(['my_dir'], actual_files)
def test_file_extensions(self):
for extension in ['exe', 'dat', 'txt', 'sh', 'pdf', 'docx']:
for file in ['file1', 'file2']:
test_utils.create_file(file + '.' + extension)
test_utils.create_dir('my_dir' + '.' + extension)
files = model_helper.list_files(test_utils.temp_folder, file_extensions=['exe', 'pdf'])
self.assertEqual(['file1.exe', 'file1.pdf', 'file2.exe', 'file2.pdf'], files)
def test_dir_not_exists(self):
dir = os.path.join(test_utils.temp_folder, 'dir2')
self.assertRaises(InvalidFileException, model_helper.list_files, dir)
def setUp(self):
test_utils.setup()
def tearDown(self):
test_utils.cleanup()
class TestReadIntFromConfig(unittest.TestCase):
def test_normal_int_value(self):
value = model_helper.read_int_from_config('abc', {'abc': 123})
self.assertEqual(123, value)
def test_zero_int_value(self):
value = model_helper.read_int_from_config('abc', {'abc': 0})
self.assertEqual(0, value)
def test_string_value(self):
value = model_helper.read_int_from_config('abc', {'abc': '-666'})
self.assertEqual(-666, value)
def test_string_value_when_invalid(self):
self.assertRaises(InvalidValueException, model_helper.read_int_from_config, 'abc', {'abc': '1000b'})
def test_unsupported_type(self):
self.assertRaises(InvalidValueTypeException, model_helper.read_int_from_config, 'abc', {'abc': True})
def test_default_value(self):
value = model_helper.read_int_from_config('my_key', {'abc': 100})
self.assertIsNone(value)
def test_default_value_explicit(self):
value = model_helper.read_int_from_config('my_key', {'abc': 100}, default=5)
self.assertEqual(5, value)
def test_default_value_when_empty_string(self):
value = model_helper.read_int_from_config('my_key', {'my_key': ' '}, default=9999)
self.assertEqual(9999, value)
class TestReadStrFromConfig(unittest.TestCase):
def test_normal_text(self):
value = read_str_from_config({'key1': 'xyz'}, 'key1')
self.assertEquals('xyz', value)
def test_none_value_no_default(self):
value = read_str_from_config({'key1': None}, 'key1')
self.assertIsNone(value)
def test_none_value_with_default(self):
value = read_str_from_config({'key1': None}, 'key1', default='abc')
self.assertEquals('abc', value)
def test_no_key_no_default(self):
value = read_str_from_config({'key1': 'xyz'}, 'key2')
self.assertIsNone(value)
def test_no_key_with_default(self):
value = read_str_from_config({'key1': 'xyz'}, 'key2', default='abc')
self.assertEquals('abc', value)
def test_text_with_whitespaces(self):
value = read_str_from_config({'key1': ' xyz \n'}, 'key1')
self.assertEquals(' xyz \n', value)
def test_text_when_blank_to_none_and_none(self):
value = read_str_from_config({'key1': None}, 'key1', blank_to_none=True)
self.assertIsNone(value)
def test_text_when_blank_to_none_and_empty(self):
value = read_str_from_config({'key1': ''}, 'key1', blank_to_none=True)
self.assertIsNone(value)
def test_text_when_blank_to_none_and_blank(self):
value = read_str_from_config({'key1': ' \t \n'}, 'key1', blank_to_none=True)
self.assertIsNone(value)
def test_text_when_blank_to_none_and_blank_and_default(self):
value = read_str_from_config({'key1': ' \t \n'}, 'key1', blank_to_none=True, default='abc')
self.assertEquals('abc', value)
def test_text_when_int(self):
self.assertRaisesRegex(InvalidValueTypeException, 'Invalid key1 value: string expected, but was: 5',
read_str_from_config, {'key1': 5}, 'key1')
avg_line_length: 38.933002 | max_line_length: 119 | alphanum_fraction: 0.651052
content_no_comment:
import os
import unittest
from config.constants import FILE_TYPE_FILE, FILE_TYPE_DIR
from model import model_helper
from model.model_helper import read_list, read_dict, fill_parameter_values, resolve_env_vars, \
InvalidFileException, read_bool_from_config, InvalidValueException, InvalidValueTypeException, read_str_from_config
from tests import test_utils
from tests.test_utils import create_parameter_model, set_env_value
class TestReadList(unittest.TestCase):
def test_simple_list(self):
values_dict = {'list_key': [1, 2, 3]}
list_value = read_list(values_dict, 'list_key')
self.assertEqual(list_value, [1, 2, 3])
def test_single_value(self):
values_dict = {'list_key': 'hello'}
list_value = read_list(values_dict, 'list_key')
self.assertEqual(list_value, ['hello'])
def test_empty_single_value(self):
values_dict = {'list_key': ''}
list_value = read_list(values_dict, 'list_key')
self.assertEqual(list_value, [''])
def test_default_value_when_missing(self):
values_dict = {'another_key': 'hello'}
list_value = read_list(values_dict, 'list_key')
self.assertEqual(list_value, [])
def test_default_value_when_specified(self):
values_dict = {'another_key': 'hello'}
list_value = read_list(values_dict, 'list_key', [True, False])
self.assertEqual(list_value, [True, False])
def test_dict_not_allowed_value(self):
values_dict = {'list_key': {'key1': 'value1'}}
self.assertRaises(Exception, read_list, values_dict, 'list_key')
class TestReadDict(unittest.TestCase):
def test_simple_dict(self):
values_dict = {'dict_key': {'key1': 'value1', 'key2': 'value2'}}
dict_value = read_dict(values_dict, 'dict_key')
self.assertEqual(dict_value, {'key1': 'value1', 'key2': 'value2'})
def test_list_value_not_allowed(self):
values_dict = {'dict_key': [1, 2]}
self.assertRaises(Exception, read_dict, values_dict, 'dict_key')
def test_single_value_not_allowed(self):
values_dict = {'dict_key': 'hello'}
self.assertRaises(Exception, read_dict, values_dict, 'dict_key')
def test_empty_value_not_allowed(self):
values_dict = {'dict_key': ''}
self.assertRaises(Exception, read_dict, values_dict, 'dict_key')
def test_empty_dict(self):
values_dict = {'dict_key': {}}
dict_value = read_dict(values_dict, 'dict_key', {'key1': 'value1'})
self.assertEqual(dict_value, {})
def test_default_when_missing(self):
values_dict = {'another_key': {'key1': 'value1'}}
dict_value = read_dict(values_dict, 'dict_key')
self.assertEqual(dict_value, {})
def test_default_when_specified(self):
values_dict = {'another_key': {'key1': 'value1'}}
dict_value = read_dict(values_dict, 'dict_key', {'key2': 'value2'})
self.assertEqual(dict_value, {'key2': 'value2'})
class TestReadBoolFromConfig(unittest.TestCase):
def test_bool_true(self):
value = read_bool_from_config('my_bool', {'my_bool': True})
self.assertEqual(True, value)
def test_bool_false(self):
value = read_bool_from_config('my_bool', {'my_bool': False})
self.assertEqual(False, value)
def test_str_true(self):
value = read_bool_from_config('my_bool', {'my_bool': 'true'})
self.assertEqual(True, value)
def test_str_false(self):
value = read_bool_from_config('my_bool', {'my_bool': 'false'})
self.assertEqual(False, value)
def test_str_true_ignore_case(self):
value = read_bool_from_config('my_bool', {'my_bool': 'TRUE'})
self.assertEqual(True, value)
def test_str_false_ignore_case(self):
value = read_bool_from_config('my_bool', {'my_bool': 'False'})
self.assertEqual(False, value)
def test_missing_value_without_default(self):
value = read_bool_from_config('my_bool', {'text': '123'})
self.assertIsNone(value)
def test_missing_value_with_default(self):
value = read_bool_from_config('my_bool', {'text': '123'}, default=True)
self.assertEqual(True, value)
def test_unsupported_type(self):
self.assertRaisesRegex(
Exception, '"my_bool" field should be true or false',
read_bool_from_config,
'my_bool',
{'my_bool': 1})
class TestFillParameterValues(unittest.TestCase):
def test_fill_single_parameter(self):
result = fill_parameter_values(self.create_parameters('p1'), 'Hello, ${p1}!', {'p1': 'world'})
self.assertEqual('Hello, world!', result)
def test_fill_single_parameter_multiple_times(self):
result = fill_parameter_values(self.create_parameters('p1'), 'Ho${p1}-${p1}${p1}!', {'p1': 'ho'})
self.assertEqual('Hoho-hoho!', result)
def test_fill_multiple_parameters(self):
result = fill_parameter_values(self.create_parameters('p1', 'p2', 'p3'),
'Some ${p2} text, which is ${p3} by ${p1}.',
{'p1': 'script-server', 'p2': 'small', 'p3': 'generated'})
self.assertEqual('Some small text, which is generated by script-server.', result)
def test_fill_multiple_parameters_when_one_without_value(self):
result = fill_parameter_values(self.create_parameters('p1', 'p2'),
'${p1} vs ${p2}',
{'p1': 'ABC'})
self.assertEqual('ABC vs ', result)
def test_fill_multiple_parameters_when_one_secure(self):
parameters = self.create_parameters('p1', 'p2')
parameters[1].secure = True
result = fill_parameter_values(parameters,
'${p1} vs ${p2}',
{'p1': 'ABC', 'p2': 'XYZ'})
self.assertEqual('ABC vs ${p2}', result)
def test_fill_non_string_value(self):
result = fill_parameter_values(self.create_parameters('p1'), 'Value = ${p1}', {'p1': 5})
self.assertEqual('Value = 5', result)
def test_fill_when_no_parameter_for_pattern(self):
result = fill_parameter_values(self.create_parameters('p1'), 'Value = ${xyz}', {'p1': '12345'})
self.assertEqual('Value = ${xyz}', result)
def test_fill_when_server_file_recursive_and_one_level(self):
parameters = [create_parameter_model(
'p1',
type='server_file',
file_dir=test_utils.temp_folder,
file_recursive=True)]
result = fill_parameter_values(parameters, 'Value = ${p1}', {'p1': ['folder']})
expected_value = os.path.join(test_utils.temp_folder, 'folder')
self.assertEqual('Value = ' + expected_value, result)
def test_fill_when_server_file_recursive_and_multiple_levels(self):
parameters = [create_parameter_model(
'p1',
type='server_file',
file_dir=test_utils.temp_folder,
file_recursive=True)]
result = fill_parameter_values(parameters, 'Value = ${p1}', {'p1': ['folder', 'sub', 'log.txt']})
expected_value = os.path.join(test_utils.temp_folder, 'folder', 'sub', 'log.txt')
self.assertEqual('Value = ' + expected_value, result)
def test_fill_when_server_file_plain(self):
parameters = [create_parameter_model(
'p1',
type='server_file',
file_dir=test_utils.temp_folder,
file_recursive=True)]
result = fill_parameter_values(parameters, 'Value = ${p1}', {'p1': 'folder'})
self.assertEqual('Value = folder', result)
def create_parameters(self, *names):
result = []
for name in names:
parameter = create_parameter_model(name, all_parameters=result)
result.append(parameter)
return result
def setUp(self) -> None:
super().setUp()
test_utils.setup()
def tearDown(self) -> None:
super().tearDown()
test_utils.cleanup()
class TestResolveEnvVars(unittest.TestCase):
def test_replace_full_match(self):
set_env_value('my_key', 'my_password')
resolved_val = resolve_env_vars('$$my_key', full_match=True)
self.assertEqual('my_password', resolved_val)
def test_missing_env_full_match(self):
self.assertRaises(Exception, resolve_env_vars, '$$my_key', True)
def test_no_replace_full_match(self):
value = 'abc!@#$%^&*,?$xyz'
resolved_val = resolve_env_vars(value, full_match=True)
self.assertEqual(value, resolved_val)
def test_no_replace_in_middle_full_match(self):
value = 'abc$$HOME.123'
resolved_val = resolve_env_vars(value, full_match=True)
self.assertEqual(value, resolved_val)
def test_replace_any_when_exact(self):
set_env_value('my_key', 'my_password')
resolved_val = resolve_env_vars('$$my_key')
self.assertEqual('my_password', resolved_val)
def test_replace_any_when_single_in_middle(self):
set_env_value('my_key', 'my_password')
resolved_val = resolve_env_vars('start/$$my_key/end')
self.assertEqual('start/my_password/end', resolved_val)
def test_replace_any_when_repeating(self):
set_env_value('my_key', 'abc')
resolved_val = resolve_env_vars('$$my_key,$$my_key.$$my_key')
self.assertEqual('abc,abc.abc', resolved_val)
def test_replace_any_when_multiple(self):
set_env_value('key1', 'Hello')
set_env_value('key2', 'world')
set_env_value('key3', '!')
resolved_val = resolve_env_vars('$$key1 $$key2!$$key3')
self.assertEqual('Hello world!!', resolved_val)
def test_replace_any_when_no_env(self):
resolved_val = resolve_env_vars('Hello $$key1!')
self.assertEqual('Hello $$key1!', resolved_val)
def test_resolve_when_empty(self):
resolved_val = resolve_env_vars('')
self.assertEqual('', resolved_val)
def test_resolve_when_int(self):
resolved_val = resolve_env_vars(123)
self.assertEqual(123, resolved_val)
def tearDown(self):
super().tearDown()
test_utils.cleanup()
class ListFilesTest(unittest.TestCase):
def test_single_file(self):
test_utils.create_file('my.txt')
files = model_helper.list_files(test_utils.temp_folder)
self.assertEqual(['my.txt'], files)
def test_multiple_files(self):
test_utils.create_files(['My.txt', 'file.dat', 'test.sh'])
test_utils.create_dir('documents')
files = model_helper.list_files(test_utils.temp_folder)
self.assertEqual(['documents', 'file.dat', 'My.txt', 'test.sh'], files)
def test_multiple_files_non_recursive(self):
for dir in [None, 'documents', 'smth']:
for file in ['my.txt', 'file.dat']:
if dir:
test_utils.create_file(os.path.join(dir, dir + '_' + file))
else:
test_utils.create_file(file)
files = model_helper.list_files(test_utils.temp_folder)
self.assertEqual(['documents', 'file.dat', 'my.txt', 'smth'], files)
def test_file_type_file(self):
files = ['file1', 'file2']
test_utils.create_files(files)
test_utils.create_dir('my_dir')
actual_files = model_helper.list_files(test_utils.temp_folder, file_type=FILE_TYPE_FILE)
self.assertEqual(files, actual_files)
def test_file_type_dir(self):
files = ['file1', 'file2']
test_utils.create_files(files)
test_utils.create_dir('my_dir')
actual_files = model_helper.list_files(test_utils.temp_folder, file_type=FILE_TYPE_DIR)
self.assertEqual(['my_dir'], actual_files)
def test_file_extensions(self):
for extension in ['exe', 'dat', 'txt', 'sh', 'pdf', 'docx']:
for file in ['file1', 'file2']:
test_utils.create_file(file + '.' + extension)
test_utils.create_dir('my_dir' + '.' + extension)
files = model_helper.list_files(test_utils.temp_folder, file_extensions=['exe', 'pdf'])
self.assertEqual(['file1.exe', 'file1.pdf', 'file2.exe', 'file2.pdf'], files)
def test_dir_not_exists(self):
dir = os.path.join(test_utils.temp_folder, 'dir2')
self.assertRaises(InvalidFileException, model_helper.list_files, dir)
def setUp(self):
test_utils.setup()
def tearDown(self):
test_utils.cleanup()
class TestReadIntFromConfig(unittest.TestCase):
def test_normal_int_value(self):
value = model_helper.read_int_from_config('abc', {'abc': 123})
self.assertEqual(123, value)
def test_zero_int_value(self):
value = model_helper.read_int_from_config('abc', {'abc': 0})
self.assertEqual(0, value)
def test_string_value(self):
value = model_helper.read_int_from_config('abc', {'abc': '-666'})
self.assertEqual(-666, value)
def test_string_value_when_invalid(self):
self.assertRaises(InvalidValueException, model_helper.read_int_from_config, 'abc', {'abc': '1000b'})
def test_unsupported_type(self):
self.assertRaises(InvalidValueTypeException, model_helper.read_int_from_config, 'abc', {'abc': True})
def test_default_value(self):
value = model_helper.read_int_from_config('my_key', {'abc': 100})
self.assertIsNone(value)
def test_default_value_explicit(self):
value = model_helper.read_int_from_config('my_key', {'abc': 100}, default=5)
self.assertEqual(5, value)
def test_default_value_when_empty_string(self):
value = model_helper.read_int_from_config('my_key', {'my_key': ' '}, default=9999)
self.assertEqual(9999, value)
class TestReadStrFromConfig(unittest.TestCase):
def test_normal_text(self):
value = read_str_from_config({'key1': 'xyz'}, 'key1')
self.assertEquals('xyz', value)
def test_none_value_no_default(self):
value = read_str_from_config({'key1': None}, 'key1')
self.assertIsNone(value)
def test_none_value_with_default(self):
value = read_str_from_config({'key1': None}, 'key1', default='abc')
self.assertEquals('abc', value)
def test_no_key_no_default(self):
value = read_str_from_config({'key1': 'xyz'}, 'key2')
self.assertIsNone(value)
def test_no_key_with_default(self):
value = read_str_from_config({'key1': 'xyz'}, 'key2', default='abc')
self.assertEquals('abc', value)
def test_text_with_whitespaces(self):
value = read_str_from_config({'key1': ' xyz \n'}, 'key1')
self.assertEquals(' xyz \n', value)
def test_text_when_blank_to_none_and_none(self):
value = read_str_from_config({'key1': None}, 'key1', blank_to_none=True)
self.assertIsNone(value)
def test_text_when_blank_to_none_and_empty(self):
value = read_str_from_config({'key1': ''}, 'key1', blank_to_none=True)
self.assertIsNone(value)
def test_text_when_blank_to_none_and_blank(self):
value = read_str_from_config({'key1': ' \t \n'}, 'key1', blank_to_none=True)
self.assertIsNone(value)
def test_text_when_blank_to_none_and_blank_and_default(self):
value = read_str_from_config({'key1': ' \t \n'}, 'key1', blank_to_none=True, default='abc')
self.assertEquals('abc', value)
def test_text_when_int(self):
self.assertRaisesRegex(InvalidValueTypeException, 'Invalid key1 value: string expected, but was: 5',
read_str_from_config, {'key1': 5}, 'key1')
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f718b4588aba611670b18d91b9a922b3ee82b311 | size: 5,045 | ext: py | lang: Python
max_stars: repo_path=examples/shapefile1.py | repo_name=yang69can/pyngl | head_hexsha=78a7040ce9de4b7a442b0c3b5faecccab2f01426 | licenses=["Apache-2.0"] | count=125 | event_min_datetime=2016-11-24T09:04:28.000Z | event_max_datetime=2022-01-22T14:06:56.000Z
max_issues: repo_path=examples/shapefile1.py | repo_name=yang69can/pyngl | head_hexsha=78a7040ce9de4b7a442b0c3b5faecccab2f01426 | licenses=["Apache-2.0"] | count=52 | event_min_datetime=2017-11-08T23:23:02.000Z | event_max_datetime=2022-03-20T03:17:39.000Z
max_forks: repo_path=examples/shapefile1.py | repo_name=yang69can/pyngl | head_hexsha=78a7040ce9de4b7a442b0c3b5faecccab2f01426 | licenses=["Apache-2.0"] | count=25 | event_min_datetime=2017-08-27T10:50:43.000Z | event_max_datetime=2022-01-29T14:56:05.000Z
content:
#
# File:
# shapefile1.py
#
# Synopsis:
# Illustrates reading data from a shapefile and coloring U.S. states
# by "Percent unemployment".
#
# Categories:
# Maps only
# Polygons
#
# Author:
# Mary Haley (based on an NCL script of Rick Brownrigg, CISL/NCAR)
#
# Date of initial publication:
# August 2010
#
# Description:
# This example reads shapefile data from the National Atlas
# (http://www.nationalatlas.gov/) and color fills the states
# based upon "percent unemployment", which is calculated from
# several of the non-spatial variables in the file.
#
# You must also have the files "states.dbf" and "states.shx"
# for this example to run. You can look for the data files at:
#
# http://www.ncl.ucar.edu/Applications/Data/#shp
#
# Effects illustrated:
# o Plotting data from a shapefile
# o Drawing a custom labelbar on a map
# o Drawing filled polygons over a Lambert Conformal plot
# o Zooming in on a particular area on a Lambert Conformal map
#
# Output:
# A single visualization is produced showing the filled U.S. states.
#
#
# Import numpy and os
#
from __future__ import print_function
import numpy,os
#
# Import Ngl,Nio support functions.
#
import Ngl, Nio
wks_type = "png"
wks = Ngl.open_wks (wks_type,"shapefile1")
Ngl.define_colormap(wks,"rainbow+gray")
#
# Map resources.
#
res = Ngl.Resources()
res.mpProjection = "LambertConformal"
res.mpLambertParallel1F = 33 # two parallels
res.mpLambertParallel2F = 45
res.mpLambertMeridianF = -98 # central meridian
res.mpLimitMode = "Corners" # limit map via two opposite corners
res.mpLeftCornerLatF = 22 # left corner
res.mpLeftCornerLonF = -125 # left corner
res.mpRightCornerLatF = 50 # right corner
res.mpRightCornerLonF = -64 # right corner
res.mpFillOn = True # Turn on fill for map areas.
res.mpLandFillColor = "LightGray"
res.mpOceanFillColor = "Cyan"
res.mpInlandWaterFillColor = "Cyan"
res.pmTickMarkDisplayMode = "Always" # Turn on map tickmarks
res.tiMainString = "Percentage unemployment, by state"
res.nglDraw = False # don't draw the plots now
res.nglFrame = False # or advance the frame
plot = Ngl.map(wks,res) # create the map plot
#
# Read data off shapefile. Must have states.shp, states.dbf,
# and states.prj file in this directory.
#
dirc = Ngl.pynglpath("data")
f = Nio.open_file(os.path.join(dirc,"shp","states.shp"), "r") # Open shapefile
segments = f.variables["segments"][:]
geometry = f.variables["geometry"][:]
segsDims = segments.shape
geomDims = geometry.shape
#
# Read global attributes
#
geom_segIndex = f.geom_segIndex
geom_numSegs = f.geom_numSegs
segs_xyzIndex = f.segs_xyzIndex
segs_numPnts = f.segs_numPnts
numFeatures = geomDims[0]
unemp = f.variables["UNEMPLOY"][:] / f.variables["PERSONS"][:]
lon = f.variables["x"][:]
lat = f.variables["y"][:]
#*************************************************
# Section to add filled polygons to map.
#*************************************************
plres = Ngl.Resources() # resources for polylines
plres.gsEdgesOn = True # draw border around polygons
plres.gsEdgeColor = "black"
colors = ["blue","green","yellow","red"]
segNum = 0
for i in range(0,numFeatures):
# color assignment (probably a better way to do this?)
if (unemp[i] >= 0.01 and unemp[i] < 0.02):
plres.gsFillColor = colors[0]
if (unemp[i] >= 0.02 and unemp[i] < 0.03):
plres.gsFillColor = colors[1]
if (unemp[i] >= 0.03 and unemp[i] < 0.04):
plres.gsFillColor = colors[2]
if (unemp[i] >= 0.04):
plres.gsFillColor = colors[3]
startSegment = int(geometry[i][geom_segIndex])
numSegments = int(geometry[i][geom_numSegs])
lines = []
for seg in range(startSegment, startSegment+numSegments):
startPT = int(segments[seg, segs_xyzIndex])
endPT = int(startPT + segments[seg, segs_numPnts] - 1)
lines.append(Ngl.add_polygon(wks, plot, lon[startPT:endPT], \
lat[startPT:endPT], plres))
segNum = segNum + 1
Ngl.draw(plot)
# Make a labelbar...
labels = [ "1", "2", "3", "4" ]
lres = Ngl.Resources()
lres.vpWidthF = 0.50 # location
lres.vpHeightF = 0.05 # " "
lres.lbPerimOn = False # Turn off perimeter.
lres.lbOrientation = "Horizontal" # Default is vertical.
lres.lbLabelAlignment = "BoxCenters" # Default is "BoxCenters".
lres.lbFillColors = colors
lres.lbMonoFillPattern = True # Fill them all solid.
lres.lbLabelFontHeightF = 0.012 # label font height
lres.lbTitleString = "percent" # title
lres.lbTitlePosition = "Bottom" # location of title
lres.lbTitleFontHeightF = 0.01 # title font height
Ngl.labelbar_ndc (wks,4,labels,0.23,0.15,lres)
Ngl.frame(wks) # Advance the frame.
Ngl.end()
avg_line_length: 29.852071 | max_line_length: 83 | alphanum_fraction: 0.637066
content_no_comment:
from __future__ import print_function
import numpy,os
import Ngl, Nio
wks_type = "png"
wks = Ngl.open_wks (wks_type,"shapefile1")
Ngl.define_colormap(wks,"rainbow+gray")
res = Ngl.Resources()
res.mpProjection = "LambertConformal"
res.mpLambertParallel1F = 33
res.mpLambertParallel2F = 45
res.mpLambertMeridianF = -98
res.mpLimitMode = "Corners"
res.mpLeftCornerLatF = 22
res.mpLeftCornerLonF = -125
res.mpRightCornerLatF = 50
res.mpRightCornerLonF = -64
res.mpFillOn = True
res.mpLandFillColor = "LightGray"
res.mpOceanFillColor = "Cyan"
res.mpInlandWaterFillColor = "Cyan"
res.pmTickMarkDisplayMode = "Always"
res.tiMainString = "Percentage unemployment, by state"
res.nglDraw = False
res.nglFrame = False # or advance the frame
plot = Ngl.map(wks,res) # create the map plot
#
# Read data off shapefile. Must have states.shp, states.dbf,
# and states.prj file in this directory.
#
dirc = Ngl.pynglpath("data")
f = Nio.open_file(os.path.join(dirc,"shp","states.shp"), "r") # Open shapefile
segments = f.variables["segments"][:]
geometry = f.variables["geometry"][:]
segsDims = segments.shape
geomDims = geometry.shape
#
# Read global attributes
#
geom_segIndex = f.geom_segIndex
geom_numSegs = f.geom_numSegs
segs_xyzIndex = f.segs_xyzIndex
segs_numPnts = f.segs_numPnts
numFeatures = geomDims[0]
unemp = f.variables["UNEMPLOY"][:] / f.variables["PERSONS"][:]
lon = f.variables["x"][:]
lat = f.variables["y"][:]
#*************************************************
# Section to add filled polygons to map.
#*************************************************
plres = Ngl.Resources() # resources for polylines
plres.gsEdgesOn = True # draw border around polygons
plres.gsEdgeColor = "black"
colors = ["blue","green","yellow","red"]
segNum = 0
for i in range(0,numFeatures):
# color assignment (probably a better way to do this?)
if (unemp[i] >= 0.01 and unemp[i] < 0.02):
plres.gsFillColor = colors[0]
if (unemp[i] >= 0.02 and unemp[i] < 0.03):
plres.gsFillColor = colors[1]
if (unemp[i] >= 0.03 and unemp[i] < 0.04):
plres.gsFillColor = colors[2]
if (unemp[i] >= 0.04):
plres.gsFillColor = colors[3]
startSegment = int(geometry[i][geom_segIndex])
numSegments = int(geometry[i][geom_numSegs])
lines = []
for seg in range(startSegment, startSegment+numSegments):
startPT = int(segments[seg, segs_xyzIndex])
endPT = int(startPT + segments[seg, segs_numPnts] - 1)
lines.append(Ngl.add_polygon(wks, plot, lon[startPT:endPT], \
lat[startPT:endPT], plres))
segNum = segNum + 1
Ngl.draw(plot)
# Make a labelbar...
labels = [ "1", "2", "3", "4" ]
lres = Ngl.Resources()
lres.vpWidthF = 0.50 # location
lres.vpHeightF = 0.05 # " "
lres.lbPerimOn = False # Turn off perimeter.
lres.lbOrientation = "Horizontal" # Default is vertical.
lres.lbLabelAlignment = "BoxCenters" # Default is "BoxCenters".
lres.lbFillColors = colors
lres.lbMonoFillPattern = True # Fill them all solid.
lres.lbLabelFontHeightF = 0.012 # label font height
lres.lbTitleString = "percent" # title
lres.lbTitlePosition = "Bottom" # location of title
lres.lbTitleFontHeightF = 0.01 # title font height
Ngl.labelbar_ndc (wks,4,labels,0.23,0.15,lres)
Ngl.frame(wks) # Advance the frame.
Ngl.end()
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f718b4a0de04341fe33bcd7f98a98e05166f7286 | size: 631 | ext: py | lang: Python
max_stars: repo_path=mmdet/core/bbox/assigners/__init__.py | repo_name=shouwangzhe134/Decoupled-R-CNN | head_hexsha=7fee5bef6c52a79636f61cfe48babfaf3e4fc088 | licenses=["Apache-2.0"] | count=1 | event_min_datetime=2021-12-03T06:31:29.000Z | event_max_datetime=2021-12-03T06:31:29.000Z
max_issues: repo_path=mmdet/core/bbox/assigners/__init__.py | repo_name=shouwangzhe134/Decoupled-R-CNN | head_hexsha=7fee5bef6c52a79636f61cfe48babfaf3e4fc088 | licenses=["Apache-2.0"] | count=null | event_min_datetime=null | event_max_datetime=null
max_forks: repo_path=mmdet/core/bbox/assigners/__init__.py | repo_name=shouwangzhe134/Decoupled-R-CNN | head_hexsha=7fee5bef6c52a79636f61cfe48babfaf3e4fc088 | licenses=["Apache-2.0"] | count=null | event_min_datetime=null | event_max_datetime=null
content:
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .atss_assigner import ATSSAssigner
from .base_assigner import BaseAssigner
from .center_region_assigner import CenterRegionAssigner
from .grid_assigner import GridAssigner
from .max_iou_assigner import MaxIoUAssigner
from .point_assigner import PointAssigner
from .max_iou_decoupled_assigner import MaxIoUDecoupledAssigner # wd
__all__ = [
'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner',
'MaxIoUDecoupledAssigner'
]
avg_line_length: 39.4375 | max_line_length: 77 | alphanum_fraction: 0.836767
content_no_comment:
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .atss_assigner import ATSSAssigner
from .base_assigner import BaseAssigner
from .center_region_assigner import CenterRegionAssigner
from .grid_assigner import GridAssigner
from .max_iou_assigner import MaxIoUAssigner
from .point_assigner import PointAssigner
from .max_iou_decoupled_assigner import MaxIoUDecoupledAssigner
__all__ = [
'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner',
'MaxIoUDecoupledAssigner'
]
| true
| true
|
f718b4fadc70811185014ceea7a2ac977f84aa08
| 1,472
|
py
|
Python
|
src/server/core/tests/test_config.py
|
Freshia/masakhane-web
|
acf5eaef7ab8109d6f10f212765572a1dc893cd5
|
[
"MIT"
] | 20
|
2021-04-09T09:08:53.000Z
|
2022-03-16T09:45:36.000Z
|
src/server/core/tests/test_config.py
|
Freshia/masakhane-web
|
acf5eaef7ab8109d6f10f212765572a1dc893cd5
|
[
"MIT"
] | 15
|
2021-04-19T07:04:56.000Z
|
2022-03-12T00:57:44.000Z
|
src/server/core/tests/test_config.py
|
Freshia/masakhane-web
|
acf5eaef7ab8109d6f10f212765572a1dc893cd5
|
[
"MIT"
] | 14
|
2021-04-19T04:39:04.000Z
|
2021-10-08T22:19:58.000Z
|
import os
import unittest
from flask import current_app
from flask_testing import TestCase
from core import masakhane
class TestDevelopmentConfig(TestCase):
def create_app(self):
masakhane.config.from_object('core.config.DevelopmentConfig')
return masakhane
def test_app_is_development(self):
self.assertTrue(masakhane.config['SECRET_KEY'] == "super-secret-key")
self.assertFalse(current_app is None)
self.assertTrue(
masakhane.config['SQLALCHEMY_DATABASE_URI'] ==
os.getenv('DATABASE_TEST_URL', "sqlite:///masakhane.db")
)
class TestTestingConfig(TestCase):
def create_app(self):
masakhane.config.from_object('core.config.StagingConfig')
return masakhane
def test_app_is_testing(self):
self.assertTrue(masakhane.config['SECRET_KEY'] == "key_testing")
self.assertTrue(masakhane.config['TESTING'])
self.assertTrue(
masakhane.config['SQLALCHEMY_DATABASE_URI'] ==
os.getenv('DATABASE_TEST_URL', "sqlite:///masakhane.db")
)
class TestProductionConfig(TestCase):
def create_app(self):
masakhane.config.from_object('core.config.ProductionConfig')
return masakhane
def test_app_is_production(self):
self.assertTrue(masakhane.config['SECRET_KEY'] == "key_production")
self.assertFalse(masakhane.config['TESTING'])
if __name__ == '__main__':
unittest.main()
| 32
| 77
| 0.688179
|
import os
import unittest
from flask import current_app
from flask_testing import TestCase
from core import masakhane
class TestDevelopmentConfig(TestCase):
def create_app(self):
masakhane.config.from_object('core.config.DevelopmentConfig')
return masakhane
def test_app_is_development(self):
self.assertTrue(masakhane.config['SECRET_KEY'] == "super-secret-key")
self.assertFalse(current_app is None)
self.assertTrue(
masakhane.config['SQLALCHEMY_DATABASE_URI'] ==
os.getenv('DATABASE_TEST_URL', "sqlite:///masakhane.db")
)
class TestTestingConfig(TestCase):
def create_app(self):
masakhane.config.from_object('core.config.StagingConfig')
return masakhane
def test_app_is_testing(self):
self.assertTrue(masakhane.config['SECRET_KEY'] == "key_testing")
self.assertTrue(masakhane.config['TESTING'])
self.assertTrue(
masakhane.config['SQLALCHEMY_DATABASE_URI'] ==
os.getenv('DATABASE_TEST_URL', "sqlite:///masakhane.db")
)
class TestProductionConfig(TestCase):
def create_app(self):
masakhane.config.from_object('core.config.ProductionConfig')
return masakhane
def test_app_is_production(self):
self.assertTrue(masakhane.config['SECRET_KEY'] == "key_production")
self.assertFalse(masakhane.config['TESTING'])
if __name__ == '__main__':
unittest.main()
| true
| true
|
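A minimal sketch of the config-loading mechanism the tests above exercise, assuming only that Flask is installed; the DummyConfig class and its values are made up for illustration and stand in for the core.config.* classes referenced in the record.

from flask import Flask

class DummyConfig:
    SECRET_KEY = "super-secret-key"
    TESTING = True
    SQLALCHEMY_DATABASE_URI = "sqlite:///masakhane.db"

app = Flask(__name__)
app.config.from_object(DummyConfig)   # same call pattern as masakhane.config.from_object(...)
print(app.config["SECRET_KEY"], app.config["TESTING"])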
f718b51aeb1c66972fd4cac0104213008e7d09c8
| 2,379
|
py
|
Python
|
src/tests/unit/nlp/algorithms/test_unit_spacy_utils.py
|
AITestingOrg/aide--spacy-nlp-service
|
aa6573d6a7630c78e7729f52b5e7347b8e39ac6f
|
[
"MIT"
] | null | null | null |
src/tests/unit/nlp/algorithms/test_unit_spacy_utils.py
|
AITestingOrg/aide--spacy-nlp-service
|
aa6573d6a7630c78e7729f52b5e7347b8e39ac6f
|
[
"MIT"
] | null | null | null |
src/tests/unit/nlp/algorithms/test_unit_spacy_utils.py
|
AITestingOrg/aide--spacy-nlp-service
|
aa6573d6a7630c78e7729f52b5e7347b8e39ac6f
|
[
"MIT"
] | null | null | null |
"""Tests some of the helper methods in algorithms.spacy_utils"""
import pytest
import tests.unit.utils.spacy_utils as utils
from analysis.algorithms.spacy_utils import extract_compound_dependencies
from analysis.algorithms.spacy_utils import extract_dependencies
SHORT_SENTENCE = "Computer hardware is important"
SENTENCE_NO_COMPOUNDS = "there is a beautiful place somewhere that has small things and big stuff"
LONG_SENTENCE_MULTIPLE_COMPOUNDS = "The fish tank is by the bus stop and the swimming pool"
TEST_DATA_DEPENDECY_COUNT = [
(SHORT_SENTENCE, len(SHORT_SENTENCE.split())),
(SENTENCE_NO_COMPOUNDS, len(SENTENCE_NO_COMPOUNDS.split())),
(LONG_SENTENCE_MULTIPLE_COMPOUNDS, len(LONG_SENTENCE_MULTIPLE_COMPOUNDS.split()))
]
TEST_DATA_COMPOUND_COUNT = [
(SHORT_SENTENCE, 1),
(SENTENCE_NO_COMPOUNDS, 0),
(LONG_SENTENCE_MULTIPLE_COMPOUNDS, 3)
]
@pytest.mark.parametrize("sent,expected_count", TEST_DATA_DEPENDECY_COUNT)
def test_number_of_dependencies(sent, expected_count):
"""
Tests that the expected number of dependencies are found in a given sentence.
:param sent: The sentence to check nlp analysis on.
:param expected_count: The expected number of dependencies in the sentence.
"""
# arrange
spacy_singleton = utils.SpacySingleton()
sent = spacy_singleton.nlp(sent)
# act
tokens = extract_dependencies(sent)
# assert
assert len(tokens) == expected_count
@pytest.mark.parametrize("sent,expected_count", TEST_DATA_COMPOUND_COUNT)
def test_compound_detected(sent, expected_count):
"""
Tests that the expected number of compound dependencies are found in a given sentence.
:param sent: The sentence to check nlp analysis on.
:param expected_count: The expected number of compound dependencies in the sentence.
"""
# arrange
spacy_singleton = utils.SpacySingleton()
sent = spacy_singleton.nlp(sent)
# act
tokens = extract_compound_dependencies(sent)
# assert
assert len(tokens) == expected_count
def test_no_compounds_found():
"""Tests that in a sentence with no compound dependencies none is found."""
# arrange
spacy_singleton = utils.SpacySingleton()
sent = spacy_singleton.nlp(SENTENCE_NO_COMPOUNDS)
# act
tokens = extract_dependencies(sent)
# assert
for token in tokens:
assert token[1] != "compound"
| 34.478261
| 98
| 0.752417
|
import pytest
import tests.unit.utils.spacy_utils as utils
from analysis.algorithms.spacy_utils import extract_compound_dependencies
from analysis.algorithms.spacy_utils import extract_dependencies
SHORT_SENTENCE = "Computer hardware is important"
SENTENCE_NO_COMPOUNDS = "there is a beautiful place somewhere that has small things and big stuff"
LONG_SENTENCE_MULTIPLE_COMPOUNDS = "The fish tank is by the bus stop and the swimming pool"
TEST_DATA_DEPENDECY_COUNT = [
(SHORT_SENTENCE, len(SHORT_SENTENCE.split())),
(SENTENCE_NO_COMPOUNDS, len(SENTENCE_NO_COMPOUNDS.split())),
(LONG_SENTENCE_MULTIPLE_COMPOUNDS, len(LONG_SENTENCE_MULTIPLE_COMPOUNDS.split()))
]
TEST_DATA_COMPOUND_COUNT = [
(SHORT_SENTENCE, 1),
(SENTENCE_NO_COMPOUNDS, 0),
(LONG_SENTENCE_MULTIPLE_COMPOUNDS, 3)
]
@pytest.mark.parametrize("sent,expected_count", TEST_DATA_DEPENDECY_COUNT)
def test_number_of_dependencies(sent, expected_count):
spacy_singleton = utils.SpacySingleton()
sent = spacy_singleton.nlp(sent)
tokens = extract_dependencies(sent)
assert len(tokens) == expected_count
@pytest.mark.parametrize("sent,expected_count", TEST_DATA_COMPOUND_COUNT)
def test_compound_detected(sent, expected_count):
spacy_singleton = utils.SpacySingleton()
sent = spacy_singleton.nlp(sent)
tokens = extract_compound_dependencies(sent)
assert len(tokens) == expected_count
def test_no_compounds_found():
spacy_singleton = utils.SpacySingleton()
sent = spacy_singleton.nlp(SENTENCE_NO_COMPOUNDS)
tokens = extract_dependencies(sent)
for token in tokens:
assert token[1] != "compound"
| true
| true
|
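A short, self-contained illustration of the compound dependencies the tests above count, assuming spaCy and the small English model (en_core_web_sm) are installed; the sentence is one of the test constants from the record, and the exact pairs printed depend on the model version.

import spacy

nlp = spacy.load("en_core_web_sm")
doc = nlp("The fish tank is by the bus stop and the swimming pool")
# tokens whose dependency label is "compound", paired with their heads
compounds = [(tok.text, tok.head.text) for tok in doc if tok.dep_ == "compound"]
print(compounds)  # roughly [('fish', 'tank'), ('bus', 'stop'), ('swimming', 'pool')]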
f718b75cfa0df5a38dc13850f1b1daf269e241b4
| 1,506
|
py
|
Python
|
sdk/servicebus/azure-servicebus/azure/servicebus/__init__.py
|
xolve/azure-sdk-for-python
|
9f5baa19c392f77f811d936ee43450e4ea524002
|
[
"MIT"
] | 1
|
2021-09-07T18:39:05.000Z
|
2021-09-07T18:39:05.000Z
|
sdk/servicebus/azure-servicebus/azure/servicebus/__init__.py
|
xolve/azure-sdk-for-python
|
9f5baa19c392f77f811d936ee43450e4ea524002
|
[
"MIT"
] | null | null | null |
sdk/servicebus/azure-servicebus/azure/servicebus/__init__.py
|
xolve/azure-sdk-for-python
|
9f5baa19c392f77f811d936ee43450e4ea524002
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -------------------------------------------------------------------------
from uamqp import constants
from ._version import VERSION
__version__ = VERSION
from ._servicebus_client import ServiceBusClient
from ._servicebus_sender import ServiceBusSender
from ._servicebus_receiver import ServiceBusReceiver
from ._servicebus_session import ServiceBusSession
from ._common.message import (
ServiceBusMessage,
ServiceBusMessageBatch,
ServiceBusReceivedMessage,
)
from ._common.constants import (
ServiceBusReceiveMode,
ServiceBusSubQueue,
ServiceBusSessionFilter,
NEXT_AVAILABLE_SESSION,
)
from ._common.auto_lock_renewer import AutoLockRenewer
from ._common._connection_string_parser import (
parse_connection_string,
ServiceBusConnectionStringProperties,
)
TransportType = constants.TransportType
__all__ = [
"ServiceBusMessage",
"ServiceBusMessageBatch",
"ServiceBusReceivedMessage",
"NEXT_AVAILABLE_SESSION",
"ServiceBusSubQueue",
"ServiceBusSessionFilter",
"ServiceBusReceiveMode",
"ServiceBusClient",
"ServiceBusReceiver",
"ServiceBusSession",
"ServiceBusSender",
"TransportType",
"AutoLockRenewer",
"parse_connection_string",
"ServiceBusConnectionStringProperties",
]
| 28.961538
| 75
| 0.706507
|
from uamqp import constants
from ._version import VERSION
__version__ = VERSION
from ._servicebus_client import ServiceBusClient
from ._servicebus_sender import ServiceBusSender
from ._servicebus_receiver import ServiceBusReceiver
from ._servicebus_session import ServiceBusSession
from ._common.message import (
ServiceBusMessage,
ServiceBusMessageBatch,
ServiceBusReceivedMessage,
)
from ._common.constants import (
ServiceBusReceiveMode,
ServiceBusSubQueue,
ServiceBusSessionFilter,
NEXT_AVAILABLE_SESSION,
)
from ._common.auto_lock_renewer import AutoLockRenewer
from ._common._connection_string_parser import (
parse_connection_string,
ServiceBusConnectionStringProperties,
)
TransportType = constants.TransportType
__all__ = [
"ServiceBusMessage",
"ServiceBusMessageBatch",
"ServiceBusReceivedMessage",
"NEXT_AVAILABLE_SESSION",
"ServiceBusSubQueue",
"ServiceBusSessionFilter",
"ServiceBusReceiveMode",
"ServiceBusClient",
"ServiceBusReceiver",
"ServiceBusSession",
"ServiceBusSender",
"TransportType",
"AutoLockRenewer",
"parse_connection_string",
"ServiceBusConnectionStringProperties",
]
| true
| true
|
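A brief usage sketch for two of the names re-exported above; the connection string is a placeholder, and the property attribute accessed below is an assumption based on the azure-servicebus documentation rather than something stated in the record.

from azure.servicebus import ServiceBusClient, parse_connection_string

conn_str = (
    "Endpoint=sb://example.servicebus.windows.net/;"
    "SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=not-a-real-key"
)
props = parse_connection_string(conn_str)          # -> ServiceBusConnectionStringProperties
print(props.fully_qualified_namespace)             # assumed attribute name
client = ServiceBusClient.from_connection_string(conn_str)  # client creation is lazy, no network call yet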
f718b7659cb335872792dd44aa202fbec8c95638
| 4,465
|
py
|
Python
|
src/models/modules/rnn_decoder.py
|
jopetty/transd-dev
|
0078dfd8a049f5b97a7b3be6e883821e4994d4c0
|
[
"MIT"
] | null | null | null |
src/models/modules/rnn_decoder.py
|
jopetty/transd-dev
|
0078dfd8a049f5b97a7b3be6e883821e4994d4c0
|
[
"MIT"
] | null | null | null |
src/models/modules/rnn_decoder.py
|
jopetty/transd-dev
|
0078dfd8a049f5b97a7b3be6e883821e4994d4c0
|
[
"MIT"
] | null | null | null |
import random
from typing import Dict
import torch
from torch import Tensor, nn
from torch.nn import functional as F
class RNNDecoder(nn.Module):
@property
def max_gen_length(self) -> int:
return self.hparams["dec_max_gen_length"]
@property
def EOS_idx(self) -> int:
return self.hparams["dec_EOS_idx"]
def __init__(self, hparams: dict) -> None:
super().__init__()
self.hparams = hparams
self.embedding = nn.Embedding(
hparams["dec_vocab_size"], hparams["dec_embedding_size"]
)
self.unit = nn.RNN(
hparams["dec_embedding_size"],
hparams["dec_hidden_size"],
num_layers=hparams["dec_num_layers"],
batch_first=True,
)
self.output = nn.Linear(hparams["dec_hidden_size"], hparams["dec_vocab_size"])
def forward_step(self, step_input: Dict[str, Tensor]) -> Dict[str, Tensor]:
        # Add a leading (layer) dimension when the tensor comes in 2-D
        no_squeeze = lambda a: a.unsqueeze(0) if a.dim() == 2 else a
# print("Step Input")
# for key in step_input:
# print(f"{key}: {step_input[key].shape}")
h = no_squeeze(step_input["h"])
unit_input = no_squeeze(F.relu(self.embedding(step_input["x"])))
_, state = self.unit(unit_input, h)
y = self.output(no_squeeze(state[-1, :, :]))
# print(f"h: {h.shape}")
# print(f"unit_input: {unit_input.shape}")
# print(f"unk: {unk.shape}")
# print(f"state: {state.shape}")
# print(f"state[-1]: {state[-1].shape}")
# print(f"y: {y.shape}")
return {"y": y, "h": state}
def get_step_input(self, dec_input: Dict[str, Tensor]) -> Dict[str, Tensor]:
if "h" in dec_input:
h = dec_input["h"]
elif "encoder_last_state" in dec_input:
h = torch.transpose(dec_input["encoder_last_state"], 0, 1)
else:
raise ValueError(
f"You must provide a hidden input in dec_input '{dec_input}'"
)
if "x" in dec_input:
x = dec_input["x"]
elif "transform" in dec_input:
# print("No x found")
# print(dec_input["transform"][:, 1:-1].shape)
x = dec_input["transform"][:, 1:-1]
else:
raise ValueError(
f"You must provide a step input in dec_input '{dec_input}'"
)
step_input = {"x": x, "h": h}
if "encoder_output" in dec_input:
step_input["encoder_output"] = dec_input["encoder_output"]
return step_input
def forward(self, dec_input: Dict[str, Tensor], tf_ratio) -> Dict[str, Tensor]:
is_teacher_forcing = random.random() < tf_ratio
batch_size: int = dec_input["encoder_output"].shape[0]
hidden_size: int = self.output.in_features
vocab_size: int = self.output.out_features
gen_length = (
dec_input["target"][0].shape[0]
if is_teacher_forcing
else self.max_gen_length
)
dec_step_input = self.get_step_input(dec_input)
has_finished = torch.zeros(batch_size, dtype=torch.bool)
dec_output = torch.zeros(gen_length, batch_size, vocab_size)
dec_hidden = torch.zeros(gen_length, batch_size, hidden_size)
for i in range(gen_length):
# print(f"STEP {i} (tf={is_teacher_forcing})")
step_result = self.forward_step(dec_step_input)
step_prediction = step_result["y"].argmax(dim=-1)
# for key in step_result:
# print(f"step_result[{key}]: {step_result[key].shape}")
# print("dec_hidden: ", dec_hidden.shape)
dec_output[i] = step_result["y"]
dec_hidden[i] = step_result["h"]
has_finished[step_prediction == self.EOS_idx] = True
if all(has_finished):
break
else:
x = dec_input["target"][:, i] if is_teacher_forcing else step_prediction
step_result["x"] = x.unsqueeze(-1)
step_result["encoder_output"] = dec_input["encoder_output"]
dec_step_input = self.get_step_input(step_result)
output = {
"logits": torch.transpose(dec_output, 0, 1),
"predictions": torch.transpose(dec_output, 0, 1).argmax(dim=-1),
"decoder_hiddens": dec_hidden,
}
return output
| 32.830882
| 88
| 0.57738
|
import random
from typing import Dict
import torch
from torch import Tensor, nn
from torch.nn import functional as F
class RNNDecoder(nn.Module):
@property
def max_gen_length(self) -> int:
return self.hparams["dec_max_gen_length"]
@property
def EOS_idx(self) -> int:
return self.hparams["dec_EOS_idx"]
def __init__(self, hparams: dict) -> None:
super().__init__()
self.hparams = hparams
self.embedding = nn.Embedding(
hparams["dec_vocab_size"], hparams["dec_embedding_size"]
)
self.unit = nn.RNN(
hparams["dec_embedding_size"],
hparams["dec_hidden_size"],
num_layers=hparams["dec_num_layers"],
batch_first=True,
)
self.output = nn.Linear(hparams["dec_hidden_size"], hparams["dec_vocab_size"])
def forward_step(self, step_input: Dict[str, Tensor]) -> Dict[str, Tensor]:
        no_squeeze = lambda a: a.unsqueeze(0) if a.dim() == 2 else a
h = no_squeeze(step_input["h"])
unit_input = no_squeeze(F.relu(self.embedding(step_input["x"])))
_, state = self.unit(unit_input, h)
y = self.output(no_squeeze(state[-1, :, :]))
return {"y": y, "h": state}
def get_step_input(self, dec_input: Dict[str, Tensor]) -> Dict[str, Tensor]:
if "h" in dec_input:
h = dec_input["h"]
elif "encoder_last_state" in dec_input:
h = torch.transpose(dec_input["encoder_last_state"], 0, 1)
else:
raise ValueError(
f"You must provide a hidden input in dec_input '{dec_input}'"
)
if "x" in dec_input:
x = dec_input["x"]
elif "transform" in dec_input:
x = dec_input["transform"][:, 1:-1]
else:
raise ValueError(
f"You must provide a step input in dec_input '{dec_input}'"
)
step_input = {"x": x, "h": h}
if "encoder_output" in dec_input:
step_input["encoder_output"] = dec_input["encoder_output"]
return step_input
def forward(self, dec_input: Dict[str, Tensor], tf_ratio) -> Dict[str, Tensor]:
is_teacher_forcing = random.random() < tf_ratio
batch_size: int = dec_input["encoder_output"].shape[0]
hidden_size: int = self.output.in_features
vocab_size: int = self.output.out_features
gen_length = (
dec_input["target"][0].shape[0]
if is_teacher_forcing
else self.max_gen_length
)
dec_step_input = self.get_step_input(dec_input)
has_finished = torch.zeros(batch_size, dtype=torch.bool)
dec_output = torch.zeros(gen_length, batch_size, vocab_size)
dec_hidden = torch.zeros(gen_length, batch_size, hidden_size)
for i in range(gen_length):
step_result = self.forward_step(dec_step_input)
step_prediction = step_result["y"].argmax(dim=-1)
dec_output[i] = step_result["y"]
dec_hidden[i] = step_result["h"]
has_finished[step_prediction == self.EOS_idx] = True
if all(has_finished):
break
else:
x = dec_input["target"][:, i] if is_teacher_forcing else step_prediction
step_result["x"] = x.unsqueeze(-1)
step_result["encoder_output"] = dec_input["encoder_output"]
dec_step_input = self.get_step_input(step_result)
output = {
"logits": torch.transpose(dec_output, 0, 1),
"predictions": torch.transpose(dec_output, 0, 1).argmax(dim=-1),
"decoder_hiddens": dec_hidden,
}
return output
| true
| true
|
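A minimal usage sketch for the decoder in the record above, assuming the RNNDecoder class is in scope; the hyperparameter values, batch size, and tensor shapes below are made up, and only the dictionary keys come from the code itself.

import torch

hparams = {
    "dec_vocab_size": 10, "dec_embedding_size": 8, "dec_hidden_size": 16,
    "dec_num_layers": 1, "dec_max_gen_length": 5, "dec_EOS_idx": 0,
}
decoder = RNNDecoder(hparams)
dec_input = {
    "encoder_output": torch.zeros(2, 7, 16),            # (batch, src_len, hidden)
    "encoder_last_state": torch.zeros(2, 1, 16),        # transposed to (layers, batch, hidden) internally
    "transform": torch.zeros(2, 3, dtype=torch.long),   # [:, 1:-1] becomes the first decoder input
    "target": torch.zeros(2, 5, dtype=torch.long),      # only read when teacher forcing is sampled
}
out = decoder(dec_input, tf_ratio=0.0)                  # tf_ratio=0.0 disables teacher forcing
print(out["predictions"].shape)                         # torch.Size([2, 5])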
f718b7acac4aead0303354569b3ffa0a259d91e7
| 1,914
|
py
|
Python
|
baselines/baseline2/summarize/sumy/summarizers/_summarizer.py
|
PKULiuHui/LiveBlogSum
|
b6a22521ee454e649981d70ddca6c89a1bac5a4c
|
[
"MIT"
] | 28
|
2017-06-02T08:39:49.000Z
|
2022-03-04T09:48:16.000Z
|
ukpsummarizer-be/summarizer/baselines/sumy/sumy/summarizers/_summarizer.py
|
AIPHES/vldb2018-sherlock
|
3746efa35c4c1769cc4aaeb15aeb9453564e1226
|
[
"Apache-2.0"
] | null | null | null |
ukpsummarizer-be/summarizer/baselines/sumy/sumy/summarizers/_summarizer.py
|
AIPHES/vldb2018-sherlock
|
3746efa35c4c1769cc4aaeb15aeb9453564e1226
|
[
"Apache-2.0"
] | 16
|
2017-06-22T07:48:27.000Z
|
2019-12-23T17:44:52.000Z
|
# -*- coding: utf8 -*-
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from collections import namedtuple
from operator import attrgetter
from ..utils import ItemsCount
from .._compat import to_unicode
from ..nlp.stemmers import null_stemmer
from nltk import word_tokenize
SentenceInfo = namedtuple("SentenceInfo", ("sentence", "order", "rating",))
class AbstractSummarizer(object):
def __init__(self, stemmer=null_stemmer):
if not callable(stemmer):
raise ValueError("Stemmer has to be a callable object")
self._stemmer = stemmer
def __call__(self, document, sentences_count):
        raise NotImplementedError("This method should be overridden in a subclass")
def stem_word(self, word):
return self._stemmer(self.normalize_word(word))
def normalize_word(self, word):
return to_unicode(word).lower()
def _get_break_index(self, infos, summary_size):
s_length = 0
for i,_ in enumerate(infos):
s_length += len(word_tokenize("%s" % (infos[i].sentence)))
if summary_size <= s_length:
return i+1
def _get_best_sentences(self, sentences, summary_size, rating, *args, **kwargs):
rate = rating
if isinstance(rating, dict):
assert not args and not kwargs
rate = lambda s: rating[s]
infos = (SentenceInfo(s, o, rate(s, *args, **kwargs))
for o, s in enumerate(sentences))
# sort sentences by rating in descending order
infos = sorted(infos, key=attrgetter("rating"), reverse=True)
        # keep the best-rated sentences until `summary_size` tokens are reached
index = self._get_break_index(infos, summary_size)
# sort sentences by their order in document
infos = sorted(infos[:index], key=attrgetter("order"))
        return [to_unicode(i.sentence) for i in infos]
| 33.578947
| 84
| 0.669801
|
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from collections import namedtuple
from operator import attrgetter
from ..utils import ItemsCount
from .._compat import to_unicode
from ..nlp.stemmers import null_stemmer
from nltk import word_tokenize
SentenceInfo = namedtuple("SentenceInfo", ("sentence", "order", "rating",))
class AbstractSummarizer(object):
def __init__(self, stemmer=null_stemmer):
if not callable(stemmer):
raise ValueError("Stemmer has to be a callable object")
self._stemmer = stemmer
def __call__(self, document, sentences_count):
        raise NotImplementedError("This method should be overridden in a subclass")
def stem_word(self, word):
return self._stemmer(self.normalize_word(word))
def normalize_word(self, word):
return to_unicode(word).lower()
def _get_break_index(self, infos, summary_size):
s_length = 0
for i,_ in enumerate(infos):
s_length += len(word_tokenize("%s" % (infos[i].sentence)))
if summary_size <= s_length:
return i+1
def _get_best_sentences(self, sentences, summary_size, rating, *args, **kwargs):
rate = rating
if isinstance(rating, dict):
assert not args and not kwargs
rate = lambda s: rating[s]
infos = (SentenceInfo(s, o, rate(s, *args, **kwargs))
for o, s in enumerate(sentences))
infos = sorted(infos, key=attrgetter("rating"), reverse=True)
index = self._get_break_index(infos, summary_size)
infos = sorted(infos[:index], key=attrgetter("order"))
        return [to_unicode(i.sentence) for i in infos]
| true
| true
|
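A small self-contained sketch of the selection scheme implemented by _get_best_sentences above: rank sentences by rating, cut off once a token budget is reached, then restore document order. Plain str.split() stands in for nltk's word_tokenize here, and the sentences, ratings, and budget are made up.

sentences = ["a b c", "d e", "f g h i"]
ratings = {0: 0.2, 1: 0.9, 2: 0.5}
by_rating = sorted(range(len(sentences)), key=lambda i: ratings[i], reverse=True)
picked, budget = [], 5
for i in by_rating:
    if budget <= 0:
        break
    picked.append(i)                       # the sentence that crosses the budget is still kept,
    budget -= len(sentences[i].split())    # matching the i+1 returned by _get_break_index
summary = [sentences[i] for i in sorted(picked)]
print(summary)  # ['d e', 'f g h i']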
f718b877f9cbe06c7a235e035dbfefb4ed16beec
| 394
|
py
|
Python
|
parsers/defaults.py
|
ZenRows/scaling-to-distributed-crawling
|
3da8d35327f888fcb047b588e7cb698494b4debe
|
[
"MIT"
] | 12
|
2021-08-25T10:13:44.000Z
|
2022-03-24T06:48:54.000Z
|
parsers/defaults.py
|
ZenRows/scaling-to-distributed-crawling
|
3da8d35327f888fcb047b588e7cb698494b4debe
|
[
"MIT"
] | null | null | null |
parsers/defaults.py
|
ZenRows/scaling-to-distributed-crawling
|
3da8d35327f888fcb047b588e7cb698494b4debe
|
[
"MIT"
] | 4
|
2021-08-28T22:50:16.000Z
|
2022-03-23T02:02:00.000Z
|
import repo
from collectors import basic
def extract_content(url, soup):
return soup.title.string # extract page's title
def store_content(url, content):
# store in a hash with the URL as the key and the title as the content
repo.set_content(url, content)
def allow_url_filter(url):
return True # allow all by default
def get_html(url):
return basic.get_html(url)
| 19.7
| 74
| 0.728426
|
import repo
from collectors import basic
def extract_content(url, soup):
return soup.title.string
def store_content(url, content):
# store in a hash with the URL as the key and the title as the content
repo.set_content(url, content)
def allow_url_filter(url):
return True # allow all by default
def get_html(url):
return basic.get_html(url)
| true
| true
|
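A tiny self-contained check of what the default extract_content above returns, assuming BeautifulSoup is installed; the HTML string is made up.

from bs4 import BeautifulSoup

html = "<html><head><title>Example Domain</title></head><body></body></html>"
soup = BeautifulSoup(html, "html.parser")
print(soup.title.string)  # Example Domain -- what extract_content(url, soup) would return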
f718ba3c617c375dee8540ada5548435f7892c63
| 16,710
|
py
|
Python
|
test/functional/qtum_evm_create2.py
|
SeqSEE/Metrix
|
ea6ba7fcc3d775740d78a10e88ce4da5a84db6a3
|
[
"MIT"
] | 26
|
2019-06-26T05:47:35.000Z
|
2022-03-01T04:50:11.000Z
|
test/functional/qtum_evm_create2.py
|
SeqSEE/Metrix
|
ea6ba7fcc3d775740d78a10e88ce4da5a84db6a3
|
[
"MIT"
] | 15
|
2018-08-29T21:58:38.000Z
|
2019-05-17T10:31:22.000Z
|
test/functional/qtum_evm_create2.py
|
SeqSEE/Metrix
|
ea6ba7fcc3d775740d78a10e88ce4da5a84db6a3
|
[
"MIT"
] | 24
|
2018-08-13T11:10:50.000Z
|
2019-06-08T02:49:36.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2016-2019 The Qtum Core developers
# Copyright (c) 2020 The Metrix Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
from test_framework.metrix import *
from test_framework.qtumconfig import *
import sys
import io
import pprint
class QtumEVMCreate2Test(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [['-logevents', '-minmempoolgaslimit=21000', '-constantinopleheight=704']]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def get_value_at_index(self, i):
info = self.node.callcontract(self.contract_address, "a05d8c0b" + hex(i)[2:].zfill(64))
ret = info['executionResult']['output']
return ret
def reset(self):
self.node.sendtocontract(self.contract_address, 'd826f88f')
self.node.generate(1)
def keccak256(self, data):
abi = "b8612c0a"
abi += hex(0x20)[2:].zfill(64)
abi += hex(len(data) // 2)[2:].zfill(64)
abi += data
return self.node.callcontract(self.contract_address, abi)['executionResult']['output']
def generate_create2_abi(self, param):
abi = "9fcc1813"
abi += hex(0x20)[2:].zfill(64)
abi += hex(len(param) // 2)[2:].zfill(64)
abi += param
return abi
def assert_state(self, node, gas, gas_price, gas_used, value, excepted):
blockhash = node.getbestblockhash()
block = node.getblock(blockhash)
txids = block['tx']
coinbase_tx = node.getrawtransaction(txids[0], True, blockhash)
call_tx = node.getrawtransaction(txids[1], True, blockhash)
input_tx = node.decoderawtransaction(node.gettransaction(call_tx['vin'][0]['txid'])['hex'])
sender_utxo = input_tx['vout'][call_tx['vin'][0]['vout']]
sender_address = sender_utxo['scriptPubKey']['addresses'][0]
for op_call_vout_index in range(len(call_tx['vout'])):
if call_tx['vout'][op_call_vout_index]['scriptPubKey']['type'] == 'call_sender':
break
# Check that the transaction receipt is correct
receipt = node.gettransactionreceipt(call_tx['txid'])[0]
assert_equal(receipt['gasUsed'], gas_used)
assert_equal(receipt['cumulativeGasUsed'], gas_used)
assert_equal(receipt['blockNumber'], block['height'])
assert_equal(receipt['blockHash'], block['hash'])
assert_equal(receipt['excepted'], excepted)
assert_equal(receipt['exceptedMessage'], '')
assert_equal(receipt['from'], p2pkh_to_hex_hash(sender_address))
assert_equal(receipt['transactionIndex'], 1)
assert_equal(receipt['transactionHash'], call_tx['txid'])
assert_equal(receipt['log'], [])
# If there is supposed to be a value refund tx, check that it:
# - exists
# - has the correct value
# - has the correct input
# - has the correct output
if value > 0:
refund_tx = node.getrawtransaction(txids[-1], True, blockhash)
refund_utxo = refund_tx['vout'][0]
assert_equal(len(refund_tx['vin']), 1)
assert_equal(refund_tx['vin'][0]['txid'], call_tx['txid'])
assert_equal(refund_tx['vin'][0]['vout'], op_call_vout_index)
assert_equal(refund_utxo['value'], value)
assert_equal(sender_utxo['scriptPubKey']['asm'], refund_utxo['scriptPubKey']['asm'])
else:
assert_equal(len(txids), 3)
# Check that the coinstake contains a gas refund (if one should exist)
if gas > gas_used:
gas_refund_output = coinbase_tx['vout'][-2]
assert_equal((gas_refund_output['value']*100000000)//10000000, ((gas-gas_used)*gas_price*100000000)//10000000)
assert_equal(sender_utxo['scriptPubKey']['asm'], gas_refund_output['scriptPubKey']['asm'])
else:
assert_equal(len(coinbase_tx['vout']), 2)
def run_test(self):
# Dummy address to generate blocks to
dummy_key = ECKey()
dummy_key.generate()
dummy_address = hex_hash_to_p2pkh("12"*20)
self.node = self.nodes[0]
self.node.generatetoaddress(200, self.node.getnewaddress())
self.node.generatetoaddress(COINBASE_MATURITY, dummy_address)
"""
pragma solidity ^0.5.10;
contract Test {
uint256[] private ret;
function reset() public {
delete ret;
}
function getRet(uint256 index) public view returns (uint256) {
return ret[index];
}
// To avoid having to add a dependancy for the non-standard keccak256 used in the EVM
// we simply call it here to verify results of extcodehash and create2
function dokeccak256(bytes memory data) public view returns (bytes32) {
return keccak256(data);
}
function create2_test(bytes memory code) public payable {
uint256 r;
uint256 size = code.length;
assembly {
r := create2(0, add(code, 0x20), size, 0x0)
}
ret.push(r);
}
}
d826f88f: reset()
a05d8c0b: getRet(uint256)
b8612c0a: dokeccak256(bytes)
9fcc1813: create2_test(bytes)
"""
bytecode = "608060405234801561001057600080fd5b50610340806100206000396000f3fe60806040526004361061003f5760003560e01c80639fcc181314610044578063a05d8c0b146100ff578063b8612c0a1461014e578063d826f88f1461022a575b600080fd5b6100fd6004803603602081101561005a57600080fd5b810190808035906020019064010000000081111561007757600080fd5b82018360208201111561008957600080fd5b803590602001918460018302840111640100000000831117156100ab57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f820116905080830192505050505050509192919290505050610241565b005b34801561010b57600080fd5b506101386004803603602081101561012257600080fd5b8101908080359060200190929190505050610285565b6040518082815260200191505060405180910390f35b34801561015a57600080fd5b506102146004803603602081101561017157600080fd5b810190808035906020019064010000000081111561018e57600080fd5b8201836020820111156101a057600080fd5b803590602001918460018302840111640100000000831117156101c257600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f8201169050808301925050505050505091929192905050506102a5565b6040518082815260200191505060405180910390f35b34801561023657600080fd5b5061023f6102b6565b005b60008082519050600081602085016000f591506000829080600181540180825580915050906001820390600052602060002001600090919290919091505550505050565b600080828154811061029357fe5b90600052602060002001549050919050565b600081805190602001209050919050565b6000806102c391906102c5565b565b50805460008255906000526020600020908101906102e391906102e6565b50565b61030891905b808211156103045760008160009055506001016102ec565b5090565b9056fea265627a7a7230582081aabd93f08842152eaf2ff607e68cee84abb692b2fbfa0556ee4e0a29ce332464736f6c634300050a0032"
self.contract_address = self.node.createcontract(bytecode)['address']
self.node.generatetoaddress(1, dummy_address)
create2_bytecode = "608060405234801561001057600080fd5b50610a57806100206000396000f3fe6080604052600436106100e85760003560e01c80639fcc18131161008a578063c5b319b211610059578063c5b319b214610473578063c9846c59146104b8578063d826f88f146104cf578063e5037fb1146104e6576100e8565b80639fcc181314610221578063a05d8c0b146102dc578063a1d98fa11461032b578063b8612c0a14610397576100e8565b80634f2f0adc116100c65780634f2f0adc1461015d578063825d4ba3146101ae5780639c9d8fbe146101f35780639ca24dff1461020a576100e8565b806308a86e5f146100ea5780631bbfb6821461012f5780634772905514610146575b005b3480156100f657600080fd5b5061012d6004803603604081101561010d57600080fd5b8101908080359060200190929190803590602001909291905050506104fd565b005b34801561013b57600080fd5b50610144610532565b005b34801561015257600080fd5b5061015b61061f565b005b34801561016957600080fd5b506101ac6004803603602081101561018057600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506106c1565b005b3480156101ba57600080fd5b506101f1600480360360408110156101d157600080fd5b8101908080359060200190929190803590602001909291905050506106f7565b005b3480156101ff57600080fd5b5061020861072c565b005b34801561021657600080fd5b5061021f6107ce565b005b6102da6004803603602081101561023757600080fd5b810190808035906020019064010000000081111561025457600080fd5b82018360208201111561026657600080fd5b8035906020019184600183028401116401000000008311171561028857600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f82011690508083019250505050505050919291929050505061081f565b005b3480156102e857600080fd5b50610315600480360360208110156102ff57600080fd5b8101908080359060200190929190505050610863565b6040518082815260200191505060405180910390f35b34801561033757600080fd5b50610340610883565b6040518080602001828103825283818151815260200191508051906020019060200280838360005b83811015610383578082015181840152602081019050610368565b505050509050019250505060405180910390f35b3480156103a357600080fd5b5061045d600480360360208110156103ba57600080fd5b81019080803590602001906401000000008111156103d757600080fd5b8201836020820111156103e957600080fd5b8035906020019184600183028401116401000000008311171561040b57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f820116905080830192505050505050509192919290505050610925565b6040518082815260200191505060405180910390f35b34801561047f57600080fd5b506104b66004803603604081101561049657600080fd5b810190808035906020019092919080359060200190929190505050610936565b005b3480156104c457600080fd5b506104cd61096b565b005b3480156104db57600080fd5b506104e46109b9565b005b3480156104f257600080fd5b506104fb6109c8565b005b80821c915060008290806001815401808255809150509060018203906000526020600020016000909192909190915055505050565b60006060600060405180807f72657475726e7661726c656e6774682829000000000000000000000000000000815250601101905060405180910390207bffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916905060008090506040518281526040519350600080600483600030620186a05a03f13d95506040519450602086036020863e5050600090505b82518110156106195760008382815181106105dd57fe5b602002602001015190806001815401808255809150509060018203906000526020600020016000909192909190915055508060010190506105c6565b50505050565b600060405180807f6d757461626c6563616c6c6261636b2829000000000000000000000000000000815250601101905060405180910390207bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19169050600060405182815260008060048330620186a05a03fa91505060008190806001815401808255809150509060018203906000526020600020016000909192909190915055505050565b6000813f905
060008190806001815401808255809150509060018203906000526020600020016000909192909190915055505050565b80821d915060008290806001815401808255809150509060018203906000526020600020016000909192909190915055505050565b600060405180807f73746174696363616c6c6261636b282900000000000000000000000000000000815250601001905060405180910390207bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19169050600060405182815260008060048330620186a05a03fa91505060008190806001815401808255809150509060018203906000526020600020016000909192909190915055505050565b60007f0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef9080600181540180825580915050906001820390600052602060002001600090919290919091505550600080fd5b60008082519050600081602085016000f591506000829080600181540180825580915050906001820390600052602060002001600090919290919091505550505050565b600080828154811061087157fe5b90600052602060002001549050919050565b60606000600f600143034060001c1690506060816040519080825280602002602001820160405280156108c55781602001602082028038833980820191505090505b50905060008090505b8281101561091c57807ff0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f01782828151811061090557fe5b6020026020010181815250508060010190506108ce565b50809250505090565b600081805190602001209050919050565b80821b915060008290806001815401808255809150509060018203906000526020600020016000909192909190915055505050565b60007f12121212121212121212121212121212121212121212121212121212121212129080600181540180825580915050906001820390600052602060002001600090919290919091505550565b6000806109c691906109e5565b565b60008090505b600a8110156109e2578060010190506109ce565b50565b5080546000825590600052602060002090810190610a039190610a06565b50565b610a2891905b80821115610a24576000816000905550600101610a0c565b5090565b9056fea165627a7a72305820db27ae89f2aafaaa420f35ae90480db051520a88007ede039df1c2302d284ec20029"
abi = self.generate_create2_abi(create2_bytecode)
        # Run a normal create2 tx; it will throw since create2 is undefined before qip7/constantinople,
        # so this will cause a refund tx
self.node.sendtocontract(self.contract_address, abi, 10, 1000000, 0.000001)
self.node.generatetoaddress(1, dummy_address)
self.assert_state(self.node, gas=1000000, gas_price=0.000001, gas_used=1000000, value=10,
excepted='BadInstruction')
self.node.generatetoaddress(10, dummy_address)
# run a create2 tx with too little gas
# this will cause a refund tx
self.node.sendtocontract(self.contract_address, abi, 10, 795405, 0.000001)
self.node.generatetoaddress(1, dummy_address)
self.assert_state(self.node, gas=795405, gas_price=0.000001, gas_used=795405, value=10, excepted='OutOfGas')
# run a create2 tx with too little gas
# this will cause a refund tx
self.node.sendtocontract(self.contract_address, abi, 10, 795406, 0.000001)
self.node.generatetoaddress(1, dummy_address)
self.assert_state(self.node, gas=795406, gas_price=0.000001, gas_used=795406, value=10, excepted='OutOfGas')
# run a create2 tx with just enough gas
# this will not cause a refund tx
self.node.sendtocontract(self.contract_address, abi, 10, 795407, 0.000001)
self.node.generatetoaddress(1, dummy_address)
self.assert_state(self.node, gas=795407, gas_price=0.000001, gas_used=795407, value=0, excepted='None')
# check contract address
codehash = self.keccak256(create2_bytecode)
expected_create2_address = self.keccak256("ff" + self.contract_address + ("0" * 64) + codehash)[24:]
create2_address = self.get_value_at_index(0)[24:]
assert (create2_address in self.node.listcontracts())
assert_equal(expected_create2_address, create2_address)
# Send some value to the create2 address, should succeed
self.node.sendtocontract(create2_address, "00", 1)
self.node.generate(1)
assert_equal(self.node.listcontracts()[create2_address], 1)
        # Make sure that deploying the exact same create2 contract again fails
self.reset()
self.node.sendtocontract(self.contract_address, abi, 10, 10000000, 0.000001)
self.node.generatetoaddress(1, dummy_address)
assert_equal(self.get_value_at_index(0), "0" * 64)
# Make sure that the value associated with the create2 address remains the same
assert_equal(self.node.listcontracts()[create2_address], 1)
# run a create2 tx with more than enough gas
self.reset()
create2_bytecode = '000000'
abi = self.generate_create2_abi(create2_bytecode)
self.node.sendtocontract(self.contract_address, abi, 10, 10000000, 0.000001)
self.node.generatetoaddress(1, dummy_address)
self.assert_state(self.node, gas=10000000, gas_price=0.000001, gas_used=94562, value=0, excepted='None')
# check contract address
codehash = self.keccak256(create2_bytecode)
expected_create2_address = self.keccak256("ff" + self.contract_address + ("0" * 64) + codehash)[24:]
create2_address = self.get_value_at_index(0)[24:]
assert (create2_address in self.node.listcontracts())
assert_equal(expected_create2_address, create2_address)
# balance of self.contract_address should be 30 so far
assert_equal(self.node.listcontracts()[self.contract_address], 30)
if __name__ == '__main__':
QtumEVMCreate2Test().main()
| 74.598214
| 5,387
| 0.795212
|
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
from test_framework.metrix import *
from test_framework.qtumconfig import *
import sys
import io
import pprint
class QtumEVMCreate2Test(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [['-logevents', '-minmempoolgaslimit=21000', '-constantinopleheight=704']]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def get_value_at_index(self, i):
info = self.node.callcontract(self.contract_address, "a05d8c0b" + hex(i)[2:].zfill(64))
ret = info['executionResult']['output']
return ret
def reset(self):
self.node.sendtocontract(self.contract_address, 'd826f88f')
self.node.generate(1)
def keccak256(self, data):
abi = "b8612c0a"
abi += hex(0x20)[2:].zfill(64)
abi += hex(len(data) // 2)[2:].zfill(64)
abi += data
return self.node.callcontract(self.contract_address, abi)['executionResult']['output']
def generate_create2_abi(self, param):
abi = "9fcc1813"
abi += hex(0x20)[2:].zfill(64)
abi += hex(len(param) // 2)[2:].zfill(64)
abi += param
return abi
def assert_state(self, node, gas, gas_price, gas_used, value, excepted):
blockhash = node.getbestblockhash()
block = node.getblock(blockhash)
txids = block['tx']
coinbase_tx = node.getrawtransaction(txids[0], True, blockhash)
call_tx = node.getrawtransaction(txids[1], True, blockhash)
input_tx = node.decoderawtransaction(node.gettransaction(call_tx['vin'][0]['txid'])['hex'])
sender_utxo = input_tx['vout'][call_tx['vin'][0]['vout']]
sender_address = sender_utxo['scriptPubKey']['addresses'][0]
for op_call_vout_index in range(len(call_tx['vout'])):
if call_tx['vout'][op_call_vout_index]['scriptPubKey']['type'] == 'call_sender':
break
receipt = node.gettransactionreceipt(call_tx['txid'])[0]
assert_equal(receipt['gasUsed'], gas_used)
assert_equal(receipt['cumulativeGasUsed'], gas_used)
assert_equal(receipt['blockNumber'], block['height'])
assert_equal(receipt['blockHash'], block['hash'])
assert_equal(receipt['excepted'], excepted)
assert_equal(receipt['exceptedMessage'], '')
assert_equal(receipt['from'], p2pkh_to_hex_hash(sender_address))
assert_equal(receipt['transactionIndex'], 1)
assert_equal(receipt['transactionHash'], call_tx['txid'])
assert_equal(receipt['log'], [])
if value > 0:
refund_tx = node.getrawtransaction(txids[-1], True, blockhash)
refund_utxo = refund_tx['vout'][0]
assert_equal(len(refund_tx['vin']), 1)
assert_equal(refund_tx['vin'][0]['txid'], call_tx['txid'])
assert_equal(refund_tx['vin'][0]['vout'], op_call_vout_index)
assert_equal(refund_utxo['value'], value)
assert_equal(sender_utxo['scriptPubKey']['asm'], refund_utxo['scriptPubKey']['asm'])
else:
assert_equal(len(txids), 3)
if gas > gas_used:
gas_refund_output = coinbase_tx['vout'][-2]
assert_equal((gas_refund_output['value']*100000000)//10000000, ((gas-gas_used)*gas_price*100000000)//10000000)
assert_equal(sender_utxo['scriptPubKey']['asm'], gas_refund_output['scriptPubKey']['asm'])
else:
assert_equal(len(coinbase_tx['vout']), 2)
def run_test(self):
dummy_key = ECKey()
dummy_key.generate()
dummy_address = hex_hash_to_p2pkh("12"*20)
self.node = self.nodes[0]
self.node.generatetoaddress(200, self.node.getnewaddress())
self.node.generatetoaddress(COINBASE_MATURITY, dummy_address)
bytecode = "608060405234801561001057600080fd5b50610340806100206000396000f3fe60806040526004361061003f5760003560e01c80639fcc181314610044578063a05d8c0b146100ff578063b8612c0a1461014e578063d826f88f1461022a575b600080fd5b6100fd6004803603602081101561005a57600080fd5b810190808035906020019064010000000081111561007757600080fd5b82018360208201111561008957600080fd5b803590602001918460018302840111640100000000831117156100ab57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f820116905080830192505050505050509192919290505050610241565b005b34801561010b57600080fd5b506101386004803603602081101561012257600080fd5b8101908080359060200190929190505050610285565b6040518082815260200191505060405180910390f35b34801561015a57600080fd5b506102146004803603602081101561017157600080fd5b810190808035906020019064010000000081111561018e57600080fd5b8201836020820111156101a057600080fd5b803590602001918460018302840111640100000000831117156101c257600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f8201169050808301925050505050505091929192905050506102a5565b6040518082815260200191505060405180910390f35b34801561023657600080fd5b5061023f6102b6565b005b60008082519050600081602085016000f591506000829080600181540180825580915050906001820390600052602060002001600090919290919091505550505050565b600080828154811061029357fe5b90600052602060002001549050919050565b600081805190602001209050919050565b6000806102c391906102c5565b565b50805460008255906000526020600020908101906102e391906102e6565b50565b61030891905b808211156103045760008160009055506001016102ec565b5090565b9056fea265627a7a7230582081aabd93f08842152eaf2ff607e68cee84abb692b2fbfa0556ee4e0a29ce332464736f6c634300050a0032"
self.contract_address = self.node.createcontract(bytecode)['address']
self.node.generatetoaddress(1, dummy_address)
create2_bytecode = "608060405234801561001057600080fd5b50610a57806100206000396000f3fe6080604052600436106100e85760003560e01c80639fcc18131161008a578063c5b319b211610059578063c5b319b214610473578063c9846c59146104b8578063d826f88f146104cf578063e5037fb1146104e6576100e8565b80639fcc181314610221578063a05d8c0b146102dc578063a1d98fa11461032b578063b8612c0a14610397576100e8565b80634f2f0adc116100c65780634f2f0adc1461015d578063825d4ba3146101ae5780639c9d8fbe146101f35780639ca24dff1461020a576100e8565b806308a86e5f146100ea5780631bbfb6821461012f5780634772905514610146575b005b3480156100f657600080fd5b5061012d6004803603604081101561010d57600080fd5b8101908080359060200190929190803590602001909291905050506104fd565b005b34801561013b57600080fd5b50610144610532565b005b34801561015257600080fd5b5061015b61061f565b005b34801561016957600080fd5b506101ac6004803603602081101561018057600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506106c1565b005b3480156101ba57600080fd5b506101f1600480360360408110156101d157600080fd5b8101908080359060200190929190803590602001909291905050506106f7565b005b3480156101ff57600080fd5b5061020861072c565b005b34801561021657600080fd5b5061021f6107ce565b005b6102da6004803603602081101561023757600080fd5b810190808035906020019064010000000081111561025457600080fd5b82018360208201111561026657600080fd5b8035906020019184600183028401116401000000008311171561028857600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f82011690508083019250505050505050919291929050505061081f565b005b3480156102e857600080fd5b50610315600480360360208110156102ff57600080fd5b8101908080359060200190929190505050610863565b6040518082815260200191505060405180910390f35b34801561033757600080fd5b50610340610883565b6040518080602001828103825283818151815260200191508051906020019060200280838360005b83811015610383578082015181840152602081019050610368565b505050509050019250505060405180910390f35b3480156103a357600080fd5b5061045d600480360360208110156103ba57600080fd5b81019080803590602001906401000000008111156103d757600080fd5b8201836020820111156103e957600080fd5b8035906020019184600183028401116401000000008311171561040b57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f820116905080830192505050505050509192919290505050610925565b6040518082815260200191505060405180910390f35b34801561047f57600080fd5b506104b66004803603604081101561049657600080fd5b810190808035906020019092919080359060200190929190505050610936565b005b3480156104c457600080fd5b506104cd61096b565b005b3480156104db57600080fd5b506104e46109b9565b005b3480156104f257600080fd5b506104fb6109c8565b005b80821c915060008290806001815401808255809150509060018203906000526020600020016000909192909190915055505050565b60006060600060405180807f72657475726e7661726c656e6774682829000000000000000000000000000000815250601101905060405180910390207bffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916905060008090506040518281526040519350600080600483600030620186a05a03f13d95506040519450602086036020863e5050600090505b82518110156106195760008382815181106105dd57fe5b602002602001015190806001815401808255809150509060018203906000526020600020016000909192909190915055508060010190506105c6565b50505050565b600060405180807f6d757461626c6563616c6c6261636b2829000000000000000000000000000000815250601101905060405180910390207bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19169050600060405182815260008060048330620186a05a03fa91505060008190806001815401808255809150509060018203906000526020600020016000909192909190915055505050565b6000813f905
060008190806001815401808255809150509060018203906000526020600020016000909192909190915055505050565b80821d915060008290806001815401808255809150509060018203906000526020600020016000909192909190915055505050565b600060405180807f73746174696363616c6c6261636b282900000000000000000000000000000000815250601001905060405180910390207bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19169050600060405182815260008060048330620186a05a03fa91505060008190806001815401808255809150509060018203906000526020600020016000909192909190915055505050565b60007f0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef9080600181540180825580915050906001820390600052602060002001600090919290919091505550600080fd5b60008082519050600081602085016000f591506000829080600181540180825580915050906001820390600052602060002001600090919290919091505550505050565b600080828154811061087157fe5b90600052602060002001549050919050565b60606000600f600143034060001c1690506060816040519080825280602002602001820160405280156108c55781602001602082028038833980820191505090505b50905060008090505b8281101561091c57807ff0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f01782828151811061090557fe5b6020026020010181815250508060010190506108ce565b50809250505090565b600081805190602001209050919050565b80821b915060008290806001815401808255809150509060018203906000526020600020016000909192909190915055505050565b60007f12121212121212121212121212121212121212121212121212121212121212129080600181540180825580915050906001820390600052602060002001600090919290919091505550565b6000806109c691906109e5565b565b60008090505b600a8110156109e2578060010190506109ce565b50565b5080546000825590600052602060002090810190610a039190610a06565b50565b610a2891905b80821115610a24576000816000905550600101610a0c565b5090565b9056fea165627a7a72305820db27ae89f2aafaaa420f35ae90480db051520a88007ede039df1c2302d284ec20029"
abi = self.generate_create2_abi(create2_bytecode)
self.node.sendtocontract(self.contract_address, abi, 10, 1000000, 0.000001)
self.node.generatetoaddress(1, dummy_address)
self.assert_state(self.node, gas=1000000, gas_price=0.000001, gas_used=1000000, value=10,
excepted='BadInstruction')
self.node.generatetoaddress(10, dummy_address)
self.node.sendtocontract(self.contract_address, abi, 10, 795405, 0.000001)
self.node.generatetoaddress(1, dummy_address)
self.assert_state(self.node, gas=795405, gas_price=0.000001, gas_used=795405, value=10, excepted='OutOfGas')
self.node.sendtocontract(self.contract_address, abi, 10, 795406, 0.000001)
self.node.generatetoaddress(1, dummy_address)
self.assert_state(self.node, gas=795406, gas_price=0.000001, gas_used=795406, value=10, excepted='OutOfGas')
self.node.sendtocontract(self.contract_address, abi, 10, 795407, 0.000001)
self.node.generatetoaddress(1, dummy_address)
self.assert_state(self.node, gas=795407, gas_price=0.000001, gas_used=795407, value=0, excepted='None')
codehash = self.keccak256(create2_bytecode)
expected_create2_address = self.keccak256("ff" + self.contract_address + ("0" * 64) + codehash)[24:]
create2_address = self.get_value_at_index(0)[24:]
assert (create2_address in self.node.listcontracts())
assert_equal(expected_create2_address, create2_address)
self.node.sendtocontract(create2_address, "00", 1)
self.node.generate(1)
assert_equal(self.node.listcontracts()[create2_address], 1)
self.reset()
self.node.sendtocontract(self.contract_address, abi, 10, 10000000, 0.000001)
self.node.generatetoaddress(1, dummy_address)
assert_equal(self.get_value_at_index(0), "0" * 64)
assert_equal(self.node.listcontracts()[create2_address], 1)
self.reset()
create2_bytecode = '000000'
abi = self.generate_create2_abi(create2_bytecode)
self.node.sendtocontract(self.contract_address, abi, 10, 10000000, 0.000001)
self.node.generatetoaddress(1, dummy_address)
self.assert_state(self.node, gas=10000000, gas_price=0.000001, gas_used=94562, value=0, excepted='None')
codehash = self.keccak256(create2_bytecode)
expected_create2_address = self.keccak256("ff" + self.contract_address + ("0" * 64) + codehash)[24:]
create2_address = self.get_value_at_index(0)[24:]
assert (create2_address in self.node.listcontracts())
assert_equal(expected_create2_address, create2_address)
assert_equal(self.node.listcontracts()[self.contract_address], 30)
if __name__ == '__main__':
QtumEVMCreate2Test().main()
| true
| true
|
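A standalone sketch of the CREATE2 address derivation that the test above checks against the chain (address = last 20 bytes of keccak256(0xff ++ deployer ++ salt ++ keccak256(init_code))). It uses pycryptodome's keccak rather than the contract's dokeccak256 helper; the deployer address is made up, while the zero salt and the trivial '000000' init code mirror the test.

from Crypto.Hash import keccak

def k256(data: bytes) -> bytes:
    # keccak-256 as used by the EVM (not SHA3-256)
    return keccak.new(digest_bits=256, data=data).digest()

deployer = bytes.fromhex("12" * 20)   # hypothetical deploying contract address
salt = bytes(32)                      # 32 zero bytes, as in the test
init_code = bytes.fromhex("000000")   # the trivial bytecode used at the end of the test
create2_addr = k256(b"\xff" + deployer + salt + k256(init_code))[12:]
print(create2_addr.hex())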
f718baf74518818d797d42c1119cb4a8e3f94fa1
| 6,048
|
py
|
Python
|
capsule_em/norb/norb_record.py
|
continue-nature/google-research
|
7011fe008efc4f11592ace842dbd4c9dffd46c29
|
[
"Apache-2.0"
] | null | null | null |
capsule_em/norb/norb_record.py
|
continue-nature/google-research
|
7011fe008efc4f11592ace842dbd4c9dffd46c29
|
[
"Apache-2.0"
] | null | null | null |
capsule_em/norb/norb_record.py
|
continue-nature/google-research
|
7011fe008efc4f11592ace842dbd4c9dffd46c29
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Input utility functions for norb."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
import tensorflow.compat.v1 as tf
def _read_and_decode(filename_queue, image_pixel=96, distort=0):
"""Read a norb tf record file."""
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
# Defaults are not specified since both keys are required.
features={
'image_raw': tf.FixedLenFeature([], tf.string),
'label': tf.FixedLenFeature([], tf.int64),
'height': tf.FixedLenFeature([], tf.int64),
'width': tf.FixedLenFeature([], tf.int64),
'depth': tf.FixedLenFeature([], tf.int64),
'meta': tf.FixedLenFeature([4], tf.int64),
})
# Convert from a scalar string tensor (whose single string has
# length image_pixels) to a uint8 tensor with shape
# [image_pixels].
image = tf.decode_raw(features['image_raw'], tf.uint8)
height = tf.cast(features['height'], tf.int32)
depth = tf.cast(features['depth'], tf.int32)
image = tf.reshape(image, tf.stack([depth, height, height]))
image = tf.transpose(image, [1, 2, 0])
image = tf.cast(image, tf.float32)
print(image.get_shape()[0].value)
if image_pixel < 96:
print('image resizing to {}'.format(image_pixel))
image = tf.image.resize_images(image, [image_pixel, image_pixel])
orig_images = image
if image_pixel == 48:
new_dim = 32
elif image_pixel == 32:
new_dim = 22
if distort == 1:
image = tf.image.random_brightness(image, max_delta=63)
image = tf.image.random_contrast(image, lower=0.2, upper=1.8)
image = tf.random_crop(image, tf.stack([new_dim, new_dim, depth]))
    # 0.26179938779 is 15 degrees in radians
image = tf.image.per_image_standardization(image)
image_pixel = new_dim
elif distort == 2:
image = tf.image.resize_image_with_crop_or_pad(image, new_dim, new_dim)
image = tf.image.per_image_standardization(image)
image_pixel = new_dim
else:
image = image * (1.0 / 255.0)
image = tf.div(
tf.subtract(image, tf.reduce_min(image)),
tf.subtract(tf.reduce_max(image), tf.reduce_min(image)))
# Convert label from a scalar uint8 tensor to an int32 scalar.
label = tf.cast(features['label'], tf.int32)
return image, label, image_pixel, orig_images
bxs_m2 = [[1, 1], [1, -1], [-1, 1], [-1, -1]]
def inputs(train_dir,
batch_size,
split,
multi,
image_pixel=96,
distort=False,
patching=False):
"""Reads input data num_epochs times."""
if multi:
filename = os.path.join(train_dir, '{}duo-az.tfrecords'.format(split))
else:
filename = os.path.join(train_dir, '{}.tfrecords'.format(split))
with tf.name_scope('input'):
filename_queue = tf.train.string_input_producer([filename])
if distort:
d = 1 + (split == 'test')
else:
d = 0
# Even when reading in multiple threads, share the filename
# queue.
image, label, dim, orig_image = _read_and_decode(
filename_queue, image_pixel=image_pixel, distort=d)
orig_image.set_shape([48, 48, 1 + multi])
image.set_shape([dim, dim, 1 + multi])
image = tf.transpose(image, [2, 0, 1])
if split == 'train':
images, sparse_labels = tf.train.shuffle_batch(
[image, label],
batch_size=batch_size,
num_threads=2,
capacity=2000 + 3 * batch_size,
# Ensures a minimum amount of shuffling of examples.
min_after_dequeue=2000)
else:
images, sparse_labels, orig_images = tf.train.batch(
[image, label, orig_image],
batch_size=batch_size,
num_threads=1,
capacity=1000 + 3 * batch_size)
if patching:
t_images = tf.tile(orig_images, [4, 1, 1, 1])
c_images = tf.image.extract_glimpse(
t_images, [32, 32], bxs_m2, centered=True, normalized=False)
c2images = tf.image.extract_glimpse(
t_images, [32, 32],
2 * np.array(bxs_m2),
centered=True,
normalized=False)
c3images = tf.image.extract_glimpse(
t_images, [32, 32],
3 * np.array(bxs_m2),
centered=True,
normalized=False)
c_images = tf.map_fn(tf.image.per_image_standardization, c_images)
c2images = tf.map_fn(tf.image.per_image_standardization, c2images)
c3images = tf.map_fn(tf.image.per_image_standardization, c3images)
c_images = tf.transpose(c_images, [0, 3, 1, 2])
c2images = tf.transpose(c2images, [0, 3, 1, 2])
c3images = tf.transpose(c3images, [0, 3, 1, 2])
# cc_images = tf.concat([images, m_images, c_images], axis=0)
# cc_labels = tf.tile(sparse_labels, [9])
cc_images = tf.concat([images, c_images, c2images, c3images], axis=0)
cc_labels = tf.tile(sparse_labels, [13])
features = {
'images': images,
'labels': tf.one_hot(sparse_labels, 5),
'recons_image': images,
'recons_label': sparse_labels,
'height': dim,
'depth': 1 + multi,
'num_classes': 5,
'cc_images': cc_images,
'cc_recons_label': cc_labels,
'cc_labels': tf.one_hot(cc_labels, 5),
}
return features
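# --- Usage sketch (not part of the original file): one way this queue-based
# pipeline could be consumed under graph-mode TF1 semantics. The data
# directory, batch size and flag values below are placeholder assumptions.
def _demo_inputs():
  tf.disable_eager_execution()
  features = inputs(train_dir='./norb_data', batch_size=8, split='train',
                    multi=False, image_pixel=48, distort=True)
  with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    imgs, labels = sess.run([features['images'], features['recons_label']])
    print(imgs.shape, labels.shape)
    coord.request_stop()
    coord.join(threads)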
| 35.576471
| 77
| 0.645503
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
import tensorflow.compat.v1 as tf
def _read_and_decode(filename_queue, image_pixel=96, distort=0):
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
features={
'image_raw': tf.FixedLenFeature([], tf.string),
'label': tf.FixedLenFeature([], tf.int64),
'height': tf.FixedLenFeature([], tf.int64),
'width': tf.FixedLenFeature([], tf.int64),
'depth': tf.FixedLenFeature([], tf.int64),
'meta': tf.FixedLenFeature([4], tf.int64),
})
image = tf.decode_raw(features['image_raw'], tf.uint8)
height = tf.cast(features['height'], tf.int32)
depth = tf.cast(features['depth'], tf.int32)
image = tf.reshape(image, tf.stack([depth, height, height]))
image = tf.transpose(image, [1, 2, 0])
image = tf.cast(image, tf.float32)
print(image.get_shape()[0].value)
if image_pixel < 96:
print('image resizing to {}'.format(image_pixel))
image = tf.image.resize_images(image, [image_pixel, image_pixel])
orig_images = image
if image_pixel == 48:
new_dim = 32
elif image_pixel == 32:
new_dim = 22
if distort == 1:
image = tf.image.random_brightness(image, max_delta=63)
image = tf.image.random_contrast(image, lower=0.2, upper=1.8)
image = tf.random_crop(image, tf.stack([new_dim, new_dim, depth]))
image = tf.image.per_image_standardization(image)
image_pixel = new_dim
elif distort == 2:
image = tf.image.resize_image_with_crop_or_pad(image, new_dim, new_dim)
image = tf.image.per_image_standardization(image)
image_pixel = new_dim
else:
image = image * (1.0 / 255.0)
image = tf.div(
tf.subtract(image, tf.reduce_min(image)),
tf.subtract(tf.reduce_max(image), tf.reduce_min(image)))
label = tf.cast(features['label'], tf.int32)
return image, label, image_pixel, orig_images
bxs_m2 = [[1, 1], [1, -1], [-1, 1], [-1, -1]]
def inputs(train_dir,
batch_size,
split,
multi,
image_pixel=96,
distort=False,
patching=False):
if multi:
filename = os.path.join(train_dir, '{}duo-az.tfrecords'.format(split))
else:
filename = os.path.join(train_dir, '{}.tfrecords'.format(split))
with tf.name_scope('input'):
filename_queue = tf.train.string_input_producer([filename])
if distort:
d = 1 + (split == 'test')
else:
d = 0
image, label, dim, orig_image = _read_and_decode(
filename_queue, image_pixel=image_pixel, distort=d)
orig_image.set_shape([48, 48, 1 + multi])
image.set_shape([dim, dim, 1 + multi])
image = tf.transpose(image, [2, 0, 1])
if split == 'train':
images, sparse_labels = tf.train.shuffle_batch(
[image, label],
batch_size=batch_size,
num_threads=2,
capacity=2000 + 3 * batch_size,
min_after_dequeue=2000)
else:
images, sparse_labels, orig_images = tf.train.batch(
[image, label, orig_image],
batch_size=batch_size,
num_threads=1,
capacity=1000 + 3 * batch_size)
if patching:
t_images = tf.tile(orig_images, [4, 1, 1, 1])
c_images = tf.image.extract_glimpse(
t_images, [32, 32], bxs_m2, centered=True, normalized=False)
c2images = tf.image.extract_glimpse(
t_images, [32, 32],
2 * np.array(bxs_m2),
centered=True,
normalized=False)
c3images = tf.image.extract_glimpse(
t_images, [32, 32],
3 * np.array(bxs_m2),
centered=True,
normalized=False)
c_images = tf.map_fn(tf.image.per_image_standardization, c_images)
c2images = tf.map_fn(tf.image.per_image_standardization, c2images)
c3images = tf.map_fn(tf.image.per_image_standardization, c3images)
c_images = tf.transpose(c_images, [0, 3, 1, 2])
c2images = tf.transpose(c2images, [0, 3, 1, 2])
c3images = tf.transpose(c3images, [0, 3, 1, 2])
cc_images = tf.concat([images, c_images, c2images, c3images], axis=0)
cc_labels = tf.tile(sparse_labels, [13])
features = {
'images': images,
'labels': tf.one_hot(sparse_labels, 5),
'recons_image': images,
'recons_label': sparse_labels,
'height': dim,
'depth': 1 + multi,
'num_classes': 5,
'cc_images': cc_images,
'cc_recons_label': cc_labels,
'cc_labels': tf.one_hot(cc_labels, 5),
}
return features
| true
| true
|
f718bc829f48f8a6746cce096e97afd9ab044089
| 245
|
py
|
Python
|
IOMC/EventVertexGenerators/python/VtxSmearedRun3RoundOptics25ns13TeVLowSigmaZ_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
IOMC/EventVertexGenerators/python/VtxSmearedRun3RoundOptics25ns13TeVLowSigmaZ_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
IOMC/EventVertexGenerators/python/VtxSmearedRun3RoundOptics25ns13TeVLowSigmaZ_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
from IOMC.EventVertexGenerators.VtxSmearedParameters_cfi import *
VtxSmeared = cms.EDProducer("BetafuncEvtVtxGenerator",
Run3RoundOptics25ns13TeVLowSigmaZVtxSmearingParameters,
VtxSmearedCommon
)
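# --- Usage sketch (not part of the original fragment): a _cfi file like this
# is normally attached to a cms.Process via process.load(); the "GEN" process
# name below is an assumption for illustration only.
if __name__ == "__main__":
    process = cms.Process("GEN")
    process.load("IOMC.EventVertexGenerators.VtxSmearedRun3RoundOptics25ns13TeVLowSigmaZ_cfi")
    print(process.VtxSmeared.dumpPython())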
| 35
| 65
| 0.857143
|
import FWCore.ParameterSet.Config as cms
from IOMC.EventVertexGenerators.VtxSmearedParameters_cfi import *
VtxSmeared = cms.EDProducer("BetafuncEvtVtxGenerator",
Run3RoundOptics25ns13TeVLowSigmaZVtxSmearingParameters,
VtxSmearedCommon
)
| true
| true
|
f718bcfa66c3e9bd9ce5380c3709a1d89ddf5766
| 446
|
py
|
Python
|
data.py
|
MarcyVampQueen/Alex-Agnes-Website
|
54e973a798eed937e97c37367cfac4c3fbeebefd
|
[
"CC0-1.0"
] | null | null | null |
data.py
|
MarcyVampQueen/Alex-Agnes-Website
|
54e973a798eed937e97c37367cfac4c3fbeebefd
|
[
"CC0-1.0"
] | null | null | null |
data.py
|
MarcyVampQueen/Alex-Agnes-Website
|
54e973a798eed937e97c37367cfac4c3fbeebefd
|
[
"CC0-1.0"
] | null | null | null |
##
## For importing data from the database
##
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, desc
######## Set up DB ########
engine = create_engine("sqlite:///static/agnesShows.sqlite")
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
def getData():
return 0
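# --- Usage sketch (not part of the original file): querying through the
# automapped classes. The 'shows' table name is an assumption, since the
# sqlite schema is not visible here.
def peekData():
    session = Session(engine)
    print(list(Base.classes.keys()))       # reflected table classes
    # Shows = Base.classes.shows           # hypothetical table name
    # rows = session.query(Shows).limit(5).all()
    session.close()
    return 0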
| 21.238095
| 60
| 0.744395
|
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, desc
| true
| true
|
f718bcfd85e31d09972c8d4bec7f3557b99ee2d8
| 4,793
|
py
|
Python
|
helm/kyverno-policies-dx/tests/ats/test_common_default.py
|
giantswarm/kyverno-policies-dx
|
93eec2e3992edcb490dc2f6fd424df0917d8567b
|
[
"Apache-2.0"
] | null | null | null |
helm/kyverno-policies-dx/tests/ats/test_common_default.py
|
giantswarm/kyverno-policies-dx
|
93eec2e3992edcb490dc2f6fd424df0917d8567b
|
[
"Apache-2.0"
] | null | null | null |
helm/kyverno-policies-dx/tests/ats/test_common_default.py
|
giantswarm/kyverno-policies-dx
|
93eec2e3992edcb490dc2f6fd424df0917d8567b
|
[
"Apache-2.0"
] | null | null | null |
import sys
sys.path.append('../../../tests')
import yaml
from functools import partial
import time
import random
import string
import ensure
from textwrap import dedent
from ensure import release
from ensure import cluster
from ensure import machinedeployment
from ensure import kubeadmconfig
from ensure import kubeadmconfig_controlplane
from ensure import kubeadmconfig_with_labels
from ensure import kubeadmconfig_with_role_labels
from ensure import kubeadmconfig_with_kubelet_args
from ensure import kubeadm_control_plane
from ensure import kubeadmconfig_controlplane
from ensure import kubeadmconfig_with_files
from ensure import kubeadmconfig_with_audit_file
from ensure import fetch_policies
from ensure import run_pod_from_registries
import pytest
from pytest_kube import forward_requests, wait_for_rollout, app_template
import logging
LOGGER = logging.getLogger(__name__)
@pytest.mark.smoke
def test_kubeadmconfig_policy_controlplane(kubeadmconfig_controlplane) -> None:
"""
test_kubeadmconfig_policy_controlplane tests defaulting of a KubeadmConfig for a control plane where all required values are empty strings.
:param kubeadmconfig_controlplane: KubeadmConfig CR which is empty.
"""
assert kubeadmconfig_controlplane['metadata']['labels']['cluster.x-k8s.io/watch-filter'] == ensure.watch_label
assert kubeadmconfig_controlplane['metadata']['labels']['cluster.x-k8s.io/control-plane'] == ""
@pytest.mark.smoke
def test_kubeadmconfig_auditpolicy(kubeadmconfig_with_files) -> None:
"""
test_kubeadmconfig_auditpolicy tests defaulting of a kubeadmconfig with audit policy details
:param kubeadmconfig_with_files: KubeadmConfig CR which includes some existing files
"""
found = False
for file in kubeadmconfig_with_files['spec']['files']:
if file['path'] == "/etc/kubernetes/policies/audit-policy.yaml":
found = True
assert found == True
@pytest.mark.smoke
def test_kubeadmconfig_audit_file(kubeadmconfig_with_audit_file) -> None:
    """
    test_kubeadmconfig_audit_file tests defaulting of a kubeadmconfig that already contains the audit policy file
:param kubeadmconfig_with_audit_file: KubeadmConfig CR which includes an existing audit file
"""
assert len(kubeadmconfig_with_audit_file['spec']['files']) == 1
@pytest.mark.smoke
def test_kyverno_policy(fetch_policies) -> None:
"""
test_kyverno_policy tests that the policy is present
"""
found = False
for policy in fetch_policies['items']:
LOGGER.info(f"Policy {policy['metadata']['name']} is present in the cluster")
if policy['metadata']['name'] == "restrict-image-registries":
found = True
assert found == True
@pytest.mark.smoke
def test_kyverno_policy_reports(run_pod_from_registries) -> None:
"""
test_kyverno_policy_reports tests the restrict-image-registries policy
:param run_pod_from_registries: Pods with containers from inside and outside GS registries
"""
bad_registry_found = False
good_registry_found = False
if len(run_pod_from_registries['items']) == 0:
LOGGER.warning("No policy reports present on the cluster")
for report in run_pod_from_registries['items']:
LOGGER.info(f"Policy report {report['metadata']['name']} is present on the cluster")
for policy_report in report['results']:
# Look for PolicyReports from the `restrict-image-registries` policy
if policy_report['policy'] == "restrict-image-registries":
for resource in policy_report['resources']:
LOGGER.info(f"PolicyReport for Policy {policy_report['policy']} for resource {resource['name']} is present on the cluster")
# Check for the Pod with bad registries and verify that it has a fail result
if resource['name'] == "pod-outside-gs-registries":
if policy_report['result'] == "fail":
bad_registry_found = True
break
else:
LOGGER.warning(f"PolicyReport for {resource['name']} is present but result is not correct")
# Check for the Pod with good registries and verify that it has a pass result
if resource['name'] == "pod-inside-gs-registries":
if policy_report['result'] == "pass":
good_registry_found = True
break
else:
LOGGER.warning(f"PolicyReport for {resource['name']} is present but result is not correct")
assert (bad_registry_found == True and good_registry_found == True)
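# --- Possible refactor sketch (not from the original file): collapse the
# nested PolicyReport scan above into a small lookup helper.
def result_for_pod(reports, policy_name, pod_name):
    """Return the first result recorded for pod_name under policy_name, or None."""
    for report in reports.get('items', []):
        for pr in report.get('results', []):
            if pr.get('policy') != policy_name:
                continue
            if any(r.get('name') == pod_name for r in pr.get('resources', [])):
                return pr.get('result')
    return None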
| 38.653226
| 143
| 0.690799
|
import sys
sys.path.append('../../../tests')
import yaml
from functools import partial
import time
import random
import string
import ensure
from textwrap import dedent
from ensure import release
from ensure import cluster
from ensure import machinedeployment
from ensure import kubeadmconfig
from ensure import kubeadmconfig_controlplane
from ensure import kubeadmconfig_with_labels
from ensure import kubeadmconfig_with_role_labels
from ensure import kubeadmconfig_with_kubelet_args
from ensure import kubeadm_control_plane
from ensure import kubeadmconfig_controlplane
from ensure import kubeadmconfig_with_files
from ensure import kubeadmconfig_with_audit_file
from ensure import fetch_policies
from ensure import run_pod_from_registries
import pytest
from pytest_kube import forward_requests, wait_for_rollout, app_template
import logging
LOGGER = logging.getLogger(__name__)
@pytest.mark.smoke
def test_kubeadmconfig_policy_controlplane(kubeadmconfig_controlplane) -> None:
assert kubeadmconfig_controlplane['metadata']['labels']['cluster.x-k8s.io/watch-filter'] == ensure.watch_label
assert kubeadmconfig_controlplane['metadata']['labels']['cluster.x-k8s.io/control-plane'] == ""
@pytest.mark.smoke
def test_kubeadmconfig_auditpolicy(kubeadmconfig_with_files) -> None:
found = False
for file in kubeadmconfig_with_files['spec']['files']:
if file['path'] == "/etc/kubernetes/policies/audit-policy.yaml":
found = True
assert found == True
@pytest.mark.smoke
def test_kubeadmconfig_audit_file(kubeadmconfig_with_audit_file) -> None:
assert len(kubeadmconfig_with_audit_file['spec']['files']) == 1
@pytest.mark.smoke
def test_kyverno_policy(fetch_policies) -> None:
found = False
for policy in fetch_policies['items']:
LOGGER.info(f"Policy {policy['metadata']['name']} is present in the cluster")
if policy['metadata']['name'] == "restrict-image-registries":
found = True
assert found == True
@pytest.mark.smoke
def test_kyverno_policy_reports(run_pod_from_registries) -> None:
bad_registry_found = False
good_registry_found = False
if len(run_pod_from_registries['items']) == 0:
LOGGER.warning("No policy reports present on the cluster")
for report in run_pod_from_registries['items']:
LOGGER.info(f"Policy report {report['metadata']['name']} is present on the cluster")
for policy_report in report['results']:
if policy_report['policy'] == "restrict-image-registries":
for resource in policy_report['resources']:
LOGGER.info(f"PolicyReport for Policy {policy_report['policy']} for resource {resource['name']} is present on the cluster")
if resource['name'] == "pod-outside-gs-registries":
if policy_report['result'] == "fail":
bad_registry_found = True
break
else:
LOGGER.warning(f"PolicyReport for {resource['name']} is present but result is not correct")
if resource['name'] == "pod-inside-gs-registries":
if policy_report['result'] == "pass":
good_registry_found = True
break
else:
LOGGER.warning(f"PolicyReport for {resource['name']} is present but result is not correct")
assert (bad_registry_found == True and good_registry_found == True)
| true
| true
|
f718bd140bb76d7697f696e4bb8ee88d4d8ace84
| 9,074
|
py
|
Python
|
mltk/data/loaders.py
|
haowen-xu/ml-essentials
|
ca44186be37887461205227c32995f1485b4ff41
|
[
"MIT"
] | 4
|
2019-08-06T03:23:14.000Z
|
2019-11-08T10:58:54.000Z
|
mltk/data/loaders.py
|
haowen-xu/ml-essentials
|
ca44186be37887461205227c32995f1485b4ff41
|
[
"MIT"
] | null | null | null |
mltk/data/loaders.py
|
haowen-xu/ml-essentials
|
ca44186be37887461205227c32995f1485b4ff41
|
[
"MIT"
] | 2
|
2019-12-03T08:09:05.000Z
|
2020-10-15T06:50:20.000Z
|
"""
Simple dataset loaders.
For more datasets and more comprehensive loaders, you may turn to dedicated
libraries like `fuel`.
"""
import gzip
import hashlib
import os
import pickle
from typing import *
import idx2numpy
import numpy as np
from ..typing_ import *
from ..utils import CacheDir, validate_enum_arg
__all__ = ['load_mnist', 'load_fashion_mnist', 'load_cifar10', 'load_cifar100']
_MNIST_LIKE_FILE_NAMES = {
'train_x': 'train-images-idx3-ubyte.gz',
'train_y': 'train-labels-idx1-ubyte.gz',
'test_x': 't10k-images-idx3-ubyte.gz',
'test_y': 't10k-labels-idx1-ubyte.gz',
}
_MNIST_URI_PREFIX = 'http://yann.lecun.com/exdb/mnist/'
_MNIST_FILE_MD5 = {
'train_x': 'f68b3c2dcbeaaa9fbdd348bbdeb94873',
'train_y': 'd53e105ee54ea40749a09fcbcd1e9432',
'test_x': '9fb629c4189551a2d022fa330f9573f3',
'test_y': 'ec29112dd5afa0611ce80d1b7f02629c',
}
_FASHION_MNIST_URI_PREFIX = 'http://fashion-mnist.s3-website.eu-central-1.' \
'amazonaws.com/'
_FASHION_MNIST_FILE_MD5 = {
'train_x': '8d4fb7e6c68d591d4c3dfef9ec88bf0d',
'train_y': '25c81989df183df01b3e8a0aad5dffbe',
'test_x': 'bef4ecab320f06d8554ea6380940ec79',
'test_y': 'bb300cfdad3c16e7a12a480ee83cd310',
}
_CIFAR_10_URI = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
_CIFAR_10_MD5 = 'c58f30108f718f92721af3b95e74349a'
_CIFAR_10_CONTENT_DIR = 'cifar-10-batches-py'
_CIFAR_100_URI = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'
_CIFAR_100_MD5 = 'eb9058c3a382ffc7106e4002c42a8d85'
_CIFAR_100_CONTENT_DIR = 'cifar-100-python'
def _validate_x_shape(shape, default_shape):
shape = tuple(int(v) for v in shape)
default_shape = tuple(int(v) for v in default_shape)
value_size = int(np.prod(default_shape))
if np.prod(shape) != value_size:
        raise ValueError(f'`x_shape` does not have a product of {value_size}: {shape}')
return shape
def load_mnist_like(uri_prefix: str,
file_md5: Dict[str, str],
cache_name: str,
x_shape: Sequence[int] = (28, 28),
x_dtype: ArrayDType = np.uint8,
y_dtype: ArrayDType = np.int32
) -> Tuple[XYArrayTuple, XYArrayTuple]:
"""
Load an MNIST-like dataset as NumPy arrays.
Args:
uri_prefix: Common prefix of the URIs in `remote_files`.
file_md5: The remote file MD5 hash sums, a dict of
`{'train_x': ..., 'train_y': ..., 'test_x': ..., 'test_y': ...}`,
where each value is the md5 sum.
cache_name: Name of the cache directory.
x_shape: Reshape each digit into this shape.
x_dtype: Cast each digit into this data type.
y_dtype: Cast each label into this data type.
Returns:
The ``(train_x, train_y), (test_x, test_y)`` arrays.
"""
def _fetch_array(array_name):
uri = uri_prefix + _MNIST_LIKE_FILE_NAMES[array_name]
md5 = file_md5[array_name]
path = CacheDir(cache_name).download(
uri, hasher=hashlib.md5(), expected_hash=md5)
with gzip.open(path, 'rb') as f:
return idx2numpy.convert_from_file(f)
# check arguments
x_shape = _validate_x_shape(x_shape, (28, 28))
# load data
train_x = _fetch_array('train_x').astype(x_dtype)
train_y = _fetch_array('train_y').astype(y_dtype)
test_x = _fetch_array('test_x').astype(x_dtype)
test_y = _fetch_array('test_y').astype(y_dtype)
assert(len(train_x) == len(train_y) == 60000)
assert(len(test_x) == len(test_y) == 10000)
# change shape
train_x = train_x.reshape([len(train_x)] + list(x_shape))
test_x = test_x.reshape([len(test_x)] + list(x_shape))
return (train_x, train_y), (test_x, test_y)
def load_mnist(x_shape: Sequence[int] = (28, 28),
x_dtype: ArrayDType = np.uint8,
y_dtype: ArrayDType = np.int32
) -> Tuple[XYArrayTuple, XYArrayTuple]:
"""
    Load the MNIST dataset as NumPy arrays.
Args:
x_shape: Reshape each digit into this shape.
x_dtype: Cast each digit into this data type.
y_dtype: Cast each label into this data type.
Returns:
The ``(train_x, train_y), (test_x, test_y)`` arrays.
"""
return load_mnist_like(
_MNIST_URI_PREFIX, _MNIST_FILE_MD5, 'mnist', x_shape, x_dtype, y_dtype)
def load_fashion_mnist(x_shape: Sequence[int] = (28, 28),
x_dtype: ArrayDType = np.uint8,
y_dtype: ArrayDType = np.int32
) -> Tuple[XYArrayTuple, XYArrayTuple]:
"""
    Load the Fashion-MNIST dataset as NumPy arrays.
Args:
x_shape: Reshape each digit into this shape.
x_dtype: Cast each digit into this data type.
y_dtype: Cast each label into this data type.
Returns:
The ``(train_x, train_y), (test_x, test_y)`` arrays.
"""
return load_mnist_like(
_FASHION_MNIST_URI_PREFIX, _FASHION_MNIST_FILE_MD5, 'fashion_mnist',
x_shape, x_dtype, y_dtype)
def _cifar_load_batch(path, x_shape, x_dtype, y_dtype, expected_batch_label,
labels_key='labels'):
# load from file
with open(path, 'rb') as f:
d = {
k.decode('utf-8'): v
for k, v in pickle.load(f, encoding='bytes').items()
}
d['batch_label'] = d['batch_label'].decode('utf-8')
assert(d['batch_label'] == expected_batch_label)
data = np.asarray(d['data'], dtype=x_dtype)
labels = np.asarray(d[labels_key], dtype=y_dtype)
# change shape
data = data.reshape((data.shape[0], 3, 32, 32))
data = np.transpose(data, (0, 2, 3, 1))
if x_shape:
data = data.reshape([data.shape[0]] + list(x_shape))
return data, labels
def load_cifar10(x_shape: Sequence[int] = (32, 32, 3),
x_dtype: ArrayDType = np.float32,
y_dtype: ArrayDType = np.int32) -> Tuple[XYArrayTuple, XYArrayTuple]:
"""
Load the CIFAR-10 dataset as NumPy arrays.
Args:
x_shape: Reshape each digit into this shape.
x_dtype: Cast each digit into this data type.
y_dtype: Cast each label into this data type.
Returns:
The ``(train_x, train_y), (test_x, test_y)`` arrays.
"""
# check the arguments
x_shape = _validate_x_shape(x_shape, (32, 32, 3))
# fetch data
path = CacheDir('cifar').download_and_extract(
_CIFAR_10_URI, hasher=hashlib.md5(), expected_hash=_CIFAR_10_MD5)
data_dir = os.path.join(path, _CIFAR_10_CONTENT_DIR)
# load the data
train_num = 50000
train_x = np.zeros((train_num,) + x_shape, dtype=x_dtype)
train_y = np.zeros((train_num,), dtype=y_dtype)
for i in range(1, 6):
path = os.path.join(data_dir, 'data_batch_{}'.format(i))
x, y = _cifar_load_batch(
path, x_shape=x_shape, x_dtype=x_dtype, y_dtype=y_dtype,
expected_batch_label='training batch {} of 5'.format(i)
)
(train_x[(i - 1) * 10000: i * 10000, ...],
train_y[(i - 1) * 10000: i * 10000]) = x, y
path = os.path.join(data_dir, 'test_batch')
test_x, test_y = _cifar_load_batch(
path, x_shape=x_shape, x_dtype=x_dtype, y_dtype=y_dtype,
expected_batch_label='testing batch 1 of 1'
)
assert(len(test_x) == len(test_y) == 10000)
return (train_x, train_y), (test_x, test_y)
def load_cifar100(label_mode: str = 'fine',
x_shape: Sequence[int] = (32, 32, 3),
x_dtype: ArrayDType = np.float32,
y_dtype: ArrayDType = np.int32) -> Tuple[XYArrayTuple, XYArrayTuple]:
"""
Load the CIFAR-100 dataset as NumPy arrays.
Args:
label_mode: One of {"fine", "coarse"}.
x_shape: Reshape each digit into this shape.
x_dtype: Cast each digit into this data type.
y_dtype: Cast each label into this data type.
Returns:
The ``(train_x, train_y), (test_x, test_y)`` arrays.
"""
# check the arguments
label_mode = validate_enum_arg('label_mode', label_mode, ('fine', 'coarse'))
x_shape = _validate_x_shape(x_shape, (32, 32, 3))
# fetch data
path = CacheDir('cifar').download_and_extract(
_CIFAR_100_URI, hasher=hashlib.md5(), expected_hash=_CIFAR_100_MD5)
data_dir = os.path.join(path, _CIFAR_100_CONTENT_DIR)
# load the data
path = os.path.join(data_dir, 'train')
train_x, train_y = _cifar_load_batch(
path, x_shape=x_shape, x_dtype=x_dtype, y_dtype=y_dtype,
expected_batch_label='training batch 1 of 1',
labels_key='{}_labels'.format(label_mode)
)
assert(len(train_x) == len(train_y) == 50000)
path = os.path.join(data_dir, 'test')
test_x, test_y = _cifar_load_batch(
path, x_shape=x_shape, x_dtype=x_dtype, y_dtype=y_dtype,
expected_batch_label='testing batch 1 of 1',
labels_key='{}_labels'.format(label_mode)
)
assert(len(test_x) == len(test_y) == 10000)
return (train_x, train_y), (test_x, test_y)
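# --- Usage sketch (not part of the original module): shapes follow the
# documented defaults; the first call downloads the archives into the
# package cache directory.
if __name__ == '__main__':
    (train_x, train_y), (test_x, test_y) = load_mnist(x_dtype=np.float32)
    print(train_x.shape, train_y.shape)        # (60000, 28, 28) (60000,)
    (c_train, c_test) = load_cifar100(label_mode='coarse')
    print(c_train[0].shape, c_test[0].shape)   # (50000, 32, 32, 3) (10000, 32, 32, 3)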
| 34.371212
| 87
| 0.638417
|
import gzip
import hashlib
import os
import pickle
from typing import *
import idx2numpy
import numpy as np
from ..typing_ import *
from ..utils import CacheDir, validate_enum_arg
__all__ = ['load_mnist', 'load_fashion_mnist', 'load_cifar10', 'load_cifar100']
_MNIST_LIKE_FILE_NAMES = {
'train_x': 'train-images-idx3-ubyte.gz',
'train_y': 'train-labels-idx1-ubyte.gz',
'test_x': 't10k-images-idx3-ubyte.gz',
'test_y': 't10k-labels-idx1-ubyte.gz',
}
_MNIST_URI_PREFIX = 'http://yann.lecun.com/exdb/mnist/'
_MNIST_FILE_MD5 = {
'train_x': 'f68b3c2dcbeaaa9fbdd348bbdeb94873',
'train_y': 'd53e105ee54ea40749a09fcbcd1e9432',
'test_x': '9fb629c4189551a2d022fa330f9573f3',
'test_y': 'ec29112dd5afa0611ce80d1b7f02629c',
}
_FASHION_MNIST_URI_PREFIX = 'http://fashion-mnist.s3-website.eu-central-1.' \
'amazonaws.com/'
_FASHION_MNIST_FILE_MD5 = {
'train_x': '8d4fb7e6c68d591d4c3dfef9ec88bf0d',
'train_y': '25c81989df183df01b3e8a0aad5dffbe',
'test_x': 'bef4ecab320f06d8554ea6380940ec79',
'test_y': 'bb300cfdad3c16e7a12a480ee83cd310',
}
_CIFAR_10_URI = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
_CIFAR_10_MD5 = 'c58f30108f718f92721af3b95e74349a'
_CIFAR_10_CONTENT_DIR = 'cifar-10-batches-py'
_CIFAR_100_URI = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'
_CIFAR_100_MD5 = 'eb9058c3a382ffc7106e4002c42a8d85'
_CIFAR_100_CONTENT_DIR = 'cifar-100-python'
def _validate_x_shape(shape, default_shape):
shape = tuple(int(v) for v in shape)
default_shape = tuple(int(v) for v in default_shape)
value_size = int(np.prod(default_shape))
if np.prod(shape) != value_size:
        raise ValueError(f'`x_shape` does not have a product of {value_size}: {shape}')
return shape
def load_mnist_like(uri_prefix: str,
file_md5: Dict[str, str],
cache_name: str,
x_shape: Sequence[int] = (28, 28),
x_dtype: ArrayDType = np.uint8,
y_dtype: ArrayDType = np.int32
) -> Tuple[XYArrayTuple, XYArrayTuple]:
def _fetch_array(array_name):
uri = uri_prefix + _MNIST_LIKE_FILE_NAMES[array_name]
md5 = file_md5[array_name]
path = CacheDir(cache_name).download(
uri, hasher=hashlib.md5(), expected_hash=md5)
with gzip.open(path, 'rb') as f:
return idx2numpy.convert_from_file(f)
x_shape = _validate_x_shape(x_shape, (28, 28))
train_x = _fetch_array('train_x').astype(x_dtype)
train_y = _fetch_array('train_y').astype(y_dtype)
test_x = _fetch_array('test_x').astype(x_dtype)
test_y = _fetch_array('test_y').astype(y_dtype)
assert(len(train_x) == len(train_y) == 60000)
assert(len(test_x) == len(test_y) == 10000)
train_x = train_x.reshape([len(train_x)] + list(x_shape))
test_x = test_x.reshape([len(test_x)] + list(x_shape))
return (train_x, train_y), (test_x, test_y)
def load_mnist(x_shape: Sequence[int] = (28, 28),
x_dtype: ArrayDType = np.uint8,
y_dtype: ArrayDType = np.int32
) -> Tuple[XYArrayTuple, XYArrayTuple]:
return load_mnist_like(
_MNIST_URI_PREFIX, _MNIST_FILE_MD5, 'mnist', x_shape, x_dtype, y_dtype)
def load_fashion_mnist(x_shape: Sequence[int] = (28, 28),
x_dtype: ArrayDType = np.uint8,
y_dtype: ArrayDType = np.int32
) -> Tuple[XYArrayTuple, XYArrayTuple]:
return load_mnist_like(
_FASHION_MNIST_URI_PREFIX, _FASHION_MNIST_FILE_MD5, 'fashion_mnist',
x_shape, x_dtype, y_dtype)
def _cifar_load_batch(path, x_shape, x_dtype, y_dtype, expected_batch_label,
labels_key='labels'):
with open(path, 'rb') as f:
d = {
k.decode('utf-8'): v
for k, v in pickle.load(f, encoding='bytes').items()
}
d['batch_label'] = d['batch_label'].decode('utf-8')
assert(d['batch_label'] == expected_batch_label)
data = np.asarray(d['data'], dtype=x_dtype)
labels = np.asarray(d[labels_key], dtype=y_dtype)
data = data.reshape((data.shape[0], 3, 32, 32))
data = np.transpose(data, (0, 2, 3, 1))
if x_shape:
data = data.reshape([data.shape[0]] + list(x_shape))
return data, labels
def load_cifar10(x_shape: Sequence[int] = (32, 32, 3),
x_dtype: ArrayDType = np.float32,
y_dtype: ArrayDType = np.int32) -> Tuple[XYArrayTuple, XYArrayTuple]:
x_shape = _validate_x_shape(x_shape, (32, 32, 3))
path = CacheDir('cifar').download_and_extract(
_CIFAR_10_URI, hasher=hashlib.md5(), expected_hash=_CIFAR_10_MD5)
data_dir = os.path.join(path, _CIFAR_10_CONTENT_DIR)
train_num = 50000
train_x = np.zeros((train_num,) + x_shape, dtype=x_dtype)
train_y = np.zeros((train_num,), dtype=y_dtype)
for i in range(1, 6):
path = os.path.join(data_dir, 'data_batch_{}'.format(i))
x, y = _cifar_load_batch(
path, x_shape=x_shape, x_dtype=x_dtype, y_dtype=y_dtype,
expected_batch_label='training batch {} of 5'.format(i)
)
(train_x[(i - 1) * 10000: i * 10000, ...],
train_y[(i - 1) * 10000: i * 10000]) = x, y
path = os.path.join(data_dir, 'test_batch')
test_x, test_y = _cifar_load_batch(
path, x_shape=x_shape, x_dtype=x_dtype, y_dtype=y_dtype,
expected_batch_label='testing batch 1 of 1'
)
assert(len(test_x) == len(test_y) == 10000)
return (train_x, train_y), (test_x, test_y)
def load_cifar100(label_mode: str = 'fine',
x_shape: Sequence[int] = (32, 32, 3),
x_dtype: ArrayDType = np.float32,
y_dtype: ArrayDType = np.int32) -> Tuple[XYArrayTuple, XYArrayTuple]:
label_mode = validate_enum_arg('label_mode', label_mode, ('fine', 'coarse'))
x_shape = _validate_x_shape(x_shape, (32, 32, 3))
path = CacheDir('cifar').download_and_extract(
_CIFAR_100_URI, hasher=hashlib.md5(), expected_hash=_CIFAR_100_MD5)
data_dir = os.path.join(path, _CIFAR_100_CONTENT_DIR)
path = os.path.join(data_dir, 'train')
train_x, train_y = _cifar_load_batch(
path, x_shape=x_shape, x_dtype=x_dtype, y_dtype=y_dtype,
expected_batch_label='training batch 1 of 1',
labels_key='{}_labels'.format(label_mode)
)
assert(len(train_x) == len(train_y) == 50000)
path = os.path.join(data_dir, 'test')
test_x, test_y = _cifar_load_batch(
path, x_shape=x_shape, x_dtype=x_dtype, y_dtype=y_dtype,
expected_batch_label='testing batch 1 of 1',
labels_key='{}_labels'.format(label_mode)
)
assert(len(test_x) == len(test_y) == 10000)
return (train_x, train_y), (test_x, test_y)
| true
| true
|
f718bd3022cc2f58c5419de421533f5813d0adac
| 4,907
|
py
|
Python
|
tb_rest_client/models/models_ce/shared_attributes_setting_snmp_communication_config.py
|
samson0v/python_tb_rest_client
|
08ff7898740f7cec2170e85d5c3c89e222e967f7
|
[
"Apache-2.0"
] | 30
|
2020-06-19T06:42:50.000Z
|
2021-08-23T21:16:36.000Z
|
tb_rest_client/models/models_ce/shared_attributes_setting_snmp_communication_config.py
|
samson0v/python_tb_rest_client
|
08ff7898740f7cec2170e85d5c3c89e222e967f7
|
[
"Apache-2.0"
] | 25
|
2021-08-30T01:17:27.000Z
|
2022-03-16T14:10:14.000Z
|
tb_rest_client/models/models_ce/shared_attributes_setting_snmp_communication_config.py
|
samson0v/python_tb_rest_client
|
08ff7898740f7cec2170e85d5c3c89e222e967f7
|
[
"Apache-2.0"
] | 23
|
2020-07-06T13:41:54.000Z
|
2021-08-23T21:04:50.000Z
|
# coding: utf-8
"""
ThingsBoard REST API
ThingsBoard open-source IoT platform REST API documentation. # noqa: E501
OpenAPI spec version: 3.3.3-SNAPSHOT
Contact: info@thingsboard.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from tb_rest_client.models.models_ce.snmp_communication_config import SnmpCommunicationConfig # noqa: F401,E501
class SharedAttributesSettingSnmpCommunicationConfig(SnmpCommunicationConfig):
"""
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'mappings': 'list[SnmpMapping]',
'spec': 'str'
}
if hasattr(SnmpCommunicationConfig, "swagger_types"):
swagger_types.update(SnmpCommunicationConfig.swagger_types)
attribute_map = {
'mappings': 'mappings',
'spec': 'spec'
}
if hasattr(SnmpCommunicationConfig, "attribute_map"):
attribute_map.update(SnmpCommunicationConfig.attribute_map)
def __init__(self, mappings=None, spec=None, *args, **kwargs): # noqa: E501
"""SharedAttributesSettingSnmpCommunicationConfig - a model defined in Swagger""" # noqa: E501
self._mappings = None
self._spec = None
self.discriminator = None
if mappings is not None:
self.mappings = mappings
if spec is not None:
self.spec = spec
SnmpCommunicationConfig.__init__(self, *args, **kwargs)
@property
def mappings(self):
"""Gets the mappings of this SharedAttributesSettingSnmpCommunicationConfig. # noqa: E501
:return: The mappings of this SharedAttributesSettingSnmpCommunicationConfig. # noqa: E501
:rtype: list[SnmpMapping]
"""
return self._mappings
@mappings.setter
def mappings(self, mappings):
"""Sets the mappings of this SharedAttributesSettingSnmpCommunicationConfig.
:param mappings: The mappings of this SharedAttributesSettingSnmpCommunicationConfig. # noqa: E501
:type: list[SnmpMapping]
"""
self._mappings = mappings
@property
def spec(self):
"""Gets the spec of this SharedAttributesSettingSnmpCommunicationConfig. # noqa: E501
:return: The spec of this SharedAttributesSettingSnmpCommunicationConfig. # noqa: E501
:rtype: str
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this SharedAttributesSettingSnmpCommunicationConfig.
:param spec: The spec of this SharedAttributesSettingSnmpCommunicationConfig. # noqa: E501
:type: str
"""
allowed_values = ["CLIENT_ATTRIBUTES_QUERYING", "SHARED_ATTRIBUTES_SETTING", "TELEMETRY_QUERYING",
"TO_DEVICE_RPC_REQUEST"] # noqa: E501
if spec not in allowed_values:
raise ValueError(
"Invalid value for `spec` ({0}), must be one of {1}" # noqa: E501
.format(spec, allowed_values)
)
self._spec = spec
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SharedAttributesSettingSnmpCommunicationConfig, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SharedAttributesSettingSnmpCommunicationConfig):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 32.713333
| 112
| 0.611779
|
import pprint
import re
import six
from tb_rest_client.models.models_ce.snmp_communication_config import SnmpCommunicationConfig
class SharedAttributesSettingSnmpCommunicationConfig(SnmpCommunicationConfig):
swagger_types = {
'mappings': 'list[SnmpMapping]',
'spec': 'str'
}
if hasattr(SnmpCommunicationConfig, "swagger_types"):
swagger_types.update(SnmpCommunicationConfig.swagger_types)
attribute_map = {
'mappings': 'mappings',
'spec': 'spec'
}
if hasattr(SnmpCommunicationConfig, "attribute_map"):
attribute_map.update(SnmpCommunicationConfig.attribute_map)
def __init__(self, mappings=None, spec=None, *args, **kwargs):
self._mappings = None
self._spec = None
self.discriminator = None
if mappings is not None:
self.mappings = mappings
if spec is not None:
self.spec = spec
SnmpCommunicationConfig.__init__(self, *args, **kwargs)
@property
def mappings(self):
return self._mappings
@mappings.setter
def mappings(self, mappings):
self._mappings = mappings
@property
def spec(self):
return self._spec
@spec.setter
def spec(self, spec):
allowed_values = ["CLIENT_ATTRIBUTES_QUERYING", "SHARED_ATTRIBUTES_SETTING", "TELEMETRY_QUERYING",
"TO_DEVICE_RPC_REQUEST"]
if spec not in allowed_values:
raise ValueError(
"Invalid value for `spec` ({0}), must be one of {1}"
.format(spec, allowed_values)
)
self._spec = spec
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SharedAttributesSettingSnmpCommunicationConfig, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, SharedAttributesSettingSnmpCommunicationConfig):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
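# --- Usage sketch (not part of the generated file): exercising the spec
# setter's allowed-values check. Assumes the parent config accepts an empty
# initializer; a real mappings payload would use SnmpMapping instances.
if __name__ == '__main__':
    cfg = SharedAttributesSettingSnmpCommunicationConfig(
        mappings=[], spec="SHARED_ATTRIBUTES_SETTING")
    cfg.spec = "TELEMETRY_QUERYING"            # accepted: value is in allowed_values
    try:
        cfg.spec = "NOT_A_SPEC"                # rejected by the setter
    except ValueError as err:
        print(err)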
| true
| true
|
f718be3ab1e8857e704777f735842ba57cdcf3f2
| 27,925
|
py
|
Python
|
sdk/communication/azure-communication-chat/tests/test_chat_thread_client_async.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | 1
|
2022-03-09T08:59:13.000Z
|
2022-03-09T08:59:13.000Z
|
sdk/communication/azure-communication-chat/tests/test_chat_thread_client_async.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | null | null | null |
sdk/communication/azure-communication-chat/tests/test_chat_thread_client_async.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | 1
|
2022-03-04T06:21:56.000Z
|
2022-03-04T06:21:56.000Z
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for
# license information.
# -------------------------------------------------------------------------
from azure.core.credentials import AccessToken
from datetime import datetime
from msrest.serialization import TZ_UTC
from azure.communication.chat.aio import ChatThreadClient
from azure.communication.chat import (
ChatParticipant,
ChatMessageType
)
from azure.communication.chat._shared.models import(
CommunicationUserIdentifier
)
from unittest_helpers import mock_response
from azure.core.exceptions import HttpResponseError
from unittest.mock import Mock, patch
import pytest
import time
import calendar
def _convert_datetime_to_utc_int(input):
return int(calendar.timegm(input.utctimetuple()))
async def mock_get_token():
return AccessToken("some_token", _convert_datetime_to_utc_int(datetime.now().replace(tzinfo=TZ_UTC)))
credential = Mock(get_token=mock_get_token)
@pytest.mark.asyncio
async def test_update_topic():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=204)
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
topic = "update topic"
try:
await chat_thread_client.update_topic(topic=topic)
except:
raised = True
assert raised == False
@pytest.mark.asyncio
async def test_send_message():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
message_id='1596823919339'
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=201, json_payload={"id": message_id})
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
create_message_result_id = None
try:
content='hello world'
sender_display_name='sender name'
metadata={ "tags": "tag" }
create_message_result = await chat_thread_client.send_message(
content,
sender_display_name=sender_display_name,
metadata=metadata)
create_message_result_id = create_message_result.id
except:
raised = True
assert raised == False
assert create_message_result_id == message_id
@pytest.mark.asyncio
async def test_send_message_w_type():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
message_id='1596823919339'
raised = False
message_str = "Hi I am Bob."
create_message_result_id = None
chat_message_types = [ChatMessageType.TEXT, ChatMessageType.HTML, "text", "html"]
for chat_message_type in chat_message_types:
async def mock_send(*_, **__):
return mock_response(status_code=201, json_payload={
"id": message_id,
"type": chat_message_type,
"sequenceId": "3",
"version": message_id,
"content": {
"message": message_str,
"topic": "Lunch Chat thread",
"participants": [
{
"id": "8:acs:8540c0de-899f-5cce-acb5-3ec493af3800_0e59221d-0c1d-46ae-9544-c963ce56c10b",
"displayName": "Bob",
"shareHistoryTime": "2020-10-30T10:50:50Z"
}
],
"initiator": "8:acs:8540c0de-899f-5cce-acb5-3ec493af3800_0e59221d-0c1d-46ae-9544-c963ce56c10b"
},
"senderDisplayName": "Bob",
"createdOn": "2021-01-27T01:37:33Z",
"senderId": "8:acs:46849534-eb08-4ab7-bde7-c36928cd1547_00000007-e155-1f06-1db7-3a3a0d00004b"
})
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
try:
content='hello world'
sender_display_name='sender name'
create_message_result = await chat_thread_client.send_message(
content,
chat_message_type=chat_message_type,
sender_display_name=sender_display_name)
create_message_result_id = create_message_result.id
except:
raised = True
assert raised == False
assert create_message_result_id == message_id
@pytest.mark.asyncio
async def test_send_message_w_invalid_type_throws_error():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
message_id='1596823919339'
raised = False
# the payload is irrelevant - it'll fail before
async def mock_send(*_, **__):
return mock_response(status_code=201, json_payload={"id": message_id})
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
create_message_result_id = None
chat_message_types = [ChatMessageType.PARTICIPANT_ADDED, ChatMessageType.PARTICIPANT_REMOVED,
ChatMessageType.TOPIC_UPDATED, "participant_added", "participant_removed", "topic_updated",
"ChatMessageType.TEXT", "ChatMessageType.HTML",
"ChatMessageType.PARTICIPANT_ADDED", "ChatMessageType.PARTICIPANT_REMOVED",
"ChatMessageType.TOPIC_UPDATED"]
for chat_message_type in chat_message_types:
try:
content='hello world'
sender_display_name='sender name'
create_message_result = await chat_thread_client.send_message(
content,
chat_message_type=chat_message_type,
sender_display_name=sender_display_name)
except:
raised = True
assert raised == True
@pytest.mark.asyncio
async def test_get_message():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
message_id='1596823919339'
raised = False
message_str = "Hi I am Bob."
async def mock_send(*_, **__):
return mock_response(status_code=200, json_payload={
"id": message_id,
"type": "text",
"sequenceId": "3",
"version": message_id,
"content": {
"message": message_str,
"topic": "Lunch Chat thread",
"participants": [
{
"communicationIdentifier": {"rawId": "string", "communicationUser": {
"id": "8:acs:8540c0de-899f-5cce-acb5-3ec493af3800_0e59221d-0c1d-46ae-9544-c963ce56c10b"}},
"displayName": "Bob",
"shareHistoryTime": "2020-10-30T10:50:50Z"
}
],
"initiatorCommunicationIdentifier": {"rawId": "string", "communicationUser": {
"id": "8:acs:8540c0de-899f-5cce-acb5-3ec493af3800_0e59221d-0c1d-46ae-9544-c963ce56c10b"}}
},
"senderDisplayName": "Bob",
"createdOn": "2021-01-27T01:37:33Z",
"senderCommunicationIdentifier": {"rawId": "string", "communicationUser": {
"id": "8:acs:8540c0de-899f-5cce-acb5-3ec493af3800_0e59221d-0c1d-46ae-9544-c963ce56c10b"}},
"deletedOn": "2021-01-27T01:37:33Z",
"editedOn": "2021-01-27T01:37:33Z",
"metadata": {
"tags": "tag"
}
})
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
message = None
try:
message = await chat_thread_client.get_message(message_id)
except:
raised = True
assert raised == False
assert message.id == message_id
assert message.type == ChatMessageType.TEXT
assert message.content.message == message_str
assert message.metadata["tags"] == "tag"
assert len(message.content.participants) > 0
@pytest.mark.asyncio
async def test_list_messages():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
message_id='1596823919339'
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=200, json_payload={"value": [{
"id": message_id,
"type": "text",
"sequenceId": "3",
"version": message_id,
"content": {
"message": "message_str",
"topic": "Lunch Chat thread",
"participants": [
{
"communicationIdentifier": {"rawId": "string", "communicationUser": {
"id": "8:acs:8540c0de-899f-5cce-acb5-3ec493af3800_0e59221d-0c1d-46ae-9544-c963ce56c10b"}},
"displayName": "Bob",
"shareHistoryTime": "2020-10-30T10:50:50Z"
}
],
"initiatorCommunicationIdentifier": {"rawId": "string", "communicationUser": {
"id": "8:acs:8540c0de-899f-5cce-acb5-3ec493af3800_0e59221d-0c1d-46ae-9544-c963ce56c10b"}}
},
"senderDisplayName": "Bob",
"createdOn": "2021-01-27T01:37:33Z",
"senderCommunicationIdentifier": {"rawId": "string", "communicationUser": {
"id": "8:acs:8540c0de-899f-5cce-acb5-3ec493af3800_0e59221d-0c1d-46ae-9544-c963ce56c10b"}},
"deletedOn": "2021-01-27T01:37:33Z",
"editedOn": "2021-01-27T01:37:33Z"
}]})
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
chat_messages = None
try:
chat_messages = chat_thread_client.list_messages(results_per_page=1)
except:
raised = True
assert raised == False
items = []
async for item in chat_messages:
items.append(item)
assert len(items) == 1
assert items[0].id == message_id
@pytest.mark.asyncio
async def test_list_messages_with_start_time():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=200, json_payload={
"value": [
{
"id": "message_id_1",
"type": "text",
"sequenceId": "3",
"version": "message_id_1",
"content": {
"message": "message_str",
"topic": "Lunch Chat thread",
"participants": [
{
"communicationIdentifier": {"rawId": "string", "communicationUser": {
"id": "8:acs:8540c0de-899f-5cce-acb5-3ec493af3800_0e59221d-0c1d-46ae-9544-c963ce56c10b"}},
"displayName": "Bob",
"shareHistoryTime": "2020-10-30T10:50:50Z"
}
],
"initiatorCommunicationIdentifier": {"rawId": "string", "communicationUser": {
"id": "8:acs:8540c0de-899f-5cce-acb5-3ec493af3800_0e59221d-0c1d-46ae-9544-c963ce56c10b"}}
},
"senderDisplayName": "Bob",
"createdOn": "2021-01-27T01:37:33Z",
"senderCommunicationIdentifier": {"rawId": "string", "communicationUser": {
"id": "8:acs:8540c0de-899f-5cce-acb5-3ec493af3800_0e59221d-0c1d-46ae-9544-c963ce56c10b"}},
"deletedOn": "2021-01-27T01:37:33Z",
"editedOn": "2021-01-27T01:37:33Z"
},
{
"id": "message_id_2",
"type": "text",
"sequenceId": "3",
"version": "message_id_2",
"content": {
"message": "message_str",
"topic": "Lunch Chat thread",
"participants": [
{
"communicationIdentifier": {"rawId": "string", "communicationUser": {
"id": "8:acs:8540c0de-899f-5cce-acb5-3ec493af3800_0e59221d-0c1d-46ae-9544-c963ce56c10b"}},
"displayName": "Bob",
"shareHistoryTime": "2020-10-30T10:50:50Z"
}
],
"initiatorCommunicationIdentifier": {"rawId": "string", "communicationUser": {
"id": "8:acs:8540c0de-899f-5cce-acb5-3ec493af3800_0e59221d-0c1d-46ae-9544-c963ce56c10b"}}
},
"senderDisplayName": "Bob",
"createdOn": "2021-01-27T01:37:33Z",
"senderCommunicationIdentifier": {"rawId": "string", "communicationUser": {
"id": "8:acs:8540c0de-899f-5cce-acb5-3ec493af3800_0e59221d-0c1d-46ae-9544-c963ce56c10b"}},
"deletedOn": "2021-01-27T01:37:33Z",
"editedOn": "2021-01-27T01:37:33Z"
}]})
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
chat_messages = None
try:
chat_messages = chat_thread_client.list_messages(
start_time=datetime(2020, 8, 17, 18, 0, 0)
)
except:
raised = True
assert raised == False
items = []
async for item in chat_messages:
items.append(item)
assert len(items) == 2
@pytest.mark.asyncio
async def test_update_message_content():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
message_id='1596823919339'
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=204)
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
try:
content = "updated message content"
await chat_thread_client.update_message(message_id, content=content)
except:
raised = True
assert raised == False
@pytest.mark.asyncio
async def test_update_message_metadata():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
message_id='1596823919339'
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=204)
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
try:
metadata={ "tags": "tag" }
await chat_thread_client.update_message(message_id, metadata=metadata)
except:
raised = True
assert raised == False
@pytest.mark.asyncio
async def test_delete_message():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
message_id='1596823919339'
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=204)
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
try:
await chat_thread_client.delete_message(message_id)
except:
raised = True
assert raised == False
@pytest.mark.asyncio
async def test_list_participants():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
participant_id="8:acs:57b9bac9-df6c-4d39-a73b-26e944adf6ea_9b0110-08007f1041"
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=200, json_payload={"value": [
{
"communicationIdentifier": {
"rawId": participant_id,
"communicationUser": {
"id": participant_id
}
},
"displayName": "Bob",
"shareHistoryTime": "2020-10-30T10:50:50Z"
}
]})
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
chat_thread_participants = None
try:
chat_thread_participants = chat_thread_client.list_participants()
except:
raised = True
assert raised == False
items = []
async for item in chat_thread_participants:
items.append(item)
assert len(items) == 1
@pytest.mark.asyncio
async def test_list_participants_with_results_per_page():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
participant_id_1 = "8:acs:9b665d53-8164-4923-ad5d-5e983b07d2e7_00000006-5399-552c-b274-5a3a0d0000dc"
participant_id_2 = "8:acs:9b665d53-8164-4923-ad5d-5e983b07d2e7_00000006-9d32-35c9-557d-5a3a0d0002f1"
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=200, json_payload={
"value": [
{
"communicationIdentifier": {
"rawId": participant_id_1,
"communicationUser": {
"id": participant_id_1
}
},
"displayName": "Bob",
"shareHistoryTime": "2020-10-30T10:50:50Z"
},
{
"communicationIdentifier": {
"rawId": participant_id_2,
"communicationUser": {
"id": participant_id_2
}
},
"displayName": "Bob",
"shareHistoryTime": "2020-10-30T10:50:50Z"
}
]})
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
chat_thread_participants = None
try:
chat_thread_participants = chat_thread_client.list_participants(results_per_page=2)
except:
raised = True
assert raised == False
items = []
async for item in chat_thread_participants:
items.append(item)
assert len(items) == 2
@pytest.mark.asyncio
async def test_add_participants():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
new_participant_id="8:acs:57b9bac9-df6c-4d39-a73b-26e944adf6ea_9b0110-08007f1041"
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=201)
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
new_participant = ChatParticipant(
identifier=CommunicationUserIdentifier(new_participant_id),
display_name='name',
share_history_time=datetime.utcnow())
participants = [new_participant]
try:
await chat_thread_client.add_participants(participants)
except:
raised = True
assert raised == False
@pytest.mark.asyncio
async def test_add_participants_w_failed_participants_returns_nonempty_list():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
new_participant_id="8:acs:57b9bac9-df6c-4d39-a73b-26e944adf6ea_9b0110-08007f1041"
raised = False
error_message = "some error message"
async def mock_send(*_, **__):
return mock_response(status_code=201, json_payload={
"invalidParticipants": [
{
"code": "string",
"message": error_message,
"target": new_participant_id,
"details": []
}
]
})
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
new_participant = ChatParticipant(
identifier=CommunicationUserIdentifier(new_participant_id),
display_name='name',
share_history_time=datetime.utcnow())
participants = [new_participant]
try:
result = await chat_thread_client.add_participants(participants)
except:
raised = True
assert raised == False
assert len(result) == 1
failed_participant = result[0][0]
communication_error = result[0][1]
assert new_participant.identifier.properties['id'] == failed_participant.identifier.properties['id']
assert new_participant.display_name == failed_participant.display_name
assert new_participant.share_history_time == failed_participant.share_history_time
assert error_message == communication_error.message
@pytest.mark.asyncio
async def test_remove_participant():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
participant_id="8:acs:57b9bac9-df6c-4d39-a73b-26e944adf6ea_9b0110-08007f1041"
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=204)
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
try:
await chat_thread_client.remove_participant(identifier=CommunicationUserIdentifier(participant_id))
except:
raised = True
assert raised == False
@pytest.mark.asyncio
async def test_send_typing_notification():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=200)
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
try:
await chat_thread_client.send_typing_notification()
except:
raised = True
assert raised == False
@pytest.mark.asyncio
async def test_send_typing_notification_with_sender_display_name():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=200)
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
try:
await chat_thread_client.send_typing_notification(sender_display_name="John")
except:
raised = True
assert raised == False
@pytest.mark.asyncio
async def test_send_read_receipt():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
message_id="1596823919339"
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=200)
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
try:
await chat_thread_client.send_read_receipt(message_id)
except:
raised = True
assert raised == False
@pytest.mark.asyncio
async def test_list_read_receipts():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
message_id="1596823919339"
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=200, json_payload={"value": [
{
"chatMessageId": message_id,
"senderCommunicationIdentifier": {
"rawId": "string",
"communicationUser": {
"id": "string"
}
}
}
]})
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
read_receipts = None
try:
read_receipts = chat_thread_client.list_read_receipts()
except:
raised = True
assert raised == False
items = []
async for item in read_receipts:
items.append(item)
assert len(items) == 1
@pytest.mark.asyncio
async def test_list_read_receipts_with_results_per_page():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
message_id_1 = "1596823919339"
message_id_2 = "1596823919340"
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=200, json_payload={
"value": [
{
"chatMessageId": message_id_1,
"senderCommunicationIdentifier": {
"rawId": "string",
"communicationUser": {
"id": "string"
}
}
},
{
"chatMessageId": message_id_2,
"senderCommunicationIdentifier": {
"rawId": "string",
"communicationUser": {
"id": "string"
}
}
}
]})
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
read_receipts = None
try:
read_receipts = chat_thread_client.list_read_receipts(results_per_page=2)
except:
raised = True
assert raised == False
items = []
async for item in read_receipts:
items.append(item)
assert len(items) == 2
@pytest.mark.asyncio
async def test_list_read_receipts_with_results_per_page_and_skip():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
message_id_1 = "1596823919339"
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=200, json_payload={
"value": [
{
"chatMessageId": message_id_1,
"senderCommunicationIdentifier": {
"rawId": "string",
"communicationUser": {
"id": "string"
}
}
}
]})
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
read_receipts = None
try:
read_receipts = chat_thread_client.list_read_receipts(results_per_page=1, skip=1)
except:
raised = True
assert raised == False
items = []
async for item in read_receipts:
items.append(item)
assert len(items) == 1
@pytest.mark.asyncio
async def test_get_properties():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=200, json_payload={
"id": thread_id,
"topic": "Lunch Chat thread",
"createdOn": "2020-10-30T10:50:50Z",
"deletedOn": "2020-10-30T10:50:50Z",
"createdByCommunicationIdentifier": {"rawId": "string", "communicationUser": {"id": "string"}}
})
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
get_thread_result = None
try:
get_thread_result = await chat_thread_client.get_properties()
except:
raised = True
assert raised == False
assert get_thread_result.id == thread_id
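# Sketch (assumption): every test above relies on the mock_response helper from
# unittest_helpers, whose implementation is not shown here. The stand-in below
# only illustrates the shape the tests depend on (a Mock exposing a status_code
# and, optionally, a JSON body the client can deserialize) and may differ from
# the real helper.
import json
from unittest.mock import Mock as _Mock


def _mock_response_sketch(status_code=200, headers=None, json_payload=None):
    # Assumed stand-in; the real unittest_helpers.mock_response may differ.
    response = _Mock(status_code=status_code, headers=headers or {})
    if json_payload is not None:
        response.headers["content-type"] = "application/json"
        response.content_type = "application/json"
        response.text = lambda encoding=None: json.dumps(json_payload)
    else:
        response.text = lambda encoding=None: ""
    return response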
| 37.134309
| 126
| 0.582775
|
| true
| true
|
f718be894a4691236341bdac4c5579dd0d6a6f14
| 8,462
|
py
|
Python
|
tests/callbacks/test_early_stopping.py
|
NunoEdgarGFlowHub/torchbearer
|
d2b21b8ffcabde5b505cb1c736e05af6ee4276ca
|
[
"MIT"
] | 358
|
2018-07-23T13:30:38.000Z
|
2019-06-02T07:18:35.000Z
|
tests/callbacks/test_early_stopping.py
|
Jayaudaykmar26589/torchbearer
|
940e75ec88acd59d5a97aa8c721f7cfa30a5c4d0
|
[
"MIT"
] | 307
|
2018-07-18T12:07:23.000Z
|
2019-06-03T18:00:27.000Z
|
tests/callbacks/test_early_stopping.py
|
Jayaudaykmar26589/torchbearer
|
940e75ec88acd59d5a97aa8c721f7cfa30a5c4d0
|
[
"MIT"
] | 42
|
2018-07-23T22:49:23.000Z
|
2019-05-20T07:22:55.000Z
|
from unittest import TestCase
from mock import MagicMock
import torchbearer
from torchbearer.callbacks import EarlyStopping
class TestEarlyStopping(TestCase):
def test_step_on_batch(self):
stopper = EarlyStopping(monitor='test_metric', mode='min', step_on_batch=True)
stopper.step = MagicMock()
stopper.on_step_training('test')
self.assertTrue(stopper.step.call_count == 1)
stopper.on_end_epoch('test')
self.assertTrue(stopper.step.call_count == 1)
def test_min_should_stop(self):
state = {
torchbearer.EPOCH: 1,
torchbearer.STOP_TRAINING: False,
torchbearer.METRICS: {'test_metric': 0.001}
}
stopper = EarlyStopping(monitor='test_metric', mode='min')
stopper.on_start(state)
stopper.on_end_epoch(state)
self.assertFalse(state[torchbearer.STOP_TRAINING])
state[torchbearer.METRICS]['test_metric'] = 0.01
stopper.on_end_epoch(state)
self.assertTrue(state[torchbearer.STOP_TRAINING])
def test_min_should_continue(self):
state = {
torchbearer.EPOCH: 1,
torchbearer.STOP_TRAINING: False,
torchbearer.METRICS: {'test_metric': 0.001}
}
stopper = EarlyStopping(monitor='test_metric', mode='min')
stopper.on_start(state)
stopper.on_end_epoch(state)
self.assertFalse(state[torchbearer.STOP_TRAINING])
state[torchbearer.METRICS]['test_metric'] = 0.0001
stopper.on_end_epoch(state)
self.assertFalse(state[torchbearer.STOP_TRAINING])
def test_max_should_stop(self):
state = {
torchbearer.EPOCH: 1,
torchbearer.STOP_TRAINING: False,
torchbearer.METRICS: {'test_metric': 0.001}
}
stopper = EarlyStopping(monitor='test_metric', mode='max')
stopper.on_start(state)
stopper.on_end_epoch(state)
self.assertFalse(state[torchbearer.STOP_TRAINING])
state[torchbearer.METRICS]['test_metric'] = 0.0001
stopper.on_end_epoch(state)
self.assertTrue(state[torchbearer.STOP_TRAINING])
def test_max_should_continue(self):
state = {
torchbearer.EPOCH: 1,
torchbearer.STOP_TRAINING: False,
torchbearer.METRICS: {'test_metric': 0.001}
}
stopper = EarlyStopping(monitor='test_metric', mode='max')
stopper.on_start(state)
stopper.on_end_epoch(state)
self.assertFalse(state[torchbearer.STOP_TRAINING])
state[torchbearer.METRICS]['test_metric'] = 0.01
stopper.on_end_epoch(state)
self.assertFalse(state[torchbearer.STOP_TRAINING])
def test_max_equal_should_stop(self):
state = {
torchbearer.EPOCH: 1,
torchbearer.STOP_TRAINING: False,
torchbearer.METRICS: {'test_metric': 0.001}
}
stopper = EarlyStopping(monitor='test_metric', mode='max')
stopper.on_start(state)
stopper.on_end_epoch(state)
self.assertFalse(state[torchbearer.STOP_TRAINING])
stopper.on_end_epoch(state)
self.assertTrue(state[torchbearer.STOP_TRAINING])
    def test_min_equal_should_stop(self):
state = {
torchbearer.EPOCH: 1,
torchbearer.STOP_TRAINING: False,
torchbearer.METRICS: {'test_metric': 0.001}
}
stopper = EarlyStopping(monitor='test_metric', mode='min')
stopper.on_start(state)
stopper.on_end_epoch(state)
self.assertFalse(state[torchbearer.STOP_TRAINING])
stopper.on_end_epoch(state)
self.assertTrue(state[torchbearer.STOP_TRAINING])
def test_patience_should_stop(self):
state = {
torchbearer.EPOCH: 1,
torchbearer.STOP_TRAINING: False,
torchbearer.METRICS: {'test_metric': 0.001}
}
stopper = EarlyStopping(monitor='test_metric', patience=3)
stopper.on_start(state)
for i in range(3):
stopper.on_end_epoch(state)
self.assertFalse(state[torchbearer.STOP_TRAINING])
stopper.on_end_epoch(state)
self.assertTrue(state[torchbearer.STOP_TRAINING])
def test_patience_should_continue(self):
state = {
torchbearer.EPOCH: 1,
torchbearer.STOP_TRAINING: False,
torchbearer.METRICS: {'test_metric': 0.001}
}
stopper = EarlyStopping(monitor='test_metric', patience=3)
stopper.on_start(state)
for i in range(3):
stopper.on_end_epoch(state)
self.assertFalse(state[torchbearer.STOP_TRAINING])
state[torchbearer.METRICS]['test_metric'] = 0.0001
stopper.on_end_epoch(state)
self.assertFalse(state[torchbearer.STOP_TRAINING])
def test_min_delta_should_continue(self):
state = {
torchbearer.EPOCH: 1,
torchbearer.STOP_TRAINING: False,
torchbearer.METRICS: {'test_metric': 0.001}
}
stopper = EarlyStopping(monitor='test_metric', mode='max', min_delta=0.1)
stopper.on_start(state)
stopper.on_end_epoch(state)
self.assertFalse(state[torchbearer.STOP_TRAINING])
state[torchbearer.METRICS]['test_metric'] = 0.102
stopper.on_end_epoch(state)
self.assertFalse(state[torchbearer.STOP_TRAINING])
def test_min_delta_should_stop(self):
state = {
torchbearer.EPOCH: 1,
torchbearer.STOP_TRAINING: False,
torchbearer.METRICS: {'test_metric': 0.001}
}
stopper = EarlyStopping(monitor='test_metric', mode='max', min_delta=0.1)
stopper.on_start(state)
stopper.on_end_epoch(state)
self.assertFalse(state[torchbearer.STOP_TRAINING])
state[torchbearer.METRICS]['test_metric'] = 0.10
stopper.on_end_epoch(state)
self.assertTrue(state[torchbearer.STOP_TRAINING])
def test_auto_should_be_min(self):
state = {
torchbearer.EPOCH: 1,
torchbearer.STOP_TRAINING: False,
torchbearer.METRICS: {'test_metric': 0.001}
}
stopper = EarlyStopping(monitor='test_metric')
stopper.on_start(state)
stopper.on_end_epoch(state)
self.assertTrue(stopper.mode == 'min')
def test_auto_should_be_max(self):
state = {
torchbearer.EPOCH: 1,
torchbearer.STOP_TRAINING: False,
torchbearer.METRICS: {'acc_metric': 0.001}
}
stopper = EarlyStopping(monitor='acc_metric')
stopper.on_start(state)
stopper.on_end_epoch(state)
self.assertTrue(stopper.mode == 'max')
def test_monitor_should_continue(self):
state = {
torchbearer.EPOCH: 1,
torchbearer.STOP_TRAINING: False,
torchbearer.METRICS: {'test_metric_1': 0.001, 'test_metric_2': 0.001}
}
stopper = EarlyStopping(monitor='test_metric_2', mode='max')
stopper.on_start(state)
stopper.on_end_epoch(state)
self.assertFalse(state[torchbearer.STOP_TRAINING])
state[torchbearer.METRICS]['test_metric_1'] = 0.0001
state[torchbearer.METRICS]['test_metric_2'] = 0.01
stopper.on_end_epoch(state)
self.assertFalse(state[torchbearer.STOP_TRAINING])
def test_monitor_should_stop(self):
state = {
torchbearer.EPOCH: 1,
torchbearer.STOP_TRAINING: False,
torchbearer.METRICS: {'test_metric_1': 0.001, 'test_metric_2': 0.001}
}
stopper = EarlyStopping(monitor='test_metric_2', mode='max')
stopper.on_start(state)
stopper.on_end_epoch(state)
self.assertFalse(state[torchbearer.STOP_TRAINING])
state[torchbearer.METRICS]['test_metric_1'] = 0.1
state[torchbearer.METRICS]['test_metric_2'] = 0.0001
stopper.on_end_epoch(state)
self.assertTrue(state[torchbearer.STOP_TRAINING])
def test_state_dict(self):
stopper = EarlyStopping(monitor='test_metric_1')
stopper.wait = 10
stopper.best = 20
state = stopper.state_dict()
stopper = EarlyStopping(monitor='test_metric_1')
self.assertNotEqual(stopper.wait, 10)
stopper.load_state_dict(state)
self.assertEqual(stopper.wait, 10)
self.assertEqual(stopper.best, 20)
| 29.381944
| 86
| 0.63732
|
| true
| true
|
f718bea2b4b0957c8f70bacbe0393777c4196839
| 27
|
py
|
Python
|
pyaxe/axesim/__init__.py
|
sosey/pyaxe
|
f57de55daf77de21d5868ace08b69090778d5975
|
[
"BSD-3-Clause"
] | null | null | null |
pyaxe/axesim/__init__.py
|
sosey/pyaxe
|
f57de55daf77de21d5868ace08b69090778d5975
|
[
"BSD-3-Clause"
] | null | null | null |
pyaxe/axesim/__init__.py
|
sosey/pyaxe
|
f57de55daf77de21d5868ace08b69090778d5975
|
[
"BSD-3-Clause"
] | null | null | null |
from .axesimtasks import *
| 13.5
| 26
| 0.777778
|
| true
| true
|
f718bf45fe77b4e7d788d89bab1b100ccb4a2a7c
| 2,737
|
py
|
Python
|
python-build/python-libs/ase/android.py
|
visdom2000/python-for-android
|
26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891
|
[
"Apache-2.0"
] | 267
|
2015-03-22T15:23:48.000Z
|
2022-03-05T21:57:34.000Z
|
python-build/python-libs/ase/android.py
|
visdom2000/python-for-android
|
26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891
|
[
"Apache-2.0"
] | 133
|
2015-03-21T15:13:43.000Z
|
2021-12-11T23:37:58.000Z
|
python-build/python-libs/ase/android.py
|
visdom2000/python-for-android
|
26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891
|
[
"Apache-2.0"
] | 119
|
2015-04-28T16:07:10.000Z
|
2022-03-18T03:49:48.000Z
|
# Copyright (C) 2017 shimoda kuri65536@hotmail.com
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from __future__ import print_function
__author__ = 'Damon Kohler <damonkohler@gmail.com>'
import sys
import time
import collections
import json
import os
import socket
from logging import warning as warn
PORT = os.environ.get('AP_PORT')
HOST = os.environ.get('AP_HOST')
HANDSHAKE = os.environ.get('AP_HANDSHAKE')
Result = collections.namedtuple('Result', 'id,result,error')
class Android(object):
def __init__(self, addr=None):
if addr is None:
addr = HOST, PORT
if True:
try:
self.conn = socket.create_connection(addr)
except:
self.conn = self.launchSL4A(addr)
if sys.version_info[0] == 2:
self.client = self.conn.makefile('rw')
else:
self.client = self.conn.makefile('rw', encoding='utf-8')
self.id = 0
if HANDSHAKE is not None:
self._authenticate(HANDSHAKE)
def _rpc(self, method, *args):
data = {'id': self.id,
'method': method,
'params': args}
request = json.dumps(data)
self.client.write(request+'\n')
self.client.flush()
response = self.client.readline()
self.id += 1
result = json.loads(response)
if result['error'] is not None:
print(result['error'])
# namedtuple doesn't work with unicode keys.
return Result(id=result['id'], result=result['result'],
error=result['error'], )
def __getattr__(self, name):
def rpc_call(*args):
return self._rpc(name, *args)
return rpc_call
if True:
def launchSL4A(self, addr):
if addr[0] is None:
addr = ("127.0.0.1", addr[1])
if addr[1] is None:
addr = (addr[0], "8888")
sl4a = 'com.googlecode.android_scripting'
cmd = ('am start -a %s.action.LAUNCH_SERVER '
'--ei %s.extra.USE_SERVICE_PORT %s '
'%s/.activity.ScriptingLayerServiceLauncher '
% (sl4a, sl4a, addr[1], sl4a))
warn("launch SL4A with %s" % str(addr))
os.system(cmd)
time.sleep(2)
return socket.create_connection(addr)
# vi: et:ts=4:nowrap
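# Usage sketch (assumption): because __getattr__ above turns any attribute
# access into an RPC call, no per-method wrappers are needed. This assumes an
# SL4A server is reachable via AP_HOST/AP_PORT and exposes the standard
# makeToast RPC; it is illustrative, not part of the original file.
if __name__ == '__main__':
    droid = Android()                              # connects, or launches SL4A
    result = droid.makeToast('Hello from Python')  # dispatched through _rpc()
    print('error:', result.error)                  # None on success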
| 30.076923
| 79
| 0.639021
|
| true
| true
|
f718c07db2037d9de5bc25abef5731563295c516
| 717
|
py
|
Python
|
blog/models.py
|
piratos/ctfbulletin
|
7c8766b89d4d726567d2690d761f6caa5db4d251
|
[
"MIT"
] | 1
|
2015-10-17T17:19:52.000Z
|
2015-10-17T17:19:52.000Z
|
blog/models.py
|
piratos/ctfbulletin
|
7c8766b89d4d726567d2690d761f6caa5db4d251
|
[
"MIT"
] | null | null | null |
blog/models.py
|
piratos/ctfbulletin
|
7c8766b89d4d726567d2690d761f6caa5db4d251
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
from talk.models import Challenger
class Article(models.Model):
author = models.ForeignKey(User)
title = models.CharField(max_length=128)
content = models.TextField(blank=True)
date_post = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return self.title + " by " + self.author.username
class BlogComment(models.Model):
article = models.ForeignKey(Article)
commenter = models.ForeignKey(Challenger)
comment = models.TextField(blank=True)
date_comment = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return 'comment from '+self.commenter.user.username
| 31.173913
| 59
| 0.740586
|
| true
| true
|
f718c0c056dc10de1f4c8b09d04305451105ebae
| 740
|
py
|
Python
|
Algorithms/0021_Merge_Two_Sorted_Lists/Python/Merge_Two_Sorted_Lists_Solution_2.py
|
lht19900714/Leetcode_Solutions
|
dac7a038329a5c1f8a78e86cc6f49116b963f1fb
|
[
"MIT"
] | null | null | null |
Algorithms/0021_Merge_Two_Sorted_Lists/Python/Merge_Two_Sorted_Lists_Solution_2.py
|
lht19900714/Leetcode_Solutions
|
dac7a038329a5c1f8a78e86cc6f49116b963f1fb
|
[
"MIT"
] | null | null | null |
Algorithms/0021_Merge_Two_Sorted_Lists/Python/Merge_Two_Sorted_Lists_Solution_2.py
|
lht19900714/Leetcode_Solutions
|
dac7a038329a5c1f8a78e86cc6f49116b963f1fb
|
[
"MIT"
] | null | null | null |
# Space: O(1)
# Time: O(n)
# Iterative approach
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def mergeTwoLists(self, l1, l2):
res = ListNode(0)
cur_res = res
cur1, cur2 = l1,l2
while cur1 and cur2:
if cur1.val<cur2.val:
cur_res.next = cur1
cur1 = cur1.next
cur_res = cur_res.next
else:
cur_res.next = cur2
cur2 = cur2.next
cur_res = cur_res.next
if cur1:
cur_res.next = cur1
else:
cur_res.next = cur2
return res.next
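# Local driver sketch (assumption): LeetCode normally injects the ListNode class
# referenced in the header comment; the minimal definition and driver below are
# stand-ins so the solution can be exercised outside the judge.
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None


def _build(values):
    # Build a singly-linked list from a Python list and return its head.
    head = ListNode(0)
    cur = head
    for v in values:
        cur.next = ListNode(v)
        cur = cur.next
    return head.next


if __name__ == '__main__':
    merged = Solution().mergeTwoLists(_build([1, 2, 4]), _build([1, 3, 4]))
    out = []
    while merged:
        out.append(merged.val)
        merged = merged.next
    print(out)  # expected: [1, 1, 2, 3, 4, 4]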
| 17.619048
| 38
| 0.483784
|
| true
| true
|
f718c0ccbd1cbc0e5a1fcb274003f494640f1da2
| 4,247
|
py
|
Python
|
test_files/use_cwl-utils_to_read_input_params.py
|
PMCC-BioinformaticsCore/CWLab
|
6bea694c25364a5655d9e68fd29954b568ac9c4b
|
[
"Apache-2.0"
] | null | null | null |
test_files/use_cwl-utils_to_read_input_params.py
|
PMCC-BioinformaticsCore/CWLab
|
6bea694c25364a5655d9e68fd29954b568ac9c4b
|
[
"Apache-2.0"
] | null | null | null |
test_files/use_cwl-utils_to_read_input_params.py
|
PMCC-BioinformaticsCore/CWLab
|
6bea694c25364a5655d9e68fd29954b568ac9c4b
|
[
"Apache-2.0"
] | null | null | null |
import sys

from cwl_utils import parser_v1_0
from re import sub
from cwlab.xls2cwl_job.read_xls import clean_string

# print_pref and is_basic_type_instance are used below but never defined in this
# snippet; these minimal stand-ins are assumptions, not CWLab's own definitions.
print_pref = "[read_inputs]: "
def is_basic_type_instance(value):
    return isinstance(value, (str, int, float, bool))

configs = {}
cwl_document = parser_v1_0.load_document("test_files/workflows/wf_fastqc.cwl")
if isinstance(cwl_document, list):
cwl_documents = cwl_document
for cwl_document_ in cwl_documents:
if clean_string( sub(".*#", "", cwl_document_.id) ) == "main":
cwl_document = cwl_document_
break
inp_records = cwl_document.inputs
for inp_rec in inp_records:
name = clean_string( sub(".*#", "", inp_rec.id) )
is_array = False
null_allowed = False
null_items_allowed = False
default_value = [""]
# test if optional:
if isinstance(inp_rec.type, list):
if len(inp_rec.type) == 2 and "null" in inp_rec.type:
null_allowed = True
inp_rec.type.remove("null")
inp_rec.type = inp_rec.type[0]
else:
            sys.exit( print_pref + "E: unknown type for parameter " + name +
                ": lists of type are only supported when one of two elements is \"null\"")
# test if array:
    if isinstance(inp_rec.type, parser_v1_0.InputArraySchema):
if hasattr(inp_rec.type, "type") and hasattr(inp_rec.type, "items"):
if inp_rec.type.type == "array":
is_array = True
inp_rec.type = inp_rec.type.items
else:
                sys.exit( print_pref + "E: unknown type for parameter " + name )
else:
            sys.exit( print_pref + "E: unknown type for parameter " + name )
# test if "null" is allowed as array item:
if isinstance(inp_rec.type, list):
if len(inp_rec.type) == 2 and "null" in inp_rec.type:
null_items_allowed = True
inp_rec.type.remove("null")
inp_rec.type = inp_rec.type[0]
else:
            sys.exit( print_pref + "E: unknown type for parameter " + name +
                ": lists of type are only supported when one of two elements is \"null\"")
if isinstance(inp_rec.type, str):
type_ = inp_rec.type
else:
        sys.exit( print_pref + "E: unknown type for parameter " + name )
# get the default:
if hasattr(inp_rec, "default"):
if is_basic_type_instance(inp_rec.default):
default_value = [clean_string(inp_rec.default)]
else:
if is_array and isinstance(inp_rec.default, list):
default_value = []
for entry in inp_rec.default:
                    if is_basic_type_instance(entry):
default_value.append(clean_string(entry))
else:
print(print_pref + "W: invalid default value for parameter " + name +
": will be ignored", file=sys.stderr)
default_value = [""]
elif type_ == "File" and isinstance(inp_rec.default, dict):
print(print_pref + "W: invalid default value for parameter " + name +
": defaults for File class are not supported yet; will be ignored", file=sys.stderr)
default_value = [""]
else:
print(print_pref + "W: invalid default value for parameter " + name +
": will be ignored", file=sys.stderr)
default_value = [""]
else:
default_value = [""]
# read secondary files:
if type_ == "File" and hasattr(inp_rec, "secondaryFiles"):
if isinstance(inp_rec.secondaryFiles, str):
secondary_files = [ inp_rec.secondaryFiles ]
elif isinstance(inp_rec.secondaryFiles, list):
secondary_files = inp_rec.secondaryFiles
else:
sys.exit( print_pref + "E: invalid secondaryFiles field for parameter " + name )
else:
secondary_files = [ "" ]
# assemble config parameters:
inp_configs = {
"type": type_,
"is_array": is_array,
"null_allowed": null_allowed,
"null_items_allowed": null_items_allowed,
"secondary_files": secondary_files,
"default_value": default_value
}
# add to configs dict:
configs[ name ] = inp_configs
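# Illustration (assumption): a hypothetical optional string-array input named
# "sample_ids" (not taken from wf_fastqc.cwl) would be recorded by the loop
# above roughly as:
#
#   configs["sample_ids"] = {
#       "type": "string",
#       "is_array": True,
#       "null_allowed": True,
#       "null_items_allowed": False,
#       "secondary_files": [""],
#       "default_value": [""],
#   }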
| 41.637255
| 104
| 0.584177
|
| true
| true
|
f718c1ac19167819014f5c812c200a12969781c3
| 3,286
|
py
|
Python
|
opencv/sources/samples/python/tutorial_code/imgProc/anisotropic_image_segmentation/anisotropic_image_segmentation.py
|
vrushank-agrawal/opencv-x64-cmake
|
3f9486510d706c8ac579ac82f5d58f667f948124
|
[
"Apache-2.0"
] | null | null | null |
opencv/sources/samples/python/tutorial_code/imgProc/anisotropic_image_segmentation/anisotropic_image_segmentation.py
|
vrushank-agrawal/opencv-x64-cmake
|
3f9486510d706c8ac579ac82f5d58f667f948124
|
[
"Apache-2.0"
] | null | null | null |
opencv/sources/samples/python/tutorial_code/imgProc/anisotropic_image_segmentation/anisotropic_image_segmentation.py
|
vrushank-agrawal/opencv-x64-cmake
|
3f9486510d706c8ac579ac82f5d58f667f948124
|
[
"Apache-2.0"
] | null | null | null |
import cv2 as cv
import numpy as np
import argparse
W = 52 # window size is WxW
C_Thr = 0.43 # threshold for coherency
LowThr = 35 # threshold1 for orientation, it ranges from 0 to 180
HighThr = 57 # threshold2 for orientation, it ranges from 0 to 180
## [calcGST]
## [calcJ_header]
## [calcGST_proto]
def calcGST(inputIMG, w):
## [calcGST_proto]
img = inputIMG.astype(np.float32)
# GST components calculation (start)
# J = (J11 J12; J12 J22) - GST
    imgDiffX = cv.Sobel(img, cv.CV_32F, 1, 0, ksize=3)
    imgDiffY = cv.Sobel(img, cv.CV_32F, 0, 1, ksize=3)
imgDiffXY = cv.multiply(imgDiffX, imgDiffY)
## [calcJ_header]
imgDiffXX = cv.multiply(imgDiffX, imgDiffX)
imgDiffYY = cv.multiply(imgDiffY, imgDiffY)
J11 = cv.boxFilter(imgDiffXX, cv.CV_32F, (w,w))
J22 = cv.boxFilter(imgDiffYY, cv.CV_32F, (w,w))
J12 = cv.boxFilter(imgDiffXY, cv.CV_32F, (w,w))
# GST components calculations (stop)
# eigenvalue calculation (start)
# lambda1 = 0.5*(J11 + J22 + sqrt((J11-J22)^2 + 4*J12^2))
# lambda2 = 0.5*(J11 + J22 - sqrt((J11-J22)^2 + 4*J12^2))
tmp1 = J11 + J22
tmp2 = J11 - J22
tmp2 = cv.multiply(tmp2, tmp2)
tmp3 = cv.multiply(J12, J12)
tmp4 = np.sqrt(tmp2 + 4.0 * tmp3)
lambda1 = 0.5*(tmp1 + tmp4) # biggest eigenvalue
lambda2 = 0.5*(tmp1 - tmp4) # smallest eigenvalue
# eigenvalue calculation (stop)
# Coherency calculation (start)
# Coherency = (lambda1 - lambda2)/(lambda1 + lambda2)) - measure of anisotropism
# Coherency is anisotropy degree (consistency of local orientation)
imgCoherencyOut = cv.divide(lambda1 - lambda2, lambda1 + lambda2)
# Coherency calculation (stop)
# orientation angle calculation (start)
# tan(2*Alpha) = 2*J12/(J22 - J11)
# Alpha = 0.5 atan2(2*J12/(J22 - J11))
imgOrientationOut = cv.phase(J22 - J11, 2.0 * J12, angleInDegrees = True)
imgOrientationOut = 0.5 * imgOrientationOut
# orientation angle calculation (stop)
return imgCoherencyOut, imgOrientationOut
## [calcGST]
parser = argparse.ArgumentParser(description='Code for Anisotropic image segmentation tutorial.')
parser.add_argument('-i', '--input', help='Path to input image.', required=True)
args = parser.parse_args()
imgIn = cv.imread(args.input, cv.IMREAD_GRAYSCALE)
if imgIn is None:
print('Could not open or find the image: {}'.format(args.input))
exit(0)
## [main_extra]
## [main]
imgCoherency, imgOrientation = calcGST(imgIn, W)
## [thresholding]
_, imgCoherencyBin = cv.threshold(imgCoherency, C_Thr, 255, cv.THRESH_BINARY)
_, imgOrientationBin = cv.threshold(imgOrientation, LowThr, HighThr, cv.THRESH_BINARY)
## [thresholding]
## [combining]
imgBin = cv.bitwise_and(imgCoherencyBin, imgOrientationBin)
## [combining]
## [main]
imgCoherency = cv.normalize(imgCoherency, None, alpha=0, beta=1, norm_type=cv.NORM_MINMAX, dtype=cv.CV_32F)
imgOrientation = cv.normalize(imgOrientation, None, alpha=0, beta=1, norm_type=cv.NORM_MINMAX, dtype=cv.CV_32F)
cv.imshow('result.jpg', np.uint8(0.5*(imgIn + imgBin)))
cv.imshow('Coherency.jpg', imgCoherency)
cv.imshow('Orientation.jpg', imgOrientation)
cv.waitKey(0)
## [main_extra]
| 35.333333
| 112
| 0.665855
|
| true
| true
|
f718c3df162456688b5938954dae9350e0b38d1b
| 902
|
py
|
Python
|
skyfall.py
|
jopetty/skyfall
|
0a048899c60de6666542a31abf3232cc95375998
|
[
"MIT"
] | null | null | null |
skyfall.py
|
jopetty/skyfall
|
0a048899c60de6666542a31abf3232cc95375998
|
[
"MIT"
] | null | null | null |
skyfall.py
|
jopetty/skyfall
|
0a048899c60de6666542a31abf3232cc95375998
|
[
"MIT"
] | null | null | null |
def get_sidereal_time(time: float, date: (int, int, int), longitude: float) -> float:
year, month, day = date
# Calculate the Julian Day
A = int(year/100)
B = 2 - A + int(A/4)
jd = int(365.25*(year + 4716)) + int(30.6001*(month + 1)) + day + B - 1524.5
# Calculate Greenwich Sidereal Time
T = (jd + time/24.0 - 2451545.0)/36525.0
qo = 280.46061837 + 360.98564736629 * (jd -2451545.0) + 0.000387933 * T**2 - T**3/38710000
# Calculate Local Sidereal Time
q = qo + longitude
return q
def get_local_hour_angle(lst: float, right_ascension: float) -> float:
return lst - right_ascension
def get_coordinates(latitude: float, declination: float, lha: float) -> (float, float):
pass
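# --- Illustrative sketch (editor's addition): get_coordinates is left as a stub
# above; one standard way to finish it is the equatorial-to-horizontal conversion
# below. The helper name and degree-based inputs are assumptions, not the author's.
# import math
# def get_coordinates_sketch(latitude, declination, lha):
#     lat, dec, h = map(math.radians, (latitude, declination, lha))
#     alt = math.asin(math.sin(dec)*math.sin(lat) + math.cos(dec)*math.cos(lat)*math.cos(h))
#     az = math.atan2(-math.sin(h), math.tan(dec)*math.cos(lat) - math.sin(lat)*math.cos(h))
#     return math.degrees(alt), math.degrees(az) % 360.0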
if __name__ == "__main__":
currtime = 23.87778
currdate = (2020, 3, 8)
currlong = -114.093810
ljd = get_sidereal_time(currtime, currdate, currlong)
print(f"Local Sidereal Time: {ljd}")
| 30.066667
| 92
| 0.665188
|
def get_sidereal_time(time: float, date: (int, int, int), longitude: float) -> float:
year, month, day = date
A = int(year/100)
B = 2 - A + int(A/4)
jd = int(365.25*(year + 4716)) + int(30.6001*(month + 1)) + day + B - 1524.5
T = (jd + time/24.0 - 2451545.0)/36525.0
qo = 280.46061837 + 360.98564736629 * (jd -2451545.0) + 0.000387933 * T**2 - T**3/38710000
q = qo + longitude
return q
def get_local_hour_angle(lst: float, right_ascension: float) -> float:
return lst - right_ascension
def get_coordinates(latitude: float, declination: float, lha: float) -> (float, float):
pass
if __name__ == "__main__":
currtime = 23.87778
currdate = (2020, 3, 8)
currlong = -114.093810
ljd = get_sidereal_time(currtime, currdate, currlong)
print(f"Local Sidereal Time: {ljd}")
| true
| true
|
f718c4016a855511a2343e1d1e93784a2accafd2
| 1,002
|
py
|
Python
|
setup.py
|
boettiger-lab/rl-toolkit
|
cbf8060c79779f134101ef1c41b4b8ee44c61e4e
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
boettiger-lab/rl-toolkit
|
cbf8060c79779f134101ef1c41b4b8ee44c61e4e
|
[
"BSD-3-Clause"
] | 1
|
2021-04-14T16:25:08.000Z
|
2021-06-08T00:50:36.000Z
|
setup.py
|
boettiger-lab/rl-toolkit
|
cbf8060c79779f134101ef1c41b4b8ee44c61e4e
|
[
"BSD-3-Clause"
] | null | null | null |
# setup.py
from setuptools import find_packages, setup
setup(
name="rl_toolkit",
version="0.0.0",
packages=find_packages(exclude=["docs", "scripts", "tests"]),
install_requires=[
"gym",
"gym_fishing",
"gym_conservation",
"numpy",
"pandas",
"matplotlib",
"stable_baselines3",
],
extras_require={
"tests": [
# Run tests and coverage
"pytest",
"pytest-cov",
"pytest-env",
"pytest-xdist",
# Type check
"pytype",
# Lint code
"flake8>=3.8",
# Sort imports
"isort>=5.0",
# Reformat
"black",
],
"docs": [
"sphinx",
"sphinx-autobuild",
"sphinx-rtd-theme",
# For spelling
"sphinxcontrib.spelling",
# Type hints support
"sphinx-autodoc-typehints",
],
},
)
| 22.772727
| 65
| 0.44511
|
from setuptools import find_packages, setup
setup(
name="rl_toolkit",
version="0.0.0",
packages=find_packages(exclude=["docs", "scripts", "tests"]),
install_requires=[
"gym",
"gym_fishing",
"gym_conservation",
"numpy",
"pandas",
"matplotlib",
"stable_baselines3",
],
extras_require={
"tests": [
"pytest",
"pytest-cov",
"pytest-env",
"pytest-xdist",
"pytype",
"flake8>=3.8",
"isort>=5.0",
"black",
],
"docs": [
"sphinx",
"sphinx-autobuild",
"sphinx-rtd-theme",
"sphinxcontrib.spelling",
"sphinx-autodoc-typehints",
],
},
)
| true
| true
|
f718c4cdd12f00e7309d3ba6c81331d8862050b5
| 3,991
|
py
|
Python
|
src/nodemgr/common/podman_containers.py
|
jnpr-pranav/contrail-controller
|
428eee37c28c31830fd764315794e1a6e52720c1
|
[
"Apache-2.0"
] | 37
|
2020-09-21T10:42:26.000Z
|
2022-01-09T10:16:40.000Z
|
src/nodemgr/common/podman_containers.py
|
jnpr-pranav/contrail-controller
|
428eee37c28c31830fd764315794e1a6e52720c1
|
[
"Apache-2.0"
] | null | null | null |
src/nodemgr/common/podman_containers.py
|
jnpr-pranav/contrail-controller
|
428eee37c28c31830fd764315794e1a6e52720c1
|
[
"Apache-2.0"
] | 21
|
2020-08-25T12:48:42.000Z
|
2022-03-22T04:32:18.000Z
|
import json
import logging
import subprocess
from nodemgr.common import utils
from nodemgr.common.sandesh.nodeinfo.cpuinfo.ttypes import ProcessCpuInfo
class PodmanContainerMemoryCpuUsage:
def __init__(self, query_, pid_):
self._query = query_
self._cgroup = '/sys/fs/cgroup/memory{0}/memory.stat'.format(utils.get_memory_cgroup(pid_))
@property
def last_cpu(self):
return 0
@property
def last_time(self):
return 0
def _get_rss_from_cgroup(self):
try:
with open(self._cgroup, 'r') as f:
while True:
ll = f.readline()
if not ll:
break
v = ll.partition('rss ')[2]
if v:
return int(v.strip())
except Exception:
logging.exception('memory stat reading')
return 0
def get_process_mem_cpu_info(self):
y = self._query()
if not y:
return None
output = ProcessCpuInfo()
u = y['cpu_percent'] if 'cpu_percent' in y else 0.0
u = 0.0 if '--' == u else float(u[0:-1])
output.cpu_share = u
u = y['mem_percent'] if 'mem_percent' in y else 0
u = 0
if 'mem_usage' in y:
m, _ = y['mem_usage'].split('/', 1)
m = m.rstrip()
if m and m.endswith('kB'):
u = int((2**10) * float(m[0:-2]))
if m and m.endswith('MB'):
u = int((2**20) * float(m[0:-2]))
if m and m.endswith('GB'):
u = int((2**30) * float(m[0:-2]))
output.mem_virt = u // 1024
output.mem_res = self._get_rss_from_cgroup() // 1024
return output
class PodmanContainersInterface:
def _execute(self, arguments_, timeout_=10):
a = ["podman"]
a.extend(arguments_)
p = subprocess.Popen(a, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
o, e = p.communicate(timeout=timeout_)
except subprocess.TimeoutExpired:
p.kill()
o, e = p.communicate()
p.wait()
if e:
logging.critical(e)
return (p.returncode, o)
def _parse_json(self, arguments_):
a = []
a.extend(arguments_)
a.extend(["--format", "json"])
c, o = self._execute(a)
if 0 != c:
# NB. there is nothing to parse
return (c, None)
try:
return (c, json.loads(o))
except Exception:
logging.exception('json parsing')
return (c, None)
def _decorate(self, container_):
if container_:
if 'ID' in container_:
container_['Id'] = container_['ID']
if 'State' in container_:
s = container_['State']
container_['State'] = [
'unknown', 'configured', 'created',
'running', 'stopped', 'paused', 'exited', 'removing'][s]
return container_
def list(self, all_=True):
a = ["ps"]
if all_:
a.append("-a")
_, output = self._parse_json(a)
if output:
for i in output:
self._decorate(i)
return output
def inspect(self, id_):
_, output = self._parse_json(["inspect", id_])
if output and len(output) > 0:
return output[0]
return None
def execute(self, id_, line_):
return self._execute(["exec", id_, '/usr/bin/sh', '-c', line_])
def query_usage(self, id_, last_cpu_=None, last_time_=None):
i = format(id_, 'x').zfill(12)
s = self.inspect(i)
if not s:
raise LookupError(i)
def do_query():
_, x = self._parse_json(["stats", "--no-stream", i])
if not x or len(x) == 0:
return None
return x[0]
return PodmanContainerMemoryCpuUsage(do_query, s['State']['Pid'])
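# --- Illustrative usage sketch (editor's addition, not part of contrail-controller) ---
# Assuming the podman CLI is installed and containers are running:
#     iface = PodmanContainersInterface()
#     for c in iface.list() or []:
#         print(c.get('Id'), c.get('State'))
# query_usage() takes the container id as an integer and re-encodes it as the
# 12-character hex short id before inspecting the container.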
| 27.524138
| 99
| 0.512152
|
import json
import logging
import subprocess
from nodemgr.common import utils
from nodemgr.common.sandesh.nodeinfo.cpuinfo.ttypes import ProcessCpuInfo
class PodmanContainerMemoryCpuUsage:
def __init__(self, query_, pid_):
self._query = query_
self._cgroup = '/sys/fs/cgroup/memory{0}/memory.stat'.format(utils.get_memory_cgroup(pid_))
@property
def last_cpu(self):
return 0
@property
def last_time(self):
return 0
def _get_rss_from_cgroup(self):
try:
with open(self._cgroup, 'r') as f:
while True:
ll = f.readline()
if not ll:
break
v = ll.partition('rss ')[2]
if v:
return int(v.strip())
except Exception:
logging.exception('memory stat reading')
return 0
def get_process_mem_cpu_info(self):
y = self._query()
if not y:
return None
output = ProcessCpuInfo()
u = y['cpu_percent'] if 'cpu_percent' in y else 0.0
u = 0.0 if '--' == u else float(u[0:-1])
output.cpu_share = u
u = y['mem_percent'] if 'mem_percent' in y else 0
u = 0
if 'mem_usage' in y:
m, _ = y['mem_usage'].split('/', 1)
m = m.rstrip()
if m and m.endswith('kB'):
u = int((2**10) * float(m[0:-2]))
if m and m.endswith('MB'):
u = int((2**20) * float(m[0:-2]))
if m and m.endswith('GB'):
u = int((2**30) * float(m[0:-2]))
output.mem_virt = u // 1024
output.mem_res = self._get_rss_from_cgroup() // 1024
return output
class PodmanContainersInterface:
def _execute(self, arguments_, timeout_=10):
a = ["podman"]
a.extend(arguments_)
p = subprocess.Popen(a, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
o, e = p.communicate(timeout=timeout_)
except subprocess.TimeoutExpired:
p.kill()
o, e = p.communicate()
p.wait()
if e:
logging.critical(e)
return (p.returncode, o)
def _parse_json(self, arguments_):
a = []
a.extend(arguments_)
a.extend(["--format", "json"])
c, o = self._execute(a)
if 0 != c:
return (c, None)
try:
return (c, json.loads(o))
except Exception:
logging.exception('json parsing')
return (c, None)
def _decorate(self, container_):
if container_:
if 'ID' in container_:
container_['Id'] = container_['ID']
if 'State' in container_:
s = container_['State']
container_['State'] = [
'unknown', 'configured', 'created',
'running', 'stopped', 'paused', 'exited', 'removing'][s]
return container_
def list(self, all_=True):
a = ["ps"]
if all_:
a.append("-a")
_, output = self._parse_json(a)
if output:
for i in output:
self._decorate(i)
return output
def inspect(self, id_):
_, output = self._parse_json(["inspect", id_])
if output and len(output) > 0:
return output[0]
return None
def execute(self, id_, line_):
return self._execute(["exec", id_, '/usr/bin/sh', '-c', line_])
def query_usage(self, id_, last_cpu_=None, last_time_=None):
i = format(id_, 'x').zfill(12)
s = self.inspect(i)
if not s:
raise LookupError(i)
def do_query():
_, x = self._parse_json(["stats", "--no-stream", i])
if not x or len(x) == 0:
return None
return x[0]
return PodmanContainerMemoryCpuUsage(do_query, s['State']['Pid'])
| true
| true
|
f718c88b539b9df608482abd9c783e4ba2f18bbd
| 1,390
|
py
|
Python
|
client.py
|
melrick8196/string-matcher
|
9854ec03b99fae89664184b1d8a3a81ae3e55001
|
[
"MIT"
] | null | null | null |
client.py
|
melrick8196/string-matcher
|
9854ec03b99fae89664184b1d8a3a81ae3e55001
|
[
"MIT"
] | null | null | null |
client.py
|
melrick8196/string-matcher
|
9854ec03b99fae89664184b1d8a3a81ae3e55001
|
[
"MIT"
] | null | null | null |
from handler.handler import Handler
from handler.char_handler import CharHandler
from handler.dot_handler import DotHandler
from handler.star_handler import StarHandler
from handler.abstract_handler import AbstractHandler
from match import Match
def make_pattern():
head = CharHandler()
c = CharHandler()
d = DotHandler()
t = CharHandler()
head.set_next(StarHandler()).set_next(CharHandler())
print(head)
user_pattern = "c.t"
user_target_string = "act"
print("pattern:{}, target_string:{}".format(user_pattern, user_target_string))
res = client_code(head)
print("final res:",res)
def client_code(handler: Handler) -> None:
"""
The client code is usually suited to work with a single handler. In most
cases, it is not even aware that the handler is part of a chain.
"""
user_pattern = "c*t"
user_target_string = "xxxxcat"
pattern_pos = 0
target_string_pos = 0
for index, ele in enumerate(user_target_string):
print(f"\nClient: Who wants a {ele}?")
result = handler.handle(pattern_pos, user_pattern, index, user_target_string)
print(result)
if result == -1 and index < len(user_target_string):
continue
else:
break
return result
if __name__ == "__main__":
#x = Match("c.t")
#x.find_first_ln("ffffffffffffffack")
make_pattern()
| 30.217391
| 85
| 0.682014
|
from handler.handler import Handler
from handler.char_handler import CharHandler
from handler.dot_handler import DotHandler
from handler.star_handler import StarHandler
from handler.abstract_handler import AbstractHandler
from match import Match
def make_pattern():
head = CharHandler()
c = CharHandler()
d = DotHandler()
t = CharHandler()
head.set_next(StarHandler()).set_next(CharHandler())
print(head)
user_pattern = "c.t"
user_target_string = "act"
print("pattern:{}, target_string:{}".format(user_pattern, user_target_string))
res = client_code(head)
print("final res:",res)
def client_code(handler: Handler) -> None:
user_pattern = "c*t"
user_target_string = "xxxxcat"
pattern_pos = 0
target_string_pos = 0
for index, ele in enumerate(user_target_string):
print(f"\nClient: Who wants a {ele}?")
result = handler.handle(pattern_pos, user_pattern, index, user_target_string)
print(result)
if result == -1 and index < len(user_target_string):
continue
else:
break
return result
if __name__ == "__main__":
make_pattern()
| true
| true
|
f718c9dd0f055f503653caa02d3327cc81af693d
| 534
|
py
|
Python
|
odds_news/news/migrations/0001_initial.py
|
toyeiei/odds-news
|
01aa0e89a90b033f1468663de760397753544e37
|
[
"MIT"
] | 1
|
2021-02-11T03:36:48.000Z
|
2021-02-11T03:36:48.000Z
|
odds_news/news/migrations/0001_initial.py
|
toyeiei/odds-news
|
01aa0e89a90b033f1468663de760397753544e37
|
[
"MIT"
] | null | null | null |
odds_news/news/migrations/0001_initial.py
|
toyeiei/odds-news
|
01aa0e89a90b033f1468663de760397753544e37
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.6 on 2021-02-11 03:58
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='News',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=300)),
('content', models.TextField()),
],
),
]
| 23.217391
| 114
| 0.561798
|
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='News',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=300)),
('content', models.TextField()),
],
),
]
| true
| true
|
f718ca8de83edf266222648897f5916fcb4f332f
| 5,624
|
py
|
Python
|
infra/base-sim/hwfutils/hwfutils/seed_composer.py
|
il-steffen/hw-fuzzing
|
63c05761a524364e299206ee9376b8cf9f852930
|
[
"Apache-2.0"
] | 31
|
2020-10-09T05:52:54.000Z
|
2022-01-24T18:50:41.000Z
|
infra/base-sim/hwfutils/hwfutils/seed_composer.py
|
il-steffen/hw-fuzzing
|
63c05761a524364e299206ee9376b8cf9f852930
|
[
"Apache-2.0"
] | 2
|
2021-05-25T18:34:19.000Z
|
2021-09-13T02:34:32.000Z
|
infra/base-sim/hwfutils/hwfutils/seed_composer.py
|
il-steffen/hw-fuzzing
|
63c05761a524364e299206ee9376b8cf9f852930
|
[
"Apache-2.0"
] | 5
|
2021-02-16T12:22:17.000Z
|
2021-10-18T10:23:55.000Z
|
#!/usr/bin/python3
# Copyright 2020 Timothy Trippel
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import subprocess
import sys
import prettytable
import yaml
from hwfutils.string_color import color_str_green as green
from hwfutils.string_color import color_str_red as red
from hwfutils.string_color import color_str_yellow as yellow
from hwfutils.tlul_fuzz_instr import TLULFuzzInstr
def dump_seed_file_to_stdin(output_file_name):
"""Dumps generated seed file in hex format to STDIN."""
print(output_file_name + ":")
cmd = ["xxd", output_file_name]
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
print(red("ERROR: cannot dump generated seed file."))
sys.exit(1)
def gen_seed(input_yaml_file_name, output_file_name, verbose):
"""Parse YAML HW fuzzing opcodes and translates them in binary to file."""
print(f"Creating fuzzer seed from YAML: {input_yaml_file_name} ...")
with open(input_yaml_file_name, "r") as fp:
fuzz_opcodes = yaml.load(fp, Loader=yaml.Loader)
with open(output_file_name, "wb") as fp:
for instr in fuzz_opcodes:
hwf_instr = TLULFuzzInstr(instr)
if verbose:
print(hwf_instr)
for _ in range(hwf_instr.repeat):
fp.write(hwf_instr.to_bytes())
print(green("Seed file generated!"))
if verbose:
dump_seed_file_to_stdin(output_file_name)
def _print_configs(args):
# Create table to print configurations to STDIN
config_table = prettytable.PrettyTable(header=False)
config_table.title = "Seed Generation Parameters"
config_table.field_names = ["Parameter", "Value"]
# Add parameter values to table
config_table.add_row(["Input (YAML) Filename", args.input_filename])
config_table.add_row(["Output Filename", args.output_filename])
config_table.add_row(["Frame Type", args.frame_type])
config_table.add_row(["Opcode Size (# bytes)", args.opcode_size])
config_table.add_row(["Address Size (# bytes)", args.address_size])
config_table.add_row(["Data Size (# bytes)", args.data_size])
# Print table
config_table.align = "l"
print(yellow(config_table.get_string()))
def parse_args(argv):
module_description = "OpenTitan Fuzzing Seed Composer"
parser = argparse.ArgumentParser(description=module_description)
parser.add_argument("--opcode-type",
default=TLULFuzzInstr.opcode_type,
choices=[
"constant",
"mapped",
],
type=str,
help="Fuzzing instruction opcode type.")
parser.add_argument("--instr-type",
default=TLULFuzzInstr.instr_type,
choices=[
"fixed",
"variable",
],
type=str,
help="Fuzzing instruction frame type.")
parser.add_argument("--endianness",
default=TLULFuzzInstr.endianness,
choices=[
"little",
"big",
],
type=str,
help="Endianness of HW Fuzzing Instruction frames.")
parser.add_argument("--opcode-size",
default=TLULFuzzInstr.opcode_size,
type=int,
help="Size of opcode field in bytes.")
parser.add_argument("--address-size",
default=TLULFuzzInstr.address_size,
type=int,
help="Size of address field in bytes")
parser.add_argument("--data-size",
default=TLULFuzzInstr.data_size,
type=int,
help="Size of data field in bytes.")
parser.add_argument("--direct-in-size",
default=TLULFuzzInstr.direct_in_size,
type=int,
help="Size of direct inputs field in bytes.")
parser.add_argument("-v",
"--verbose",
action="store_true",
help="Enable verbose status messages.")
parser.add_argument("input_file_name",
metavar="input.yaml",
help="Input configuration YAML file.")
parser.add_argument("output_file_name",
metavar="afl_seed.hwf",
help="Name of output seed file (hex).")
args = parser.parse_args(argv)
if args.verbose:
_print_configs(args)
return args
def config_tlul_fuzz_instr(args):
TLULFuzzInstr.opcode_type = args.opcode_type
TLULFuzzInstr.instr_type = args.instr_type
TLULFuzzInstr.opcode_size = args.opcode_size
TLULFuzzInstr.address_size = args.address_size
TLULFuzzInstr.data_size = args.data_size
TLULFuzzInstr.direct_in_size = args.direct_in_size
TLULFuzzInstr.endianness = args.endianness
def main(argv):
args = parse_args(argv)
config_tlul_fuzz_instr(args)
gen_seed(args.input_file_name, args.output_file_name, args.verbose)
if __name__ == "__main__":
main(sys.argv[1:])
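# --- Illustrative invocation (editor's addition, not from the OpenTitan repo) ---
# Assuming a YAML file describing TL-UL fuzz opcodes, a typical run would be:
#     python3 seed_composer.py --verbose opcodes.yaml afl_seed.hwf
# The file names above are placeholders; only the positional metavars
# input.yaml and afl_seed.hwf come from the argparse setup in this file.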
| 36.75817
| 76
| 0.63638
|
import argparse
import subprocess
import sys
import prettytable
import yaml
from hwfutils.string_color import color_str_green as green
from hwfutils.string_color import color_str_red as red
from hwfutils.string_color import color_str_yellow as yellow
from hwfutils.tlul_fuzz_instr import TLULFuzzInstr
def dump_seed_file_to_stdin(output_file_name):
print(output_file_name + ":")
cmd = ["xxd", output_file_name]
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
print(red("ERROR: cannot dump generated seed file."))
sys.exit(1)
def gen_seed(input_yaml_file_name, output_file_name, verbose):
print(f"Creating fuzzer seed from YAML: {input_yaml_file_name} ...")
with open(input_yaml_file_name, "r") as fp:
fuzz_opcodes = yaml.load(fp, Loader=yaml.Loader)
with open(output_file_name, "wb") as fp:
for instr in fuzz_opcodes:
hwf_instr = TLULFuzzInstr(instr)
if verbose:
print(hwf_instr)
for _ in range(hwf_instr.repeat):
fp.write(hwf_instr.to_bytes())
print(green("Seed file generated!"))
if verbose:
dump_seed_file_to_stdin(output_file_name)
def _print_configs(args):
config_table = prettytable.PrettyTable(header=False)
config_table.title = "Seed Generation Parameters"
config_table.field_names = ["Parameter", "Value"]
config_table.add_row(["Input (YAML) Filename", args.input_filename])
config_table.add_row(["Output Filename", args.output_filename])
config_table.add_row(["Frame Type", args.frame_type])
config_table.add_row(["Opcode Size (# bytes)", args.opcode_size])
config_table.add_row(["Address Size (# bytes)", args.address_size])
config_table.add_row(["Data Size (# bytes)", args.data_size])
config_table.align = "l"
print(yellow(config_table.get_string()))
def parse_args(argv):
module_description = "OpenTitan Fuzzing Seed Composer"
parser = argparse.ArgumentParser(description=module_description)
parser.add_argument("--opcode-type",
default=TLULFuzzInstr.opcode_type,
choices=[
"constant",
"mapped",
],
type=str,
help="Fuzzing instruction opcode type.")
parser.add_argument("--instr-type",
default=TLULFuzzInstr.instr_type,
choices=[
"fixed",
"variable",
],
type=str,
help="Fuzzing instruction frame type.")
parser.add_argument("--endianness",
default=TLULFuzzInstr.endianness,
choices=[
"little",
"big",
],
type=str,
help="Endianness of HW Fuzzing Instruction frames.")
parser.add_argument("--opcode-size",
default=TLULFuzzInstr.opcode_size,
type=int,
help="Size of opcode field in bytes.")
parser.add_argument("--address-size",
default=TLULFuzzInstr.address_size,
type=int,
help="Size of address field in bytes")
parser.add_argument("--data-size",
default=TLULFuzzInstr.data_size,
type=int,
help="Size of data field in bytes.")
parser.add_argument("--direct-in-size",
default=TLULFuzzInstr.direct_in_size,
type=int,
help="Size of direct inputs field in bytes.")
parser.add_argument("-v",
"--verbose",
action="store_true",
help="Enable verbose status messages.")
parser.add_argument("input_file_name",
metavar="input.yaml",
help="Input configuration YAML file.")
parser.add_argument("output_file_name",
metavar="afl_seed.hwf",
help="Name of output seed file (hex).")
args = parser.parse_args(argv)
if args.verbose:
_print_configs(args)
return args
def config_tlul_fuzz_instr(args):
TLULFuzzInstr.opcode_type = args.opcode_type
TLULFuzzInstr.instr_type = args.instr_type
TLULFuzzInstr.opcode_size = args.opcode_size
TLULFuzzInstr.address_size = args.address_size
TLULFuzzInstr.data_size = args.data_size
TLULFuzzInstr.direct_in_size = args.direct_in_size
TLULFuzzInstr.endianness = args.endianness
def main(argv):
args = parse_args(argv)
config_tlul_fuzz_instr(args)
gen_seed(args.input_file_name, args.output_file_name, args.verbose)
if __name__ == "__main__":
main(sys.argv[1:])
| true
| true
|
f718caa21924c30a5b8e61dde532d1a5687b439a
| 1,670
|
py
|
Python
|
igibson/test/test_motion_planning.py
|
fxia22/gibson_demos
|
5f8d253694b23b41c53959774203ba5787578b74
|
[
"MIT"
] | 1
|
2021-08-03T23:59:21.000Z
|
2021-08-03T23:59:21.000Z
|
igibson/test/test_motion_planning.py
|
fxia22/gibson_demos
|
5f8d253694b23b41c53959774203ba5787578b74
|
[
"MIT"
] | null | null | null |
igibson/test/test_motion_planning.py
|
fxia22/gibson_demos
|
5f8d253694b23b41c53959774203ba5787578b74
|
[
"MIT"
] | 1
|
2021-12-01T16:09:01.000Z
|
2021-12-01T16:09:01.000Z
|
import igibson
from igibson.envs.igibson_env import iGibsonEnv
from time import time
import os
from igibson.utils.assets_utils import download_assets, download_demo_data
from igibson.utils.motion_planning_wrapper import MotionPlanningWrapper
import numpy as np
import matplotlib.pyplot as plt
def test_occupancy_grid():
print("Test env")
download_assets()
download_demo_data()
config_filename = os.path.join(igibson.root_path, 'test', 'test_house_occupancy_grid.yaml')
nav_env = iGibsonEnv(config_file=config_filename, mode='headless')
nav_env.reset()
nav_env.robots[0].set_position_orientation([0,0,0],[0,0,0,1])
nav_env.simulator.step()
action = nav_env.action_space.sample()
ts = nav_env.step(action)
assert np.sum(ts[0]['occupancy_grid'] == 0) > 0
assert np.sum(ts[0]['occupancy_grid'] == 1) > 0
plt.imshow(ts[0]['occupancy_grid'][:,:,0])
plt.colorbar()
plt.savefig('occupancy_grid.png')
nav_env.clean()
def test_base_planning():
print("Test env")
download_assets()
download_demo_data()
config_filename = os.path.join(igibson.root_path, 'test', 'test_house_occupancy_grid.yaml')
nav_env = iGibsonEnv(config_file=config_filename, mode='headless')
motion_planner = MotionPlanningWrapper(nav_env)
state = nav_env.reset()
nav_env.robots[0].set_position_orientation([0,0,0],[0,0,0,1])
nav_env.simulator.step()
plan = None
itr = 0
while plan is None and itr < 10:
plan = motion_planner.plan_base_motion([0.5,0,0])
print(plan)
itr += 1
motion_planner.dry_run_base_plan(plan)
assert len(plan) > 0
nav_env.clean()
| 31.509434
| 95
| 0.707784
|
import igibson
from igibson.envs.igibson_env import iGibsonEnv
from time import time
import os
from igibson.utils.assets_utils import download_assets, download_demo_data
from igibson.utils.motion_planning_wrapper import MotionPlanningWrapper
import numpy as np
import matplotlib.pyplot as plt
def test_occupancy_grid():
print("Test env")
download_assets()
download_demo_data()
config_filename = os.path.join(igibson.root_path, 'test', 'test_house_occupancy_grid.yaml')
nav_env = iGibsonEnv(config_file=config_filename, mode='headless')
nav_env.reset()
nav_env.robots[0].set_position_orientation([0,0,0],[0,0,0,1])
nav_env.simulator.step()
action = nav_env.action_space.sample()
ts = nav_env.step(action)
assert np.sum(ts[0]['occupancy_grid'] == 0) > 0
assert np.sum(ts[0]['occupancy_grid'] == 1) > 0
plt.imshow(ts[0]['occupancy_grid'][:,:,0])
plt.colorbar()
plt.savefig('occupancy_grid.png')
nav_env.clean()
def test_base_planning():
print("Test env")
download_assets()
download_demo_data()
config_filename = os.path.join(igibson.root_path, 'test', 'test_house_occupancy_grid.yaml')
nav_env = iGibsonEnv(config_file=config_filename, mode='headless')
motion_planner = MotionPlanningWrapper(nav_env)
state = nav_env.reset()
nav_env.robots[0].set_position_orientation([0,0,0],[0,0,0,1])
nav_env.simulator.step()
plan = None
itr = 0
while plan is None and itr < 10:
plan = motion_planner.plan_base_motion([0.5,0,0])
print(plan)
itr += 1
motion_planner.dry_run_base_plan(plan)
assert len(plan) > 0
nav_env.clean()
| true
| true
|
f718caa3270dc05500c06ad33fedb631d114128e
| 1,122
|
py
|
Python
|
easy/27.removeElement.py
|
nanfeng-dada/leetcode-
|
255c0e8ef2f19d7a384a28c1abb3bea085d597e8
|
[
"MIT"
] | null | null | null |
easy/27.removeElement.py
|
nanfeng-dada/leetcode-
|
255c0e8ef2f19d7a384a28c1abb3bea085d597e8
|
[
"MIT"
] | null | null | null |
easy/27.removeElement.py
|
nanfeng-dada/leetcode-
|
255c0e8ef2f19d7a384a28c1abb3bea085d597e8
|
[
"MIT"
] | null | null | null |
# In Python, assignment just rebinds a name, so the filtered list can be assigned back to nums directly
class Solution():
def removeElement(self, nums: list, val: int) -> int:
lst=[]
for i in range(len(nums)):
if nums[i]!=val:
lst.append(nums[i])
nums[:]=lst
return len(lst)
# Count-and-remove approach using Python list methods
class Solution2:
def removeElement(self, nums, val):
c = nums.count(val)
i = 0
while i < c:
nums.remove(val)
i += 1
return len(nums)
# The standard solution uses fast/slow (two) pointers
class Solution1():
def removeElement(self, nums: list, val: int) -> int:
cur_next=0
for j in range(len(nums)):
if nums[j]!=val:
nums[cur_next]=nums[j]
cur_next+=1
return cur_next
# Another way of writing the solution above
class Solution4:
def removeElement(self, nums: list, val: int) -> int:
i = 0
while i < len(nums):
if nums[i] == val:
nums[i] = nums[-1]
del nums[-1]
else:
i += 1
return len(nums)
if __name__=="__main__":
a=Solution1()
print(a.removeElement([3,2,2,3],3))
| 26.093023
| 57
| 0.5
|
class Solution():
def removeElement(self, nums: list, val: int) -> int:
lst=[]
for i in range(len(nums)):
if nums[i]!=val:
lst.append(nums[i])
nums[:]=lst
return len(lst)
class Solution2:
def removeElement(self, nums, val):
c = nums.count(val)
i = 0
while i < c:
nums.remove(val)
i += 1
return len(nums)
class Solution1():
def removeElement(self, nums: list, val: int) -> int:
cur_next=0
for j in range(len(nums)):
if nums[j]!=val:
nums[cur_next]=nums[j]
cur_next+=1
return cur_next
class Solution4:
def removeElement(self, nums: list, val: int) -> int:
i = 0
while i < len(nums):
if nums[i] == val:
nums[i] = nums[-1]
del nums[-1]
else:
i += 1
return len(nums)
if __name__=="__main__":
a=Solution1()
print(a.removeElement([3,2,2,3],3))
| true
| true
|
f718cafcdc12bd1668b77de58ecb4b9f95b1567b
| 999
|
py
|
Python
|
tests/flows/test_fpgaflow.py
|
mfkiwl/siliconcompiler
|
49a16d9a07c526821afe1ce2f2d77394e439ca05
|
[
"Apache-2.0"
] | 1
|
2022-01-19T01:12:43.000Z
|
2022-01-19T01:12:43.000Z
|
tests/flows/test_fpgaflow.py
|
mfkiwl/siliconcompiler
|
49a16d9a07c526821afe1ce2f2d77394e439ca05
|
[
"Apache-2.0"
] | null | null | null |
tests/flows/test_fpgaflow.py
|
mfkiwl/siliconcompiler
|
49a16d9a07c526821afe1ce2f2d77394e439ca05
|
[
"Apache-2.0"
] | null | null | null |
import os
import subprocess
import pytest
##################################
@pytest.mark.eda
@pytest.mark.quick
def test_icebreaker(scroot):
'''Basic FPGA test: build the Blinky example by running `sc` as a command-line app.
'''
# Use subprocess to test running the `sc` scripts as a command-line program.
# Pipe stdout to /dev/null to avoid printing to the terminal.
blinky_ex_dir = os.path.join(scroot, 'examples', 'blinky')
# Run the build command for an iCE40 board.
subprocess.run(['sc',
os.path.join(blinky_ex_dir, 'blinky.v'),
'-read_pcf', f"import 0 {os.path.join(blinky_ex_dir, 'icebreaker.pcf')}",
'-design', 'blinky',
'-target', 'fpgaflow_ice40up5k-sg48'])
# Verify that a bitstream was generated
assert os.path.isfile('build/blinky/job0/bitstream/0/outputs/blinky.bit')
if __name__ == "__main__":
from tests.fixtures import scroot
test_icebreaker(scroot())
| 34.448276
| 93
| 0.628629
|
import os
import subprocess
import pytest
@pytest.mark.eda
@pytest.mark.quick
def test_icebreaker(scroot):
blinky_ex_dir = os.path.join(scroot, 'examples', 'blinky')
subprocess.run(['sc',
os.path.join(blinky_ex_dir, 'blinky.v'),
'-read_pcf', f"import 0 {os.path.join(blinky_ex_dir, 'icebreaker.pcf')}",
'-design', 'blinky',
'-target', 'fpgaflow_ice40up5k-sg48'])
assert os.path.isfile('build/blinky/job0/bitstream/0/outputs/blinky.bit')
if __name__ == "__main__":
from tests.fixtures import scroot
test_icebreaker(scroot())
| true
| true
|
f718cb4ed172fde3f674b4de0949910cbac3415f
| 645
|
py
|
Python
|
yandex_checkout/domain/models/payment_data/request/payment_data_applepay.py
|
tonchik-tm/yandex-checkout-sdk-python
|
7680e85a3e3416a1b3d2a6dd6bd3de84ba646d1d
|
[
"MIT"
] | 1
|
2021-03-19T06:47:48.000Z
|
2021-03-19T06:47:48.000Z
|
yandex_checkout/domain/models/payment_data/request/payment_data_applepay.py
|
tonchik-tm/yandex-checkout-sdk-python
|
7680e85a3e3416a1b3d2a6dd6bd3de84ba646d1d
|
[
"MIT"
] | null | null | null |
yandex_checkout/domain/models/payment_data/request/payment_data_applepay.py
|
tonchik-tm/yandex-checkout-sdk-python
|
7680e85a3e3416a1b3d2a6dd6bd3de84ba646d1d
|
[
"MIT"
] | null | null | null |
from yandex_checkout.domain.models.payment_data.payment_data import PaymentData
from yandex_checkout.domain.common.payment_method_type import PaymentMethodType
class PaymentDataApplepay(PaymentData):
__payment_data = None
def __init__(self, *args, **kwargs):
super(PaymentDataApplepay, self).__init__(*args, **kwargs)
if self.type is None or self.type is not PaymentMethodType.APPLEPAY:
self.type = PaymentMethodType.APPLEPAY
@property
def payment_data(self):
return self.__payment_data
@payment_data.setter
def payment_data(self, value):
self.__payment_data = str(value)
| 32.25
| 79
| 0.739535
|
from yandex_checkout.domain.models.payment_data.payment_data import PaymentData
from yandex_checkout.domain.common.payment_method_type import PaymentMethodType
class PaymentDataApplepay(PaymentData):
__payment_data = None
def __init__(self, *args, **kwargs):
super(PaymentDataApplepay, self).__init__(*args, **kwargs)
if self.type is None or self.type is not PaymentMethodType.APPLEPAY:
self.type = PaymentMethodType.APPLEPAY
@property
def payment_data(self):
return self.__payment_data
@payment_data.setter
def payment_data(self, value):
self.__payment_data = str(value)
| true
| true
|
f718cc3919aa36f93ba35bd7f65b86bfe5177b51
| 45,740
|
py
|
Python
|
Joint_Event_Extraction.py
|
adityabasu1/Event-Extraction-NLP
|
98faa88d36f09330ebce6fc180ab2f087776f2e1
|
[
"MIT"
] | null | null | null |
Joint_Event_Extraction.py
|
adityabasu1/Event-Extraction-NLP
|
98faa88d36f09330ebce6fc180ab2f087776f2e1
|
[
"MIT"
] | null | null | null |
Joint_Event_Extraction.py
|
adityabasu1/Event-Extraction-NLP
|
98faa88d36f09330ebce6fc180ab2f087776f2e1
|
[
"MIT"
] | 3
|
2021-11-25T08:21:13.000Z
|
2021-11-26T14:12:03.000Z
|
import sys
import os
import numpy as np
import random
from collections import OrderedDict
import pickle
import datetime
from tqdm import tqdm
from recordclass import recordclass
import math
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import json
# Helper funcs
def custom_print(*msg):
for i in range(0, len(msg)):
if i == len(msg) - 1:
print(msg[i])
logger.write(str(msg[i]) + '\n')
else:
print(msg[i], ' ', end='')
logger.write(str(msg[i]))
def load_word_embedding(embed_file, vocab):
custom_print('vocab length:', len(vocab))
embed_vocab = OrderedDict()
rev_embed_vocab = OrderedDict()
embed_matrix = list()
embed_vocab['<PAD>'] = 0
rev_embed_vocab[0] = '<PAD>'
embed_matrix.append(np.zeros(word_embed_dim, dtype=np.float32))
embed_vocab['<UNK>'] = 1
rev_embed_vocab[1] = '<UNK>'
embed_matrix.append(np.random.uniform(-0.25, 0.25, word_embed_dim))
embed_vocab['<SOS>'] = 2
rev_embed_vocab[2] = '<SOS>'
embed_matrix.append(np.random.uniform(-0.25, 0.25, word_embed_dim))
embed_vocab['<EOS>'] = 3
rev_embed_vocab[3] = '<EOS>'
embed_matrix.append(np.random.uniform(-0.25, 0.25, word_embed_dim))
word_idx = 4
with open(embed_file, "r") as f:
for line in f:
parts = line.split()
if len(parts) < word_embed_dim + 1:
continue
word = parts[0]
if word in vocab and vocab[word] >= word_min_freq:
vec = [np.float32(val) for val in parts[1:]]
embed_matrix.append(vec)
embed_vocab[word] = word_idx
rev_embed_vocab[word_idx] = word
word_idx += 1
for word in vocab:
if word not in embed_vocab and vocab[word] >= word_min_freq:
embed_matrix.append(np.random.uniform(-0.25, 0.25, word_embed_dim))
embed_vocab[word] = word_idx
rev_embed_vocab[word_idx] = word
word_idx += 1
custom_print('embed dictionary length:', len(embed_vocab))
return embed_vocab, rev_embed_vocab, np.array(embed_matrix, dtype=np.float32)
def build_vocab(data, events, arguments, roles, vocab_file, embed_file):
vocab = OrderedDict()
char_v = OrderedDict()
char_v['<PAD>'] = 0
char_v['<UNK>'] = 1
char_v[';'] = 2
char_v['|'] = 3
char_idx = 4
for d in data:
for word in d.SrcWords:
if word not in vocab:
vocab[word] = 1
else:
vocab[word] += 1
for c in word:
if c not in char_v:
char_v[c] = char_idx
char_idx += 1
for event in events:
vocab[event] = word_min_freq
for argument in arguments:
vocab[argument] = word_min_freq
for role in roles:
vocab[role] = word_min_freq
vocab[';'] = word_min_freq
vocab['|'] = word_min_freq
word_v, rev_word_v, embed_matrix = load_word_embedding(embed_file, vocab)
output = open(vocab_file, 'wb')
pickle.dump([word_v, char_v], output)
output.close()
return word_v, rev_word_v, char_v, embed_matrix
def load_vocab(vocab_file):
with open(vocab_file, 'rb') as f:
word_v, char_v = pickle.load(f)
return word_v, char_v
def get_adj_mat(amat):
K = 5
adj_mat = np.zeros((len(amat), len(amat)), np.float32)
for i in range(len(amat)):
for j in range(len(amat)):
if 0 <= amat[i][j] <= K:
adj_mat[i][j] = 1.0 / math.pow(2, amat[i][j])
else:
adj_mat[i][j] = 0
return adj_mat
def get_data(src_lines, trg_lines, datatype):
samples = []
uid = 1
src_len = -1
trg_len = -1
for i in range(0, len(src_lines)):
src_line = src_lines[i].strip()
trg_line = trg_lines[i].strip()
src_words = src_line.split()
if datatype == 1:
tuples = trg_line.strip().split('|')
random.shuffle(tuples)
new_trg_line = ' | '.join(tuples)
assert len(trg_line.split()) == len(new_trg_line.split())
trg_line = new_trg_line
trg_words = list()
trg_words.append('<SOS>')
trg_words += trg_line.split()
trg_words.append('<EOS>')
if datatype == 1 and (len(src_words) > max_src_len or len(trg_words) > max_trg_len + 1):
continue
if len(src_words) > src_len:
src_len = len(src_words)
if len(trg_words) > trg_len:
trg_len = len(trg_words)
sample = Sample(Id=uid, SrcLen=len(src_words), SrcWords=src_words, TrgLen=len(trg_words),
TrgWords=trg_words) #c
samples.append(sample)
uid += 1
print(src_len)
print(trg_len)
return samples
def read_data(src_file, trg_file, datatype):
reader = open(src_file)
src_lines = reader.readlines()
reader.close()
reader = open(trg_file)
trg_lines = reader.readlines()
reader.close()
# tot_len = 100
# src_lines = src_lines[0:min(tot_len, len(src_lines))]
# trg_lines = trg_lines[0:min(tot_len, len(trg_lines))]
# adj_lines = adj_lines[0:min(tot_len, len(adj_lines))]
data = get_data(src_lines, trg_lines, datatype)
return data
#event_lines, argument_lines, roles_lines
# TODO: add an option for less detailed checks
def check_event_trigger(ref_string, pred_string):
return (ref_string == pred_string)
pass
def check_event_type(ref_string, pred_string, event_lines):
if granular_mode == 0:
if pred_string in event_lines:
return (ref_string == pred_string)
else:
# print("invalid prediction")
return False
pass
if granular_mode == 1:
pred_token = pred_string.split(":")[0]
ref_token = ref_string.split(":")[0]
return (pred_token == ref_token)
pass
def check_event_argument(ref_string, pred_string):
return (ref_string == pred_string)
pass
def check_argument_type(ref_string, pred_string, argument_lines):
if granular_mode == 0:
if pred_string in argument_lines:
return (ref_string == pred_string)
else:
# print("invalid prediction")
return False
pass
if granular_mode == 1:
pred_token = pred_string.split(":")[0]
ref_token = ref_string.split(":")[0]
return (pred_token == ref_token)
pass
def check_argument_role(ref_string, pred_string, roles_lines):
if pred_string in roles_lines:
return (ref_string == pred_string)
else:
# print("invalid prediction")
return False
pass
def calculate_f1(ref_lines, pred_lines, event_lines, argument_lines, roles_lines):
list_of_tracking_metrics = ['predicted_tuples',
'ground_truth_tuples',
'correct_predictions',
'events_count',
'correct_events',
'correct_event_type',
'correct_arguments',
'correct_argment_types',
'correct_argument_roles'
]
metric_counts = dict.fromkeys(list_of_tracking_metrics, 0)
for i in range(0, min(len(ref_lines), len(pred_lines))):
ref_line = ref_lines[i].strip()
pred_line = pred_lines[i].strip()
ref_tuples = ref_line.split('|')
pred_tuples = pred_line.split('|')
# find a way to compare multiple tuples
# correct - t1 | t2 | t3
# pred - p1 | p2
# postives = 3 [number of ground truths minus nones]
# predicted_pos = 2 [number of preds minus nones]
# TP = correct preds
# TP + FP = predicted
# TP + FN = positives
# Precision = correct / predicted_pos
# Recall = correct / positives
# f = pr/p+r
# handling repeated predictions
# set_of_preds = set()
# for pred_tuple in pred_tuples:
# set_of_preds.add(pred_tuple.strip())
# pred_tuples = list(set_of_preds)
for pred_tuple in pred_tuples:
pred_strings = pred_tuple.split(';')
if(len(pred_strings) < 3):
continue
# in the case of no argument detection, we only calculate the event trigger scores
if(pred_strings[2].strip().lower()) == 'none':
max_matches = 0
part_matches = []
for ref_tuple in ref_tuples:
# keep the best-scoring (trigger, event-type) pair over all reference tuples
ev1, ev2 = cal_f1_for_pair(ref_tuple, pred_tuple, event_lines)
pair_score = ev1+ev2
if pair_score > max_matches:
max_matches = pair_score
part_matches = (ev1, ev2)
pass
pass
metric_counts['events_count'] += 1
if part_matches and part_matches[0] == 1:
metric_counts['correct_events'] += 1
if part_matches and part_matches[1] == 1:
metric_counts['correct_event_type'] += 1
continue
max_matches = 0
part_matches = cal_f1_for_tuple(ref_tuples[0], pred_tuple, event_lines, argument_lines, roles_lines)
for ref_tuple in ref_tuples:
res = cal_f1_for_tuple(ref_tuple, pred_tuple, event_lines, argument_lines, roles_lines)
tuple_score = sum(res)
if tuple_score >= max_matches:
max_matches = tuple_score
part_matches = res
pass
pass
metric_counts['predicted_tuples'] += 1
metric_counts['events_count'] += 1
if max_matches >= 4:
metric_counts['correct_predictions'] += 1
if part_matches[0] == 1:
metric_counts['correct_events'] += 1
if part_matches[1] == 1:
metric_counts['correct_event_type'] += 1
if part_matches[2] == 1:
metric_counts['correct_arguments'] += 1
if part_matches[3] == 1:
metric_counts['correct_argment_types'] += 1
if part_matches[4] == 1:
metric_counts['correct_argument_roles'] += 1
pass
for ref_tuple in ref_tuples:
if(ref_tuple.split(';')[2].strip().lower()) != 'none':
metric_counts['ground_truth_tuples'] += 1
pass
print(metric_counts)
precision = float(metric_counts['correct_predictions'] / (metric_counts['predicted_tuples'] + 1e-08))
recall = float(metric_counts['correct_predictions'] / (metric_counts['ground_truth_tuples'] + 1e-08))
f1 = 2 * precision * recall / (precision + recall + 1e-08)
precision = round(precision, 3)
recall = round(recall, 3)
f1 = round(f1, 3)
print("Partwise Results")
event_acc = metric_counts['correct_events']/ (metric_counts['events_count'] + 1e-08)
evtype_acc = metric_counts['correct_event_type']/ (metric_counts['events_count'] + 1e-08)
argument_acc = metric_counts['correct_arguments']/ (metric_counts['predicted_tuples'] + 1e-08)
argtype_acc = metric_counts['correct_argment_types']/ (metric_counts['predicted_tuples'] + 1e-08)
role_acc = metric_counts['correct_argument_roles']/ (metric_counts['predicted_tuples'] + 1e-08)
print(f'Event Trigger Word Accuracy: {event_acc}')
print(f'Event Type Accuracy: {evtype_acc}')
print(f'Argument Identification Accuracy: {argument_acc}')
print(f'Argument Type Accuracy: {argtype_acc}')
print(f'Argument Role Accuracy: {role_acc}')
print(f'Macro f-score: {f1}')
targ_file = os.path.join(trg_data_folder, 'Results_logger.txt')
f = open(targ_file, "a")
f.write(f'Event Trigger Word Accuracy: {event_acc}')
f.write("\n")
f.write(f'Event Type Accuracy: {evtype_acc}')
f.write("\n")
f.write(f'Argument Identification Accuracy: {argument_acc}')
f.write("\n")
f.write(f'Argument Type Accuracy: {argtype_acc}')
f.write("\n")
f.write(f'Argument Role Accuracy: {role_acc}')
f.write("\n")
f.write(f'Macro f-score: {f1}')
f.write("\n")
f.close()
return f1
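# --- Worked micro-example (editor's addition) ---
# With 3 ground-truth tuples and 2 predicted tuples of which exactly 1 scores all
# five parts correct: precision = 1/2, recall = 1/3, and
# f1 = 2 * (1/2) * (1/3) / ((1/2) + (1/3)) = 0.4, matching the formula above.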
def cal_f1_for_pair(ref_tuple: str ,
pred_tuple: str,
event_lines: list
) -> list:
ref_strings = ref_tuple.split(';')
pred_strings = pred_tuple.split(';')
ev1 = int( check_event_trigger(ref_strings[0].strip(), pred_strings[0].strip()) )
ev2 = int( check_event_type(ref_strings[1].strip(), pred_strings[1].strip(), event_lines) )
return ev1, ev2
def cal_f1_for_tuple(ref_tuple: str ,
pred_tuple: str,
event_lines: list,
argument_lines: list,
roles_lines: list
) -> list:
ref_strings = ref_tuple.split(';')
pred_strings = pred_tuple.split(';')
if (len (pred_strings) != 5 ):
if (len (pred_strings) >= 2 ):
ev1 = int( check_event_trigger(ref_strings[0].strip(), pred_strings[0].strip()) )
ev2 = int( check_event_type(ref_strings[1].strip(), pred_strings[1].strip(), event_lines) )
return [ev1, ev2, 0, 0, 0]
return list([0,0,0,0,0])
ev1 = int( check_event_trigger(ref_strings[0].strip(), pred_strings[0].strip()) )
ev2 = int( check_event_type(ref_strings[1].strip(), pred_strings[1].strip(), event_lines) )
ev3 = int( check_event_argument(ref_strings[2].strip(), pred_strings[2].strip()) )
ev4 = int( check_argument_type(ref_strings[3].strip(), pred_strings[3].strip(), argument_lines) )
ev5 = int( check_argument_role(ref_strings[4].strip(), pred_strings[4].strip(), roles_lines) )
ret = [ev1, ev2, ev3, ev4, ev5]
return ret
def get_model(model_id):
if model_id == 1:
return SeqToSeqModel()
def write_test_res(data, preds, attns, outfile):
writer = open(outfile, 'w')
for i in range(0, len(data)):
pred_words = get_pred_words(preds[i], attns[i], data[i].SrcWords)[:-1]
writer.write(' '.join(pred_words) + '\n')
writer.close()
def set_random_seeds(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if n_gpu > 1:
torch.cuda.manual_seed_all(seed)
def get_max_len(sample_batch):
src_max_len = len(sample_batch[0].SrcWords)
for idx in range(1, len(sample_batch)):
if len(sample_batch[idx].SrcWords) > src_max_len:
src_max_len = len(sample_batch[idx].SrcWords)
trg_max_len = len(sample_batch[0].TrgWords)
for idx in range(1, len(sample_batch)):
if len(sample_batch[idx].TrgWords) > trg_max_len:
trg_max_len = len(sample_batch[idx].TrgWords)
return src_max_len, trg_max_len
def get_words_index_seq(words, max_len):
seq = list()
for word in words:
if word in word_vocab:
seq.append(word_vocab[word])
else:
seq.append(word_vocab['<UNK>'])
pad_len = max_len - len(words)
for i in range(0, pad_len):
seq.append(word_vocab['<PAD>'])
return seq
def get_target_words_index_seq(words, max_len):
seq = list()
for word in words:
if word in word_vocab:
seq.append(word_vocab[word])
else:
seq.append(word_vocab['<UNK>'])
pad_len = max_len - len(words)
for i in range(0, pad_len):
seq.append(word_vocab['<EOS>'])
return seq
def get_padded_mask(cur_len, max_len):
mask_seq = list()
for i in range(0, cur_len):
mask_seq.append(0)
pad_len = max_len - cur_len
for i in range(0, pad_len):
mask_seq.append(1)
return mask_seq
def get_target_vocab_mask(src_words):
mask = []
for i in range(0, len(word_vocab)):
mask.append(1)
for word in src_words:
if word in word_vocab:
mask[word_vocab[word]] = 0
# events, arguments, roles
for event in events:
mask[word_vocab[event]] = 0
for argument in arguments:
mask[word_vocab[argument]] = 0
for role in roles:
mask[word_vocab[role]] = 0
mask[word_vocab['<UNK>']] = 0
mask[word_vocab['<EOS>']] = 0
mask[word_vocab[';']] = 0
mask[word_vocab['|']] = 0
return mask
def get_rel_mask(trg_words, max_len):
mask_seq = list()
for word in trg_words:
mask_seq.append(0)
# if word in relations:
# mask_seq.append(0)
# else:
# mask_seq.append(1)
pad_len = max_len - len(trg_words)
for i in range(0, pad_len):
mask_seq.append(1)
return mask_seq
def get_char_seq(words, max_len):
char_seq = list()
for i in range(0, conv_filter_size - 1):
char_seq.append(char_vocab['<PAD>'])
for word in words:
for c in word[0:min(len(word), max_word_len)]:
if c in char_vocab:
char_seq.append(char_vocab[c])
else:
char_seq.append(char_vocab['<UNK>'])
pad_len = max_word_len - len(word)
for i in range(0, pad_len):
char_seq.append(char_vocab['<PAD>'])
for i in range(0, conv_filter_size - 1):
char_seq.append(char_vocab['<PAD>'])
pad_len = max_len - len(words)
for i in range(0, pad_len):
for i in range(0, max_word_len + conv_filter_size - 1):
char_seq.append(char_vocab['<PAD>'])
return char_seq
def get_relations(file_name):
rels = []
reader = open(file_name)
lines = reader.readlines()
reader.close()
for line in lines:
rels.append(line.strip())
return rels
def get_batch_data(cur_samples, is_training=False):
"""
Returns the training samples and labels as numpy array
"""
batch_src_max_len, batch_trg_max_len = get_max_len(cur_samples)
src_words_list = list()
src_words_mask_list = list()
src_char_seq = list()
trg_words_list = list()
trg_vocab_mask = list()
adj_lst = []
target = list()
cnt = 0
for sample in cur_samples:
src_words_list.append(get_words_index_seq(sample.SrcWords, batch_src_max_len))
src_words_mask_list.append(get_padded_mask(sample.SrcLen, batch_src_max_len))
src_char_seq.append(get_char_seq(sample.SrcWords, batch_src_max_len))
trg_vocab_mask.append(get_target_vocab_mask(sample.SrcWords))
# cur_masked_adj = np.zeros((batch_src_max_len, batch_src_max_len), dtype=np.float32)
# cur_masked_adj[:len(sample.SrcWords), :len(sample.SrcWords)] = sample.AdjMat
# adj_lst.append(cur_masked_adj)
if is_training:
padded_trg_words = get_words_index_seq(sample.TrgWords, batch_trg_max_len)
trg_words_list.append(padded_trg_words)
target.append(padded_trg_words[1:])
else:
trg_words_list.append(get_words_index_seq(['<SOS>'], 1))
cnt += 1
return {'src_words': np.array(src_words_list, dtype=np.float32),
'src_chars': np.array(src_char_seq),
'src_words_mask': np.array(src_words_mask_list),
'adj': np.array(adj_lst),
'trg_vocab_mask': np.array(trg_vocab_mask),
'trg_words': np.array(trg_words_list, dtype=np.int32),
'target': np.array(target)}
def shuffle_data(data):
custom_print(len(data))
data.sort(key=lambda x: x.SrcLen)
num_batch = int(len(data) / batch_size)
rand_idx = random.sample(range(num_batch), num_batch)
new_data = []
for idx in rand_idx:
new_data += data[batch_size * idx: batch_size * (idx + 1)]
if len(new_data) < len(data):
new_data += data[num_batch * batch_size:]
return new_data
def get_pred_words(preds, attns, src_words):
pred_words = []
for i in range(0, max_trg_len):
word_idx = preds[i]
if word_vocab['<EOS>'] == word_idx:
pred_words.append('<EOS>')
break
elif att_type != 'None' and copy_on and word_vocab['<UNK>'] == word_idx:
word_idx = attns[i]
pred_words.append(src_words[word_idx])
else:
pred_words.append(rev_word_vocab[word_idx])
return pred_words
class WordEmbeddings(nn.Module):
def __init__(self, vocab_size, embed_dim, pre_trained_embed_matrix, drop_out_rate):
super(WordEmbeddings, self).__init__()
self.embeddings = nn.Embedding(vocab_size, embed_dim, padding_idx=0)
self.embeddings.weight.data.copy_(torch.from_numpy(pre_trained_embed_matrix))
self.dropout = nn.Dropout(drop_out_rate)
def forward(self, words_seq):
word_embeds = self.embeddings(words_seq)
word_embeds = self.dropout(word_embeds)
return word_embeds
def weight(self):
return self.embeddings.weight
# Potentially use a pretrained BERT - 509
class CharEmbeddings(nn.Module):
def __init__(self, vocab_size, embed_dim, drop_out_rate):
super(CharEmbeddings, self).__init__()
# Layers
self.embeddings = nn.Embedding(vocab_size, embed_dim, padding_idx=0)
self.dropout = nn.Dropout(drop_out_rate)
def forward(self, words_seq):
char_embeds = self.embeddings(words_seq)
char_embeds = self.dropout(char_embeds)
return char_embeds
# DONT CHANGE CLASSES
# 543
class Encoder(nn.Module):
def __init__(self, input_dim, hidden_dim, layers, is_bidirectional, drop_out_rate):
super(Encoder, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.layers = layers
self.is_bidirectional = is_bidirectional
self.drop_rate = drop_out_rate
self.char_embeddings = CharEmbeddings(len(char_vocab), char_embed_dim, drop_rate)
# Remove In case we want to BERT
self.lstm = nn.LSTM(self.input_dim, self.hidden_dim, self.layers, batch_first=True,
bidirectional=self.is_bidirectional)
self.dropout = nn.Dropout(self.drop_rate)
self.conv1d = nn.Conv1d(char_embed_dim, char_feature_size, conv_filter_size)
self.max_pool = nn.MaxPool1d(max_word_len + conv_filter_size - 1, max_word_len + conv_filter_size - 1)
def forward(self, words_input, char_seq, adj, is_training=False):
char_embeds = self.char_embeddings(char_seq)
char_embeds = char_embeds.permute(0, 2, 1)
char_feature = torch.tanh(self.max_pool(self.conv1d(char_embeds)))
char_feature = char_feature.permute(0, 2, 1)
words_input = torch.cat((words_input, char_feature), -1)
outputs, hc = self.lstm(words_input)
outputs = self.dropout(outputs)
return outputs
# 597
class Attention(nn.Module):
def __init__(self, input_dim):
super(Attention, self).__init__()
self.input_dim = input_dim
self.linear_ctx = nn.Linear(self.input_dim, self.input_dim, bias=False)
self.linear_query = nn.Linear(self.input_dim, self.input_dim, bias=True)
self.v = nn.Linear(self.input_dim, 1)
def forward(self, s_prev, enc_hs, src_mask):
uh = self.linear_ctx(enc_hs)
wq = self.linear_query(s_prev)
wquh = torch.tanh(wq + uh)
attn_weights = self.v(wquh).squeeze()
attn_weights.data.masked_fill_(src_mask.data, -float('inf'))
attn_weights = F.softmax(attn_weights, dim=-1)
ctx = torch.bmm(attn_weights.unsqueeze(1), enc_hs).squeeze()
return ctx, attn_weights
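# --- Editor's note (addition): this is additive (Bahdanau-style) attention:
# score_i = v^T tanh(W_q s_prev + W_c h_i), padded source positions are masked to
# -inf before the softmax, and ctx is the attention-weighted sum of encoder states.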
# 617
class NGram_Attention(nn.Module):
def __init__(self, input_dim, N):
super(NGram_Attention, self).__init__()
self.input_dim = input_dim
self.layers = N
self.V_layers = nn.ModuleList()
self.W_layers = nn.ModuleList()
for i in range(N):
self.V_layers.append(nn.Linear(input_dim, input_dim))
self.W_layers.append(nn.Linear(input_dim, input_dim))
def forward(self, s_prev, enc_hs, src_mask):
att = torch.bmm(s_prev.unsqueeze(1), self.V_layers[0](enc_hs).transpose(1, 2)).squeeze()
att.data.masked_fill_(src_mask.data, -float('inf'))
att = F.softmax(att, dim=-1)
ctx = self.W_layers[0](torch.bmm(att.unsqueeze(1), enc_hs).squeeze())
for i in range(1, self.layers):
enc_hs_ngram = torch.nn.AvgPool1d(i+1, 1)(enc_hs.transpose(1, 2)).transpose(1, 2)
n_mask = src_mask.unsqueeze(1).float()
n_mask = torch.nn.AvgPool1d(i+1, 1)(n_mask).squeeze()
n_mask[n_mask > 0] = 1
n_mask = n_mask.byte()
n_att = torch.bmm(s_prev.unsqueeze(1), self.V_layers[i](enc_hs_ngram).transpose(1, 2)).squeeze()
n_att.data.masked_fill_(n_mask.data, -float('inf'))
n_att = F.softmax(n_att, dim=-1)
ctx += self.W_layers[i](torch.bmm(n_att.unsqueeze(1), enc_hs_ngram).squeeze())
return ctx, att
# 588
def mean_over_time(x, mask):
x.data.masked_fill_(mask.unsqueeze(2).data, 0)
x = torch.sum(x, dim=1)
time_steps = torch.sum(mask.eq(0), dim=1, keepdim=True).float()
x /= time_steps
return x
# 645
class Decoder(nn.Module):
def __init__(self, input_dim, hidden_dim, layers, drop_out_rate, max_length):
super(Decoder, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.layers = layers
self.drop_rate = drop_out_rate
self.max_length = max_length
if att_type == 'None':
self.lstm = nn.LSTMCell(2 * self.input_dim, self.hidden_dim, self.layers)
elif att_type == 'Unigram':
self.attention = Attention(input_dim)
self.lstm = nn.LSTMCell(2 * self.input_dim, self.hidden_dim, self.layers)
else:
self.attention = NGram_Attention(input_dim, 3)
self.lstm = nn.LSTMCell(3 * self.input_dim, self.hidden_dim, self.layers)
self.dropout = nn.Dropout(self.drop_rate)
self.ent_out = nn.Linear(self.input_dim, len(word_vocab))
def forward(self, y_prev, h_prev, enc_hs, src_word_embeds, src_mask, is_training=False):
src_time_steps = enc_hs.size()[1]
if att_type == 'None':
ctx = mean_over_time(enc_hs, src_mask)
attn_weights = torch.zeros(src_mask.size()).cuda()
elif att_type == 'Unigram':
s_prev = h_prev[0]
s_prev = s_prev.unsqueeze(1)
s_prev = s_prev.repeat(1, src_time_steps, 1)
ctx, attn_weights = self.attention(s_prev, enc_hs, src_mask)
else:
last_index = src_mask.size()[1] - torch.sum(src_mask, dim=-1).long() - 1
last_index = last_index.unsqueeze(1).unsqueeze(1).repeat(1, 1, enc_hs.size()[-1])
enc_last = torch.gather(enc_hs, 1, last_index).squeeze()
ctx, attn_weights = self.attention(enc_last, src_word_embeds, src_mask)
ctx = torch.cat((enc_last, ctx), -1)
y_prev = y_prev.squeeze()
s_cur = torch.cat((y_prev, ctx), 1)
hidden, cell_state = self.lstm(s_cur, h_prev)
hidden = self.dropout(hidden)
output = self.ent_out(hidden)
return output, (hidden, cell_state), attn_weights
# 690
class SeqToSeqModel(nn.Module):
def __init__(self):
super(SeqToSeqModel, self).__init__()
self.word_embeddings = WordEmbeddings(len(word_vocab), word_embed_dim, word_embed_matrix, drop_rate)
self.encoder = Encoder(enc_inp_size, int(enc_hidden_size/2), layers, True, drop_rate)
self.decoder = Decoder(dec_inp_size, dec_hidden_size, layers, drop_rate, max_trg_len)
def forward(self, src_words_seq, src_chars_seq, src_mask, trg_words_seq, trg_vocab_mask, adj, is_training=False):
src_word_embeds = self.word_embeddings(src_words_seq)
trg_word_embeds = self.word_embeddings(trg_words_seq)
batch_len = src_word_embeds.size()[0]
if is_training:
time_steps = trg_word_embeds.size()[1] - 1
else:
time_steps = max_trg_len
encoder_output = self.encoder(src_word_embeds, src_chars_seq, adj, is_training)
h0 = autograd.Variable(torch.FloatTensor(torch.zeros(batch_len, word_embed_dim)))
h0 = h0.cuda()
c0 = autograd.Variable(torch.FloatTensor(torch.zeros(batch_len, word_embed_dim)))
c0 = c0.cuda()
dec_hid = (h0, c0)
if is_training:
dec_inp = trg_word_embeds[:, 0, :]
dec_out, dec_hid, dec_attn = self.decoder(dec_inp, dec_hid, encoder_output, src_word_embeds,
src_mask, is_training)
dec_out = dec_out.view(-1, len(word_vocab))
dec_out = F.log_softmax(dec_out, dim=-1)
dec_out = dec_out.unsqueeze(1)
for t in range(1, time_steps):
dec_inp = trg_word_embeds[:, t, :]
cur_dec_out, dec_hid, dec_attn = self.decoder(dec_inp, dec_hid, encoder_output, src_word_embeds,
src_mask, is_training)
cur_dec_out = cur_dec_out.view(-1, len(word_vocab))
dec_out = torch.cat((dec_out, F.log_softmax(cur_dec_out, dim=-1).unsqueeze(1)), 1)
else:
dec_inp = trg_word_embeds[:, 0, :]
dec_out, dec_hid, dec_attn = self.decoder(dec_inp, dec_hid, encoder_output, src_word_embeds,
src_mask, is_training)
dec_out = dec_out.view(-1, len(word_vocab))
if copy_on:
dec_out.data.masked_fill_(trg_vocab_mask.data, -float('inf'))
dec_out = F.log_softmax(dec_out, dim=-1)
topv, topi = dec_out.topk(1)
dec_out_v, dec_out_i = dec_out.topk(1)
dec_attn_v, dec_attn_i = dec_attn.topk(1)
for t in range(1, time_steps):
dec_inp = self.word_embeddings(topi.squeeze().detach())
cur_dec_out, dec_hid, cur_dec_attn = self.decoder(dec_inp, dec_hid, encoder_output, src_word_embeds,
src_mask, is_training)
cur_dec_out = cur_dec_out.view(-1, len(word_vocab))
if copy_on:
cur_dec_out.data.masked_fill_(trg_vocab_mask.data, -float('inf'))
cur_dec_out = F.log_softmax(cur_dec_out, dim=-1)
topv, topi = cur_dec_out.topk(1)
cur_dec_out_v, cur_dec_out_i = cur_dec_out.topk(1)
dec_out_i = torch.cat((dec_out_i, cur_dec_out_i), 1)
cur_dec_attn_v, cur_dec_attn_i = cur_dec_attn.topk(1)
dec_attn_i = torch.cat((dec_attn_i, cur_dec_attn_i), 1)
if is_training:
dec_out = dec_out.view(-1, len(word_vocab))
return dec_out
else:
return dec_out_i, dec_attn_i
def predict(samples, model, model_id):
pred_batch_size = batch_size
batch_count = math.ceil(len(samples) / pred_batch_size)
move_last_batch = False
if len(samples) - batch_size * (batch_count - 1) == 1:
move_last_batch = True
batch_count -= 1
preds = list()
attns = list()
model.eval()
set_random_seeds(random_seed)
start_time = datetime.datetime.now()
for batch_idx in tqdm(range(0, batch_count)):
batch_start = batch_idx * pred_batch_size
batch_end = min(len(samples), batch_start + pred_batch_size)
if batch_idx == batch_count - 1 and move_last_batch:
batch_end = len(samples)
cur_batch = samples[batch_start:batch_end]
cur_samples_input = get_batch_data(cur_batch, False)
src_words_seq = torch.from_numpy(cur_samples_input['src_words'].astype('long'))
src_words_mask = torch.from_numpy(cur_samples_input['src_words_mask'].astype('uint8'))
trg_vocab_mask = torch.from_numpy(cur_samples_input['trg_vocab_mask'].astype('uint8'))
trg_words_seq = torch.from_numpy(cur_samples_input['trg_words'].astype('long'))
adj = torch.from_numpy(cur_samples_input['adj'].astype('float32'))
src_chars_seq = torch.from_numpy(cur_samples_input['src_chars'].astype('long'))
if torch.cuda.is_available():
src_words_seq = src_words_seq.cuda()
src_words_mask = src_words_mask.cuda()
trg_vocab_mask = trg_vocab_mask.cuda()
trg_words_seq = trg_words_seq.cuda()
adj = adj.cuda()
src_chars_seq = src_chars_seq.cuda()
src_words_seq = autograd.Variable(src_words_seq)
src_words_mask = autograd.Variable(src_words_mask)
trg_vocab_mask = autograd.Variable(trg_vocab_mask)
adj = autograd.Variable(adj)
src_chars_seq = autograd.Variable(src_chars_seq)
trg_words_seq = autograd.Variable(trg_words_seq)
with torch.no_grad():
outputs = model(src_words_seq, src_chars_seq, src_words_mask, trg_words_seq, trg_vocab_mask, adj,False)
preds += list(outputs[0].data.cpu().numpy())
attns += list(outputs[1].data.cpu().numpy())
model.zero_grad()
end_time = datetime.datetime.now()
custom_print('Prediction time:', end_time - start_time)
return preds, attns
def train_model(model_id, train_samples, dev_samples, best_model_file):
train_size = len(train_samples)
batch_count = int(math.ceil(train_size/batch_size))
move_last_batch = False
if len(train_samples) - batch_size * (batch_count - 1) == 1:
move_last_batch = True
batch_count -= 1
custom_print(batch_count)
# model = get_model(model_id)
model = SeqToSeqModel()
pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
custom_print('Parameters size:', pytorch_total_params)
custom_print(model)
if torch.cuda.is_available():
model.cuda()
if n_gpu > 1:
model = torch.nn.DataParallel(model)
criterion = nn.NLLLoss(ignore_index=0)
optimizer = optim.Adam(model.parameters())
custom_print(optimizer)
best_dev_acc = -1.0
best_epoch_idx = -1
best_epoch_seed = -1
for epoch_idx in range(0, num_epoch):
model.train()
model.zero_grad()
custom_print('Epoch:', epoch_idx + 1)
cur_seed = random_seed + epoch_idx + 1
set_random_seeds(cur_seed)
cur_shuffled_train_data = shuffle_data(train_samples)
start_time = datetime.datetime.now()
train_loss_val = 0.0
for batch_idx in tqdm(range(0, batch_count)):
batch_start = batch_idx * batch_size
batch_end = min(len(cur_shuffled_train_data), batch_start + batch_size)
if batch_idx == batch_count - 1 and move_last_batch:
batch_end = len(cur_shuffled_train_data)
cur_batch = cur_shuffled_train_data[batch_start:batch_end]
cur_samples_input = get_batch_data(cur_batch, True)
# np arrays to tensors
src_words_seq = torch.from_numpy(cur_samples_input['src_words'].astype('long'))
src_words_mask = torch.from_numpy(cur_samples_input['src_words_mask'].astype('uint8'))
trg_vocab_mask = torch.from_numpy(cur_samples_input['trg_vocab_mask'].astype('uint8'))
trg_words_seq = torch.from_numpy(cur_samples_input['trg_words'].astype('long'))
adj = torch.from_numpy(cur_samples_input['adj'].astype('float32'))
src_chars_seq = torch.from_numpy(cur_samples_input['src_chars'].astype('long'))
target = torch.from_numpy(cur_samples_input['target'].astype('long'))
if torch.cuda.is_available():
src_words_seq = src_words_seq.cuda()
src_words_mask = src_words_mask.cuda()
trg_vocab_mask = trg_vocab_mask.cuda()
trg_words_seq = trg_words_seq.cuda()
adj = adj.cuda()
src_chars_seq = src_chars_seq.cuda()
target = target.cuda()
src_words_seq = autograd.Variable(src_words_seq)
src_words_mask = autograd.Variable(src_words_mask)
trg_vocab_mask = autograd.Variable(trg_vocab_mask)
trg_words_seq = autograd.Variable(trg_words_seq)
adj = autograd.Variable(adj)
src_chars_seq = autograd.Variable(src_chars_seq)
target = autograd.Variable(target)
outputs = model(src_words_seq, src_chars_seq, src_words_mask, trg_words_seq, trg_vocab_mask, adj, True)
target = target.view(-1, 1).squeeze()
loss = criterion(outputs, target)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 10.0)
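            # gradient accumulation: the optimizer steps only every update_freq mini-batches
            # (update_freq is set to 1 below, i.e. a step after every batch)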
if (batch_idx + 1) % update_freq == 0:
optimizer.step()
model.zero_grad()
train_loss_val += loss.item()
train_loss_val /= batch_count
end_time = datetime.datetime.now()
custom_print('Training loss:', train_loss_val)
custom_print('Training time:', end_time - start_time)
custom_print('\nDev Results\n')
set_random_seeds(random_seed)
dev_preds, dev_attns = predict(dev_samples, model, model_id)
write_test_res(dev_samples, dev_preds, dev_attns, os.path.join(trg_data_folder, 'dev.out'))
ref_lines = open(trg_dev_file).read().splitlines()
pred_lines = open(os.path.join(trg_data_folder, 'dev.out')).read().splitlines()
event_lines = open(events_file).read().splitlines()
argument_lines = open(arguments_file).read().splitlines()
roles_lines = open(roles_file).read().splitlines()
dev_acc = calculate_f1(ref_lines, pred_lines, event_lines, argument_lines, roles_lines)
# pred_pos, gt_pos, correct_pos = get_F1(dev_samples, dev_preds, dev_attns)
# custom_print(pred_pos, '\t', gt_pos, '\t', correct_pos)
# p = float(correct_pos) / (pred_pos + 1e-8)
# r = float(correct_pos) / (gt_pos + 1e-8)
# dev_acc = (2 * p * r) / (p + r + 1e-8)
# custom_print('F1:', dev_acc)
if dev_acc >= best_dev_acc:
best_epoch_idx = epoch_idx + 1
best_epoch_seed = cur_seed
custom_print('model saved......')
best_dev_acc = dev_acc
torch.save(model.state_dict(), best_model_file)
custom_print('\n\n')
if epoch_idx + 1 - best_epoch_idx >= early_stop_cnt:
break
custom_print('*******')
custom_print('Best Epoch:', best_epoch_idx)
custom_print('Best Epoch Seed:', best_epoch_seed)
if __name__ == "__main__":
os.environ['CUDA_VISIBLE_DEVICES'] = sys.argv[1]
random_seed = int(sys.argv[2])
src_data_folder = sys.argv[3]
trg_data_folder = sys.argv[4]
job_mode = sys.argv[5]
embedding_type = sys.argv[6]
granular_mode = 1
n_gpu = torch.cuda.device_count()
set_random_seeds(random_seed)
if not os.path.exists(trg_data_folder):
os.mkdir(trg_data_folder)
model_name = 1
    # Tunable hyperparameters
batch_size = 32
num_epoch = 30
max_src_len = 100
max_trg_len = 50
if embedding_type == 'w2v':
embedding_file = os.path.join(src_data_folder, 'w2v.txt')
else:
embedding_file = os.path.join(src_data_folder, 'Bert_embeddings.txt')
update_freq = 1
enc_type = ['LSTM', 'GCN', 'LSTM-GCN'][0]
att_type = ['None', 'Unigram', 'N-Gram-Enc'][1]
copy_on = True
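    # copy_on: when the decoder emits <UNK>, copy the source token with the highest attention weight (see get_pred_words)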
gcn_num_layers = 3
if embedding_type == 'w2v':
word_embed_dim = 300
else:
word_embed_dim = 768
word_min_freq = 2
char_embed_dim = 50
char_feature_size = 50
conv_filter_size = 3
max_word_len = 10
enc_inp_size = word_embed_dim + char_feature_size
enc_hidden_size = word_embed_dim
dec_inp_size = enc_hidden_size
dec_hidden_size = dec_inp_size
drop_rate = 0.3
layers = 1
early_stop_cnt = 20
sample_cnt = 0
Sample = recordclass("Sample", "Id SrcLen SrcWords TrgLen TrgWords")
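    # Sample: one (source sentence, target tuple sequence) pair; TrgWords is bracketed by <SOS>/<EOS>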
events_file = os.path.join(src_data_folder, 'event_types.txt')
arguments_file = os.path.join(src_data_folder, 'arguments.txt')
roles_file = os.path.join(src_data_folder, 'roles.txt')
events = get_relations(events_file)
arguments = get_relations(arguments_file)
roles = get_relations(roles_file)
# train a model
if job_mode == 'train':
logger = open(os.path.join(trg_data_folder, 'training.log'), 'w')
custom_print(sys.argv)
custom_print(max_src_len, max_trg_len, drop_rate, layers)
custom_print('loading data......')
model_file_name = os.path.join(trg_data_folder, 'model.h5py')
src_train_file = os.path.join(src_data_folder, 'train.sent')
trg_train_file = os.path.join(src_data_folder, 'train.tup')
train_data = read_data(src_train_file, trg_train_file, 1)
src_dev_file = os.path.join(src_data_folder, 'dev.sent')
trg_dev_file = os.path.join(src_data_folder, 'dev.tup')
dev_data = read_data(src_dev_file, trg_dev_file, 2)
custom_print('Training data size:', len(train_data))
custom_print('Development data size:', len(dev_data))
custom_print("preparing vocabulary......")
save_vocab = os.path.join(trg_data_folder, 'vocab.pkl')
word_vocab, rev_word_vocab, char_vocab, word_embed_matrix = build_vocab(train_data, events, arguments, roles, save_vocab,
embedding_file)
custom_print("Training started......")
train_model(model_name, train_data, dev_data, model_file_name)
logger.close()
if job_mode == 'test':
logger = open(os.path.join(trg_data_folder, 'test.log'), 'w')
custom_print(sys.argv)
custom_print("loading word vectors......")
vocab_file_name = os.path.join(trg_data_folder, 'vocab.pkl')
word_vocab, char_vocab = load_vocab(vocab_file_name)
rev_word_vocab = OrderedDict()
for word in word_vocab:
idx = word_vocab[word]
rev_word_vocab[idx] = word
word_embed_matrix = np.zeros((len(word_vocab), word_embed_dim), dtype=np.float32)
custom_print('vocab size:', len(word_vocab))
src_test_file = os.path.join(src_data_folder, 'test.sent')
trg_test_file = os.path.join(src_data_folder, 'test.tup')
test_data = read_data(src_test_file, trg_test_file, 3)
custom_print('Test data size:', len(test_data))
custom_print('seed:', random_seed)
model_file = os.path.join(trg_data_folder, 'model.h5py')
best_model = get_model(model_name)
custom_print(best_model)
if torch.cuda.is_available():
best_model.cuda()
if n_gpu > 1:
best_model = torch.nn.DataParallel(best_model)
best_model.load_state_dict(torch.load(model_file))
custom_print('\nTest Results\n')
set_random_seeds(random_seed)
test_preds, test_attns = predict(test_data, best_model, model_name)
custom_print('Copy On')
write_test_res(test_data, test_preds, test_attns, os.path.join(trg_data_folder, 'test.out'))
# ref_lines = open(trg_test_file).readlines()
# pred_lines = open(os.path.join(trg_data_folder, 'test.out')).readlines()
# event_lines = open(events_file).readlines()
# argument_lines = open(arguments_file).readlines()
# roles_lines = open(roles_file).readlines()
ref_lines = open(trg_test_file).read().splitlines()
pred_lines = open(os.path.join(trg_data_folder, 'test.out')).read().splitlines()
event_lines = open(events_file).read().splitlines()
argument_lines = open(arguments_file).read().splitlines()
roles_lines = open(roles_file).read().splitlines()
mode = 1
custom_print('Overall F1')
# custom_print(cal_f1(ref_lines, pred_lines, event_lines, argument_lines, roles_lines, mode))
calculate_f1(ref_lines, pred_lines, event_lines, argument_lines, roles_lines)
copy_on = False
custom_print('Copy Off')
set_random_seeds(random_seed)
test_preds, test_attns = predict(test_data, best_model, model_name)
write_test_res(test_data, test_preds, test_attns, os.path.join(trg_data_folder, 'test_without_copy.out'))
# ref_lines = open(trg_test_file).readlines()
# pred_lines = open(os.path.join(trg_data_folder, 'test_without_copy.out')).readlines()
# event_lines = open(events_file).readlines()
# argument_lines = open(arguments_file).readlines()
# roles_lines = open(roles_file).readlines()
ref_lines = open(trg_test_file).read().splitlines()
pred_lines = open(os.path.join(trg_data_folder, 'test_without_copy.out')).read().splitlines()
event_lines = open(events_file).read().splitlines()
argument_lines = open(arguments_file).read().splitlines()
roles_lines = open(roles_file).read().splitlines()
mode = 1
custom_print('Overall F1')
# custom_print(cal_f1(ref_lines, pred_lines, event_lines, argument_lines, roles_lines, mode))
calculate_f1(ref_lines, pred_lines, event_lines, argument_lines, roles_lines)
logger.close()
| 36.101026
| 129
| 0.620638
|
import sys
import os
import numpy as np
import random
from collections import OrderedDict
import pickle
import datetime
from tqdm import tqdm
from recordclass import recordclass
import math
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import json
def custom_print(*msg):
for i in range(0, len(msg)):
if i == len(msg) - 1:
print(msg[i])
logger.write(str(msg[i]) + '\n')
else:
print(msg[i], ' ', end='')
logger.write(str(msg[i]))
def load_word_embedding(embed_file, vocab):
custom_print('vocab length:', len(vocab))
embed_vocab = OrderedDict()
rev_embed_vocab = OrderedDict()
embed_matrix = list()
embed_vocab['<PAD>'] = 0
rev_embed_vocab[0] = '<PAD>'
embed_matrix.append(np.zeros(word_embed_dim, dtype=np.float32))
embed_vocab['<UNK>'] = 1
rev_embed_vocab[1] = '<UNK>'
embed_matrix.append(np.random.uniform(-0.25, 0.25, word_embed_dim))
embed_vocab['<SOS>'] = 2
rev_embed_vocab[2] = '<SOS>'
embed_matrix.append(np.random.uniform(-0.25, 0.25, word_embed_dim))
embed_vocab['<EOS>'] = 3
rev_embed_vocab[3] = '<EOS>'
embed_matrix.append(np.random.uniform(-0.25, 0.25, word_embed_dim))
word_idx = 4
with open(embed_file, "r") as f:
for line in f:
parts = line.split()
if len(parts) < word_embed_dim + 1:
continue
word = parts[0]
if word in vocab and vocab[word] >= word_min_freq:
vec = [np.float32(val) for val in parts[1:]]
embed_matrix.append(vec)
embed_vocab[word] = word_idx
rev_embed_vocab[word_idx] = word
word_idx += 1
for word in vocab:
if word not in embed_vocab and vocab[word] >= word_min_freq:
embed_matrix.append(np.random.uniform(-0.25, 0.25, word_embed_dim))
embed_vocab[word] = word_idx
rev_embed_vocab[word_idx] = word
word_idx += 1
custom_print('embed dictionary length:', len(embed_vocab))
return embed_vocab, rev_embed_vocab, np.array(embed_matrix, dtype=np.float32)
def build_vocab(data, events, arguments, roles, vocab_file, embed_file):
vocab = OrderedDict()
char_v = OrderedDict()
char_v['<PAD>'] = 0
char_v['<UNK>'] = 1
char_v[';'] = 2
char_v['|'] = 3
char_idx = 4
for d in data:
for word in d.SrcWords:
if word not in vocab:
vocab[word] = 1
else:
vocab[word] += 1
for c in word:
if c not in char_v:
char_v[c] = char_idx
char_idx += 1
for event in events:
vocab[event] = word_min_freq
for argument in arguments:
vocab[argument] = word_min_freq
for role in roles:
vocab[role] = word_min_freq
vocab[';'] = word_min_freq
vocab['|'] = word_min_freq
word_v, rev_word_v, embed_matrix = load_word_embedding(embed_file, vocab)
output = open(vocab_file, 'wb')
pickle.dump([word_v, char_v], output)
output.close()
return word_v, rev_word_v, char_v, embed_matrix
def load_vocab(vocab_file):
with open(vocab_file, 'rb') as f:
word_v, char_v = pickle.load(f)
return word_v, char_v
def get_adj_mat(amat):
K = 5
adj_mat = np.zeros((len(amat), len(amat)), np.float32)
for i in range(len(amat)):
for j in range(len(amat)):
if 0 <= amat[i][j] <= K:
adj_mat[i][j] = 1.0 / math.pow(2, amat[i][j])
else:
adj_mat[i][j] = 0
return adj_mat
def get_data(src_lines, trg_lines, datatype):
samples = []
uid = 1
src_len = -1
trg_len = -1
for i in range(0, len(src_lines)):
src_line = src_lines[i].strip()
trg_line = trg_lines[i].strip()
src_words = src_line.split()
if datatype == 1:
tuples = trg_line.strip().split('|')
random.shuffle(tuples)
new_trg_line = ' | '.join(tuples)
assert len(trg_line.split()) == len(new_trg_line.split())
trg_line = new_trg_line
trg_words = list()
trg_words.append('<SOS>')
trg_words += trg_line.split()
trg_words.append('<EOS>')
if datatype == 1 and (len(src_words) > max_src_len or len(trg_words) > max_trg_len + 1):
continue
if len(src_words) > src_len:
src_len = len(src_words)
if len(trg_words) > trg_len:
trg_len = len(trg_words)
sample = Sample(Id=uid, SrcLen=len(src_words), SrcWords=src_words, TrgLen=len(trg_words),
TrgWords=trg_words)
samples.append(sample)
uid += 1
print(src_len)
print(trg_len)
return samples
def read_data(src_file, trg_file, datatype):
reader = open(src_file)
src_lines = reader.readlines()
reader.close()
reader = open(trg_file)
trg_lines = reader.readlines()
reader.close()
data = get_data(src_lines, trg_lines, datatype)
return data
def check_event_trigger(ref_string, pred_string):
return (ref_string == pred_string)
pass
def check_event_type(ref_string, pred_string, event_lines):
if granular_mode == 0:
if pred_string in event_lines:
return (ref_string == pred_string)
else:
return False
pass
if granular_mode == 1:
pred_token = pred_string.split(":")[0]
ref_token = ref_string.split(":")[0]
return (pred_token == ref_token)
pass
def check_event_argument(ref_string, pred_string):
return (ref_string == pred_string)
pass
def check_argument_type(ref_string, pred_string, argument_lines):
if granular_mode == 0:
if pred_string in argument_lines:
return (ref_string == pred_string)
else:
return False
pass
if granular_mode == 1:
pred_token = pred_string.split(":")[0]
ref_token = ref_string.split(":")[0]
return (pred_token == ref_token)
pass
def check_argument_role(ref_string, pred_string, roles_lines):
if pred_string in roles_lines:
return (ref_string == pred_string)
else:
return False
pass
def calculate_f1(ref_lines, pred_lines, event_lines, argument_lines, roles_lines):
list_of_tracking_metrics = ['predicted_tuples',
'ground_truth_tuples',
'correct_predictions',
'events_count',
'correct_events',
'correct_event_type',
'correct_arguments',
'correct_argment_types',
'correct_argument_roles'
]
metric_counts = dict.fromkeys(list_of_tracking_metrics, 0)
for i in range(0, min(len(ref_lines), len(pred_lines))):
ref_line = ref_lines[i].strip()
pred_line = pred_lines[i].strip()
ref_tuples = ref_line.split('|')
pred_tuples = pred_line.split('|')
for pred_tuple in pred_tuples:
pred_strings = pred_tuple.split(';')
if(len(pred_strings) < 3):
continue
if(pred_strings[2].strip().lower()) == 'none':
                max_matches = 0
                part_matches = (0, 0)
                for ref_tuple in ref_tuples:
                    ev1, ev2 = cal_f1_for_pair(ref_tuple, pred_tuple, event_lines)
                    pair_score = ev1 + ev2
                    if pair_score > max_matches:
                        max_matches = pair_score
                        part_matches = (ev1, ev2)
                metric_counts['events_count'] += 1
                if part_matches[0] == 1:
                    metric_counts['correct_events'] += 1
                if part_matches[1] == 1:
                    metric_counts['correct_event_type'] += 1
continue
max_matches = 0
part_matches = cal_f1_for_tuple(ref_tuples[0], pred_tuple, event_lines, argument_lines, roles_lines)
for ref_tuple in ref_tuples:
res = cal_f1_for_tuple(ref_tuple, pred_tuple, event_lines, argument_lines, roles_lines)
tuple_score = sum(res)
if tuple_score >= max_matches:
max_matches = tuple_score
part_matches = res
pass
pass
metric_counts['predicted_tuples'] += 1
metric_counts['events_count'] += 1
if max_matches >= 4:
metric_counts['correct_predictions'] += 1
if part_matches[0] == 1:
metric_counts['correct_events'] += 1
if part_matches[1] == 1:
metric_counts['correct_event_type'] += 1
if part_matches[2] == 1:
metric_counts['correct_arguments'] += 1
if part_matches[3] == 1:
metric_counts['correct_argment_types'] += 1
if part_matches[4] == 1:
metric_counts['correct_argument_roles'] += 1
pass
for ref_tuple in ref_tuples:
if(ref_tuple.split(';')[2].strip().lower()) != 'none':
metric_counts['ground_truth_tuples'] += 1
pass
print(metric_counts)
precision = float(metric_counts['correct_predictions'] / (metric_counts['predicted_tuples'] + 1e-08))
recall = float(metric_counts['correct_predictions'] / (metric_counts['ground_truth_tuples'] + 1e-08))
f1 = 2 * precision * recall / (precision + recall + 1e-08)
precision = round(precision, 3)
recall = round(recall, 3)
f1 = round(f1, 3)
print("Partwise Results")
event_acc = metric_counts['correct_events']/ (metric_counts['events_count'] + 1e-08)
evtype_acc = metric_counts['correct_event_type']/ (metric_counts['events_count'] + 1e-08)
argument_acc = metric_counts['correct_arguments']/ (metric_counts['predicted_tuples'] + 1e-08)
argtype_acc = metric_counts['correct_argment_types']/ (metric_counts['predicted_tuples'] + 1e-08)
role_acc = metric_counts['correct_argument_roles']/ (metric_counts['predicted_tuples'] + 1e-08)
print(f'Event Trigger Word Accuracy: {event_acc}')
print(f'Event Type Accuracy: {evtype_acc}')
print(f'Argument Identification Accuracy: {argument_acc}')
print(f'Argument Type Accuracy: {argtype_acc}')
print(f'Argument Role Accuracy: {role_acc}')
print(f'Macro f-score: {f1}')
targ_file = os.path.join(trg_data_folder, 'Results_logger.txt')
f = open(targ_file, "a")
f.write(f'Event Trigger Word Accuracy: {event_acc}')
f.write("\n")
f.write(f'Event Type Accuracy: {evtype_acc}')
f.write("\n")
f.write(f'Argument Identification Accuracy: {argument_acc}')
f.write("\n")
f.write(f'Argument Type Accuracy: {argtype_acc}')
f.write("\n")
f.write(f'Argument Role Accuracy: {role_acc}')
f.write("\n")
f.write(f'Macro f-score: {f1}')
f.write("\n")
f.close()
return f1
def cal_f1_for_pair(ref_tuple: str ,
pred_tuple: str,
event_lines: list
) -> list:
ref_strings = ref_tuple.split(';')
pred_strings = pred_tuple.split(';')
ev1 = int( check_event_trigger(ref_strings[0].strip(), pred_strings[0].strip()) )
ev2 = int( check_event_type(ref_strings[1].strip(), pred_strings[1].strip(), event_lines) )
return ev1, ev2
def cal_f1_for_tuple(ref_tuple: str ,
pred_tuple: str,
event_lines: list,
argument_lines: list,
roles_lines: list
) -> list:
ref_strings = ref_tuple.split(';')
pred_strings = pred_tuple.split(';')
if (len (pred_strings) != 5 ):
if (len (pred_strings) >= 2 ):
ev1 = int( check_event_trigger(ref_strings[0].strip(), pred_strings[0].strip()) )
ev2 = int( check_event_type(ref_strings[1].strip(), pred_strings[1].strip(), event_lines) )
return [ev1, ev2, 0, 0, 0]
return list([0,0,0,0,0])
ev1 = int( check_event_trigger(ref_strings[0].strip(), pred_strings[0].strip()) )
ev2 = int( check_event_type(ref_strings[1].strip(), pred_strings[1].strip(), event_lines) )
ev3 = int( check_event_argument(ref_strings[2].strip(), pred_strings[2].strip()) )
ev4 = int( check_argument_type(ref_strings[3].strip(), pred_strings[3].strip(), argument_lines) )
ev5 = int( check_argument_role(ref_strings[4].strip(), pred_strings[4].strip(), roles_lines) )
ret = [ev1, ev2, ev3, ev4, ev5]
return ret
def get_model(model_id):
if model_id == 1:
return SeqToSeqModel()
def write_test_res(data, preds, attns, outfile):
writer = open(outfile, 'w')
for i in range(0, len(data)):
pred_words = get_pred_words(preds[i], attns[i], data[i].SrcWords)[:-1]
writer.write(' '.join(pred_words) + '\n')
writer.close()
def set_random_seeds(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if n_gpu > 1:
torch.cuda.manual_seed_all(seed)
def get_max_len(sample_batch):
src_max_len = len(sample_batch[0].SrcWords)
for idx in range(1, len(sample_batch)):
if len(sample_batch[idx].SrcWords) > src_max_len:
src_max_len = len(sample_batch[idx].SrcWords)
trg_max_len = len(sample_batch[0].TrgWords)
for idx in range(1, len(sample_batch)):
if len(sample_batch[idx].TrgWords) > trg_max_len:
trg_max_len = len(sample_batch[idx].TrgWords)
return src_max_len, trg_max_len
def get_words_index_seq(words, max_len):
seq = list()
for word in words:
if word in word_vocab:
seq.append(word_vocab[word])
else:
seq.append(word_vocab['<UNK>'])
pad_len = max_len - len(words)
for i in range(0, pad_len):
seq.append(word_vocab['<PAD>'])
return seq
def get_target_words_index_seq(words, max_len):
seq = list()
for word in words:
if word in word_vocab:
seq.append(word_vocab[word])
else:
seq.append(word_vocab['<UNK>'])
pad_len = max_len - len(words)
for i in range(0, pad_len):
seq.append(word_vocab['<EOS>'])
return seq
def get_padded_mask(cur_len, max_len):
mask_seq = list()
for i in range(0, cur_len):
mask_seq.append(0)
pad_len = max_len - cur_len
for i in range(0, pad_len):
mask_seq.append(1)
return mask_seq
def get_target_vocab_mask(src_words):
mask = []
for i in range(0, len(word_vocab)):
mask.append(1)
for word in src_words:
if word in word_vocab:
mask[word_vocab[word]] = 0
for event in events:
mask[word_vocab[event]] = 0
for argument in arguments:
mask[word_vocab[argument]] = 0
for role in roles:
mask[word_vocab[role]] = 0
mask[word_vocab['<UNK>']] = 0
mask[word_vocab['<EOS>']] = 0
mask[word_vocab[';']] = 0
mask[word_vocab['|']] = 0
return mask
def get_rel_mask(trg_words, max_len):
mask_seq = list()
for word in trg_words:
mask_seq.append(0)
pad_len = max_len - len(trg_words)
for i in range(0, pad_len):
mask_seq.append(1)
return mask_seq
def get_char_seq(words, max_len):
char_seq = list()
for i in range(0, conv_filter_size - 1):
char_seq.append(char_vocab['<PAD>'])
for word in words:
for c in word[0:min(len(word), max_word_len)]:
if c in char_vocab:
char_seq.append(char_vocab[c])
else:
char_seq.append(char_vocab['<UNK>'])
pad_len = max_word_len - len(word)
for i in range(0, pad_len):
char_seq.append(char_vocab['<PAD>'])
for i in range(0, conv_filter_size - 1):
char_seq.append(char_vocab['<PAD>'])
pad_len = max_len - len(words)
for i in range(0, pad_len):
for i in range(0, max_word_len + conv_filter_size - 1):
char_seq.append(char_vocab['<PAD>'])
return char_seq
def get_relations(file_name):
rels = []
reader = open(file_name)
lines = reader.readlines()
reader.close()
for line in lines:
rels.append(line.strip())
return rels
def get_batch_data(cur_samples, is_training=False):
batch_src_max_len, batch_trg_max_len = get_max_len(cur_samples)
src_words_list = list()
src_words_mask_list = list()
src_char_seq = list()
trg_words_list = list()
trg_vocab_mask = list()
adj_lst = []
target = list()
cnt = 0
for sample in cur_samples:
src_words_list.append(get_words_index_seq(sample.SrcWords, batch_src_max_len))
src_words_mask_list.append(get_padded_mask(sample.SrcLen, batch_src_max_len))
src_char_seq.append(get_char_seq(sample.SrcWords, batch_src_max_len))
trg_vocab_mask.append(get_target_vocab_mask(sample.SrcWords))
if is_training:
padded_trg_words = get_words_index_seq(sample.TrgWords, batch_trg_max_len)
trg_words_list.append(padded_trg_words)
target.append(padded_trg_words[1:])
else:
trg_words_list.append(get_words_index_seq(['<SOS>'], 1))
cnt += 1
return {'src_words': np.array(src_words_list, dtype=np.float32),
'src_chars': np.array(src_char_seq),
'src_words_mask': np.array(src_words_mask_list),
'adj': np.array(adj_lst),
'trg_vocab_mask': np.array(trg_vocab_mask),
'trg_words': np.array(trg_words_list, dtype=np.int32),
'target': np.array(target)}
def shuffle_data(data):
custom_print(len(data))
data.sort(key=lambda x: x.SrcLen)
num_batch = int(len(data) / batch_size)
rand_idx = random.sample(range(num_batch), num_batch)
new_data = []
for idx in rand_idx:
new_data += data[batch_size * idx: batch_size * (idx + 1)]
if len(new_data) < len(data):
new_data += data[num_batch * batch_size:]
return new_data
def get_pred_words(preds, attns, src_words):
pred_words = []
for i in range(0, max_trg_len):
word_idx = preds[i]
if word_vocab['<EOS>'] == word_idx:
pred_words.append('<EOS>')
break
elif att_type != 'None' and copy_on and word_vocab['<UNK>'] == word_idx:
word_idx = attns[i]
pred_words.append(src_words[word_idx])
else:
pred_words.append(rev_word_vocab[word_idx])
return pred_words
class WordEmbeddings(nn.Module):
def __init__(self, vocab_size, embed_dim, pre_trained_embed_matrix, drop_out_rate):
super(WordEmbeddings, self).__init__()
self.embeddings = nn.Embedding(vocab_size, embed_dim, padding_idx=0)
self.embeddings.weight.data.copy_(torch.from_numpy(pre_trained_embed_matrix))
self.dropout = nn.Dropout(drop_out_rate)
def forward(self, words_seq):
word_embeds = self.embeddings(words_seq)
word_embeds = self.dropout(word_embeds)
return word_embeds
def weight(self):
return self.embeddings.weight
class CharEmbeddings(nn.Module):
def __init__(self, vocab_size, embed_dim, drop_out_rate):
super(CharEmbeddings, self).__init__()
self.embeddings = nn.Embedding(vocab_size, embed_dim, padding_idx=0)
self.dropout = nn.Dropout(drop_out_rate)
def forward(self, words_seq):
char_embeds = self.embeddings(words_seq)
char_embeds = self.dropout(char_embeds)
return char_embeds
class Encoder(nn.Module):
def __init__(self, input_dim, hidden_dim, layers, is_bidirectional, drop_out_rate):
super(Encoder, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.layers = layers
self.is_bidirectional = is_bidirectional
self.drop_rate = drop_out_rate
self.char_embeddings = CharEmbeddings(len(char_vocab), char_embed_dim, drop_rate)
self.lstm = nn.LSTM(self.input_dim, self.hidden_dim, self.layers, batch_first=True,
bidirectional=self.is_bidirectional)
self.dropout = nn.Dropout(self.drop_rate)
self.conv1d = nn.Conv1d(char_embed_dim, char_feature_size, conv_filter_size)
self.max_pool = nn.MaxPool1d(max_word_len + conv_filter_size - 1, max_word_len + conv_filter_size - 1)
def forward(self, words_input, char_seq, adj, is_training=False):
char_embeds = self.char_embeddings(char_seq)
char_embeds = char_embeds.permute(0, 2, 1)
char_feature = torch.tanh(self.max_pool(self.conv1d(char_embeds)))
char_feature = char_feature.permute(0, 2, 1)
words_input = torch.cat((words_input, char_feature), -1)
outputs, hc = self.lstm(words_input)
outputs = self.dropout(outputs)
return outputs
class Attention(nn.Module):
def __init__(self, input_dim):
super(Attention, self).__init__()
self.input_dim = input_dim
self.linear_ctx = nn.Linear(self.input_dim, self.input_dim, bias=False)
self.linear_query = nn.Linear(self.input_dim, self.input_dim, bias=True)
self.v = nn.Linear(self.input_dim, 1)
def forward(self, s_prev, enc_hs, src_mask):
uh = self.linear_ctx(enc_hs)
wq = self.linear_query(s_prev)
wquh = torch.tanh(wq + uh)
attn_weights = self.v(wquh).squeeze()
attn_weights.data.masked_fill_(src_mask.data, -float('inf'))
attn_weights = F.softmax(attn_weights, dim=-1)
ctx = torch.bmm(attn_weights.unsqueeze(1), enc_hs).squeeze()
return ctx, attn_weights
class NGram_Attention(nn.Module):
def __init__(self, input_dim, N):
super(NGram_Attention, self).__init__()
self.input_dim = input_dim
self.layers = N
self.V_layers = nn.ModuleList()
self.W_layers = nn.ModuleList()
for i in range(N):
self.V_layers.append(nn.Linear(input_dim, input_dim))
self.W_layers.append(nn.Linear(input_dim, input_dim))
def forward(self, s_prev, enc_hs, src_mask):
att = torch.bmm(s_prev.unsqueeze(1), self.V_layers[0](enc_hs).transpose(1, 2)).squeeze()
att.data.masked_fill_(src_mask.data, -float('inf'))
att = F.softmax(att, dim=-1)
ctx = self.W_layers[0](torch.bmm(att.unsqueeze(1), enc_hs).squeeze())
for i in range(1, self.layers):
enc_hs_ngram = torch.nn.AvgPool1d(i+1, 1)(enc_hs.transpose(1, 2)).transpose(1, 2)
n_mask = src_mask.unsqueeze(1).float()
n_mask = torch.nn.AvgPool1d(i+1, 1)(n_mask).squeeze()
n_mask[n_mask > 0] = 1
n_mask = n_mask.byte()
n_att = torch.bmm(s_prev.unsqueeze(1), self.V_layers[i](enc_hs_ngram).transpose(1, 2)).squeeze()
n_att.data.masked_fill_(n_mask.data, -float('inf'))
n_att = F.softmax(n_att, dim=-1)
ctx += self.W_layers[i](torch.bmm(n_att.unsqueeze(1), enc_hs_ngram).squeeze())
return ctx, att
def mean_over_time(x, mask):
x.data.masked_fill_(mask.unsqueeze(2).data, 0)
x = torch.sum(x, dim=1)
time_steps = torch.sum(mask.eq(0), dim=1, keepdim=True).float()
x /= time_steps
return x
class Decoder(nn.Module):
def __init__(self, input_dim, hidden_dim, layers, drop_out_rate, max_length):
super(Decoder, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.layers = layers
self.drop_rate = drop_out_rate
self.max_length = max_length
if att_type == 'None':
self.lstm = nn.LSTMCell(2 * self.input_dim, self.hidden_dim, self.layers)
elif att_type == 'Unigram':
self.attention = Attention(input_dim)
self.lstm = nn.LSTMCell(2 * self.input_dim, self.hidden_dim, self.layers)
else:
self.attention = NGram_Attention(input_dim, 3)
self.lstm = nn.LSTMCell(3 * self.input_dim, self.hidden_dim, self.layers)
self.dropout = nn.Dropout(self.drop_rate)
self.ent_out = nn.Linear(self.input_dim, len(word_vocab))
def forward(self, y_prev, h_prev, enc_hs, src_word_embeds, src_mask, is_training=False):
src_time_steps = enc_hs.size()[1]
if att_type == 'None':
ctx = mean_over_time(enc_hs, src_mask)
attn_weights = torch.zeros(src_mask.size()).cuda()
elif att_type == 'Unigram':
s_prev = h_prev[0]
s_prev = s_prev.unsqueeze(1)
s_prev = s_prev.repeat(1, src_time_steps, 1)
ctx, attn_weights = self.attention(s_prev, enc_hs, src_mask)
else:
last_index = src_mask.size()[1] - torch.sum(src_mask, dim=-1).long() - 1
last_index = last_index.unsqueeze(1).unsqueeze(1).repeat(1, 1, enc_hs.size()[-1])
enc_last = torch.gather(enc_hs, 1, last_index).squeeze()
ctx, attn_weights = self.attention(enc_last, src_word_embeds, src_mask)
ctx = torch.cat((enc_last, ctx), -1)
y_prev = y_prev.squeeze()
s_cur = torch.cat((y_prev, ctx), 1)
hidden, cell_state = self.lstm(s_cur, h_prev)
hidden = self.dropout(hidden)
output = self.ent_out(hidden)
return output, (hidden, cell_state), attn_weights
class SeqToSeqModel(nn.Module):
def __init__(self):
super(SeqToSeqModel, self).__init__()
self.word_embeddings = WordEmbeddings(len(word_vocab), word_embed_dim, word_embed_matrix, drop_rate)
self.encoder = Encoder(enc_inp_size, int(enc_hidden_size/2), layers, True, drop_rate)
self.decoder = Decoder(dec_inp_size, dec_hidden_size, layers, drop_rate, max_trg_len)
def forward(self, src_words_seq, src_chars_seq, src_mask, trg_words_seq, trg_vocab_mask, adj, is_training=False):
src_word_embeds = self.word_embeddings(src_words_seq)
trg_word_embeds = self.word_embeddings(trg_words_seq)
batch_len = src_word_embeds.size()[0]
if is_training:
time_steps = trg_word_embeds.size()[1] - 1
else:
time_steps = max_trg_len
encoder_output = self.encoder(src_word_embeds, src_chars_seq, adj, is_training)
h0 = autograd.Variable(torch.FloatTensor(torch.zeros(batch_len, word_embed_dim)))
h0 = h0.cuda()
c0 = autograd.Variable(torch.FloatTensor(torch.zeros(batch_len, word_embed_dim)))
c0 = c0.cuda()
dec_hid = (h0, c0)
if is_training:
dec_inp = trg_word_embeds[:, 0, :]
dec_out, dec_hid, dec_attn = self.decoder(dec_inp, dec_hid, encoder_output, src_word_embeds,
src_mask, is_training)
dec_out = dec_out.view(-1, len(word_vocab))
dec_out = F.log_softmax(dec_out, dim=-1)
dec_out = dec_out.unsqueeze(1)
for t in range(1, time_steps):
dec_inp = trg_word_embeds[:, t, :]
cur_dec_out, dec_hid, dec_attn = self.decoder(dec_inp, dec_hid, encoder_output, src_word_embeds,
src_mask, is_training)
cur_dec_out = cur_dec_out.view(-1, len(word_vocab))
dec_out = torch.cat((dec_out, F.log_softmax(cur_dec_out, dim=-1).unsqueeze(1)), 1)
else:
dec_inp = trg_word_embeds[:, 0, :]
dec_out, dec_hid, dec_attn = self.decoder(dec_inp, dec_hid, encoder_output, src_word_embeds,
src_mask, is_training)
dec_out = dec_out.view(-1, len(word_vocab))
if copy_on:
dec_out.data.masked_fill_(trg_vocab_mask.data, -float('inf'))
dec_out = F.log_softmax(dec_out, dim=-1)
topv, topi = dec_out.topk(1)
dec_out_v, dec_out_i = dec_out.topk(1)
dec_attn_v, dec_attn_i = dec_attn.topk(1)
for t in range(1, time_steps):
dec_inp = self.word_embeddings(topi.squeeze().detach())
cur_dec_out, dec_hid, cur_dec_attn = self.decoder(dec_inp, dec_hid, encoder_output, src_word_embeds,
src_mask, is_training)
cur_dec_out = cur_dec_out.view(-1, len(word_vocab))
if copy_on:
cur_dec_out.data.masked_fill_(trg_vocab_mask.data, -float('inf'))
cur_dec_out = F.log_softmax(cur_dec_out, dim=-1)
topv, topi = cur_dec_out.topk(1)
cur_dec_out_v, cur_dec_out_i = cur_dec_out.topk(1)
dec_out_i = torch.cat((dec_out_i, cur_dec_out_i), 1)
cur_dec_attn_v, cur_dec_attn_i = cur_dec_attn.topk(1)
dec_attn_i = torch.cat((dec_attn_i, cur_dec_attn_i), 1)
if is_training:
dec_out = dec_out.view(-1, len(word_vocab))
return dec_out
else:
return dec_out_i, dec_attn_i
def predict(samples, model, model_id):
pred_batch_size = batch_size
batch_count = math.ceil(len(samples) / pred_batch_size)
move_last_batch = False
if len(samples) - batch_size * (batch_count - 1) == 1:
move_last_batch = True
batch_count -= 1
preds = list()
attns = list()
model.eval()
set_random_seeds(random_seed)
start_time = datetime.datetime.now()
for batch_idx in tqdm(range(0, batch_count)):
batch_start = batch_idx * pred_batch_size
batch_end = min(len(samples), batch_start + pred_batch_size)
if batch_idx == batch_count - 1 and move_last_batch:
batch_end = len(samples)
cur_batch = samples[batch_start:batch_end]
cur_samples_input = get_batch_data(cur_batch, False)
src_words_seq = torch.from_numpy(cur_samples_input['src_words'].astype('long'))
src_words_mask = torch.from_numpy(cur_samples_input['src_words_mask'].astype('uint8'))
trg_vocab_mask = torch.from_numpy(cur_samples_input['trg_vocab_mask'].astype('uint8'))
trg_words_seq = torch.from_numpy(cur_samples_input['trg_words'].astype('long'))
adj = torch.from_numpy(cur_samples_input['adj'].astype('float32'))
src_chars_seq = torch.from_numpy(cur_samples_input['src_chars'].astype('long'))
if torch.cuda.is_available():
src_words_seq = src_words_seq.cuda()
src_words_mask = src_words_mask.cuda()
trg_vocab_mask = trg_vocab_mask.cuda()
trg_words_seq = trg_words_seq.cuda()
adj = adj.cuda()
src_chars_seq = src_chars_seq.cuda()
src_words_seq = autograd.Variable(src_words_seq)
src_words_mask = autograd.Variable(src_words_mask)
trg_vocab_mask = autograd.Variable(trg_vocab_mask)
adj = autograd.Variable(adj)
src_chars_seq = autograd.Variable(src_chars_seq)
trg_words_seq = autograd.Variable(trg_words_seq)
with torch.no_grad():
outputs = model(src_words_seq, src_chars_seq, src_words_mask, trg_words_seq, trg_vocab_mask, adj,False)
preds += list(outputs[0].data.cpu().numpy())
attns += list(outputs[1].data.cpu().numpy())
model.zero_grad()
end_time = datetime.datetime.now()
custom_print('Prediction time:', end_time - start_time)
return preds, attns
def train_model(model_id, train_samples, dev_samples, best_model_file):
train_size = len(train_samples)
batch_count = int(math.ceil(train_size/batch_size))
move_last_batch = False
if len(train_samples) - batch_size * (batch_count - 1) == 1:
move_last_batch = True
batch_count -= 1
custom_print(batch_count)
model = SeqToSeqModel()
pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
custom_print('Parameters size:', pytorch_total_params)
custom_print(model)
if torch.cuda.is_available():
model.cuda()
if n_gpu > 1:
model = torch.nn.DataParallel(model)
criterion = nn.NLLLoss(ignore_index=0)
optimizer = optim.Adam(model.parameters())
custom_print(optimizer)
best_dev_acc = -1.0
best_epoch_idx = -1
best_epoch_seed = -1
for epoch_idx in range(0, num_epoch):
model.train()
model.zero_grad()
custom_print('Epoch:', epoch_idx + 1)
cur_seed = random_seed + epoch_idx + 1
set_random_seeds(cur_seed)
cur_shuffled_train_data = shuffle_data(train_samples)
start_time = datetime.datetime.now()
train_loss_val = 0.0
for batch_idx in tqdm(range(0, batch_count)):
batch_start = batch_idx * batch_size
batch_end = min(len(cur_shuffled_train_data), batch_start + batch_size)
if batch_idx == batch_count - 1 and move_last_batch:
batch_end = len(cur_shuffled_train_data)
cur_batch = cur_shuffled_train_data[batch_start:batch_end]
cur_samples_input = get_batch_data(cur_batch, True)
src_words_seq = torch.from_numpy(cur_samples_input['src_words'].astype('long'))
src_words_mask = torch.from_numpy(cur_samples_input['src_words_mask'].astype('uint8'))
trg_vocab_mask = torch.from_numpy(cur_samples_input['trg_vocab_mask'].astype('uint8'))
trg_words_seq = torch.from_numpy(cur_samples_input['trg_words'].astype('long'))
adj = torch.from_numpy(cur_samples_input['adj'].astype('float32'))
src_chars_seq = torch.from_numpy(cur_samples_input['src_chars'].astype('long'))
target = torch.from_numpy(cur_samples_input['target'].astype('long'))
if torch.cuda.is_available():
src_words_seq = src_words_seq.cuda()
src_words_mask = src_words_mask.cuda()
trg_vocab_mask = trg_vocab_mask.cuda()
trg_words_seq = trg_words_seq.cuda()
adj = adj.cuda()
src_chars_seq = src_chars_seq.cuda()
target = target.cuda()
src_words_seq = autograd.Variable(src_words_seq)
src_words_mask = autograd.Variable(src_words_mask)
trg_vocab_mask = autograd.Variable(trg_vocab_mask)
trg_words_seq = autograd.Variable(trg_words_seq)
adj = autograd.Variable(adj)
src_chars_seq = autograd.Variable(src_chars_seq)
target = autograd.Variable(target)
outputs = model(src_words_seq, src_chars_seq, src_words_mask, trg_words_seq, trg_vocab_mask, adj, True)
target = target.view(-1, 1).squeeze()
loss = criterion(outputs, target)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 10.0)
if (batch_idx + 1) % update_freq == 0:
optimizer.step()
model.zero_grad()
train_loss_val += loss.item()
train_loss_val /= batch_count
end_time = datetime.datetime.now()
custom_print('Training loss:', train_loss_val)
custom_print('Training time:', end_time - start_time)
custom_print('\nDev Results\n')
set_random_seeds(random_seed)
dev_preds, dev_attns = predict(dev_samples, model, model_id)
write_test_res(dev_samples, dev_preds, dev_attns, os.path.join(trg_data_folder, 'dev.out'))
ref_lines = open(trg_dev_file).read().splitlines()
pred_lines = open(os.path.join(trg_data_folder, 'dev.out')).read().splitlines()
event_lines = open(events_file).read().splitlines()
argument_lines = open(arguments_file).read().splitlines()
roles_lines = open(roles_file).read().splitlines()
dev_acc = calculate_f1(ref_lines, pred_lines, event_lines, argument_lines, roles_lines)
if dev_acc >= best_dev_acc:
best_epoch_idx = epoch_idx + 1
best_epoch_seed = cur_seed
custom_print('model saved......')
best_dev_acc = dev_acc
torch.save(model.state_dict(), best_model_file)
custom_print('\n\n')
if epoch_idx + 1 - best_epoch_idx >= early_stop_cnt:
break
custom_print('*******')
custom_print('Best Epoch:', best_epoch_idx)
custom_print('Best Epoch Seed:', best_epoch_seed)
if __name__ == "__main__":
os.environ['CUDA_VISIBLE_DEVICES'] = sys.argv[1]
random_seed = int(sys.argv[2])
src_data_folder = sys.argv[3]
trg_data_folder = sys.argv[4]
job_mode = sys.argv[5]
embedding_type = sys.argv[6]
granular_mode = 1
n_gpu = torch.cuda.device_count()
set_random_seeds(random_seed)
if not os.path.exists(trg_data_folder):
os.mkdir(trg_data_folder)
model_name = 1
batch_size = 32
num_epoch = 30
max_src_len = 100
max_trg_len = 50
if embedding_type == 'w2v':
embedding_file = os.path.join(src_data_folder, 'w2v.txt')
else:
embedding_file = os.path.join(src_data_folder, 'Bert_embeddings.txt')
update_freq = 1
enc_type = ['LSTM', 'GCN', 'LSTM-GCN'][0]
att_type = ['None', 'Unigram', 'N-Gram-Enc'][1]
copy_on = True
gcn_num_layers = 3
if embedding_type == 'w2v':
word_embed_dim = 300
else:
word_embed_dim = 768
word_min_freq = 2
char_embed_dim = 50
char_feature_size = 50
conv_filter_size = 3
max_word_len = 10
enc_inp_size = word_embed_dim + char_feature_size
enc_hidden_size = word_embed_dim
dec_inp_size = enc_hidden_size
dec_hidden_size = dec_inp_size
drop_rate = 0.3
layers = 1
early_stop_cnt = 20
sample_cnt = 0
Sample = recordclass("Sample", "Id SrcLen SrcWords TrgLen TrgWords")
events_file = os.path.join(src_data_folder, 'event_types.txt')
arguments_file = os.path.join(src_data_folder, 'arguments.txt')
roles_file = os.path.join(src_data_folder, 'roles.txt')
events = get_relations(events_file)
arguments = get_relations(arguments_file)
roles = get_relations(roles_file)
if job_mode == 'train':
logger = open(os.path.join(trg_data_folder, 'training.log'), 'w')
custom_print(sys.argv)
custom_print(max_src_len, max_trg_len, drop_rate, layers)
custom_print('loading data......')
model_file_name = os.path.join(trg_data_folder, 'model.h5py')
src_train_file = os.path.join(src_data_folder, 'train.sent')
trg_train_file = os.path.join(src_data_folder, 'train.tup')
train_data = read_data(src_train_file, trg_train_file, 1)
src_dev_file = os.path.join(src_data_folder, 'dev.sent')
trg_dev_file = os.path.join(src_data_folder, 'dev.tup')
dev_data = read_data(src_dev_file, trg_dev_file, 2)
custom_print('Training data size:', len(train_data))
custom_print('Development data size:', len(dev_data))
custom_print("preparing vocabulary......")
save_vocab = os.path.join(trg_data_folder, 'vocab.pkl')
word_vocab, rev_word_vocab, char_vocab, word_embed_matrix = build_vocab(train_data, events, arguments, roles, save_vocab,
embedding_file)
custom_print("Training started......")
train_model(model_name, train_data, dev_data, model_file_name)
logger.close()
if job_mode == 'test':
logger = open(os.path.join(trg_data_folder, 'test.log'), 'w')
custom_print(sys.argv)
custom_print("loading word vectors......")
vocab_file_name = os.path.join(trg_data_folder, 'vocab.pkl')
word_vocab, char_vocab = load_vocab(vocab_file_name)
rev_word_vocab = OrderedDict()
for word in word_vocab:
idx = word_vocab[word]
rev_word_vocab[idx] = word
word_embed_matrix = np.zeros((len(word_vocab), word_embed_dim), dtype=np.float32)
custom_print('vocab size:', len(word_vocab))
src_test_file = os.path.join(src_data_folder, 'test.sent')
trg_test_file = os.path.join(src_data_folder, 'test.tup')
test_data = read_data(src_test_file, trg_test_file, 3)
custom_print('Test data size:', len(test_data))
custom_print('seed:', random_seed)
model_file = os.path.join(trg_data_folder, 'model.h5py')
best_model = get_model(model_name)
custom_print(best_model)
if torch.cuda.is_available():
best_model.cuda()
if n_gpu > 1:
best_model = torch.nn.DataParallel(best_model)
best_model.load_state_dict(torch.load(model_file))
custom_print('\nTest Results\n')
set_random_seeds(random_seed)
test_preds, test_attns = predict(test_data, best_model, model_name)
custom_print('Copy On')
write_test_res(test_data, test_preds, test_attns, os.path.join(trg_data_folder, 'test.out'))
ref_lines = open(trg_test_file).read().splitlines()
pred_lines = open(os.path.join(trg_data_folder, 'test.out')).read().splitlines()
event_lines = open(events_file).read().splitlines()
argument_lines = open(arguments_file).read().splitlines()
roles_lines = open(roles_file).read().splitlines()
mode = 1
custom_print('Overall F1')
calculate_f1(ref_lines, pred_lines, event_lines, argument_lines, roles_lines)
copy_on = False
custom_print('Copy Off')
set_random_seeds(random_seed)
test_preds, test_attns = predict(test_data, best_model, model_name)
write_test_res(test_data, test_preds, test_attns, os.path.join(trg_data_folder, 'test_without_copy.out'))
ref_lines = open(trg_test_file).read().splitlines()
pred_lines = open(os.path.join(trg_data_folder, 'test_without_copy.out')).read().splitlines()
event_lines = open(events_file).read().splitlines()
argument_lines = open(arguments_file).read().splitlines()
roles_lines = open(roles_file).read().splitlines()
mode = 1
custom_print('Overall F1')
calculate_f1(ref_lines, pred_lines, event_lines, argument_lines, roles_lines)
logger.close()
| true
| true
|
f718cce7e15087472b225e39690a19a8bdf903bc
| 2,877
|
py
|
Python
|
action-app_template.py
|
metal3d/snips-app-template-py
|
ba5f3f8d6e24b886e5177fb948deb9a87d4e354c
|
[
"MIT"
] | null | null | null |
action-app_template.py
|
metal3d/snips-app-template-py
|
ba5f3f8d6e24b886e5177fb948deb9a87d4e354c
|
[
"MIT"
] | null | null | null |
action-app_template.py
|
metal3d/snips-app-template-py
|
ba5f3f8d6e24b886e5177fb948deb9a87d4e354c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from snipsTools import SnipsConfigParser
from hermes_python.hermes import Hermes
# imported for type checking and IDE completion
from hermes_python.ontology.dialogue.intent import IntentMessage
CONFIG_INI = "config.ini"
# If this skill is supposed to run on the satellite,
# please get this mqtt connection info from <config.ini>
# Hint: MQTT server is always running on the master device
MQTT_IP_ADDR: str = "localhost"
MQTT_PORT: int = 1883
MQTT_ADDR: str = "{}:{}".format(MQTT_IP_ADDR, str(MQTT_PORT))
class Template(object):
"""Class used to wrap action code with mqtt connection
    Please rename the class to match your application
"""
def __init__(self):
# get the configuration if needed
try:
self.config = SnipsConfigParser.read_configuration_file(CONFIG_INI)
except Exception:
self.config = None
# start listening to MQTT
self.start_blocking()
@staticmethod
def intent_1_callback(self,
hermes: Hermes,
intent_message: IntentMessage):
# terminate the session first if not continue
hermes.publish_end_session(intent_message.session_id, "")
# action code goes here...
print('[Received] intent: {}'.format(
intent_message.intent.intent_name))
# if need to speak the execution result by tts
hermes.publish_start_session_notification(
intent_message.site_id,
"Action 1", "")
@staticmethod
def intent_2_callback(self,
hermes: Hermes,
intent_message: IntentMessage):
# terminate the session first if not continue
hermes.publish_end_session(intent_message.session_id, "")
# action code goes here...
print('[Received] intent: {}'.format(
intent_message.intent.intent_name))
# if need to speak the execution result by tts
hermes.publish_start_session_notification(
intent_message.site_id,
"Action 2", "")
@staticmethod
def master_intent_callback(self,
hermes: Hermes,
intent_message: IntentMessage,):
coming_intent = intent_message.intent.intent_name
if coming_intent == 'intent_1':
self.intent_1_callback(hermes, intent_message)
if coming_intent == 'intent_2':
self.intent_2_callback(hermes, intent_message)
# more callback and if condition goes here...
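        # Illustrative sketch only ('intent_3' and intent_3_callback are hypothetical
        # names you would define yourself, mirroring the two callbacks above):
        # if coming_intent == 'intent_3':
        #     self.intent_3_callback(hermes, intent_message)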
# --> Register callback function and start MQTT
def start_blocking(self):
with Hermes(MQTT_ADDR) as h:
h.subscribe_intents(self.master_intent_callback).start()
if __name__ == "__main__":
Template()
| 31.966667
| 79
| 0.638165
|
from snipsTools import SnipsConfigParser
from hermes_python.hermes import Hermes
from hermes_python.ontology.dialogue.intent import IntentMessage
CONFIG_INI = "config.ini"
MQTT_IP_ADDR: str = "localhost"
MQTT_PORT: int = 1883
MQTT_ADDR: str = "{}:{}".format(MQTT_IP_ADDR, str(MQTT_PORT))
class Template(object):
def __init__(self):
try:
self.config = SnipsConfigParser.read_configuration_file(CONFIG_INI)
except Exception:
self.config = None
self.start_blocking()
@staticmethod
def intent_1_callback(self,
hermes: Hermes,
intent_message: IntentMessage):
hermes.publish_end_session(intent_message.session_id, "")
print('[Received] intent: {}'.format(
intent_message.intent.intent_name))
hermes.publish_start_session_notification(
intent_message.site_id,
"Action 1", "")
@staticmethod
def intent_2_callback(self,
hermes: Hermes,
intent_message: IntentMessage):
hermes.publish_end_session(intent_message.session_id, "")
print('[Received] intent: {}'.format(
intent_message.intent.intent_name))
hermes.publish_start_session_notification(
intent_message.site_id,
"Action 2", "")
@staticmethod
def master_intent_callback(self,
hermes: Hermes,
intent_message: IntentMessage,):
coming_intent = intent_message.intent.intent_name
if coming_intent == 'intent_1':
self.intent_1_callback(hermes, intent_message)
if coming_intent == 'intent_2':
self.intent_2_callback(hermes, intent_message)
def start_blocking(self):
with Hermes(MQTT_ADDR) as h:
h.subscribe_intents(self.master_intent_callback).start()
if __name__ == "__main__":
Template()
| true
| true
|
f718cf53c5de4dffb4d199ec0f98de476584ce74
| 1,133
|
py
|
Python
|
rubberband/utils/hasher.py
|
ambros-gleixner/rubberband
|
72dd935dbc4bed93860fdcaa0cbe752bcbd6e395
|
[
"MIT"
] | 4
|
2018-03-25T15:01:20.000Z
|
2020-06-22T14:34:01.000Z
|
rubberband/utils/hasher.py
|
ambros-gleixner/rubberband
|
72dd935dbc4bed93860fdcaa0cbe752bcbd6e395
|
[
"MIT"
] | 41
|
2016-12-19T21:17:41.000Z
|
2021-12-13T19:50:34.000Z
|
rubberband/utils/hasher.py
|
ambros-gleixner/rubberband
|
72dd935dbc4bed93860fdcaa0cbe752bcbd6e395
|
[
"MIT"
] | 1
|
2017-10-06T13:52:57.000Z
|
2017-10-06T13:52:57.000Z
|
"""Functions to deal with hases."""
import hashlib
def read_in_chunks(file_object, chunk_size=1024):
"""
Lazy function (generator) to read a file piece by piece.
Default chunk size: 1k.
borrowed from http://stackoverflow.com/a/519653
    Parameters
    ----------
    file_object : file object
        File to read in chunks.
    chunk_size : int
        Size of the chunks to read (default 1024).
    Yields
    ------
    bytes
        The next chunk of data read from the file.
"""
while True:
data = file_object.read(chunk_size)
if not data:
break
yield data
def generate_sha256_hash(filepath):
"""
    Take a filepath and return the hex digest of its SHA-256 hash.
Parameters
----------
filepath : str
Path to file.
    Returns
    -------
    str or None
        Hex digest of the SHA-256 hash, or None if the file cannot be opened.
"""
sha_result = hashlib.sha256()
try:
file_object = open(filepath, "rb")
except IOError:
return None
for chunk in read_in_chunks(file_object):
sha_result.update(chunk)
file_object.close()
return sha_result.hexdigest()
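# Minimal usage sketch (illustrative only; 'example.bin' is a placeholder path,
# not a file shipped with this module):
if __name__ == "__main__":
    digest = generate_sha256_hash("example.bin")
    # generate_sha256_hash returns None when the file cannot be opened
    print(digest if digest is not None else "could not read file")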
| 18.883333
| 60
| 0.596646
|
import hashlib
def read_in_chunks(file_object, chunk_size=1024):
while True:
data = file_object.read(chunk_size)
if not data:
break
yield data
def generate_sha256_hash(filepath):
sha_result = hashlib.sha256()
try:
file_object = open(filepath, "rb")
except IOError:
return None
for chunk in read_in_chunks(file_object):
sha_result.update(chunk)
file_object.close()
return sha_result.hexdigest()
| true
| true
|
f718d0527694a1af8cb31e96efd2f96edcedbbe0
| 1,309
|
py
|
Python
|
test/unit_test_fdp_to_approxdp_conversion.py
|
samellem/autodp
|
fd14fed07e0bb67fca5f7e82bbdab6cf60b339d3
|
[
"Apache-2.0"
] | 158
|
2019-04-16T15:13:27.000Z
|
2022-03-29T17:41:20.000Z
|
test/unit_test_fdp_to_approxdp_conversion.py
|
samellem/autodp
|
fd14fed07e0bb67fca5f7e82bbdab6cf60b339d3
|
[
"Apache-2.0"
] | 10
|
2019-09-17T19:42:29.000Z
|
2021-09-23T16:54:20.000Z
|
test/unit_test_fdp_to_approxdp_conversion.py
|
samellem/autodp
|
fd14fed07e0bb67fca5f7e82bbdab6cf60b339d3
|
[
"Apache-2.0"
] | 37
|
2019-04-17T18:26:03.000Z
|
2022-03-29T14:31:06.000Z
|
from autodp.mechanism_zoo import GaussianMechanism
from autodp.dp_bank import get_eps_ana_gaussian
import numpy as np
from absl.testing import absltest
from absl.testing import parameterized
params = [0.05, 0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0]
def _fdp_conversion(sigma):
delta_list = [0,1e-8, 1e-6, 1e-4, 1e-2, 0.3, 0.5, 1]
# f-DP implementation
gm3 = GaussianMechanism(sigma, name='GM3', RDP_off=True, approxDP_off=True, fdp_off=False)
# direct approxdp implementation
agm = lambda x: get_eps_ana_gaussian(sigma, x)
eps_direct = np.array([agm(delta) for delta in delta_list])
# the fdp is converted by numerical methods from privacy profile.
eps_converted = np.array([gm3.get_approxDP(delta) for delta in delta_list])
max_diff = eps_direct - eps_converted
rel_diff = max_diff / (eps_direct+1e-10)
if np.isinf(eps_direct[0]) and np.isinf(eps_converted[0]):
rel_diff[0] = 0
return rel_diff
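# module-level warm-up call below; presumably a quick smoke test of the conversion
# before the parameterized test cases run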
_fdp_conversion(1.0)
class Test_approxDP2fDP_Conversion(parameterized.TestCase):
@parameterized.parameters(p for p in params)
def test_fdp_conversion(self, sigma):
max_diff = _fdp_conversion(sigma)
self.assertSequenceAlmostEqual(max_diff, np.zeros_like(max_diff), places=2)
if __name__ == '__main__':
absltest.main()
| 27.270833
| 94
| 0.718869
|
from autodp.mechanism_zoo import GaussianMechanism
from autodp.dp_bank import get_eps_ana_gaussian
import numpy as np
from absl.testing import absltest
from absl.testing import parameterized
params = [0.05, 0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0]
def _fdp_conversion(sigma):
delta_list = [0,1e-8, 1e-6, 1e-4, 1e-2, 0.3, 0.5, 1]
gm3 = GaussianMechanism(sigma, name='GM3', RDP_off=True, approxDP_off=True, fdp_off=False)
agm = lambda x: get_eps_ana_gaussian(sigma, x)
eps_direct = np.array([agm(delta) for delta in delta_list])
eps_converted = np.array([gm3.get_approxDP(delta) for delta in delta_list])
max_diff = eps_direct - eps_converted
rel_diff = max_diff / (eps_direct+1e-10)
if np.isinf(eps_direct[0]) and np.isinf(eps_converted[0]):
rel_diff[0] = 0
return rel_diff
_fdp_conversion(1.0)
class Test_approxDP2fDP_Conversion(parameterized.TestCase):
@parameterized.parameters(p for p in params)
def test_fdp_conversion(self, sigma):
max_diff = _fdp_conversion(sigma)
self.assertSequenceAlmostEqual(max_diff, np.zeros_like(max_diff), places=2)
if __name__ == '__main__':
absltest.main()
| true
| true
|
f718d07f43521deaf105e5ff03b5e17fd7bdf28f
| 263
|
py
|
Python
|
tests/urls.py
|
systemallica/django-websocket-notifications
|
eae304b021eb14d818d3a1fa5dd18bf791eb4197
|
[
"MIT"
] | null | null | null |
tests/urls.py
|
systemallica/django-websocket-notifications
|
eae304b021eb14d818d3a1fa5dd18bf791eb4197
|
[
"MIT"
] | null | null | null |
tests/urls.py
|
systemallica/django-websocket-notifications
|
eae304b021eb14d818d3a1fa5dd18bf791eb4197
|
[
"MIT"
] | null | null | null |
from django.urls import include, path
urlpatterns = [
path(
"websocket-notifications/",
include("websocket_notifications.urls", namespace="websocket_notifications"),
),
path("api/v1/", include("tests.router", namespace="api_v1")),
]
| 23.909091
| 85
| 0.669202
|
from django.urls import include, path
urlpatterns = [
path(
"websocket-notifications/",
include("websocket_notifications.urls", namespace="websocket_notifications"),
),
path("api/v1/", include("tests.router", namespace="api_v1")),
]
| true
| true
|
f718d0a3f9b64d814641388c917ca2626dab7a70
| 263
|
py
|
Python
|
tests/bugs/issue_34/test_artificial_1024_inv_constant_30__20.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
tests/bugs/issue_34/test_artificial_1024_inv_constant_30__20.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | 1
|
2019-11-30T23:39:38.000Z
|
2019-12-01T04:34:35.000Z
|
tests/bugs/issue_34/test_artificial_1024_inv_constant_30__20.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "constant", cycle_length = 30, transform = "inv", sigma = 0.0, exog_count = 20, ar_order = 0);
| 43.833333
| 159
| 0.730038
|
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "constant", cycle_length = 30, transform = "inv", sigma = 0.0, exog_count = 20, ar_order = 0);
| true
| true
|
f718d0afae215cd7135b82b727c09e9c0f4ed187
| 180
|
py
|
Python
|
tdapp/forms.py
|
shagun-agrawal/ToDo-App
|
7b9c60ca8cf2a431ac9b1b46d8a7dc1054489229
|
[
"MIT"
] | 1
|
2021-05-05T12:42:58.000Z
|
2021-05-05T12:42:58.000Z
|
tdapp/forms.py
|
shagun-agrawal/ToDo-App
|
7b9c60ca8cf2a431ac9b1b46d8a7dc1054489229
|
[
"MIT"
] | null | null | null |
tdapp/forms.py
|
shagun-agrawal/ToDo-App
|
7b9c60ca8cf2a431ac9b1b46d8a7dc1054489229
|
[
"MIT"
] | null | null | null |
from django import forms
from django.forms import ModelForm
from .models import *
class TodoForm(forms.ModelForm):
class Meta:
model=Todo
fields='__all__'
| 25.714286
| 35
| 0.694444
|
from django import forms
from django.forms import ModelForm
from .models import *
class TodoForm(forms.ModelForm):
class Meta:
model=Todo
fields='__all__'
| true
| true
|
f718d0be8f645f8f092c72408323f1add9cb3ad9
| 1,127
|
py
|
Python
|
tempest/tests/fake_tempest_plugin.py
|
KiranPawar72/tempest
|
1fef3dd92b083055793065dd0693454735ec2c01
|
[
"Apache-2.0"
] | 3
|
2016-07-15T12:27:23.000Z
|
2021-04-23T04:41:10.000Z
|
tempest/tests/fake_tempest_plugin.py
|
LIS/lis-tempest
|
8e6403b2d6de81c5d18ed867b4977385c8278b75
|
[
"Apache-2.0"
] | null | null | null |
tempest/tests/fake_tempest_plugin.py
|
LIS/lis-tempest
|
8e6403b2d6de81c5d18ed867b4977385c8278b75
|
[
"Apache-2.0"
] | 12
|
2016-07-14T18:13:05.000Z
|
2017-07-08T18:45:42.000Z
|
# Copyright (c) 2015 Deutsche Telekom AG
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.test_discover import plugins
class FakePlugin(plugins.TempestPlugin):
expected_load_test = ["my/test/path", "/home/dir"]
def load_tests(self):
return self.expected_load_test
def register_opts(self, conf):
return
def get_opt_lists(self):
return []
class FakeStevedoreObj(object):
obj = FakePlugin()
@property
def name(self):
return self._name
def __init__(self, name='Test1'):
self._name = name
| 27.487805
| 78
| 0.694765
|
from tempest.test_discover import plugins
class FakePlugin(plugins.TempestPlugin):
expected_load_test = ["my/test/path", "/home/dir"]
def load_tests(self):
return self.expected_load_test
def register_opts(self, conf):
return
def get_opt_lists(self):
return []
class FakeStevedoreObj(object):
obj = FakePlugin()
@property
def name(self):
return self._name
def __init__(self, name='Test1'):
self._name = name
| true
| true
|
f718d1040e1e703f3c3d716f7b5e0629af91e0bf
| 421
|
py
|
Python
|
project/utilities/context_processors.py
|
Aman2244-hub/ducss-site-old
|
42bb7d4938f2500cc3e709c2f2e34cc11dacbe7c
|
[
"MIT"
] | 1
|
2020-10-19T17:25:33.000Z
|
2020-10-19T17:25:33.000Z
|
project/utilities/context_processors.py
|
Aman2244-hub/ducss-site-old
|
42bb7d4938f2500cc3e709c2f2e34cc11dacbe7c
|
[
"MIT"
] | 1
|
2020-10-24T16:28:57.000Z
|
2020-10-24T16:28:57.000Z
|
project/utilities/context_processors.py
|
Aman2244-hub/ducss-site-old
|
42bb7d4938f2500cc3e709c2f2e34cc11dacbe7c
|
[
"MIT"
] | 4
|
2020-09-30T16:48:40.000Z
|
2020-10-19T03:53:12.000Z
|
import hmac, hashlib
from datetime import datetime
from django.conf import settings
def static(request):
''' Add static URL to the context, including the revision number (if known) when not in DEBUG mode. '''
if settings.DEBUG and settings.REVISION:
static_url = u'%sv%s/' % (settings.STATIC_URL, settings.REVISION)
else:
static_url = settings.STATIC_URL
return {'STATIC_URL': static_url}
| 38.272727
| 107
| 0.71734
|
import hmac, hashlib
from datetime import datetime
from django.conf import settings
def static(request):
if settings.DEBUG and settings.REVISION:
static_url = u'%sv%s/' % (settings.STATIC_URL, settings.REVISION)
else:
static_url = settings.STATIC_URL
return {'STATIC_URL': static_url}
| true
| true
|
f718d25705647be878e1d7696bde13ea0e8f11b5
| 138
|
py
|
Python
|
testproj/testproj/testapp/admin.py
|
Polyconseil/django-select2-rocks
|
0cc29af55cdd7bec7da773966bec0da84fa7aa6c
|
[
"BSD-2-Clause"
] | 6
|
2015-09-03T09:01:46.000Z
|
2021-01-28T20:15:18.000Z
|
testproj/testproj/testapp/admin.py
|
Polyconseil/django-select2-rocks
|
0cc29af55cdd7bec7da773966bec0da84fa7aa6c
|
[
"BSD-2-Clause"
] | 7
|
2015-06-04T14:48:20.000Z
|
2018-02-28T09:53:03.000Z
|
testproj/testproj/testapp/admin.py
|
Polyconseil/django-select2-rocks
|
0cc29af55cdd7bec7da773966bec0da84fa7aa6c
|
[
"BSD-2-Clause"
] | 3
|
2015-04-05T14:20:10.000Z
|
2016-09-30T17:02:01.000Z
|
from django.contrib import admin
from .models import Beach, SelectedBeach
admin.site.register(Beach)
admin.site.register(SelectedBeach)
| 19.714286
| 40
| 0.826087
|
from django.contrib import admin
from .models import Beach, SelectedBeach
admin.site.register(Beach)
admin.site.register(SelectedBeach)
| true
| true
|
f718d26ba94100e832b3e9147ab6faaa3966711c
| 2,264
|
py
|
Python
|
test/adjlist.py
|
roks/snap-python
|
e316dfae8f0b7707756e0a6bf4237d448259d2d2
|
[
"BSD-3-Clause"
] | null | null | null |
test/adjlist.py
|
roks/snap-python
|
e316dfae8f0b7707756e0a6bf4237d448259d2d2
|
[
"BSD-3-Clause"
] | null | null | null |
test/adjlist.py
|
roks/snap-python
|
e316dfae8f0b7707756e0a6bf4237d448259d2d2
|
[
"BSD-3-Clause"
] | 1
|
2019-11-11T20:25:19.000Z
|
2019-11-11T20:25:19.000Z
|
import random
import os
import sys
import time
sys.path.append("../swig")
import snap as Snap
numnodes = 100
valrange = numnodes / 10
Edges = Snap.TIntV()
for i in range(0,numnodes):
Edges.Add(int(random.random() * valrange))
d = {}
for i in range(0,numnodes,2):
print "Edges", i/2, Edges.GetVal(i).Val, Edges.GetVal(i+1).Val
d[(Edges.GetVal(i).Val,Edges.GetVal(i+1).Val)] = 1
Hash = Snap.TIntH()
print "type", type(Edges), type(Hash)
Snap.Edge2Hash(Edges,Hash)
for i in range(0,valrange):
Vec2 = Hash.GetDat(i)
print i, Vec2.Val
AdjLists = Snap.TIntIntVH()
print "type", type(Edges), type(AdjLists)
Snap.GetAdjLists(Edges, AdjLists)
size = 0
for i in range(0,valrange):
Vec2 = AdjLists.GetDat(i)
size += Vec2.Len()
for j in range(0,Vec2.Len()):
print i, Vec2.GetVal(j).Val
print "done", Edges.Len(), AdjLists.Len(), size, len(d)
sys.exit(0)
#print "dir(Snap.TIntV)", dir(Snap.TIntV)
Vec1 = Snap.TIntV(numnodes)
#print "dir(Vec1)", dir(Vec1)
print "Len Vec1", Vec1.Len()
#print "dir(Snap.TIntIntVV)", dir(Snap.TIntIntVV)
Vec2 = Snap.TIntIntVV(numtask)
#print "dir(Vec2)", dir(Vec2)
print "Len Vec2", Vec2.Len()
print "Vec1", type(Vec1)
Snap.GetDegrees(Vec1, 10.0, 1.5)
for i in range(0,Vec1.Len()):
print "Vec1", i, Vec1.GetVal(i).Val
Snap.AssignRndTask(Vec1, Vec2)
for i in range(0,Vec2.Len()):
Vec3 = Vec2.GetVal(i)
print "Vec3", i, Vec3.Len()
for j in range(0,Vec3.Len()):
print "Vec4", i, j, Vec3.GetVal(j).Val
sys.exit(0)
for i in range(0,Vec2.Len()):
Vec3 = Vec2.GetVal(i)
print "Vec3", i, Vec3.Len()
h = httplib.HTTPConnection("rokl1.stanford.edu",8100)
#h.request("POST","/msg/GenStubs-0/GenTasks-2","12345",{"User-agent": "007"})
h.connect()
url = "/msg/GenStubs-0/GenTasks-%d" % (i)
h.putrequest("POST",url)
h.putheader("User-Agent", "007")
#h.putheader("Content-Length", "9")
h.putheader("Content-Length", str(Vec3.GetMemSize()))
h.endheaders()
fileno = h.sock.fileno()
print "fileno", fileno
n = Vec3.Send(fileno)
#n = os.write(fileno,"123abcdef")
print n
#h.send("abc123")
res = h.getresponse()
print res.status, res.reason
data = res.read()
print len(data)
print data
| 21.561905
| 81
| 0.635159
|
import random
import os
import sys
import time
sys.path.append("../swig")
import snap as Snap
numnodes = 100
valrange = numnodes / 10
Edges = Snap.TIntV()
for i in range(0,numnodes):
Edges.Add(int(random.random() * valrange))
d = {}
for i in range(0,numnodes,2):
print "Edges", i/2, Edges.GetVal(i).Val, Edges.GetVal(i+1).Val
d[(Edges.GetVal(i).Val,Edges.GetVal(i+1).Val)] = 1
Hash = Snap.TIntH()
print "type", type(Edges), type(Hash)
Snap.Edge2Hash(Edges,Hash)
for i in range(0,valrange):
Vec2 = Hash.GetDat(i)
print i, Vec2.Val
AdjLists = Snap.TIntIntVH()
print "type", type(Edges), type(AdjLists)
Snap.GetAdjLists(Edges, AdjLists)
size = 0
for i in range(0,valrange):
Vec2 = AdjLists.GetDat(i)
size += Vec2.Len()
for j in range(0,Vec2.Len()):
print i, Vec2.GetVal(j).Val
print "done", Edges.Len(), AdjLists.Len(), size, len(d)
sys.exit(0)
Vec1 = Snap.TIntV(numnodes)
print "Len Vec1", Vec1.Len()
Vec2 = Snap.TIntIntVV(numtask)
print "Len Vec2", Vec2.Len()
print "Vec1", type(Vec1)
Snap.GetDegrees(Vec1, 10.0, 1.5)
for i in range(0,Vec1.Len()):
print "Vec1", i, Vec1.GetVal(i).Val
Snap.AssignRndTask(Vec1, Vec2)
for i in range(0,Vec2.Len()):
Vec3 = Vec2.GetVal(i)
print "Vec3", i, Vec3.Len()
for j in range(0,Vec3.Len()):
print "Vec4", i, j, Vec3.GetVal(j).Val
sys.exit(0)
for i in range(0,Vec2.Len()):
Vec3 = Vec2.GetVal(i)
print "Vec3", i, Vec3.Len()
h = httplib.HTTPConnection("rokl1.stanford.edu",8100)
h.connect()
url = "/msg/GenStubs-0/GenTasks-%d" % (i)
h.putrequest("POST",url)
h.putheader("User-Agent", "007")
h.putheader("Content-Length", str(Vec3.GetMemSize()))
h.endheaders()
fileno = h.sock.fileno()
print "fileno", fileno
n = Vec3.Send(fileno)
print n
res = h.getresponse()
print res.status, res.reason
data = res.read()
print len(data)
print data
| false
| true
|
f718d30770e5e2e065c0b1aec425ed1ab250e010
| 8,463
|
py
|
Python
|
calico_cni/tests/unit/test_ipam.py
|
fasaxc/calico-cni
|
671ab58b8a5705d0184d9353e060ca8475c0d5c8
|
[
"Apache-2.0"
] | null | null | null |
calico_cni/tests/unit/test_ipam.py
|
fasaxc/calico-cni
|
671ab58b8a5705d0184d9353e060ca8475c0d5c8
|
[
"Apache-2.0"
] | null | null | null |
calico_cni/tests/unit/test_ipam.py
|
fasaxc/calico-cni
|
671ab58b8a5705d0184d9353e060ca8475c0d5c8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import json
import unittest
from mock import patch, MagicMock, Mock, call, ANY
from netaddr import IPAddress, IPNetwork
from subprocess32 import CalledProcessError, Popen, PIPE
from nose.tools import assert_equal, assert_true, assert_false, assert_raises
from StringIO import StringIO
import pycalico.netns
from pycalico.ipam import IPAMClient
from pycalico.datastore_datatypes import IPPool, Endpoint
from pycalico.datastore_errors import MultipleEndpointsMatch
from calico_cni.constants import *
from calico_cni.ipam import IpamPlugin, _exit_on_error, main
class CniIpamTest(unittest.TestCase):
"""
Test class for IPAM plugin.
"""
def setUp(self):
"""
Per-test setup method.
"""
self.container_id = "ff3afbd1-17ad-499d-b514-72438c009e81"
self.network_config = {
"name": "ut-network",
"type": "calico",
"ipam": {
"type": "calico-ipam",
"subnet": "10.22.0.0/16",
"routes": [{"dst": "0.0.0.0/0"}],
"range-start": "",
"range-end": ""
}
}
self.env = {
CNI_CONTAINERID_ENV: self.container_id,
CNI_IFNAME_ENV: "eth0",
CNI_ARGS_ENV: "",
CNI_COMMAND_ENV: CNI_CMD_ADD,
CNI_PATH_ENV: "/usr/bin/rkt/",
CNI_NETNS_ENV: "netns",
}
# Create the CniPlugin to test.
self.plugin = IpamPlugin(self.network_config, self.env)
# Mock out the datastore client.
self.m_datastore_client = MagicMock(spec=IPAMClient)
self.plugin.datastore_client = self.m_datastore_client
@patch('sys.stdout', new_callable=StringIO)
def test_execute_add_mainline(self, m_stdout):
# Mock
self.plugin.command = CNI_CMD_ADD
ip4 = IPNetwork("1.2.3.4/32")
ip6 = IPNetwork("ba:ad::be:ef/128")
self.plugin._assign_address = MagicMock(spec=self.plugin._assign_address)
self.plugin._assign_address.return_value = ip4, ip6
# Call
self.plugin.execute()
# Assert
expected = json.dumps({"ip4": {"ip": "1.2.3.4/32"},
"ip6": {"ip": "ba:ad::be:ef/128"}})
assert_equal(m_stdout.getvalue().strip(), expected)
@patch('sys.stdout', new_callable=StringIO)
def test_execute_del_mainline(self, m_stdout):
# Mock
self.plugin.command = CNI_CMD_DELETE
# Call
self.plugin.execute()
# Assert
expected = ''
assert_equal(m_stdout.getvalue().strip(), expected)
self.plugin.datastore_client.release_ip_by_handle.assert_called_once_with(handle_id=self.plugin.container_id)
@patch('sys.stdout', new_callable=StringIO)
def test_execute_del_not_assigned(self, m_stdout):
# Mock
self.plugin.command = CNI_CMD_DELETE
self.plugin.datastore_client.release_ip_by_handle.side_effect = KeyError
# Call
self.plugin.execute()
# Assert
expected = ''
assert_equal(m_stdout.getvalue().strip(), expected)
def test_assign_address_mainline(self):
# Mock
ip4 = IPNetwork("1.2.3.4/32")
ip6 = IPNetwork("ba:ad::be:ef/128")
self.plugin.datastore_client.auto_assign_ips = MagicMock(spec=self.plugin._assign_address)
self.plugin.datastore_client.auto_assign_ips.return_value = [ip4], [ip6]
# Args
handle_id = "abcdef12345"
# Call
ret_ip4, ret_ip6 = self.plugin._assign_address(handle_id)
# Assert
assert_equal(ip4, ret_ip4)
assert_equal(ip6, ret_ip6)
def test_assign_address_runtime_err(self):
# Mock
self.plugin.datastore_client.auto_assign_ips = MagicMock(spec=self.plugin._assign_address)
self.plugin.datastore_client.auto_assign_ips.side_effect = RuntimeError
# Args
handle_id = "abcdef12345"
# Call
with assert_raises(SystemExit) as err:
self.plugin._assign_address(handle_id)
e = err.exception
assert_equal(e.code, ERR_CODE_FAILED_ASSIGNMENT)
@patch("calico_cni.ipam._exit_on_error", autospec=True)
def test_assign_address_no_ipv4(self, m_exit):
# Mock
ip6 = IPNetwork("ba:ad::be:ef/128")
self.plugin.datastore_client.auto_assign_ips = MagicMock(spec=self.plugin._assign_address)
self.plugin.datastore_client.auto_assign_ips.return_value = [], [ip6]
# Args
handle_id = "abcdef12345"
# Call
ret_ip4, ret_ip6 = self.plugin._assign_address(handle_id)
# Assert
m_exit.assert_called_once_with(code=ERR_CODE_FAILED_ASSIGNMENT, message=ANY, details=ANY)
@patch("calico_cni.ipam._exit_on_error", autospec=True)
def test_assign_address_no_ipv6(self, m_exit):
# Mock
ip4 = IPNetwork("1.2.3.4/32")
self.plugin.datastore_client.auto_assign_ips = MagicMock(spec=self.plugin._assign_address)
self.plugin.datastore_client.auto_assign_ips.return_value = [ip4], []
# Args
handle_id = "abcdef12345"
# Call
ret_ip4, ret_ip6 = self.plugin._assign_address(handle_id)
# Assert
m_exit.assert_called_once_with(code=ERR_CODE_FAILED_ASSIGNMENT, message=ANY, details=ANY)
def test_parse_config_no_command(self):
# Delete command.
del self.plugin.env[CNI_COMMAND_ENV]
# Call
with assert_raises(SystemExit) as err:
self.plugin._parse_config()
e = err.exception
assert_equal(e.code, ERR_CODE_INVALID_ARGUMENT)
def test_parse_config_invalid_command(self):
# Change command.
self.plugin.env[CNI_COMMAND_ENV] = "invalid"
# Call
with assert_raises(SystemExit) as err:
self.plugin._parse_config()
e = err.exception
assert_equal(e.code, ERR_CODE_INVALID_ARGUMENT)
def test_parse_config_invalid_container_id(self):
# Delete container ID.
del self.plugin.env[CNI_CONTAINERID_ENV]
# Call
with assert_raises(SystemExit) as err:
self.plugin._parse_config()
e = err.exception
assert_equal(e.code, ERR_CODE_INVALID_ARGUMENT)
def test_exit_on_error(self):
with assert_raises(SystemExit) as err:
_exit_on_error(1, "message", "details")
e = err.exception
assert_equal(e.code, 1)
@patch("calico_cni.ipam.os", autospec=True)
@patch("calico_cni.ipam.sys", autospec=True)
@patch("calico_cni.ipam.IpamPlugin", autospec=True)
@patch("calico_cni.ipam.configure_logging", autospec=True)
def test_main(self, m_conf_log, m_plugin, m_sys, m_os):
# Mock
m_os.environ = self.env
m_sys.stdin.readlines.return_value = json.dumps(self.network_config)
m_plugin.reset_mock()
# Call
main()
# Assert
m_plugin.assert_called_once_with(self.network_config, self.env)
m_plugin(self.env, self.network_config).execute.assert_called_once_with()
@patch("calico_cni.ipam.os", autospec=True)
@patch("calico_cni.ipam.sys", autospec=True)
@patch("calico_cni.ipam.IpamPlugin", autospec=True)
@patch("calico_cni.ipam.configure_logging", autospec=True)
@patch("calico_cni.ipam._exit_on_error", autospec=True)
def test_main_execute_error(self, m_exit, m_conf_log, m_plugin, m_sys, m_os):
# Mock
m_os.environ = self.env
m_sys.stdin.readlines.return_value = json.dumps(self.network_config)
m_plugin.reset_mock()
m_plugin(self.network_config, self.env).execute.side_effect = Exception
# Call
main()
# Assert
m_exit.assert_called_once_with(ERR_CODE_UNHANDLED, message=ANY, details=ANY)
| 34.263158
| 117
| 0.656268
|
import os
import sys
import json
import unittest
from mock import patch, MagicMock, Mock, call, ANY
from netaddr import IPAddress, IPNetwork
from subprocess32 import CalledProcessError, Popen, PIPE
from nose.tools import assert_equal, assert_true, assert_false, assert_raises
from StringIO import StringIO
import pycalico.netns
from pycalico.ipam import IPAMClient
from pycalico.datastore_datatypes import IPPool, Endpoint
from pycalico.datastore_errors import MultipleEndpointsMatch
from calico_cni.constants import *
from calico_cni.ipam import IpamPlugin, _exit_on_error, main
class CniIpamTest(unittest.TestCase):
def setUp(self):
self.container_id = "ff3afbd1-17ad-499d-b514-72438c009e81"
self.network_config = {
"name": "ut-network",
"type": "calico",
"ipam": {
"type": "calico-ipam",
"subnet": "10.22.0.0/16",
"routes": [{"dst": "0.0.0.0/0"}],
"range-start": "",
"range-end": ""
}
}
self.env = {
CNI_CONTAINERID_ENV: self.container_id,
CNI_IFNAME_ENV: "eth0",
CNI_ARGS_ENV: "",
CNI_COMMAND_ENV: CNI_CMD_ADD,
CNI_PATH_ENV: "/usr/bin/rkt/",
CNI_NETNS_ENV: "netns",
}
self.plugin = IpamPlugin(self.network_config, self.env)
self.m_datastore_client = MagicMock(spec=IPAMClient)
self.plugin.datastore_client = self.m_datastore_client
@patch('sys.stdout', new_callable=StringIO)
def test_execute_add_mainline(self, m_stdout):
self.plugin.command = CNI_CMD_ADD
ip4 = IPNetwork("1.2.3.4/32")
ip6 = IPNetwork("ba:ad::be:ef/128")
self.plugin._assign_address = MagicMock(spec=self.plugin._assign_address)
self.plugin._assign_address.return_value = ip4, ip6
self.plugin.execute()
expected = json.dumps({"ip4": {"ip": "1.2.3.4/32"},
"ip6": {"ip": "ba:ad::be:ef/128"}})
assert_equal(m_stdout.getvalue().strip(), expected)
@patch('sys.stdout', new_callable=StringIO)
def test_execute_del_mainline(self, m_stdout):
self.plugin.command = CNI_CMD_DELETE
self.plugin.execute()
expected = ''
assert_equal(m_stdout.getvalue().strip(), expected)
self.plugin.datastore_client.release_ip_by_handle.assert_called_once_with(handle_id=self.plugin.container_id)
@patch('sys.stdout', new_callable=StringIO)
def test_execute_del_not_assigned(self, m_stdout):
self.plugin.command = CNI_CMD_DELETE
self.plugin.datastore_client.release_ip_by_handle.side_effect = KeyError
self.plugin.execute()
expected = ''
assert_equal(m_stdout.getvalue().strip(), expected)
def test_assign_address_mainline(self):
ip4 = IPNetwork("1.2.3.4/32")
ip6 = IPNetwork("ba:ad::be:ef/128")
self.plugin.datastore_client.auto_assign_ips = MagicMock(spec=self.plugin._assign_address)
self.plugin.datastore_client.auto_assign_ips.return_value = [ip4], [ip6]
handle_id = "abcdef12345"
ret_ip4, ret_ip6 = self.plugin._assign_address(handle_id)
assert_equal(ip4, ret_ip4)
assert_equal(ip6, ret_ip6)
def test_assign_address_runtime_err(self):
self.plugin.datastore_client.auto_assign_ips = MagicMock(spec=self.plugin._assign_address)
self.plugin.datastore_client.auto_assign_ips.side_effect = RuntimeError
handle_id = "abcdef12345"
with assert_raises(SystemExit) as err:
self.plugin._assign_address(handle_id)
e = err.exception
assert_equal(e.code, ERR_CODE_FAILED_ASSIGNMENT)
@patch("calico_cni.ipam._exit_on_error", autospec=True)
def test_assign_address_no_ipv4(self, m_exit):
ip6 = IPNetwork("ba:ad::be:ef/128")
self.plugin.datastore_client.auto_assign_ips = MagicMock(spec=self.plugin._assign_address)
self.plugin.datastore_client.auto_assign_ips.return_value = [], [ip6]
handle_id = "abcdef12345"
ret_ip4, ret_ip6 = self.plugin._assign_address(handle_id)
m_exit.assert_called_once_with(code=ERR_CODE_FAILED_ASSIGNMENT, message=ANY, details=ANY)
@patch("calico_cni.ipam._exit_on_error", autospec=True)
def test_assign_address_no_ipv6(self, m_exit):
ip4 = IPNetwork("1.2.3.4/32")
self.plugin.datastore_client.auto_assign_ips = MagicMock(spec=self.plugin._assign_address)
self.plugin.datastore_client.auto_assign_ips.return_value = [ip4], []
handle_id = "abcdef12345"
ret_ip4, ret_ip6 = self.plugin._assign_address(handle_id)
m_exit.assert_called_once_with(code=ERR_CODE_FAILED_ASSIGNMENT, message=ANY, details=ANY)
def test_parse_config_no_command(self):
del self.plugin.env[CNI_COMMAND_ENV]
with assert_raises(SystemExit) as err:
self.plugin._parse_config()
e = err.exception
assert_equal(e.code, ERR_CODE_INVALID_ARGUMENT)
def test_parse_config_invalid_command(self):
self.plugin.env[CNI_COMMAND_ENV] = "invalid"
with assert_raises(SystemExit) as err:
self.plugin._parse_config()
e = err.exception
assert_equal(e.code, ERR_CODE_INVALID_ARGUMENT)
def test_parse_config_invalid_container_id(self):
del self.plugin.env[CNI_CONTAINERID_ENV]
with assert_raises(SystemExit) as err:
self.plugin._parse_config()
e = err.exception
assert_equal(e.code, ERR_CODE_INVALID_ARGUMENT)
def test_exit_on_error(self):
with assert_raises(SystemExit) as err:
_exit_on_error(1, "message", "details")
e = err.exception
assert_equal(e.code, 1)
@patch("calico_cni.ipam.os", autospec=True)
@patch("calico_cni.ipam.sys", autospec=True)
@patch("calico_cni.ipam.IpamPlugin", autospec=True)
@patch("calico_cni.ipam.configure_logging", autospec=True)
def test_main(self, m_conf_log, m_plugin, m_sys, m_os):
m_os.environ = self.env
m_sys.stdin.readlines.return_value = json.dumps(self.network_config)
m_plugin.reset_mock()
main()
m_plugin.assert_called_once_with(self.network_config, self.env)
m_plugin(self.env, self.network_config).execute.assert_called_once_with()
@patch("calico_cni.ipam.os", autospec=True)
@patch("calico_cni.ipam.sys", autospec=True)
@patch("calico_cni.ipam.IpamPlugin", autospec=True)
@patch("calico_cni.ipam.configure_logging", autospec=True)
@patch("calico_cni.ipam._exit_on_error", autospec=True)
def test_main_execute_error(self, m_exit, m_conf_log, m_plugin, m_sys, m_os):
m_os.environ = self.env
m_sys.stdin.readlines.return_value = json.dumps(self.network_config)
m_plugin.reset_mock()
m_plugin(self.network_config, self.env).execute.side_effect = Exception
main()
m_exit.assert_called_once_with(ERR_CODE_UNHANDLED, message=ANY, details=ANY)
| true
| true
|
f718d35850eaef0795da6ed63a4cdb69e9bc3d94
| 5,413
|
py
|
Python
|
venv/Lib/site-packages/scipy/sparse/linalg/isolve/tests/test_gcrotmk.py
|
EkremBayar/bayar
|
aad1a32044da671d0b4f11908416044753360b39
|
[
"MIT"
] | 353
|
2020-12-10T10:47:17.000Z
|
2022-03-31T23:08:29.000Z
|
venv/Lib/site-packages/scipy/sparse/linalg/isolve/tests/test_gcrotmk.py
|
EkremBayar/bayar
|
aad1a32044da671d0b4f11908416044753360b39
|
[
"MIT"
] | 80
|
2020-12-10T09:54:22.000Z
|
2022-03-30T22:08:45.000Z
|
venv/Lib/site-packages/scipy/sparse/linalg/isolve/tests/test_gcrotmk.py
|
EkremBayar/bayar
|
aad1a32044da671d0b4f11908416044753360b39
|
[
"MIT"
] | 63
|
2020-12-10T17:10:34.000Z
|
2022-03-28T16:27:07.000Z
|
#!/usr/bin/env python
"""Tests for the linalg.isolve.gcrotmk module
"""
from numpy.testing import (assert_, assert_allclose, assert_equal,
suppress_warnings)
import numpy as np
from numpy import zeros, array, allclose
from scipy.linalg import norm
from scipy.sparse import csr_matrix, eye, rand
from scipy.sparse.linalg.interface import LinearOperator
from scipy.sparse.linalg import splu
from scipy.sparse.linalg.isolve import gcrotmk, gmres
Am = csr_matrix(array([[-2,1,0,0,0,9],
[1,-2,1,0,5,0],
[0,1,-2,1,0,0],
[0,0,1,-2,1,0],
[0,3,0,1,-2,1],
[1,0,0,0,1,-2]]))
b = array([1,2,3,4,5,6])
count = [0]
def matvec(v):
count[0] += 1
return Am*v
A = LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)
def do_solve(**kw):
count[0] = 0
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x0, flag = gcrotmk(A, b, x0=zeros(A.shape[0]), tol=1e-14, **kw)
count_0 = count[0]
assert_(allclose(A*x0, b, rtol=1e-12, atol=1e-12), norm(A*x0-b))
return x0, count_0
class TestGCROTMK(object):
def test_preconditioner(self):
# Check that preconditioning works
pc = splu(Am.tocsc())
M = LinearOperator(matvec=pc.solve, shape=A.shape, dtype=A.dtype)
x0, count_0 = do_solve()
x1, count_1 = do_solve(M=M)
assert_equal(count_1, 3)
assert_(count_1 < count_0/2)
assert_(allclose(x1, x0, rtol=1e-14))
def test_arnoldi(self):
np.random.seed(1)
A = eye(2000) + rand(2000, 2000, density=5e-4)
b = np.random.rand(2000)
# The inner arnoldi should be equivalent to gmres
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x0, flag0 = gcrotmk(A, b, x0=zeros(A.shape[0]), m=15, k=0, maxiter=1)
x1, flag1 = gmres(A, b, x0=zeros(A.shape[0]), restart=15, maxiter=1)
assert_equal(flag0, 1)
assert_equal(flag1, 1)
assert np.linalg.norm(A.dot(x0) - b) > 1e-3
assert_allclose(x0, x1)
def test_cornercase(self):
np.random.seed(1234)
# Rounding error may prevent convergence with tol=0 --- ensure
# that the return values in this case are correct, and no
# exceptions are raised
for n in [3, 5, 10, 100]:
A = 2*eye(n)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
b = np.ones(n)
x, info = gcrotmk(A, b, maxiter=10)
assert_equal(info, 0)
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
x, info = gcrotmk(A, b, tol=0, maxiter=10)
if info == 0:
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
b = np.random.rand(n)
x, info = gcrotmk(A, b, maxiter=10)
assert_equal(info, 0)
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
x, info = gcrotmk(A, b, tol=0, maxiter=10)
if info == 0:
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
def test_nans(self):
A = eye(3, format='lil')
A[1,1] = np.nan
b = np.ones(3)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x, info = gcrotmk(A, b, tol=0, maxiter=10)
assert_equal(info, 1)
def test_truncate(self):
np.random.seed(1234)
A = np.random.rand(30, 30) + np.eye(30)
b = np.random.rand(30)
for truncate in ['oldest', 'smallest']:
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x, info = gcrotmk(A, b, m=10, k=10, truncate=truncate, tol=1e-4,
maxiter=200)
assert_equal(info, 0)
assert_allclose(A.dot(x) - b, 0, atol=1e-3)
def test_CU(self):
for discard_C in (True, False):
# Check that C,U behave as expected
CU = []
x0, count_0 = do_solve(CU=CU, discard_C=discard_C)
assert_(len(CU) > 0)
assert_(len(CU) <= 6)
if discard_C:
for c, u in CU:
assert_(c is None)
# should converge immediately
x1, count_1 = do_solve(CU=CU, discard_C=discard_C)
if discard_C:
assert_equal(count_1, 2 + len(CU))
else:
assert_equal(count_1, 3)
assert_(count_1 <= count_0/2)
assert_allclose(x1, x0, atol=1e-14)
def test_denormals(self):
# Check that no warnings are emitted if the matrix contains
# numbers for which 1/x has no float representation, and that
# the solver behaves properly.
A = np.array([[1, 2], [3, 4]], dtype=float)
A *= 100 * np.nextafter(0, 1)
b = np.array([1, 1])
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
xp, info = gcrotmk(A, b)
if info == 0:
assert_allclose(A.dot(xp), b)
| 32.608434
| 81
| 0.5448
|
from numpy.testing import (assert_, assert_allclose, assert_equal,
suppress_warnings)
import numpy as np
from numpy import zeros, array, allclose
from scipy.linalg import norm
from scipy.sparse import csr_matrix, eye, rand
from scipy.sparse.linalg.interface import LinearOperator
from scipy.sparse.linalg import splu
from scipy.sparse.linalg.isolve import gcrotmk, gmres
Am = csr_matrix(array([[-2,1,0,0,0,9],
[1,-2,1,0,5,0],
[0,1,-2,1,0,0],
[0,0,1,-2,1,0],
[0,3,0,1,-2,1],
[1,0,0,0,1,-2]]))
b = array([1,2,3,4,5,6])
count = [0]
def matvec(v):
count[0] += 1
return Am*v
A = LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)
def do_solve(**kw):
count[0] = 0
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x0, flag = gcrotmk(A, b, x0=zeros(A.shape[0]), tol=1e-14, **kw)
count_0 = count[0]
assert_(allclose(A*x0, b, rtol=1e-12, atol=1e-12), norm(A*x0-b))
return x0, count_0
class TestGCROTMK(object):
def test_preconditioner(self):
pc = splu(Am.tocsc())
M = LinearOperator(matvec=pc.solve, shape=A.shape, dtype=A.dtype)
x0, count_0 = do_solve()
x1, count_1 = do_solve(M=M)
assert_equal(count_1, 3)
assert_(count_1 < count_0/2)
assert_(allclose(x1, x0, rtol=1e-14))
def test_arnoldi(self):
np.random.seed(1)
A = eye(2000) + rand(2000, 2000, density=5e-4)
b = np.random.rand(2000)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x0, flag0 = gcrotmk(A, b, x0=zeros(A.shape[0]), m=15, k=0, maxiter=1)
x1, flag1 = gmres(A, b, x0=zeros(A.shape[0]), restart=15, maxiter=1)
assert_equal(flag0, 1)
assert_equal(flag1, 1)
assert np.linalg.norm(A.dot(x0) - b) > 1e-3
assert_allclose(x0, x1)
def test_cornercase(self):
np.random.seed(1234)
for n in [3, 5, 10, 100]:
A = 2*eye(n)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
b = np.ones(n)
x, info = gcrotmk(A, b, maxiter=10)
assert_equal(info, 0)
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
x, info = gcrotmk(A, b, tol=0, maxiter=10)
if info == 0:
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
b = np.random.rand(n)
x, info = gcrotmk(A, b, maxiter=10)
assert_equal(info, 0)
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
x, info = gcrotmk(A, b, tol=0, maxiter=10)
if info == 0:
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
def test_nans(self):
A = eye(3, format='lil')
A[1,1] = np.nan
b = np.ones(3)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x, info = gcrotmk(A, b, tol=0, maxiter=10)
assert_equal(info, 1)
def test_truncate(self):
np.random.seed(1234)
A = np.random.rand(30, 30) + np.eye(30)
b = np.random.rand(30)
for truncate in ['oldest', 'smallest']:
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x, info = gcrotmk(A, b, m=10, k=10, truncate=truncate, tol=1e-4,
maxiter=200)
assert_equal(info, 0)
assert_allclose(A.dot(x) - b, 0, atol=1e-3)
def test_CU(self):
for discard_C in (True, False):
CU = []
x0, count_0 = do_solve(CU=CU, discard_C=discard_C)
assert_(len(CU) > 0)
assert_(len(CU) <= 6)
if discard_C:
for c, u in CU:
assert_(c is None)
x1, count_1 = do_solve(CU=CU, discard_C=discard_C)
if discard_C:
assert_equal(count_1, 2 + len(CU))
else:
assert_equal(count_1, 3)
assert_(count_1 <= count_0/2)
assert_allclose(x1, x0, atol=1e-14)
def test_denormals(self):
A = np.array([[1, 2], [3, 4]], dtype=float)
A *= 100 * np.nextafter(0, 1)
b = np.array([1, 1])
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
xp, info = gcrotmk(A, b)
if info == 0:
assert_allclose(A.dot(xp), b)
| true
| true
|
f718d4231e5f1b451c3d615e57054feea91147f0
| 801
|
py
|
Python
|
alembic/versions/1ef399cbd2b_add_downloads_and_followers_to_mod.py
|
toadicus/KerbalStuff
|
91b3c0ee3c415a20292c9caa76022130bc5d5238
|
[
"MIT"
] | 1
|
2019-04-15T10:30:17.000Z
|
2019-04-15T10:30:17.000Z
|
alembic/versions/1ef399cbd2b_add_downloads_and_followers_to_mod.py
|
toadicus/KerbalStuff
|
91b3c0ee3c415a20292c9caa76022130bc5d5238
|
[
"MIT"
] | null | null | null |
alembic/versions/1ef399cbd2b_add_downloads_and_followers_to_mod.py
|
toadicus/KerbalStuff
|
91b3c0ee3c415a20292c9caa76022130bc5d5238
|
[
"MIT"
] | null | null | null |
"""Add downloads and followers to Mod
Revision ID: 1ef399cbd2b
Revises: 29cdccab86f
Create Date: 2014-06-11 17:10:26.480478
"""
# revision identifiers, used by Alembic.
revision = '1ef399cbd2b'
down_revision = '29cdccab86f'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('mod', sa.Column('download_count', sa.Integer(), server_default='0', nullable=False))
op.add_column('mod', sa.Column('follower_count', sa.Integer(), server_default='0', nullable=False))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('mod', 'follower_count')
op.drop_column('mod', 'download_count')
### end Alembic commands ###
| 27.62069
| 103
| 0.701623
|
revision = '1ef399cbd2b'
down_revision = '29cdccab86f'
from alembic import op
import sqlalchemy as sa
def upgrade():
    op.add_column('mod', sa.Column('download_count', sa.Integer(), server_default='0', nullable=False))
    op.add_column('mod', sa.Column('follower_count', sa.Integer(), server_default='0', nullable=False))
def downgrade():
    op.drop_column('mod', 'follower_count')
    op.drop_column('mod', 'download_count')
| true
| true
|
f718d4a046685a88df8c7e67861745f498bb0714
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/pkginfo/tests/__init__.py
|
GiulianaPola/select_repeats
|
17a0d053d4f874e42cf654dd142168c2ec8fbd11
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/pkginfo/tests/__init__.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/pkginfo/tests/__init__.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/2c/59/97/f8e5f25cbfc169c1e81504fc2144624a0b7d4d17526ee7745023ffd740
| 96
| 96
| 0.895833
|
/home/runner/.cache/pip/pool/2c/59/97/f8e5f25cbfc169c1e81504fc2144624a0b7d4d17526ee7745023ffd740
| false
| true
|
f718d62abc1a0da728298427d458f2a847bb7f3c
| 40
|
py
|
Python
|
mydates/__init__.py
|
KirisakiMori/Test
|
4d12d9588e3f64dcc9d044f2ba4abcaa07364e01
|
[
"Apache-2.0"
] | null | null | null |
mydates/__init__.py
|
KirisakiMori/Test
|
4d12d9588e3f64dcc9d044f2ba4abcaa07364e01
|
[
"Apache-2.0"
] | null | null | null |
mydates/__init__.py
|
KirisakiMori/Test
|
4d12d9588e3f64dcc9d044f2ba4abcaa07364e01
|
[
"Apache-2.0"
] | null | null | null |
from .core import *
from . import Dates
| 20
| 20
| 0.725
|
from .core import *
from . import Dates
| true
| true
|
f718d7a372c2b5c43f47c1fac264c8f00320529c
| 2,130
|
py
|
Python
|
flask_client_side_session_test/__init__.py
|
WommyInStandingPosition/YtbDataApiRelated
|
4856ad2ee5be49bb74c79c3d6649f9d1fdbdc85d
|
[
"MIT"
] | null | null | null |
flask_client_side_session_test/__init__.py
|
WommyInStandingPosition/YtbDataApiRelated
|
4856ad2ee5be49bb74c79c3d6649f9d1fdbdc85d
|
[
"MIT"
] | null | null | null |
flask_client_side_session_test/__init__.py
|
WommyInStandingPosition/YtbDataApiRelated
|
4856ad2ee5be49bb74c79c3d6649f9d1fdbdc85d
|
[
"MIT"
] | null | null | null |
from flask import Flask, render_template_string, request, session, redirect, url_for
"""
The workflow os this client-side session server should be
1. When new user comes, assign it a random str as identifier. For any further request from the user, the identifier is needed
2. The user can queue either ytb items, or ytber's playlsit, or spotify list for download
3. After queue succeeded or failed, the client side script will check with server if any files are ready for download in a 30 - 60 sec intervals.
4. Keep checking until all queued request from client reached a result. then delete the session
however, this should not be the best approach. please check for https://testdriven.io/blog/flask-server-side-sessions/
for server side session.
"""
# Create the Flask application
app = Flask(__name__)
# Details on the Secret Key: https://flask.palletsprojects.com/en/1.1.x/config/#SECRET_KEY
# NOTE: The secret key is used to cryptographically-sign the cookies used for storing
# the session data.
app.secret_key = 'BAD_SECRET_KEY'
@app.route('/set_email', methods=['GET', 'POST'])
def set_email():
if request.method == 'POST':
# Save the form data to the session object
session['email'] = request.form['email_address']
return redirect(url_for('get_email'))
return """
<form method="post">
<label for="email">Enter your email address:</label>
<input type="email" id="email" name="email_address" required />
<button type="submit">Submit</button
</form>
"""
@app.route('/get_email')
def get_email():
return render_template_string("""
{% if session['email'] %}
<h1>Welcome {{ session['email'] }}!</h1>
{% else %}
<h1>Welcome! Please enter your email <a href="{{ url_for('set_email') }}">here.</a></h1>
{% endif %}
""")
@app.route('/delete_email')
def delete_email():
# Clear the email stored in the session object
session.pop('email', default=None)
return '<h1>Session deleted!</h1>'
if __name__ == '__main__':
app.run()
| 36.101695
| 145
| 0.666667
|
from flask import Flask, render_template_string, request, session, redirect, url_for
app = Flask(__name__)
app.secret_key = 'BAD_SECRET_KEY'
@app.route('/set_email', methods=['GET', 'POST'])
def set_email():
if request.method == 'POST':
session['email'] = request.form['email_address']
return redirect(url_for('get_email'))
return """
<form method="post">
<label for="email">Enter your email address:</label>
<input type="email" id="email" name="email_address" required />
<button type="submit">Submit</button
</form>
"""
@app.route('/get_email')
def get_email():
return render_template_string("""
{% if session['email'] %}
<h1>Welcome {{ session['email'] }}!</h1>
{% else %}
<h1>Welcome! Please enter your email <a href="{{ url_for('set_email') }}">here.</a></h1>
{% endif %}
""")
@app.route('/delete_email')
def delete_email():
session.pop('email', default=None)
return '<h1>Session deleted!</h1>'
if __name__ == '__main__':
app.run()
| true
| true
|
f718d875583a4a0366807934bcf95cd9ab97d6a1
| 8,932
|
py
|
Python
|
pyro/contrib/gp/models/gpr.py
|
GautamV234/pyro
|
d5474ebc6101b330bf9060a3731830d4b6a585d5
|
[
"Apache-2.0"
] | 4,959
|
2017-11-03T14:39:17.000Z
|
2019-02-04T16:14:30.000Z
|
pyro/contrib/gp/models/gpr.py
|
GautamV234/pyro
|
d5474ebc6101b330bf9060a3731830d4b6a585d5
|
[
"Apache-2.0"
] | 985
|
2017-11-03T14:27:56.000Z
|
2019-02-02T18:52:54.000Z
|
pyro/contrib/gp/models/gpr.py
|
GautamV234/pyro
|
d5474ebc6101b330bf9060a3731830d4b6a585d5
|
[
"Apache-2.0"
] | 564
|
2017-11-03T15:05:55.000Z
|
2019-01-31T14:02:29.000Z
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import torch
import torch.distributions as torchdist
from torch.distributions import constraints
import pyro
import pyro.distributions as dist
from pyro.contrib.gp.models.model import GPModel
from pyro.contrib.gp.util import conditional
from pyro.nn.module import PyroParam, pyro_method
from pyro.util import warn_if_nan
class GPRegression(GPModel):
r"""
Gaussian Process Regression model.
The core of a Gaussian Process is a covariance function :math:`k` which governs
the similarity between input points. Given :math:`k`, we can establish a
distribution over functions :math:`f` by a multivarite normal distribution
.. math:: p(f(X)) = \mathcal{N}(0, k(X, X)),
where :math:`X` is any set of input points and :math:`k(X, X)` is a covariance
matrix whose entries are outputs :math:`k(x, z)` of :math:`k` over input pairs
:math:`(x, z)`. This distribution is usually denoted by
.. math:: f \sim \mathcal{GP}(0, k).
.. note:: Generally, beside a covariance matrix :math:`k`, a Gaussian Process can
also be specified by a mean function :math:`m` (which is a zero-value function
by default). In that case, its distribution will be
.. math:: p(f(X)) = \mathcal{N}(m(X), k(X, X)).
Given inputs :math:`X` and their noisy observations :math:`y`, the Gaussian Process
Regression model takes the form
.. math::
f &\sim \mathcal{GP}(0, k(X, X)),\\
y & \sim f + \epsilon,
where :math:`\epsilon` is Gaussian noise.
.. note:: This model has :math:`\mathcal{O}(N^3)` complexity for training,
:math:`\mathcal{O}(N^3)` complexity for testing. Here, :math:`N` is the number
of train inputs.
Reference:
[1] `Gaussian Processes for Machine Learning`,
Carl E. Rasmussen, Christopher K. I. Williams
:param torch.Tensor X: A input data for training. Its first dimension is the number
of data points.
:param torch.Tensor y: An output data for training. Its last dimension is the
number of data points.
:param ~pyro.contrib.gp.kernels.kernel.Kernel kernel: A Pyro kernel object, which
is the covariance function :math:`k`.
:param torch.Tensor noise: Variance of Gaussian noise of this model.
:param callable mean_function: An optional mean function :math:`m` of this Gaussian
process. By default, we use zero mean.
:param float jitter: A small positive term which is added into the diagonal part of
a covariance matrix to help stablize its Cholesky decomposition.
"""
def __init__(self, X, y, kernel, noise=None, mean_function=None, jitter=1e-6):
assert isinstance(
X, torch.Tensor
), "X needs to be a torch Tensor instead of a {}".format(type(X))
if y is not None:
assert isinstance(
y, torch.Tensor
), "y needs to be a torch Tensor instead of a {}".format(type(y))
super().__init__(X, y, kernel, mean_function, jitter)
noise = self.X.new_tensor(1.0) if noise is None else noise
self.noise = PyroParam(noise, constraints.positive)
@pyro_method
def model(self):
self.set_mode("model")
N = self.X.size(0)
Kff = self.kernel(self.X)
Kff.view(-1)[:: N + 1] += self.jitter + self.noise # add noise to diagonal
Lff = torch.linalg.cholesky(Kff)
zero_loc = self.X.new_zeros(self.X.size(0))
f_loc = zero_loc + self.mean_function(self.X)
if self.y is None:
f_var = Lff.pow(2).sum(dim=-1)
return f_loc, f_var
else:
return pyro.sample(
self._pyro_get_fullname("y"),
dist.MultivariateNormal(f_loc, scale_tril=Lff)
.expand_by(self.y.shape[:-1])
.to_event(self.y.dim() - 1),
obs=self.y,
)
@pyro_method
def guide(self):
self.set_mode("guide")
self._load_pyro_samples()
def forward(self, Xnew, full_cov=False, noiseless=True):
r"""
Computes the mean and covariance matrix (or variance) of Gaussian Process
posterior on a test input data :math:`X_{new}`:
.. math:: p(f^* \mid X_{new}, X, y, k, \epsilon) = \mathcal{N}(loc, cov).
.. note:: The noise parameter ``noise`` (:math:`\epsilon`) together with
kernel's parameters have been learned from a training procedure (MCMC or
SVI).
:param torch.Tensor Xnew: A input data for testing. Note that
``Xnew.shape[1:]`` must be the same as ``self.X.shape[1:]``.
:param bool full_cov: A flag to decide if we want to predict full covariance
matrix or just variance.
:param bool noiseless: A flag to decide if we want to include noise in the
prediction output or not.
:returns: loc and covariance matrix (or variance) of :math:`p(f^*(X_{new}))`
:rtype: tuple(torch.Tensor, torch.Tensor)
"""
self._check_Xnew_shape(Xnew)
self.set_mode("guide")
N = self.X.size(0)
Kff = self.kernel(self.X).contiguous()
Kff.view(-1)[:: N + 1] += self.jitter + self.noise # add noise to the diagonal
Lff = torch.linalg.cholesky(Kff)
y_residual = self.y - self.mean_function(self.X)
loc, cov = conditional(
Xnew,
self.X,
self.kernel,
y_residual,
None,
Lff,
full_cov,
jitter=self.jitter,
)
if full_cov and not noiseless:
M = Xnew.size(0)
cov = cov.contiguous()
cov.view(-1, M * M)[:, :: M + 1] += self.noise # add noise to the diagonal
if not full_cov and not noiseless:
cov = cov + self.noise
return loc + self.mean_function(Xnew), cov
def iter_sample(self, noiseless=True):
r"""
Iteratively constructs a sample from the Gaussian Process posterior.
Recall that at test input points :math:`X_{new}`, the posterior is
multivariate Gaussian distributed with mean and covariance matrix
given by :func:`forward`.
This method samples lazily from this multivariate Gaussian. The advantage
of this approach is that later query points can depend upon earlier ones.
Particularly useful when the querying is to be done by an optimisation
routine.
.. note:: The noise parameter ``noise`` (:math:`\epsilon`) together with
kernel's parameters have been learned from a training procedure (MCMC or
SVI).
:param bool noiseless: A flag to decide if we want to add sampling noise
to the samples beyond the noise inherent in the GP posterior.
:returns: sampler
:rtype: function
"""
noise = self.noise.detach()
X = self.X.clone().detach()
y = self.y.clone().detach()
N = X.size(0)
Kff = self.kernel(X).contiguous()
Kff.view(-1)[:: N + 1] += noise # add noise to the diagonal
outside_vars = {"X": X, "y": y, "N": N, "Kff": Kff}
def sample_next(xnew, outside_vars):
"""Repeatedly samples from the Gaussian process posterior,
conditioning on previously sampled values.
"""
warn_if_nan(xnew)
# Variables from outer scope
X, y, Kff = outside_vars["X"], outside_vars["y"], outside_vars["Kff"]
# Compute Cholesky decomposition of kernel matrix
Lff = torch.linalg.cholesky(Kff)
y_residual = y - self.mean_function(X)
# Compute conditional mean and variance
loc, cov = conditional(
xnew, X, self.kernel, y_residual, None, Lff, False, jitter=self.jitter
)
if not noiseless:
cov = cov + noise
ynew = torchdist.Normal(
loc + self.mean_function(xnew), cov.sqrt()
).rsample()
# Update kernel matrix
N = outside_vars["N"]
Kffnew = Kff.new_empty(N + 1, N + 1)
Kffnew[:N, :N] = Kff
cross = self.kernel(X, xnew).squeeze()
end = self.kernel(xnew, xnew).squeeze()
Kffnew[N, :N] = cross
Kffnew[:N, N] = cross
# No noise, just jitter for numerical stability
Kffnew[N, N] = end + self.jitter
# Heuristic to avoid adding degenerate points
if Kffnew.logdet() > -15.0:
outside_vars["Kff"] = Kffnew
outside_vars["N"] += 1
outside_vars["X"] = torch.cat((X, xnew))
outside_vars["y"] = torch.cat((y, ynew))
return ynew
return lambda xnew: sample_next(xnew, outside_vars)
| 38.17094
| 87
| 0.597627
|
import torch
import torch.distributions as torchdist
from torch.distributions import constraints
import pyro
import pyro.distributions as dist
from pyro.contrib.gp.models.model import GPModel
from pyro.contrib.gp.util import conditional
from pyro.nn.module import PyroParam, pyro_method
from pyro.util import warn_if_nan
class GPRegression(GPModel):
def __init__(self, X, y, kernel, noise=None, mean_function=None, jitter=1e-6):
assert isinstance(
X, torch.Tensor
), "X needs to be a torch Tensor instead of a {}".format(type(X))
if y is not None:
assert isinstance(
y, torch.Tensor
), "y needs to be a torch Tensor instead of a {}".format(type(y))
super().__init__(X, y, kernel, mean_function, jitter)
noise = self.X.new_tensor(1.0) if noise is None else noise
self.noise = PyroParam(noise, constraints.positive)
@pyro_method
def model(self):
self.set_mode("model")
N = self.X.size(0)
Kff = self.kernel(self.X)
Kff.view(-1)[:: N + 1] += self.jitter + self.noise
Lff = torch.linalg.cholesky(Kff)
zero_loc = self.X.new_zeros(self.X.size(0))
f_loc = zero_loc + self.mean_function(self.X)
if self.y is None:
f_var = Lff.pow(2).sum(dim=-1)
return f_loc, f_var
else:
return pyro.sample(
self._pyro_get_fullname("y"),
dist.MultivariateNormal(f_loc, scale_tril=Lff)
.expand_by(self.y.shape[:-1])
.to_event(self.y.dim() - 1),
obs=self.y,
)
@pyro_method
def guide(self):
self.set_mode("guide")
self._load_pyro_samples()
def forward(self, Xnew, full_cov=False, noiseless=True):
self._check_Xnew_shape(Xnew)
self.set_mode("guide")
N = self.X.size(0)
Kff = self.kernel(self.X).contiguous()
Kff.view(-1)[:: N + 1] += self.jitter + self.noise
Lff = torch.linalg.cholesky(Kff)
y_residual = self.y - self.mean_function(self.X)
loc, cov = conditional(
Xnew,
self.X,
self.kernel,
y_residual,
None,
Lff,
full_cov,
jitter=self.jitter,
)
if full_cov and not noiseless:
M = Xnew.size(0)
cov = cov.contiguous()
cov.view(-1, M * M)[:, :: M + 1] += self.noise
if not full_cov and not noiseless:
cov = cov + self.noise
return loc + self.mean_function(Xnew), cov
def iter_sample(self, noiseless=True):
noise = self.noise.detach()
X = self.X.clone().detach()
y = self.y.clone().detach()
N = X.size(0)
Kff = self.kernel(X).contiguous()
Kff.view(-1)[:: N + 1] += noise
outside_vars = {"X": X, "y": y, "N": N, "Kff": Kff}
def sample_next(xnew, outside_vars):
warn_if_nan(xnew)
X, y, Kff = outside_vars["X"], outside_vars["y"], outside_vars["Kff"]
Lff = torch.linalg.cholesky(Kff)
y_residual = y - self.mean_function(X)
loc, cov = conditional(
xnew, X, self.kernel, y_residual, None, Lff, False, jitter=self.jitter
)
if not noiseless:
cov = cov + noise
ynew = torchdist.Normal(
loc + self.mean_function(xnew), cov.sqrt()
).rsample()
N = outside_vars["N"]
Kffnew = Kff.new_empty(N + 1, N + 1)
Kffnew[:N, :N] = Kff
cross = self.kernel(X, xnew).squeeze()
end = self.kernel(xnew, xnew).squeeze()
Kffnew[N, :N] = cross
Kffnew[:N, N] = cross
Kffnew[N, N] = end + self.jitter
if Kffnew.logdet() > -15.0:
outside_vars["Kff"] = Kffnew
outside_vars["N"] += 1
outside_vars["X"] = torch.cat((X, xnew))
outside_vars["y"] = torch.cat((y, ynew))
return ynew
return lambda xnew: sample_next(xnew, outside_vars)
| true
| true
|
f718d8fd87c623aee43fc9e984ea46e92add8c51
| 11,213
|
py
|
Python
|
source-code/PC/main.py
|
ZakkyMas/Audio_Mixer_Wireless
|
7f7a002fdf8eebe6891c06b803f1a3bd0c2da7ca
|
[
"MIT"
] | 3
|
2020-12-30T18:37:13.000Z
|
2021-10-02T07:43:10.000Z
|
source-code/PC/main.py
|
ZakkyMas/Audio_Mixer_Wireless
|
7f7a002fdf8eebe6891c06b803f1a3bd0c2da7ca
|
[
"MIT"
] | null | null | null |
source-code/PC/main.py
|
ZakkyMas/Audio_Mixer_Wireless
|
7f7a002fdf8eebe6891c06b803f1a3bd0c2da7ca
|
[
"MIT"
] | 1
|
2021-10-02T07:47:53.000Z
|
2021-10-02T07:47:53.000Z
|
import gc
#PC only
import json, socket, re, time
import random, os
os.system('start /wait cmd /c pip install psutil')
import psutil
gc.enable()
gc.collect()
class WebServer:
def __init__(self, system):
self._call = system
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.bind(('', 80))
self.s.listen(5)
self._stat = [False, ""]
def readFile(self, name):
data = ""
with open(name, 'r') as f:
data = f.read()
f.close()
return data
def Filter_Data(self, data):
com_0 = re.compile(r'\r\n\r\n')
com_1 = re.compile(r'\r\n')
com_2 = re.compile(r'\s|\?|\=')
Header, datas = com_0.split(data, 1)
Header = com_1.split(Header)
m_mode, m_link, *data_l, m_ver = com_2.split(Header[0])
return [m_mode, m_link, data_l, datas]
def Filter_Json(self, file):
if file['name'] == 'Audio':
self._call.JSON_Main['audio']['inpu'] = self._call.JSON_Web['audio']['inpu'] = int(file['inpu'])
self._call.JSON_Main['audio']['loud'] = self._call.JSON_Web['audio']['loud'] = int(file['loud'])
self._call.JSON_Main['audio']['gain'] = self._call.JSON_Web['audio']['gain'] = int(file['gain'])
self._call.JSON_Main['audio']['volu'] = self._call.JSON_Web['audio']['volu'] = int(file['volu'])
self._call.JSON_Main['audio']['bass'] = self._call.JSON_Web['audio']['bass'] = int(file['bass'])
self._call.JSON_Main['audio']['treb'] = self._call.JSON_Web['audio']['treb'] = int(file['treb'])
self._call.JSON_Main['audio']['ba-r'] = self._call.JSON_Web['audio']['ba-r'] = int(file['ba-r'])
self._call.JSON_Main['audio']['ba-l'] = self._call.JSON_Web['audio']['ba-l'] = int(file['ba-l'])
self._call.Save()
return
if file['name'] == 'Wifi':
self._call.JSON_Main['wifi']['mode'] = int(file['mode'])
self._call.JSON_Main['wifi']['SSID'] = file['user']
self._call.JSON_Main['wifi']['PASS'] = file['passA']
self._call.JSON_Web['wifi']['SSID'] = self._call.JSON_Main['wifi']['SSID']
self._call.Save()
self._call.REBOOT()
self._stat[0] = False
self._stat[1] = None
return
if file['name'] == 'Profil':
if self._call.JSON_Main['web']['USERNAME'] != file['userA']:
return
if self._call.JSON_Main['web']['PASSWORD'] != file['passA']:
return
self._call.JSON_Main['web']['USERNAME'] = file['userB']
self._call.JSON_Main['web']['PASSWORD'] = file['passB']
self._call.Save()
self._stat[0] = False
self._stat[1] = None
return
def Looping(self):
Server, Address = self.s.accept()
data_m = Server.recv(4096)
if not data_m:
Server.send('HTTP/1.1 301 OK\nLocation: /\nConnection: keep-alive\n\n'.encode())
Server.close()
return
Address = Address[0]
data_m = data_m.decode()
m_mode, m_link, data_g, data_dll = self.Filter_Data(data_m)
print("")
print(1, Address)
print(2, data_m)
print(3, m_mode)
print(4, m_link)
print(5, data_g)
print(6, data_dll)
data_p, data_t = ["", "text/html"]
if m_mode == 'GET':
if m_link == '/bootstrap.min.js':
data_p = self.readFile('bootstrap.min.js')
data_t = "text/javascript"
elif m_link == '/bootstrap.min.css':
data_p = self.readFile('bootstrap.min.css')
data_t = "text/css"
elif m_link == '/b_login.js':
data_p = self.readFile('b_login.js')
data_t = "text/javascript"
elif self._stat[0] and self._stat[1] == Address:
if m_link == '/getdata':
data_p = json.dumps(self._call.JSON_Web)
data_t = "application/json"
elif m_link == '/audiomixer':
data_p = self.readFile('a_audiomixer.html')
data_t = "text/html"
elif m_link == '/wifi':
data_p = self.readFile('a_wifi.html')
data_t = "text/html"
elif m_link == '/profil':
data_p = self.readFile('a_profil.html')
data_t = "text/html"
elif m_link == '/b_web.js':
data_p = self.readFile('b_web.js')
data_t = "text/javascript"
elif m_link == '/home':
data_p = self.readFile('a_home.html')
data_t = "text/html"
else:
Server.send('HTTP/1.1 301 OK\nLocation: /home\nConnection: keep-alive\n\n'.encode())
Server.close()
return
elif m_link == '/login':
data_p = self.readFile('a_login.html')
data_t = "text/html"
else:
Server.send('HTTP/1.1 301 OK\nLocation: /login\nConnection: keep-alive\n\n'.encode())
Server.close()
return
Server.send('HTTP/1.1 200 OK\nContent-Type: {}\nConnection: keep-alive\n\n'.format(data_t).encode())
Server.sendall(data_p.encode())
Server.close()
elif m_mode == 'POST':
if m_link == '/postJson':
file = json.loads('{}'.format(data_dll).replace('\\', '')[1:-1])
if file['name'] == 'Login':
if self._stat[0] and self._stat[1] != Address:
pass
elif file['user'] == self._call.JSON_Main['web']['USERNAME']:
if file['pass'] == self._call.JSON_Main['web']['PASSWORD']:
self._stat[0] = True
self._stat[1] = Address
elif file['name'] == 'Keluar':
if file['status'] and self._stat[1] == Address:
self._stat[0] = False
self._stat[1] = None
elif self._stat[0] and self._stat[1] == Address:
self.Filter_Json(file)
Server.send('HTTP/1.1 200 OK\n\n'.encode())
Server.close()
else:
Server.send('HTTP/1.1 404 OK\n\n'.encode())
Server.close()
def Exit(self):
self.s.close()
class Wifi:
def __init__(self, system):
self._call = system
def Looping(self):
pass
def Exit(self):
pass
class Hardware:
def __init__(self, system):
self._call = system
self._call.REBOOT = self.REBOOT
self._call.LED = self.LED
def LED(self, val=0):
pass
def REBOOT(self):
pass
def Looping(self):
self._call.JSON_Main['hardware']['volt'] = 6.0 + psutil.sensors_battery()[0] / 24
self._call.JSON_Web['hardware']['volt'] = self._call.JSON_Main['hardware']['volt']
self._call.JSON_Web['hardware']['free'] = psutil.virtual_memory()[4]
self._call.JSON_Web['hardware']['use'] = psutil.virtual_memory()[3]
self._call.JSON_Web['hardware']['freq'] = int("{}000".format(int(psutil.cpu_freq()[0])))
self._call.Save()
def Exit(self):
pass
class Audio:
def __init__(self, system):
self._call = system
self._call.AUDIO = self.AUDIO
self._call.AUDIO()
def AUDIO(self):
pass
def Looping(self):
pass
def Exit(self):
pass
class Storage:
def __init__(self, system):
self._call = system
self._call.Save = self.SAVE
self._call.RESET = self.RESET
with open('main.json') as f:
self._call.JSON_Main = json.loads(f.read())
f.close()
with open('b_web.json') as f:
self._call.JSON_Web = json.loads(f.read())
f.close()
def SAVE(self):
with open('main.json', 'w') as f:
f.write(json.dumps(self._call.JSON_Main))
f.close()
with open('b_web.json', 'w') as f:
f.write(json.dumps(self._call.JSON_Web))
f.close()
def RESET(self):
with open('reset_main.json') as f:
with open('main.json', 'w') as ff:
ff.write(f.read())
ff.close()
f.close()
with open('reset_b_web.json') as f:
with open('b_web.json', 'w') as ff:
ff.write(f.read())
ff.close()
f.close()
self._call.REBOOT()
def Looping(self):
pass
def Exit(self):
self.SAVE()
class System:
def __init__(self):
gc.enable()
gc.collect()
# persisted JSON state and save hook (set by Storage)
self.Save = None
self.JSON_Web = None
self.JSON_Main = None
# control callbacks registered by the component classes
self.AUDIO = None
self.LED = None
self.REBOOT = None
self.RESET = None
# component instances (Storage, Audio, Hardware, Wifi, WebServer)
self._data = []
self.Init()
def Init(self):
try:
gc.collect()
print("Start....")
self._data.append(Storage(self))
print("function Storage Ready")
self._data.append(Audio(self))
print("function Audio Ready")
self._data.append(Hardware(self))
print("function Hardware Ready")
self._data.append(Wifi(self))
print("function Wifi Ready")
self._data.append(WebServer(self))
print("function WebServer Ready")
except Exception as e:
print("ERROR LIBRARY :", e)
def Looping(self):
self.StartBrowser()
gc.enable()
gc.collect()
while True:
self.Update()
def StartBrowser(self):
myHostName = socket.gethostname()
myIP = socket.gethostbyname(myHostName)
print("IP address of the localhost is {}".format(myIP))
os.system("start http://{}".format(myIP))
def Update(self):
for a in self._data:
try:
gc.collect()
a.Looping()
except Exception as e:
print("ERROR Looping :", e)
def Exit(self):
try:
for a in self._data:
try:
gc.collect()
a.Exit()
except Exception:
pass
except Exception as e:
pass
print("Exit....")
gc.collect()
if __name__ == '__main__':
Main = System()
Main.Looping()
Main.Exit()
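# --- Illustrative sketch (not part of the original script) -----------------
# The Storage class above expects 'main.json' and 'b_web.json' to sit next to
# the script. The dictionaries below are an assumption, inferred only from the
# keys the code reads and writes; every concrete value is a placeholder.
import json as _json

_ASSUMED_MAIN = {
    "web": {"USERNAME": "admin", "PASSWORD": "admin"},
    "audio": {"inpu": 0, "loud": 0, "gain": 0, "volu": 0,
              "bass": 0, "treb": 0, "ba-r": 0, "ba-l": 0},
    "wifi": {"mode": 0, "SSID": "", "PASS": ""},
    "hardware": {"volt": 0.0},
}
_ASSUMED_WEB = {
    "audio": dict(_ASSUMED_MAIN["audio"]),
    "wifi": {"SSID": ""},
    "hardware": {"volt": 0.0, "free": 0, "use": 0, "freq": 0},
}

def write_assumed_config_files():
    # Write minimal config files so System() can start on a fresh checkout.
    with open("main.json", "w") as f:
        _json.dump(_ASSUMED_MAIN, f)
    with open("b_web.json", "w") as f:
        _json.dump(_ASSUMED_WEB, f)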
| 32.882698
| 113
| 0.483189
|
import gc
import json, socket, re, time
import random, os
os.system('start /wait cmd /c pip install psutil')
import psutil
gc.enable()
gc.collect()
class WebServer:
def __init__(self, system):
self._call = system
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.bind(('', 80))
self.s.listen(5)
self._stat = [False, ""]
def readFile(self, name):
data = ""
with open(name, 'r') as f:
data = f.read()
f.close()
return data
def Filter_Data(self, data):
com_0 = re.compile(r'\r\n\r\n')
com_1 = re.compile(r'\r\n')
com_2 = re.compile(r'\s|\?|\=')
Header, datas = com_0.split(data, 1)
Header = com_1.split(Header)
m_mode, m_link, *data_l, m_ver = com_2.split(Header[0])
return [m_mode, m_link, data_l, datas]
def Filter_Json(self, file):
if file['name'] == 'Audio':
self._call.JSON_Main['audio']['inpu'] = self._call.JSON_Web['audio']['inpu'] = int(file['inpu'])
self._call.JSON_Main['audio']['loud'] = self._call.JSON_Web['audio']['loud'] = int(file['loud'])
self._call.JSON_Main['audio']['gain'] = self._call.JSON_Web['audio']['gain'] = int(file['gain'])
self._call.JSON_Main['audio']['volu'] = self._call.JSON_Web['audio']['volu'] = int(file['volu'])
self._call.JSON_Main['audio']['bass'] = self._call.JSON_Web['audio']['bass'] = int(file['bass'])
self._call.JSON_Main['audio']['treb'] = self._call.JSON_Web['audio']['treb'] = int(file['treb'])
self._call.JSON_Main['audio']['ba-r'] = self._call.JSON_Web['audio']['ba-r'] = int(file['ba-r'])
self._call.JSON_Main['audio']['ba-l'] = self._call.JSON_Web['audio']['ba-l'] = int(file['ba-l'])
self._call.Save()
return
if file['name'] == 'Wifi':
self._call.JSON_Main['wifi']['mode'] = int(file['mode'])
self._call.JSON_Main['wifi']['SSID'] = file['user']
self._call.JSON_Main['wifi']['PASS'] = file['passA']
self._call.JSON_Web['wifi']['SSID'] = self._call.JSON_Main['wifi']['SSID']
self._call.Save()
self._call.REBOOT()
self._stat[0] = False
self._stat[1] = None
return
if file['name'] == 'Profil':
if self._call.JSON_Main['web']['USERNAME'] != file['userA']:
return
if self._call.JSON_Main['web']['PASSWORD'] != file['passA']:
return
self._call.JSON_Main['web']['USERNAME'] = file['userB']
self._call.JSON_Main['web']['PASSWORD'] = file['passB']
self._call.Save()
self._stat[0] = False
self._stat[1] = None
return
def Looping(self):
Server, Address = self.s.accept()
data_m = Server.recv(4096)
if not data_m:
Server.send('HTTP/1.1 301 OK\nLocation: /\nConnection: keep-alive\n\n'.encode())
Server.close()
return
Address = Address[0]
data_m = data_m.decode()
m_mode, m_link, data_g, data_dll = self.Filter_Data(data_m)
print("")
print(1, Address)
print(2, data_m)
print(3, m_mode)
print(4, m_link)
print(5, data_g)
print(6, data_dll)
data_p, data_t = ["", "text/html"]
if m_mode == 'GET':
if m_link == '/bootstrap.min.js':
data_p = self.readFile('bootstrap.min.js')
data_t = "text/javascript"
elif m_link == '/bootstrap.min.css':
data_p = self.readFile('bootstrap.min.css')
data_t = "text/css"
elif m_link == '/b_login.js':
data_p = self.readFile('b_login.js')
data_t = "text/javascript"
elif self._stat[0] and self._stat[1] == Address:
if m_link == '/getdata':
data_p = json.dumps(self._call.JSON_Web)
data_t = "application/json"
elif m_link == '/audiomixer':
data_p = self.readFile('a_audiomixer.html')
data_t = "text/html"
elif m_link == '/wifi':
data_p = self.readFile('a_wifi.html')
data_t = "text/html"
elif m_link == '/profil':
data_p = self.readFile('a_profil.html')
data_t = "text/html"
elif m_link == '/b_web.js':
data_p = self.readFile('b_web.js')
data_t = "text/javascript"
elif m_link == '/home':
data_p = self.readFile('a_home.html')
data_t = "text/html"
else:
Server.send('HTTP/1.1 301 OK\nLocation: /home\nConnection: keep-alive\n\n'.encode())
Server.close()
return
elif m_link == '/login':
data_p = self.readFile('a_login.html')
data_t = "text/html"
else:
Server.send('HTTP/1.1 301 OK\nLocation: /login\nConnection: keep-alive\n\n'.encode())
Server.close()
return
Server.send('HTTP/1.1 200 OK\nContent-Type: {}\nConnection: keep-alive\n\n'.format(data_t).encode())
Server.sendall(data_p.encode())
Server.close()
elif m_mode == 'POST':
if m_link == '/postJson':
file = json.loads('{}'.format(data_dll).replace('\\', '')[1:-1])
if file['name'] == 'Login':
if self._stat[0] and self._stat[1] != Address:
pass
elif file['user'] == self._call.JSON_Main['web']['USERNAME']:
if file['pass'] == self._call.JSON_Main['web']['PASSWORD']:
self._stat[0] = True
self._stat[1] = Address
elif file['name'] == 'Keluar':
if file['status'] and self._stat[1] == Address:
self._stat[0] = False
self._stat[1] = None
elif self._stat[0] and self._stat[1] == Address:
self.Filter_Json(file)
Server.send('HTTP/1.1 200 OK\n\n'.encode())
Server.close()
else:
Server.send('HTTP/1.1 404 OK\n\n'.encode())
Server.close()
def Exit(self):
self.s.close()
class Wifi:
def __init__(self, system):
self._call = system
def Looping(self):
pass
def Exit(self):
pass
class Hardware:
def __init__(self, system):
self._call = system
self._call.REBOOT = self.REBOOT
self._call.LED = self.LED
def LED(self, val=0):
pass
def REBOOT(self):
pass
def Looping(self):
self._call.JSON_Main['hardware']['volt'] = 6.0 + psutil.sensors_battery()[0] / 24
self._call.JSON_Web['hardware']['volt'] = self._call.JSON_Main['hardware']['volt']
self._call.JSON_Web['hardware']['free'] = psutil.virtual_memory()[4]
self._call.JSON_Web['hardware']['use'] = psutil.virtual_memory()[3]
self._call.JSON_Web['hardware']['freq'] = int("{}000".format(int(psutil.cpu_freq()[0])))
self._call.Save()
def Exit(self):
pass
class Audio:
def __init__(self, system):
self._call = system
self._call.AUDIO = self.AUDIO
self._call.AUDIO()
def AUDIO(self):
pass
def Looping(self):
pass
def Exit(self):
pass
class Storage:
def __init__(self, system):
self._call = system
self._call.Save = self.SAVE
self._call.RESET = self.RESET
with open('main.json') as f:
self._call.JSON_Main = json.loads(f.read())
f.close()
with open('b_web.json') as f:
self._call.JSON_Web = json.loads(f.read())
f.close()
def SAVE(self):
with open('main.json', 'w') as f:
f.write(json.dumps(self._call.JSON_Main))
f.close()
with open('b_web.json', 'w') as f:
f.write(json.dumps(self._call.JSON_Web))
f.close()
def RESET(self):
with open('reset_main.json') as f:
with open('main.json', 'w') as ff:
ff.write(f.read())
ff.close()
f.close()
with open('reset_b_web.json') as f:
with open('b_web.json', 'w') as ff:
ff.write(f.read())
ff.close()
f.close()
self._call.REBOOT()
def Looping(self):
pass
def Exit(self):
self.SAVE()
class System:
def __init__(self):
gc.enable()
gc.collect()
self.Save = None
self.JSON_Web = None
self.JSON_Main = None
self.AUDIO = None
self.LED = None
self.REBOOT = None
self.RESET = None
self._data = []
self.Init()
def Init(self):
try:
gc.collect()
print("Start....")
self._data.append(Storage(self))
print("function Storage Ready")
self._data.append(Audio(self))
print("function Audio Ready")
self._data.append(Hardware(self))
print("function Hardware Ready")
self._data.append(Wifi(self))
print("function Wifi Ready")
self._data.append(WebServer(self))
print("function WebServer Ready")
except Exception as e:
print("ERROR LIBRARY :", e)
def Looping(self):
self.StartBrowser()
gc.enable()
gc.collect()
while True:
self.Update()
def StartBrowser(self):
myHostName = socket.gethostname()
myIP = socket.gethostbyname(myHostName)
print("IP address of the localhost is {}".format(myIP))
os.system("start http://{}".format(myIP))
def Update(self):
for a in self._data:
try:
gc.collect()
a.Looping()
except Exception as e:
print("ERROR Looping :", e)
def Exit(self):
try:
for a in self._data:
try:
gc.collect()
a.Exit()
except Exception:
pass
except Exception as e:
pass
print("Exit....")
gc.collect()
if __name__ == '__main__':
Main = System()
Main.Looping()
Main.Exit()
| true
| true
|
f718d91c69a4489e6af5552967d46c000b7fdfe2
| 3,161
|
py
|
Python
|
c3dev/galmocks/data_loaders/load_tng_data.py
|
aphearin/c3dev
|
d36d083c9eb688640670dbe066bf299777a78ba7
|
[
"BSD-3-Clause"
] | 2
|
2020-09-23T00:47:06.000Z
|
2022-02-08T18:41:00.000Z
|
c3dev/galmocks/data_loaders/load_tng_data.py
|
aphearin/c3dev
|
d36d083c9eb688640670dbe066bf299777a78ba7
|
[
"BSD-3-Clause"
] | 2
|
2022-01-24T15:45:08.000Z
|
2022-02-07T20:58:40.000Z
|
c3dev/galmocks/data_loaders/load_tng_data.py
|
aphearin/c3dev
|
d36d083c9eb688640670dbe066bf299777a78ba7
|
[
"BSD-3-Clause"
] | 5
|
2018-03-27T17:21:06.000Z
|
2022-03-11T19:45:30.000Z
|
"""
"""
from collections import OrderedDict
import numpy as np
from halotools.utils import sliding_conditional_percentile
from astropy.table import Table
from ..utils.galprops import compute_lg_ssfr
SANDY_SCRATCH_PATH = "/global/cscratch1/sd/sihany/TNG300-1/output"
BEBOP = "/lcrc/project/halotools/C3EMC/TNG300-1"
NERSC = "/global/cfs/cdirs/desi/users/aphearin/C3EMC/TNG300-1"
TNG_LBOX = 205.0
def load_tng_subhalos(drn=NERSC, snapNum=55):
import illustris_python as il
subhalos = il.groupcat.loadSubhalos(drn, snapNum)
return subhalos
def load_tng_host_halos(drn=NERSC, snapNum=55):
import illustris_python as il
host_halos = il.groupcat.loadHalos(drn, snapNum)
return host_halos
def get_value_added_tng_data(subs, hosts):
hosts["halo_id"] = np.arange(len(hosts["GroupMass"])).astype(int)
host_keys_to_keep = ["halo_id", "GroupFirstSub", "GroupPos", "GroupVel"]
tng_hosts = Table(OrderedDict([(key, hosts[key]) for key in host_keys_to_keep]))
tng_hosts.rename_column("GroupPos", "pos")
tng_hosts.rename_column("GroupVel", "vel")
tng_hosts["logmh"] = np.log10(hosts["GroupMass"]) + 10
tng_hosts["pos"] = tng_hosts["pos"] / 1000
tng = Table()
tng["host_halo_logmh"] = tng_hosts["logmh"][subs["SubhaloGrNr"]]
tng["host_halo_pos"] = tng_hosts["pos"][subs["SubhaloGrNr"]]
tng["host_halo_vel"] = tng_hosts["vel"][subs["SubhaloGrNr"]]
tng["subhalo_pos"] = subs["SubhaloPos"] / 1000
tng["subhalo_vel"] = subs["SubhaloVel"]
tng["subhalo_mass"] = subs["SubhaloMass"] * 1e10
tng["subhalo_vmax"] = subs["SubhaloVmax"]
tng["subhalo_vdisp"] = subs["SubhaloVelDisp"]
tng["stellar_metallicity"] = subs["SubhaloStarMetallicity"]
tng["subhalo_mgas"] = subs["SubhaloMassType"][:, 0] * 1e10
tng["subhalo_dm"] = subs["SubhaloMassType"][:, 1] * 1e10
tng["mstar"] = subs["SubhaloMassType"][:, 4] * 1e10
tng["sfr"] = subs["SubhaloSFR"]
tng["lgssfr"] = compute_lg_ssfr(tng["mstar"], tng["sfr"])
tng["host_halo_index"] = subs["SubhaloGrNr"]
subhalo_id = np.arange(len(subs["SubhaloGrNr"])).astype(int)
subhalo_cen_id = subhalo_id[tng_hosts["GroupFirstSub"]]
tng["is_central"] = subhalo_cen_id == subhalo_id
# Broadcast properties of the central subhalo to each host
tng_hosts["central_subhalo_vmax"] = subs["SubhaloVmax"][tng_hosts["GroupFirstSub"]]
tng_hosts["central_subhalo_vdisp"] = subs["SubhaloVelDisp"][
tng_hosts["GroupFirstSub"]
]
# Broadcast properties of the central subhalo to each group member
tng["host_halo_vmax"] = tng_hosts["central_subhalo_vmax"][subs["SubhaloGrNr"]]
tng["host_halo_vdisp"] = tng_hosts["central_subhalo_vdisp"][subs["SubhaloGrNr"]]
tng_hosts["p_vmax"] = sliding_conditional_percentile(
tng_hosts["logmh"], tng_hosts["central_subhalo_vmax"], 101
)
tng_hosts["p_vdisp"] = sliding_conditional_percentile(
tng_hosts["logmh"], tng_hosts["central_subhalo_vdisp"], 101
)
tng["host_halo_p_vmax"] = tng_hosts["p_vmax"][subs["SubhaloGrNr"]]
tng["host_halo_p_vdisp"] = tng_hosts["p_vdisp"][subs["SubhaloGrNr"]]
return tng, tng_hosts
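# --- Illustrative usage sketch (not part of the original module) -----------
# Shows how the loaders above compose. It assumes illustris_python is
# installed and the TNG300-1 group catalogs exist at the default NERSC path;
# the snapshot number is the module default.
def example_load_value_added_tng():
    subs = load_tng_subhalos()        # raw subhalo group catalog
    hosts = load_tng_host_halos()     # raw FoF host-halo catalog
    tng, tng_hosts = get_value_added_tng_data(subs, hosts)
    # tng: one row per subhalo; tng_hosts: one row per host halo
    return tng, tng_hosts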
| 37.630952
| 87
| 0.702309
|
from collections import OrderedDict
import numpy as np
from halotools.utils import sliding_conditional_percentile
from astropy.table import Table
from ..utils.galprops import compute_lg_ssfr
SANDY_SCRATCH_PATH = "/global/cscratch1/sd/sihany/TNG300-1/output"
BEBOP = "/lcrc/project/halotools/C3EMC/TNG300-1"
NERSC = "/global/cfs/cdirs/desi/users/aphearin/C3EMC/TNG300-1"
TNG_LBOX = 205.0
def load_tng_subhalos(drn=NERSC, snapNum=55):
import illustris_python as il
subhalos = il.groupcat.loadSubhalos(drn, snapNum)
return subhalos
def load_tng_host_halos(drn=NERSC, snapNum=55):
import illustris_python as il
host_halos = il.groupcat.loadHalos(drn, snapNum)
return host_halos
def get_value_added_tng_data(subs, hosts):
hosts["halo_id"] = np.arange(len(hosts["GroupMass"])).astype(int)
host_keys_to_keep = ["halo_id", "GroupFirstSub", "GroupPos", "GroupVel"]
tng_hosts = Table(OrderedDict([(key, hosts[key]) for key in host_keys_to_keep]))
tng_hosts.rename_column("GroupPos", "pos")
tng_hosts.rename_column("GroupVel", "vel")
tng_hosts["logmh"] = np.log10(hosts["GroupMass"]) + 10
tng_hosts["pos"] = tng_hosts["pos"] / 1000
tng = Table()
tng["host_halo_logmh"] = tng_hosts["logmh"][subs["SubhaloGrNr"]]
tng["host_halo_pos"] = tng_hosts["pos"][subs["SubhaloGrNr"]]
tng["host_halo_vel"] = tng_hosts["vel"][subs["SubhaloGrNr"]]
tng["subhalo_pos"] = subs["SubhaloPos"] / 1000
tng["subhalo_vel"] = subs["SubhaloVel"]
tng["subhalo_mass"] = subs["SubhaloMass"] * 1e10
tng["subhalo_vmax"] = subs["SubhaloVmax"]
tng["subhalo_vdisp"] = subs["SubhaloVelDisp"]
tng["stellar_metallicity"] = subs["SubhaloStarMetallicity"]
tng["subhalo_mgas"] = subs["SubhaloMassType"][:, 0] * 1e10
tng["subhalo_dm"] = subs["SubhaloMassType"][:, 1] * 1e10
tng["mstar"] = subs["SubhaloMassType"][:, 4] * 1e10
tng["sfr"] = subs["SubhaloSFR"]
tng["lgssfr"] = compute_lg_ssfr(tng["mstar"], tng["sfr"])
tng["host_halo_index"] = subs["SubhaloGrNr"]
subhalo_id = np.arange(len(subs["SubhaloGrNr"])).astype(int)
subhalo_cen_id = subhalo_id[tng_hosts["GroupFirstSub"]]
tng["is_central"] = subhalo_cen_id == subhalo_id
tng_hosts["central_subhalo_vmax"] = subs["SubhaloVmax"][tng_hosts["GroupFirstSub"]]
tng_hosts["central_subhalo_vdisp"] = subs["SubhaloVelDisp"][
tng_hosts["GroupFirstSub"]
]
tng["host_halo_vmax"] = tng_hosts["central_subhalo_vmax"][subs["SubhaloGrNr"]]
tng["host_halo_vdisp"] = tng_hosts["central_subhalo_vdisp"][subs["SubhaloGrNr"]]
tng_hosts["p_vmax"] = sliding_conditional_percentile(
tng_hosts["logmh"], tng_hosts["central_subhalo_vmax"], 101
)
tng_hosts["p_vdisp"] = sliding_conditional_percentile(
tng_hosts["logmh"], tng_hosts["central_subhalo_vdisp"], 101
)
tng["host_halo_p_vmax"] = tng_hosts["p_vmax"][subs["SubhaloGrNr"]]
tng["host_halo_p_vdisp"] = tng_hosts["p_vdisp"][subs["SubhaloGrNr"]]
return tng, tng_hosts
| true
| true
|
f718d9523532a32523296a63dc44a0cb5a6195d4
| 2,166
|
py
|
Python
|
rounded_rect.py
|
nik-panekin/pyramid_puzzle
|
ce5bfc9295e0c5b2b516cc3662440a86cb293943
|
[
"MIT"
] | 5
|
2021-05-26T15:49:24.000Z
|
2021-06-21T07:45:54.000Z
|
rounded_rect.py
|
xiaodaoapple/pyramid_puzzle
|
ce5bfc9295e0c5b2b516cc3662440a86cb293943
|
[
"MIT"
] | null | null | null |
rounded_rect.py
|
xiaodaoapple/pyramid_puzzle
|
ce5bfc9295e0c5b2b516cc3662440a86cb293943
|
[
"MIT"
] | 1
|
2021-06-05T13:57:17.000Z
|
2021-06-05T13:57:17.000Z
|
"""Module for implementation the RoundedRect class.
"""
import pygame
# Brightness lowering for border color
# Must be in the range (0..1), endpoints excluded
BRIGHTNESS_LOW = 0.5
BORDER_WIDTH = 4 # Inner border width in pixels
class RoundedRect():
"""The RoundedRect class simplifies drawing filled rectangles with rounded
corners and thick border. The color of border is auto-calculated.
Public attributes:
rect: pygame.Rect (read only) - stores rectangle datastructure for
drawing. Its properties can be modified for positioning.
"""
def __init__(self, width: int, height: int, color: tuple):
"""Input:
width - integer value for rectangle width;
height - integer value for rectangle height;
color - tuple(r: int, g: int, b: int) for rectangle main color.
"""
self.color = color
self.border_color = [int(i * BRIGHTNESS_LOW) for i in self.color]
self.rect = pygame.Rect(0, 0, width, height)
def get_inner_rect(self) -> pygame.Rect:
"""Returns pygame.Rect instance representing inner rectangle filled
with main color.
"""
inner_rect = pygame.Rect(0, 0, self.rect.width - 2 * BORDER_WIDTH,
self.rect.height - 2 * BORDER_WIDTH)
inner_rect.center = self.rect.center
return inner_rect
def draw(self):
"""Draws rounded rectangle.
"""
ds = pygame.display.get_surface()
pygame.draw.rect(ds, self.border_color, self.rect,
border_radius=int(self.rect.height / 2))
inner_rect = self.get_inner_rect()
pygame.draw.rect(ds, self.color, inner_rect,
border_radius=int(inner_rect.height / 2))
def contains_point(self, point: tuple) -> bool:
"""Checks if a given point is inside the RoundedRect object area.
Input:
point - a tuple(x: int, y: int) representing point to check.
Returns:
True - if the point is inside the rectangular area;
False - otherwise.
"""
return self.rect.collidepoint(point)
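# --- Illustrative usage sketch (not part of the original module) -----------
# Minimal pygame loop exercising RoundedRect; the window size, color and the
# click handling are assumptions for illustration only.
def example_rounded_rect_demo():
    pygame.init()
    pygame.display.set_mode((320, 240))
    button = RoundedRect(200, 60, (80, 160, 240))
    button.rect.center = (160, 120)
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            elif (event.type == pygame.MOUSEBUTTONDOWN
                  and button.contains_point(event.pos)):
                print('clicked')
        button.draw()
        pygame.display.flip()
    pygame.quit()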
| 36.711864
| 78
| 0.620037
|
import pygame
BRIGHTNESS_LOW = 0.5
BORDER_WIDTH = 4
class RoundedRect():
def __init__(self, width: int, height: int, color: tuple):
self.color = color
self.border_color = [int(i * BRIGHTNESS_LOW) for i in self.color]
self.rect = pygame.Rect(0, 0, width, height)
def get_inner_rect(self) -> pygame.Rect:
inner_rect = pygame.Rect(0, 0, self.rect.width - 2 * BORDER_WIDTH,
self.rect.height - 2 * BORDER_WIDTH)
inner_rect.center = self.rect.center
return inner_rect
def draw(self):
ds = pygame.display.get_surface()
pygame.draw.rect(ds, self.border_color, self.rect,
border_radius=int(self.rect.height / 2))
inner_rect = self.get_inner_rect()
pygame.draw.rect(ds, self.color, inner_rect,
border_radius=int(inner_rect.height / 2))
def contains_point(self, point: tuple) -> bool:
return self.rect.collidepoint(point)
| true
| true
|
f718d9cbb0d92c6ed590234cfed98502650487da
| 2,054
|
py
|
Python
|
4.30/code/three_nodes_bw.py
|
Therock90421/19-20-Computer_network_lab
|
7b1295b68d4b874d71bba01255e27a5b92fdee56
|
[
"MIT"
] | 3
|
2020-10-10T06:25:20.000Z
|
2021-04-28T13:58:48.000Z
|
4.30/code/three_nodes_bw.py
|
Therock90421/19-20-Computer_network_lab
|
7b1295b68d4b874d71bba01255e27a5b92fdee56
|
[
"MIT"
] | null | null | null |
4.30/code/three_nodes_bw.py
|
Therock90421/19-20-Computer_network_lab
|
7b1295b68d4b874d71bba01255e27a5b92fdee56
|
[
"MIT"
] | 3
|
2020-11-15T14:11:33.000Z
|
2022-02-24T08:51:16.000Z
|
#!/usr/bin/python
import os
import sys
import glob
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.link import TCLink
from mininet.cli import CLI
script_deps = [ 'ethtool' ]
def check_scripts():
dir = os.path.abspath(os.path.dirname(sys.argv[0]))
for fname in glob.glob(dir + '/' + 'scripts/*.sh'):
if not os.access(fname, os.X_OK):
print('%s should be set executable by using `chmod +x $script_name`' % (fname))
sys.exit(1)
for program in script_deps:
found = False
for path in os.environ['PATH'].split(os.pathsep):
exe_file = os.path.join(path, program)
if os.path.isfile(exe_file) and os.access(exe_file, os.X_OK):
found = True
break
if not found:
print('`%s` is required but missing, which could be installed via `apt` or `aptitude`' % (program))
sys.exit(2)
# Mininet automatically assigns an IP address to each interface of a node,
# but a hub or switch does not need an IP address.
def clearIP(n):
for iface in n.intfList():
n.cmd('ifconfig %s 0.0.0.0' % (iface))
class BroadcastTopo(Topo):
def build(self):
h1 = self.addHost('h1')
h2 = self.addHost('h2')
h3 = self.addHost('h3')
s1 = self.addHost('s1')
self.addLink(h1, s1, bw=20)
self.addLink(h2, s1, bw=10)
self.addLink(h3, s1, bw=10)
if __name__ == '__main__':
check_scripts()
topo = BroadcastTopo()
net = Mininet(topo = topo, link = TCLink, controller = None)
h1, h2, h3, s1 = net.get('h1', 'h2', 'h3', 's1')
h1.cmd('ifconfig h1-eth0 10.0.0.1/8')
h2.cmd('ifconfig h2-eth0 10.0.0.2/8')
h3.cmd('ifconfig h3-eth0 10.0.0.3/8')
clearIP(s1)
for h in [ h1, h2, h3, s1 ]:
h.cmd('./scripts/disable_offloading.sh')
h.cmd('./scripts/disable_ipv6.sh')
net.start()
# s1.cmd('./switch-reference &')
# h2.cmd('iperf -s &')
# h3.cmd('iperf -s &')
CLI(net)
net.stop()
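# --- Illustrative sketch (not part of the original script) -----------------
# How the commented-out bandwidth test above could be driven from Python
# instead of the Mininet CLI; the iperf durations are assumptions.
def example_bandwidth_test(net):
    h1, h2, h3 = net.get('h1', 'h2', 'h3')
    h2.cmd('iperf -s &')                        # start iperf servers
    h3.cmd('iperf -s &')
    print(h1.cmd('iperf -c 10.0.0.2 -t 10'))    # measure h1 -> h2
    print(h1.cmd('iperf -c 10.0.0.3 -t 10'))    # measure h1 -> h3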
| 28.527778
| 110
| 0.587634
|
import os
import sys
import glob
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.link import TCLink
from mininet.cli import CLI
script_deps = [ 'ethtool' ]
def check_scripts():
dir = os.path.abspath(os.path.dirname(sys.argv[0]))
for fname in glob.glob(dir + '/' + 'scripts/*.sh'):
if not os.access(fname, os.X_OK):
print('%s should be set executable by using `chmod +x $script_name`' % (fname))
sys.exit(1)
for program in script_deps:
found = False
for path in os.environ['PATH'].split(os.pathsep):
exe_file = os.path.join(path, program)
if os.path.isfile(exe_file) and os.access(exe_file, os.X_OK):
found = True
break
if not found:
print('`%s` is required but missing, which could be installed via `apt` or `aptitude`' % (program))
sys.exit(2)
def clearIP(n):
for iface in n.intfList():
n.cmd('ifconfig %s 0.0.0.0' % (iface))
class BroadcastTopo(Topo):
def build(self):
h1 = self.addHost('h1')
h2 = self.addHost('h2')
h3 = self.addHost('h3')
s1 = self.addHost('s1')
self.addLink(h1, s1, bw=20)
self.addLink(h2, s1, bw=10)
self.addLink(h3, s1, bw=10)
if __name__ == '__main__':
check_scripts()
topo = BroadcastTopo()
net = Mininet(topo = topo, link = TCLink, controller = None)
h1, h2, h3, s1 = net.get('h1', 'h2', 'h3', 's1')
h1.cmd('ifconfig h1-eth0 10.0.0.1/8')
h2.cmd('ifconfig h2-eth0 10.0.0.2/8')
h3.cmd('ifconfig h3-eth0 10.0.0.3/8')
clearIP(s1)
for h in [ h1, h2, h3, s1 ]:
h.cmd('./scripts/disable_offloading.sh')
h.cmd('./scripts/disable_ipv6.sh')
net.start()
CLI(net)
net.stop()
| false
| true
|
f718da52417343bc9caaf1548feb02f4772b30d0
| 467
|
bzl
|
Python
|
closure/deps.bzl
|
kalbasit/rules_proto_grpc
|
7e0a97adc8801df1cd74ee435d74bbd857c98a36
|
[
"Apache-2.0"
] | 1
|
2021-08-11T23:14:07.000Z
|
2021-08-11T23:14:07.000Z
|
closure/deps.bzl
|
kalbasit/rules_proto_grpc
|
7e0a97adc8801df1cd74ee435d74bbd857c98a36
|
[
"Apache-2.0"
] | null | null | null |
closure/deps.bzl
|
kalbasit/rules_proto_grpc
|
7e0a97adc8801df1cd74ee435d74bbd857c98a36
|
[
"Apache-2.0"
] | null | null | null |
load(":repositories.bzl", "closure_repos")
# NOTE: THE RULES IN THIS FILE ARE KEPT FOR BACKWARDS COMPATIBILITY ONLY.
# Please use the rules in repositories.bzl
def closure_proto_compile(**kwargs):
print("Import of rules in deps.bzl is deprecated, please use repositories.bzl")
closure_repos(**kwargs)
def closure_proto_library(**kwargs):
print("Import of rules in deps.bzl is deprecated, please use repositories.bzl")
closure_repos(**kwargs)
| 35.923077
| 83
| 0.745182
|
load(":repositories.bzl", "closure_repos")
def closure_proto_compile(**kwargs):
print("Import of rules in deps.bzl is deprecated, please use repositories.bzl")
closure_repos(**kwargs)
def closure_proto_library(**kwargs):
print("Import of rules in deps.bzl is deprecated, please use repositories.bzl")
closure_repos(**kwargs)
| true
| true
|
f718da9cba675296f3b6e8de45a3a71e366eb326
| 7,481
|
py
|
Python
|
pypureclient/flasharray/FA_2_9/models/pod_array_status.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 14
|
2018-12-07T18:30:27.000Z
|
2022-02-22T09:12:33.000Z
|
pypureclient/flasharray/FA_2_9/models/pod_array_status.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 28
|
2019-09-17T21:03:52.000Z
|
2022-03-29T22:07:35.000Z
|
pypureclient/flasharray/FA_2_9/models/pod_array_status.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 15
|
2020-06-11T15:50:08.000Z
|
2022-03-21T09:27:25.000Z
|
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.9
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_9 import models
class PodArrayStatus(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'name': 'str',
'frozen_at': 'int',
'mediator_status': 'str',
'pre_elected': 'bool',
'progress': 'float',
'status': 'str'
}
attribute_map = {
'id': 'id',
'name': 'name',
'frozen_at': 'frozen_at',
'mediator_status': 'mediator_status',
'pre_elected': 'pre_elected',
'progress': 'progress',
'status': 'status'
}
required_args = {
}
def __init__(
self,
id=None, # type: str
name=None, # type: str
frozen_at=None, # type: int
mediator_status=None, # type: str
pre_elected=None, # type: bool
progress=None, # type: float
status=None, # type: str
):
"""
Keyword args:
id (str): A globally unique, system-generated ID. The ID cannot be modified.
name (str): The resource name, such as volume name, pod name, snapshot name, and so on.
frozen_at (int): The timestamp of when the data on the pod was frozen when the array went offline. Measured in milliseconds since the UNIX epoch. Also known as the recovery point. If the pod is in sync, a value of `null` will be returned.
mediator_status (str): The status of the mediator, which determines whether it is available to mediate a high availability event. Valid values are `flummoxed`, `online`, `unknown`, and `unreachable`. Only mediators in the `online` status can mediate high availability events. If set to `flummoxed`, the array can reach a mediator, but it is talking to the wrong one. Verify that the DNS in the environment is properly configured. This status might also appear if the pod has been offline on one array for an extended period of time and the peer array is unreachable. If set to `online`, the array is successfully communicating with the mediator, and the mediator is available to mediate a high availability event. If set to `unreachable`, the array cannot reach the mediator, either due to network issues or because the mediator is down. When a mediator is unreachable, synchronous replication continues to function provided all arrays are healthy and communicating, but a high availability event without mediator access can result in an outage.
pre_elected (bool): If set to `true`, the array has been pre-elected to remain online in the rare event that the mediator is inaccessible on both arrays within the stretched pod, and then later, the arrays within the stretched pod become disconnected from each other. If set to `false`, either the array has been pre-elected to remain offline while its peer array remains online, or pre-election is not in effect. One and only one array within each pod is pre-elected at a given point in time, so while a pre-elected array is keeping the pod online, the pod on its non-elected peer array remains offline during the communication failure. Users cannot pre-elect arrays.
progress (float): The percentage progress of the pod resyncing process for this array. The percentage is displayed as a decimal value, starting at 0.00 and ending at 1.00.
status (str): The status of the array within the stretched pod. Valid values are `offline`, `online`, `resyncing`, and `unknown`. If set to `offline`, the array is experiencing problems and may not have the latest pod data. The array cannot handle I/O to the pod and cannot take over during a high availability event. If set to `online`, the array is online and has the latest pod data. The array can handle I/O to the pod and take over during a high availability event. If set to `resyncing`, the array is actively getting the latest pod data so that it becomes fully synchronized with its peer array. During the resyncing process, the array cannot handle I/O to the pod. Once the arrays are fully synchronized, the array changes to `online` status. If set to `unknown`, the status of the peer array is unknown because this array is offline and cannot determine the state of the pod on the peer array. Only the peer array can ever be in unknown status; this unknown status is unique to the local array and will differ when viewed from its peer array.
"""
if id is not None:
self.id = id
if name is not None:
self.name = name
if frozen_at is not None:
self.frozen_at = frozen_at
if mediator_status is not None:
self.mediator_status = mediator_status
if pre_elected is not None:
self.pre_elected = pre_elected
if progress is not None:
self.progress = progress
if status is not None:
self.status = status
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `PodArrayStatus`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PodArrayStatus, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PodArrayStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
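# --- Illustrative usage sketch (not part of the generated module) ----------
# The field values are invented for illustration; only keys declared in
# attribute_map are accepted by __setattr__.
def example_pod_array_status():
    status = PodArrayStatus(name='array-1', mediator_status='online',
                            status='online', progress=1.0)
    print(status.to_dict())   # dict of only the attributes that were set
    return status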
| 50.547297
| 1,063
| 0.642561
|
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_9 import models
class PodArrayStatus(object):
swagger_types = {
'id': 'str',
'name': 'str',
'frozen_at': 'int',
'mediator_status': 'str',
'pre_elected': 'bool',
'progress': 'float',
'status': 'str'
}
attribute_map = {
'id': 'id',
'name': 'name',
'frozen_at': 'frozen_at',
'mediator_status': 'mediator_status',
'pre_elected': 'pre_elected',
'progress': 'progress',
'status': 'status'
}
required_args = {
}
def __init__(
self,
id=None,
name=None,
frozen_at=None,
mediator_status=None,
pre_elected=None,
progress=None,
status=None,
):
if id is not None:
self.id = id
if name is not None:
self.name = name
if frozen_at is not None:
self.frozen_at = frozen_at
if mediator_status is not None:
self.mediator_status = mediator_status
if pre_elected is not None:
self.pre_elected = pre_elected
if progress is not None:
self.progress = progress
if status is not None:
self.status = status
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `PodArrayStatus`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PodArrayStatus, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, PodArrayStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
f718db080a9c910e3b3827bd3a7417fe205bc663
| 43,146
|
py
|
Python
|
python/mxnet/model.py
|
yurivict/incubator-mxnet
|
3d38dbde744954854015919d4faf56ac1aea16de
|
[
"Apache-2.0"
] | 1
|
2019-12-13T02:05:16.000Z
|
2019-12-13T02:05:16.000Z
|
python/mxnet/model.py
|
yurivict/incubator-mxnet
|
3d38dbde744954854015919d4faf56ac1aea16de
|
[
"Apache-2.0"
] | 2
|
2021-12-10T01:39:06.000Z
|
2021-12-14T21:41:10.000Z
|
python/mxnet/model.py
|
yurivict/incubator-mxnet
|
3d38dbde744954854015919d4faf56ac1aea16de
|
[
"Apache-2.0"
] | 1
|
2019-12-02T04:16:13.000Z
|
2019-12-02T04:16:13.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=fixme, invalid-name, too-many-arguments, too-many-locals, too-many-lines
# pylint: disable=too-many-branches, too-many-statements
"""MXNet model module"""
from __future__ import absolute_import, print_function
import os
import time
import logging
import warnings
from collections import namedtuple
import numpy as np
from . import io
from . import ndarray as nd
from . import symbol as sym
from . import optimizer as opt
from . import metric
from . import kvstore as kvs
from .context import Context, cpu
from .initializer import Uniform
from .optimizer import get_updater
from .executor_manager import DataParallelExecutorManager, _check_arguments, _load_data
from .io import DataDesc
from .base import mx_real_t
BASE_ESTIMATOR = object
try:
from sklearn.base import BaseEstimator
BASE_ESTIMATOR = BaseEstimator
except ImportError:
SKLEARN_INSTALLED = False
# Parameter to pass to batch_end_callback
BatchEndParam = namedtuple('BatchEndParams',
['epoch',
'nbatch',
'eval_metric',
'locals'])
def _create_sparse_kvstore(kvstore):
"""Create kvstore assuming some parameters' storage types are row_sparse.
Parameters
----------
kvstore : KVStore or str
The kvstore.
Returns
-------
kvstore : KVStore
update_on_kvstore : bool. Always True.
"""
# always update on kvstore
update_on_kvstore = True
if isinstance(kvstore, kvs.KVStore):
kv = kvstore
elif isinstance(kvstore, str):
kv = kvs.create(kvstore)
else:
raise TypeError("Cannot create '%s' KVStore with row_sparse parameters. "
"The type must be KVStore or str." % kvstore)
return (kv, update_on_kvstore)
def _create_kvstore(kvstore, num_device, arg_params):
"""Create kvstore
This function selects and creates a proper kvstore given the kvstore type.
Parameters
----------
kvstore : KVStore or str
The kvstore.
num_device : int
The number of devices
arg_params : dict of str to `NDArray`.
Model parameter, dict of name to `NDArray` of net's weights.
"""
update_on_kvstore = bool(int(os.getenv('MXNET_UPDATE_ON_KVSTORE', "1")))
if kvstore is None:
kv = None
elif isinstance(kvstore, kvs.KVStore):
kv = kvstore
elif isinstance(kvstore, str):
# create kvstore using the string type
if num_device == 1 and 'dist' not in kvstore:
# no need to use kv for single device and single machine
kv = None
else:
kv = kvs.create(kvstore)
if kvstore == 'local':
# for a local kvstore, automatically choose whether to update on it
max_size = max(np.prod(param.shape) for param in
arg_params.values())
if max_size > 1024 * 1024 * 16:
update_on_kvstore = False
else:
raise TypeError('kvstore must be KVStore, str or None')
if kv is None:
update_on_kvstore = False
return (kv, update_on_kvstore)
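# Illustrative note (not part of the original module): with the default
# MXNET_UPDATE_ON_KVSTORE setting, a single device and a 'local' kvstore,
# no kvstore is created at all, e.g.
#   kv, update_on_kvstore = _create_kvstore('local', 1, {'w': nd.zeros((2, 2))})
#   # kv is None and update_on_kvstore is False
# The parameter dict here is a placeholder.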
def _initialize_kvstore(kvstore, param_arrays, arg_params, param_names, update_on_kvstore):
"""Initialize kvstore"""
for idx, param_on_devs in enumerate(param_arrays):
name = param_names[idx]
kvstore.init(name, arg_params[name])
if update_on_kvstore:
kvstore.pull(name, param_on_devs, priority=-idx)
def _update_params_on_kvstore_nccl(param_arrays, grad_arrays, kvstore, param_names):
"""Perform update of param_arrays from grad_arrays on NCCL kvstore."""
valid_indices = [index for index, grad_list in
enumerate(grad_arrays) if grad_list[0] is not None]
valid_grad_arrays = [grad_arrays[i] for i in valid_indices]
valid_param_arrays = [param_arrays[i] for i in valid_indices]
valid_param_names = [param_names[i] for i in valid_indices]
size = len(valid_grad_arrays)
start = 0
# Use aggregation by default only with NCCL
default_batch = '16'
batch = int(os.getenv('MXNET_UPDATE_AGGREGATION_SIZE', default_batch))
while start < size:
end = start + batch if start + batch < size else size
# push gradient, priority is negative index
kvstore.push(valid_param_names[start:end], valid_grad_arrays[start:end], priority=-start)
# pull back the weights
kvstore.pull(valid_param_names[start:end], valid_param_arrays[start:end], priority=-start)
start = end
def _update_params_on_kvstore(param_arrays, grad_arrays, kvstore, param_names):
"""Perform update of param_arrays from grad_arrays on kvstore."""
for index, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
name = param_names[index]
# push gradient, priority is negative index
kvstore.push(name, grad_list, priority=-index)
# pull back the weights
kvstore.pull(name, arg_list, priority=-index)
def _update_params(param_arrays, grad_arrays, updater, num_device,
kvstore=None, param_names=None):
"""Perform update of param_arrays from grad_arrays not on kvstore."""
updates = [[] for _ in range(num_device)]
for i, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
index = i
if kvstore:
name = param_names[index]
# push gradient, priority is negative index
kvstore.push(name, grad_list, priority=-index)
# pull back the sum gradients, to the same locations.
kvstore.pull(name, grad_list, priority=-index)
for k, p in enumerate(zip(arg_list, grad_list)):
# fake an index here so the optimizer creates a distinct
# state for the same index on different devices; TODO(mli)
# use a better solution later
w, g = p
updates[k].append((index*num_device+k, g, w))
for dev_updates in updates:
# update params if param_arrays and grad_arrays are not empty
if dev_updates:
i, w, g = zip(*dev_updates)
updater(i, w, g)
def _multiple_callbacks(callbacks, *args, **kwargs):
"""Sends args and kwargs to any configured callbacks.
This handles the cases where the 'callbacks' variable
is ``None``, a single function, or a list.
"""
if isinstance(callbacks, list):
for cb in callbacks:
cb(*args, **kwargs)
return
if callbacks:
callbacks(*args, **kwargs)
def _train_multi_device(symbol, ctx, arg_names, param_names, aux_names,
arg_params, aux_params,
begin_epoch, end_epoch, epoch_size, optimizer,
kvstore, update_on_kvstore,
train_data, eval_data=None, eval_metric=None,
epoch_end_callback=None, batch_end_callback=None,
logger=None, work_load_list=None, monitor=None,
eval_end_callback=None,
eval_batch_end_callback=None, sym_gen=None):
"""Internal training function on multiple devices.
This function also works for a single device.
Parameters
----------
symbol : Symbol
The network configuration.
ctx : list of Context
The training devices.
arg_names: list of str
Name of all arguments of the network.
param_names: list of str
Name of all trainable parameters of the network.
aux_names: list of str
Name of all auxiliary states of the network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
begin_epoch : int
The beginning training epoch.
end_epoch : int
The end training epoch.
epoch_size : int, optional
Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : Optimizer
The optimization algorithm
train_data : DataIter
Training data iterator.
eval_data : DataIter
Validation data iterator.
eval_metric : EvalMetric
An evaluation function or a list of evaluation functions.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at the end of each epoch.
This can be used to checkpoint the model each epoch.
batch_end_callback : callable(BatchEndParams)
A callback that is invoked at the end of each batch.
This can be used to measure speed, get results from the evaluation metric, etc.
kvstore : KVStore
The KVStore.
update_on_kvstore : bool
Whether or not perform weight updating on kvstore.
logger : logging logger
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as ``ctx``.
monitor : Monitor, optional
Monitor installed to executor,
for monitoring outputs, weights, and gradients for debugging.
Notes
-----
- This function will update the NDArrays in `arg_params` and `aux_states` in place.
"""
if logger is None:
logger = logging
executor_manager = DataParallelExecutorManager(symbol=symbol,
sym_gen=sym_gen,
ctx=ctx,
train_data=train_data,
param_names=param_names,
arg_names=arg_names,
aux_names=aux_names,
work_load_list=work_load_list,
logger=logger)
if monitor:
executor_manager.install_monitor(monitor)
executor_manager.set_params(arg_params, aux_params)
if not update_on_kvstore:
updater = get_updater(optimizer)
else:
kvstore.set_optimizer(optimizer)
if kvstore:
_initialize_kvstore(kvstore=kvstore,
param_arrays=executor_manager.param_arrays,
arg_params=arg_params,
param_names=executor_manager.param_names,
update_on_kvstore=update_on_kvstore)
# Now start training
train_data.reset()
for epoch in range(begin_epoch, end_epoch):
# Training phase
tic = time.time()
eval_metric.reset()
nbatch = 0
# Iterate over training data.
while True:
do_reset = True
for data_batch in train_data:
executor_manager.load_data_batch(data_batch)
if monitor is not None:
monitor.tic()
executor_manager.forward(is_train=True)
executor_manager.backward()
if update_on_kvstore:
if 'nccl' in kvstore.type:
_update_params_on_kvstore_nccl(executor_manager.param_arrays,
executor_manager.grad_arrays,
kvstore, executor_manager.param_names)
else:
_update_params_on_kvstore(executor_manager.param_arrays,
executor_manager.grad_arrays,
kvstore, executor_manager.param_names)
else:
_update_params(executor_manager.param_arrays,
executor_manager.grad_arrays,
updater=updater,
num_device=len(ctx),
kvstore=kvstore,
param_names=executor_manager.param_names)
if monitor is not None:
monitor.toc_print()
# evaluate at end, so we can lazy copy
executor_manager.update_metric(eval_metric, data_batch.label)
nbatch += 1
# batch callback (for print purpose)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
# this epoch is done possibly earlier
if epoch_size is not None and nbatch >= epoch_size:
do_reset = False
break
if do_reset:
logger.info('Epoch[%d] Resetting Data Iterator', epoch)
train_data.reset()
# this epoch is done
if epoch_size is None or nbatch >= epoch_size:
break
toc = time.time()
logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc - tic))
if epoch_end_callback or epoch + 1 == end_epoch:
executor_manager.copy_to(arg_params, aux_params)
_multiple_callbacks(epoch_end_callback, epoch, symbol, arg_params, aux_params)
# evaluation
if eval_data:
eval_metric.reset()
eval_data.reset()
total_num_batch = 0
for i, eval_batch in enumerate(eval_data):
executor_manager.load_data_batch(eval_batch)
executor_manager.forward(is_train=False)
executor_manager.update_metric(eval_metric, eval_batch.label)
if eval_batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_batch_end_callback, batch_end_params)
total_num_batch += 1
if eval_end_callback is not None:
eval_end_params = BatchEndParam(epoch=epoch,
nbatch=total_num_batch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_end_callback, eval_end_params)
eval_data.reset()
# end of all epochs
def save_checkpoint(prefix, epoch, symbol, arg_params, aux_params, remove_amp_cast=True):
"""Checkpoint the model data into file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
The epoch number of the model.
symbol : Symbol
The input Symbol.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
remove_amp_cast : bool, optional
Whether to remove the amp_cast and amp_multicast operators, before saving the model.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if symbol is not None:
symbol.save('%s-symbol.json' % prefix, remove_amp_cast=remove_amp_cast)
save_dict = {('arg:%s' % k) : v.as_in_context(cpu()) for k, v in arg_params.items()}
save_dict.update({('aux:%s' % k) : v.as_in_context(cpu()) for k, v in aux_params.items()})
param_name = '%s-%04d.params' % (prefix, epoch)
nd.save(param_name, save_dict)
logging.info('Saved checkpoint to \"%s\"', param_name)
def load_params(prefix, epoch):
"""Load params from a file
"""
save_dict = nd.load("%s-%04d.params" % (prefix, epoch))
arg_params = {}
aux_params = {}
if not save_dict:
logging.warning("Params file '%s' is empty", '%s-%04d.params' % (prefix, epoch))
return (arg_params, aux_params)
for k, v in save_dict.items():
tp, name = k.split(":", 1)
if tp == "arg":
arg_params[name] = v
if tp == "aux":
aux_params[name] = v
return (arg_params, aux_params)
def load_checkpoint(prefix, epoch):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
Epoch number of model we would like to load.
Returns
-------
symbol : Symbol
The symbol configuration of computation network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- Symbol will be loaded from ``prefix-symbol.json``.
- Parameters will be loaded from ``prefix-epoch.params``.
"""
symbol = sym.load('%s-symbol.json' % prefix)
arg_params, aux_params = load_params(prefix, epoch)
return (symbol, arg_params, aux_params)
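# --- Illustrative usage sketch (not part of the original module) -----------
# Round-trips a tiny symbol through save_checkpoint/load_checkpoint. The
# prefix, epoch and parameter values are placeholders for illustration.
def example_checkpoint_roundtrip(prefix='demo-model', epoch=1):
    data = sym.Variable('data')
    weight = sym.Variable('fc_weight')
    net = sym.FullyConnected(data=data, weight=weight, num_hidden=2,
                             no_bias=True, name='fc')
    arg_params = {'fc_weight': nd.ones((2, 4))}
    aux_params = {}
    save_checkpoint(prefix, epoch, net, arg_params, aux_params)
    # writes 'demo-model-symbol.json' and 'demo-model-0001.params'
    return load_checkpoint(prefix, epoch)   # (symbol, arg_params, aux_params)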
from .callback import LogValidationMetricsCallback # pylint: disable=wrong-import-position
class FeedForward(BASE_ESTIMATOR):
"""Model class of MXNet for training and predicting feedforward nets.
This class is designed for a single-data, single-output supervised network.
Parameters
----------
symbol : Symbol
The symbol configuration of computation network.
ctx : Context or list of Context, optional
The device context of training and prediction.
To use multi-GPU training, pass in a list of GPU contexts.
num_epoch : int, optional
Training parameter, the number of training epochs.
epoch_size : int, optional
Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : str or Optimizer, optional
Training parameter, name or optimizer object for training.
initializer : initializer function, optional
Training parameter, the initialization scheme used.
numpy_batch_size : int, optional
The batch size of training data.
Only needed when input array is numpy.
arg_params : dict of str to NDArray, optional
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray, optional
Model parameter, dict of name to NDArray of net's auxiliary states.
allow_extra_params : boolean, optional
Whether allow extra parameters that are not needed by symbol
to be passed by aux_params and ``arg_params``.
If this is True, no error will be thrown when ``aux_params`` and ``arg_params``
contain more parameters than needed.
begin_epoch : int, optional
The beginning training epoch.
kwargs : dict
The additional keyword arguments passed to optimizer.
"""
def __init__(self, symbol, ctx=None,
num_epoch=None, epoch_size=None, optimizer='sgd',
initializer=Uniform(0.01),
numpy_batch_size=128,
arg_params=None, aux_params=None,
allow_extra_params=False,
begin_epoch=0,
**kwargs):
warnings.warn(
'\033[91mmxnet.model.FeedForward has been deprecated. ' + \
'Please use mxnet.mod.Module instead.\033[0m',
DeprecationWarning, stacklevel=2)
if isinstance(symbol, sym.Symbol):
self.symbol = symbol
self.sym_gen = None
else:
assert(callable(symbol))
self.symbol = None
self.sym_gen = symbol
# model parameters
self.arg_params = arg_params
self.aux_params = aux_params
self.allow_extra_params = allow_extra_params
self.argument_checked = False
if self.sym_gen is None:
self._check_arguments()
# basic configuration
if ctx is None:
ctx = [cpu()]
elif isinstance(ctx, Context):
ctx = [ctx]
self.ctx = ctx
# training parameters
self.num_epoch = num_epoch
self.epoch_size = epoch_size
self.kwargs = kwargs.copy()
self.optimizer = optimizer
self.initializer = initializer
self.numpy_batch_size = numpy_batch_size
# internal helper state
self._pred_exec = None
self.begin_epoch = begin_epoch
def _check_arguments(self):
"""verify the argument of the default symbol and user provided parameters"""
if self.argument_checked:
return
assert(self.symbol is not None)
self.argument_checked = True
# check if symbol contains duplicated names.
_check_arguments(self.symbol)
# rematch parameters to delete useless ones
if self.allow_extra_params:
if self.arg_params:
arg_names = set(self.symbol.list_arguments())
self.arg_params = {k : v for k, v in self.arg_params.items()
if k in arg_names}
if self.aux_params:
aux_names = set(self.symbol.list_auxiliary_states())
self.aux_params = {k : v for k, v in self.aux_params.items()
if k in aux_names}
@staticmethod
def _is_data_arg(name):
"""Check if name is a data argument."""
return name.endswith('data') or name.endswith('label')
def _init_params(self, inputs, overwrite=False):
"""Initialize weight parameters and auxiliary states."""
inputs = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in inputs]
input_shapes = {item.name: item.shape for item in inputs}
arg_shapes, _, aux_shapes = self.symbol.infer_shape(**input_shapes)
assert arg_shapes is not None
input_dtypes = {item.name: item.dtype for item in inputs}
arg_dtypes, _, aux_dtypes = self.symbol.infer_type(**input_dtypes)
assert arg_dtypes is not None
arg_names = self.symbol.list_arguments()
input_names = input_shapes.keys()
param_names = [key for key in arg_names if key not in input_names]
aux_names = self.symbol.list_auxiliary_states()
param_name_attrs = [x for x in zip(arg_names, arg_shapes, arg_dtypes)
if x[0] in param_names]
arg_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in param_name_attrs}
aux_name_attrs = [x for x in zip(aux_names, aux_shapes, aux_dtypes)
if x[0] in aux_names]
aux_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in aux_name_attrs}
for k, v in arg_params.items():
if self.arg_params and k in self.arg_params and (not overwrite):
arg_params[k][:] = self.arg_params[k][:]
else:
self.initializer(k, v)
for k, v in aux_params.items():
if self.aux_params and k in self.aux_params and (not overwrite):
aux_params[k][:] = self.aux_params[k][:]
else:
self.initializer(k, v)
self.arg_params = arg_params
self.aux_params = aux_params
return (arg_names, list(param_names), aux_names)
def __getstate__(self):
this = self.__dict__.copy()
this['_pred_exec'] = None
return this
def __setstate__(self, state):
self.__dict__.update(state)
def _init_predictor(self, input_shapes, type_dict=None):
"""Initialize the predictor module for running prediction."""
shapes = {name: self.arg_params[name].shape for name in self.arg_params}
shapes.update(dict(input_shapes))
if self._pred_exec is not None:
arg_shapes, _, _ = self.symbol.infer_shape(**shapes)
assert arg_shapes is not None, "Incomplete input shapes"
pred_shapes = [x.shape for x in self._pred_exec.arg_arrays]
if arg_shapes == pred_shapes:
return
# for now only use the first device
pred_exec = self.symbol.simple_bind(
self.ctx[0], grad_req='null', type_dict=type_dict, **shapes)
pred_exec.copy_params_from(self.arg_params, self.aux_params)
_check_arguments(self.symbol)
self._pred_exec = pred_exec
def _init_iter(self, X, y, is_train):
"""Initialize the iterator given input."""
if isinstance(X, (np.ndarray, nd.NDArray)):
if y is None:
if is_train:
raise ValueError('y must be specified when X is numpy.ndarray')
y = np.zeros(X.shape[0])
if not isinstance(y, (np.ndarray, nd.NDArray)):
raise TypeError('y must be ndarray when X is numpy.ndarray')
if X.shape[0] != y.shape[0]:
raise ValueError("The numbers of data points and labels not equal")
if y.ndim == 2 and y.shape[1] == 1:
y = y.flatten()
if y.ndim != 1:
raise ValueError("Label must be 1D or 2D (with 2nd dimension being 1)")
if is_train:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size),
shuffle=is_train, last_batch_handle='roll_over')
else:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size), shuffle=False)
if not isinstance(X, io.DataIter):
raise TypeError('X must be DataIter, NDArray or numpy.ndarray')
return X
def _init_eval_iter(self, eval_data):
"""Initialize the iterator given eval_data."""
if eval_data is None:
return eval_data
if isinstance(eval_data, (tuple, list)) and len(eval_data) == 2:
if eval_data[0] is not None:
if eval_data[1] is None and isinstance(eval_data[0], io.DataIter):
return eval_data[0]
input_data = (np.array(eval_data[0]) if isinstance(eval_data[0], list)
else eval_data[0])
input_label = (np.array(eval_data[1]) if isinstance(eval_data[1], list)
else eval_data[1])
return self._init_iter(input_data, input_label, is_train=True)
else:
raise ValueError("Eval data is NONE")
if not isinstance(eval_data, io.DataIter):
raise TypeError('Eval data must be DataIter, or ' \
'NDArray/numpy.ndarray/list pair (i.e. tuple/list of length 2)')
return eval_data
def predict(self, X, num_batch=None, return_data=False, reset=True):
"""Run the prediction, always only use one device.
Parameters
----------
X : mxnet.DataIter
num_batch : int or None
The number of batches to run. Go through all batches if ``None``.
Returns
-------
y : numpy.ndarray or a list of numpy.ndarray if the network has multiple outputs.
The predicted value of the output.
"""
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
batch_size = X.batch_size
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
output_list = [[] for _ in range(len(self._pred_exec.outputs))]
if return_data:
data_list = [[] for _ in X.provide_data]
label_list = [[] for _ in X.provide_label]
i = 0
for batch in X:
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
padded = batch.pad
real_size = batch_size - padded
for o_list, o_nd in zip(output_list, self._pred_exec.outputs):
o_list.append(o_nd[0:real_size].asnumpy())
if return_data:
for j, x in enumerate(batch.data):
data_list[j].append(x[0:real_size].asnumpy())
for j, x in enumerate(batch.label):
label_list[j].append(x[0:real_size].asnumpy())
i += 1
if num_batch is not None and i == num_batch:
break
outputs = [np.concatenate(x) for x in output_list]
if len(outputs) == 1:
outputs = outputs[0]
if return_data:
data = [np.concatenate(x) for x in data_list]
label = [np.concatenate(x) for x in label_list]
if len(data) == 1:
data = data[0]
if len(label) == 1:
label = label[0]
return outputs, data, label
else:
return outputs
def score(self, X, eval_metric='acc', num_batch=None, batch_end_callback=None, reset=True):
"""Run the model given an input and calculate the score
as assessed by an evaluation metric.
Parameters
----------
X : mxnet.DataIter
eval_metric : metric.metric
The metric for calculating score.
num_batch : int or None
The number of batches to run. Go through all batches if ``None``.
Returns
-------
s : float
The final score.
"""
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
for i, batch in enumerate(X):
if num_batch is not None and i == num_batch:
break
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
eval_metric.update(batch.label, self._pred_exec.outputs)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=0,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
return eval_metric.get()[1]
def fit(self, X, y=None, eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None, kvstore='local', logger=None,
work_load_list=None, monitor=None, eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None):
"""Fit the model.
Parameters
----------
X : DataIter, or numpy.ndarray/NDArray
Training data. If `X` is a `DataIter`, the name or (if name not available)
the position of its outputs should match the corresponding variable
names defined in the symbolic graph.
y : numpy.ndarray/NDArray, optional
Training set label.
If X is ``numpy.ndarray`` or `NDArray`, `y` is required to be set.
While y can be 1D or 2D (with 2nd dimension as 1), its first dimension must be
the same as `X`, i.e. the number of data points and labels should be equal.
eval_data : DataIter or numpy.ndarray/list/NDArray pair
If eval_data is numpy.ndarray/list/NDArray pair,
it should be ``(valid_data, valid_label)``.
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. This could be the name of evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for purposes of printing.
kvstore: KVStore or str, optional
The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'
Defaults to 'local'; often no need to change for a single machine.
logger : logging logger, optional
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
Note
----
KVStore behavior
- 'local', multi-devices on a single machine, will automatically choose best type.
- 'dist_sync', multiple machines communicating via BSP.
- 'dist_async', multiple machines with asynchronous communication.
"""
data = self._init_iter(X, y, is_train=True)
eval_data = self._init_eval_iter(eval_data)
if self.sym_gen:
self.symbol = self.sym_gen(data.default_bucket_key) # pylint: disable=no-member
self._check_arguments()
self.kwargs["sym"] = self.symbol
arg_names, param_names, aux_names = \
self._init_params(data.provide_data+data.provide_label)
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
# create kvstore
(kvstore, update_on_kvstore) = _create_kvstore(
kvstore, len(self.ctx), self.arg_params)
param_idx2name = {}
if update_on_kvstore:
param_idx2name.update(enumerate(param_names))
else:
for i, n in enumerate(param_names):
for k in range(len(self.ctx)):
param_idx2name[i*len(self.ctx)+k] = n
self.kwargs["param_idx2name"] = param_idx2name
# init optimizer
if isinstance(self.optimizer, str):
batch_size = data.batch_size
if kvstore and 'dist' in kvstore.type and '_async' not in kvstore.type:
batch_size *= kvstore.num_workers
optimizer = opt.create(self.optimizer,
rescale_grad=(1.0/batch_size),
**(self.kwargs))
elif isinstance(self.optimizer, opt.Optimizer):
    # bind the provided optimizer first, then fill in idx2name if missing
    optimizer = self.optimizer
    if not optimizer.idx2name:
        optimizer.idx2name = param_idx2name.copy()
# do training
_train_multi_device(self.symbol, self.ctx, arg_names, param_names, aux_names,
self.arg_params, self.aux_params,
begin_epoch=self.begin_epoch, end_epoch=self.num_epoch,
epoch_size=self.epoch_size,
optimizer=optimizer,
train_data=data, eval_data=eval_data,
eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore, update_on_kvstore=update_on_kvstore,
logger=logger, work_load_list=work_load_list, monitor=monitor,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback,
sym_gen=self.sym_gen)
def save(self, prefix, epoch=None, remove_amp_cast=True):
"""Checkpoint the model checkpoint into file.
You can also use `pickle` to do the job if you only work on Python.
The advantage of `load` and `save` (as compared to `pickle`) is that
the resulting file can be loaded from other MXNet language bindings.
One can also directly `load`/`save` from/to cloud storage (S3, HDFS).
Parameters
----------
prefix : str
Prefix of model name.
remove_amp_cast : bool, optional
Whether to remove the amp_cast and amp_multicast operators, before saving the model.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if epoch is None:
epoch = self.num_epoch
assert epoch is not None
save_checkpoint(prefix, epoch, self.symbol, self.arg_params, self.aux_params, remove_amp_cast=remove_amp_cast)
@staticmethod
def load(prefix, epoch, ctx=None, **kwargs):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
epoch number of model we would like to load.
ctx : Context or list of Context, optional
The device context of training and prediction.
kwargs : dict
Other parameters for model, including `num_epoch`, optimizer and `numpy_batch_size`.
Returns
-------
model : FeedForward
The loaded model that can be used for prediction.
Notes
-----
- Symbol is loaded from ``prefix-symbol.json``.
- Parameters are loaded from ``prefix-epoch.params``.
"""
symbol, arg_params, aux_params = load_checkpoint(prefix, epoch)
return FeedForward(symbol, ctx=ctx,
arg_params=arg_params, aux_params=aux_params,
begin_epoch=epoch,
**kwargs)
@staticmethod
def create(symbol, X, y=None, ctx=None,
num_epoch=None, epoch_size=None, optimizer='sgd', initializer=Uniform(0.01),
eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None,
kvstore='local', logger=None, work_load_list=None,
eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None, **kwargs):
"""Functional style to create a model.
This function is more consistent with functional
languages such as R, where mutation is not allowed.
Parameters
----------
symbol : Symbol
The symbol configuration of a computation network.
X : DataIter
Training data.
y : numpy.ndarray, optional
If `X` is a ``numpy.ndarray``, `y` must be set.
ctx : Context or list of Context, optional
The device context of training and prediction.
To use multi-GPU training, pass in a list of GPU contexts.
num_epoch : int, optional
The number of training epochs.
epoch_size : int, optional
Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : str or Optimizer, optional
The name of the chosen optimizer, or an optimizer object, used for training.
initializer : initializer function, optional
The initialization scheme used.
eval_data : DataIter or numpy.ndarray pair
If `eval_data` is a ``numpy.ndarray`` pair, it should
be (`valid_data`, `valid_label`).
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. Can be the name of an evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for print purposes.
kvstore: KVStore or str, optional
The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'.
Defaults to 'local', often no need to change for single machine.
logger : logging logger, optional
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
"""
model = FeedForward(symbol, ctx=ctx, num_epoch=num_epoch,
epoch_size=epoch_size,
optimizer=optimizer, initializer=initializer, **kwargs)
model.fit(X, y, eval_data=eval_data, eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore,
logger=logger,
work_load_list=work_load_list,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback)
return model
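if __name__ == '__main__':
    # Hedged usage sketch (added for illustration; not part of the upstream
    # module).  It exercises the deprecated FeedForward workflow described in
    # the docstrings above on small random data: every shape, layer size,
    # file prefix and hyper-parameter below is an arbitrary assumption.
    example_net = sym.Variable('data')
    example_net = sym.FullyConnected(example_net, name='fc1', num_hidden=2)
    example_net = sym.SoftmaxOutput(example_net, name='softmax')
    example_X = np.random.uniform(size=(100, 16)).astype('float32')
    example_y = np.random.randint(0, 2, size=(100,)).astype('float32')
    # create() builds the model and runs fit() in a single call.
    example_model = FeedForward.create(example_net, X=example_X, y=example_y,
                                       num_epoch=1, numpy_batch_size=10,
                                       learning_rate=0.1)
    # predict() returns a (100, 2) array of class probabilities here.
    example_scores = example_model.predict(example_X)
    # score() runs an evaluation metric ('acc' by default) over a DataIter.
    example_acc = example_model.score(io.NDArrayIter(example_X, example_y,
                                                     batch_size=10))
    print('example accuracy:', example_acc, example_scores.shape)
    # save()/load() round-trip through prefix-symbol.json / prefix-0001.params.
    example_model.save('feedforward-example')
    example_reloaded = FeedForward.load('feedforward-example', epoch=1)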
| 41.566474
| 118
| 0.596463
|
from __future__ import absolute_import, print_function
import os
import time
import logging
import warnings
from collections import namedtuple
import numpy as np
from . import io
from . import ndarray as nd
from . import symbol as sym
from . import optimizer as opt
from . import metric
from . import kvstore as kvs
from .context import Context, cpu
from .initializer import Uniform
from .optimizer import get_updater
from .executor_manager import DataParallelExecutorManager, _check_arguments, _load_data
from .io import DataDesc
from .base import mx_real_t
BASE_ESTIMATOR = object
try:
from sklearn.base import BaseEstimator
BASE_ESTIMATOR = BaseEstimator
except ImportError:
SKLEARN_INSTALLED = False
BatchEndParam = namedtuple('BatchEndParams',
['epoch',
'nbatch',
'eval_metric',
'locals'])
def _create_sparse_kvstore(kvstore):
update_on_kvstore = True
if isinstance(kvstore, kvs.KVStore):
kv = kvstore
elif isinstance(kvstore, str):
kv = kvs.create(kvstore)
else:
raise TypeError("Cannot create '%s' KVStore with row_sparse parameters. "
"The type must be KVStore or str." % kvstore)
return (kv, update_on_kvstore)
def _create_kvstore(kvstore, num_device, arg_params):
update_on_kvstore = bool(int(os.getenv('MXNET_UPDATE_ON_KVSTORE', "1")))
if kvstore is None:
kv = None
elif isinstance(kvstore, kvs.KVStore):
kv = kvstore
elif isinstance(kvstore, str):
if num_device == 1 and 'dist' not in kvstore:
kv = None
else:
kv = kvs.create(kvstore)
if kvstore == 'local':
max_size = max(np.prod(param.shape) for param in
arg_params.values())
if max_size > 1024 * 1024 * 16:
update_on_kvstore = False
else:
raise TypeError('kvstore must be KVStore, str or None')
if kv is None:
update_on_kvstore = False
return (kv, update_on_kvstore)
def _initialize_kvstore(kvstore, param_arrays, arg_params, param_names, update_on_kvstore):
for idx, param_on_devs in enumerate(param_arrays):
name = param_names[idx]
kvstore.init(name, arg_params[name])
if update_on_kvstore:
kvstore.pull(name, param_on_devs, priority=-idx)
def _update_params_on_kvstore_nccl(param_arrays, grad_arrays, kvstore, param_names):
valid_indices = [index for index, grad_list in
enumerate(grad_arrays) if grad_list[0] is not None]
valid_grad_arrays = [grad_arrays[i] for i in valid_indices]
valid_param_arrays = [param_arrays[i] for i in valid_indices]
valid_param_names = [param_names[i] for i in valid_indices]
size = len(valid_grad_arrays)
start = 0
default_batch = '16'
batch = int(os.getenv('MXNET_UPDATE_AGGREGATION_SIZE', default_batch))
while start < size:
end = start + batch if start + batch < size else size
kvstore.push(valid_param_names[start:end], valid_grad_arrays[start:end], priority=-start)
kvstore.pull(valid_param_names[start:end], valid_param_arrays[start:end], priority=-start)
start = end
def _update_params_on_kvstore(param_arrays, grad_arrays, kvstore, param_names):
for index, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
name = param_names[index]
kvstore.push(name, grad_list, priority=-index)
kvstore.pull(name, arg_list, priority=-index)
def _update_params(param_arrays, grad_arrays, updater, num_device,
kvstore=None, param_names=None):
updates = [[] for _ in range(num_device)]
for i, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
index = i
if kvstore:
name = param_names[index]
kvstore.push(name, grad_list, priority=-index)
kvstore.pull(name, grad_list, priority=-index)
for k, p in enumerate(zip(arg_list, grad_list)):
w, g = p
updates[k].append((index*num_device+k, g, w))
for dev_updates in updates:
if dev_updates:
i, w, g = zip(*dev_updates)
updater(i, w, g)
def _multiple_callbacks(callbacks, *args, **kwargs):
if isinstance(callbacks, list):
for cb in callbacks:
cb(*args, **kwargs)
return
if callbacks:
callbacks(*args, **kwargs)
def _train_multi_device(symbol, ctx, arg_names, param_names, aux_names,
arg_params, aux_params,
begin_epoch, end_epoch, epoch_size, optimizer,
kvstore, update_on_kvstore,
train_data, eval_data=None, eval_metric=None,
epoch_end_callback=None, batch_end_callback=None,
logger=None, work_load_list=None, monitor=None,
eval_end_callback=None,
eval_batch_end_callback=None, sym_gen=None):
if logger is None:
logger = logging
executor_manager = DataParallelExecutorManager(symbol=symbol,
sym_gen=sym_gen,
ctx=ctx,
train_data=train_data,
param_names=param_names,
arg_names=arg_names,
aux_names=aux_names,
work_load_list=work_load_list,
logger=logger)
if monitor:
executor_manager.install_monitor(monitor)
executor_manager.set_params(arg_params, aux_params)
if not update_on_kvstore:
updater = get_updater(optimizer)
else:
kvstore.set_optimizer(optimizer)
if kvstore:
_initialize_kvstore(kvstore=kvstore,
param_arrays=executor_manager.param_arrays,
arg_params=arg_params,
param_names=executor_manager.param_names,
update_on_kvstore=update_on_kvstore)
train_data.reset()
for epoch in range(begin_epoch, end_epoch):
tic = time.time()
eval_metric.reset()
nbatch = 0
while True:
do_reset = True
for data_batch in train_data:
executor_manager.load_data_batch(data_batch)
if monitor is not None:
monitor.tic()
executor_manager.forward(is_train=True)
executor_manager.backward()
if update_on_kvstore:
if 'nccl' in kvstore.type:
_update_params_on_kvstore_nccl(executor_manager.param_arrays,
executor_manager.grad_arrays,
kvstore, executor_manager.param_names)
else:
_update_params_on_kvstore(executor_manager.param_arrays,
executor_manager.grad_arrays,
kvstore, executor_manager.param_names)
else:
_update_params(executor_manager.param_arrays,
executor_manager.grad_arrays,
updater=updater,
num_device=len(ctx),
kvstore=kvstore,
param_names=executor_manager.param_names)
if monitor is not None:
monitor.toc_print()
executor_manager.update_metric(eval_metric, data_batch.label)
nbatch += 1
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
if epoch_size is not None and nbatch >= epoch_size:
do_reset = False
break
if do_reset:
logger.info('Epoch[%d] Resetting Data Iterator', epoch)
train_data.reset()
if epoch_size is None or nbatch >= epoch_size:
break
toc = time.time()
logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc - tic))
if epoch_end_callback or epoch + 1 == end_epoch:
executor_manager.copy_to(arg_params, aux_params)
_multiple_callbacks(epoch_end_callback, epoch, symbol, arg_params, aux_params)
if eval_data:
eval_metric.reset()
eval_data.reset()
total_num_batch = 0
for i, eval_batch in enumerate(eval_data):
executor_manager.load_data_batch(eval_batch)
executor_manager.forward(is_train=False)
executor_manager.update_metric(eval_metric, eval_batch.label)
if eval_batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_batch_end_callback, batch_end_params)
total_num_batch += 1
if eval_end_callback is not None:
eval_end_params = BatchEndParam(epoch=epoch,
nbatch=total_num_batch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_end_callback, eval_end_params)
eval_data.reset()
def save_checkpoint(prefix, epoch, symbol, arg_params, aux_params, remove_amp_cast=True):
if symbol is not None:
symbol.save('%s-symbol.json' % prefix, remove_amp_cast=remove_amp_cast)
save_dict = {('arg:%s' % k) : v.as_in_context(cpu()) for k, v in arg_params.items()}
save_dict.update({('aux:%s' % k) : v.as_in_context(cpu()) for k, v in aux_params.items()})
param_name = '%s-%04d.params' % (prefix, epoch)
nd.save(param_name, save_dict)
logging.info('Saved checkpoint to \"%s\"', param_name)
def load_params(prefix, epoch):
save_dict = nd.load("%s-%04d.params" % (prefix, epoch))
arg_params = {}
aux_params = {}
if not save_dict:
logging.warning("Params file '%s' is empty", '%s-%04d.params' % (prefix, epoch))
return (arg_params, aux_params)
for k, v in save_dict.items():
tp, name = k.split(":", 1)
if tp == "arg":
arg_params[name] = v
if tp == "aux":
aux_params[name] = v
return (arg_params, aux_params)
def load_checkpoint(prefix, epoch):
symbol = sym.load('%s-symbol.json' % prefix)
arg_params, aux_params = load_params(prefix, epoch)
return (symbol, arg_params, aux_params)
from .callback import LogValidationMetricsCallback
class FeedForward(BASE_ESTIMATOR):
def __init__(self, symbol, ctx=None,
num_epoch=None, epoch_size=None, optimizer='sgd',
initializer=Uniform(0.01),
numpy_batch_size=128,
arg_params=None, aux_params=None,
allow_extra_params=False,
begin_epoch=0,
**kwargs):
warnings.warn(
'\033[91mmxnet.model.FeedForward has been deprecated. ' + \
'Please use mxnet.mod.Module instead.\033[0m',
DeprecationWarning, stacklevel=2)
if isinstance(symbol, sym.Symbol):
self.symbol = symbol
self.sym_gen = None
else:
assert(callable(symbol))
self.symbol = None
self.sym_gen = symbol
self.arg_params = arg_params
self.aux_params = aux_params
self.allow_extra_params = allow_extra_params
self.argument_checked = False
if self.sym_gen is None:
self._check_arguments()
if ctx is None:
ctx = [cpu()]
elif isinstance(ctx, Context):
ctx = [ctx]
self.ctx = ctx
self.num_epoch = num_epoch
self.epoch_size = epoch_size
self.kwargs = kwargs.copy()
self.optimizer = optimizer
self.initializer = initializer
self.numpy_batch_size = numpy_batch_size
self._pred_exec = None
self.begin_epoch = begin_epoch
def _check_arguments(self):
if self.argument_checked:
return
assert(self.symbol is not None)
self.argument_checked = True
_check_arguments(self.symbol)
if self.allow_extra_params:
if self.arg_params:
arg_names = set(self.symbol.list_arguments())
self.arg_params = {k : v for k, v in self.arg_params.items()
if k in arg_names}
if self.aux_params:
aux_names = set(self.symbol.list_auxiliary_states())
self.aux_params = {k : v for k, v in self.aux_params.items()
if k in aux_names}
@staticmethod
def _is_data_arg(name):
return name.endswith('data') or name.endswith('label')
def _init_params(self, inputs, overwrite=False):
inputs = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in inputs]
input_shapes = {item.name: item.shape for item in inputs}
arg_shapes, _, aux_shapes = self.symbol.infer_shape(**input_shapes)
assert arg_shapes is not None
input_dtypes = {item.name: item.dtype for item in inputs}
arg_dtypes, _, aux_dtypes = self.symbol.infer_type(**input_dtypes)
assert arg_dtypes is not None
arg_names = self.symbol.list_arguments()
input_names = input_shapes.keys()
param_names = [key for key in arg_names if key not in input_names]
aux_names = self.symbol.list_auxiliary_states()
param_name_attrs = [x for x in zip(arg_names, arg_shapes, arg_dtypes)
if x[0] in param_names]
arg_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in param_name_attrs}
aux_name_attrs = [x for x in zip(aux_names, aux_shapes, aux_dtypes)
if x[0] in aux_names]
aux_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in aux_name_attrs}
for k, v in arg_params.items():
if self.arg_params and k in self.arg_params and (not overwrite):
arg_params[k][:] = self.arg_params[k][:]
else:
self.initializer(k, v)
for k, v in aux_params.items():
if self.aux_params and k in self.aux_params and (not overwrite):
aux_params[k][:] = self.aux_params[k][:]
else:
self.initializer(k, v)
self.arg_params = arg_params
self.aux_params = aux_params
return (arg_names, list(param_names), aux_names)
def __getstate__(self):
this = self.__dict__.copy()
this['_pred_exec'] = None
return this
def __setstate__(self, state):
self.__dict__.update(state)
def _init_predictor(self, input_shapes, type_dict=None):
shapes = {name: self.arg_params[name].shape for name in self.arg_params}
shapes.update(dict(input_shapes))
if self._pred_exec is not None:
arg_shapes, _, _ = self.symbol.infer_shape(**shapes)
assert arg_shapes is not None, "Incomplete input shapes"
pred_shapes = [x.shape for x in self._pred_exec.arg_arrays]
if arg_shapes == pred_shapes:
return
pred_exec = self.symbol.simple_bind(
self.ctx[0], grad_req='null', type_dict=type_dict, **shapes)
pred_exec.copy_params_from(self.arg_params, self.aux_params)
_check_arguments(self.symbol)
self._pred_exec = pred_exec
def _init_iter(self, X, y, is_train):
if isinstance(X, (np.ndarray, nd.NDArray)):
if y is None:
if is_train:
raise ValueError('y must be specified when X is numpy.ndarray')
y = np.zeros(X.shape[0])
if not isinstance(y, (np.ndarray, nd.NDArray)):
raise TypeError('y must be ndarray when X is numpy.ndarray')
if X.shape[0] != y.shape[0]:
raise ValueError("The numbers of data points and labels not equal")
if y.ndim == 2 and y.shape[1] == 1:
y = y.flatten()
if y.ndim != 1:
raise ValueError("Label must be 1D or 2D (with 2nd dimension being 1)")
if is_train:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size),
shuffle=is_train, last_batch_handle='roll_over')
else:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size), shuffle=False)
if not isinstance(X, io.DataIter):
raise TypeError('X must be DataIter, NDArray or numpy.ndarray')
return X
def _init_eval_iter(self, eval_data):
if eval_data is None:
return eval_data
if isinstance(eval_data, (tuple, list)) and len(eval_data) == 2:
if eval_data[0] is not None:
if eval_data[1] is None and isinstance(eval_data[0], io.DataIter):
return eval_data[0]
input_data = (np.array(eval_data[0]) if isinstance(eval_data[0], list)
else eval_data[0])
input_label = (np.array(eval_data[1]) if isinstance(eval_data[1], list)
else eval_data[1])
return self._init_iter(input_data, input_label, is_train=True)
else:
raise ValueError("Eval data is NONE")
if not isinstance(eval_data, io.DataIter):
raise TypeError('Eval data must be DataIter, or ' \
'NDArray/numpy.ndarray/list pair (i.e. tuple/list of length 2)')
return eval_data
def predict(self, X, num_batch=None, return_data=False, reset=True):
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
batch_size = X.batch_size
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
output_list = [[] for _ in range(len(self._pred_exec.outputs))]
if return_data:
data_list = [[] for _ in X.provide_data]
label_list = [[] for _ in X.provide_label]
i = 0
for batch in X:
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
padded = batch.pad
real_size = batch_size - padded
for o_list, o_nd in zip(output_list, self._pred_exec.outputs):
o_list.append(o_nd[0:real_size].asnumpy())
if return_data:
for j, x in enumerate(batch.data):
data_list[j].append(x[0:real_size].asnumpy())
for j, x in enumerate(batch.label):
label_list[j].append(x[0:real_size].asnumpy())
i += 1
if num_batch is not None and i == num_batch:
break
outputs = [np.concatenate(x) for x in output_list]
if len(outputs) == 1:
outputs = outputs[0]
if return_data:
data = [np.concatenate(x) for x in data_list]
label = [np.concatenate(x) for x in label_list]
if len(data) == 1:
data = data[0]
if len(label) == 1:
label = label[0]
return outputs, data, label
else:
return outputs
def score(self, X, eval_metric='acc', num_batch=None, batch_end_callback=None, reset=True):
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
for i, batch in enumerate(X):
if num_batch is not None and i == num_batch:
break
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
eval_metric.update(batch.label, self._pred_exec.outputs)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=0,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
return eval_metric.get()[1]
def fit(self, X, y=None, eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None, kvstore='local', logger=None,
work_load_list=None, monitor=None, eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None):
data = self._init_iter(X, y, is_train=True)
eval_data = self._init_eval_iter(eval_data)
if self.sym_gen:
self.symbol = self.sym_gen(data.default_bucket_key)
self._check_arguments()
self.kwargs["sym"] = self.symbol
arg_names, param_names, aux_names = \
self._init_params(data.provide_data+data.provide_label)
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
(kvstore, update_on_kvstore) = _create_kvstore(
kvstore, len(self.ctx), self.arg_params)
param_idx2name = {}
if update_on_kvstore:
param_idx2name.update(enumerate(param_names))
else:
for i, n in enumerate(param_names):
for k in range(len(self.ctx)):
param_idx2name[i*len(self.ctx)+k] = n
self.kwargs["param_idx2name"] = param_idx2name
if isinstance(self.optimizer, str):
batch_size = data.batch_size
if kvstore and 'dist' in kvstore.type and '_async' not in kvstore.type:
batch_size *= kvstore.num_workers
optimizer = opt.create(self.optimizer,
rescale_grad=(1.0/batch_size),
**(self.kwargs))
elif isinstance(self.optimizer, opt.Optimizer):
    optimizer = self.optimizer
    if not optimizer.idx2name:
        optimizer.idx2name = param_idx2name.copy()
_train_multi_device(self.symbol, self.ctx, arg_names, param_names, aux_names,
self.arg_params, self.aux_params,
begin_epoch=self.begin_epoch, end_epoch=self.num_epoch,
epoch_size=self.epoch_size,
optimizer=optimizer,
train_data=data, eval_data=eval_data,
eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore, update_on_kvstore=update_on_kvstore,
logger=logger, work_load_list=work_load_list, monitor=monitor,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback,
sym_gen=self.sym_gen)
def save(self, prefix, epoch=None, remove_amp_cast=True):
if epoch is None:
epoch = self.num_epoch
assert epoch is not None
save_checkpoint(prefix, epoch, self.symbol, self.arg_params, self.aux_params, remove_amp_cast=remove_amp_cast)
@staticmethod
def load(prefix, epoch, ctx=None, **kwargs):
symbol, arg_params, aux_params = load_checkpoint(prefix, epoch)
return FeedForward(symbol, ctx=ctx,
arg_params=arg_params, aux_params=aux_params,
begin_epoch=epoch,
**kwargs)
@staticmethod
def create(symbol, X, y=None, ctx=None,
num_epoch=None, epoch_size=None, optimizer='sgd', initializer=Uniform(0.01),
eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None,
kvstore='local', logger=None, work_load_list=None,
eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None, **kwargs):
model = FeedForward(symbol, ctx=ctx, num_epoch=num_epoch,
epoch_size=epoch_size,
optimizer=optimizer, initializer=initializer, **kwargs)
model.fit(X, y, eval_data=eval_data, eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore,
logger=logger,
work_load_list=work_load_list,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback)
return model
| true
| true
|
f718db406f28ba12957da6ad939fb6e3b4b39b16
| 17,400
|
py
|
Python
|
markdown_to_confluence/confluence.py
|
vmware-tanzu-labs/markdown-to-confluence
|
2a201f6721f819277fcbeda140b039a3c9cbf496
|
[
"Apache-2.0"
] | null | null | null |
markdown_to_confluence/confluence.py
|
vmware-tanzu-labs/markdown-to-confluence
|
2a201f6721f819277fcbeda140b039a3c9cbf496
|
[
"Apache-2.0"
] | null | null | null |
markdown_to_confluence/confluence.py
|
vmware-tanzu-labs/markdown-to-confluence
|
2a201f6721f819277fcbeda140b039a3c9cbf496
|
[
"Apache-2.0"
] | 1
|
2022-02-18T14:26:36.000Z
|
2022-02-18T14:26:36.000Z
|
import logging
import requests
import os
import pickle
import sys
from urllib.parse import urljoin
API_HEADERS = {
'User-Agent': 'markdown-to-confluence',
}
MULTIPART_HEADERS = {
'X-Atlassian-Token': 'nocheck' # Only need this for form uploads
}
DEFAULT_LABEL_PREFIX = 'global'
log = logging.getLogger(__name__)
class MissingArgumentException(Exception):
def __init__(self, arg):
self.message = 'Missing required argument: {}'.format(arg)
class Confluence():
def __init__(self,
api_url=None,
username=None,
password=None,
cookie=None,
headers=None,
dry_run=False,
_client=None):
"""Creates a new Confluence API client.
Arguments:
api_url {str} -- The URL to the Confluence API root (e.g. https://wiki.example.com/api/rest/)
username {str} -- The Confluence service account username
password {str} -- The Confluence service account password
headers {list(str)} -- The HTTP headers which will be set for all requests
dry_run {bool} -- If True, log requests instead of sending them
"""
# A common gotcha will be given a URL that doesn't end with a /, so we
# can account for this
if not api_url.endswith('/'):
api_url = api_url + '/'
self.api_url = api_url
self.username = username
self.password = password
self.dry_run = dry_run
if _client is None:
_client = requests.Session()
self._session = _client
if cookie:
log.info(f'Using existing cookie from {cookie}')
with open(cookie, 'rb') as f:
self._session.cookies.update(pickle.load(f))
else:
log.info('No cookie provided. Using username and password')
self._session.auth = (self.username, self.password)
for header in headers or []:
try:
name, value = header.split(':', 1)
except ValueError:
name, value = header, ''
self._session.headers[name] = value.lstrip()
def _require_kwargs(self, kwargs):
"""Ensures that certain kwargs have been provided
Arguments:
kwargs {dict} -- The dict of required kwargs
"""
missing = []
for k, v in kwargs.items():
if not v:
missing.append(k)
if missing:
raise MissingArgumentException(missing)
def _request(self,
method='GET',
path='',
params=None,
files=None,
data=None,
headers=None):
url = urljoin(self.api_url, path)
if not headers:
headers = {}
headers.update(API_HEADERS)
if files:
headers.update(MULTIPART_HEADERS)
if data:
headers.update({'Content-Type': 'application/json'})
if self.dry_run:
log.info('''{method} {url}:
Params: {params}
Data: {data}
Files: {files}'''.format(method=method,
url=url,
params=params,
data=data,
files=files))
if method != 'GET':
return {}
response = self._session.request(method=method,
url=url,
params=params,
json=data,
headers=headers,
files=files)
if not response.ok:
log.info('''{method} {url}: {status_code} {reason}
Params: {params}
Data: {data}
Files: {files}'''.format(method=method,
url=url,
status_code=response.status_code,
reason=response.reason,
params=params,
data=data,
files=files))
if response.status_code == 403 or response.status_code == 401:
log.info('Authorization failed. Please check your credentials.')
sys.exit(1)
return response.content
# Will probably want to be more robust here, but this should work for now
return response.json()
def get(self, path=None, params=None):
return self._request(method='GET', path=path, params=params)
def post(self, path=None, params=None, data=None, files=None):
return self._request(method='POST',
path=path,
params=params,
data=data,
files=files)
def put(self, path=None, params=None, data=None):
return self._request(method='PUT', path=path, params=params, data=data)
def exists(self, space=None, title=None, ancestor_id=None):
"""Returns the Confluence page that matches the provided metdata, if it exists.
Specifically, this leverages a Confluence Query Language (CQL) query
against the Confluence API. We assume that each title is unique, at
least to the provided space/ancestor_id.
Arguments:
space {str} -- The Confluence space to use for filtering posts
title {str} -- The page title
ancestor_id {str} -- The ID of the parent page
"""
self._require_kwargs({'title': title})
cql_args = []
if title:
cql_args.append(f'title="{title}"')
if ancestor_id:
cql_args.append('ancestor={}'.format(ancestor_id))
if space:
cql_args.append('space={!r}'.format(space))
cql = ' and '.join(cql_args)
params = {'expand': 'version', 'cql': cql}
response = self.get(path='content/search', params=params)
if not response.get('size'):
return None
ret = [ r for r in response['results'] if r['type'] == 'page' and r['title'] == title ]
assert(len(ret) == 1)
return ret[0]
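    # Hedged usage sketch (added for illustration; not part of the original
    # module): looking up an existing page by title, optionally scoped to a
    # space and a parent page.  The space key and ancestor id are placeholders.
    #
    #     page = confluence.exists(space='DOC', title='Example Page',
    #                              ancestor_id='98765')
    #     if page:
    #         print(page['id'], page['version']['number'])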
def ping(self):
"""
Basic request to get a cookie
"""
response = self.get(path=f'content', params={ 'type': 'page', 'limit': 1 })
return response.get('size')
def save_cookie(self, dest):
if self.ping():
with open(dest, 'wb') as f:
pickle.dump(self._session.cookies, f)
return True
return False
def get_page_content(self, id):
"""Returns the content of the Confluence page that matches the provided metdata, if it exists.
Arguments:
id {str} -- The ID of the page
"""
response = self.get(path=f'content/{id}', params={ 'expand': 'body.storage' })
return response.get('body')['storage']['value']
def create_labels(self, page_id=None, tags=[]):
"""Creates labels for the page to both assist with searching as well
as categorization.
Each tag is applied as a page label using the default ('global') prefix;
tags are optional.
Keyword Arguments:
    page_id {str} -- The ID of the existing page to which the labels should apply
    tags {list(str)} -- The tags to apply to the post
"""
labels = []
if tags is None:
tags = []
for tag in tags:
labels.append({'prefix': DEFAULT_LABEL_PREFIX, 'name': tag})
path = 'content/{page_id}/label'.format(page_id=page_id)
response = self.post(path=path, data=labels)
# Do a sanity check to ensure that the label for the slug appears in
# the results, since that's needed for us to find the page later.
labels = response.get('results', [])
if not labels:
log.error(
'No labels found after attempting to update page {}'.format(
page_id))
log.error('Here\'s the response we got:\n{}'.format(response))
return labels
log.info(
'Created the following labels for page {page_id}: {labels}'.format(
page_id=page_id,
labels=', '.join(label['name'] for label in labels)))
return labels
def _create_page_payload(self,
content=None,
title=None,
ancestor_id=None,
attachments=None,
space=None,
type='page'):
ret = {
'type': type,
'title': title,
'space': {
'key': space
},
'body': {
'storage': {
'representation': 'storage',
'value': content
}
}
}
if ancestor_id:
ret['ancestors'] = [{
'id': str(ancestor_id)
}]
return ret
def get_attachments(self, post_id):
"""Gets the attachments for a particular Confluence post
Arguments:
post_id {str} -- The Confluence post ID
"""
response = self.get("/content/{}/attachments".format(post_id))
return response.get('results', [])
def upload_attachment(self, post_id=None, attachment_path=None):
"""Uploads an attachment to a Confluence post
Keyword Arguments:
post_id {str} -- The Confluence post ID
attachment_path {str} -- The absolute path to the attachment
"""
path = 'content/{}/child/attachment'.format(post_id)
if not os.path.exists(attachment_path):
log.error('Attachment {} does not exist'.format(attachment_path))
return
log.info(
'Uploading attachment {attachment_path} to post {post_id}'.format(
attachment_path=attachment_path, post_id=post_id))
if not self.dry_run:
self.post(path=path,
params={'allowDuplicated': 'true'},
files={'file': open(attachment_path, 'rb')})
log.info('Uploaded {} to post ID {}'.format(attachment_path, post_id))
def get_author(self, username):
"""Returns the Confluence author profile for the provided username,
if it exists.
Arguments:
username {str} -- The Confluence username
"""
log.info('Looking up Confluence user key for {}'.format(username))
response = self.get(path='user', params={'username': username})
if not isinstance(response, dict) or not response.get('userKey'):
log.error('No Confluence user key for {}'.format(username))
return {}
return response
def create(self,
content=None,
space=None,
title=None,
ancestor_id=None,
slug=None,
tags=None,
attachments=None,
type='page'):
"""Creates a new page with the provided content.
If an ancestor_id is specified, then the page will be created as a
child of that ancestor page.
Keyword Arguments:
content {str} -- The HTML content to upload (required)
space {str} -- The Confluence space where the page should reside
title {str} -- The page title
ancestor_id {str} -- The ID of the parent Confluence page
slug {str} -- The unique slug for the page
tags {list(str)} -- The list of tags for the page
attachments {list(str)} -- List of absolute paths to attachments
which should be uploaded.
"""
self._require_kwargs({
'content': content,
'slug': slug,
'title': title,
'space': space
})
page = self._create_page_payload(content='Created by markdown-to-confluence - <a href="https://github.com/vmware-tanzu-labs/markdown-to-confluence">https://github.com/vmware-tanzu-labs/markdown-to-confluence</a>',
title=title,
ancestor_id=ancestor_id,
space=space,
type=type)
response = self.post(path='content/', data=page)
page_id = response['id']
page_url = urljoin(self.api_url, response['_links']['webui'])
log.info('Page "{title}" (id {page_id}) created successfully at {url}'.
format(title=title, page_id=response.get('id'), url=page_url))
# Now that we have the page created, we can just treat the rest of the
# flow like an update.
return self.update(post_id=page_id,
content=content,
space=space,
title=title,
ancestor_id=ancestor_id,
slug=slug,
tags=tags,
page=response,
attachments=attachments)
def update(self,
post_id=None,
content=None,
space=None,
title=None,
ancestor_id=None,
slug=None,
tags=None,
attachments=None,
page=None,
type='page'):
"""Updates an existing page with new content.
This involves updating the attachments stored on Confluence, uploading
the page content, and finally updating the labels.
Keyword Arguments:
post_id {str} -- The ID of the Confluence post
content {str} -- The page represented in Confluence storage format
space {str} -- The Confluence space where the page should reside
title {str} -- The page title
ancestor_id {str} -- The ID of the parent Confluence page
slug {str} -- The unique slug for the page
tags {list(str)} -- The list of tags for the page
attachments {list(str)} -- The list of absolute file paths to any
attachments which should be uploaded
"""
self._require_kwargs({
'content': content,
'slug': slug,
'title': title,
'post_id': post_id,
'space': space
})
# Since the page already has an ID in Confluence, before updating our
# content which references certain attachments, we should make sure
# those attachments have been uploaded.
if attachments is None:
attachments = []
for attachment in attachments:
self.upload_attachment(post_id=post_id, attachment_path=attachment)
# Next, we can create the updated page structure
new_page = self._create_page_payload(content=content,
title=title,
ancestor_id=ancestor_id,
space=space,
type=type)
# Increment the version number, as required by the Confluence API
# https://docs.atlassian.com/ConfluenceServer/rest/7.1.0/#api/content-update
new_version = page['version']['number'] + 1
new_page['version'] = {'number': new_version}
# With the attachments uploaded, and our new page structure created,
# we can upload the final content up to Confluence.
path = 'content/{}'.format(page['id'])
response = self.put(path=path, data=new_page)
failure = False
# Dry-run option doesn't create any pages hence no urls,
# we set it to a fixed value
if self.dry_run:
page_url = '(dry run)'
# Test if there was a page creation error, if so set
# failure var to True
else:
try:
test = response['_links']['webui']
except(TypeError):
failure = True
else:
page_url = urljoin(self.api_url, response['_links']['webui'])
# Check for page creation failure and pass it back in the post_id var for tracking
if failure:
log.error('ERROR ---- Page "{title}" (id {page_id}) failed to update'.format(title=title, page_id=post_id))
post_id = post_id + " - fail"
else:
if tags:
# Finally, we can update the labels on the page
self.create_labels(page_id=post_id, tags=tags)
log.info('Page "{title}" (id {page_id}) updated successfully at {url}'.
format(title=title, page_id=post_id, url=page_url))
return post_id
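if __name__ == '__main__':
    # Hedged usage sketch (added for illustration; not part of the original
    # module).  It exercises the update() flow documented above entirely
    # offline: dry_run=True makes _request() log every non-GET call instead of
    # sending it.  The URL, credentials, space key, page id and version below
    # are placeholder assumptions; in real use the `page` record would come
    # from exists(space=..., title=...).
    logging.basicConfig(level=logging.INFO)
    client = Confluence(api_url='https://wiki.example.com/rest/api/',
                        username='service-account',
                        password='not-a-real-password',
                        dry_run=True)
    fake_page = {'id': '12345', 'version': {'number': 1}}
    client.update(post_id='12345',
                  content='<p>Hello from markdown-to-confluence.</p>',
                  space='DOC',
                  title='Example Page',
                  slug='example-page',
                  page=fake_page)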
| 37.339056
| 221
| 0.523218
|
import logging
import requests
import os
import pickle
import sys
from urllib.parse import urljoin
API_HEADERS = {
'User-Agent': 'markdown-to-confluence',
}
MULTIPART_HEADERS = {
'X-Atlassian-Token': 'nocheck'
}
DEFAULT_LABEL_PREFIX = 'global'
log = logging.getLogger(__name__)
class MissingArgumentException(Exception):
def __init__(self, arg):
self.message = 'Missing required argument: {}'.format(arg)
class Confluence():
def __init__(self,
api_url=None,
username=None,
password=None,
cookie=None,
headers=None,
dry_run=False,
_client=None):
# can account for this
if not api_url.endswith('/'):
api_url = api_url + '/'
self.api_url = api_url
self.username = username
self.password = password
self.dry_run = dry_run
if _client is None:
_client = requests.Session()
self._session = _client
if cookie:
log.info(f'Using existing cookie from {cookie}')
with open(cookie, 'rb') as f:
self._session.cookies.update(pickle.load(f))
else:
log.info('No cookie provided. Using username and password')
self._session.auth = (self.username, self.password)
for header in headers or []:
try:
name, value = header.split(':', 1)
except ValueError:
name, value = header, ''
self._session.headers[name] = value.lstrip()
def _require_kwargs(self, kwargs):
missing = []
for k, v in kwargs.items():
if not v:
missing.append(k)
if missing:
raise MissingArgumentException(missing)
def _request(self,
method='GET',
path='',
params=None,
files=None,
data=None,
headers=None):
url = urljoin(self.api_url, path)
if not headers:
headers = {}
headers.update(API_HEADERS)
if files:
headers.update(MULTIPART_HEADERS)
if data:
headers.update({'Content-Type': 'application/json'})
if self.dry_run:
log.info('''{method} {url}:
Params: {params}
Data: {data}
Files: {files}'''.format(method=method,
url=url,
params=params,
data=data,
files=files))
if method != 'GET':
return {}
response = self._session.request(method=method,
url=url,
params=params,
json=data,
headers=headers,
files=files)
if not response.ok:
log.info('''{method} {url}: {status_code} {reason}
Params: {params}
Data: {data}
Files: {files}'''.format(method=method,
url=url,
status_code=response.status_code,
reason=response.reason,
params=params,
data=data,
files=files))
if response.status_code == 403 or response.status_code == 401:
log.info('Authorization failed. Please check your credentials.')
sys.exit(1)
return response.content
# Will probably want to be more robust here, but this should work for now
return response.json()
def get(self, path=None, params=None):
return self._request(method='GET', path=path, params=params)
def post(self, path=None, params=None, data=None, files=None):
return self._request(method='POST',
path=path,
params=params,
data=data,
files=files)
def put(self, path=None, params=None, data=None):
return self._request(method='PUT', path=path, params=params, data=data)
def exists(self, space=None, title=None, ancestor_id=None):
self._require_kwargs({'title': title})
cql_args = []
if title:
cql_args.append(f'title="{title}"')
if ancestor_id:
cql_args.append('ancestor={}'.format(ancestor_id))
if space:
cql_args.append('space={!r}'.format(space))
cql = ' and '.join(cql_args)
params = {'expand': 'version', 'cql': cql}
response = self.get(path='content/search', params=params)
if not response.get('size'):
return None
ret = [ r for r in response['results'] if r['type'] == 'page' and r['title'] == title ]
assert(len(ret) == 1)
return ret[0]
def ping(self):
response = self.get(path=f'content', params={ 'type': 'page', 'limit': 1 })
return response.get('size')
def save_cookie(self, dest):
if self.ping():
with open(dest, 'wb') as f:
pickle.dump(self._session.cookies, f)
return True
return False
def get_page_content(self, id):
response = self.get(path=f'content/{id}', params={ 'expand': 'body.storage' })
return response.get('body')['storage']['value']
def create_labels(self, page_id=None, tags=[]):
labels = []
if tags is None:
tags = []
for tag in tags:
labels.append({'prefix': DEFAULT_LABEL_PREFIX, 'name': tag})
path = 'content/{page_id}/label'.format(page_id=page_id)
response = self.post(path=path, data=labels)
# Do a sanity check to ensure that the label for the slug appears in
# the results, since that's needed for us to find the page later.
labels = response.get('results', [])
if not labels:
log.error(
'No labels found after attempting to update page {}'.format(
page_id))
log.error('Here\'s the response we got:\n{}'.format(response))
return labels
log.info(
'Created the following labels for page {page_id}: {labels}'.format(
page_id=page_id,
labels=', '.join(label['name'] for label in labels)))
return labels
def _create_page_payload(self,
content=None,
title=None,
ancestor_id=None,
attachments=None,
space=None,
type='page'):
ret = {
'type': type,
'title': title,
'space': {
'key': space
},
'body': {
'storage': {
'representation': 'storage',
'value': content
}
}
}
if ancestor_id:
ret['ancestors'] = [{
'id': str(ancestor_id)
}]
return ret
def get_attachments(self, post_id):
response = self.get("/content/{}/attachments".format(post_id))
return response.get('results', [])
def upload_attachment(self, post_id=None, attachment_path=None):
path = 'content/{}/child/attachment'.format(post_id)
if not os.path.exists(attachment_path):
log.error('Attachment {} does not exist'.format(attachment_path))
return
log.info(
'Uploading attachment {attachment_path} to post {post_id}'.format(
attachment_path=attachment_path, post_id=post_id))
if not self.dry_run:
self.post(path=path,
params={'allowDuplicated': 'true'},
files={'file': open(attachment_path, 'rb')})
log.info('Uploaded {} to post ID {}'.format(attachment_path, post_id))
def get_author(self, username):
log.info('Looking up Confluence user key for {}'.format(username))
response = self.get(path='user', params={'username': username})
if not isinstance(response, dict) or not response.get('userKey'):
log.error('No Confluence user key for {}'.format(username))
return {}
return response
def create(self,
content=None,
space=None,
title=None,
ancestor_id=None,
slug=None,
tags=None,
attachments=None,
type='page'):
self._require_kwargs({
'content': content,
'slug': slug,
'title': title,
'space': space
})
page = self._create_page_payload(content='Created by markdown-to-confluence - <a href="https://github.com/vmware-tanzu-labs/markdown-to-confluence">https://github.com/vmware-tanzu-labs/markdown-to-confluence</a>',
title=title,
ancestor_id=ancestor_id,
space=space,
type=type)
response = self.post(path='content/', data=page)
page_id = response['id']
page_url = urljoin(self.api_url, response['_links']['webui'])
log.info('Page "{title}" (id {page_id}) created successfully at {url}'.
format(title=title, page_id=response.get('id'), url=page_url))
# Now that we have the page created, we can just treat the rest of the
# flow like an update.
return self.update(post_id=page_id,
content=content,
space=space,
title=title,
ancestor_id=ancestor_id,
slug=slug,
tags=tags,
page=response,
attachments=attachments)
def update(self,
post_id=None,
content=None,
space=None,
title=None,
ancestor_id=None,
slug=None,
tags=None,
attachments=None,
page=None,
type='page'):
self._require_kwargs({
'content': content,
'slug': slug,
'title': title,
'post_id': post_id,
'space': space
})
# Since the page already has an ID in Confluence, before updating our
# content which references certain attachments, we should make sure
# those attachments have been uploaded.
if attachments is None:
attachments = []
for attachment in attachments:
self.upload_attachment(post_id=post_id, attachment_path=attachment)
# Next, we can create the updated page structure
new_page = self._create_page_payload(content=content,
title=title,
ancestor_id=ancestor_id,
space=space,
type=type)
# Increment the version number, as required by the Confluence API
# https://docs.atlassian.com/ConfluenceServer/rest/7.1.0/#api/content-update
new_version = page['version']['number'] + 1
new_page['version'] = {'number': new_version}
# With the attachments uploaded, and our new page structure created,
# we can upload the final content up to Confluence.
path = 'content/{}'.format(page['id'])
response = self.put(path=path, data=new_page)
failure = False
# Dry-run option doesn't create any pages hence no urls,
if self.dry_run:
page_url = '(dry run)'
else:
try:
test = response['_links']['webui']
except(TypeError):
failure = True
else:
page_url = urljoin(self.api_url, response['_links']['webui'])
if failure:
log.error('ERROR ---- Page "{title}" (id {page_id}) failed to update'.format(title=title, page_id=post_id))
post_id = post_id + " - fail"
else:
if tags:
self.create_labels(page_id=post_id, tags=tags)
log.info('Page "{title}" (id {page_id}) updated successfully at {url}'.
format(title=title, page_id=post_id, url=page_url))
return post_id
| true
| true
|
f718dc4daf902e24ed5c590318820e31e58d8a29
| 2,594
|
py
|
Python
|
srht/app.py
|
prplecake/legacy.sr.ht
|
191ba17ab59ffc9a3818712ac976e37a734f7cdc
|
[
"MIT"
] | null | null | null |
srht/app.py
|
prplecake/legacy.sr.ht
|
191ba17ab59ffc9a3818712ac976e37a734f7cdc
|
[
"MIT"
] | 8
|
2021-05-15T20:33:08.000Z
|
2021-06-02T04:39:23.000Z
|
srht/app.py
|
prplecake/legacy.sr.ht
|
191ba17ab59ffc9a3818712ac976e37a734f7cdc
|
[
"MIT"
] | null | null | null |
from flask import Flask, render_template, request, g, Response, redirect, url_for
from flask_login import LoginManager, current_user
from flask_wtf.csrf import CSRFProtect
from jinja2 import FileSystemLoader, ChoiceLoader
import random
import sys
import os
import locale
from srht.config import _cfg, _cfgi
from srht.database import db, init_db
from srht.objects import User
from srht.common import *
from srht.network import *
from srht.blueprints.html import html
from srht.blueprints.api import api
from srht.blueprints.oauth import oauth
app = Flask(__name__)
csrf = CSRFProtect()
csrf.init_app(app)
app.secret_key = _cfg("secret-key")
app.jinja_env.cache = None
init_db()
login_manager = LoginManager()
login_manager.init_app(app)
app.jinja_loader = ChoiceLoader([
FileSystemLoader("overrides"),
FileSystemLoader("templates"),
])
@login_manager.user_loader
def load_user(username):
return User.query.filter(User.username == username).first()
login_manager.anonymous_user = lambda: None
app.register_blueprint(html)
app.register_blueprint(api)
app.register_blueprint(oauth)
try:
locale.setlocale(locale.LC_ALL, 'en_US')
except:
pass
if not app.debug:
@app.errorhandler(500)
def handle_500(e):
# attempt to roll back and close the DB session after the error
try:
db.rollback()
db.close()
except:
# rollback failed as well; nothing left to do but exit
sys.exit(1)
return render_template("internal_error.html"), 500
# Error handler
if _cfg("error-to") != "":
import logging
from logging.handlers import SMTPHandler
mail_handler = SMTPHandler((_cfg("smtp-host"), _cfg("smtp-port")),
_cfg("error-from"),
[_cfg("error-to")],
'sr.ht application exception occurred',
credentials=(_cfg("smtp-user"), _cfg("smtp-password")))
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
@app.errorhandler(404)
def handle_404(e):
return render_template("not_found.html"), 404
moe = os.listdir('_static/moe/')
@app.context_processor
def inject():
return {
'root': _cfg("protocol") + "://" + _cfg("domain"),
'domain': _cfg("domain"),
'protocol': _cfg("protocol"),
'len': len,
'any': any,
'request': request,
'locale': locale,
'url_for': url_for,
'file_link': file_link,
'disown_link': disown_link,
'user': current_user,
'moe': random.choice(moe),
'random': random,
'owner': _cfg("owner"),
'owner_email': _cfg("owner_email"),
'_cfg': _cfg
}
| 26.20202
| 81
| 0.659214
|
from flask import Flask, render_template, request, g, Response, redirect, url_for
from flask_login import LoginManager, current_user
from flask_wtf.csrf import CSRFProtect
from jinja2 import FileSystemLoader, ChoiceLoader
import random
import sys
import os
import locale
from srht.config import _cfg, _cfgi
from srht.database import db, init_db
from srht.objects import User
from srht.common import *
from srht.network import *
from srht.blueprints.html import html
from srht.blueprints.api import api
from srht.blueprints.oauth import oauth
app = Flask(__name__)
csrf = CSRFProtect()
csrf.init_app(app)
app.secret_key = _cfg("secret-key")
app.jinja_env.cache = None
init_db()
login_manager = LoginManager()
login_manager.init_app(app)
app.jinja_loader = ChoiceLoader([
FileSystemLoader("overrides"),
FileSystemLoader("templates"),
])
@login_manager.user_loader
def load_user(username):
return User.query.filter(User.username == username).first()
login_manager.anonymous_user = lambda: None
app.register_blueprint(html)
app.register_blueprint(api)
app.register_blueprint(oauth)
try:
locale.setlocale(locale.LC_ALL, 'en_US')
except:
pass
if not app.debug:
@app.errorhandler(500)
def handle_500(e):
try:
db.rollback()
db.close()
except:
sys.exit(1)
return render_template("internal_error.html"), 500
if _cfg("error-to") != "":
import logging
from logging.handlers import SMTPHandler
mail_handler = SMTPHandler((_cfg("smtp-host"), _cfg("smtp-port")),
_cfg("error-from"),
[_cfg("error-to")],
'sr.ht application exception occurred',
credentials=(_cfg("smtp-user"), _cfg("smtp-password")))
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
@app.errorhandler(404)
def handle_404(e):
return render_template("not_found.html"), 404
moe = os.listdir('_static/moe/')
@app.context_processor
def inject():
return {
'root': _cfg("protocol") + "://" + _cfg("domain"),
'domain': _cfg("domain"),
'protocol': _cfg("protocol"),
'len': len,
'any': any,
'request': request,
'locale': locale,
'url_for': url_for,
'file_link': file_link,
'disown_link': disown_link,
'user': current_user,
'moe': random.choice(moe),
'random': random,
'owner': _cfg("owner"),
'owner_email': _cfg("owner_email"),
'_cfg': _cfg
}
| true
| true
|
f718dcaef716d7184b7c71448727c6c8b98ce48e
| 6,970
|
py
|
Python
|
eval_tiny_one_image.py
|
feiwu77777/Face-detection-and-tracking
|
1135d2d93d5b667110551dc7e4b985b5861eb380
|
[
"MIT"
] | 3
|
2019-02-05T13:35:43.000Z
|
2019-02-05T13:40:45.000Z
|
eval_tiny_one_image.py
|
feiwu77777/Face-detection-and-tracking
|
1135d2d93d5b667110551dc7e4b985b5861eb380
|
[
"MIT"
] | 6
|
2019-12-16T22:21:15.000Z
|
2022-02-10T00:30:41.000Z
|
eval_tiny_one_image.py
|
feiwu77777/Finding_unique_faces
|
1135d2d93d5b667110551dc7e4b985b5861eb380
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 10 15:49:15 2018
@author: fei.wu
"""
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tiny_face_model
import util
import cv2
import numpy as np
import matplotlib.pyplot as plt
import pickle
import pylab as pl
from scipy.special import expit
MAX_INPUT_DIM = 5000.0
def overlay_bounding_boxes(raw_img, refined_bboxes, lw):
"""Overlay bounding boxes of face on images.
Args:
raw_img:
A target image.
refined_bboxes:
Bounding boxes of detected faces.
lw:
Line width of bounding boxes. If zero specified,
this is determined based on confidence of each detection.
Returns:
None.
"""
# Overlay bounding boxes on an image with the color based on the confidence.
for r in refined_bboxes:
_score = expit(r[4])
cm_idx = int(np.ceil(_score * 255))
rect_color = [int(np.ceil(x * 255)) for x in util.cm_data[cm_idx]] # parula
_lw = lw
if lw == 0: # line width of each bounding box is adaptively determined.
      bw, bh = r[2] - r[0] + 1, r[3] - r[1] + 1  # box height comes from the y-coordinates (r[1], r[3])
_lw = 1 if min(bw, bh) <= 20 else max(2, min(3, min(bh / 20, bw / 20)))
_lw = int(np.ceil(_lw * _score))
_r = [int(x) for x in r[:4]]
cv2.rectangle(raw_img, (_r[0], _r[1]), (_r[2], _r[3]), rect_color, _lw)
def evaluate(weight_file_path, frame, prob_thresh=0.5, nms_thresh=0.1, lw=3, display=False):
"""Detect faces in images.
Args:
prob_thresh:
The threshold of detection confidence.
nms_thresh:
The overlap threshold of non maximum suppression
weight_file_path:
A pretrained weight file in the pickle format
generated by matconvnet_hr101_to_tf.py.
data_dir:
A directory which contains images.
output_dir:
A directory into which images with detected faces are output.
lw:
Line width of bounding boxes. If zero specified,
this is determined based on confidence of each detection.
display:
Display tiny face images on window.
Returns:
None.
"""
# placeholder of input images. Currently batch size of one is supported.
x = tf.placeholder(tf.float32, [1, None, None, 3]) # n, h, w, c
# Create the tiny face model which weights are loaded from a pretrained model.
model = tiny_face_model.Model(weight_file_path)
score_final = model.tiny_face(x)
# Load an average image and clusters(reference boxes of templates).
with open(weight_file_path, "rb") as f:
_, mat_params_dict = pickle.load(f)
average_image = model.get_data_by_key("average_image")
clusters = model.get_data_by_key("clusters")
clusters_h = clusters[:, 3] - clusters[:, 1] + 1
clusters_w = clusters[:, 2] - clusters[:, 0] + 1
normal_idx = np.where(clusters[:, 4] == 1)
# main
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
raw_img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
raw_img_f = raw_img.astype(np.float32)
def _calc_scales():
raw_h, raw_w = raw_img.shape[0], raw_img.shape[1]
min_scale = min(np.floor(np.log2(np.max(clusters_w[normal_idx] / raw_w))),
np.floor(np.log2(np.max(clusters_h[normal_idx] / raw_h))))
max_scale = min(1.0, -np.log2(max(raw_h, raw_w) / MAX_INPUT_DIM))
scales_down = pl.frange(min_scale, 0, 1.)
scales_up = pl.frange(0.5, max_scale, 0.5)
scales_pow = np.hstack((scales_down, scales_up))
scales = np.power(2.0, scales_pow)
return scales
scales = _calc_scales()
# initialize output
bboxes = np.empty(shape=(0, 5))
# process input at different scales
for s in scales:
img = cv2.resize(raw_img_f, (0, 0), fx=s, fy=s, interpolation=cv2.INTER_LINEAR)
img = img - average_image
img = img[np.newaxis, :]
      # we don't run every template on every scale; ids of templates to ignore at this scale
tids = list(range(4, 12)) + ([] if s <= 1.0 else list(range(18, 25)))
ignoredTids = list(set(range(0, clusters.shape[0])) - set(tids))
# run through the net
score_final_tf = sess.run(score_final, feed_dict={x: img})
# collect scores
score_cls_tf, score_reg_tf = score_final_tf[:, :, :, :25], score_final_tf[:, :, :, 25:125]
prob_cls_tf = expit(score_cls_tf)
prob_cls_tf[0, :, :, ignoredTids] = 0.0
def _calc_bounding_boxes():
# threshold for detection
_, fy, fx, fc = np.where(prob_cls_tf > prob_thresh)
# interpret heatmap into bounding boxes
cy = fy * 8 - 1
cx = fx * 8 - 1
ch = clusters[fc, 3] - clusters[fc, 1] + 1
cw = clusters[fc, 2] - clusters[fc, 0] + 1
# extract bounding box refinement
Nt = clusters.shape[0]
tx = score_reg_tf[0, :, :, 0:Nt]
ty = score_reg_tf[0, :, :, Nt:2*Nt]
tw = score_reg_tf[0, :, :, 2*Nt:3*Nt]
th = score_reg_tf[0, :, :, 3*Nt:4*Nt]
# refine bounding boxes
dcx = cw * tx[fy, fx, fc]
dcy = ch * ty[fy, fx, fc]
rcx = cx + dcx
rcy = cy + dcy
rcw = cw * np.exp(tw[fy, fx, fc])
rch = ch * np.exp(th[fy, fx, fc])
scores = score_cls_tf[0, fy, fx, fc]
tmp_bboxes = np.vstack((rcx - rcw / 2, rcy - rch / 2, rcx + rcw / 2, rcy + rch / 2))
tmp_bboxes = np.vstack((tmp_bboxes / s, scores))
tmp_bboxes = tmp_bboxes.transpose()
return tmp_bboxes
tmp_bboxes = _calc_bounding_boxes()
bboxes = np.vstack((bboxes, tmp_bboxes)) # <class 'tuple'>: (5265, 5)
# non maximum suppression
# refind_idx = util.nms(bboxes, nms_thresh)
refind_idx = tf.image.non_max_suppression(tf.convert_to_tensor(bboxes[:, :4], dtype=tf.float32),
tf.convert_to_tensor(bboxes[:, 4], dtype=tf.float32),
max_output_size=bboxes.shape[0], iou_threshold=nms_thresh)
refind_idx = sess.run(refind_idx)
refined_bboxes = bboxes[refind_idx]
overlay_bounding_boxes(raw_img, refined_bboxes, lw)
if display:
# plt.axis('off')
plt.imshow(raw_img)
plt.show()
return refined_bboxes
def main(frame):
print("Searching faces...")
with tf.Graph().as_default():
faces = evaluate(
weight_file_path= "weights.pckl", frame = frame,
prob_thresh=0.7, nms_thresh=0.1, #non max suppression threshold,
lw=2, display= False)
return faces
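# A minimal driver sketch, not part of the original file: the image path below is a
# placeholder, and "weights.pckl" must already exist next to this script since main()
# loads it. It reads one frame with OpenCV, runs the detector and prints the refined
# bounding boxes (x1, y1, x2, y2, score) returned by main().
if __name__ == "__main__":
    frame = cv2.imread("example.jpg")  # hypothetical input image
    detections = main(frame)
    print(detections)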
| 36.302083
| 110
| 0.58924
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tiny_face_model
import util
import cv2
import numpy as np
import matplotlib.pyplot as plt
import pickle
import pylab as pl
from scipy.special import expit
MAX_INPUT_DIM = 5000.0
def overlay_bounding_boxes(raw_img, refined_bboxes, lw):
for r in refined_bboxes:
_score = expit(r[4])
cm_idx = int(np.ceil(_score * 255))
rect_color = [int(np.ceil(x * 255)) for x in util.cm_data[cm_idx]]
_lw = lw
if lw == 0:
      bw, bh = r[2] - r[0] + 1, r[3] - r[1] + 1
_lw = 1 if min(bw, bh) <= 20 else max(2, min(3, min(bh / 20, bw / 20)))
_lw = int(np.ceil(_lw * _score))
_r = [int(x) for x in r[:4]]
cv2.rectangle(raw_img, (_r[0], _r[1]), (_r[2], _r[3]), rect_color, _lw)
def evaluate(weight_file_path, frame, prob_thresh=0.5, nms_thresh=0.1, lw=3, display=False):
x = tf.placeholder(tf.float32, [1, None, None, 3])
model = tiny_face_model.Model(weight_file_path)
score_final = model.tiny_face(x)
with open(weight_file_path, "rb") as f:
_, mat_params_dict = pickle.load(f)
average_image = model.get_data_by_key("average_image")
clusters = model.get_data_by_key("clusters")
clusters_h = clusters[:, 3] - clusters[:, 1] + 1
clusters_w = clusters[:, 2] - clusters[:, 0] + 1
normal_idx = np.where(clusters[:, 4] == 1)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
raw_img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
raw_img_f = raw_img.astype(np.float32)
def _calc_scales():
raw_h, raw_w = raw_img.shape[0], raw_img.shape[1]
min_scale = min(np.floor(np.log2(np.max(clusters_w[normal_idx] / raw_w))),
np.floor(np.log2(np.max(clusters_h[normal_idx] / raw_h))))
max_scale = min(1.0, -np.log2(max(raw_h, raw_w) / MAX_INPUT_DIM))
scales_down = pl.frange(min_scale, 0, 1.)
scales_up = pl.frange(0.5, max_scale, 0.5)
scales_pow = np.hstack((scales_down, scales_up))
scales = np.power(2.0, scales_pow)
return scales
scales = _calc_scales()
bboxes = np.empty(shape=(0, 5))
for s in scales:
img = cv2.resize(raw_img_f, (0, 0), fx=s, fy=s, interpolation=cv2.INTER_LINEAR)
img = img - average_image
img = img[np.newaxis, :]
tids = list(range(4, 12)) + ([] if s <= 1.0 else list(range(18, 25)))
ignoredTids = list(set(range(0, clusters.shape[0])) - set(tids))
# run through the net
score_final_tf = sess.run(score_final, feed_dict={x: img})
# collect scores
score_cls_tf, score_reg_tf = score_final_tf[:, :, :, :25], score_final_tf[:, :, :, 25:125]
prob_cls_tf = expit(score_cls_tf)
prob_cls_tf[0, :, :, ignoredTids] = 0.0
def _calc_bounding_boxes():
# threshold for detection
_, fy, fx, fc = np.where(prob_cls_tf > prob_thresh)
# interpret heatmap into bounding boxes
cy = fy * 8 - 1
cx = fx * 8 - 1
ch = clusters[fc, 3] - clusters[fc, 1] + 1
cw = clusters[fc, 2] - clusters[fc, 0] + 1
# extract bounding box refinement
Nt = clusters.shape[0]
tx = score_reg_tf[0, :, :, 0:Nt]
ty = score_reg_tf[0, :, :, Nt:2*Nt]
tw = score_reg_tf[0, :, :, 2*Nt:3*Nt]
th = score_reg_tf[0, :, :, 3*Nt:4*Nt]
# refine bounding boxes
dcx = cw * tx[fy, fx, fc]
dcy = ch * ty[fy, fx, fc]
rcx = cx + dcx
rcy = cy + dcy
rcw = cw * np.exp(tw[fy, fx, fc])
rch = ch * np.exp(th[fy, fx, fc])
scores = score_cls_tf[0, fy, fx, fc]
tmp_bboxes = np.vstack((rcx - rcw / 2, rcy - rch / 2, rcx + rcw / 2, rcy + rch / 2))
tmp_bboxes = np.vstack((tmp_bboxes / s, scores))
tmp_bboxes = tmp_bboxes.transpose()
return tmp_bboxes
tmp_bboxes = _calc_bounding_boxes()
bboxes = np.vstack((bboxes, tmp_bboxes)) # <class 'tuple'>: (5265, 5)
# non maximum suppression
# refind_idx = util.nms(bboxes, nms_thresh)
refind_idx = tf.image.non_max_suppression(tf.convert_to_tensor(bboxes[:, :4], dtype=tf.float32),
tf.convert_to_tensor(bboxes[:, 4], dtype=tf.float32),
max_output_size=bboxes.shape[0], iou_threshold=nms_thresh)
refind_idx = sess.run(refind_idx)
refined_bboxes = bboxes[refind_idx]
overlay_bounding_boxes(raw_img, refined_bboxes, lw)
if display:
# plt.axis('off')
plt.imshow(raw_img)
plt.show()
return refined_bboxes
def main(frame):
print("Searching faces...")
with tf.Graph().as_default():
faces = evaluate(
weight_file_path= "weights.pckl", frame = frame,
prob_thresh=0.7, nms_thresh=0.1, #non max suppression threshold,
lw=2, display= False)
return faces
| true
| true
|
f718dd4064635f2726a078e00dcce2703c4f553c
| 399
|
py
|
Python
|
src/synergyspace/wsgi.py
|
zavalnav/synergyspace
|
fa43ee64be2732c4813a8f0bb98cc96ede921289
|
[
"MIT"
] | null | null | null |
src/synergyspace/wsgi.py
|
zavalnav/synergyspace
|
fa43ee64be2732c4813a8f0bb98cc96ede921289
|
[
"MIT"
] | null | null | null |
src/synergyspace/wsgi.py
|
zavalnav/synergyspace
|
fa43ee64be2732c4813a8f0bb98cc96ede921289
|
[
"MIT"
] | null | null | null |
"""
WSGI config for synergyspace project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "synergyspace.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
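# A minimal local smoke test, not part of the original module and not meant for production:
# when executed directly, serve ``application`` with the standard library's wsgiref server
# on an assumed local port.
if __name__ == "__main__":
    from wsgiref.simple_server import make_server
    with make_server("127.0.0.1", 8000, application) as httpd:  # port 8000 is arbitrary
        httpd.serve_forever()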
| 26.6
| 78
| 0.796992
|
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "synergyspace.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| true
| true
|
f718df30f82beb6e6e21b89a28a1753ce7989932
| 614
|
py
|
Python
|
data/test/python/f718df30f82beb6e6e21b89a28a1753ce7989932manage_cases_tool.py
|
harshp8l/deep-learning-lang-detection
|
2a54293181c1c2b1a2b840ddee4d4d80177efb33
|
[
"MIT"
] | 84
|
2017-10-25T15:49:21.000Z
|
2021-11-28T21:25:54.000Z
|
data/test/python/f718df30f82beb6e6e21b89a28a1753ce7989932manage_cases_tool.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 5
|
2018-03-29T11:50:46.000Z
|
2021-04-26T13:33:18.000Z
|
data/test/python/f718df30f82beb6e6e21b89a28a1753ce7989932manage_cases_tool.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 24
|
2017-11-22T08:31:00.000Z
|
2022-03-27T01:22:31.000Z
|
from ert_gui.tools.manage_cases.case_init_configuration import CaseInitializationConfigurationPanel
from ert_gui.tools import Tool
from ert_gui.widgets import util
from ert_gui.widgets.closable_dialog import ClosableDialog
class ManageCasesTool(Tool):
def __init__(self):
super(ManageCasesTool, self).__init__("Manage Cases", "tools/manage_cases", util.resourceIcon("ide/database_gear"))
def trigger(self):
case_management_widget = CaseInitializationConfigurationPanel()
dialog = ClosableDialog("Manage Cases", case_management_widget, self.parent())
dialog.exec_()
| 29.238095
| 123
| 0.776873
|
from ert_gui.tools.manage_cases.case_init_configuration import CaseInitializationConfigurationPanel
from ert_gui.tools import Tool
from ert_gui.widgets import util
from ert_gui.widgets.closable_dialog import ClosableDialog
class ManageCasesTool(Tool):
def __init__(self):
super(ManageCasesTool, self).__init__("Manage Cases", "tools/manage_cases", util.resourceIcon("ide/database_gear"))
def trigger(self):
case_management_widget = CaseInitializationConfigurationPanel()
dialog = ClosableDialog("Manage Cases", case_management_widget, self.parent())
dialog.exec_()
| true
| true
|
f718e070d70485ffb24d1e10801c888fc1b35282
| 66
|
py
|
Python
|
main.py
|
ddkwing/har2case
|
6d440651c8d79228b7bf034790334e7c9406f023
|
[
"MIT"
] | null | null | null |
main.py
|
ddkwing/har2case
|
6d440651c8d79228b7bf034790334e7c9406f023
|
[
"MIT"
] | null | null | null |
main.py
|
ddkwing/har2case
|
6d440651c8d79228b7bf034790334e7c9406f023
|
[
"MIT"
] | null | null | null |
""" used for debugging
"""
from har2case.cli import main
main()
| 9.428571
| 29
| 0.681818
|
from har2case.cli import main
main()
| true
| true
|
f718e09316029e900e756332a21f6047b48f65a5
| 6,818
|
py
|
Python
|
collimator_geometry_DAC.py
|
Fahima-Islam/c3dp
|
f8eb9235dd4fba7edcc0642ed68e325346ff577e
|
[
"MIT"
] | null | null | null |
collimator_geometry_DAC.py
|
Fahima-Islam/c3dp
|
f8eb9235dd4fba7edcc0642ed68e325346ff577e
|
[
"MIT"
] | 1
|
2019-05-03T20:16:49.000Z
|
2019-05-03T20:16:49.000Z
|
collimator_geometry_DAC.py
|
Fahima-Islam/c3dp
|
f8eb9235dd4fba7edcc0642ed68e325346ff577e
|
[
"MIT"
] | null | null | null |
from collimator_zigzagBlade_old import Collimator_geom, Parameter_error
import os, sys
from instrument.geometry.pml import weave
from instrument.geometry import operations,shapes
from instrument.geometry.pml.Renderer import Renderer as base
parent_dir = os.path.abspath(os.pardir)
libpath = os.path.join(parent_dir, 'c3dp_source')
sample_path = os.path.join (parent_dir, 'sample')
coll_geo_file_name= 'coll_geometry.xml'
coll_geo_file = os.path.join(sample_path, coll_geo_file_name)
class File_inc_Renderer(base):
def _renderDocument(self, body):
self.onGeometry(body)
return
def header(self):
return []
def footer(self):
return []
def end(self):
return
def create (coll_front_end_from_center,max_coll_len=60., Snap_angle=False, vertical_number_channels=20, horizontal_number_channels=20,
detector_angles=[-45,-135],multiple_collimator=False, collimator_Nosupport=True, scad_flag=False,
outputfile=coll_geo_file):
length_of_each_part = max_coll_len
coll_last_height_detector=150.
coll_last_width_detector=60*2.
min_channel_wall_thickness =1
coll_last_front_end_from_center=coll_front_end_from_center+(2.*length_of_each_part)
coll_last_back_end_from_center =coll_last_front_end_from_center+length_of_each_part
coll_first=Collimator_geom()
coll_first_inner_radius = coll_front_end_from_center + (0. * length_of_each_part)
coll_first_outer_radius = coll_first_inner_radius + length_of_each_part
coll_first_height_detector = (coll_last_height_detector / coll_last_back_end_from_center) * coll_first_outer_radius
coll_first_width_detector = (coll_last_width_detector / coll_last_back_end_from_center) * coll_first_outer_radius # half part
coll_first.set_constraints(max_coll_height_detector=coll_first_height_detector,
max_coll_width_detector=coll_first_width_detector,
min_channel_wall_thickness=min_channel_wall_thickness,
max_coll_length=length_of_each_part,
min_channel_size=3,
collimator_front_end_from_center=coll_first_inner_radius,
collimator_parts=False,
no_right_border=False,
no_top_border=False,
horizontal_odd_blades=False,
vertical_odd_blades=False,
)
fist_vertical_number_blades = 3
fist_horizontal_number_blades = 3
coll_first.set_parameters(vertical_number_channels=fist_vertical_number_blades,
horizontal_number_channels=fist_horizontal_number_blades,
channel_length=length_of_each_part)
coll_middle = Collimator_geom()
coll_middle_inner_radius = coll_front_end_from_center + (1. * length_of_each_part)
coll_middle_outer_radius = length_of_each_part + coll_middle_inner_radius
coll_middle_height_detector = (coll_last_height_detector / coll_last_back_end_from_center) * coll_middle_outer_radius
coll_middle_width_detector = (coll_last_width_detector / coll_last_back_end_from_center) * coll_middle_outer_radius
coll_middle.set_constraints(max_coll_height_detector=coll_middle_height_detector,
max_coll_width_detector=coll_middle_width_detector,
min_channel_wall_thickness=min_channel_wall_thickness,
max_coll_length=length_of_each_part,
min_channel_size=3,
collimator_front_end_from_center=coll_middle_inner_radius,
collimator_parts=True,
initial_collimator_horizontal_channel_angle=0.0,
initial_collimator_vertical_channel_angle=0.0,
remove_vertical_blades_manually=True,
vertical_blade_index_list_toRemove=[2, 5],
remove_horizontal_blades_manually=True,
horizontal_blade_index_list_toRemove=[2, 5],
no_right_border=False,
no_top_border=False,
vertical_even_blades=False,
horizontal_even_blades=False)
coll_middle.set_parameters(vertical_number_channels=(fist_vertical_number_blades) * 3,
horizontal_number_channels=(fist_horizontal_number_blades) * 3,
channel_length=length_of_each_part)
col_last = Collimator_geom()
col_last.set_constraints(max_coll_height_detector=coll_last_height_detector,
max_coll_width_detector=coll_last_width_detector,
min_channel_wall_thickness=min_channel_wall_thickness,
max_coll_length=length_of_each_part,
min_channel_size=3.,
collimator_front_end_from_center=coll_last_front_end_from_center,
remove_horizontal_blades_manually=True,
horizontal_blade_index_list_toRemove=[2, 5, 11, 14, 20, 23],
remove_vertical_blades_manually=True,
vertical_blade_index_list_toRemove=[2, 5, 11, 14, 20, 23],
collimator_parts=True,
no_right_border=False,
no_top_border=False,
vertical_odd_blades=False,
horizontal_odd_blades=False)
col_last.set_parameters(vertical_number_channels=fist_vertical_number_blades * 9,
horizontal_number_channels=fist_horizontal_number_blades * 9,
channel_length=length_of_each_part)
coliFirst = coll_first.gen_collimators(detector_angles=detector_angles, multiple_collimator=False,
collimator_Nosupport=True)
coliMiddle = coll_middle.gen_collimators(detector_angles=detector_angles, multiple_collimator=False,
collimator_Nosupport=True)
colilast =col_last.gen_collimators(detector_angles=detector_angles, multiple_collimator=False,collimator_Nosupport=True)
whole = operations.unite(operations.unite(coliFirst, coliMiddle),colilast)
with open (outputfile,'wt') as file_h:
weave(whole,file_h, print_docs = False,renderer=File_inc_Renderer(), author='')
# gen_col__xml(angular_spacing=2, channel_size=1, outsideCurveLength_fromSOurce=50, insideCurveLength_fromSOurce=0, coll_file=outputfile)
| 49.405797
| 137
| 0.663684
|
from collimator_zigzagBlade_old import Collimator_geom, Parameter_error
import os, sys
from instrument.geometry.pml import weave
from instrument.geometry import operations,shapes
from instrument.geometry.pml.Renderer import Renderer as base
parent_dir = os.path.abspath(os.pardir)
libpath = os.path.join(parent_dir, 'c3dp_source')
sample_path = os.path.join (parent_dir, 'sample')
coll_geo_file_name= 'coll_geometry.xml'
coll_geo_file = os.path.join(sample_path, coll_geo_file_name)
class File_inc_Renderer(base):
def _renderDocument(self, body):
self.onGeometry(body)
return
def header(self):
return []
def footer(self):
return []
def end(self):
return
def create (coll_front_end_from_center,max_coll_len=60., Snap_angle=False, vertical_number_channels=20, horizontal_number_channels=20,
detector_angles=[-45,-135],multiple_collimator=False, collimator_Nosupport=True, scad_flag=False,
outputfile=coll_geo_file):
length_of_each_part = max_coll_len
coll_last_height_detector=150.
coll_last_width_detector=60*2.
min_channel_wall_thickness =1
coll_last_front_end_from_center=coll_front_end_from_center+(2.*length_of_each_part)
coll_last_back_end_from_center =coll_last_front_end_from_center+length_of_each_part
coll_first=Collimator_geom()
coll_first_inner_radius = coll_front_end_from_center + (0. * length_of_each_part)
coll_first_outer_radius = coll_first_inner_radius + length_of_each_part
coll_first_height_detector = (coll_last_height_detector / coll_last_back_end_from_center) * coll_first_outer_radius
coll_first_width_detector = (coll_last_width_detector / coll_last_back_end_from_center) * coll_first_outer_radius
coll_first.set_constraints(max_coll_height_detector=coll_first_height_detector,
max_coll_width_detector=coll_first_width_detector,
min_channel_wall_thickness=min_channel_wall_thickness,
max_coll_length=length_of_each_part,
min_channel_size=3,
collimator_front_end_from_center=coll_first_inner_radius,
collimator_parts=False,
no_right_border=False,
no_top_border=False,
horizontal_odd_blades=False,
vertical_odd_blades=False,
)
fist_vertical_number_blades = 3
fist_horizontal_number_blades = 3
coll_first.set_parameters(vertical_number_channels=fist_vertical_number_blades,
horizontal_number_channels=fist_horizontal_number_blades,
channel_length=length_of_each_part)
coll_middle = Collimator_geom()
coll_middle_inner_radius = coll_front_end_from_center + (1. * length_of_each_part)
coll_middle_outer_radius = length_of_each_part + coll_middle_inner_radius
coll_middle_height_detector = (coll_last_height_detector / coll_last_back_end_from_center) * coll_middle_outer_radius
coll_middle_width_detector = (coll_last_width_detector / coll_last_back_end_from_center) * coll_middle_outer_radius
coll_middle.set_constraints(max_coll_height_detector=coll_middle_height_detector,
max_coll_width_detector=coll_middle_width_detector,
min_channel_wall_thickness=min_channel_wall_thickness,
max_coll_length=length_of_each_part,
min_channel_size=3,
collimator_front_end_from_center=coll_middle_inner_radius,
collimator_parts=True,
initial_collimator_horizontal_channel_angle=0.0,
initial_collimator_vertical_channel_angle=0.0,
remove_vertical_blades_manually=True,
vertical_blade_index_list_toRemove=[2, 5],
remove_horizontal_blades_manually=True,
horizontal_blade_index_list_toRemove=[2, 5],
no_right_border=False,
no_top_border=False,
vertical_even_blades=False,
horizontal_even_blades=False)
coll_middle.set_parameters(vertical_number_channels=(fist_vertical_number_blades) * 3,
horizontal_number_channels=(fist_horizontal_number_blades) * 3,
channel_length=length_of_each_part)
col_last = Collimator_geom()
col_last.set_constraints(max_coll_height_detector=coll_last_height_detector,
max_coll_width_detector=coll_last_width_detector,
min_channel_wall_thickness=min_channel_wall_thickness,
max_coll_length=length_of_each_part,
min_channel_size=3.,
collimator_front_end_from_center=coll_last_front_end_from_center,
remove_horizontal_blades_manually=True,
horizontal_blade_index_list_toRemove=[2, 5, 11, 14, 20, 23],
remove_vertical_blades_manually=True,
vertical_blade_index_list_toRemove=[2, 5, 11, 14, 20, 23],
collimator_parts=True,
no_right_border=False,
no_top_border=False,
vertical_odd_blades=False,
horizontal_odd_blades=False)
col_last.set_parameters(vertical_number_channels=fist_vertical_number_blades * 9,
horizontal_number_channels=fist_horizontal_number_blades * 9,
channel_length=length_of_each_part)
coliFirst = coll_first.gen_collimators(detector_angles=detector_angles, multiple_collimator=False,
collimator_Nosupport=True)
coliMiddle = coll_middle.gen_collimators(detector_angles=detector_angles, multiple_collimator=False,
collimator_Nosupport=True)
colilast =col_last.gen_collimators(detector_angles=detector_angles, multiple_collimator=False,collimator_Nosupport=True)
whole = operations.unite(operations.unite(coliFirst, coliMiddle),colilast)
with open (outputfile,'wt') as file_h:
weave(whole,file_h, print_docs = False,renderer=File_inc_Renderer(), author='')
| true
| true
|
f718e0f64ee4309c479b119d007ee99dc6f7b42b
| 120,802
|
py
|
Python
|
pytorch_lightning/trainer/trainer.py
|
valanm22/pytorch-lightning
|
5d190eabd28671a6222741f5dd9ee3f214e519b1
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/trainer/trainer.py
|
valanm22/pytorch-lightning
|
5d190eabd28671a6222741f5dd9ee3f214e519b1
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/trainer/trainer.py
|
valanm22/pytorch-lightning
|
5d190eabd28671a6222741f5dd9ee3f214e519b1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trainer to automate the training."""
import inspect
import logging
import math
import os
import traceback
import warnings
from argparse import ArgumentParser, Namespace
from copy import deepcopy
from datetime import timedelta
from pathlib import Path
from typing import Any, Callable, cast, Dict, Iterable, List, Optional, Tuple, Type, Union
from weakref import proxy
import torch
from packaging.version import Version
from torch.optim import Optimizer
from torch.utils.data import DataLoader
import pytorch_lightning as pl
from pytorch_lightning.accelerators import Accelerator, GPUAccelerator, IPUAccelerator, TPUAccelerator
from pytorch_lightning.callbacks import Callback, EarlyStopping, ModelCheckpoint, ProgressBarBase
from pytorch_lightning.callbacks.prediction_writer import BasePredictionWriter
from pytorch_lightning.core.datamodule import LightningDataModule
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.loggers import LightningLoggerBase
from pytorch_lightning.loggers.base import DummyLogger, LoggerCollection
from pytorch_lightning.loggers.tensorboard import TensorBoardLogger
from pytorch_lightning.loops import PredictionLoop, TrainingEpochLoop
from pytorch_lightning.loops.dataloader.evaluation_loop import EvaluationLoop
from pytorch_lightning.loops.fit_loop import FitLoop
from pytorch_lightning.loops.utilities import _parse_loop_limits, _reset_progress
from pytorch_lightning.plugins import (
ApexMixedPrecisionPlugin,
NativeMixedPrecisionPlugin,
PLUGIN_INPUT,
PrecisionPlugin,
)
from pytorch_lightning.plugins.environments.slurm_environment import SLURMEnvironment
from pytorch_lightning.profiler import (
AdvancedProfiler,
BaseProfiler,
PassThroughProfiler,
PyTorchProfiler,
SimpleProfiler,
XLAProfiler,
)
from pytorch_lightning.strategies import ParallelStrategy, Strategy
from pytorch_lightning.strategies.ddp_spawn import DDPSpawnStrategy
from pytorch_lightning.trainer.callback_hook import TrainerCallbackHookMixin
from pytorch_lightning.trainer.configuration_validator import verify_loop_configurations
from pytorch_lightning.trainer.connectors.accelerator_connector import AcceleratorConnector
from pytorch_lightning.trainer.connectors.callback_connector import CallbackConnector
from pytorch_lightning.trainer.connectors.checkpoint_connector import CheckpointConnector
from pytorch_lightning.trainer.connectors.data_connector import DataConnector
from pytorch_lightning.trainer.connectors.logger_connector import LoggerConnector
from pytorch_lightning.trainer.connectors.logger_connector.result import _ResultCollection
from pytorch_lightning.trainer.connectors.signal_connector import SignalConnector
from pytorch_lightning.trainer.data_loading import TrainerDataLoadingMixin
from pytorch_lightning.trainer.optimizers import TrainerOptimizersMixin
from pytorch_lightning.trainer.states import RunningStage, TrainerFn, TrainerState, TrainerStatus
from pytorch_lightning.trainer.supporters import CombinedLoader
from pytorch_lightning.tuner.lr_finder import _LRFinder
from pytorch_lightning.tuner.tuning import Tuner
from pytorch_lightning.utilities import (
_IPU_AVAILABLE,
_TPU_AVAILABLE,
AMPType,
device_parser,
GradClipAlgorithmType,
parsing,
)
from pytorch_lightning.utilities.apply_func import apply_to_collection
from pytorch_lightning.utilities.argparse import (
_defaults_from_env_vars,
add_argparse_args,
from_argparse_args,
parse_argparser,
parse_env_variables,
)
from pytorch_lightning.utilities.auto_restart import _add_capture_metadata_collate
from pytorch_lightning.utilities.cloud_io import get_filesystem
from pytorch_lightning.utilities.data import _auto_add_worker_init_fn, has_len_all_ranks
from pytorch_lightning.utilities.distributed import distributed_available
from pytorch_lightning.utilities.exceptions import ExitGracefullyException, MisconfigurationException
from pytorch_lightning.utilities.imports import _fault_tolerant_training
from pytorch_lightning.utilities.meta import is_on_meta_device, materialize_module
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_info, rank_zero_warn
from pytorch_lightning.utilities.seed import isolate_rng
from pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature
from pytorch_lightning.utilities.types import (
_EVALUATE_OUTPUT,
_PATH,
_PREDICT_OUTPUT,
EVAL_DATALOADERS,
LRSchedulerConfig,
STEP_OUTPUT,
TRAIN_DATALOADERS,
)
from pytorch_lightning.utilities.warnings import PossibleUserWarning
log = logging.getLogger(__name__)
# warnings to ignore in trainer
warnings.filterwarnings(
"ignore", message="torch.distributed.reduce_op is deprecated, please use torch.distributed.ReduceOp instead"
)
class Trainer(
TrainerCallbackHookMixin, # TODO: Remove in v1.8
TrainerOptimizersMixin, # TODO: Remove in v1.8
TrainerDataLoadingMixin, # TODO: Remove in v1.8
):
@_defaults_from_env_vars
def __init__(
self,
logger: Union[LightningLoggerBase, Iterable[LightningLoggerBase], bool] = True,
checkpoint_callback: Optional[bool] = None,
enable_checkpointing: bool = True,
callbacks: Optional[Union[List[Callback], Callback]] = None,
default_root_dir: Optional[str] = None,
gradient_clip_val: Optional[Union[int, float]] = None,
gradient_clip_algorithm: Optional[str] = None,
process_position: int = 0,
num_nodes: int = 1,
num_processes: Optional[int] = None,
devices: Optional[Union[List[int], str, int]] = None,
gpus: Optional[Union[List[int], str, int]] = None,
auto_select_gpus: bool = False,
tpu_cores: Optional[Union[List[int], str, int]] = None,
ipus: Optional[int] = None,
log_gpu_memory: Optional[str] = None, # TODO: Remove in 1.7
progress_bar_refresh_rate: Optional[int] = None, # TODO: remove in v1.7
enable_progress_bar: bool = True,
overfit_batches: Union[int, float] = 0.0,
track_grad_norm: Union[int, float, str] = -1,
check_val_every_n_epoch: int = 1,
fast_dev_run: Union[int, bool] = False,
accumulate_grad_batches: Optional[Union[int, Dict[int, int]]] = None,
max_epochs: Optional[int] = None,
min_epochs: Optional[int] = None,
max_steps: int = -1,
min_steps: Optional[int] = None,
max_time: Optional[Union[str, timedelta, Dict[str, int]]] = None,
limit_train_batches: Optional[Union[int, float]] = None,
limit_val_batches: Optional[Union[int, float]] = None,
limit_test_batches: Optional[Union[int, float]] = None,
limit_predict_batches: Optional[Union[int, float]] = None,
val_check_interval: Optional[Union[int, float]] = None,
flush_logs_every_n_steps: Optional[int] = None,
log_every_n_steps: int = 50,
accelerator: Optional[Union[str, Accelerator]] = None,
strategy: Optional[Union[str, Strategy]] = None,
sync_batchnorm: bool = False,
precision: Union[int, str] = 32,
enable_model_summary: bool = True,
weights_summary: Optional[str] = "top",
weights_save_path: Optional[str] = None, # TODO: Remove in 1.8
num_sanity_val_steps: int = 2,
resume_from_checkpoint: Optional[Union[Path, str]] = None,
profiler: Optional[Union[BaseProfiler, str]] = None,
benchmark: Optional[bool] = None,
deterministic: bool = False,
reload_dataloaders_every_n_epochs: int = 0,
auto_lr_find: Union[bool, str] = False,
replace_sampler_ddp: bool = True,
detect_anomaly: bool = False,
auto_scale_batch_size: Union[str, bool] = False,
prepare_data_per_node: Optional[bool] = None,
plugins: Optional[Union[PLUGIN_INPUT, List[PLUGIN_INPUT]]] = None,
amp_backend: str = "native",
amp_level: Optional[str] = None,
move_metrics_to_cpu: bool = False,
multiple_trainloader_mode: str = "max_size_cycle",
stochastic_weight_avg: bool = False,
terminate_on_nan: Optional[bool] = None,
) -> None:
r"""
Customize every aspect of training via flags.
Args:
accelerator: Supports passing different accelerator types ("cpu", "gpu", "tpu", "ipu", "auto")
as well as custom accelerator instances.
.. deprecated:: v1.5
Passing training strategies (e.g., 'ddp') to ``accelerator`` has been deprecated in v1.5.0
and will be removed in v1.7.0. Please use the ``strategy`` argument instead.
accumulate_grad_batches: Accumulates grads every k batches or as set up in the dict.
Default: ``None``.
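                As an illustrative sketch (not taken from elsewhere in this codebase), the dict form maps
                an epoch to the accumulation factor that applies from that epoch onwards::

                    # accumulate 8 batches for epochs 0-3, then 2 batches from epoch 4 on
                    trainer = Trainer(accumulate_grad_batches={0: 8, 4: 2})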
amp_backend: The mixed precision backend to use ("native" or "apex").
                Default: ``'native'``.
amp_level: The optimization level to use (O1, O2, etc...). By default it will be set to "O2"
if ``amp_backend`` is set to "apex".
auto_lr_find: If set to True, will make trainer.tune() run a learning rate finder,
trying to optimize initial learning for faster convergence. trainer.tune() method will
set the suggested learning rate in self.lr or self.learning_rate in the LightningModule.
To use a different key set a string instead of True with the key name.
Default: ``False``.
auto_scale_batch_size: If set to True, will `initially` run a batch size
finder trying to find the largest batch size that fits into memory.
The result will be stored in self.batch_size in the LightningModule.
Additionally, can be set to either `power` that estimates the batch size through
a power search or `binsearch` that estimates the batch size through a binary search.
Default: ``False``.
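                A short sketch of how this flag and ``auto_lr_find`` above are typically used together
                (``model`` is an illustrative LightningModule, not one defined in this file)::

                    trainer = Trainer(auto_lr_find=True, auto_scale_batch_size="binsearch")
                    trainer.tune(model)  # sets model.lr / model.learning_rate and model.batch_size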
auto_select_gpus: If enabled and ``gpus`` is an integer, pick available
gpus automatically. This is especially useful when
GPUs are configured to be in "exclusive mode", such
that only one process at a time can access them.
Default: ``False``.
benchmark: Sets ``torch.backends.cudnn.benchmark``.
Defaults to ``True`` if :paramref:`~pytorch_lightning.trainer.trainer.Trainer.deterministic`
is ``False``. Overwrite to manually set a different value. Default: ``None``.
callbacks: Add a callback or list of callbacks.
Default: ``None``.
checkpoint_callback: If ``True``, enable checkpointing.
Default: ``None``.
.. deprecated:: v1.5
``checkpoint_callback`` has been deprecated in v1.5 and will be removed in v1.7.
Please consider using ``enable_checkpointing`` instead.
enable_checkpointing: If ``True``, enable checkpointing.
It will configure a default ModelCheckpoint callback if there is no user-defined ModelCheckpoint in
:paramref:`~pytorch_lightning.trainer.trainer.Trainer.callbacks`.
Default: ``True``.
check_val_every_n_epoch: Check val every n train epochs.
Default: ``1``.
default_root_dir: Default path for logs and weights when no logger/ckpt_callback passed.
Default: ``os.getcwd()``.
Can be remote file paths such as `s3://mybucket/path` or 'hdfs://path/'
detect_anomaly: Enable anomaly detection for the autograd engine.
Default: ``False``.
deterministic: If ``True``, sets whether PyTorch operations must use deterministic algorithms.
Default: ``False``.
devices: Will be mapped to either `gpus`, `tpu_cores`, `num_processes` or `ipus`,
based on the accelerator type.
            fast_dev_run: Runs ``n`` batch(es) of train, val and test if set to ``n`` (int), or 1 batch of
                each if set to ``True``, to find any bugs (i.e. a sort of unit test).
Default: ``False``.
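                For instance (illustrative only)::

                    trainer = Trainer(fast_dev_run=7)  # runs 7 batches of train, val and test, then stops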
flush_logs_every_n_steps: How often to flush logs to disk (defaults to every 100 steps).
.. deprecated:: v1.5
``flush_logs_every_n_steps`` has been deprecated in v1.5 and will be removed in v1.7.
Please configure flushing directly in the logger instead.
gpus: Number of GPUs to train on (int) or which GPUs to train on (list or str) applied per node
Default: ``None``.
gradient_clip_val: The value at which to clip gradients. Passing ``gradient_clip_val=None`` disables
gradient clipping. If using Automatic Mixed Precision (AMP), the gradients will be unscaled before.
Default: ``None``.
gradient_clip_algorithm: The gradient clipping algorithm to use. Pass ``gradient_clip_algorithm="value"``
to clip by value, and ``gradient_clip_algorithm="norm"`` to clip by norm. By default it will
be set to ``"norm"``.
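                A small sketch with arbitrary values::

                    trainer = Trainer(gradient_clip_val=0.5)                                   # clip by norm
                    trainer = Trainer(gradient_clip_val=0.5, gradient_clip_algorithm="value")  # clip each element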
limit_train_batches: How much of training dataset to check (float = fraction, int = num_batches).
Default: ``1.0``.
limit_val_batches: How much of validation dataset to check (float = fraction, int = num_batches).
Default: ``1.0``.
limit_test_batches: How much of test dataset to check (float = fraction, int = num_batches).
Default: ``1.0``.
limit_predict_batches: How much of prediction dataset to check (float = fraction, int = num_batches).
Default: ``1.0``.
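                Illustrative values (a float is a fraction of the dataset, an int is an absolute
                number of batches)::

                    trainer = Trainer(limit_train_batches=0.25, limit_val_batches=100)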
logger: Logger (or iterable collection of loggers) for experiment tracking. A ``True`` value uses
the default ``TensorBoardLogger``. ``False`` will disable logging. If multiple loggers are
provided and the `save_dir` property of that logger is not set, local files (checkpoints,
profiler traces, etc.) are saved in ``default_root_dir`` rather than in the ``log_dir`` of any
of the individual loggers.
Default: ``True``.
log_gpu_memory: None, 'min_max', 'all'. Might slow performance.
.. deprecated:: v1.5
Deprecated in v1.5.0 and will be removed in v1.7.0
Please use the ``DeviceStatsMonitor`` callback directly instead.
log_every_n_steps: How often to log within steps.
Default: ``50``.
prepare_data_per_node: If True, each LOCAL_RANK=0 will call prepare data.
Otherwise only NODE_RANK=0, LOCAL_RANK=0 will prepare data
.. deprecated:: v1.5
Deprecated in v1.5.0 and will be removed in v1.7.0
Please set ``prepare_data_per_node`` in ``LightningDataModule`` and/or
``LightningModule`` directly instead.
process_position: Orders the progress bar when running multiple models on same machine.
.. deprecated:: v1.5
``process_position`` has been deprecated in v1.5 and will be removed in v1.7.
Please pass :class:`~pytorch_lightning.callbacks.progress.TQDMProgressBar` with ``process_position``
directly to the Trainer's ``callbacks`` argument instead.
progress_bar_refresh_rate: How often to refresh progress bar (in steps). Value ``0`` disables progress bar.
Ignored when a custom progress bar is passed to :paramref:`~Trainer.callbacks`. Default: None, means
a suitable value will be chosen based on the environment (terminal, Google COLAB, etc.).
.. deprecated:: v1.5
``progress_bar_refresh_rate`` has been deprecated in v1.5 and will be removed in v1.7.
Please pass :class:`~pytorch_lightning.callbacks.progress.TQDMProgressBar` with ``refresh_rate``
directly to the Trainer's ``callbacks`` argument instead. To disable the progress bar,
pass ``enable_progress_bar = False`` to the Trainer.
            enable_progress_bar: Whether to enable the progress bar by default.
                Default: ``True``.
profiler: To profile individual steps during training and assist in identifying bottlenecks.
Default: ``None``.
overfit_batches: Overfit a fraction of training data (float) or a set number of batches (int).
Default: ``0.0``.
plugins: Plugins allow modification of core behavior like ddp and amp, and enable custom lightning plugins.
Default: ``None``.
precision: Double precision (64), full precision (32), half precision (16) or bfloat16 precision (bf16).
Can be used on CPU, GPU, TPUs or IPUs.
Default: ``32``.
max_epochs: Stop training once this number of epochs is reached. Disabled by default (None).
If both max_epochs and max_steps are not specified, defaults to ``max_epochs = 1000``.
To enable infinite training, set ``max_epochs = -1``.
            min_epochs: Force training for at least this many epochs. Disabled by default (None).
max_steps: Stop training after this number of steps. Disabled by default (-1). If ``max_steps = -1``
and ``max_epochs = None``, will default to ``max_epochs = 1000``. To enable infinite training, set
``max_epochs`` to ``-1``.
            min_steps: Force training for at least this number of steps. Disabled by default (``None``).
max_time: Stop training after this amount of time has passed. Disabled by default (``None``).
                The time duration can be specified in the format DD:HH:MM:SS (days, hours, minutes, seconds), as a
:class:`datetime.timedelta`, or a dictionary with keys that will be passed to
:class:`datetime.timedelta`.
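                The three accepted forms in a purely illustrative sketch, each expressing a 12-hour budget::

                    from datetime import timedelta

                    Trainer(max_time="00:12:00:00")
                    Trainer(max_time=timedelta(hours=12))
                    Trainer(max_time={"hours": 12})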
num_nodes: Number of GPU nodes for distributed training.
Default: ``1``.
num_processes: Number of processes for distributed training with ``accelerator="cpu"``.
Default: ``1``.
num_sanity_val_steps: Sanity check runs n validation batches before starting the training routine.
Set it to `-1` to run all batches in all validation dataloaders.
Default: ``2``.
reload_dataloaders_every_n_epochs: Set to a non-negative integer to reload dataloaders every n epochs.
Default: ``0``.
replace_sampler_ddp: Explicitly enables or disables sampler replacement. If not specified this
                will be toggled automatically when DDP is used. By default it will add ``shuffle=True`` for
train sampler and ``shuffle=False`` for val/test sampler. If you want to customize it,
you can set ``replace_sampler_ddp=False`` and add your own distributed sampler.
resume_from_checkpoint: Path/URL of the checkpoint from which training is resumed. If there is
no checkpoint file at the path, an exception is raised. If resuming from mid-epoch checkpoint,
training will start from the beginning of the next epoch.
.. deprecated:: v1.5
``resume_from_checkpoint`` is deprecated in v1.5 and will be removed in v2.0.
Please pass the path to ``Trainer.fit(..., ckpt_path=...)`` instead.
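                A sketch of the replacement API (the checkpoint path is a placeholder)::

                    trainer.fit(model, ckpt_path="some/path/to/my_checkpoint.ckpt")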
strategy: Supports different training strategies with aliases
                as well as custom training type plugins.
Default: ``None``.
sync_batchnorm: Synchronize batch norm layers between process groups/whole world.
Default: ``False``.
terminate_on_nan: If set to True, will terminate training (by raising a `ValueError`) at the
end of each training batch, if any of the parameters or the loss are NaN or +/-inf.
.. deprecated:: v1.5
Trainer argument ``terminate_on_nan`` was deprecated in v1.5 and will be removed in 1.7.
Please use ``detect_anomaly`` instead.
detect_anomaly: Enable anomaly detection for the autograd engine.
Default: ``False``.
tpu_cores: How many TPU cores to train on (1 or 8) / Single TPU to train on (1)
Default: ``None``.
ipus: How many IPUs to train on.
Default: ``None``.
track_grad_norm: -1 no tracking. Otherwise tracks that p-norm. May be set to 'inf' infinity-norm. If using
Automatic Mixed Precision (AMP), the gradients will be unscaled before logging them.
Default: ``-1``.
val_check_interval: How often to check the validation set. Pass a ``float`` in the range [0.0, 1.0] to check
after a fraction of the training epoch. Pass an ``int`` to check after a fixed number of training
batches.
Default: ``1.0``.
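                For example (illustrative values)::

                    trainer = Trainer(val_check_interval=0.25)   # check validation every quarter of a training epoch
                    trainer = Trainer(val_check_interval=1000)   # or after every 1000 training batches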
enable_model_summary: Whether to enable model summarization by default.
Default: ``True``.
weights_summary: Prints a summary of the weights when training begins.
.. deprecated:: v1.5
``weights_summary`` has been deprecated in v1.5 and will be removed in v1.7.
To disable the summary, pass ``enable_model_summary = False`` to the Trainer.
To customize the summary, pass :class:`~pytorch_lightning.callbacks.model_summary.ModelSummary`
directly to the Trainer's ``callbacks`` argument.
weights_save_path: Where to save weights if specified. Will override default_root_dir
for checkpoints only. Use this if for whatever reason you need the checkpoints
stored in a different place than the logs written in `default_root_dir`.
Can be remote file paths such as `s3://mybucket/path` or 'hdfs://path/'
Defaults to `default_root_dir`.
.. deprecated:: v1.6
``weights_save_path`` has been deprecated in v1.6 and will be removed in v1.8. Please pass
``dirpath`` directly to the :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint`
callback.
move_metrics_to_cpu: Whether to force internal logged metrics to be moved to cpu.
                This can save some GPU memory, but can make training slower. Use with caution.
Default: ``False``.
multiple_trainloader_mode: How to loop over the datasets when there are multiple train loaders.
In 'max_size_cycle' mode, the trainer ends one epoch when the largest dataset is traversed,
and smaller datasets reload when running out of their data. In 'min_size' mode, all the datasets
reload when reaching the minimum length of datasets.
Default: ``"max_size_cycle"``.
stochastic_weight_avg: Whether to use `Stochastic Weight Averaging (SWA)
<https://pytorch.org/blog/pytorch-1.6-now-includes-stochastic-weight-averaging/>`_.
Default: ``False``.
.. deprecated:: v1.5
``stochastic_weight_avg`` has been deprecated in v1.5 and will be removed in v1.7.
Please pass :class:`~pytorch_lightning.callbacks.stochastic_weight_avg.StochasticWeightAveraging`
directly to the Trainer's ``callbacks`` argument instead.
"""
super().__init__()
Trainer._log_api_event("init")
log.detail(f"{self.__class__.__name__}: Initializing trainer with parameters: {locals()}")
self.state = TrainerState()
gpu_ids, tpu_cores = self._parse_devices(gpus, auto_select_gpus, tpu_cores)
# init connectors
self._data_connector = DataConnector(self, multiple_trainloader_mode)
self._accelerator_connector = AcceleratorConnector(
num_processes=num_processes,
devices=devices,
tpu_cores=tpu_cores,
ipus=ipus,
accelerator=accelerator,
strategy=strategy,
gpus=gpus,
gpu_ids=gpu_ids,
num_nodes=num_nodes,
sync_batchnorm=sync_batchnorm,
benchmark=benchmark,
replace_sampler_ddp=replace_sampler_ddp,
deterministic=deterministic,
precision=precision,
amp_type=amp_backend,
amp_level=amp_level,
plugins=plugins,
)
self._logger_connector = LoggerConnector(self, log_gpu_memory)
self._callback_connector = CallbackConnector(self)
self._checkpoint_connector = CheckpointConnector(self, resume_from_checkpoint)
self._signal_connector = SignalConnector(self)
self.tuner = Tuner(self)
min_steps, max_steps, min_epochs, max_epochs, max_time = _parse_loop_limits(
min_steps, max_steps, min_epochs, max_epochs, max_time
)
fit_loop = FitLoop(min_epochs=min_epochs, max_epochs=max_epochs)
training_epoch_loop = TrainingEpochLoop(min_steps=min_steps, max_steps=max_steps)
fit_loop.connect(epoch_loop=training_epoch_loop)
# default .fit() loop
self.fit_loop = fit_loop
# default .validate() loop
self.validate_loop = EvaluationLoop()
# default .test() loop
self.test_loop = EvaluationLoop()
# default .predict() loop
self.predict_loop = PredictionLoop()
# set when a checkpoint is loaded via `Trainer.{fit,validate,test,predict}`.
self._ckpt_path: Optional[str] = None
# .validate(), predict() and .test() set these when they load a checkpoint. They will be removed in favor of
# the unified read-only `Trainer.ckpt_path` attribute in v1.8
self._validated_ckpt_path: Optional[str] = None # TODO: remove in v1.8
self._tested_ckpt_path: Optional[str] = None # TODO: remove in v1.8
self._predicted_ckpt_path: Optional[str] = None # TODO: remove in v1.8
# todo: remove in v1.7
self._weights_summary: Optional[str] = None
# init callbacks
# Declare attributes to be set in _callback_connector on_trainer_init
self._callback_connector.on_trainer_init(
callbacks,
checkpoint_callback,
enable_checkpointing,
enable_progress_bar,
progress_bar_refresh_rate,
process_position,
default_root_dir,
weights_save_path,
enable_model_summary,
weights_summary,
stochastic_weight_avg,
max_time,
accumulate_grad_batches,
)
# hook
self._call_callback_hooks("on_init_start")
# init data flags
self.check_val_every_n_epoch: int
self._data_connector.on_trainer_init(
check_val_every_n_epoch,
reload_dataloaders_every_n_epochs,
prepare_data_per_node,
)
if terminate_on_nan is not None:
rank_zero_deprecation(
"Trainer argument `terminate_on_nan` was deprecated in v1.5 and will be removed in 1.7."
" Please use `Trainer(detect_anomaly=True)` instead."
)
if not isinstance(terminate_on_nan, bool):
raise TypeError(f"`terminate_on_nan` should be a bool, got {terminate_on_nan}.")
# gradient clipping
if gradient_clip_val is not None and not isinstance(gradient_clip_val, (int, float)):
raise TypeError(f"`gradient_clip_val` should be an int or a float. Got {gradient_clip_val}.")
if gradient_clip_algorithm is not None and not GradClipAlgorithmType.supported_type(
gradient_clip_algorithm.lower()
):
raise MisconfigurationException(
f"`gradient_clip_algorithm` {gradient_clip_algorithm} is invalid. "
f"Allowed algorithms: {GradClipAlgorithmType.supported_types()}."
)
# gradient norm tracking
if track_grad_norm != -1 and not (
(isinstance(track_grad_norm, (int, float)) or track_grad_norm == "inf") and float(track_grad_norm) > 0
):
raise MisconfigurationException(
f"`track_grad_norm` must be a positive number or 'inf' (infinity norm). Got {track_grad_norm}."
)
self._terminate_on_nan = terminate_on_nan
self.gradient_clip_val: Union[int, float] = gradient_clip_val
self.gradient_clip_algorithm = (
GradClipAlgorithmType(gradient_clip_algorithm.lower())
if gradient_clip_algorithm is not None
else gradient_clip_algorithm
)
self.track_grad_norm: float = float(track_grad_norm)
self._detect_anomaly: bool = detect_anomaly
self._setup_on_init(num_sanity_val_steps)
# configure tuner
self.tuner.on_trainer_init(auto_lr_find, auto_scale_batch_size)
# configure profiler
self.__init_profiler(profiler)
# init logger flags
self._loggers: List[LightningLoggerBase]
self._logger_connector.on_trainer_init(logger, flush_logs_every_n_steps, log_every_n_steps, move_metrics_to_cpu)
# init debugging flags
self.val_check_interval: Union[int, float]
self._init_debugging_flags(
limit_train_batches,
limit_val_batches,
limit_test_batches,
limit_predict_batches,
val_check_interval,
overfit_batches,
fast_dev_run,
)
# Callback system
self._call_callback_hooks("on_init_end")
def _init_debugging_flags(
self,
limit_train_batches: Optional[Union[int, float]],
limit_val_batches: Optional[Union[int, float]],
limit_test_batches: Optional[Union[int, float]],
limit_predict_batches: Optional[Union[int, float]],
val_check_interval: Optional[Union[int, float]],
overfit_batches: Union[int, float],
fast_dev_run: Union[int, bool],
) -> None:
if isinstance(fast_dev_run, int) and (fast_dev_run < 0):
raise MisconfigurationException(
f"fast_dev_run={fast_dev_run} is not a valid configuration. It should be >= 0."
)
self.fast_dev_run = fast_dev_run
# set fast_dev_run=True when it is 1, used while logging
if fast_dev_run == 1:
self.fast_dev_run = True
if fast_dev_run:
num_batches = int(fast_dev_run)
limit_train_batches = num_batches
limit_val_batches = num_batches
limit_test_batches = num_batches
limit_predict_batches = num_batches
self.fit_loop.max_steps = num_batches
self.num_sanity_val_steps = 0
self.fit_loop.max_epochs = 1
val_check_interval = 1.0
self.check_val_every_n_epoch = 1
self.loggers = [DummyLogger()] if self.loggers else []
rank_zero_info(
"Running in fast_dev_run mode: will run a full train,"
f" val, test and prediction loop using {num_batches} batch(es)."
)
self.limit_train_batches = _determine_batch_limits(limit_train_batches, "limit_train_batches")
self.limit_val_batches = _determine_batch_limits(limit_val_batches, "limit_val_batches")
self.limit_test_batches = _determine_batch_limits(limit_test_batches, "limit_test_batches")
self.limit_predict_batches = _determine_batch_limits(limit_predict_batches, "limit_predict_batches")
self.val_check_interval = _determine_batch_limits(val_check_interval, "val_check_interval")
self.overfit_batches = _determine_batch_limits(overfit_batches, "overfit_batches")
self._determine_data_use_amount(self.overfit_batches)
def _determine_data_use_amount(self, overfit_batches: float) -> None:
"""Use less data for debugging purposes."""
if overfit_batches > 0:
self.limit_train_batches = overfit_batches
self.limit_val_batches = 0
def _setup_on_init(self, num_sanity_val_steps: int) -> None:
self._log_device_info()
self.should_stop = False
self.state = TrainerState()
self.num_training_batches = float("inf")
self.train_dataloader = None
if num_sanity_val_steps == -1:
self.num_sanity_val_steps = float("inf")
else:
self.num_sanity_val_steps = num_sanity_val_steps
self.num_sanity_val_batches = []
self.num_test_batches = []
self.num_val_batches = []
self.test_dataloaders = None
self.val_dataloaders = None
self._last_train_dl_reload_epoch = float("-inf")
self._last_val_dl_reload_epoch = float("-inf")
self.num_predict_batches = []
def _call_and_handle_interrupt(self, trainer_fn: Callable, *args: Any, **kwargs: Any) -> Any:
r"""
Error handling, intended to be used only for main trainer function entry points (fit, validate, test, predict)
as all errors should funnel through them
Args:
trainer_fn: one of (fit, validate, test, predict)
*args: positional arguments to be passed to the `trainer_fn`
**kwargs: keyword arguments to be passed to `trainer_fn`
"""
try:
if self.strategy.launcher is not None:
return self.strategy.launcher.launch(trainer_fn, *args, trainer=self, **kwargs)
else:
return trainer_fn(*args, **kwargs)
# TODO: treat KeyboardInterrupt as BaseException (delete the code below) in v1.7
except KeyboardInterrupt as exception:
rank_zero_warn("Detected KeyboardInterrupt, attempting graceful shutdown...")
# user could press Ctrl+c many times... only shutdown once
if not self.interrupted:
self.state.status = TrainerStatus.INTERRUPTED
self._call_callback_hooks("on_keyboard_interrupt")
self._call_callback_hooks("on_exception", exception)
except BaseException as exception:
self.state.status = TrainerStatus.INTERRUPTED
if distributed_available() and self.world_size > 1:
# try syncing remaining processes, kill otherwise
self.strategy.reconciliate_processes(traceback.format_exc())
self._call_callback_hooks("on_exception", exception)
self._teardown()
# teardown might access the stage so we reset it after
self.state.stage = None
raise
def fit(
self,
model: "pl.LightningModule",
train_dataloaders: Optional[Union[TRAIN_DATALOADERS, LightningDataModule]] = None,
val_dataloaders: Optional[EVAL_DATALOADERS] = None,
datamodule: Optional[LightningDataModule] = None,
ckpt_path: Optional[str] = None,
) -> None:
r"""
Runs the full optimization routine.
Args:
model: Model to fit.
train_dataloaders: A collection of :class:`torch.utils.data.DataLoader` or a
:class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying training samples.
In the case of multiple dataloaders, please see this :ref:`section <multiple-dataloaders>`.
val_dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them specifying validation samples.
ckpt_path: Path/URL of the checkpoint from which training is resumed. If there is
no checkpoint file at the path, an exception is raised. If resuming from mid-epoch checkpoint,
training will start from the beginning of the next epoch.
datamodule: An instance of :class:`~pytorch_lightning.core.datamodule.LightningDataModule`.
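
        A minimal usage sketch (``model``, ``train_loader``, ``val_loader`` and ``dm`` are
        illustrative names, not objects defined in this file)::

            trainer = Trainer(max_epochs=3)
            trainer.fit(model, train_dataloaders=train_loader, val_dataloaders=val_loader)
            # or, equivalently, drive the data through a LightningDataModule
            trainer.fit(model, datamodule=dm)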
"""
self.strategy.model = model
self._call_and_handle_interrupt(
self._fit_impl, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path
)
def _fit_impl(
self,
model: "pl.LightningModule",
train_dataloaders: Optional[Union[TRAIN_DATALOADERS, LightningDataModule]] = None,
val_dataloaders: Optional[EVAL_DATALOADERS] = None,
datamodule: Optional[LightningDataModule] = None,
ckpt_path: Optional[str] = None,
) -> None:
Trainer._log_api_event("fit")
log.detail(f"{self.__class__.__name__}: trainer fit stage")
self.state.fn = TrainerFn.FITTING
self.state.status = TrainerStatus.RUNNING
self.training = True
self._last_train_dl_reload_epoch = float("-inf")
self._last_val_dl_reload_epoch = float("-inf")
# if a datamodule comes in as the second arg, then fix it for the user
if isinstance(train_dataloaders, LightningDataModule):
datamodule = train_dataloaders
train_dataloaders = None
# If you supply a datamodule you can't supply train_dataloader or val_dataloaders
if (train_dataloaders is not None or val_dataloaders is not None) and datamodule is not None:
raise MisconfigurationException(
"You cannot pass `train_dataloader` or `val_dataloaders` to `trainer.fit(datamodule=...)`"
)
# links data to the trainer
self._data_connector.attach_data(
model, train_dataloaders=train_dataloaders, val_dataloaders=val_dataloaders, datamodule=datamodule
)
# TODO: ckpt_path only in v2.0
ckpt_path = ckpt_path or self.resume_from_checkpoint
self._ckpt_path = self.__set_ckpt_path(
ckpt_path, model_provided=True, model_connected=self.lightning_module is not None
)
results = self._run(model, ckpt_path=self.ckpt_path)
assert self.state.stopped
self.training = False
return results
def validate(
self,
model: Optional["pl.LightningModule"] = None,
dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,
ckpt_path: Optional[str] = None,
verbose: bool = True,
datamodule: Optional[LightningDataModule] = None,
) -> _EVALUATE_OUTPUT:
r"""
Perform one evaluation epoch over the validation set.
Args:
model: The model to validate.
dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them,
or a :class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying validation samples.
ckpt_path: Either ``best`` or path to the checkpoint you wish to validate.
If ``None`` and the model instance was passed, use the current weights.
Otherwise, the best model checkpoint from the previous ``trainer.fit`` call will be loaded
if a checkpoint callback is configured.
verbose: If True, prints the validation results.
datamodule: An instance of :class:`~pytorch_lightning.core.datamodule.LightningDataModule`.
Returns:
List of dictionaries with metrics logged during the validation phase, e.g., in model- or callback hooks
like :meth:`~pytorch_lightning.core.lightning.LightningModule.validation_step`,
:meth:`~pytorch_lightning.core.lightning.LightningModule.validation_epoch_end`, etc.
The length of the list corresponds to the number of validation dataloaders used.
"""
self.strategy.model = model or self.lightning_module
return self._call_and_handle_interrupt(self._validate_impl, model, dataloaders, ckpt_path, verbose, datamodule)
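    # Illustrative usage sketch (not part of the library source); ``model`` and
    # ``val_loader`` are hypothetical user objects, and ``ckpt_path="best"`` assumes a
    # ``ModelCheckpoint`` callback saved a best checkpoint during a previous ``fit``:
    #
    #     results = trainer.validate(model, dataloaders=val_loader, ckpt_path="best")
    #     # `results` is a list with one metrics dict per validation dataloader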
def _validate_impl(
self,
model: Optional["pl.LightningModule"] = None,
dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,
ckpt_path: Optional[str] = None,
verbose: bool = True,
datamodule: Optional[LightningDataModule] = None,
) -> _EVALUATE_OUTPUT:
# --------------------
# SETUP HOOK
# --------------------
Trainer._log_api_event("validate")
log.detail(f"{self.__class__.__name__}: trainer validate stage")
self.state.fn = TrainerFn.VALIDATING
self.state.status = TrainerStatus.RUNNING
self.validating = True
# if a datamodule comes in as the second arg, then fix it for the user
if isinstance(dataloaders, LightningDataModule):
datamodule = dataloaders
dataloaders = None
# If you supply a datamodule you can't supply val_dataloaders
if dataloaders is not None and datamodule:
raise MisconfigurationException("You cannot pass both `trainer.validate(dataloaders=..., datamodule=...)`")
model_provided = model is not None
model = model or self.lightning_module
if model is None:
raise MisconfigurationException(
"`model` must be provided to `trainer.validate()` when it hasn't been passed in a previous run"
)
self.validate_loop.verbose = verbose
# links data to the trainer
self._data_connector.attach_data(model, val_dataloaders=dataloaders, datamodule=datamodule)
self._ckpt_path = self.__set_ckpt_path(
ckpt_path, model_provided=model_provided, model_connected=self.lightning_module is not None
)
self._validated_ckpt_path = self.ckpt_path # TODO: remove in v1.8
# run validate
results = self._run(model, ckpt_path=self.ckpt_path)
assert self.state.stopped
self.validating = False
return results
def test(
self,
model: Optional["pl.LightningModule"] = None,
dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,
ckpt_path: Optional[str] = None,
verbose: bool = True,
datamodule: Optional[LightningDataModule] = None,
) -> _EVALUATE_OUTPUT:
r"""
Perform one evaluation epoch over the test set.
It's separated from fit to make sure you never run on your test set until you want to.
Args:
model: The model to test.
dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them,
or a :class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying test samples.
ckpt_path: Either ``best`` or path to the checkpoint you wish to test.
If ``None`` and the model instance was passed, use the current weights.
Otherwise, the best model checkpoint from the previous ``trainer.fit`` call will be loaded
if a checkpoint callback is configured.
verbose: If True, prints the test results.
datamodule: An instance of :class:`~pytorch_lightning.core.datamodule.LightningDataModule`.
Returns:
List of dictionaries with metrics logged during the test phase, e.g., in model- or callback hooks
like :meth:`~pytorch_lightning.core.lightning.LightningModule.test_step`,
:meth:`~pytorch_lightning.core.lightning.LightningModule.test_epoch_end`, etc.
The length of the list corresponds to the number of test dataloaders used.
"""
self.strategy.model = model or self.lightning_module
return self._call_and_handle_interrupt(self._test_impl, model, dataloaders, ckpt_path, verbose, datamodule)
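    # Illustrative usage sketch (not part of the library source); ``test_loader`` is a
    # hypothetical ``DataLoader``. With ``ckpt_path=None`` and a model instance passed,
    # the current weights are used:
    #
    #     results = trainer.test(model, dataloaders=test_loader, verbose=True)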
def _test_impl(
self,
model: Optional["pl.LightningModule"] = None,
dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,
ckpt_path: Optional[str] = None,
verbose: bool = True,
datamodule: Optional[LightningDataModule] = None,
) -> _EVALUATE_OUTPUT:
# --------------------
# SETUP HOOK
# --------------------
Trainer._log_api_event("test")
log.detail(f"{self.__class__.__name__}: trainer test stage")
self.state.fn = TrainerFn.TESTING
self.state.status = TrainerStatus.RUNNING
self.testing = True
# if a datamodule comes in as the second arg, then fix it for the user
if isinstance(dataloaders, LightningDataModule):
datamodule = dataloaders
dataloaders = None
# If you supply a datamodule you can't supply test_dataloaders
if dataloaders is not None and datamodule:
raise MisconfigurationException("You cannot pass both `trainer.test(dataloaders=..., datamodule=...)`")
model_provided = model is not None
model = model or self.lightning_module
if model is None:
raise MisconfigurationException(
"`model` must be provided to `trainer.test()` when it hasn't been passed in a previous run"
)
self.test_loop.verbose = verbose
# links data to the trainer
self._data_connector.attach_data(model, test_dataloaders=dataloaders, datamodule=datamodule)
self._ckpt_path = self.__set_ckpt_path(
ckpt_path, model_provided=model_provided, model_connected=self.lightning_module is not None
)
self._tested_ckpt_path = self.ckpt_path # TODO: remove in v1.8
# run test
results = self._run(model, ckpt_path=self.ckpt_path)
assert self.state.stopped
self.testing = False
return results
def predict(
self,
model: Optional["pl.LightningModule"] = None,
dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,
datamodule: Optional[LightningDataModule] = None,
return_predictions: Optional[bool] = None,
ckpt_path: Optional[str] = None,
) -> Optional[_PREDICT_OUTPUT]:
r"""
Run inference on your data.
This will call the model forward function to compute predictions. Useful to perform distributed
and batched predictions. Logging is disabled in the predict hooks.
Args:
model: The model to predict with.
dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them,
or a :class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying prediction samples.
datamodule: The datamodule with a predict_dataloader method that returns one or more dataloaders.
return_predictions: Whether to return predictions.
``True`` by default except when an accelerator that spawns processes is used (not supported).
ckpt_path: Either ``best`` or path to the checkpoint you wish to predict.
If ``None`` and the model instance was passed, use the current weights.
Otherwise, the best model checkpoint from the previous ``trainer.fit`` call will be loaded
if a checkpoint callback is configured.
Returns:
Returns a list of dictionaries, one for each provided dataloader containing their respective predictions.
"""
self.strategy.model = model or self.lightning_module
return self._call_and_handle_interrupt(
self._predict_impl, model, dataloaders, datamodule, return_predictions, ckpt_path
)
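    # Illustrative usage sketch (not part of the library source); ``predict_loader`` is a
    # hypothetical ``DataLoader``:
    #
    #     predictions = trainer.predict(model, dataloaders=predict_loader, return_predictions=True)
    #     # `predictions` is a list of per-batch outputs (one list per dataloader when several are given)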
def _predict_impl(
self,
model: Optional["pl.LightningModule"] = None,
dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,
datamodule: Optional[LightningDataModule] = None,
return_predictions: Optional[bool] = None,
ckpt_path: Optional[str] = None,
) -> Optional[_PREDICT_OUTPUT]:
# --------------------
# SETUP HOOK
# --------------------
Trainer._log_api_event("predict")
log.detail(f"{self.__class__.__name__}: trainer predict stage")
self.state.fn = TrainerFn.PREDICTING
self.state.status = TrainerStatus.RUNNING
self.predicting = True
self.predict_loop.return_predictions = return_predictions
# if a datamodule comes in as the second arg, then fix it for the user
if isinstance(dataloaders, LightningDataModule):
datamodule = dataloaders
dataloaders = None
if dataloaders is not None and datamodule:
raise MisconfigurationException("You cannot pass both `trainer.predict(dataloaders=..., datamodule=...)`")
model_provided = model is not None
model = model or self.lightning_module
if model is None:
raise MisconfigurationException(
"`model` must be provided to `trainer.predict()` when it hasn't been passed in a previous run"
)
# links data to the trainer
self._data_connector.attach_data(model, predict_dataloaders=dataloaders, datamodule=datamodule)
self._ckpt_path = self.__set_ckpt_path(
ckpt_path, model_provided=model_provided, model_connected=self.lightning_module is not None
)
self._predicted_ckpt_path = self.ckpt_path # TODO: remove in v1.8
results = self._run(model, ckpt_path=self.ckpt_path)
assert self.state.stopped
self.predicting = False
return results
def tune(
self,
model: "pl.LightningModule",
train_dataloaders: Optional[Union[TRAIN_DATALOADERS, LightningDataModule]] = None,
val_dataloaders: Optional[EVAL_DATALOADERS] = None,
datamodule: Optional[LightningDataModule] = None,
scale_batch_size_kwargs: Optional[Dict[str, Any]] = None,
lr_find_kwargs: Optional[Dict[str, Any]] = None,
) -> Dict[str, Optional[Union[int, _LRFinder]]]:
r"""
Runs routines to tune hyperparameters before training.
Args:
model: Model to tune.
train_dataloaders: A collection of :class:`torch.utils.data.DataLoader` or a
:class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying training samples.
In the case of multiple dataloaders, please see this :ref:`section <multiple-dataloaders>`.
val_dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them specifying validation samples.
datamodule: An instance of :class:`~pytorch_lightning.core.datamodule.LightningDataModule`.
scale_batch_size_kwargs: Arguments for :func:`~pytorch_lightning.tuner.batch_size_scaling.scale_batch_size`
lr_find_kwargs: Arguments for :func:`~pytorch_lightning.tuner.lr_finder.lr_find`
"""
Trainer._log_api_event("tune")
self.state.fn = TrainerFn.TUNING
self.state.status = TrainerStatus.RUNNING
self.tuning = True
# if a datamodule comes in as the second arg, then fix it for the user
if isinstance(train_dataloaders, LightningDataModule):
datamodule = train_dataloaders
train_dataloaders = None
# If you supply a datamodule you can't supply train_dataloader or val_dataloaders
if (train_dataloaders is not None or val_dataloaders is not None) and datamodule is not None:
raise MisconfigurationException(
"You cannot pass `train_dataloader` or `val_dataloaders` to `trainer.tune(datamodule=...)`"
)
# links data to the trainer
self._data_connector.attach_data(
model, train_dataloaders=train_dataloaders, val_dataloaders=val_dataloaders, datamodule=datamodule
)
with isolate_rng():
result = self.tuner._tune(
model, scale_batch_size_kwargs=scale_batch_size_kwargs, lr_find_kwargs=lr_find_kwargs
)
assert self.state.stopped
self.tuning = False
return result
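    # Illustrative usage sketch (not part of the library source). With ``auto_lr_find`` /
    # ``auto_scale_batch_size`` enabled on the Trainer, ``tune`` runs the corresponding
    # routines before training; the kwargs below are hypothetical values:
    #
    #     trainer = Trainer(auto_lr_find=True, auto_scale_batch_size="binsearch")
    #     result = trainer.tune(model, datamodule=datamodule, lr_find_kwargs={"num_training": 50})
    #     # `result` may contain entries such as "scale_batch_size" and "lr_find"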
def _restore_modules_and_callbacks(self, checkpoint_path: Optional[_PATH] = None) -> None:
# restore modules after setup
self._checkpoint_connector.resume_start(checkpoint_path)
self._checkpoint_connector.restore_model()
self._checkpoint_connector.restore_datamodule()
if self.state.fn == TrainerFn.FITTING:
# restore callback states
self._checkpoint_connector.restore_callbacks()
def _run(
self, model: "pl.LightningModule", ckpt_path: Optional[str] = None
) -> Optional[Union[_EVALUATE_OUTPUT, _PREDICT_OUTPUT]]:
# clean hparams
if hasattr(model, "hparams"):
parsing.clean_namespace(model.hparams)
# attach model to the training type plugin
self.strategy.connect(model)
self._callback_connector._attach_model_callbacks()
self._callback_connector._attach_model_logging_functions()
verify_loop_configurations(self)
# hook
log.detail(f"{self.__class__.__name__}: preparing data")
self._data_connector.prepare_data()
# ----------------------------
# SET UP TRAINING
# ----------------------------
self._call_callback_hooks("on_before_accelerator_backend_setup")
log.detail(f"{self.__class__.__name__}: setting up strategy environment")
self.strategy.setup_environment()
self.__setup_profiler()
self._call_setup_hook() # allow user to setup lightning_module in accelerator environment
# check if we should delay restoring checkpoint till later
if not self.strategy.restore_checkpoint_after_setup:
log.detail(f"{self.__class__.__name__}: restoring module and callbacks from checkpoint path: {ckpt_path}")
self._restore_modules_and_callbacks(ckpt_path)
log.detail(f"{self.__class__.__name__}: configuring sharded model")
self._call_configure_sharded_model() # allow user to setup in model sharded environment
# ----------------------------
# INSPECT THE CORE LOOPS
# ----------------------------
fr"""
Lightning internal flow looks like this:
{Trainer.fit} or {Trainer.test} or {Trainer.predict} ||
| ||
spawn processes ||
{self.strategy.setup_environment} ||
| ||
setup accelerator ||
and strategy || LIGHTNING
| ||
{self._run_stage} || FLOW
| ||
{self._run_train} || DIRECTION
or {self._run_evaluate} ||
or {self._run_predict} ||
| ||
results \/
This is used to guide readers to the core loops: train, test, predict.
{self._run_predict} is the simplest to understand, use `Go to Definition` to read it :)
"""
# ----------------------------
# TRAIN
# ----------------------------
# reset logger connector
self._logger_connector.reset_results()
self._logger_connector.reset_metrics()
# strategy will configure model and move it to the device
self.strategy.setup(self)
# hook
if self.state.fn == TrainerFn.FITTING:
self._call_callback_hooks("on_fit_start")
self._call_lightning_module_hook("on_fit_start")
self._log_hyperparams()
if self.strategy.restore_checkpoint_after_setup:
log.detail(f"{self.__class__.__name__}: restoring module and callbacks from checkpoint path: {ckpt_path}")
self._restore_modules_and_callbacks(ckpt_path)
# restore optimizers, etc.
log.detail(f"{self.__class__.__name__}: restoring training state")
self._checkpoint_connector.restore_training_state()
self._checkpoint_connector.resume_end()
results = self._run_stage()
log.detail(f"{self.__class__.__name__}: trainer tearing down")
self._teardown()
# ----------------------------
# POST-Training CLEAN UP
# ----------------------------
# hook
if self.state.fn == TrainerFn.FITTING:
self._call_callback_hooks("on_fit_end")
self._call_lightning_module_hook("on_fit_end")
log.detail(f"{self.__class__.__name__}: calling teardown hooks")
self._call_teardown_hook()
self.state.status = TrainerStatus.FINISHED
self.state.stage = None
return results
def _log_hyperparams(self) -> None:
if not self.loggers:
return
# log hyper-parameters
hparams_initial = None
# save exp to get started (this is where the first experiment logs are written)
datamodule_log_hyperparams = self.datamodule._log_hyperparams if self.datamodule is not None else False
if self.lightning_module._log_hyperparams and datamodule_log_hyperparams:
datamodule_hparams = self.datamodule.hparams_initial
lightning_hparams = self.lightning_module.hparams_initial
inconsistent_keys = []
for key in lightning_hparams.keys() & datamodule_hparams.keys():
lm_val, dm_val = lightning_hparams[key], datamodule_hparams[key]
if type(lm_val) != type(dm_val):
inconsistent_keys.append(key)
elif isinstance(lm_val, torch.Tensor) and id(lm_val) != id(dm_val):
inconsistent_keys.append(key)
elif lm_val != dm_val:
inconsistent_keys.append(key)
if inconsistent_keys:
raise MisconfigurationException(
f"Error while merging hparams: the keys {inconsistent_keys} are present "
"in both the LightningModule's and LightningDataModule's hparams "
"but have different values."
)
hparams_initial = {**lightning_hparams, **datamodule_hparams}
elif self.lightning_module._log_hyperparams:
hparams_initial = self.lightning_module.hparams_initial
elif datamodule_log_hyperparams:
hparams_initial = self.datamodule.hparams_initial
for logger in self.loggers:
if hparams_initial is not None:
logger.log_hyperparams(hparams_initial)
logger.log_graph(self.lightning_module)
logger.save()
def _teardown(self):
"""This is the Trainer's internal teardown, unrelated to the `teardown` hooks in LightningModule and
Callback; those are handled by :meth:`_call_teardown_hook`."""
self.strategy.post_dispatch(self)
self.strategy.teardown()
loop = self._active_loop
        # the loop should never be `None` here, but it can be because we don't know the trainer stage with `ddp_spawn`
if loop is not None:
loop.teardown()
self._logger_connector.teardown()
self._signal_connector.teardown()
def run_stage(self) -> None:
rank_zero_deprecation(
"`Trainer.run_stage` is deprecated in v1.6 and will be removed in v1.8. Use"
" `Trainer.{fit,validate,test,predict}` instead."
)
return self._run_stage()
def _run_stage(self):
self.strategy.barrier("run-stage")
self.strategy.dispatch(self)
if self.evaluating:
return self._run_evaluate()
if self.predicting:
return self._run_predict()
return self._run_train()
def _pre_training_routine(self):
# wait for all to join if on distributed
self.strategy.barrier("setup_training")
# register signals
self._signal_connector.register_signal_handlers()
# --------------------------
# Pre-train
# --------------------------
self._call_callback_hooks("on_pretrain_routine_start")
self._call_lightning_module_hook("on_pretrain_routine_start")
self._call_callback_hooks("on_pretrain_routine_end")
self._call_lightning_module_hook("on_pretrain_routine_end")
def _run_train(self) -> None:
self._pre_training_routine()
with isolate_rng():
self._run_sanity_check()
# enable train mode
self.model.train()
torch.set_grad_enabled(True)
self.fit_loop.trainer = self
with torch.autograd.set_detect_anomaly(self._detect_anomaly):
self.fit_loop.run()
def _run_evaluate(self) -> _EVALUATE_OUTPUT:
assert self.evaluating
# reload dataloaders
self._evaluation_loop._reload_evaluation_dataloaders()
# reset trainer on this loop and all child loops in case user connected a custom loop
self._evaluation_loop.trainer = self
with self.profiler.profile(f"run_{self.state.stage}_evaluation"), torch.no_grad():
eval_loop_results = self._evaluation_loop.run()
# remove the tensors from the eval results
for result in eval_loop_results:
if isinstance(result, dict):
for k, v in result.items():
if isinstance(v, torch.Tensor):
result[k] = v.cpu().item()
return eval_loop_results
def _run_predict(self) -> Optional[_PREDICT_OUTPUT]:
self.reset_predict_dataloader(self.lightning_module)
# reset trainer on this loop and all child loops in case user connected a custom loop
self.predict_loop.trainer = self
with torch.no_grad():
return self.predict_loop.run()
def _run_sanity_check(self) -> None:
val_loop = self.fit_loop.epoch_loop.val_loop
should_sanity_check = (
self.enable_validation
and self.num_sanity_val_steps > 0
# do not sanity check if restarting because it would mess up the loaded state
and not val_loop.restarting
)
# run tiny validation (if validation defined)
# to make sure program won't crash during val
if should_sanity_check:
stage = self.state.stage
self.sanity_checking = True
# reset logger connector
self._logger_connector.reset_results()
self._logger_connector.reset_metrics()
self._call_callback_hooks("on_sanity_check_start")
# reload dataloaders
val_loop._reload_evaluation_dataloaders()
self.num_sanity_val_batches = [
min(self.num_sanity_val_steps, val_batches) for val_batches in self.num_val_batches
]
# run eval step
with torch.no_grad():
val_loop.run()
self._call_callback_hooks("on_sanity_check_end")
# reset logger connector
self._logger_connector.reset_results()
self._logger_connector.reset_metrics()
# reset the progress tracking state after sanity checking. we don't need to set the state before
# because sanity check only runs when we are not restarting
_reset_progress(val_loop)
            # restore the previous stage when the sanity check is finished
self.state.stage = stage
def __set_ckpt_path(self, ckpt_path: Optional[str], model_provided: bool, model_connected: bool) -> Optional[str]:
# fault-tolerance takes precedence
from pytorch_lightning.callbacks.fault_tolerance import _FaultToleranceCheckpoint
ft_checkpoints = [cb for cb in self.callbacks if isinstance(cb, _FaultToleranceCheckpoint)]
if ft_checkpoints:
ft_ckpt_path = ft_checkpoints[0].ckpt_path
fs = get_filesystem(ft_ckpt_path)
if fs.exists(ft_ckpt_path):
return ft_ckpt_path
if model_provided and ckpt_path is None:
# use passed model to function without loading weights
return
fn = self.state.fn.value
if model_connected and ckpt_path is None:
rank_zero_warn(
f"`.{fn}(ckpt_path=None)` was called without a model."
" The best model of the previous `fit` call will be used."
f" You can pass `{fn}(ckpt_path='best')` to use and best model"
" checkpoint and avoid this warning or"
" `ckpt_path=trainer.checkpoint_callback.last_model_path` to use the last model."
)
ckpt_path = "best"
if ckpt_path == "best":
if len(self.checkpoint_callbacks) > 1:
rank_zero_warn(
f'`.{fn}(ckpt_path="best")` is called with Trainer configured with multiple `ModelCheckpoint`'
" callbacks. It will use the best checkpoint path from first checkpoint callback."
)
if not self.checkpoint_callback:
raise MisconfigurationException(
f'`.{fn}(ckpt_path="best")` is set but `ModelCheckpoint` is not configured.'
)
if not self.checkpoint_callback.best_model_path:
if self.fast_dev_run:
raise MisconfigurationException(
f'You cannot execute `.{fn}(ckpt_path="best")` with `fast_dev_run=True`.'
f" Please pass an exact checkpoint path to `.{fn}(ckpt_path=...)`"
)
raise MisconfigurationException(
f'`.{fn}(ckpt_path="best")` is set but `ModelCheckpoint` is not configured to save the best model.'
)
# load best weights
ckpt_path = self.checkpoint_callback.best_model_path
if not ckpt_path:
raise MisconfigurationException(
f"`.{fn}()` found no path for the best weights: {ckpt_path!r}. Please"
f" specify a path for a checkpoint `.{fn}(ckpt_path=PATH)`"
)
return ckpt_path
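    # Resolution sketch for the private helper above (illustrative, not part of the source):
    # a fault-tolerance checkpoint wins if present; otherwise ``ckpt_path=None`` with a passed
    # model keeps the current weights, and ``ckpt_path="best"`` resolves to
    # ``self.checkpoint_callback.best_model_path``, e.g.:
    #
    #     trainer.fit(model)                  # ModelCheckpoint records best_model_path
    #     trainer.test(ckpt_path="best")      # loads that best checkpoint before testing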
def _call_setup_hook(self) -> None:
fn = self.state.fn._setup_fn
self.strategy.barrier("pre_setup")
if self.datamodule is not None:
self.datamodule.setup(stage=fn)
self._call_callback_hooks("setup", stage=fn)
self._call_lightning_module_hook("setup", stage=fn)
self.strategy.barrier("post_setup")
def _call_configure_sharded_model(self) -> None:
with self.strategy.model_sharded_context():
self._handle_meta_model()
self._call_lightning_module_hook("configure_sharded_model")
self._call_callback_hooks("on_configure_sharded_model")
def _handle_meta_model(self) -> None:
if not is_on_meta_device(self.lightning_module):
return
if isinstance(self.strategy, DDPSpawnStrategy):
raise MisconfigurationException("LightningModule on meta device isn't supported with spawn.")
materialize_module(self.lightning_module)
# the trainer reference is lost during materialization
self.lightning_module.trainer = proxy(self)
def _call_teardown_hook(self) -> None:
fn = self.state.fn._setup_fn
if self.datamodule is not None:
self.datamodule.teardown(stage=fn)
self._call_callback_hooks("teardown", stage=fn)
self._call_lightning_module_hook("teardown", stage=fn)
self.lightning_module._current_fx_name = None
# these could have become stale if metrics are defined in `setup`
self.lightning_module._metric_attributes = None
        # TODO: flushing with TensorBoard hangs on TPU with 8 cores; this might apply to all loggers.
        # It might be related to XLA tensors being blocked when they are moved to the CPU while the loggers are killed.
for logger in self.loggers:
logger.finalize("success")
# summarize profile results
self.profiler.describe()
def call_hook(
self, hook_name: str, *args: Any, pl_module: Optional["pl.LightningModule"] = None, **kwargs: Any
) -> Any:
r"""
.. deprecated:: v1.6
The Trainer's `call_hook` method was deprecated in v1.6 and will be removed in v1.8.
"""
rank_zero_deprecation("The Trainer's `call_hook` method was deprecated in v1.6 and will be removed in v1.8.")
pl_module = self.lightning_module or pl_module
if pl_module:
prev_fx_name = pl_module._current_fx_name
pl_module._current_fx_name = hook_name
# always profile hooks
with self.profiler.profile(hook_name):
# first call trainer hook
callback_fx = getattr(self, hook_name, None)
if callable(callback_fx):
callback_fx(*args, **kwargs)
# next call hook in lightningModule
output = None
model_fx = getattr(pl_module, hook_name, None)
if callable(model_fx):
output = model_fx(*args, **kwargs)
# call the strategy hook
if hook_name not in ("setup", "teardown", "on_train_start") and hasattr(self.strategy, hook_name):
strategy_hook = getattr(self.strategy, hook_name)
strategy_output = strategy_hook(*args, **kwargs)
output = strategy_output if output is None else output
if pl_module:
# restore current_fx when nested context
pl_module._current_fx_name = prev_fx_name
return output
def _call_lightning_module_hook(
self,
hook_name: str,
*args: Any,
pl_module: Optional["pl.LightningModule"] = None,
**kwargs: Any,
) -> Any:
pl_module = pl_module or self.lightning_module
if pl_module is None:
raise TypeError("No Lightning Module is available to call hooks on")
fn = getattr(pl_module, hook_name)
if not callable(fn):
return
prev_fx_name = pl_module._current_fx_name
pl_module._current_fx_name = hook_name
with self.profiler.profile(f"[LightningModule]{pl_module.__class__.__name__}.{hook_name}"):
output = fn(*args, **kwargs)
# restore current_fx when nested context
pl_module._current_fx_name = prev_fx_name
return output
def _call_callback_hooks(
self,
hook_name: str,
*args: Any,
**kwargs: Any,
) -> None:
log.detail(f"{self.__class__.__name__}: calling callback hook: {hook_name}")
# TODO: remove if block in v1.8
if hook_name in ("on_init_start", "on_init_end"):
# these `Callback` hooks are the only ones that do not take a lightning module.
            # we also don't profile because the profiler hasn't been set yet
for callback in self.callbacks:
fn = getattr(callback, hook_name)
if callable(fn):
fn(self, *args, **kwargs)
return
pl_module = self.lightning_module
if pl_module:
prev_fx_name = pl_module._current_fx_name
pl_module._current_fx_name = hook_name
# TODO: remove if block in v1.7
if hook_name == "on_train_batch_start":
with self.profiler.profile(hook_name):
self._on_train_batch_start(*args, **kwargs)
elif hook_name == "on_train_batch_end":
with self.profiler.profile(hook_name):
self._on_train_batch_end(*args, **kwargs)
else:
for callback in self.callbacks:
fn = getattr(callback, hook_name)
if callable(fn):
with self.profiler.profile(f"[Callback]{callback.state_key}.{hook_name}"):
fn(self, self.lightning_module, *args, **kwargs)
if pl_module:
# restore current_fx when nested context
pl_module._current_fx_name = prev_fx_name
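    # Illustrative sketch (not part of the library source) of a user callback whose hooks are
    # dispatched through ``_call_callback_hooks`` above; ``PrintingCallback`` is hypothetical:
    #
    #     class PrintingCallback(Callback):
    #         def on_fit_start(self, trainer, pl_module):
    #             print("fit is starting")
    #
    #     trainer = Trainer(callbacks=[PrintingCallback()])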
# TODO: Delete this in v1.7 (deprecations: #9816 and #11148)
def _on_train_batch_start(self, batch, batch_idx, dataloader_idx=0):
r"""Called when the training batch begins. This function is needed because of two different deprecations affecting
the original function in TrainerCallbackHookMixin: #9816 and #11148.
"""
for callback in self.callbacks:
if is_param_in_hook_signature(callback.on_train_batch_start, "dataloader_idx", explicit=True):
callback.on_train_batch_start(self, self.lightning_module, batch, batch_idx, 0)
else:
callback.on_train_batch_start(self, self.lightning_module, batch, batch_idx)
# TODO: Delete this in v1.7 (deprecations: #9816 and #11148)
def _on_train_batch_end(self, outputs: STEP_OUTPUT, batch, batch_idx, dataloader_idx=0):
r"""Called when the training batch ends. This function is needed because of two different deprecations affecting
the original function in TrainerCallbackHookMixin: #9816 and #11148.
"""
for callback in self.callbacks:
if is_param_in_hook_signature(callback.on_train_batch_end, "dataloader_idx", explicit=True):
callback.on_train_batch_end(self, self.lightning_module, outputs, batch, batch_idx, 0)
else:
callback.on_train_batch_end(self, self.lightning_module, outputs, batch, batch_idx)
def _call_callbacks_state_dict(self) -> Dict[str, dict]:
"""Called when saving a model checkpoint, calls and returns every callback's `state_dict`, keyed by
`Callback.state_key`."""
callback_state_dicts = {}
for callback in self.callbacks:
state_dict = callback.state_dict()
if state_dict:
callback_state_dicts[callback.state_key] = state_dict
return callback_state_dicts
def _call_callbacks_on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
"""Called when saving a model checkpoint, calls every callback's `on_save_checkpoint` hook.
Will be removed in v1.8: If state is returned, we insert the callback state into
``checkpoint["callbacks"][Callback.state_key]``. It overrides ``state_dict`` if already present.
"""
for callback in self.callbacks:
# TODO: Add profiling for on_save_checkpoint hook
state = callback.on_save_checkpoint(self, self.lightning_module, checkpoint)
if state:
# TODO: Add deprecation warning if state is returned (see reference PR #11887)
checkpoint["callbacks"][callback.state_key] = state
def _call_callbacks_on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
"""Called when loading a model checkpoint.
Calls every callback's `on_load_checkpoint` hook. We have a dedicated function for this rather than using
`_call_callback_hooks` because we have special logic for getting callback_states.
"""
callback_states: Dict[Union[Type, str], Dict] = checkpoint.get("callbacks")
if callback_states is None:
return
is_legacy_ckpt = Version(checkpoint["pytorch-lightning_version"]) < Version("1.5.0dev")
current_callbacks_keys = {cb._legacy_state_key if is_legacy_ckpt else cb.state_key for cb in self.callbacks}
difference = callback_states.keys() - current_callbacks_keys
if difference:
rank_zero_warn(
"Be aware that when using `ckpt_path`,"
" callbacks used to create the checkpoint need to be provided during `Trainer` instantiation."
f" Please add the following callbacks: {list(difference)}.",
)
for callback in self.callbacks:
state = callback_states.get(callback.state_key, callback_states.get(callback._legacy_state_key))
if state:
state = deepcopy(state)
# TODO: Add profiling for on_load_checkpoint hook
callback.on_load_checkpoint(self, self.lightning_module, state)
def _call_callbacks_load_state_dict(self, checkpoint: Dict[str, Any]) -> None:
"""Called when loading a model checkpoint, calls every callback's `load_state_dict`."""
callback_states: Dict[Union[Type, str], Dict] = checkpoint.get("callbacks")
if callback_states is None:
return
for callback in self.callbacks:
state = callback_states.get(callback.state_key, callback_states.get(callback._legacy_state_key))
if state:
state = deepcopy(state)
callback.load_state_dict(state)
def _call_strategy_hook(
self,
hook_name: str,
*args: Any,
**kwargs: Any,
) -> Any:
pl_module = self.lightning_module
prev_fx_name = pl_module._current_fx_name
pl_module._current_fx_name = hook_name
fn = getattr(self.strategy, hook_name)
if not callable(fn):
return
with self.profiler.profile(f"[Strategy]{self.strategy.__class__.__name__}.{hook_name}"):
output = fn(*args, **kwargs)
# restore current_fx when nested context
pl_module._current_fx_name = prev_fx_name
return output
@staticmethod
def _parse_devices(
gpus: Optional[Union[List[int], str, int]],
auto_select_gpus: bool,
tpu_cores: Optional[Union[List[int], str, int]],
) -> Tuple[Optional[List[int]], Optional[Union[List[int], int]]]:
return device_parser._parse_devices(gpus, auto_select_gpus, tpu_cores)
@staticmethod
def _log_api_event(event: str) -> None:
torch._C._log_api_usage_once("lightning.trainer." + event)
def __init_profiler(self, profiler: Optional[Union[BaseProfiler, str]]) -> None:
if isinstance(profiler, str):
PROFILERS = {
"simple": SimpleProfiler,
"advanced": AdvancedProfiler,
"pytorch": PyTorchProfiler,
"xla": XLAProfiler,
}
profiler = profiler.lower()
if profiler not in PROFILERS:
raise MisconfigurationException(
"When passing string value for the `profiler` parameter of `Trainer`,"
f" it can only be one of {list(PROFILERS.keys())}"
)
profiler_class = PROFILERS[profiler]
profiler = profiler_class()
self.profiler: BaseProfiler = profiler or PassThroughProfiler()
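    # Illustrative usage sketch (not part of the library source): the string shortcuts above
    # map to profiler classes, so these two calls are equivalent:
    #
    #     Trainer(profiler="simple")
    #     Trainer(profiler=SimpleProfiler())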
def __setup_profiler(self) -> None:
local_rank = self.local_rank if self.world_size > 1 else None
self.profiler._lightning_module = proxy(self.lightning_module)
self.profiler.setup(stage=self.state.fn._setup_fn, local_rank=local_rank, log_dir=self.log_dir)
def _log_device_info(self) -> None:
rank_zero_info(
f"GPU available: {torch.cuda.is_available()}, used: {isinstance(self.accelerator, GPUAccelerator)}"
)
num_tpu_cores = (
self.tpu_cores if self.tpu_cores is not None and isinstance(self.accelerator, TPUAccelerator) else 0
)
rank_zero_info(f"TPU available: {_TPU_AVAILABLE}, using: {num_tpu_cores} TPU cores")
num_ipus = self.ipus if self.ipus is not None else 0
rank_zero_info(f"IPU available: {_IPU_AVAILABLE}, using: {num_ipus} IPUs")
if torch.cuda.is_available() and not isinstance(self.accelerator, GPUAccelerator):
rank_zero_warn(
"GPU available but not used. Set `accelerator` and `devices` using"
f" `Trainer(accelerator='gpu', devices={GPUAccelerator.auto_device_count()})`.",
category=PossibleUserWarning,
)
if _TPU_AVAILABLE and not isinstance(self.accelerator, TPUAccelerator):
rank_zero_warn(
"TPU available but not used. Set `accelerator` and `devices` using"
f" `Trainer(accelerator='tpu', devices={TPUAccelerator.auto_device_count()})`."
)
if _IPU_AVAILABLE and not isinstance(self.accelerator, IPUAccelerator):
rank_zero_warn(
"IPU available but not used. Set `accelerator` and `devices` using"
f" `Trainer(accelerator='ipu', devices={IPUAccelerator.auto_device_count()})`."
)
"""
Data loading methods
"""
def reset_train_dataloader(self, model: Optional["pl.LightningModule"] = None) -> None:
"""Resets the train dataloader and initialises required variables (number of batches, when to validate,
etc.).
Args:
model: The ``LightningModule`` if calling this outside of the trainer scope.
"""
source = self._data_connector._train_dataloader_source
pl_module = self.lightning_module or model
has_step = is_overridden("training_step", pl_module)
enable_training = self.limit_train_batches > 0
if not (source.is_defined() and has_step and enable_training):
return
self.train_dataloader = self._data_connector._request_dataloader(RunningStage.TRAINING, model=model)
if self.overfit_batches > 0:
self.train_dataloader = self._data_connector._resolve_overfit_batches(self.train_dataloader)
# automatically add samplers
self.train_dataloader = apply_to_collection(
self.train_dataloader,
(DataLoader, CombinedLoader),
self._data_connector._prepare_dataloader,
mode=RunningStage.TRAINING,
)
loaders = (
self.train_dataloader.loaders
if isinstance(self.train_dataloader, CombinedLoader)
else self.train_dataloader
)
# check the workers recursively
apply_to_collection(loaders, DataLoader, self._data_connector._worker_check, "train_dataloader")
# add worker_init_fn for correct seeding in worker processes
apply_to_collection(loaders, DataLoader, _auto_add_worker_init_fn, rank=self.global_rank)
# add collate_fn to collect metadata for fault tolerant training
if _fault_tolerant_training():
apply_to_collection(loaders, DataLoader, _add_capture_metadata_collate)
# wrap the sequence of train loaders to a CombinedLoader object for computing the num_training_batches
if not isinstance(self.train_dataloader, CombinedLoader):
self.train_dataloader = CombinedLoader(loaders, self._data_connector.multiple_trainloader_mode)
module = model or self.lightning_module or self.datamodule
self.num_training_batches = (
len(self.train_dataloader)
if has_len_all_ranks(self.train_dataloader, self.strategy, module)
else float("inf")
)
if isinstance(self.limit_train_batches, int):
self.num_training_batches = min(self.num_training_batches, int(self.limit_train_batches))
elif self.num_training_batches != float("inf"):
self.num_training_batches = int(self.num_training_batches * self.limit_train_batches)
elif self.limit_train_batches != 1.0:
raise MisconfigurationException(
"When using an IterableDataset for `limit_train_batches`,"
" `Trainer(limit_train_batches)` must be `1.0` or an int. An int k specifies"
" `num_training_batches` to use."
)
if isinstance(self.val_check_interval, int):
self.val_check_batch = self.val_check_interval
if self.val_check_batch > self.num_training_batches:
raise ValueError(
f"`val_check_interval` ({self.val_check_interval}) must be less than or equal "
f"to the number of the training batches ({self.num_training_batches}). "
"If you want to disable validation set `limit_val_batches` to 0.0 instead."
)
else:
if not has_len_all_ranks(self.train_dataloader, self.strategy, module):
if self.val_check_interval == 1.0:
self.val_check_batch = float("inf")
else:
raise MisconfigurationException(
"When using an IterableDataset for `train_dataloader`,"
" `Trainer(val_check_interval)` must be `1.0` or an int. An int k specifies"
" checking validation every k training batches."
)
else:
self.val_check_batch = int(self.num_training_batches * self.val_check_interval)
self.val_check_batch = max(1, self.val_check_batch)
if self.loggers and self.num_training_batches < self.log_every_n_steps:
rank_zero_warn(
f"The number of training samples ({self.num_training_batches}) is smaller than the logging interval"
f" Trainer(log_every_n_steps={self.log_every_n_steps}). Set a lower value for log_every_n_steps if"
" you want to see logs for the training epoch.",
category=PossibleUserWarning,
)
# store epoch of dataloader reset for reload_dataloaders_every_n_epochs
self._last_train_dl_reload_epoch = self.current_epoch
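    # Worked example for the `val_check_interval` arithmetic above (illustrative numbers):
    # with 1000 training batches and ``val_check_interval=0.25``, validation runs every
    # ``int(1000 * 0.25) == 250`` training batches; with ``val_check_interval=200`` (an int),
    # it runs every 200 batches and must not exceed the number of training batches.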
def reset_val_dataloader(self, model: Optional["pl.LightningModule"] = None) -> None:
"""Resets the validation dataloader and determines the number of batches.
Args:
model: The ``LightningModule`` if called outside of the trainer scope.
"""
source = self._data_connector._val_dataloader_source
pl_module = self.lightning_module or model
has_step = is_overridden("validation_step", pl_module)
enable_validation = self.limit_val_batches > 0
if source.is_defined() and has_step and enable_validation:
self.num_val_batches, self.val_dataloaders = self._data_connector._reset_eval_dataloader(
RunningStage.VALIDATING, model=pl_module
)
# store epoch of dataloader reset for reload_dataloaders_every_n_epochs
self._last_val_dl_reload_epoch = self.current_epoch
def reset_test_dataloader(self, model: Optional["pl.LightningModule"] = None) -> None:
"""Resets the test dataloader and determines the number of batches.
Args:
model: The ``LightningModule`` if called outside of the trainer scope.
"""
source = self._data_connector._test_dataloader_source
pl_module = self.lightning_module or model
has_step = is_overridden("test_step", pl_module)
enable_testing = self.limit_test_batches > 0
if source.is_defined() and has_step and enable_testing:
self.num_test_batches, self.test_dataloaders = self._data_connector._reset_eval_dataloader(
RunningStage.TESTING, model=pl_module
)
def reset_predict_dataloader(self, model: Optional["pl.LightningModule"] = None) -> None:
"""Resets the predict dataloader and determines the number of batches.
Args:
model: The ``LightningModule`` if called outside of the trainer scope.
"""
source = self._data_connector._predict_dataloader_source
pl_module = self.lightning_module or model
enable_prediction = self.limit_predict_batches > 0
if source.is_defined() and enable_prediction:
self.num_predict_batches, self.predict_dataloaders = self._data_connector._reset_eval_dataloader(
RunningStage.PREDICTING, model=pl_module
)
def reset_train_val_dataloaders(self, model: Optional["pl.LightningModule"] = None) -> None:
"""Resets train and val dataloaders if none are attached to the trainer.
        The val dataloader must be initialized before the training loop starts, as the training loop
inspects the val dataloader to determine whether to run the evaluation loop.
Args:
model: The ``LightningModule`` if called outside of the trainer scope.
"""
if self.train_dataloader is None:
self.reset_train_dataloader(model=model)
if self.val_dataloaders is None:
self.reset_val_dataloader(model=model)
"""
Accelerator properties
"""
@property
def accelerator(self) -> Accelerator:
return self.strategy.accelerator
@property
def strategy(self) -> Strategy:
return self._accelerator_connector.strategy
@property
def training_type_plugin(self) -> Strategy:
rank_zero_deprecation(
"`Trainer.training_type_plugin` is deprecated in v1.6 and will be removed in v1.8. Use"
" `Trainer.strategy` instead."
)
return self.strategy
@property
def precision_plugin(self) -> PrecisionPlugin:
return self.strategy.precision_plugin
@property
def global_rank(self) -> int:
return self.strategy.global_rank
@property
def local_rank(self) -> int:
# some training types define a local rank
return getattr(self.strategy, "local_rank", 0)
@property
def node_rank(self) -> int:
# some training types define a node rank
return getattr(self.strategy, "node_rank", 0)
@property
def world_size(self) -> int:
# some training types define a world size
return getattr(self.strategy, "world_size", 1)
@property
def should_rank_save_checkpoint(self) -> bool:
rank_zero_deprecation(
"`Trainer.should_rank_save_checkpoint` is deprecated in v1.6 and will be removed in v1.8.", stacklevel=5
)
strategy = self.strategy
return (
isinstance(strategy, pl.strategies.TPUSpawnStrategy) and strategy.local_rank == 0 or strategy.is_global_zero
)
@property
def num_nodes(self) -> int:
return getattr(self.strategy, "num_nodes", 1)
@property
def device_ids(self) -> List[int]:
"""List of device indexes per node."""
devices = getattr(self.strategy, "parallel_devices", [self.strategy.root_device])
device_ids = []
for idx, device in enumerate(devices):
if isinstance(device, torch.device):
device_ids.append(device.index or idx)
elif isinstance(device, int):
device_ids.append(device)
return device_ids
@property
def num_devices(self) -> int:
"""Number of devices the trainer uses per node."""
return len(self.device_ids)
@property
def num_processes(self) -> int:
return self._accelerator_connector.num_processes
@property
def root_gpu(self) -> Optional[int]:
rank_zero_deprecation(
"`Trainer.root_gpu` is deprecated in v1.6 and will be removed in v1.8. "
"Please use `Trainer.strategy.root_device.index` instead."
)
return self.strategy.root_device.index if isinstance(self.accelerator, GPUAccelerator) else None
@property
def tpu_cores(self) -> int:
return self._accelerator_connector.tpu_cores
@property
def ipus(self) -> int:
return self._accelerator_connector.num_ipus
@property
def num_gpus(self) -> int:
rank_zero_deprecation(
"`Trainer.num_gpus` was deprecated in v1.6 and will be removed in v1.8."
" Please use `Trainer.num_devices` instead."
)
return self.num_devices if isinstance(self.accelerator, GPUAccelerator) else 0
@property
def devices(self) -> int:
rank_zero_deprecation(
"`Trainer.devices` was deprecated in v1.6 and will be removed in v1.8."
" Please use `Trainer.num_devices` or `Trainer.device_ids` to get device information instead."
)
return self.num_devices
@property
def data_parallel_device_ids(self) -> Optional[List[int]]:
return (
self._accelerator_connector.parallel_device_ids if self._accelerator_connector.parallel_device_ids else None
)
@property
def lightning_module(self) -> "pl.LightningModule":
# TODO: this is actually an optional return
return self.strategy.lightning_module
@property
def optimizers(self) -> List[Optimizer]:
return self.strategy.optimizers
@optimizers.setter
def optimizers(self, new_optims: Optional[List[Optimizer]]) -> None:
self.strategy.optimizers = new_optims
@property
def lightning_optimizers(self) -> Dict[int, LightningOptimizer]:
rank_zero_deprecation(
"`Trainer.lightning_optimizers` is deprecated in v1.6 and will be removed in v1.8", stacklevel=5
)
return self.strategy._lightning_optimizers
@property
def lr_scheduler_configs(self) -> List[LRSchedulerConfig]:
return self.strategy.lr_scheduler_configs
@property
def lr_schedulers(self) -> List[Dict[str, Any]]:
rank_zero_deprecation(
"`Trainer.lr_schedulers` is deprecated in v1.6 and will be removed in v1.8."
" You can use `trainer.lr_scheduler_configs` instead which contains dataclasses instead of dictionaries.",
stacklevel=5,
)
from dataclasses import asdict
return [asdict(config) for config in self.strategy.lr_scheduler_configs]
@property
def optimizer_frequencies(self) -> List[int]:
return self.strategy.optimizer_frequencies
@optimizer_frequencies.setter
def optimizer_frequencies(self, new_freqs: List[int]) -> None:
self.strategy.optimizer_frequencies = new_freqs
@property
def amp_backend(self) -> Optional[AMPType]:
if isinstance(self.precision_plugin, ApexMixedPrecisionPlugin):
return AMPType.APEX
if isinstance(self.precision_plugin, NativeMixedPrecisionPlugin):
return AMPType.NATIVE
return None
@property
def precision(self) -> Union[str, int]:
return self.strategy.precision_plugin.precision
@property
def scaler(self) -> Optional[Any]:
return getattr(self.precision_plugin, "scaler", None)
@property
def gpus(self) -> Optional[Union[List[int], str, int]]:
return self._accelerator_connector.gpus
@property
def model(self) -> torch.nn.Module:
"""The LightningModule, but possibly wrapped into DataParallel or DistributedDataParallel.
To access the pure LightningModule, use
:meth:`~pytorch_lightning.trainer.trainer.Trainer.lightning_module` instead.
"""
return self.strategy.model
@model.setter
def model(self, model: torch.nn.Module) -> None:
"""Setter for the model, pass-through to accelerator and plugin where the model reference is stored. Used
by the Tuner to reset the state of Trainer and Accelerator.
Args:
model: The LightningModule, possibly wrapped into DataParallel or DistributedDataParallel, depending
on the backend.
"""
self.strategy.model = model
"""
General properties
"""
@property
def log_dir(self) -> Optional[str]:
if len(self.loggers) == 1:
if isinstance(self.logger, TensorBoardLogger):
dirpath = self.logger.log_dir
else:
dirpath = self.logger.save_dir
else:
dirpath = self.default_root_dir
dirpath = self.strategy.broadcast(dirpath)
return dirpath
@property
def use_amp(self) -> bool:
rank_zero_deprecation(
"`Trainer.use_amp` is deprecated in v1.6.0 and will be removed in v1.8.0."
" Please use `Trainer.amp_backend` instead."
)
return self.precision == 16
@property
def is_global_zero(self) -> bool:
return self.strategy.is_global_zero
@property
def slurm_job_id(self) -> Optional[int]:
rank_zero_deprecation("Method `slurm_job_id` is deprecated in v1.6.0 and will be removed in v1.7.0.")
return SLURMEnvironment.job_id()
@property
def distributed_sampler_kwargs(self) -> Optional[dict]:
if isinstance(self.strategy, ParallelStrategy):
return self.strategy.distributed_sampler_kwargs
@property
def data_parallel(self) -> bool:
return isinstance(self.strategy, ParallelStrategy)
@property
def progress_bar_dict(self) -> dict:
"""Read-only for progress bar metrics."""
rank_zero_deprecation(
"`trainer.progress_bar_dict` is deprecated in v1.5 and will be removed in v1.7."
" Use `ProgressBarBase.get_metrics` instead."
)
ref_model = self.lightning_module
ref_model = cast(pl.LightningModule, ref_model)
if self.progress_bar_callback:
return self.progress_bar_callback.get_metrics(self, ref_model)
return self.progress_bar_metrics
@property
def enable_validation(self) -> bool:
"""Check if we should run validation during training."""
return (
self._data_connector._val_dataloader_source.is_defined()
and is_overridden("validation_step", self.lightning_module)
and self.limit_val_batches > 0
)
@property
def default_root_dir(self) -> str:
"""The default location to save artifacts of loggers, checkpoints etc.
It is used as a fallback if logger or checkpoint callback do not define specific save paths.
"""
if get_filesystem(self._default_root_dir).protocol == "file":
return os.path.normpath(self._default_root_dir)
return self._default_root_dir
@property
def weights_save_path(self) -> str:
"""
The default root location to save weights (checkpoints), e.g., when the
:class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` does not define a file path.
.. deprecated:: v1.6
`Trainer.weights_save_path` has been deprecated in v1.6 and will be removed in v1.8.
"""
rank_zero_deprecation("`Trainer.weights_save_path` has been deprecated in v1.6 and will be removed in v1.8.")
return self._weights_save_path_internal
# TODO: Remove _weights_save_path_internal in v1.8
@property
def _weights_save_path_internal(self) -> str:
"""This is an internal implementation of weights_save_path which allows weights_save_path to be used
internally by the framework without emitting a deprecation warning.
To be removed in v1.8.
"""
if get_filesystem(self._weights_save_path).protocol == "file":
return os.path.normpath(self._weights_save_path)
return self._weights_save_path
@property
def early_stopping_callback(self) -> Optional[EarlyStopping]:
"""The first :class:`~pytorch_lightning.callbacks.early_stopping.EarlyStopping` callback in the
Trainer.callbacks list, or ``None`` if it doesn't exist."""
callbacks = self.early_stopping_callbacks
return callbacks[0] if len(callbacks) > 0 else None
@property
def early_stopping_callbacks(self) -> List[EarlyStopping]:
"""A list of all instances of :class:`~pytorch_lightning.callbacks.early_stopping.EarlyStopping` found in
the Trainer.callbacks list."""
return [c for c in self.callbacks if isinstance(c, EarlyStopping)]
@property
def prediction_writer_callbacks(self) -> List[BasePredictionWriter]:
"""A list of all instances of :class:`~pytorch_lightning.callbacks.prediction_writer.BasePredictionWriter`
found in the Trainer.callbacks list."""
return [cb for cb in self.callbacks if isinstance(cb, BasePredictionWriter)]
@property
def checkpoint_callback(self) -> Optional[ModelCheckpoint]:
"""The first :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` callback in the
Trainer.callbacks list, or ``None`` if it doesn't exist."""
callbacks = self.checkpoint_callbacks
return callbacks[0] if len(callbacks) > 0 else None
@property
def checkpoint_callbacks(self) -> List[ModelCheckpoint]:
"""A list of all instances of :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` found
in the Trainer.callbacks list."""
return [c for c in self.callbacks if isinstance(c, ModelCheckpoint)]
@property
def progress_bar_callback(self) -> Optional[ProgressBarBase]:
"""An instance of :class:`~pytorch_lightning.callbacks.progress.base.ProgressBarBase` found in the
Trainer.callbacks list, or ``None`` if one doesn't exist."""
for c in self.callbacks:
if isinstance(c, ProgressBarBase):
return c
return None
@property
def resume_from_checkpoint(self) -> Optional[Union[str, Path]]:
resume_from_checkpoint = self._checkpoint_connector.resume_from_checkpoint_fit_path
if resume_from_checkpoint is not None:
rank_zero_deprecation(
"`trainer.resume_from_checkpoint` is deprecated in v1.5 and will be removed in v2.0."
" Specify the fit checkpoint path with `trainer.fit(ckpt_path=)` instead.",
stacklevel=5,
)
return resume_from_checkpoint
@property
def ckpt_path(self) -> Optional[str]:
"""Set to the path/URL of a checkpoint loaded via :meth:`~pytorch_lightning.trainer.trainer.Trainer.fit`,
:meth:`~pytorch_lightning.trainer.trainer.Trainer.validate`,
:meth:`~pytorch_lightning.trainer.trainer.Trainer.test`, or
:meth:`~pytorch_lightning.trainer.trainer.Trainer.predict`. ``None`` otherwise."""
return self._ckpt_path
@property
def validated_ckpt_path(self) -> Optional[str]:
rank_zero_deprecation(
"The `Trainer.validated_ckpt_path` attribute was deprecated in v1.6 and will be removed in v1.8. The"
" path of a checkpoint loaded via `Trainer.{fit,validate,test,predict}` should be accessed via"
" `Trainer.ckpt_path` instead.",
stacklevel=5,
)
return self._validated_ckpt_path
@validated_ckpt_path.setter
def validated_ckpt_path(self, ckpt_path: Optional[str]) -> None:
rank_zero_deprecation(
"The `Trainer.validated_ckpt_path` attribute was deprecated in v1.6 and will be removed in v1.8. The"
" path of a checkpoint loaded via `Trainer.{fit,validate,test,predict}` should be accessed via the"
" read-only `Trainer.ckpt_path`.",
stacklevel=5,
)
self._validated_ckpt_path = ckpt_path
@property
def tested_ckpt_path(self) -> Optional[str]:
rank_zero_deprecation(
"The `Trainer.tested_ckpt_path` attribute was deprecated in v1.6 and will be removed in v1.8. The"
" path of a checkpoint loaded via `Trainer.{fit,validate,test,predict}` should be accessed via"
" `Trainer.ckpt_path` instead.",
stacklevel=5,
)
return self._tested_ckpt_path
@tested_ckpt_path.setter
def tested_ckpt_path(self, ckpt_path: Optional[str]) -> None:
rank_zero_deprecation(
"The `Trainer.tested_ckpt_path` attribute was deprecated in v1.6 and will be removed in v1.8. The"
" path of a checkpoint loaded via `Trainer.{fit,validate,test,predict}` should be accessed via the"
" read-only `Trainer.ckpt_path` instead.",
stacklevel=5,
)
self._tested_ckpt_path = ckpt_path
@property
def predicted_ckpt_path(self) -> Optional[str]:
rank_zero_deprecation(
"The `Trainer.predicted_ckpt_path` attribute was deprecated in v1.6 and will be removed in v1.8. The"
" path of a checkpoint loaded via `Trainer.{fit,validate,test,predict}` should be accessed via"
" `Trainer.ckpt_path` instead.",
stacklevel=5,
)
return self._predicted_ckpt_path
@predicted_ckpt_path.setter
def predicted_ckpt_path(self, ckpt_path: Optional[str]) -> None:
rank_zero_deprecation(
"The `Trainer.predicted_ckpt_path` attribute was deprecated in v1.6 and will be removed in v1.8. The"
" path of a checkpoint loaded via `Trainer.{fit,validate,test,predict}` should be accessed via the"
" read-only `Trainer.ckpt_path` instead.",
stacklevel=5,
)
self._predicted_ckpt_path = ckpt_path
def save_checkpoint(
self, filepath: _PATH, weights_only: bool = False, storage_options: Optional[Any] = None
) -> None:
r"""
Runs routine to create a checkpoint.
Args:
filepath: Path where checkpoint is saved.
weights_only: If ``True``, will only save the model weights.
storage_options: parameter for how to save to storage, passed to ``CheckpointIO`` plugin
"""
self._checkpoint_connector.save_checkpoint(filepath, weights_only=weights_only, storage_options=storage_options)
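    # Illustrative usage sketch (not part of the library source):
    #
    #     trainer.save_checkpoint("example.ckpt")                     # full training state
    #     trainer.save_checkpoint("weights.ckpt", weights_only=True)  # model weights only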
"""
Parsing properties
"""
@classmethod
def default_attributes(cls) -> dict:
init_signature = inspect.signature(cls)
return {k: v.default for k, v in init_signature.parameters.items()}
@classmethod
def get_deprecated_arg_names(cls) -> List:
"""Returns a list with deprecated Trainer arguments."""
depr_arg_names = []
for name, val in cls.__dict__.items():
if name.startswith("DEPRECATED") and isinstance(val, (tuple, list)):
depr_arg_names.extend(val)
return depr_arg_names
@classmethod
def from_argparse_args(cls: Any, args: Union[Namespace, ArgumentParser], **kwargs) -> Any:
return from_argparse_args(cls, args, **kwargs)
@classmethod
def parse_argparser(cls, arg_parser: Union[ArgumentParser, Namespace]) -> Namespace:
return parse_argparser(cls, arg_parser)
@classmethod
def match_env_arguments(cls) -> Namespace:
return parse_env_variables(cls)
@classmethod
def add_argparse_args(cls, parent_parser: ArgumentParser, **kwargs) -> ArgumentParser:
return add_argparse_args(cls, parent_parser, **kwargs)
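    # Illustrative usage sketch (not part of the library source) tying the argparse helpers
    # above together:
    #
    #     parser = ArgumentParser()
    #     parser = Trainer.add_argparse_args(parser)
    #     args = parser.parse_args()
    #     trainer = Trainer.from_argparse_args(args)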
"""
State properties
"""
@property
def interrupted(self) -> bool:
return self.state.status == TrainerStatus.INTERRUPTED
@property
def training(self) -> bool:
return self.state.stage == RunningStage.TRAINING
@training.setter
def training(self, val: bool) -> None:
if val:
self.state.stage = RunningStage.TRAINING
elif self.training:
self.state.stage = None
@property
def testing(self) -> bool:
return self.state.stage == RunningStage.TESTING
@testing.setter
def testing(self, val: bool) -> None:
if val:
self.state.stage = RunningStage.TESTING
elif self.testing:
self.state.stage = None
@property
def predicting(self) -> bool:
return self.state.stage == RunningStage.PREDICTING
@predicting.setter
def predicting(self, val: bool) -> None:
if val:
self.state.stage = RunningStage.PREDICTING
elif self.predicting:
self.state.stage = None
@property
def tuning(self) -> bool:
return self.state.stage == RunningStage.TUNING
@tuning.setter
def tuning(self, val: bool) -> None:
if val:
self.state.stage = RunningStage.TUNING
elif self.tuning:
self.state.stage = None
@property
def validating(self) -> bool:
return self.state.stage == RunningStage.VALIDATING
@validating.setter
def validating(self, val: bool) -> None:
if val:
self.state.stage = RunningStage.VALIDATING
elif self.validating:
self.state.stage = None
@property
def evaluating(self) -> bool:
return self.state.stage and self.state.stage.evaluating
@property
def sanity_checking(self) -> bool:
return self.state.stage == RunningStage.SANITY_CHECKING
@sanity_checking.setter
def sanity_checking(self, val: bool) -> None:
if val:
self.state.stage = RunningStage.SANITY_CHECKING
elif self.sanity_checking:
self.state.stage = None
"""
Loop properties
"""
@property
def global_step(self) -> int:
"""The number of optimizer steps taken (does not reset each epoch).
This includes multiple optimizers and TBPTT steps (if enabled).
"""
return self.fit_loop.epoch_loop.global_step
@property
def current_epoch(self) -> int:
"""The current epoch, updated after the epoch end hooks are run."""
return self.fit_loop.epoch_progress.current.completed
@property
def max_epochs(self) -> int:
return self.fit_loop.max_epochs
@property
def min_epochs(self) -> int:
return self.fit_loop.min_epochs
@property
def max_steps(self) -> int:
return self.fit_loop.max_steps
@property
def min_steps(self) -> Optional[int]:
return self.fit_loop.min_steps
@property
def is_last_batch(self) -> bool:
return self.fit_loop.epoch_loop.batch_progress.is_last_batch
@property
def fit_loop(self) -> FitLoop:
return self._fit_loop
@fit_loop.setter
def fit_loop(self, loop: FitLoop):
"""Attach a custom fit loop to this Trainer.
It will run with
:meth:`~pytorch_lightning.trainer.trainer.Trainer.fit`.
"""
loop.trainer = self
self._fit_loop = loop
@property
def validate_loop(self) -> EvaluationLoop:
return self._validate_loop
@validate_loop.setter
def validate_loop(self, loop: EvaluationLoop):
"""Attach a custom validation loop to this Trainer.
It will run with
:meth:`~pytorch_lightning.trainer.trainer.Trainer.validate`. Note that this loop is different from the one
running during training inside the :meth:`pytorch_lightning.trainer.trainer.Trainer.fit` call.
"""
loop.trainer = self
self._validate_loop = loop
@property
def test_loop(self) -> EvaluationLoop:
return self._test_loop
@test_loop.setter
def test_loop(self, loop: EvaluationLoop):
"""Attach a custom test loop to this Trainer.
It will run with
:meth:`~pytorch_lightning.trainer.trainer.Trainer.test`.
"""
loop.trainer = self
self._test_loop = loop
@property
def predict_loop(self) -> PredictionLoop:
return self._predict_loop
@predict_loop.setter
def predict_loop(self, loop: PredictionLoop):
"""Attach a custom prediction loop to this Trainer.
It will run with
:meth:`~pytorch_lightning.trainer.trainer.Trainer.predict`.
"""
loop.trainer = self
self._predict_loop = loop
@property
def verbose_evaluate(self) -> bool:
rank_zero_deprecation(
"The `Trainer.verbose_evaluate` property has been deprecated and will be removed in v1.8. The current value"
" returned is the union of the validate and test loop values. You can choose which one to access with"
" `trainer.{validate,test}_loop.verbose`.",
stacklevel=5,
)
return self.validate_loop.verbose or self.test_loop.verbose
@verbose_evaluate.setter
def verbose_evaluate(self, verbose: bool) -> None:
        rank_zero_deprecation(
            "The `Trainer.verbose_evaluate` property has been deprecated and will be removed in v1.8. This will set"
            " the value for both `trainer.{validate,test}_loop.verbose`.",
            stacklevel=5,
        )
self.validate_loop.verbose = verbose
self.test_loop.verbose = verbose
@property
def _evaluation_loop(self) -> EvaluationLoop:
if self.state.fn in (TrainerFn.FITTING, TrainerFn.TUNING):
return self.fit_loop.epoch_loop.val_loop
if self.state.fn == TrainerFn.VALIDATING:
return self.validate_loop
if self.state.fn == TrainerFn.TESTING:
return self.test_loop
raise RuntimeError("The `Trainer._evaluation_loop` property isn't defined. Accessed outside of scope")
@property
def _active_loop(self) -> Optional[Union[FitLoop, EvaluationLoop, PredictionLoop]]:
if self.training:
return self.fit_loop
if self.sanity_checking or self.evaluating:
return self._evaluation_loop
if self.predicting:
return self.predict_loop
"""
Logging properties
"""
@property
def logger(self) -> Optional[LightningLoggerBase]:
if len(self.loggers) == 0:
return None
if len(self.loggers) == 1:
return self.loggers[0]
else:
rank_zero_warn(
"Using trainer.logger when Trainer is configured to use multiple loggers."
" This behavior will change in v1.8 when LoggerCollection is removed, and"
" trainer.logger will return the first logger in trainer.loggers"
)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return LoggerCollection(self.loggers)
@logger.setter
def logger(self, logger: Optional[LightningLoggerBase]) -> None:
if not logger:
self.loggers = []
elif isinstance(logger, LoggerCollection):
self.loggers = list(logger)
else:
self.loggers = [logger]
@property
def loggers(self) -> List[LightningLoggerBase]:
return self._loggers
@loggers.setter
def loggers(self, loggers: Optional[List[LightningLoggerBase]]) -> None:
self._loggers = loggers if loggers else []
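    # Illustrative sketch (editor-added, not part of the original source): passing multiple loggers,
    # assuming the built-in TensorBoardLogger and CSVLogger:
    #
    #     from pytorch_lightning.loggers import CSVLogger, TensorBoardLogger
    #     trainer = Trainer(logger=[TensorBoardLogger("tb_logs"), CSVLogger("csv_logs")])
    #     assert len(trainer.loggers) == 2
    #     # `trainer.logger` (deprecated aggregation) would wrap both in a LoggerCollection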
@property
def callback_metrics(self) -> dict:
return self._logger_connector.callback_metrics
@property
def logged_metrics(self) -> dict:
return self._logger_connector.logged_metrics
@property
def progress_bar_metrics(self) -> dict:
return self._logger_connector.progress_bar_metrics
@property
def _results(self) -> Optional[_ResultCollection]:
active_loop = self._active_loop
if active_loop is not None:
return active_loop._results
def _exit_gracefully_on_signal(self) -> None:
if not _fault_tolerant_training() or not self._should_terminate_gracefully():
return
raise ExitGracefullyException(0)
def _should_terminate_gracefully(self) -> bool:
value = torch.tensor(int(self._terminate_gracefully), device=self.strategy.root_device)
return self.strategy.reduce(value, reduce_op="sum") > 0
@property
def weights_summary(self) -> Optional[str]:
rank_zero_deprecation("`Trainer.weights_summary` is deprecated in v1.5 and will be removed in v1.7.")
return self._weights_summary
@weights_summary.setter
def weights_summary(self, val: Optional[str]) -> None:
rank_zero_deprecation("Setting `Trainer.weights_summary` is deprecated in v1.5 and will be removed in v1.7.")
self._weights_summary = val
"""
Other
"""
@property
def estimated_stepping_batches(self) -> Union[int, float]:
r"""
Estimated stepping batches for the complete training inferred from DataLoaders, gradient
accumulation factor and distributed setup.
Examples::
def configure_optimizers(self):
optimizer = ...
scheduler = torch.optim.lr_scheduler.OneCycleLR(
optimizer, max_lr=1e-3, total_steps=self.trainer.estimated_stepping_batches
)
return [optimizer], [scheduler]
"""
accumulation_scheduler = self.accumulation_scheduler
if accumulation_scheduler.epochs != [0]:
raise MisconfigurationException(
"Estimated stepping batches cannot be computed with different"
" `accumulate_grad_batches` at different epochs."
)
# infinite training
if self.max_epochs == -1 and self.max_steps == -1:
return float("inf")
if self.train_dataloader is None:
rank_zero_info("Loading `train_dataloader` to estimate number of stepping batches.")
self.reset_train_dataloader()
total_batches = self.num_training_batches
# iterable dataset
if total_batches == float("inf"):
return self.max_steps
self.accumulate_grad_batches = accumulation_scheduler.get_accumulate_grad_batches(self.current_epoch)
effective_batch_size = self.accumulate_grad_batches
max_estimated_steps = math.ceil(total_batches / effective_batch_size) * max(self.max_epochs, 1)
max_estimated_steps = min(max_estimated_steps, self.max_steps) if self.max_steps != -1 else max_estimated_steps
return max_estimated_steps
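    # Illustrative worked example (editor-added, not part of the original source) of the estimate above,
    # assuming 100 training batches per epoch, `accumulate_grad_batches=4`, `max_epochs=10`, `max_steps=-1`:
    #
    #     ceil(100 / 4) * max(10, 1) = 25 * 10 = 250 estimated stepping batches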
@property
def terminate_on_nan(self) -> bool:
rank_zero_deprecation("`Trainer.terminate_on_nan` is deprecated in v1.5 and will be removed in 1.7.")
return self._terminate_on_nan
@terminate_on_nan.setter
def terminate_on_nan(self, val: bool) -> None:
rank_zero_deprecation(
f"Setting `Trainer.terminate_on_nan = {val}` is deprecated in v1.5 and will be removed in 1.7."
f" Please set `Trainer(detect_anomaly={val})` instead."
)
self._terminate_on_nan = val # : 212
def _determine_batch_limits(batches: Optional[Union[int, float]], name: str) -> Union[int, float]:
if batches is None:
        # `batches` is None when the user did not pass a value explicitly; the info messages below are
        # only shown to users who set a value themselves
return 1.0
# differentiating based on the type can be error-prone for users. show a message describing the chosen behaviour
if isinstance(batches, int) and batches == 1:
if name == "limit_train_batches":
message = "1 batch per epoch will be used."
elif name == "val_check_interval":
message = "validation will run after every batch."
else:
message = "1 batch will be used."
rank_zero_info(f"`Trainer({name}=1)` was configured so {message}")
elif isinstance(batches, float) and batches == 1.0:
if name == "limit_train_batches":
message = "100% of the batches per epoch will be used."
elif name == "val_check_interval":
message = "validation will run at the end of the training epoch."
else:
message = "100% of the batches will be used."
rank_zero_info(f"`Trainer({name}=1.0)` was configured so {message}.")
if 0 <= batches <= 1:
return batches
if batches > 1 and batches % 1.0 == 0:
return int(batches)
raise MisconfigurationException(
f"You have passed invalid value {batches} for {name}, it has to be in [0.0, 1.0] or an int."
)
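# Illustrative sketch (editor-added, not part of the original source) of how `_determine_batch_limits`
# interprets its input, following the branches above:
#
#     _determine_batch_limits(None, "limit_val_batches")  # -> 1.0  (no value passed, use all batches)
#     _determine_batch_limits(0.25, "limit_val_batches")  # -> 0.25 (25% of the batches)
#     _determine_batch_limits(10, "limit_val_batches")    # -> 10   (exactly 10 batches)
#     _determine_batch_limits(1, "val_check_interval")    # -> 1    (validation after every batch)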
avg_line_length: 42.776912 | max_line_length: 122 | alphanum_fraction: 0.648367 | content_no_comment:
import inspect
import logging
import math
import os
import traceback
import warnings
from argparse import ArgumentParser, Namespace
from copy import deepcopy
from datetime import timedelta
from pathlib import Path
from typing import Any, Callable, cast, Dict, Iterable, List, Optional, Tuple, Type, Union
from weakref import proxy
import torch
from packaging.version import Version
from torch.optim import Optimizer
from torch.utils.data import DataLoader
import pytorch_lightning as pl
from pytorch_lightning.accelerators import Accelerator, GPUAccelerator, IPUAccelerator, TPUAccelerator
from pytorch_lightning.callbacks import Callback, EarlyStopping, ModelCheckpoint, ProgressBarBase
from pytorch_lightning.callbacks.prediction_writer import BasePredictionWriter
from pytorch_lightning.core.datamodule import LightningDataModule
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.loggers import LightningLoggerBase
from pytorch_lightning.loggers.base import DummyLogger, LoggerCollection
from pytorch_lightning.loggers.tensorboard import TensorBoardLogger
from pytorch_lightning.loops import PredictionLoop, TrainingEpochLoop
from pytorch_lightning.loops.dataloader.evaluation_loop import EvaluationLoop
from pytorch_lightning.loops.fit_loop import FitLoop
from pytorch_lightning.loops.utilities import _parse_loop_limits, _reset_progress
from pytorch_lightning.plugins import (
ApexMixedPrecisionPlugin,
NativeMixedPrecisionPlugin,
PLUGIN_INPUT,
PrecisionPlugin,
)
from pytorch_lightning.plugins.environments.slurm_environment import SLURMEnvironment
from pytorch_lightning.profiler import (
AdvancedProfiler,
BaseProfiler,
PassThroughProfiler,
PyTorchProfiler,
SimpleProfiler,
XLAProfiler,
)
from pytorch_lightning.strategies import ParallelStrategy, Strategy
from pytorch_lightning.strategies.ddp_spawn import DDPSpawnStrategy
from pytorch_lightning.trainer.callback_hook import TrainerCallbackHookMixin
from pytorch_lightning.trainer.configuration_validator import verify_loop_configurations
from pytorch_lightning.trainer.connectors.accelerator_connector import AcceleratorConnector
from pytorch_lightning.trainer.connectors.callback_connector import CallbackConnector
from pytorch_lightning.trainer.connectors.checkpoint_connector import CheckpointConnector
from pytorch_lightning.trainer.connectors.data_connector import DataConnector
from pytorch_lightning.trainer.connectors.logger_connector import LoggerConnector
from pytorch_lightning.trainer.connectors.logger_connector.result import _ResultCollection
from pytorch_lightning.trainer.connectors.signal_connector import SignalConnector
from pytorch_lightning.trainer.data_loading import TrainerDataLoadingMixin
from pytorch_lightning.trainer.optimizers import TrainerOptimizersMixin
from pytorch_lightning.trainer.states import RunningStage, TrainerFn, TrainerState, TrainerStatus
from pytorch_lightning.trainer.supporters import CombinedLoader
from pytorch_lightning.tuner.lr_finder import _LRFinder
from pytorch_lightning.tuner.tuning import Tuner
from pytorch_lightning.utilities import (
_IPU_AVAILABLE,
_TPU_AVAILABLE,
AMPType,
device_parser,
GradClipAlgorithmType,
parsing,
)
from pytorch_lightning.utilities.apply_func import apply_to_collection
from pytorch_lightning.utilities.argparse import (
_defaults_from_env_vars,
add_argparse_args,
from_argparse_args,
parse_argparser,
parse_env_variables,
)
from pytorch_lightning.utilities.auto_restart import _add_capture_metadata_collate
from pytorch_lightning.utilities.cloud_io import get_filesystem
from pytorch_lightning.utilities.data import _auto_add_worker_init_fn, has_len_all_ranks
from pytorch_lightning.utilities.distributed import distributed_available
from pytorch_lightning.utilities.exceptions import ExitGracefullyException, MisconfigurationException
from pytorch_lightning.utilities.imports import _fault_tolerant_training
from pytorch_lightning.utilities.meta import is_on_meta_device, materialize_module
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_info, rank_zero_warn
from pytorch_lightning.utilities.seed import isolate_rng
from pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature
from pytorch_lightning.utilities.types import (
_EVALUATE_OUTPUT,
_PATH,
_PREDICT_OUTPUT,
EVAL_DATALOADERS,
LRSchedulerConfig,
STEP_OUTPUT,
TRAIN_DATALOADERS,
)
from pytorch_lightning.utilities.warnings import PossibleUserWarning
log = logging.getLogger(__name__)
warnings.filterwarnings(
"ignore", message="torch.distributed.reduce_op is deprecated, please use torch.distributed.ReduceOp instead"
)
class Trainer(
TrainerCallbackHookMixin,
TrainerOptimizersMixin,
TrainerDataLoadingMixin,
):
@_defaults_from_env_vars
def __init__(
self,
logger: Union[LightningLoggerBase, Iterable[LightningLoggerBase], bool] = True,
checkpoint_callback: Optional[bool] = None,
enable_checkpointing: bool = True,
callbacks: Optional[Union[List[Callback], Callback]] = None,
default_root_dir: Optional[str] = None,
gradient_clip_val: Optional[Union[int, float]] = None,
gradient_clip_algorithm: Optional[str] = None,
process_position: int = 0,
num_nodes: int = 1,
num_processes: Optional[int] = None,
devices: Optional[Union[List[int], str, int]] = None,
gpus: Optional[Union[List[int], str, int]] = None,
auto_select_gpus: bool = False,
tpu_cores: Optional[Union[List[int], str, int]] = None,
ipus: Optional[int] = None,
log_gpu_memory: Optional[str] = None,
progress_bar_refresh_rate: Optional[int] = None,
enable_progress_bar: bool = True,
overfit_batches: Union[int, float] = 0.0,
track_grad_norm: Union[int, float, str] = -1,
check_val_every_n_epoch: int = 1,
fast_dev_run: Union[int, bool] = False,
accumulate_grad_batches: Optional[Union[int, Dict[int, int]]] = None,
max_epochs: Optional[int] = None,
min_epochs: Optional[int] = None,
max_steps: int = -1,
min_steps: Optional[int] = None,
max_time: Optional[Union[str, timedelta, Dict[str, int]]] = None,
limit_train_batches: Optional[Union[int, float]] = None,
limit_val_batches: Optional[Union[int, float]] = None,
limit_test_batches: Optional[Union[int, float]] = None,
limit_predict_batches: Optional[Union[int, float]] = None,
val_check_interval: Optional[Union[int, float]] = None,
flush_logs_every_n_steps: Optional[int] = None,
log_every_n_steps: int = 50,
accelerator: Optional[Union[str, Accelerator]] = None,
strategy: Optional[Union[str, Strategy]] = None,
sync_batchnorm: bool = False,
precision: Union[int, str] = 32,
enable_model_summary: bool = True,
weights_summary: Optional[str] = "top",
weights_save_path: Optional[str] = None,
num_sanity_val_steps: int = 2,
resume_from_checkpoint: Optional[Union[Path, str]] = None,
profiler: Optional[Union[BaseProfiler, str]] = None,
benchmark: Optional[bool] = None,
deterministic: bool = False,
reload_dataloaders_every_n_epochs: int = 0,
auto_lr_find: Union[bool, str] = False,
replace_sampler_ddp: bool = True,
detect_anomaly: bool = False,
auto_scale_batch_size: Union[str, bool] = False,
prepare_data_per_node: Optional[bool] = None,
plugins: Optional[Union[PLUGIN_INPUT, List[PLUGIN_INPUT]]] = None,
amp_backend: str = "native",
amp_level: Optional[str] = None,
move_metrics_to_cpu: bool = False,
multiple_trainloader_mode: str = "max_size_cycle",
stochastic_weight_avg: bool = False,
terminate_on_nan: Optional[bool] = None,
) -> None:
super().__init__()
Trainer._log_api_event("init")
log.detail(f"{self.__class__.__name__}: Initializing trainer with parameters: {locals()}")
self.state = TrainerState()
gpu_ids, tpu_cores = self._parse_devices(gpus, auto_select_gpus, tpu_cores)
self._data_connector = DataConnector(self, multiple_trainloader_mode)
self._accelerator_connector = AcceleratorConnector(
num_processes=num_processes,
devices=devices,
tpu_cores=tpu_cores,
ipus=ipus,
accelerator=accelerator,
strategy=strategy,
gpus=gpus,
gpu_ids=gpu_ids,
num_nodes=num_nodes,
sync_batchnorm=sync_batchnorm,
benchmark=benchmark,
replace_sampler_ddp=replace_sampler_ddp,
deterministic=deterministic,
precision=precision,
amp_type=amp_backend,
amp_level=amp_level,
plugins=plugins,
)
self._logger_connector = LoggerConnector(self, log_gpu_memory)
self._callback_connector = CallbackConnector(self)
self._checkpoint_connector = CheckpointConnector(self, resume_from_checkpoint)
self._signal_connector = SignalConnector(self)
self.tuner = Tuner(self)
min_steps, max_steps, min_epochs, max_epochs, max_time = _parse_loop_limits(
min_steps, max_steps, min_epochs, max_epochs, max_time
)
fit_loop = FitLoop(min_epochs=min_epochs, max_epochs=max_epochs)
training_epoch_loop = TrainingEpochLoop(min_steps=min_steps, max_steps=max_steps)
fit_loop.connect(epoch_loop=training_epoch_loop)
self.fit_loop = fit_loop
self.validate_loop = EvaluationLoop()
self.test_loop = EvaluationLoop()
self.predict_loop = PredictionLoop()
self._ckpt_path: Optional[str] = None
self._validated_ckpt_path: Optional[str] = None
self._tested_ckpt_path: Optional[str] = None
self._predicted_ckpt_path: Optional[str] = None
self._weights_summary: Optional[str] = None
self._callback_connector.on_trainer_init(
callbacks,
checkpoint_callback,
enable_checkpointing,
enable_progress_bar,
progress_bar_refresh_rate,
process_position,
default_root_dir,
weights_save_path,
enable_model_summary,
weights_summary,
stochastic_weight_avg,
max_time,
accumulate_grad_batches,
)
self._call_callback_hooks("on_init_start")
self.check_val_every_n_epoch: int
self._data_connector.on_trainer_init(
check_val_every_n_epoch,
reload_dataloaders_every_n_epochs,
prepare_data_per_node,
)
if terminate_on_nan is not None:
rank_zero_deprecation(
"Trainer argument `terminate_on_nan` was deprecated in v1.5 and will be removed in 1.7."
" Please use `Trainer(detect_anomaly=True)` instead."
)
if not isinstance(terminate_on_nan, bool):
raise TypeError(f"`terminate_on_nan` should be a bool, got {terminate_on_nan}.")
if gradient_clip_val is not None and not isinstance(gradient_clip_val, (int, float)):
raise TypeError(f"`gradient_clip_val` should be an int or a float. Got {gradient_clip_val}.")
if gradient_clip_algorithm is not None and not GradClipAlgorithmType.supported_type(
gradient_clip_algorithm.lower()
):
raise MisconfigurationException(
f"`gradient_clip_algorithm` {gradient_clip_algorithm} is invalid. "
f"Allowed algorithms: {GradClipAlgorithmType.supported_types()}."
)
if track_grad_norm != -1 and not (
(isinstance(track_grad_norm, (int, float)) or track_grad_norm == "inf") and float(track_grad_norm) > 0
):
raise MisconfigurationException(
f"`track_grad_norm` must be a positive number or 'inf' (infinity norm). Got {track_grad_norm}."
)
self._terminate_on_nan = terminate_on_nan
self.gradient_clip_val: Union[int, float] = gradient_clip_val
self.gradient_clip_algorithm = (
GradClipAlgorithmType(gradient_clip_algorithm.lower())
if gradient_clip_algorithm is not None
else gradient_clip_algorithm
)
self.track_grad_norm: float = float(track_grad_norm)
self._detect_anomaly: bool = detect_anomaly
self._setup_on_init(num_sanity_val_steps)
self.tuner.on_trainer_init(auto_lr_find, auto_scale_batch_size)
self.__init_profiler(profiler)
self._loggers: List[LightningLoggerBase]
self._logger_connector.on_trainer_init(logger, flush_logs_every_n_steps, log_every_n_steps, move_metrics_to_cpu)
self.val_check_interval: Union[int, float]
self._init_debugging_flags(
limit_train_batches,
limit_val_batches,
limit_test_batches,
limit_predict_batches,
val_check_interval,
overfit_batches,
fast_dev_run,
)
self._call_callback_hooks("on_init_end")
def _init_debugging_flags(
self,
limit_train_batches: Optional[Union[int, float]],
limit_val_batches: Optional[Union[int, float]],
limit_test_batches: Optional[Union[int, float]],
limit_predict_batches: Optional[Union[int, float]],
val_check_interval: Optional[Union[int, float]],
overfit_batches: Union[int, float],
fast_dev_run: Union[int, bool],
) -> None:
if isinstance(fast_dev_run, int) and (fast_dev_run < 0):
raise MisconfigurationException(
f"fast_dev_run={fast_dev_run} is not a valid configuration. It should be >= 0."
)
self.fast_dev_run = fast_dev_run
if fast_dev_run == 1:
self.fast_dev_run = True
if fast_dev_run:
num_batches = int(fast_dev_run)
limit_train_batches = num_batches
limit_val_batches = num_batches
limit_test_batches = num_batches
limit_predict_batches = num_batches
self.fit_loop.max_steps = num_batches
self.num_sanity_val_steps = 0
self.fit_loop.max_epochs = 1
val_check_interval = 1.0
self.check_val_every_n_epoch = 1
self.loggers = [DummyLogger()] if self.loggers else []
rank_zero_info(
"Running in fast_dev_run mode: will run a full train,"
f" val, test and prediction loop using {num_batches} batch(es)."
)
self.limit_train_batches = _determine_batch_limits(limit_train_batches, "limit_train_batches")
self.limit_val_batches = _determine_batch_limits(limit_val_batches, "limit_val_batches")
self.limit_test_batches = _determine_batch_limits(limit_test_batches, "limit_test_batches")
self.limit_predict_batches = _determine_batch_limits(limit_predict_batches, "limit_predict_batches")
self.val_check_interval = _determine_batch_limits(val_check_interval, "val_check_interval")
self.overfit_batches = _determine_batch_limits(overfit_batches, "overfit_batches")
self._determine_data_use_amount(self.overfit_batches)
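    # Illustrative sketch (editor-added, not part of the original source): `fast_dev_run` as handled
    # above caps every loop at a fixed number of batches for a quick smoke test:
    #
    #     trainer = Trainer(fast_dev_run=True)  # 1 batch per loop; loggers replaced by DummyLogger
    #     trainer = Trainer(fast_dev_run=5)     # 5 batches per loop
    #     trainer.fit(model)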
def _determine_data_use_amount(self, overfit_batches: float) -> None:
if overfit_batches > 0:
self.limit_train_batches = overfit_batches
self.limit_val_batches = 0
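    # Illustrative sketch (editor-added, not part of the original source): `overfit_batches` as handled
    # above caps `limit_train_batches` and disables validation batches:
    #
    #     trainer = Trainer(overfit_batches=0.01)  # train on 1% of the training batches
    #     trainer = Trainer(overfit_batches=10)    # train on exactly 10 training batches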
def _setup_on_init(self, num_sanity_val_steps: int) -> None:
self._log_device_info()
self.should_stop = False
self.state = TrainerState()
self.num_training_batches = float("inf")
self.train_dataloader = None
if num_sanity_val_steps == -1:
self.num_sanity_val_steps = float("inf")
else:
self.num_sanity_val_steps = num_sanity_val_steps
self.num_sanity_val_batches = []
self.num_test_batches = []
self.num_val_batches = []
self.test_dataloaders = None
self.val_dataloaders = None
self._last_train_dl_reload_epoch = float("-inf")
self._last_val_dl_reload_epoch = float("-inf")
self.num_predict_batches = []
def _call_and_handle_interrupt(self, trainer_fn: Callable, *args: Any, **kwargs: Any) -> Any:
try:
if self.strategy.launcher is not None:
return self.strategy.launcher.launch(trainer_fn, *args, trainer=self, **kwargs)
else:
return trainer_fn(*args, **kwargs)
except KeyboardInterrupt as exception:
rank_zero_warn("Detected KeyboardInterrupt, attempting graceful shutdown...")
if not self.interrupted:
self.state.status = TrainerStatus.INTERRUPTED
self._call_callback_hooks("on_keyboard_interrupt")
self._call_callback_hooks("on_exception", exception)
except BaseException as exception:
self.state.status = TrainerStatus.INTERRUPTED
if distributed_available() and self.world_size > 1:
self.strategy.reconciliate_processes(traceback.format_exc())
self._call_callback_hooks("on_exception", exception)
self._teardown()
self.state.stage = None
raise
def fit(
self,
model: "pl.LightningModule",
train_dataloaders: Optional[Union[TRAIN_DATALOADERS, LightningDataModule]] = None,
val_dataloaders: Optional[EVAL_DATALOADERS] = None,
datamodule: Optional[LightningDataModule] = None,
ckpt_path: Optional[str] = None,
) -> None:
self.strategy.model = model
self._call_and_handle_interrupt(
self._fit_impl, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path
)
def _fit_impl(
self,
model: "pl.LightningModule",
train_dataloaders: Optional[Union[TRAIN_DATALOADERS, LightningDataModule]] = None,
val_dataloaders: Optional[EVAL_DATALOADERS] = None,
datamodule: Optional[LightningDataModule] = None,
ckpt_path: Optional[str] = None,
) -> None:
Trainer._log_api_event("fit")
log.detail(f"{self.__class__.__name__}: trainer fit stage")
self.state.fn = TrainerFn.FITTING
self.state.status = TrainerStatus.RUNNING
self.training = True
self._last_train_dl_reload_epoch = float("-inf")
self._last_val_dl_reload_epoch = float("-inf")
if isinstance(train_dataloaders, LightningDataModule):
datamodule = train_dataloaders
train_dataloaders = None
if (train_dataloaders is not None or val_dataloaders is not None) and datamodule is not None:
raise MisconfigurationException(
"You cannot pass `train_dataloader` or `val_dataloaders` to `trainer.fit(datamodule=...)`"
)
# links data to the trainer
self._data_connector.attach_data(
model, train_dataloaders=train_dataloaders, val_dataloaders=val_dataloaders, datamodule=datamodule
)
# TODO: ckpt_path only in v2.0
ckpt_path = ckpt_path or self.resume_from_checkpoint
self._ckpt_path = self.__set_ckpt_path(
ckpt_path, model_provided=True, model_connected=self.lightning_module is not None
)
results = self._run(model, ckpt_path=self.ckpt_path)
assert self.state.stopped
self.training = False
return results
def validate(
self,
model: Optional["pl.LightningModule"] = None,
dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,
ckpt_path: Optional[str] = None,
verbose: bool = True,
datamodule: Optional[LightningDataModule] = None,
) -> _EVALUATE_OUTPUT:
self.strategy.model = model or self.lightning_module
return self._call_and_handle_interrupt(self._validate_impl, model, dataloaders, ckpt_path, verbose, datamodule)
def _validate_impl(
self,
model: Optional["pl.LightningModule"] = None,
dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,
ckpt_path: Optional[str] = None,
verbose: bool = True,
datamodule: Optional[LightningDataModule] = None,
) -> _EVALUATE_OUTPUT:
# --------------------
# SETUP HOOK
# --------------------
Trainer._log_api_event("validate")
log.detail(f"{self.__class__.__name__}: trainer validate stage")
self.state.fn = TrainerFn.VALIDATING
self.state.status = TrainerStatus.RUNNING
self.validating = True
# if a datamodule comes in as the second arg, then fix it for the user
if isinstance(dataloaders, LightningDataModule):
datamodule = dataloaders
dataloaders = None
# If you supply a datamodule you can't supply val_dataloaders
if dataloaders is not None and datamodule:
            raise MisconfigurationException("You cannot pass both `dataloaders` and a `datamodule` to `trainer.validate()`.")
model_provided = model is not None
model = model or self.lightning_module
if model is None:
raise MisconfigurationException(
"`model` must be provided to `trainer.validate()` when it hasn't been passed in a previous run"
)
self.validate_loop.verbose = verbose
# links data to the trainer
self._data_connector.attach_data(model, val_dataloaders=dataloaders, datamodule=datamodule)
self._ckpt_path = self.__set_ckpt_path(
ckpt_path, model_provided=model_provided, model_connected=self.lightning_module is not None
)
self._validated_ckpt_path = self.ckpt_path # TODO: remove in v1.8
# run validate
results = self._run(model, ckpt_path=self.ckpt_path)
assert self.state.stopped
self.validating = False
return results
def test(
self,
model: Optional["pl.LightningModule"] = None,
dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,
ckpt_path: Optional[str] = None,
verbose: bool = True,
datamodule: Optional[LightningDataModule] = None,
) -> _EVALUATE_OUTPUT:
self.strategy.model = model or self.lightning_module
return self._call_and_handle_interrupt(self._test_impl, model, dataloaders, ckpt_path, verbose, datamodule)
def _test_impl(
self,
model: Optional["pl.LightningModule"] = None,
dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,
ckpt_path: Optional[str] = None,
verbose: bool = True,
datamodule: Optional[LightningDataModule] = None,
) -> _EVALUATE_OUTPUT:
# --------------------
# SETUP HOOK
# --------------------
Trainer._log_api_event("test")
log.detail(f"{self.__class__.__name__}: trainer test stage")
self.state.fn = TrainerFn.TESTING
self.state.status = TrainerStatus.RUNNING
self.testing = True
# if a datamodule comes in as the second arg, then fix it for the user
if isinstance(dataloaders, LightningDataModule):
datamodule = dataloaders
dataloaders = None
# If you supply a datamodule you can't supply test_dataloaders
if dataloaders is not None and datamodule:
            raise MisconfigurationException("You cannot pass both `dataloaders` and a `datamodule` to `trainer.test()`.")
model_provided = model is not None
model = model or self.lightning_module
if model is None:
raise MisconfigurationException(
"`model` must be provided to `trainer.test()` when it hasn't been passed in a previous run"
)
self.test_loop.verbose = verbose
# links data to the trainer
self._data_connector.attach_data(model, test_dataloaders=dataloaders, datamodule=datamodule)
self._ckpt_path = self.__set_ckpt_path(
ckpt_path, model_provided=model_provided, model_connected=self.lightning_module is not None
)
self._tested_ckpt_path = self.ckpt_path # TODO: remove in v1.8
# run test
results = self._run(model, ckpt_path=self.ckpt_path)
assert self.state.stopped
self.testing = False
return results
def predict(
self,
model: Optional["pl.LightningModule"] = None,
dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,
datamodule: Optional[LightningDataModule] = None,
return_predictions: Optional[bool] = None,
ckpt_path: Optional[str] = None,
) -> Optional[_PREDICT_OUTPUT]:
self.strategy.model = model or self.lightning_module
return self._call_and_handle_interrupt(
self._predict_impl, model, dataloaders, datamodule, return_predictions, ckpt_path
)
def _predict_impl(
self,
model: Optional["pl.LightningModule"] = None,
dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,
datamodule: Optional[LightningDataModule] = None,
return_predictions: Optional[bool] = None,
ckpt_path: Optional[str] = None,
) -> Optional[_PREDICT_OUTPUT]:
# --------------------
# SETUP HOOK
# --------------------
Trainer._log_api_event("predict")
log.detail(f"{self.__class__.__name__}: trainer predict stage")
self.state.fn = TrainerFn.PREDICTING
self.state.status = TrainerStatus.RUNNING
self.predicting = True
self.predict_loop.return_predictions = return_predictions
# if a datamodule comes in as the second arg, then fix it for the user
if isinstance(dataloaders, LightningDataModule):
datamodule = dataloaders
dataloaders = None
if dataloaders is not None and datamodule:
            raise MisconfigurationException("You cannot pass both `dataloaders` and a `datamodule` to `trainer.predict()`.")
model_provided = model is not None
model = model or self.lightning_module
if model is None:
raise MisconfigurationException(
"`model` must be provided to `trainer.predict()` when it hasn't been passed in a previous run"
)
self._data_connector.attach_data(model, predict_dataloaders=dataloaders, datamodule=datamodule)
self._ckpt_path = self.__set_ckpt_path(
ckpt_path, model_provided=model_provided, model_connected=self.lightning_module is not None
)
self._predicted_ckpt_path = self.ckpt_path
results = self._run(model, ckpt_path=self.ckpt_path)
assert self.state.stopped
self.predicting = False
return results
def tune(
self,
model: "pl.LightningModule",
train_dataloaders: Optional[Union[TRAIN_DATALOADERS, LightningDataModule]] = None,
val_dataloaders: Optional[EVAL_DATALOADERS] = None,
datamodule: Optional[LightningDataModule] = None,
scale_batch_size_kwargs: Optional[Dict[str, Any]] = None,
lr_find_kwargs: Optional[Dict[str, Any]] = None,
) -> Dict[str, Optional[Union[int, _LRFinder]]]:
Trainer._log_api_event("tune")
self.state.fn = TrainerFn.TUNING
self.state.status = TrainerStatus.RUNNING
self.tuning = True
if isinstance(train_dataloaders, LightningDataModule):
datamodule = train_dataloaders
train_dataloaders = None
if (train_dataloaders is not None or val_dataloaders is not None) and datamodule is not None:
raise MisconfigurationException(
"You cannot pass `train_dataloader` or `val_dataloaders` to `trainer.tune(datamodule=...)`"
)
# links data to the trainer
self._data_connector.attach_data(
model, train_dataloaders=train_dataloaders, val_dataloaders=val_dataloaders, datamodule=datamodule
)
with isolate_rng():
result = self.tuner._tune(
model, scale_batch_size_kwargs=scale_batch_size_kwargs, lr_find_kwargs=lr_find_kwargs
)
assert self.state.stopped
self.tuning = False
return result
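    # Illustrative sketch (editor-added, not part of the original source): `tune` is typically used with
    # the tuner flags enabled at construction time; the exact result keys depend on the enabled tuners:
    #
    #     trainer = Trainer(auto_lr_find=True, auto_scale_batch_size=True)
    #     result = trainer.tune(model, datamodule=dm)
    #     # e.g. result.get("lr_find") and result.get("scale_batch_size")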
def _restore_modules_and_callbacks(self, checkpoint_path: Optional[_PATH] = None) -> None:
# restore modules after setup
self._checkpoint_connector.resume_start(checkpoint_path)
self._checkpoint_connector.restore_model()
self._checkpoint_connector.restore_datamodule()
if self.state.fn == TrainerFn.FITTING:
# restore callback states
self._checkpoint_connector.restore_callbacks()
def _run(
self, model: "pl.LightningModule", ckpt_path: Optional[str] = None
) -> Optional[Union[_EVALUATE_OUTPUT, _PREDICT_OUTPUT]]:
# clean hparams
if hasattr(model, "hparams"):
parsing.clean_namespace(model.hparams)
# attach model to the training type plugin
self.strategy.connect(model)
self._callback_connector._attach_model_callbacks()
self._callback_connector._attach_model_logging_functions()
verify_loop_configurations(self)
# hook
log.detail(f"{self.__class__.__name__}: preparing data")
self._data_connector.prepare_data()
# ----------------------------
# SET UP TRAINING
# ----------------------------
self._call_callback_hooks("on_before_accelerator_backend_setup")
log.detail(f"{self.__class__.__name__}: setting up strategy environment")
self.strategy.setup_environment()
self.__setup_profiler()
self._call_setup_hook() # allow user to setup lightning_module in accelerator environment
# check if we should delay restoring checkpoint till later
if not self.strategy.restore_checkpoint_after_setup:
log.detail(f"{self.__class__.__name__}: restoring module and callbacks from checkpoint path: {ckpt_path}")
self._restore_modules_and_callbacks(ckpt_path)
log.detail(f"{self.__class__.__name__}: configuring sharded model")
self._call_configure_sharded_model() # allow user to setup in model sharded environment
# ----------------------------
# INSPECT THE CORE LOOPS
# ----------------------------
fr"""
Lightning internal flow looks like this:
{Trainer.fit} or {Trainer.test} or {Trainer.predict} ||
| ||
spawn processes ||
{self.strategy.setup_environment} ||
| ||
setup accelerator ||
and strategy || LIGHTNING
| ||
{self._run_stage} || FLOW
| ||
{self._run_train} || DIRECTION
or {self._run_evaluate} ||
or {self._run_predict} ||
| ||
results \/
This is used to guide readers to the core loops: train, test, predict.
{self._run_predict} is the simplest to understand, use `Go to Definition` to read it :)
"""
# ----------------------------
# TRAIN
# ----------------------------
# reset logger connector
self._logger_connector.reset_results()
self._logger_connector.reset_metrics()
# strategy will configure model and move it to the device
self.strategy.setup(self)
# hook
if self.state.fn == TrainerFn.FITTING:
self._call_callback_hooks("on_fit_start")
self._call_lightning_module_hook("on_fit_start")
self._log_hyperparams()
if self.strategy.restore_checkpoint_after_setup:
log.detail(f"{self.__class__.__name__}: restoring module and callbacks from checkpoint path: {ckpt_path}")
self._restore_modules_and_callbacks(ckpt_path)
# restore optimizers, etc.
log.detail(f"{self.__class__.__name__}: restoring training state")
self._checkpoint_connector.restore_training_state()
self._checkpoint_connector.resume_end()
results = self._run_stage()
log.detail(f"{self.__class__.__name__}: trainer tearing down")
self._teardown()
# ----------------------------
# POST-Training CLEAN UP
# ----------------------------
# hook
if self.state.fn == TrainerFn.FITTING:
self._call_callback_hooks("on_fit_end")
self._call_lightning_module_hook("on_fit_end")
log.detail(f"{self.__class__.__name__}: calling teardown hooks")
self._call_teardown_hook()
self.state.status = TrainerStatus.FINISHED
self.state.stage = None
return results
def _log_hyperparams(self) -> None:
if not self.loggers:
return
# log hyper-parameters
hparams_initial = None
# save exp to get started (this is where the first experiment logs are written)
datamodule_log_hyperparams = self.datamodule._log_hyperparams if self.datamodule is not None else False
if self.lightning_module._log_hyperparams and datamodule_log_hyperparams:
datamodule_hparams = self.datamodule.hparams_initial
lightning_hparams = self.lightning_module.hparams_initial
inconsistent_keys = []
for key in lightning_hparams.keys() & datamodule_hparams.keys():
lm_val, dm_val = lightning_hparams[key], datamodule_hparams[key]
if type(lm_val) != type(dm_val):
inconsistent_keys.append(key)
elif isinstance(lm_val, torch.Tensor) and id(lm_val) != id(dm_val):
inconsistent_keys.append(key)
elif lm_val != dm_val:
inconsistent_keys.append(key)
if inconsistent_keys:
raise MisconfigurationException(
f"Error while merging hparams: the keys {inconsistent_keys} are present "
"in both the LightningModule's and LightningDataModule's hparams "
"but have different values."
)
hparams_initial = {**lightning_hparams, **datamodule_hparams}
elif self.lightning_module._log_hyperparams:
hparams_initial = self.lightning_module.hparams_initial
elif datamodule_log_hyperparams:
hparams_initial = self.datamodule.hparams_initial
for logger in self.loggers:
if hparams_initial is not None:
logger.log_hyperparams(hparams_initial)
logger.log_graph(self.lightning_module)
logger.save()
def _teardown(self):
self.strategy.post_dispatch(self)
self.strategy.teardown()
loop = self._active_loop
# loop should never be `None` here but it can because we don't know the trainer stage with `ddp_spawn`
if loop is not None:
loop.teardown()
self._logger_connector.teardown()
self._signal_connector.teardown()
def run_stage(self) -> None:
rank_zero_deprecation(
"`Trainer.run_stage` is deprecated in v1.6 and will be removed in v1.8. Use"
" `Trainer.{fit,validate,test,predict}` instead."
)
return self._run_stage()
def _run_stage(self):
self.strategy.barrier("run-stage")
self.strategy.dispatch(self)
if self.evaluating:
return self._run_evaluate()
if self.predicting:
return self._run_predict()
return self._run_train()
def _pre_training_routine(self):
self.strategy.barrier("setup_training")
self._signal_connector.register_signal_handlers()
self._call_callback_hooks("on_pretrain_routine_start")
self._call_lightning_module_hook("on_pretrain_routine_start")
self._call_callback_hooks("on_pretrain_routine_end")
self._call_lightning_module_hook("on_pretrain_routine_end")
def _run_train(self) -> None:
self._pre_training_routine()
with isolate_rng():
self._run_sanity_check()
self.model.train()
torch.set_grad_enabled(True)
self.fit_loop.trainer = self
with torch.autograd.set_detect_anomaly(self._detect_anomaly):
self.fit_loop.run()
def _run_evaluate(self) -> _EVALUATE_OUTPUT:
assert self.evaluating
self._evaluation_loop._reload_evaluation_dataloaders()
self._evaluation_loop.trainer = self
with self.profiler.profile(f"run_{self.state.stage}_evaluation"), torch.no_grad():
eval_loop_results = self._evaluation_loop.run()
for result in eval_loop_results:
if isinstance(result, dict):
for k, v in result.items():
if isinstance(v, torch.Tensor):
result[k] = v.cpu().item()
return eval_loop_results
def _run_predict(self) -> Optional[_PREDICT_OUTPUT]:
self.reset_predict_dataloader(self.lightning_module)
self.predict_loop.trainer = self
with torch.no_grad():
return self.predict_loop.run()
def _run_sanity_check(self) -> None:
val_loop = self.fit_loop.epoch_loop.val_loop
should_sanity_check = (
self.enable_validation
and self.num_sanity_val_steps > 0
and not val_loop.restarting
)
if should_sanity_check:
stage = self.state.stage
self.sanity_checking = True
# reset logger connector
self._logger_connector.reset_results()
self._logger_connector.reset_metrics()
self._call_callback_hooks("on_sanity_check_start")
# reload dataloaders
val_loop._reload_evaluation_dataloaders()
self.num_sanity_val_batches = [
min(self.num_sanity_val_steps, val_batches) for val_batches in self.num_val_batches
]
# run eval step
with torch.no_grad():
val_loop.run()
self._call_callback_hooks("on_sanity_check_end")
# reset logger connector
self._logger_connector.reset_results()
self._logger_connector.reset_metrics()
# reset the progress tracking state after sanity checking. we don't need to set the state before
_reset_progress(val_loop)
self.state.stage = stage
def __set_ckpt_path(self, ckpt_path: Optional[str], model_provided: bool, model_connected: bool) -> Optional[str]:
from pytorch_lightning.callbacks.fault_tolerance import _FaultToleranceCheckpoint
ft_checkpoints = [cb for cb in self.callbacks if isinstance(cb, _FaultToleranceCheckpoint)]
if ft_checkpoints:
ft_ckpt_path = ft_checkpoints[0].ckpt_path
fs = get_filesystem(ft_ckpt_path)
if fs.exists(ft_ckpt_path):
return ft_ckpt_path
if model_provided and ckpt_path is None:
return
fn = self.state.fn.value
if model_connected and ckpt_path is None:
            rank_zero_warn(
                f"`.{fn}(ckpt_path=None)` was called without a model."
                " The best model of the previous `fit` call will be used."
                f" You can pass `{fn}(ckpt_path='best')` to use the best model"
                " checkpoint and avoid this warning, or"
                " `ckpt_path=trainer.checkpoint_callback.last_model_path` to use the last model."
            )
ckpt_path = "best"
if ckpt_path == "best":
if len(self.checkpoint_callbacks) > 1:
                rank_zero_warn(
                    f'`.{fn}(ckpt_path="best")` is called with Trainer configured with multiple `ModelCheckpoint`'
                    " callbacks. It will use the best checkpoint path from the first checkpoint callback."
                )
if not self.checkpoint_callback:
raise MisconfigurationException(
f'`.{fn}(ckpt_path="best")` is set but `ModelCheckpoint` is not configured.'
)
if not self.checkpoint_callback.best_model_path:
if self.fast_dev_run:
raise MisconfigurationException(
f'You cannot execute `.{fn}(ckpt_path="best")` with `fast_dev_run=True`.'
f" Please pass an exact checkpoint path to `.{fn}(ckpt_path=...)`"
)
raise MisconfigurationException(
f'`.{fn}(ckpt_path="best")` is set but `ModelCheckpoint` is not configured to save the best model.'
)
ckpt_path = self.checkpoint_callback.best_model_path
if not ckpt_path:
raise MisconfigurationException(
f"`.{fn}()` found no path for the best weights: {ckpt_path!r}. Please"
f" specify a path for a checkpoint `.{fn}(ckpt_path=PATH)`"
)
return ckpt_path
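    # Illustrative sketch (editor-added, not part of the original source): the resolution above lets the
    # evaluation entry points reuse the best checkpoint produced by a previous `fit` call:
    #
    #     trainer.fit(model)
    #     trainer.validate(ckpt_path="best")  # resolved via checkpoint_callback.best_model_path
    #     trainer.test(ckpt_path="best")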
def _call_setup_hook(self) -> None:
fn = self.state.fn._setup_fn
self.strategy.barrier("pre_setup")
if self.datamodule is not None:
self.datamodule.setup(stage=fn)
self._call_callback_hooks("setup", stage=fn)
self._call_lightning_module_hook("setup", stage=fn)
self.strategy.barrier("post_setup")
def _call_configure_sharded_model(self) -> None:
with self.strategy.model_sharded_context():
self._handle_meta_model()
self._call_lightning_module_hook("configure_sharded_model")
self._call_callback_hooks("on_configure_sharded_model")
def _handle_meta_model(self) -> None:
if not is_on_meta_device(self.lightning_module):
return
if isinstance(self.strategy, DDPSpawnStrategy):
raise MisconfigurationException("LightningModule on meta device isn't supported with spawn.")
materialize_module(self.lightning_module)
# the trainer reference is lost during materialization
self.lightning_module.trainer = proxy(self)
def _call_teardown_hook(self) -> None:
fn = self.state.fn._setup_fn
if self.datamodule is not None:
self.datamodule.teardown(stage=fn)
self._call_callback_hooks("teardown", stage=fn)
self._call_lightning_module_hook("teardown", stage=fn)
self.lightning_module._current_fx_name = None
# these could have become stale if metrics are defined in `setup`
self.lightning_module._metric_attributes = None
# todo: TPU 8 cores hangs in flush with TensorBoard. Might do for all loggers.
# It might be related to xla tensors blocked when moving the cpu kill loggers.
for logger in self.loggers:
logger.finalize("success")
# summarize profile results
self.profiler.describe()
def call_hook(
self, hook_name: str, *args: Any, pl_module: Optional["pl.LightningModule"] = None, **kwargs: Any
) -> Any:
rank_zero_deprecation("The Trainer's `call_hook` method was deprecated in v1.6 and will be removed in v1.8.")
pl_module = self.lightning_module or pl_module
if pl_module:
prev_fx_name = pl_module._current_fx_name
pl_module._current_fx_name = hook_name
with self.profiler.profile(hook_name):
callback_fx = getattr(self, hook_name, None)
if callable(callback_fx):
callback_fx(*args, **kwargs)
output = None
model_fx = getattr(pl_module, hook_name, None)
if callable(model_fx):
output = model_fx(*args, **kwargs)
if hook_name not in ("setup", "teardown", "on_train_start") and hasattr(self.strategy, hook_name):
strategy_hook = getattr(self.strategy, hook_name)
strategy_output = strategy_hook(*args, **kwargs)
output = strategy_output if output is None else output
if pl_module:
pl_module._current_fx_name = prev_fx_name
return output
def _call_lightning_module_hook(
self,
hook_name: str,
*args: Any,
pl_module: Optional["pl.LightningModule"] = None,
**kwargs: Any,
) -> Any:
pl_module = pl_module or self.lightning_module
if pl_module is None:
raise TypeError("No Lightning Module is available to call hooks on")
fn = getattr(pl_module, hook_name)
if not callable(fn):
return
prev_fx_name = pl_module._current_fx_name
pl_module._current_fx_name = hook_name
with self.profiler.profile(f"[LightningModule]{pl_module.__class__.__name__}.{hook_name}"):
output = fn(*args, **kwargs)
pl_module._current_fx_name = prev_fx_name
return output
def _call_callback_hooks(
self,
hook_name: str,
*args: Any,
**kwargs: Any,
) -> None:
log.detail(f"{self.__class__.__name__}: calling callback hook: {hook_name}")
if hook_name in ("on_init_start", "on_init_end"):
for callback in self.callbacks:
fn = getattr(callback, hook_name)
if callable(fn):
fn(self, *args, **kwargs)
return
pl_module = self.lightning_module
if pl_module:
prev_fx_name = pl_module._current_fx_name
pl_module._current_fx_name = hook_name
if hook_name == "on_train_batch_start":
with self.profiler.profile(hook_name):
self._on_train_batch_start(*args, **kwargs)
elif hook_name == "on_train_batch_end":
with self.profiler.profile(hook_name):
self._on_train_batch_end(*args, **kwargs)
else:
for callback in self.callbacks:
fn = getattr(callback, hook_name)
if callable(fn):
with self.profiler.profile(f"[Callback]{callback.state_key}.{hook_name}"):
fn(self, self.lightning_module, *args, **kwargs)
if pl_module:
pl_module._current_fx_name = prev_fx_name
    def _on_train_batch_start(self, batch, batch_idx, dataloader_idx=0):
for callback in self.callbacks:
if is_param_in_hook_signature(callback.on_train_batch_start, "dataloader_idx", explicit=True):
callback.on_train_batch_start(self, self.lightning_module, batch, batch_idx, 0)
else:
callback.on_train_batch_start(self, self.lightning_module, batch, batch_idx)
    def _on_train_batch_end(self, outputs: STEP_OUTPUT, batch, batch_idx, dataloader_idx=0):
for callback in self.callbacks:
if is_param_in_hook_signature(callback.on_train_batch_end, "dataloader_idx", explicit=True):
callback.on_train_batch_end(self, self.lightning_module, outputs, batch, batch_idx, 0)
else:
callback.on_train_batch_end(self, self.lightning_module, outputs, batch, batch_idx)
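    # Illustrative sketch (editor-added, not part of the original source): the signature inspection above
    # keeps supporting callbacks that still declare `dataloader_idx` explicitly, e.g.
    #
    #     class OldStyle(Callback):
    #         def on_train_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx):
    #             ...
    #
    #     class NewStyle(Callback):
    #         def on_train_batch_start(self, trainer, pl_module, batch, batch_idx):
    #             ...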
def _call_callbacks_state_dict(self) -> Dict[str, dict]:
callback_state_dicts = {}
for callback in self.callbacks:
state_dict = callback.state_dict()
if state_dict:
callback_state_dicts[callback.state_key] = state_dict
return callback_state_dicts
def _call_callbacks_on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
for callback in self.callbacks:
state = callback.on_save_checkpoint(self, self.lightning_module, checkpoint)
if state:
checkpoint["callbacks"][callback.state_key] = state
def _call_callbacks_on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
callback_states: Dict[Union[Type, str], Dict] = checkpoint.get("callbacks")
if callback_states is None:
return
is_legacy_ckpt = Version(checkpoint["pytorch-lightning_version"]) < Version("1.5.0dev")
current_callbacks_keys = {cb._legacy_state_key if is_legacy_ckpt else cb.state_key for cb in self.callbacks}
difference = callback_states.keys() - current_callbacks_keys
if difference:
rank_zero_warn(
"Be aware that when using `ckpt_path`,"
" callbacks used to create the checkpoint need to be provided during `Trainer` instantiation."
f" Please add the following callbacks: {list(difference)}.",
)
for callback in self.callbacks:
state = callback_states.get(callback.state_key, callback_states.get(callback._legacy_state_key))
if state:
state = deepcopy(state)
callback.on_load_checkpoint(self, self.lightning_module, state)
def _call_callbacks_load_state_dict(self, checkpoint: Dict[str, Any]) -> None:
callback_states: Dict[Union[Type, str], Dict] = checkpoint.get("callbacks")
if callback_states is None:
return
for callback in self.callbacks:
state = callback_states.get(callback.state_key, callback_states.get(callback._legacy_state_key))
if state:
state = deepcopy(state)
callback.load_state_dict(state)
def _call_strategy_hook(
self,
hook_name: str,
*args: Any,
**kwargs: Any,
) -> Any:
pl_module = self.lightning_module
prev_fx_name = pl_module._current_fx_name
pl_module._current_fx_name = hook_name
fn = getattr(self.strategy, hook_name)
if not callable(fn):
return
with self.profiler.profile(f"[Strategy]{self.strategy.__class__.__name__}.{hook_name}"):
output = fn(*args, **kwargs)
pl_module._current_fx_name = prev_fx_name
return output
@staticmethod
def _parse_devices(
gpus: Optional[Union[List[int], str, int]],
auto_select_gpus: bool,
tpu_cores: Optional[Union[List[int], str, int]],
) -> Tuple[Optional[List[int]], Optional[Union[List[int], int]]]:
return device_parser._parse_devices(gpus, auto_select_gpus, tpu_cores)
@staticmethod
def _log_api_event(event: str) -> None:
torch._C._log_api_usage_once("lightning.trainer." + event)
def __init_profiler(self, profiler: Optional[Union[BaseProfiler, str]]) -> None:
if isinstance(profiler, str):
PROFILERS = {
"simple": SimpleProfiler,
"advanced": AdvancedProfiler,
"pytorch": PyTorchProfiler,
"xla": XLAProfiler,
}
profiler = profiler.lower()
if profiler not in PROFILERS:
                raise MisconfigurationException(
                    "When passing a string value for the `profiler` parameter of `Trainer`,"
                    f" it can only be one of {list(PROFILERS.keys())}"
                )
profiler_class = PROFILERS[profiler]
profiler = profiler_class()
self.profiler: BaseProfiler = profiler or PassThroughProfiler()
def __setup_profiler(self) -> None:
local_rank = self.local_rank if self.world_size > 1 else None
self.profiler._lightning_module = proxy(self.lightning_module)
self.profiler.setup(stage=self.state.fn._setup_fn, local_rank=local_rank, log_dir=self.log_dir)
def _log_device_info(self) -> None:
rank_zero_info(
f"GPU available: {torch.cuda.is_available()}, used: {isinstance(self.accelerator, GPUAccelerator)}"
)
num_tpu_cores = (
self.tpu_cores if self.tpu_cores is not None and isinstance(self.accelerator, TPUAccelerator) else 0
)
rank_zero_info(f"TPU available: {_TPU_AVAILABLE}, using: {num_tpu_cores} TPU cores")
num_ipus = self.ipus if self.ipus is not None else 0
rank_zero_info(f"IPU available: {_IPU_AVAILABLE}, using: {num_ipus} IPUs")
if torch.cuda.is_available() and not isinstance(self.accelerator, GPUAccelerator):
rank_zero_warn(
"GPU available but not used. Set `accelerator` and `devices` using"
f" `Trainer(accelerator='gpu', devices={GPUAccelerator.auto_device_count()})`.",
category=PossibleUserWarning,
)
if _TPU_AVAILABLE and not isinstance(self.accelerator, TPUAccelerator):
rank_zero_warn(
"TPU available but not used. Set `accelerator` and `devices` using"
f" `Trainer(accelerator='tpu', devices={TPUAccelerator.auto_device_count()})`."
)
if _IPU_AVAILABLE and not isinstance(self.accelerator, IPUAccelerator):
rank_zero_warn(
"IPU available but not used. Set `accelerator` and `devices` using"
f" `Trainer(accelerator='ipu', devices={IPUAccelerator.auto_device_count()})`."
)
def reset_train_dataloader(self, model: Optional["pl.LightningModule"] = None) -> None:
source = self._data_connector._train_dataloader_source
pl_module = self.lightning_module or model
has_step = is_overridden("training_step", pl_module)
enable_training = self.limit_train_batches > 0
if not (source.is_defined() and has_step and enable_training):
return
self.train_dataloader = self._data_connector._request_dataloader(RunningStage.TRAINING, model=model)
if self.overfit_batches > 0:
self.train_dataloader = self._data_connector._resolve_overfit_batches(self.train_dataloader)
self.train_dataloader = apply_to_collection(
self.train_dataloader,
(DataLoader, CombinedLoader),
self._data_connector._prepare_dataloader,
mode=RunningStage.TRAINING,
)
loaders = (
self.train_dataloader.loaders
if isinstance(self.train_dataloader, CombinedLoader)
else self.train_dataloader
)
apply_to_collection(loaders, DataLoader, self._data_connector._worker_check, "train_dataloader")
apply_to_collection(loaders, DataLoader, _auto_add_worker_init_fn, rank=self.global_rank)
if _fault_tolerant_training():
apply_to_collection(loaders, DataLoader, _add_capture_metadata_collate)
if not isinstance(self.train_dataloader, CombinedLoader):
self.train_dataloader = CombinedLoader(loaders, self._data_connector.multiple_trainloader_mode)
module = model or self.lightning_module or self.datamodule
self.num_training_batches = (
len(self.train_dataloader)
if has_len_all_ranks(self.train_dataloader, self.strategy, module)
else float("inf")
)
if isinstance(self.limit_train_batches, int):
self.num_training_batches = min(self.num_training_batches, int(self.limit_train_batches))
elif self.num_training_batches != float("inf"):
self.num_training_batches = int(self.num_training_batches * self.limit_train_batches)
elif self.limit_train_batches != 1.0:
            raise MisconfigurationException(
                "When using an IterableDataset for `train_dataloader`,"
                " `Trainer(limit_train_batches)` must be `1.0` or an int. An int k specifies"
                " `num_training_batches` to use."
            )
if isinstance(self.val_check_interval, int):
self.val_check_batch = self.val_check_interval
if self.val_check_batch > self.num_training_batches:
                raise ValueError(
                    f"`val_check_interval` ({self.val_check_interval}) must be less than or equal "
                    f"to the number of training batches ({self.num_training_batches}). "
                    "If you want to disable validation, set `limit_val_batches` to 0.0 instead."
                )
else:
if not has_len_all_ranks(self.train_dataloader, self.strategy, module):
if self.val_check_interval == 1.0:
self.val_check_batch = float("inf")
else:
raise MisconfigurationException(
"When using an IterableDataset for `train_dataloader`,"
" `Trainer(val_check_interval)` must be `1.0` or an int. An int k specifies"
" checking validation every k training batches."
)
else:
self.val_check_batch = int(self.num_training_batches * self.val_check_interval)
self.val_check_batch = max(1, self.val_check_batch)
if self.loggers and self.num_training_batches < self.log_every_n_steps:
            rank_zero_warn(
                f"The number of training batches ({self.num_training_batches}) is smaller than the logging interval"
                f" Trainer(log_every_n_steps={self.log_every_n_steps}). Set a lower value for log_every_n_steps if"
                " you want to see logs for the training epoch.",
                category=PossibleUserWarning,
            )
self._last_train_dl_reload_epoch = self.current_epoch
def reset_val_dataloader(self, model: Optional["pl.LightningModule"] = None) -> None:
source = self._data_connector._val_dataloader_source
pl_module = self.lightning_module or model
has_step = is_overridden("validation_step", pl_module)
enable_validation = self.limit_val_batches > 0
if source.is_defined() and has_step and enable_validation:
self.num_val_batches, self.val_dataloaders = self._data_connector._reset_eval_dataloader(
RunningStage.VALIDATING, model=pl_module
)
self._last_val_dl_reload_epoch = self.current_epoch
def reset_test_dataloader(self, model: Optional["pl.LightningModule"] = None) -> None:
source = self._data_connector._test_dataloader_source
pl_module = self.lightning_module or model
has_step = is_overridden("test_step", pl_module)
enable_testing = self.limit_test_batches > 0
if source.is_defined() and has_step and enable_testing:
self.num_test_batches, self.test_dataloaders = self._data_connector._reset_eval_dataloader(
RunningStage.TESTING, model=pl_module
)
def reset_predict_dataloader(self, model: Optional["pl.LightningModule"] = None) -> None:
source = self._data_connector._predict_dataloader_source
pl_module = self.lightning_module or model
enable_prediction = self.limit_predict_batches > 0
if source.is_defined() and enable_prediction:
self.num_predict_batches, self.predict_dataloaders = self._data_connector._reset_eval_dataloader(
RunningStage.PREDICTING, model=pl_module
)
def reset_train_val_dataloaders(self, model: Optional["pl.LightningModule"] = None) -> None:
if self.train_dataloader is None:
self.reset_train_dataloader(model=model)
if self.val_dataloaders is None:
self.reset_val_dataloader(model=model)
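# The reset_* methods above share one guard: a dataloader is rebuilt only when its
# source is defined, the matching *_step hook is overridden on the LightningModule
# (prediction skips this hook check), and the corresponding limit_*_batches is > 0.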
@property
def accelerator(self) -> Accelerator:
return self.strategy.accelerator
@property
def strategy(self) -> Strategy:
return self._accelerator_connector.strategy
@property
def training_type_plugin(self) -> Strategy:
rank_zero_deprecation(
"`Trainer.training_type_plugin` is deprecated in v1.6 and will be removed in v1.8. Use"
" `Trainer.strategy` instead."
)
return self.strategy
@property
def precision_plugin(self) -> PrecisionPlugin:
return self.strategy.precision_plugin
@property
def global_rank(self) -> int:
return self.strategy.global_rank
@property
def local_rank(self) -> int:
return getattr(self.strategy, "local_rank", 0)
@property
def node_rank(self) -> int:
return getattr(self.strategy, "node_rank", 0)
@property
def world_size(self) -> int:
return getattr(self.strategy, "world_size", 1)
@property
def should_rank_save_checkpoint(self) -> bool:
rank_zero_deprecation(
"`Trainer.should_rank_save_checkpoint` is deprecated in v1.6 and will be removed in v1.8.", stacklevel=5
)
strategy = self.strategy
return (
isinstance(strategy, pl.strategies.TPUSpawnStrategy) and strategy.local_rank == 0 or strategy.is_global_zero
)
@property
def num_nodes(self) -> int:
return getattr(self.strategy, "num_nodes", 1)
@property
def device_ids(self) -> List[int]:
devices = getattr(self.strategy, "parallel_devices", [self.strategy.root_device])
device_ids = []
for idx, device in enumerate(devices):
if isinstance(device, torch.device):
device_ids.append(device.index or idx)
elif isinstance(device, int):
device_ids.append(device)
return device_ids
@property
def num_devices(self) -> int:
return len(self.device_ids)
@property
def num_processes(self) -> int:
return self._accelerator_connector.num_processes
@property
def root_gpu(self) -> Optional[int]:
rank_zero_deprecation(
"`Trainer.root_gpu` is deprecated in v1.6 and will be removed in v1.8. "
"Please use `Trainer.strategy.root_device.index` instead."
)
return self.strategy.root_device.index if isinstance(self.accelerator, GPUAccelerator) else None
@property
def tpu_cores(self) -> int:
return self._accelerator_connector.tpu_cores
@property
def ipus(self) -> int:
return self._accelerator_connector.num_ipus
@property
def num_gpus(self) -> int:
rank_zero_deprecation(
"`Trainer.num_gpus` was deprecated in v1.6 and will be removed in v1.8."
" Please use `Trainer.num_devices` instead."
)
return self.num_devices if isinstance(self.accelerator, GPUAccelerator) else 0
@property
def devices(self) -> int:
rank_zero_deprecation(
"`Trainer.devices` was deprecated in v1.6 and will be removed in v1.8."
" Please use `Trainer.num_devices` or `Trainer.device_ids` to get device information instead."
)
return self.num_devices
@property
def data_parallel_device_ids(self) -> Optional[List[int]]:
return (
self._accelerator_connector.parallel_device_ids if self._accelerator_connector.parallel_device_ids else None
)
@property
def lightning_module(self) -> "pl.LightningModule":
return self.strategy.lightning_module
@property
def optimizers(self) -> List[Optimizer]:
return self.strategy.optimizers
@optimizers.setter
def optimizers(self, new_optims: Optional[List[Optimizer]]) -> None:
self.strategy.optimizers = new_optims
@property
def lightning_optimizers(self) -> Dict[int, LightningOptimizer]:
rank_zero_deprecation(
"`Trainer.lightning_optimizers` is deprecated in v1.6 and will be removed in v1.8", stacklevel=5
)
return self.strategy._lightning_optimizers
@property
def lr_scheduler_configs(self) -> List[LRSchedulerConfig]:
return self.strategy.lr_scheduler_configs
@property
def lr_schedulers(self) -> List[Dict[str, Any]]:
rank_zero_deprecation(
"`Trainer.lr_schedulers` is deprecated in v1.6 and will be removed in v1.8."
" You can use `trainer.lr_scheduler_configs` instead which contains dataclasses instead of dictionaries.",
stacklevel=5,
)
from dataclasses import asdict
return [asdict(config) for config in self.strategy.lr_scheduler_configs]
@property
def optimizer_frequencies(self) -> List[int]:
return self.strategy.optimizer_frequencies
@optimizer_frequencies.setter
def optimizer_frequencies(self, new_freqs: List[int]) -> None:
self.strategy.optimizer_frequencies = new_freqs
@property
def amp_backend(self) -> Optional[AMPType]:
if isinstance(self.precision_plugin, ApexMixedPrecisionPlugin):
return AMPType.APEX
if isinstance(self.precision_plugin, NativeMixedPrecisionPlugin):
return AMPType.NATIVE
return None
@property
def precision(self) -> Union[str, int]:
return self.strategy.precision_plugin.precision
@property
def scaler(self) -> Optional[Any]:
return getattr(self.precision_plugin, "scaler", None)
@property
def gpus(self) -> Optional[Union[List[int], str, int]]:
return self._accelerator_connector.gpus
@property
def model(self) -> torch.nn.Module:
return self.strategy.model
@model.setter
def model(self, model: torch.nn.Module) -> None:
self.strategy.model = model
@property
def log_dir(self) -> Optional[str]:
if len(self.loggers) == 1:
if isinstance(self.logger, TensorBoardLogger):
dirpath = self.logger.log_dir
else:
dirpath = self.logger.save_dir
else:
dirpath = self.default_root_dir
dirpath = self.strategy.broadcast(dirpath)
return dirpath
@property
def use_amp(self) -> bool:
rank_zero_deprecation(
"`Trainer.use_amp` is deprecated in v1.6.0 and will be removed in v1.8.0."
" Please use `Trainer.amp_backend` instead."
)
return self.precision == 16
@property
def is_global_zero(self) -> bool:
return self.strategy.is_global_zero
@property
def slurm_job_id(self) -> Optional[int]:
rank_zero_deprecation("Method `slurm_job_id` is deprecated in v1.6.0 and will be removed in v1.7.0.")
return SLURMEnvironment.job_id()
@property
def distributed_sampler_kwargs(self) -> Optional[dict]:
if isinstance(self.strategy, ParallelStrategy):
return self.strategy.distributed_sampler_kwargs
@property
def data_parallel(self) -> bool:
return isinstance(self.strategy, ParallelStrategy)
@property
def progress_bar_dict(self) -> dict:
rank_zero_deprecation(
"`trainer.progress_bar_dict` is deprecated in v1.5 and will be removed in v1.7."
" Use `ProgressBarBase.get_metrics` instead."
)
ref_model = self.lightning_module
ref_model = cast(pl.LightningModule, ref_model)
if self.progress_bar_callback:
return self.progress_bar_callback.get_metrics(self, ref_model)
return self.progress_bar_metrics
@property
def enable_validation(self) -> bool:
return (
self._data_connector._val_dataloader_source.is_defined()
and is_overridden("validation_step", self.lightning_module)
and self.limit_val_batches > 0
)
@property
def default_root_dir(self) -> str:
if get_filesystem(self._default_root_dir).protocol == "file":
return os.path.normpath(self._default_root_dir)
return self._default_root_dir
@property
def weights_save_path(self) -> str:
rank_zero_deprecation("`Trainer.weights_save_path` has been deprecated in v1.6 and will be removed in v1.8.")
return self._weights_save_path_internal
@property
def _weights_save_path_internal(self) -> str:
if get_filesystem(self._weights_save_path).protocol == "file":
return os.path.normpath(self._weights_save_path)
return self._weights_save_path
@property
def early_stopping_callback(self) -> Optional[EarlyStopping]:
callbacks = self.early_stopping_callbacks
return callbacks[0] if len(callbacks) > 0 else None
@property
def early_stopping_callbacks(self) -> List[EarlyStopping]:
return [c for c in self.callbacks if isinstance(c, EarlyStopping)]
@property
def prediction_writer_callbacks(self) -> List[BasePredictionWriter]:
return [cb for cb in self.callbacks if isinstance(cb, BasePredictionWriter)]
@property
def checkpoint_callback(self) -> Optional[ModelCheckpoint]:
callbacks = self.checkpoint_callbacks
return callbacks[0] if len(callbacks) > 0 else None
@property
def checkpoint_callbacks(self) -> List[ModelCheckpoint]:
return [c for c in self.callbacks if isinstance(c, ModelCheckpoint)]
@property
def progress_bar_callback(self) -> Optional[ProgressBarBase]:
for c in self.callbacks:
if isinstance(c, ProgressBarBase):
return c
return None
@property
def resume_from_checkpoint(self) -> Optional[Union[str, Path]]:
resume_from_checkpoint = self._checkpoint_connector.resume_from_checkpoint_fit_path
if resume_from_checkpoint is not None:
rank_zero_deprecation(
"`trainer.resume_from_checkpoint` is deprecated in v1.5 and will be removed in v2.0."
" Specify the fit checkpoint path with `trainer.fit(ckpt_path=)` instead.",
stacklevel=5,
)
return resume_from_checkpoint
@property
def ckpt_path(self) -> Optional[str]:
return self._ckpt_path
@property
def validated_ckpt_path(self) -> Optional[str]:
rank_zero_deprecation(
"The `Trainer.validated_ckpt_path` attribute was deprecated in v1.6 and will be removed in v1.8. The"
" path of a checkpoint loaded via `Trainer.{fit,validate,test,predict}` should be accessed via"
" `Trainer.ckpt_path` instead.",
stacklevel=5,
)
return self._validated_ckpt_path
@validated_ckpt_path.setter
def validated_ckpt_path(self, ckpt_path: Optional[str]) -> None:
rank_zero_deprecation(
"The `Trainer.validated_ckpt_path` attribute was deprecated in v1.6 and will be removed in v1.8. The"
" path of a checkpoint loaded via `Trainer.{fit,validate,test,predict}` should be accessed via the"
" read-only `Trainer.ckpt_path`.",
stacklevel=5,
)
self._validated_ckpt_path = ckpt_path
@property
def tested_ckpt_path(self) -> Optional[str]:
rank_zero_deprecation(
"The `Trainer.tested_ckpt_path` attribute was deprecated in v1.6 and will be removed in v1.8. The"
" path of a checkpoint loaded via `Trainer.{fit,validate,test,predict}` should be accessed via"
" `Trainer.ckpt_path` instead.",
stacklevel=5,
)
return self._tested_ckpt_path
@tested_ckpt_path.setter
def tested_ckpt_path(self, ckpt_path: Optional[str]) -> None:
rank_zero_deprecation(
"The `Trainer.tested_ckpt_path` attribute was deprecated in v1.6 and will be removed in v1.8. The"
" path of a checkpoint loaded via `Trainer.{fit,validate,test,predict}` should be accessed via the"
" read-only `Trainer.ckpt_path` instead.",
stacklevel=5,
)
self._tested_ckpt_path = ckpt_path
@property
def predicted_ckpt_path(self) -> Optional[str]:
rank_zero_deprecation(
"The `Trainer.predicted_ckpt_path` attribute was deprecated in v1.6 and will be removed in v1.8. The"
" path of a checkpoint loaded via `Trainer.{fit,validate,test,predict}` should be accessed via"
" `Trainer.ckpt_path` instead.",
stacklevel=5,
)
return self._predicted_ckpt_path
@predicted_ckpt_path.setter
def predicted_ckpt_path(self, ckpt_path: Optional[str]) -> None:
rank_zero_deprecation(
"The `Trainer.predicted_ckpt_path` attribute was deprecated in v1.6 and will be removed in v1.8. The"
" path of a checkpoint loaded via `Trainer.{fit,validate,test,predict}` should be accessed via the"
" read-only `Trainer.ckpt_path` instead.",
stacklevel=5,
)
self._predicted_ckpt_path = ckpt_path
def save_checkpoint(
self, filepath: _PATH, weights_only: bool = False, storage_options: Optional[Any] = None
) -> None:
self._checkpoint_connector.save_checkpoint(filepath, weights_only=weights_only, storage_options=storage_options)
@classmethod
def default_attributes(cls) -> dict:
init_signature = inspect.signature(cls)
return {k: v.default for k, v in init_signature.parameters.items()}
@classmethod
def get_deprecated_arg_names(cls) -> List:
depr_arg_names = []
for name, val in cls.__dict__.items():
if name.startswith("DEPRECATED") and isinstance(val, (tuple, list)):
depr_arg_names.extend(val)
return depr_arg_names
@classmethod
def from_argparse_args(cls: Any, args: Union[Namespace, ArgumentParser], **kwargs) -> Any:
return from_argparse_args(cls, args, **kwargs)
@classmethod
def parse_argparser(cls, arg_parser: Union[ArgumentParser, Namespace]) -> Namespace:
return parse_argparser(cls, arg_parser)
@classmethod
def match_env_arguments(cls) -> Namespace:
return parse_env_variables(cls)
@classmethod
def add_argparse_args(cls, parent_parser: ArgumentParser, **kwargs) -> ArgumentParser:
return add_argparse_args(cls, parent_parser, **kwargs)
@property
def interrupted(self) -> bool:
return self.state.status == TrainerStatus.INTERRUPTED
@property
def training(self) -> bool:
return self.state.stage == RunningStage.TRAINING
@training.setter
def training(self, val: bool) -> None:
if val:
self.state.stage = RunningStage.TRAINING
elif self.training:
self.state.stage = None
@property
def testing(self) -> bool:
return self.state.stage == RunningStage.TESTING
@testing.setter
def testing(self, val: bool) -> None:
if val:
self.state.stage = RunningStage.TESTING
elif self.testing:
self.state.stage = None
@property
def predicting(self) -> bool:
return self.state.stage == RunningStage.PREDICTING
@predicting.setter
def predicting(self, val: bool) -> None:
if val:
self.state.stage = RunningStage.PREDICTING
elif self.predicting:
self.state.stage = None
@property
def tuning(self) -> bool:
return self.state.stage == RunningStage.TUNING
@tuning.setter
def tuning(self, val: bool) -> None:
if val:
self.state.stage = RunningStage.TUNING
elif self.tuning:
self.state.stage = None
@property
def validating(self) -> bool:
return self.state.stage == RunningStage.VALIDATING
@validating.setter
def validating(self, val: bool) -> None:
if val:
self.state.stage = RunningStage.VALIDATING
elif self.validating:
self.state.stage = None
@property
def evaluating(self) -> bool:
return self.state.stage and self.state.stage.evaluating
@property
def sanity_checking(self) -> bool:
return self.state.stage == RunningStage.SANITY_CHECKING
@sanity_checking.setter
def sanity_checking(self, val: bool) -> None:
if val:
self.state.stage = RunningStage.SANITY_CHECKING
elif self.sanity_checking:
self.state.stage = None
@property
def global_step(self) -> int:
return self.fit_loop.epoch_loop.global_step
@property
def current_epoch(self) -> int:
return self.fit_loop.epoch_progress.current.completed
@property
def max_epochs(self) -> int:
return self.fit_loop.max_epochs
@property
def min_epochs(self) -> int:
return self.fit_loop.min_epochs
@property
def max_steps(self) -> int:
return self.fit_loop.max_steps
@property
def min_steps(self) -> Optional[int]:
return self.fit_loop.min_steps
@property
def is_last_batch(self) -> bool:
return self.fit_loop.epoch_loop.batch_progress.is_last_batch
@property
def fit_loop(self) -> FitLoop:
return self._fit_loop
@fit_loop.setter
def fit_loop(self, loop: FitLoop):
loop.trainer = self
self._fit_loop = loop
@property
def validate_loop(self) -> EvaluationLoop:
return self._validate_loop
@validate_loop.setter
def validate_loop(self, loop: EvaluationLoop):
loop.trainer = self
self._validate_loop = loop
@property
def test_loop(self) -> EvaluationLoop:
return self._test_loop
@test_loop.setter
def test_loop(self, loop: EvaluationLoop):
loop.trainer = self
self._test_loop = loop
@property
def predict_loop(self) -> PredictionLoop:
return self._predict_loop
@predict_loop.setter
def predict_loop(self, loop: PredictionLoop):
loop.trainer = self
self._predict_loop = loop
@property
def verbose_evaluate(self) -> bool:
rank_zero_deprecation(
"The `Trainer.verbose_evaluate` property has been deprecated and will be removed in v1.8. The current value"
" returned is the union of the validate and test loop values. You can choose which one to access with"
" `trainer.{validate,test}_loop.verbose`.",
stacklevel=5,
)
return self.validate_loop.verbose or self.test_loop.verbose
@verbose_evaluate.setter
def verbose_evaluate(self, verbose: bool) -> None:
rank_zero_deprecation(
"The `Trainer.verbose_evaluate` property has been deprecated and will be removed in v1.8. This will set"
" the value for both trainer.{validate,test}_loop.verbose`.",
stacklevel=5,
)
self.validate_loop.verbose = verbose
self.test_loop.verbose = verbose
@property
def _evaluation_loop(self) -> EvaluationLoop:
if self.state.fn in (TrainerFn.FITTING, TrainerFn.TUNING):
return self.fit_loop.epoch_loop.val_loop
if self.state.fn == TrainerFn.VALIDATING:
return self.validate_loop
if self.state.fn == TrainerFn.TESTING:
return self.test_loop
raise RuntimeError("The `Trainer._evaluation_loop` property isn't defined. Accessed outside of scope")
@property
def _active_loop(self) -> Optional[Union[FitLoop, EvaluationLoop, PredictionLoop]]:
if self.training:
return self.fit_loop
if self.sanity_checking or self.evaluating:
return self._evaluation_loop
if self.predicting:
return self.predict_loop
@property
def logger(self) -> Optional[LightningLoggerBase]:
if len(self.loggers) == 0:
return None
if len(self.loggers) == 1:
return self.loggers[0]
else:
rank_zero_warn(
"Using trainer.logger when Trainer is configured to use multiple loggers."
" This behavior will change in v1.8 when LoggerCollection is removed, and"
" trainer.logger will return the first logger in trainer.loggers"
)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return LoggerCollection(self.loggers)
@logger.setter
def logger(self, logger: Optional[LightningLoggerBase]) -> None:
if not logger:
self.loggers = []
elif isinstance(logger, LoggerCollection):
self.loggers = list(logger)
else:
self.loggers = [logger]
@property
def loggers(self) -> List[LightningLoggerBase]:
return self._loggers
@loggers.setter
def loggers(self, loggers: Optional[List[LightningLoggerBase]]) -> None:
self._loggers = loggers if loggers else []
@property
def callback_metrics(self) -> dict:
return self._logger_connector.callback_metrics
@property
def logged_metrics(self) -> dict:
return self._logger_connector.logged_metrics
@property
def progress_bar_metrics(self) -> dict:
return self._logger_connector.progress_bar_metrics
@property
def _results(self) -> Optional[_ResultCollection]:
active_loop = self._active_loop
if active_loop is not None:
return active_loop._results
def _exit_gracefully_on_signal(self) -> None:
if not _fault_tolerant_training() or not self._should_terminate_gracefully():
return
raise ExitGracefullyException(0)
def _should_terminate_gracefully(self) -> bool:
value = torch.tensor(int(self._terminate_gracefully), device=self.strategy.root_device)
return self.strategy.reduce(value, reduce_op="sum") > 0
@property
def weights_summary(self) -> Optional[str]:
rank_zero_deprecation("`Trainer.weights_summary` is deprecated in v1.5 and will be removed in v1.7.")
return self._weights_summary
@weights_summary.setter
def weights_summary(self, val: Optional[str]) -> None:
rank_zero_deprecation("Setting `Trainer.weights_summary` is deprecated in v1.5 and will be removed in v1.7.")
self._weights_summary = val
@property
def estimated_stepping_batches(self) -> Union[int, float]:
accumulation_scheduler = self.accumulation_scheduler
if accumulation_scheduler.epochs != [0]:
raise MisconfigurationException(
"Estimated stepping batches cannot be computed with different"
" `accumulate_grad_batches` at different epochs."
)
# infinite training
if self.max_epochs == -1 and self.max_steps == -1:
return float("inf")
if self.train_dataloader is None:
rank_zero_info("Loading `train_dataloader` to estimate number of stepping batches.")
self.reset_train_dataloader()
total_batches = self.num_training_batches
# iterable dataset
if total_batches == float("inf"):
return self.max_steps
self.accumulate_grad_batches = accumulation_scheduler.get_accumulate_grad_batches(self.current_epoch)
effective_batch_size = self.accumulate_grad_batches
max_estimated_steps = math.ceil(total_batches / effective_batch_size) * max(self.max_epochs, 1)
max_estimated_steps = min(max_estimated_steps, self.max_steps) if self.max_steps != -1 else max_estimated_steps
return max_estimated_steps
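# For example (illustrative numbers): with 1000 training batches, accumulate_grad_batches=4
# and max_epochs=10, the estimate is ceil(1000 / 4) * 10 = 2500 stepping batches, further
# capped by max_steps when max_steps != -1; with an IterableDataset the property returns
# max_steps directly.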
@property
def terminate_on_nan(self) -> bool:
rank_zero_deprecation("`Trainer.terminate_on_nan` is deprecated in v1.5 and will be removed in 1.7.")
return self._terminate_on_nan
@terminate_on_nan.setter
def terminate_on_nan(self, val: bool) -> None:
rank_zero_deprecation(
f"Setting `Trainer.terminate_on_nan = {val}` is deprecated in v1.5 and will be removed in 1.7."
f" Please set `Trainer(detect_anomaly={val})` instead."
)
self._terminate_on_nan = val
def _determine_batch_limits(batches: Optional[Union[int, float]], name: str) -> Union[int, float]:
if batches is None:
# batches is optional to know if the user passed a value so that we can show the above info messages only to the
# users that set a value explicitly
return 1.0
# differentiating based on the type can be error-prone for users. show a message describing the chosen behaviour
if isinstance(batches, int) and batches == 1:
if name == "limit_train_batches":
message = "1 batch per epoch will be used."
elif name == "val_check_interval":
message = "validation will run after every batch."
else:
message = "1 batch will be used."
rank_zero_info(f"`Trainer({name}=1)` was configured so {message}")
elif isinstance(batches, float) and batches == 1.0:
if name == "limit_train_batches":
message = "100% of the batches per epoch will be used."
elif name == "val_check_interval":
message = "validation will run at the end of the training epoch."
else:
message = "100% of the batches will be used."
rank_zero_info(f"`Trainer({name}=1.0)` was configured so {message}.")
if 0 <= batches <= 1:
return batches
if batches > 1 and batches % 1.0 == 0:
return int(batches)
raise MisconfigurationException(
f"You have passed invalid value {batches} for {name}, it has to be in [0.0, 1.0] or an int."
)
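# Behaviour of _determine_batch_limits, for illustration:
#   _determine_batch_limits(None, "limit_train_batches")  -> 1.0  (no user value)
#   _determine_batch_limits(0.25, "limit_val_batches")    -> 0.25 (fraction)
#   _determine_batch_limits(5, "limit_train_batches")     -> 5    (absolute count)
#   _determine_batch_limits(2.5, "limit_train_batches")   -> MisconfigurationException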
| true
| true
|
f718e1316edf5968cf316a2a3fea7d8e4a2d96be
| 2,331
|
py
|
Python
|
torch_glow/tests/nodes/add_test.py
|
saranyakrish14/glow
|
3562fba6a77d2bb4aacf98a5bff5a737a93f6adc
|
[
"Apache-2.0"
] | null | null | null |
torch_glow/tests/nodes/add_test.py
|
saranyakrish14/glow
|
3562fba6a77d2bb4aacf98a5bff5a737a93f6adc
|
[
"Apache-2.0"
] | null | null | null |
torch_glow/tests/nodes/add_test.py
|
saranyakrish14/glow
|
3562fba6a77d2bb4aacf98a5bff5a737a93f6adc
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleAddModule(torch.nn.Module):
def __init__(self, inplace=False):
super(SimpleAddModule, self).__init__()
self.inplace = inplace
def forward(self, a, b):
if b.size() == torch.Size([]):
return (a * a).add(b.item())
if self.inplace:
c = a.add_(b)
return c.add_(c)
else:
c = a.add(b)
return c.add(c)
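# Quick sanity check of the module above (illustrative values):
#   m = SimpleAddModule()
#   m(torch.ones(2), torch.ones(2))      # (1 + 1) + (1 + 1) -> tensor([4., 4.])
#   m(torch.ones(2), torch.tensor(3.0))  # 0-dim b: (1 * 1).add(3.0) -> tensor([4., 4.])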
class TestAdd(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("basic", SimpleAddModule(), torch.randn(4), torch.randn(4)),
lambda: ("inplace", SimpleAddModule(True), torch.randn(4), torch.randn(4)),
lambda: (
"broadcast",
SimpleAddModule(),
torch.randn(8, 3, 4, 2),
torch.randn(4, 2),
),
lambda: (
"broadcast",
SimpleAddModule(),
torch.randn(8, 3, 4, 2),
torch.randn(1, 2),
),
lambda: (
"broadcast",
SimpleAddModule(),
torch.randn(4, 2),
torch.randn(8, 3, 4, 2),
),
lambda: ("float", SimpleAddModule(), torch.randn(4), torch.tensor(1.2345)),
lambda: (
"float_and_int",
SimpleAddModule(),
torch.randn(4),
torch.tensor(42),
True,
),
lambda: (
"int32",
SimpleAddModule(),
torch.torch.randint(-10, 10, (2, 4), dtype=torch.int32),
torch.torch.randint(-10, 10, (2, 4), dtype=torch.int32),
),
lambda: (
"int64",
SimpleAddModule(),
torch.torch.randint(-10, 10, (2, 4), dtype=torch.int64),
torch.torch.randint(-10, 10, (2, 4), dtype=torch.int64),
),
]
)
def test_add(self, _, module, a, b, skip_to_glow=False):
utils.run_comparison_tests(
module,
(a, b),
fusible_ops={"aten::add_"} if module.inplace else {"aten::add"},
)
| 31.5
| 87
| 0.464178
|
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleAddModule(torch.nn.Module):
def __init__(self, inplace=False):
super(SimpleAddModule, self).__init__()
self.inplace = inplace
def forward(self, a, b):
if b.size() == torch.Size([]):
return (a * a).add(b.item())
if self.inplace:
c = a.add_(b)
return c.add_(c)
else:
c = a.add(b)
return c.add(c)
class TestAdd(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("basic", SimpleAddModule(), torch.randn(4), torch.randn(4)),
lambda: ("inplace", SimpleAddModule(True), torch.randn(4), torch.randn(4)),
lambda: (
"broadcast",
SimpleAddModule(),
torch.randn(8, 3, 4, 2),
torch.randn(4, 2),
),
lambda: (
"broadcast",
SimpleAddModule(),
torch.randn(8, 3, 4, 2),
torch.randn(1, 2),
),
lambda: (
"broadcast",
SimpleAddModule(),
torch.randn(4, 2),
torch.randn(8, 3, 4, 2),
),
lambda: ("float", SimpleAddModule(), torch.randn(4), torch.tensor(1.2345)),
lambda: (
"float_and_int",
SimpleAddModule(),
torch.randn(4),
torch.tensor(42),
True,
),
lambda: (
"int32",
SimpleAddModule(),
torch.torch.randint(-10, 10, (2, 4), dtype=torch.int32),
torch.torch.randint(-10, 10, (2, 4), dtype=torch.int32),
),
lambda: (
"int64",
SimpleAddModule(),
torch.torch.randint(-10, 10, (2, 4), dtype=torch.int64),
torch.torch.randint(-10, 10, (2, 4), dtype=torch.int64),
),
]
)
def test_add(self, _, module, a, b, skip_to_glow=False):
utils.run_comparison_tests(
module,
(a, b),
fusible_ops={"aten::add_"} if module.inplace else {"aten::add"},
)
| true
| true
|
f718e21d6e596548dd45b16aaf36234cd6ca2bb8
| 6,655
|
py
|
Python
|
encoder.py
|
kuangliu/pytorch-ssd
|
02ed1cbe6962e791895ab1c455dc5ddfb87291b9
|
[
"MIT"
] | 124
|
2017-02-16T01:53:14.000Z
|
2022-02-22T12:48:13.000Z
|
encoder.py
|
droogg/pytorch-ssd
|
02ed1cbe6962e791895ab1c455dc5ddfb87291b9
|
[
"MIT"
] | 10
|
2017-07-04T01:38:56.000Z
|
2021-08-03T09:34:34.000Z
|
encoder.py
|
droogg/pytorch-ssd
|
02ed1cbe6962e791895ab1c455dc5ddfb87291b9
|
[
"MIT"
] | 43
|
2017-07-31T10:46:23.000Z
|
2021-02-16T14:12:42.000Z
|
'''Encode target locations and labels.'''
import torch
import math
import itertools
class DataEncoder:
def __init__(self):
'''Compute default box sizes with scale and aspect transform.'''
scale = 300.
steps = [s / scale for s in (8, 16, 32, 64, 100, 300)]
sizes = [s / scale for s in (30, 60, 111, 162, 213, 264, 315)]
aspect_ratios = ((2,), (2,3), (2,3), (2,3), (2,), (2,))
feature_map_sizes = (38, 19, 10, 5, 3, 1)
num_layers = len(feature_map_sizes)
boxes = []
for i in range(num_layers):
fmsize = feature_map_sizes[i]
for h,w in itertools.product(range(fmsize), repeat=2):
cx = (w + 0.5)*steps[i]
cy = (h + 0.5)*steps[i]
s = sizes[i]
boxes.append((cx, cy, s, s))
s = math.sqrt(sizes[i] * sizes[i+1])
boxes.append((cx, cy, s, s))
s = sizes[i]
for ar in aspect_ratios[i]:
boxes.append((cx, cy, s * math.sqrt(ar), s / math.sqrt(ar)))
boxes.append((cx, cy, s / math.sqrt(ar), s * math.sqrt(ar)))
self.default_boxes = torch.Tensor(boxes)
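# Each grid cell gets 2 square boxes plus 2 boxes per aspect ratio, i.e. 4, 6, 6, 6, 4, 4
# boxes for the six feature maps, so in total 38^2*4 + 19^2*6 + 10^2*6 + 5^2*6 + 3^2*4
# + 1*4 = 8732 default boxes.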
def iou(self, box1, box2):
'''Compute the intersection over union of two set of boxes, each box is [x1,y1,x2,y2].
Args:
box1: (tensor) bounding boxes, sized [N,4].
box2: (tensor) bounding boxes, sized [M,4].
Return:
(tensor) iou, sized [N,M].
'''
N = box1.size(0)
M = box2.size(0)
lt = torch.max(
box1[:,:2].unsqueeze(1).expand(N,M,2), # [N,2] -> [N,1,2] -> [N,M,2]
box2[:,:2].unsqueeze(0).expand(N,M,2), # [M,2] -> [1,M,2] -> [N,M,2]
)
rb = torch.min(
box1[:,2:].unsqueeze(1).expand(N,M,2), # [N,2] -> [N,1,2] -> [N,M,2]
box2[:,2:].unsqueeze(0).expand(N,M,2), # [M,2] -> [1,M,2] -> [N,M,2]
)
wh = rb - lt # [N,M,2]
wh[wh<0] = 0 # clip at 0
inter = wh[:,:,0] * wh[:,:,1] # [N,M]
area1 = (box1[:,2]-box1[:,0]) * (box1[:,3]-box1[:,1]) # [N,]
area2 = (box2[:,2]-box2[:,0]) * (box2[:,3]-box2[:,1]) # [M,]
area1 = area1.unsqueeze(1).expand_as(inter) # [N,] -> [N,1] -> [N,M]
area2 = area2.unsqueeze(0).expand_as(inter) # [M,] -> [1,M] -> [N,M]
iou = inter / (area1 + area2 - inter)
return iou
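# Worked example: for box1 = [[0, 0, 2, 2]] and box2 = [[1, 0, 3, 2]] the intersection
# is 1 * 2 = 2 and the union is 4 + 4 - 2 = 6, so iou = 2 / 6 = 1/3.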
def encode(self, boxes, classes, threshold=0.5):
'''Transform target bounding boxes and class labels to SSD boxes and classes.
Match each object box to all the default boxes, pick the ones with the
Jaccard-Index > 0.5:
Jaccard(A,B) = AB / (A+B-AB)
Args:
boxes: (tensor) object bounding boxes (xmin,ymin,xmax,ymax) of a image, sized [#obj, 4].
classes: (tensor) object class labels of a image, sized [#obj,].
threshold: (float) Jaccard index threshold
Returns:
boxes: (tensor) bounding boxes, sized [#obj, 8732, 4].
classes: (tensor) class labels, sized [8732,]
'''
default_boxes = self.default_boxes
num_default_boxes = default_boxes.size(0)
num_objs = boxes.size(0)
iou = self.iou( # [#obj,8732]
boxes,
torch.cat([default_boxes[:,:2] - default_boxes[:,2:]/2,
default_boxes[:,:2] + default_boxes[:,2:]/2], 1)
)
iou, max_idx = iou.max(0) # [1,8732]
max_idx.squeeze_(0) # [8732,]
iou.squeeze_(0) # [8732,]
boxes = boxes[max_idx] # [8732,4]
variances = [0.1, 0.2]
cxcy = (boxes[:,:2] + boxes[:,2:])/2 - default_boxes[:,:2] # [8732,2]
cxcy /= variances[0] * default_boxes[:,2:]
wh = (boxes[:,2:] - boxes[:,:2]) / default_boxes[:,2:] # [8732,2]
wh = torch.log(wh) / variances[1]
loc = torch.cat([cxcy, wh], 1) # [8732,4]
conf = 1 + classes[max_idx] # [8732,], background class = 0
conf[iou<threshold] = 0 # background
return loc, conf
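# The targets follow the usual SSD parameterization: center offsets are normalized by
# the default-box size and variance 0.1, width/height use a log ratio divided by
# variance 0.2, and class 0 is reserved for background (default boxes with IoU below
# the threshold).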
def nms(self, bboxes, scores, threshold=0.5, mode='union'):
'''Non maximum suppression.
Args:
bboxes: (tensor) bounding boxes, sized [N,4].
scores: (tensor) bbox scores, sized [N,].
threshold: (float) overlap threshold.
mode: (str) 'union' or 'min'.
Returns:
keep: (tensor) selected indices.
Ref:
https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/nms/py_cpu_nms.py
'''
x1 = bboxes[:,0]
y1 = bboxes[:,1]
x2 = bboxes[:,2]
y2 = bboxes[:,3]
areas = (x2-x1) * (y2-y1)
_, order = scores.sort(0, descending=True)
keep = []
while order.numel() > 0:
i = order[0]
keep.append(i)
if order.numel() == 1:
break
xx1 = x1[order[1:]].clamp(min=x1[i])
yy1 = y1[order[1:]].clamp(min=y1[i])
xx2 = x2[order[1:]].clamp(max=x2[i])
yy2 = y2[order[1:]].clamp(max=y2[i])
w = (xx2-xx1).clamp(min=0)
h = (yy2-yy1).clamp(min=0)
inter = w*h
if mode == 'union':
ovr = inter / (areas[i] + areas[order[1:]] - inter)
elif mode == 'min':
ovr = inter / areas[order[1:]].clamp(max=areas[i])
else:
raise TypeError('Unknown nms mode: %s.' % mode)
ids = (ovr<=threshold).nonzero().squeeze()
if ids.numel() == 0:
break
order = order[ids+1]
return torch.LongTensor(keep)
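# This is greedy NMS: keep the highest-scoring remaining box, drop every box whose
# overlap with it exceeds `threshold`, and repeat; 'union' normalizes the intersection
# by the union area, 'min' by the smaller of the two box areas.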
def decode(self, loc, conf):
'''Transform predicted loc/conf back to real bbox locations and class labels.
Args:
loc: (tensor) predicted loc, sized [8732,4].
conf: (tensor) predicted conf, sized [8732,21].
Returns:
boxes: (tensor) bbox locations, sized [#obj, 4].
labels: (tensor) class labels, sized [#obj,1].
'''
variances = [0.1, 0.2]
wh = torch.exp(loc[:,2:]*variances[1]) * self.default_boxes[:,2:]
cxcy = loc[:,:2] * variances[0] * self.default_boxes[:,2:] + self.default_boxes[:,:2]
boxes = torch.cat([cxcy-wh/2, cxcy+wh/2], 1) # [8732,4]
max_conf, labels = conf.max(1) # [8732,1]
ids = labels.squeeze(1).nonzero().squeeze(1) # [#boxes,]
keep = self.nms(boxes[ids], max_conf[ids].squeeze(1))
return boxes[ids][keep], labels[ids][keep], max_conf[ids][keep]
| 35.21164
| 98
| 0.494365
|
import torch
import math
import itertools
class DataEncoder:
def __init__(self):
scale = 300.
steps = [s / scale for s in (8, 16, 32, 64, 100, 300)]
sizes = [s / scale for s in (30, 60, 111, 162, 213, 264, 315)]
aspect_ratios = ((2,), (2,3), (2,3), (2,3), (2,), (2,))
feature_map_sizes = (38, 19, 10, 5, 3, 1)
num_layers = len(feature_map_sizes)
boxes = []
for i in range(num_layers):
fmsize = feature_map_sizes[i]
for h,w in itertools.product(range(fmsize), repeat=2):
cx = (w + 0.5)*steps[i]
cy = (h + 0.5)*steps[i]
s = sizes[i]
boxes.append((cx, cy, s, s))
s = math.sqrt(sizes[i] * sizes[i+1])
boxes.append((cx, cy, s, s))
s = sizes[i]
for ar in aspect_ratios[i]:
boxes.append((cx, cy, s * math.sqrt(ar), s / math.sqrt(ar)))
boxes.append((cx, cy, s / math.sqrt(ar), s * math.sqrt(ar)))
self.default_boxes = torch.Tensor(boxes)
def iou(self, box1, box2):
N = box1.size(0)
M = box2.size(0)
lt = torch.max(
box1[:,:2].unsqueeze(1).expand(N,M,2),
box2[:,:2].unsqueeze(0).expand(N,M,2),
)
rb = torch.min(
box1[:,2:].unsqueeze(1).expand(N,M,2),
box2[:,2:].unsqueeze(0).expand(N,M,2),
)
wh = rb - lt
wh[wh<0] = 0
inter = wh[:,:,0] * wh[:,:,1]
area1 = (box1[:,2]-box1[:,0]) * (box1[:,3]-box1[:,1])
area2 = (box2[:,2]-box2[:,0]) * (box2[:,3]-box2[:,1])
area1 = area1.unsqueeze(1).expand_as(inter)
area2 = area2.unsqueeze(0).expand_as(inter)
iou = inter / (area1 + area2 - inter)
return iou
def encode(self, boxes, classes, threshold=0.5):
default_boxes = self.default_boxes
num_default_boxes = default_boxes.size(0)
num_objs = boxes.size(0)
iou = self.iou( boxes,
torch.cat([default_boxes[:,:2] - default_boxes[:,2:]/2,
default_boxes[:,:2] + default_boxes[:,2:]/2], 1)
)
iou, max_idx = iou.max(0)
max_idx.squeeze_(0)
iou.squeeze_(0)
boxes = boxes[max_idx]
variances = [0.1, 0.2]
cxcy = (boxes[:,:2] + boxes[:,2:])/2 - default_boxes[:,:2]
cxcy /= variances[0] * default_boxes[:,2:]
wh = (boxes[:,2:] - boxes[:,:2]) / default_boxes[:,2:]
wh = torch.log(wh) / variances[1]
loc = torch.cat([cxcy, wh], 1)
conf = 1 + classes[max_idx]
conf[iou<threshold] = 0
return loc, conf
def nms(self, bboxes, scores, threshold=0.5, mode='union'):
x1 = bboxes[:,0]
y1 = bboxes[:,1]
x2 = bboxes[:,2]
y2 = bboxes[:,3]
areas = (x2-x1) * (y2-y1)
_, order = scores.sort(0, descending=True)
keep = []
while order.numel() > 0:
i = order[0]
keep.append(i)
if order.numel() == 1:
break
xx1 = x1[order[1:]].clamp(min=x1[i])
yy1 = y1[order[1:]].clamp(min=y1[i])
xx2 = x2[order[1:]].clamp(max=x2[i])
yy2 = y2[order[1:]].clamp(max=y2[i])
w = (xx2-xx1).clamp(min=0)
h = (yy2-yy1).clamp(min=0)
inter = w*h
if mode == 'union':
ovr = inter / (areas[i] + areas[order[1:]] - inter)
elif mode == 'min':
ovr = inter / areas[order[1:]].clamp(max=areas[i])
else:
raise TypeError('Unknown nms mode: %s.' % mode)
ids = (ovr<=threshold).nonzero().squeeze()
if ids.numel() == 0:
break
order = order[ids+1]
return torch.LongTensor(keep)
def decode(self, loc, conf):
variances = [0.1, 0.2]
wh = torch.exp(loc[:,2:]*variances[1]) * self.default_boxes[:,2:]
cxcy = loc[:,:2] * variances[0] * self.default_boxes[:,2:] + self.default_boxes[:,:2]
boxes = torch.cat([cxcy-wh/2, cxcy+wh/2], 1)
max_conf, labels = conf.max(1)
ids = labels.squeeze(1).nonzero().squeeze(1)
keep = self.nms(boxes[ids], max_conf[ids].squeeze(1))
return boxes[ids][keep], labels[ids][keep], max_conf[ids][keep]
| true
| true
|
f718e377e05cbba7bfafea8603bd1a95b92d24ae
| 8,948
|
py
|
Python
|
src/third_party/wiredtiger/test/suite/test_bulk01.py
|
danx0r/mongo
|
70d4944c235bcdf7fbbc63971099563d2af72956
|
[
"Apache-2.0"
] | 72
|
2020-06-12T06:33:41.000Z
|
2021-03-22T03:15:56.000Z
|
src/third_party/wiredtiger/test/suite/test_bulk01.py
|
danx0r/mongo
|
70d4944c235bcdf7fbbc63971099563d2af72956
|
[
"Apache-2.0"
] | 9
|
2020-07-02T09:36:49.000Z
|
2021-03-25T23:54:00.000Z
|
src/third_party/wiredtiger/test/suite/test_bulk01.py
|
danx0r/mongo
|
70d4944c235bcdf7fbbc63971099563d2af72956
|
[
"Apache-2.0"
] | 14
|
2020-06-12T03:08:03.000Z
|
2021-02-03T11:43:09.000Z
|
#!/usr/bin/env python
#
# Public Domain 2014-2018 MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# test_bulk.py
# bulk-cursor test.
#
import wiredtiger, wttest
from wtdataset import SimpleDataSet, simple_key, simple_value
from wtscenario import make_scenarios
# Smoke test bulk-load.
class test_bulk_load(wttest.WiredTigerTestCase):
name = 'test_bulk'
types = [
('file', dict(type='file:')),
('table', dict(type='table:'))
]
keyfmt = [
('integer', dict(keyfmt='i')),
('recno', dict(keyfmt='r')),
('string', dict(keyfmt='S')),
]
valfmt = [
('fixed', dict(valfmt='8t')),
('integer', dict(valfmt='i')),
('string', dict(valfmt='S')),
]
scenarios = make_scenarios(types, keyfmt, valfmt)
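# make_scenarios crosses the lists above, so each test below runs under
# 2 types x 3 key formats x 3 value formats = 18 scenarios.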
# Test a simple bulk-load
def test_bulk_load(self):
uri = self.type + self.name
self.session.create(uri,
'key_format=' + self.keyfmt + ',value_format=' + self.valfmt)
cursor = self.session.open_cursor(uri, None, "bulk")
for i in range(1, 1000):
cursor[simple_key(cursor, i)] = simple_value(cursor, i)
# Test a bulk-load triggers variable-length column-store RLE correctly.
def test_bulk_load_var_rle(self):
if self.keyfmt != 'r' or self.valfmt == '8t':
return
# We can't directly test RLE, it's internal to WiredTiger. However,
# diagnostic builds catch records that should have been RLE compressed,
# but weren't, so setting matching values should be sufficient.
uri = self.type + self.name
self.session.create(uri,
'key_format=' + self.keyfmt + ',value_format=' + self.valfmt)
cursor = self.session.open_cursor(uri, None, "bulk")
for i in range(1, 1000):
cursor[simple_key(cursor, i)] = simple_value(cursor, i/7)
# Test a bulk-load variable-length column-store append ignores any key.
def test_bulk_load_var_append(self):
if self.keyfmt != 'r':
return
uri = self.type + self.name
self.session.create(uri,
'key_format=' + self.keyfmt + ',value_format=' + self.valfmt)
cursor = self.session.open_cursor(uri, None, "bulk,append")
for i in range(1, 1000):
cursor[simple_key(cursor, 37)] = simple_value(cursor, i)
cursor.close()
cursor = self.session.open_cursor(uri, None, None)
for i in range(1, 1000):
cursor.set_key(simple_key(cursor, i))
cursor.search()
self.assertEqual(cursor.get_value(), simple_value(cursor, i))
# Test that column-store bulk-load handles skipped records correctly.
def test_bulk_load_col_delete(self):
if self.keyfmt != 'r':
return
uri = self.type + self.name
self.session.create(uri,
'key_format=' + self.keyfmt + ',value_format=' + self.valfmt)
cursor = self.session.open_cursor(uri, None, "bulk")
for i in range(1, 1000):
if i % 7 == 0:
cursor[simple_key(cursor, i)] = simple_value(cursor, i)
# Ensure we create all the missing records.
i = i + 1
cursor[simple_key(cursor, i)] = simple_value(cursor, i)
cursor.close()
cursor = self.session.open_cursor(uri, None, None)
# Verify all the records are there, in their proper state.
for i in range(1, 1000):
cursor.set_key(simple_key(cursor, i))
if i % 7 == 0:
cursor.search()
self.assertEqual(cursor.get_value(), simple_value(cursor, i))
elif cursor.value_format == '8t':
cursor.search()
self.assertEqual(cursor.get_value(), 0)
else:
self.assertEqual(cursor.search(), wiredtiger.WT_NOTFOUND)
# Test that variable-length column-store bulk-load efficiently creates big
# records.
def test_bulk_load_col_big(self):
if self.keyfmt != 'r' or self.valfmt == '8t':
return
uri = self.type + self.name
self.session.create(uri,
'key_format=' + self.keyfmt + ',value_format=' + self.valfmt)
cursor = self.session.open_cursor(uri, None, "bulk")
for i in range(1, 10):
cursor[simple_key(cursor, i)] = simple_value(cursor, i)
# A big record -- if it's not efficient, we'll just hang.
big = 18446744073709551606
cursor[simple_key(cursor, big)] = simple_value(cursor, big)
cursor.close()
cursor = self.session.open_cursor(uri, None, None)
cursor.set_key(simple_key(cursor, big))
cursor.search()
self.assertEqual(cursor.get_value(), simple_value(cursor, big))
# Test that bulk-load out-of-order fails.
def test_bulk_load_order_check(self):
uri = self.type + self.name
self.session.create(uri,
'key_format=' + self.keyfmt + ',value_format=' + self.valfmt)
cursor = self.session.open_cursor(uri, None, "bulk")
cursor[simple_key(cursor, 10)] = simple_value(cursor, 10)
for i in [1, 9, 10]:
cursor.set_key(simple_key(cursor, 1))
cursor.set_value(simple_value(cursor, 1))
msg = '/than previously inserted key/'
self.assertRaisesWithMessage(
wiredtiger.WiredTigerError, lambda: cursor.insert(), msg)
cursor[simple_key(cursor, 11)] = simple_value(cursor, 11)
# Test that row-store bulk-load out-of-order can succeed.
def test_bulk_load_row_order_nocheck(self):
# Row-store offers an optional fast-path that skips the relatively
# expensive key-order checks, used when the input is known to be
# correct. Column-store comparisons are cheap, so it doesn't have
# that fast-path support.
if self.keyfmt != 'S':
return
uri = self.type + self.name
self.session.create(uri,
'key_format=' + self.keyfmt + ',value_format=' + self.valfmt)
cursor = self.session.open_cursor(uri, None, "bulk,skip_sort_check")
cursor[simple_key(cursor, 10)] = simple_value(cursor, 10)
cursor[simple_key(cursor, 1)] = simple_value(cursor, 1)
if not wiredtiger.diagnostic_build():
self.skipTest('requires a diagnostic build')
# Close explicitly, there's going to be a failure.
msg = '/are incorrectly sorted/'
self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
lambda: self.conn.close(), msg)
# Test bulk-load only permitted on newly created objects.
def test_bulk_load_not_empty(self):
uri = self.type + self.name
self.session.create(uri, 'key_format=S,value_format=S')
cursor = self.session.open_cursor(uri, None)
cursor[simple_key(cursor, 1)] = simple_value(cursor, 1)
# Close the insert cursor, else we'll get EBUSY.
cursor.close()
msg = '/bulk-load is only supported on newly created objects/'
self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
lambda: self.session.open_cursor(uri, None, "bulk"), msg)
# Test that bulk-load objects cannot be opened by other cursors.
def test_bulk_load_busy(self):
uri = self.type + self.name
self.session.create(uri, 'key_format=S,value_format=S')
cursor = self.session.open_cursor(uri, None)
cursor[simple_key(cursor, 1)] = simple_value(cursor, 1)
# Don't close the insert cursor, we want EBUSY.
self.assertRaises(wiredtiger.WiredTigerError,
lambda: self.session.open_cursor(uri, None, "bulk"))
if __name__ == '__main__':
wttest.run()
| 40.858447
| 79
| 0.63679
|
import wiredtiger, wttest
from wtdataset import SimpleDataSet, simple_key, simple_value
from wtscenario import make_scenarios
class test_bulk_load(wttest.WiredTigerTestCase):
name = 'test_bulk'
types = [
('file', dict(type='file:')),
('table', dict(type='table:'))
]
keyfmt = [
('integer', dict(keyfmt='i')),
('recno', dict(keyfmt='r')),
('string', dict(keyfmt='S')),
]
valfmt = [
('fixed', dict(valfmt='8t')),
('integer', dict(valfmt='i')),
('string', dict(valfmt='S')),
]
scenarios = make_scenarios(types, keyfmt, valfmt)
def test_bulk_load(self):
uri = self.type + self.name
self.session.create(uri,
'key_format=' + self.keyfmt + ',value_format=' + self.valfmt)
cursor = self.session.open_cursor(uri, None, "bulk")
for i in range(1, 1000):
cursor[simple_key(cursor, i)] = simple_value(cursor, i)
def test_bulk_load_var_rle(self):
if self.keyfmt != 'r' or self.valfmt == '8t':
return
uri = self.type + self.name
self.session.create(uri,
'key_format=' + self.keyfmt + ',value_format=' + self.valfmt)
cursor = self.session.open_cursor(uri, None, "bulk")
for i in range(1, 1000):
cursor[simple_key(cursor, i)] = simple_value(cursor, i/7)
# Test a bulk-load variable-length column-store append ignores any key.
def test_bulk_load_var_append(self):
if self.keyfmt != 'r':
return
uri = self.type + self.name
self.session.create(uri,
'key_format=' + self.keyfmt + ',value_format=' + self.valfmt)
cursor = self.session.open_cursor(uri, None, "bulk,append")
for i in range(1, 1000):
cursor[simple_key(cursor, 37)] = simple_value(cursor, i)
cursor.close()
cursor = self.session.open_cursor(uri, None, None)
for i in range(1, 1000):
cursor.set_key(simple_key(cursor, i))
cursor.search()
self.assertEqual(cursor.get_value(), simple_value(cursor, i))
# Test that column-store bulk-load handles skipped records correctly.
def test_bulk_load_col_delete(self):
if self.keyfmt != 'r':
return
uri = self.type + self.name
self.session.create(uri,
'key_format=' + self.keyfmt + ',value_format=' + self.valfmt)
cursor = self.session.open_cursor(uri, None, "bulk")
for i in range(1, 1000):
if i % 7 == 0:
cursor[simple_key(cursor, i)] = simple_value(cursor, i)
# Ensure we create all the missing records.
i = i + 1
cursor[simple_key(cursor, i)] = simple_value(cursor, i)
cursor.close()
cursor = self.session.open_cursor(uri, None, None)
# Verify all the records are there, in their proper state.
for i in range(1, 1000):
cursor.set_key(simple_key(cursor, i))
if i % 7 == 0:
cursor.search()
self.assertEqual(cursor.get_value(), simple_value(cursor, i))
elif cursor.value_format == '8t':
cursor.search()
self.assertEqual(cursor.get_value(), 0)
else:
self.assertEqual(cursor.search(), wiredtiger.WT_NOTFOUND)
# Test that variable-length column-store bulk-load efficiently creates big
# records.
def test_bulk_load_col_big(self):
if self.keyfmt != 'r' or self.valfmt == '8t':
return
uri = self.type + self.name
self.session.create(uri,
'key_format=' + self.keyfmt + ',value_format=' + self.valfmt)
cursor = self.session.open_cursor(uri, None, "bulk")
for i in range(1, 10):
cursor[simple_key(cursor, i)] = simple_value(cursor, i)
# A big record -- if it's not efficient, we'll just hang.
big = 18446744073709551606
cursor[simple_key(cursor, big)] = simple_value(cursor, big)
cursor.close()
cursor = self.session.open_cursor(uri, None, None)
cursor.set_key(simple_key(cursor, big))
cursor.search()
self.assertEqual(cursor.get_value(), simple_value(cursor, big))
# Test that bulk-load out-of-order fails.
def test_bulk_load_order_check(self):
uri = self.type + self.name
self.session.create(uri,
'key_format=' + self.keyfmt + ',value_format=' + self.valfmt)
cursor = self.session.open_cursor(uri, None, "bulk")
cursor[simple_key(cursor, 10)] = simple_value(cursor, 10)
for i in [1, 9, 10]:
cursor.set_key(simple_key(cursor, 1))
cursor.set_value(simple_value(cursor, 1))
msg = '/than previously inserted key/'
self.assertRaisesWithMessage(
wiredtiger.WiredTigerError, lambda: cursor.insert(), msg)
cursor[simple_key(cursor, 11)] = simple_value(cursor, 11)
# Test that row-store bulk-load out-of-order can succeed.
def test_bulk_load_row_order_nocheck(self):
# Row-store offers an optional fast-path that skips the relatively
# expensive key-order checks, used when the input is known to be
# correct. Column-store comparisons are cheap, so it doesn't have
# that fast-path support.
if self.keyfmt != 'S':
return
uri = self.type + self.name
self.session.create(uri,
'key_format=' + self.keyfmt + ',value_format=' + self.valfmt)
cursor = self.session.open_cursor(uri, None, "bulk,skip_sort_check")
cursor[simple_key(cursor, 10)] = simple_value(cursor, 10)
cursor[simple_key(cursor, 1)] = simple_value(cursor, 1)
if not wiredtiger.diagnostic_build():
self.skipTest('requires a diagnostic build')
msg = '/are incorrectly sorted/'
self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
lambda: self.conn.close(), msg)
# Test bulk-load only permitted on newly created objects.
def test_bulk_load_not_empty(self):
uri = self.type + self.name
self.session.create(uri, 'key_format=S,value_format=S')
cursor = self.session.open_cursor(uri, None)
cursor[simple_key(cursor, 1)] = simple_value(cursor, 1)
# Close the insert cursor, else we'll get EBUSY.
cursor.close()
msg = '/bulk-load is only supported on newly created objects/'
self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
lambda: self.session.open_cursor(uri, None, "bulk"), msg)
def test_bulk_load_busy(self):
uri = self.type + self.name
self.session.create(uri, 'key_format=S,value_format=S')
cursor = self.session.open_cursor(uri, None)
cursor[simple_key(cursor, 1)] = simple_value(cursor, 1)
self.assertRaises(wiredtiger.WiredTigerError,
lambda: self.session.open_cursor(uri, None, "bulk"))
if __name__ == '__main__':
wttest.run()
| true
| true
|
f718e400a830f367fac94079177fd69a5f120546
| 2,523
|
py
|
Python
|
platemapgenerator_calccompsforsingleplate.py
|
johnmgregoire/JCAPGeneratePrintCode
|
afc1dbe6125d0024a46889011ab653ed24016fe4
|
[
"BSD-3-Clause"
] | null | null | null |
platemapgenerator_calccompsforsingleplate.py
|
johnmgregoire/JCAPGeneratePrintCode
|
afc1dbe6125d0024a46889011ab653ed24016fe4
|
[
"BSD-3-Clause"
] | null | null | null |
platemapgenerator_calccompsforsingleplate.py
|
johnmgregoire/JCAPGeneratePrintCode
|
afc1dbe6125d0024a46889011ab653ed24016fe4
|
[
"BSD-3-Clause"
] | null | null | null |
import time, copy, pickle
import os, os.path
import sys
import numpy, pylab
sys.path.append('C:/Users/Gregoire/Documents/PythonCode/JCAP')
from readplatemap import *
modelpath='C:/Users/Gregoire/Documents/CaltechWork/platemaps/Quaternarysingleplate/plate333_1map_full.txt'
newpath='C:/Users/Gregoire/Documents/CaltechWork/platemaps/Quaternarysingleplate/plate20intervwbin.txt'
writelines=[]
f=open(modelpath, mode='r')
ls=f.readlines()[:2]
writelines+=[l.strip() for l in ls]
f.close()
dlist=readsingleplatemaptxt(modelpath, returnfiducials=False)
dlistsrc=readplatemaptxt(codes=[0, 1, 2, 3])
smpsrc=numpy.array([d['Sample'] for d in dlistsrc])
codesrc=numpy.array([d['code'] for d in dlistsrc])
intervs=20
comps=[[1.0*b/intervs, 1.0*c/intervs, 1.0*(intervs-a-b-c)/intervs, 1.0*a/intervs] for a in numpy.arange(0,intervs+1)[::-1] for b in numpy.arange(0,intervs+1-a) for c in numpy.arange(0, intervs+1-a-b)][::-1]
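# The comprehension above enumerates every quaternary composition on a simplex grid with
# spacing 1/intervs; for intervs=20 that is C(23,3) = 1771 points. The binary lines and
# filler zeros added below pad the list to 1819 entries, one per code-0 position in the
# source platemap.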
def genbinarycomps(intervs, elind1, elind2, ndim=4):
aa=numpy.linspace(0.,1.,intervs+1)
c=numpy.zeros((len(aa), ndim), dtype='float64')
c[:, elind1]=aa
c[:, elind2]=1.-aa
return c
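# For example, genbinarycomps(5, 0, 2) returns a (6, 4) array in which column 0 runs
# 0.0 -> 1.0 in steps of 0.2, column 2 runs 1.0 -> 0.0, and the other columns stay zero.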
comps2=comps
codes=[0]*len(comps)
binintervs=5
for i, j in [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]:
comps2+=list(genbinarycomps(binintervs, i, j))+[numpy.zeros(4, dtype='float64')] #add 6 compositions in binary line and then zeros
codes+=[4]*6+[1]
comps2+=[numpy.zeros(4, dtype='float64')]*6 #6 more zeros to round out the 1819 code0 samples in a standard platemap
codes+=[1]*6
comps2=[numpy.array(c) for c in comps2]
comps2pop=copy.copy(comps2)
codespop=copy.copy(codes)
for d in dlist:
if d['code']==0:
c=comps2pop.pop(0)
cd=codespop.pop(0)
for k, v in zip(['A', 'B', 'C', 'D'], c):
d[k]=v
d['code']=cd
k_f=[\
('Sample','%04d'),\
('x','%.2f'),\
('y','%.2f'),\
('dx','%.2f'),\
('dy','%.2f'),\
('A','%.3f'),\
('B','%.3f'),\
('C','%.3f'),\
('D','%.3f'),\
('E','%.3f'),\
('F','%.3f'),\
('G','%.3f'),\
('H','%.3f'),\
('code','%d'),\
]
writelines+=[', '.join([f %d[k] for k, f in k_f]) for d in dlist]
f=open(newpath, mode='w')
f.write('\n'.join(writelines))
f.close()
sys.path.append('C:/Users/Gregoire/Documents/PythonCode/ternaryplot')
from myquaternaryutility import QuaternaryPlot
for d in dlist:
c=numpy.array([d[el] for el in ['A', 'B', 'C', 'D']])
if c.sum()>0:
c/=c.sum()
d['compositions']=c
carr=numpy.array([d['compositions'] for d in dlist])
stpq=QuaternaryPlot(111)
stpq.scatter(carr)
pylab.show()
| 27.725275
| 206
| 0.634958
|
import time, copy, pickle
import os, os.path
import sys
import numpy, pylab
sys.path.append('C:/Users/Gregoire/Documents/PythonCode/JCAP')
from readplatemap import *
modelpath='C:/Users/Gregoire/Documents/CaltechWork/platemaps/Quaternarysingleplate/plate333_1map_full.txt'
newpath='C:/Users/Gregoire/Documents/CaltechWork/platemaps/Quaternarysingleplate/plate20intervwbin.txt'
writelines=[]
f=open(modelpath, mode='r')
ls=f.readlines()[:2]
writelines+=[l.strip() for l in ls]
f.close()
dlist=readsingleplatemaptxt(modelpath, returnfiducials=False)
dlistsrc=readplatemaptxt(codes=[0, 1, 2, 3])
smpsrc=numpy.array([d['Sample'] for d in dlistsrc])
codesrc=numpy.array([d['code'] for d in dlistsrc])
intervs=20
comps=[[1.0*b/intervs, 1.0*c/intervs, 1.0*(intervs-a-b-c)/intervs, 1.0*a/intervs] for a in numpy.arange(0,intervs+1)[::-1] for b in numpy.arange(0,intervs+1-a) for c in numpy.arange(0, intervs+1-a-b)][::-1]
def genbinarycomps(intervs, elind1, elind2, ndim=4):
aa=numpy.linspace(0.,1.,intervs+1)
c=numpy.zeros((len(aa), ndim), dtype='float64')
c[:, elind1]=aa
c[:, elind2]=1.-aa
return c
comps2=comps
codes=[0]*len(comps)
binintervs=5
for i, j in [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]:
comps2+=list(genbinarycomps(binintervs, i, j))+[numpy.zeros(4, dtype='float64')]
codes+=[4]*6+[1]
comps2+=[numpy.zeros(4, dtype='float64')]*6
codes+=[1]*6
comps2=[numpy.array(c) for c in comps2]
comps2pop=copy.copy(comps2)
codespop=copy.copy(codes)
for d in dlist:
if d['code']==0:
c=comps2pop.pop(0)
cd=codespop.pop(0)
for k, v in zip(['A', 'B', 'C', 'D'], c):
d[k]=v
d['code']=cd
k_f=[\
('Sample','%04d'),\
('x','%.2f'),\
('y','%.2f'),\
('dx','%.2f'),\
('dy','%.2f'),\
('A','%.3f'),\
('B','%.3f'),\
('C','%.3f'),\
('D','%.3f'),\
('E','%.3f'),\
('F','%.3f'),\
('G','%.3f'),\
('H','%.3f'),\
('code','%d'),\
]
writelines+=[', '.join([f %d[k] for k, f in k_f]) for d in dlist]
f=open(newpath, mode='w')
f.write('\n'.join(writelines))
f.close()
sys.path.append('C:/Users/Gregoire/Documents/PythonCode/ternaryplot')
from myquaternaryutility import QuaternaryPlot
for d in dlist:
c=numpy.array([d[el] for el in ['A', 'B', 'C', 'D']])
if c.sum()>0:
c/=c.sum()
d['compositions']=c
carr=numpy.array([d['compositions'] for d in dlist])
stpq=QuaternaryPlot(111)
stpq.scatter(carr)
pylab.show()
| true
| true
|
f718e4068584530c0e91c11baa692efddfe5fe53
| 9,392
|
py
|
Python
|
syne_tune/optimizer/schedulers/searchers/bayesopt/utils/test_objects.py
|
hfurkanbozkurt/syne-tune
|
05ee2668f0155b40c3ee3b61e4b3d58f3f9f3c4f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
syne_tune/optimizer/schedulers/searchers/bayesopt/utils/test_objects.py
|
hfurkanbozkurt/syne-tune
|
05ee2668f0155b40c3ee3b61e4b3d58f3f9f3c4f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
syne_tune/optimizer/schedulers/searchers/bayesopt/utils/test_objects.py
|
hfurkanbozkurt/syne-tune
|
05ee2668f0155b40c3ee3b61e4b3d58f3f9f3c4f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Could eventually remove this code: Is this needed in unit tests?
"""
Object definitions that are used for testing.
"""
from typing import Iterator, Tuple, Dict, List, Optional, Union
import numpy as np
from syne_tune.optimizer.schedulers.searchers.bayesopt.datatypes.common \
import Hyperparameter, Configuration, dictionarize_objective
from syne_tune.config_space import Categorical, loguniform, randint, \
choice, uniform
from syne_tune.optimizer.schedulers.searchers.bayesopt.datatypes.hp_ranges \
import HyperparameterRanges
from syne_tune.optimizer.schedulers.searchers.bayesopt.datatypes.hp_ranges_factory \
import make_hyperparameter_ranges
from syne_tune.optimizer.schedulers.searchers.bayesopt.datatypes.tuning_job_state \
import TuningJobState
from syne_tune.optimizer.schedulers.searchers.bayesopt.datatypes.common import \
TrialEvaluations, PendingEvaluation
from syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.constants \
import MCMCConfig, OptimizationConfig
from syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.gp_regression \
import GaussianProcessRegression
from syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.gpr_mcmc \
import GPRegressionMCMC
from syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.kernel \
import Matern52, KernelFunction
from syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.warping \
import WarpedKernel, Warping
from syne_tune.optimizer.schedulers.searchers.bayesopt.tuning_algorithms.base_classes \
import CandidateGenerator
from syne_tune.optimizer.schedulers.searchers.bayesopt.tuning_algorithms.common \
import ExclusionList
def build_kernel(state: TuningJobState,
do_warping: bool = False) -> KernelFunction:
dims, warping_ranges = dimensionality_and_warping_ranges(state.hp_ranges)
kernel = Matern52(dims, ARD=True)
if do_warping:
return WarpedKernel(
kernel=kernel, warping=Warping(dims, warping_ranges))
else:
return kernel
def default_gpmodel(
state: TuningJobState, random_seed: int,
optimization_config: OptimizationConfig) -> GaussianProcessRegression:
return GaussianProcessRegression(
kernel=build_kernel(state),
optimization_config=optimization_config,
random_seed=random_seed
)
def default_gpmodel_mcmc(
state: TuningJobState, random_seed: int,
mcmc_config: MCMCConfig) -> GPRegressionMCMC:
return GPRegressionMCMC(
build_kernel=lambda: build_kernel(state),
mcmc_config=mcmc_config,
random_seed=random_seed
)
def dimensionality_and_warping_ranges(hp_ranges: HyperparameterRanges) -> \
Tuple[int, Dict[int, Tuple[float, float]]]:
lower_config = dict()
upper_config = dict()
for name, hp_range in hp_ranges.config_space.items():
if not isinstance(hp_range, Categorical):
lower_config[name] = hp_range.lower
upper_config[name] = hp_range.upper
else:
lower_config[name] = hp_range.categories[0]
upper_config[name] = hp_range.categories[0]
lower_internal = hp_ranges.to_ndarray(lower_config)
upper_internal = hp_ranges.to_ndarray(upper_config)
dims = 0
warping_ranges = dict()
for name in hp_ranges.internal_keys:
hp_range = hp_ranges.config_space[name]
if not isinstance(hp_range, Categorical):
_lower = lower_internal[dims]
_upper = upper_internal[dims]
if _upper > _lower: # exclude cases where max equal to min
warping_ranges[dims] = (_lower, _upper)
else:
assert _lower == _upper
dims += 1
else:
# For binary, we use a single dimension, not 2
sz = len(hp_range.categories)
if sz == 2:
sz = 1
dims += sz
return dims, warping_ranges
class RepeatedCandidateGenerator(CandidateGenerator):
"""Generates candidates from a fixed set. Used to test the deduplication logic."""
def __init__(self, n_unique_candidates: int):
self.config_space = {
'a': uniform(0, n_unique_candidates),
'b': randint(0, n_unique_candidates),
'c': choice([f"value_{i}" for i in range(n_unique_candidates)])}
self.hp_ranges = make_hyperparameter_ranges(self.config_space)
self.all_unique_candidates = [
{'a': 1.0*j, 'b': j, 'c': f"value_{j}"}
for j in range(n_unique_candidates)]
def generate_candidates(self) -> Iterator[Configuration]:
i = 0
while True:
i += 1
yield self.all_unique_candidates[i % len(self.all_unique_candidates)]
# Example black box function, with adjustable location of global minimum.
# Potentially could catch issues with the optimizer, e.g. if the optimizer
# somehow ignores candidates on the edge of the search space.
# A simple quadratic function is used.
class Quadratic3d:
def __init__(self, local_minima, active_metric, metric_names):
        # local_minima: point where the local minimum is located
self.local_minima = np.array(local_minima).astype('float')
self.local_minima[0] = np.log10(self.local_minima[0])
self.active_metric = active_metric
self.metric_names = metric_names
@property
def search_space(self):
config_space = {
'x': loguniform(1.0, 100.0),
'y': randint(0, 2),
'z': choice(['0.0', '1.0', '2.0'])}
return make_hyperparameter_ranges(config_space)
@property
def f_min(self):
return 0.0
def __call__(self, candidate):
p = np.array([float(hp) for hp in candidate])
p[0] = np.log10(p[0])
return dictionarize_objective(np.sum((self.local_minima - p) ** 2))
def tuples_to_configs(config_tpls: List[Tuple[Hyperparameter, ...]],
hp_ranges: HyperparameterRanges) -> List[Configuration]:
"""
Many unit tests write configs as tuples.
"""
return [hp_ranges.tuple_to_config(x) for x in config_tpls]
def create_exclusion_set(
candidates_tpl, hp_ranges: HyperparameterRanges,
is_dict: bool = False) -> ExclusionList:
"""
Creates exclusion list from set of tuples.
"""
if not is_dict:
candidates_tpl = tuples_to_configs(candidates_tpl, hp_ranges)
config_for_trial = {
str(trial_id): config for trial_id, config in enumerate(candidates_tpl)}
state = TuningJobState(
hp_ranges=hp_ranges,
config_for_trial=config_for_trial,
trials_evaluations=[],
failed_trials=[str(x) for x in range(len(candidates_tpl))])
return ExclusionList(state)
TupleOrDict = Union[tuple, dict]
def create_tuning_job_state(
hp_ranges: HyperparameterRanges, cand_tuples: List[TupleOrDict],
metrics: List[Dict],
pending_tuples: Optional[List[TupleOrDict]] = None,
failed_tuples: Optional[List[TupleOrDict]] = None) -> TuningJobState:
"""
Builds `TuningJobState` from basics, where configs are given as tuples or
as dicts.
NOTE: We assume that all configs in the different lists are different!
"""
if cand_tuples and isinstance(cand_tuples[0], tuple):
configs = tuples_to_configs(cand_tuples, hp_ranges)
else:
configs = cand_tuples
trials_evaluations = [TrialEvaluations(trial_id=str(trial_id), metrics=y)
for trial_id, y in enumerate(metrics)]
pending_evaluations = None
if pending_tuples is not None:
sz = len(configs)
extra = len(pending_tuples)
if pending_tuples and isinstance(pending_tuples[0], tuple):
extra_configs = tuples_to_configs(pending_tuples, hp_ranges)
else:
extra_configs = pending_tuples
configs.extend(extra_configs)
pending_evaluations = [PendingEvaluation(trial_id=str(trial_id))
for trial_id in range(sz, sz + extra)]
failed_trials = None
if failed_tuples is not None:
sz = len(configs)
extra = len(failed_tuples)
if failed_tuples and isinstance(failed_tuples[0], tuple):
extra_configs = tuples_to_configs(failed_tuples, hp_ranges)
else:
extra_configs = failed_tuples
configs.extend(extra_configs)
failed_trials = [str(x) for x in range(sz, sz + extra)]
config_for_trial = {
str(trial_id): config for trial_id, config in enumerate(configs)}
return TuningJobState(
hp_ranges=hp_ranges,
config_for_trial=config_for_trial,
trials_evaluations=trials_evaluations,
failed_trials=failed_trials,
pending_evaluations=pending_evaluations)
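# --- Illustrative sketch (not part of the original module) ---------------------
# Shows how the helpers above might be combined in a unit test. The config
# space, configurations and metric values below are made up for illustration.
def _example_tuning_job_state() -> TuningJobState:
    config_space = {
        'lr': loguniform(1e-4, 1e-1),
        'batch_size': choice(['32', '64'])}
    hp_ranges = make_hyperparameter_ranges(config_space)
    # Two observed configs (given as dicts) with their metric values
    cand_configs = [{'lr': 1e-3, 'batch_size': '32'},
                    {'lr': 1e-2, 'batch_size': '64'}]
    metrics = [dictionarize_objective(0.42), dictionarize_objective(0.37)]
    # One suggested config whose evaluation is still pending
    pending_configs = [{'lr': 5e-3, 'batch_size': '64'}]
    return create_tuning_job_state(
        hp_ranges, cand_configs, metrics, pending_tuples=pending_configs)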
| 38.650206
| 87
| 0.693463
|
from typing import Iterator, Tuple, Dict, List, Optional, Union
import numpy as np
from syne_tune.optimizer.schedulers.searchers.bayesopt.datatypes.common \
import Hyperparameter, Configuration, dictionarize_objective
from syne_tune.config_space import Categorical, loguniform, randint, \
choice, uniform
from syne_tune.optimizer.schedulers.searchers.bayesopt.datatypes.hp_ranges \
import HyperparameterRanges
from syne_tune.optimizer.schedulers.searchers.bayesopt.datatypes.hp_ranges_factory \
import make_hyperparameter_ranges
from syne_tune.optimizer.schedulers.searchers.bayesopt.datatypes.tuning_job_state \
import TuningJobState
from syne_tune.optimizer.schedulers.searchers.bayesopt.datatypes.common import \
TrialEvaluations, PendingEvaluation
from syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.constants \
import MCMCConfig, OptimizationConfig
from syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.gp_regression \
import GaussianProcessRegression
from syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.gpr_mcmc \
import GPRegressionMCMC
from syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.kernel \
import Matern52, KernelFunction
from syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.warping \
import WarpedKernel, Warping
from syne_tune.optimizer.schedulers.searchers.bayesopt.tuning_algorithms.base_classes \
import CandidateGenerator
from syne_tune.optimizer.schedulers.searchers.bayesopt.tuning_algorithms.common \
import ExclusionList
def build_kernel(state: TuningJobState,
do_warping: bool = False) -> KernelFunction:
dims, warping_ranges = dimensionality_and_warping_ranges(state.hp_ranges)
kernel = Matern52(dims, ARD=True)
if do_warping:
return WarpedKernel(
kernel=kernel, warping=Warping(dims, warping_ranges))
else:
return kernel
def default_gpmodel(
state: TuningJobState, random_seed: int,
optimization_config: OptimizationConfig) -> GaussianProcessRegression:
return GaussianProcessRegression(
kernel=build_kernel(state),
optimization_config=optimization_config,
random_seed=random_seed
)
def default_gpmodel_mcmc(
state: TuningJobState, random_seed: int,
mcmc_config: MCMCConfig) -> GPRegressionMCMC:
return GPRegressionMCMC(
build_kernel=lambda: build_kernel(state),
mcmc_config=mcmc_config,
random_seed=random_seed
)
def dimensionality_and_warping_ranges(hp_ranges: HyperparameterRanges) -> \
Tuple[int, Dict[int, Tuple[float, float]]]:
lower_config = dict()
upper_config = dict()
for name, hp_range in hp_ranges.config_space.items():
if not isinstance(hp_range, Categorical):
lower_config[name] = hp_range.lower
upper_config[name] = hp_range.upper
else:
lower_config[name] = hp_range.categories[0]
upper_config[name] = hp_range.categories[0]
lower_internal = hp_ranges.to_ndarray(lower_config)
upper_internal = hp_ranges.to_ndarray(upper_config)
dims = 0
warping_ranges = dict()
for name in hp_ranges.internal_keys:
hp_range = hp_ranges.config_space[name]
if not isinstance(hp_range, Categorical):
_lower = lower_internal[dims]
_upper = upper_internal[dims]
if _upper > _lower:
warping_ranges[dims] = (_lower, _upper)
else:
assert _lower == _upper
dims += 1
else:
sz = len(hp_range.categories)
if sz == 2:
sz = 1
dims += sz
return dims, warping_ranges
class RepeatedCandidateGenerator(CandidateGenerator):
def __init__(self, n_unique_candidates: int):
self.config_space = {
'a': uniform(0, n_unique_candidates),
'b': randint(0, n_unique_candidates),
'c': choice([f"value_{i}" for i in range(n_unique_candidates)])}
self.hp_ranges = make_hyperparameter_ranges(self.config_space)
self.all_unique_candidates = [
{'a': 1.0*j, 'b': j, 'c': f"value_{j}"}
for j in range(n_unique_candidates)]
def generate_candidates(self) -> Iterator[Configuration]:
i = 0
while True:
i += 1
yield self.all_unique_candidates[i % len(self.all_unique_candidates)]
class Quadratic3d:
def __init__(self, local_minima, active_metric, metric_names):
self.local_minima = np.array(local_minima).astype('float')
self.local_minima[0] = np.log10(self.local_minima[0])
self.active_metric = active_metric
self.metric_names = metric_names
@property
def search_space(self):
config_space = {
'x': loguniform(1.0, 100.0),
'y': randint(0, 2),
'z': choice(['0.0', '1.0', '2.0'])}
return make_hyperparameter_ranges(config_space)
@property
def f_min(self):
return 0.0
def __call__(self, candidate):
p = np.array([float(hp) for hp in candidate])
p[0] = np.log10(p[0])
return dictionarize_objective(np.sum((self.local_minima - p) ** 2))
def tuples_to_configs(config_tpls: List[Tuple[Hyperparameter, ...]],
hp_ranges: HyperparameterRanges) -> List[Configuration]:
return [hp_ranges.tuple_to_config(x) for x in config_tpls]
def create_exclusion_set(
candidates_tpl, hp_ranges: HyperparameterRanges,
is_dict: bool = False) -> ExclusionList:
if not is_dict:
candidates_tpl = tuples_to_configs(candidates_tpl, hp_ranges)
config_for_trial = {
str(trial_id): config for trial_id, config in enumerate(candidates_tpl)}
state = TuningJobState(
hp_ranges=hp_ranges,
config_for_trial=config_for_trial,
trials_evaluations=[],
failed_trials=[str(x) for x in range(len(candidates_tpl))])
return ExclusionList(state)
TupleOrDict = Union[tuple, dict]
def create_tuning_job_state(
hp_ranges: HyperparameterRanges, cand_tuples: List[TupleOrDict],
metrics: List[Dict],
pending_tuples: Optional[List[TupleOrDict]] = None,
failed_tuples: Optional[List[TupleOrDict]] = None) -> TuningJobState:
if cand_tuples and isinstance(cand_tuples[0], tuple):
configs = tuples_to_configs(cand_tuples, hp_ranges)
else:
configs = cand_tuples
trials_evaluations = [TrialEvaluations(trial_id=str(trial_id), metrics=y)
for trial_id, y in enumerate(metrics)]
pending_evaluations = None
if pending_tuples is not None:
sz = len(configs)
extra = len(pending_tuples)
if pending_tuples and isinstance(pending_tuples[0], tuple):
extra_configs = tuples_to_configs(pending_tuples, hp_ranges)
else:
extra_configs = pending_tuples
configs.extend(extra_configs)
pending_evaluations = [PendingEvaluation(trial_id=str(trial_id))
for trial_id in range(sz, sz + extra)]
failed_trials = None
if failed_tuples is not None:
sz = len(configs)
extra = len(failed_tuples)
if failed_tuples and isinstance(failed_tuples[0], tuple):
extra_configs = tuples_to_configs(failed_tuples, hp_ranges)
else:
extra_configs = failed_tuples
configs.extend(extra_configs)
failed_trials = [str(x) for x in range(sz, sz + extra)]
config_for_trial = {
str(trial_id): config for trial_id, config in enumerate(configs)}
return TuningJobState(
hp_ranges=hp_ranges,
config_for_trial=config_for_trial,
trials_evaluations=trials_evaluations,
failed_trials=failed_trials,
pending_evaluations=pending_evaluations)
| true
| true
|
f718e454d347591a4f961fd1e1312485c40195be
| 631
|
py
|
Python
|
setup.py
|
sagarkar10/pandarallel
|
48e14a3c9011e8a19440abe0a49192982d485b8e
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
sagarkar10/pandarallel
|
48e14a3c9011e8a19440abe0a49192982d485b8e
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
sagarkar10/pandarallel
|
48e14a3c9011e8a19440abe0a49192982d485b8e
|
[
"BSD-3-Clause"
] | null | null | null |
from setuptools import setup, find_packages
install_requires = [
'pandas',
'pyarrow >= 0.12.1',
'pathos >= 0.2.4'
]
setup(
name='pandarallel',
version='1.3.2',
python_requires='>=3.5',
packages=find_packages(),
author='Manu NALEPA',
author_email='nalepae@gmail.com',
description='An easy to use library to speed up computation (by parallelizing on multi CPUs) with pandas.',
long_description='See https://github.com/nalepae/pandarallel/tree/v1.3.2 for complete user guide.',
url='https://github.com/nalepae/pandarallel',
install_requires=install_requires,
license='BSD',
)
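# Illustrative usage of the packaged library (not part of this setup script);
# see the pandarallel documentation for the authoritative API:
#
#   from pandarallel import pandarallel
#   pandarallel.initialize()
#   df.parallel_apply(some_function, axis=1)   # `df` and `some_function` are placeholders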
| 28.681818
| 111
| 0.684628
|
from setuptools import setup, find_packages
install_requires = [
'pandas',
'pyarrow >= 0.12.1',
'pathos >= 0.2.4'
]
setup(
name='pandarallel',
version='1.3.2',
python_requires='>=3.5',
packages=find_packages(),
author='Manu NALEPA',
author_email='nalepae@gmail.com',
description='An easy to use library to speed up computation (by parallelizing on multi CPUs) with pandas.',
long_description='See https://github.com/nalepae/pandarallel/tree/v1.3.2 for complete user guide.',
url='https://github.com/nalepae/pandarallel',
install_requires=install_requires,
license='BSD',
)
| true
| true
|
f718e485c61a21c86f6b388bc00183ecb3b618e3
| 4,641
|
py
|
Python
|
zerionAPI/api.py
|
jhsu98/zerion-py
|
6c21fadc18e57814fd43a5b3fd187253f6f255e2
|
[
"MIT"
] | 4
|
2022-01-03T00:25:45.000Z
|
2022-02-04T21:51:25.000Z
|
zerionAPI/api.py
|
jhsu98/zerion-py
|
6c21fadc18e57814fd43a5b3fd187253f6f255e2
|
[
"MIT"
] | 1
|
2022-02-04T19:12:25.000Z
|
2022-02-04T21:40:18.000Z
|
zerionAPI/api.py
|
jhsu98/zerion-py
|
6c21fadc18e57814fd43a5b3fd187253f6f255e2
|
[
"MIT"
] | null | null | null |
import time
import re
import jwt
import requests
import json
from pprint import pprint
from abc import abstractmethod, ABC
import logging
logging.basicConfig(filename='app.log', level=logging.DEBUG,
format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S')
class Response:
def __init__(self, r):
self.headers = r.headers
self.status_code = r.status_code
self.response = r.json()
def __repr__(self):
return str(self.status_code)
def __str__(self):
return str(self.status_code)
class API(ABC):
def __init__(self, server=None, client_key=None, client_secret=None, params={}):
if not isinstance(server, str) or not isinstance(client_key, str) or not isinstance(client_secret, str):
raise TypeError("Invalid API credentials")
self.__client_key = client_key
self.__client_secret = client_secret
self.__params = params
self.__access_token = None
self.__access_token_expiration = None
self.__start_time = None
self.__session = requests.Session()
self.__session.headers.update({'Content-Type': 'application/json'})
self.__api_calls = 0
self.__last_execution_time = None
self.__rate_limit_retry = params.get('rate_limit_retry', False)
self.requestAccessToken()
def requestAccessToken(self):
"""Create JWT and request iFormBuilder Access Token
If token is successfully returned, stored in session header
Else null token is stored in session header
"""
try:
url = "https://identity.zerionsoftware.com/oauth2/token"
# url = "https://qa-identity.zerionsoftware.com/oauth2/token" if self.__isQA else "https://identity.zerionsoftware.com/oauth2/token"
jwt_payload = {
'iss': self.__client_key,
'aud': url,
'iat': time.time(),
'exp': time.time() + 300
}
encoded_jwt = jwt.encode(
jwt_payload, self.__client_secret, algorithm='HS256')
token_body = {
'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
'assertion': encoded_jwt
}
result = requests.post(url, data=token_body, timeout=5)
result.raise_for_status()
except Exception as e:
print(f'Exception: {e}')
return
else:
self.__start_time = time.time()
self.__access_token = result.json()['access_token']
self.__session.headers.update(
{'Authorization': "Bearer %s" % self.__access_token})
self.__access_token_expiration = time.time() + 3300
def getParams(self):
return self.__params
def getAccessToken(self):
return self.__access_token
def getAccessTokenExpiration(self):
return self.__access_token_expiration
def getApiCount(self):
return self.__api_calls
def getLastExecution(self):
return self.__last_execution_time
def getStartTime(self):
return self.__start_time
def getApiLifetime(self):
return round(time.time() - self.__start_time, 2)
def call(self, method, resource, body=None):
if self.getAccessToken() is not None and time.time() > self.getAccessTokenExpiration():
self.requestAccessToken()
method = method.upper()
if method not in ('GET','POST','PUT','DELETE'):
raise ValueError(f'{method} is not an accepted method')
isRateLimited = False
while not isRateLimited:
if method == 'GET':
result = self.__session.get(resource)
elif method == 'POST':
result = self.__session.post(resource, data=json.dumps(body))
elif method == 'PUT':
result = self.__session.put(resource, data=json.dumps(body))
elif method == 'DELETE':
result = self.__session.delete(resource)
self.__api_calls += 1
self.__last_execution_time = result.elapsed
if result.status_code == 429 and self.__rate_limit_retry == True:
print(f'Rate Limited for {resource}, waiting 60 seconds to retry...')
time.sleep(60)
else:
isRateLimited = True
return Response(result)
@abstractmethod
def describeResources(self):
raise NotImplementedError
@abstractmethod
def describeResource(self, resource):
raise NotImplementedError
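# --- Illustrative sketch (not part of the original module) ---------------------
# `API` is abstract, so callers must subclass it and implement the describe*
# methods. The class, server value and resource URL below are hypothetical and
# only show how `call()` would be used; real credentials are needed because
# `__init__` immediately requests an access token.
class ExampleAPI(API):
    def describeResources(self):
        return ['profiles', 'users']
    def describeResource(self, resource):
        return {'name': resource, 'methods': ['GET']}
# api = ExampleAPI(server='example', client_key='KEY', client_secret='SECRET')
# response = api.call('GET', 'https://api.example.com/v1/profiles')
# print(response.status_code, response.response)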
| 33.630435
| 144
| 0.609782
|
import time
import re
import jwt
import requests
import json
from pprint import pprint
from abc import abstractmethod, ABC
import logging
logging.basicConfig(filename='app.log', level=logging.DEBUG,
format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S')
class Response:
def __init__(self, r):
self.headers = r.headers
self.status_code = r.status_code
self.response = r.json()
def __repr__(self):
return str(self.status_code)
def __str__(self):
return str(self.status_code)
class API(ABC):
def __init__(self, server=None, client_key=None, client_secret=None, params={}):
if not isinstance(server, str) or not isinstance(client_key, str) or not isinstance(client_secret, str):
raise TypeError("Invalid API credentials")
self.__client_key = client_key
self.__client_secret = client_secret
self.__params = params
self.__access_token = None
self.__access_token_expiration = None
self.__start_time = None
self.__session = requests.Session()
self.__session.headers.update({'Content-Type': 'application/json'})
self.__api_calls = 0
self.__last_execution_time = None
self.__rate_limit_retry = params.get('rate_limit_retry', False)
self.requestAccessToken()
def requestAccessToken(self):
try:
url = "https://identity.zerionsoftware.com/oauth2/token"
jwt_payload = {
'iss': self.__client_key,
'aud': url,
'iat': time.time(),
'exp': time.time() + 300
}
encoded_jwt = jwt.encode(
jwt_payload, self.__client_secret, algorithm='HS256')
token_body = {
'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
'assertion': encoded_jwt
}
result = requests.post(url, data=token_body, timeout=5)
result.raise_for_status()
except Exception as e:
print(f'Exception: {e}')
return
else:
self.__start_time = time.time()
self.__access_token = result.json()['access_token']
self.__session.headers.update(
{'Authorization': "Bearer %s" % self.__access_token})
self.__access_token_expiration = time.time() + 3300
def getParams(self):
return self.__params
def getAccessToken(self):
return self.__access_token
def getAccessTokenExpiration(self):
return self.__access_token_expiration
def getApiCount(self):
return self.__api_calls
def getLastExecution(self):
return self.__last_execution_time
def getStartTime(self):
return self.__start_time
def getApiLifetime(self):
return round(time.time() - self.__start_time, 2)
def call(self, method, resource, body=None):
if self.getAccessToken() is not None and time.time() > self.getAccessTokenExpiration():
self.requestAccessToken()
method = method.upper()
if method not in ('GET','POST','PUT','DELETE'):
raise ValueError(f'{method} is not an accepted method')
isRateLimited = False
while not isRateLimited:
if method == 'GET':
result = self.__session.get(resource)
elif method == 'POST':
result = self.__session.post(resource, data=json.dumps(body))
elif method == 'PUT':
result = self.__session.put(resource, data=json.dumps(body))
elif method == 'DELETE':
result = self.__session.delete(resource)
self.__api_calls += 1
self.__last_execution_time = result.elapsed
if result.status_code == 429 and self.__rate_limit_retry == True:
print(f'Rate Limited for {resource}, waiting 60 seconds to retry...')
time.sleep(60)
else:
isRateLimited = True
return Response(result)
@abstractmethod
def describeResources(self):
raise NotImplementedError
@abstractmethod
def describeResource(self, resource):
raise NotImplementedError
| true
| true
|
f718e494ba3808dafe39908eb2ec71cbde91f235
| 2,684
|
py
|
Python
|
storm_analysis/sa_utilities/merge_hdf5.py
|
bintulab/storm-analysis
|
71ae493cbd17ddb97938d0ae2032d97a0eaa76b2
|
[
"CNRI-Python"
] | null | null | null |
storm_analysis/sa_utilities/merge_hdf5.py
|
bintulab/storm-analysis
|
71ae493cbd17ddb97938d0ae2032d97a0eaa76b2
|
[
"CNRI-Python"
] | null | null | null |
storm_analysis/sa_utilities/merge_hdf5.py
|
bintulab/storm-analysis
|
71ae493cbd17ddb97938d0ae2032d97a0eaa76b2
|
[
"CNRI-Python"
] | 1
|
2021-04-19T18:17:06.000Z
|
2021-04-19T18:17:06.000Z
|
#!/usr/bin/env python
"""
Merge multiple HDF5 format localization files (tracks only). No
alignment is performed. Metadata is taken from the first file.
Hazen 01/18
"""
import sys
import storm_analysis.sa_library.sa_h5py as saH5Py
def mergeHDF5(hdf5_files, results_file):
"""
Note: This only merges the tracks not the localizations.
"""
with saH5Py.SAH5Py(results_file, is_existing = False) as h5_out:
for i, h5_name in enumerate(hdf5_files):
with saH5Py.SAH5Py(h5_name) as h5_in:
if (i == 0):
[mx, my] = h5_in.getMovieInformation()[:2]
h5_out.setMovieInformation(mx, my, 0, "")
h5_out.setPixelSize(h5_in.getPixelSize())
h5_out.addMetadata(h5_in.getMetadata())
for tracks in h5_in.tracksIterator():
sys.stdout.write(".")
sys.stdout.flush()
h5_out.addTracks(tracks)
sys.stdout.write("\n")
if (__name__ == "__main__"):
import argparse
parser = argparse.ArgumentParser(description='Merge multiple HDF5 localization files.')
parser.add_argument('--inbin', dest='inbin', type=str, required=True, nargs = "*",
help = "The names of the localization files.")
parser.add_argument('--results', dest='results', type=str, required=True,
help = "File to save the merged results in.")
args = parser.parse_args()
mergeHDF5(args.inbin, args.results)
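# Illustrative invocation (not part of the original script); the file names
# below are hypothetical:
#
#   python merge_hdf5.py --inbin part_0.hdf5 part_1.hdf5 --results merged.hdf5
#
# The same merge can be done from Python:
#
#   mergeHDF5(["part_0.hdf5", "part_1.hdf5"], "merged.hdf5")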
#
# The MIT License
#
# Copyright (c) 2018 Babcock Lab, Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
| 36.767123
| 91
| 0.674739
|
import sys
import storm_analysis.sa_library.sa_h5py as saH5Py
def mergeHDF5(hdf5_files, results_file):
with saH5Py.SAH5Py(results_file, is_existing = False) as h5_out:
for i, h5_name in enumerate(hdf5_files):
with saH5Py.SAH5Py(h5_name) as h5_in:
if (i == 0):
[mx, my] = h5_in.getMovieInformation()[:2]
h5_out.setMovieInformation(mx, my, 0, "")
h5_out.setPixelSize(h5_in.getPixelSize())
h5_out.addMetadata(h5_in.getMetadata())
for tracks in h5_in.tracksIterator():
sys.stdout.write(".")
sys.stdout.flush()
h5_out.addTracks(tracks)
sys.stdout.write("\n")
if (__name__ == "__main__"):
import argparse
parser = argparse.ArgumentParser(description='Merge multiple HDF5 localization files.')
parser.add_argument('--inbin', dest='inbin', type=str, required=True, nargs = "*",
help = "The names of the localization files.")
parser.add_argument('--results', dest='results', type=str, required=True,
help = "File to save the merged results in.")
args = parser.parse_args()
mergeHDF5(args.inbin, args.results)
| true
| true
|
f718e4b6fe5e6cbcd80d9790d53329bb92e5e0b4
| 532
|
py
|
Python
|
app/core/models.py
|
DevelopwithTom/simple_inventory_api
|
5ce67be1c6ddbe7f5283256d52cf38779cbfdd89
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
DevelopwithTom/simple_inventory_api
|
5ce67be1c6ddbe7f5283256d52cf38779cbfdd89
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
DevelopwithTom/simple_inventory_api
|
5ce67be1c6ddbe7f5283256d52cf38779cbfdd89
|
[
"MIT"
] | null | null | null |
from django.db import models
import uuid
class Product(models.Model):
""" Creates and saves a new product """
sku = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False, unique=True)
name = models.CharField(max_length=255, blank=False, null=False)
quantity = models.IntegerField(blank=False, null=False)
price = models.DecimalField(max_digits=8, decimal_places=2)
def __str__(self):
return "Name: %s, Quantity Available: '%s', Price: £%s" % (self.name, self.quantity, self.price)
| 38
| 104
| 0.710526
|
from django.db import models
import uuid
class Product(models.Model):
sku = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False, unique=True)
name = models.CharField(max_length=255, blank=False, null=False)
quantity = models.IntegerField(blank=False, null=False)
price = models.DecimalField(max_digits=8, decimal_places=2)
def __str__(self):
return "Name: %s, Quantity Available: '%s', Price: £%s" % (self.name, self.quantity, self.price)
| true
| true
|
f718e4de43e06d53c6c3c35158f0b3ac68f8b713
| 4,230
|
py
|
Python
|
util/utility.py
|
uhrwecker/GRDonuts
|
3087aeb5c169251bdb711b425dcc3040ff962da7
|
[
"MIT"
] | null | null | null |
util/utility.py
|
uhrwecker/GRDonuts
|
3087aeb5c169251bdb711b425dcc3040ff962da7
|
[
"MIT"
] | 25
|
2020-03-26T11:16:58.000Z
|
2020-09-10T18:31:52.000Z
|
util/utility.py
|
uhrwecker/GRDonuts
|
3087aeb5c169251bdb711b425dcc3040ff962da7
|
[
"MIT"
] | null | null | null |
import numpy as np
class UtilInverse():
def __init__(self, verbose=True):
self.verbose = verbose
def find_nearest_ind(self, array, value):
index = []
for ind in range(len(array)-1):
if array[ind] < value and array[ind+1] > value:
index.append(ind)
if array[ind] > value and array[ind+1] < value:
index.append(ind)
return index
def sort_array_by_column(self, array, order=['f0']):
bits = 'i8'+',i8'*(len(array[0])-1)
array.view(bits).sort(order=order, axis=0)
return array
class UtilStability():
def __init__(self, verbose=True):
self.verbose = verbose
def retrieve_extrema(self, w, r):
self.check_for_stable_point(w, self.verbose)
min_mask = np.r_[True, w[1:] < w[:-1]] & np.r_[w[:-1] < w[1:], True]
max_mask = np.r_[True, w[1:] > w[:-1]] & np.r_[w[:-1] > w[1:], True]
w_min = w[min_mask]
r_min = r[min_mask]
w_max = w[max_mask]
r_max = r[max_mask]
try:
if w_min[0] == w[0]:
w_min = np.delete(w_min, 0)
r_min = np.delete(r_min, 0)
if w_max[-1] == w[-1]:
w_max = np.delete(w_max, -1)
r_max = np.delete(r_max, -1)
if self.verbose:
print('Simple extremum analysis: ')
print('- W has maximum/a at w='+str(w_max.tolist()))
print('- W has minimum/a at w='+str(w_min.tolist()))
return w_min.tolist(), w_max.tolist(), r_min.tolist(), r_max.tolist()
except:
return [0], [0], [0], [0]
def check_for_stable_point(self, w, exit_if_not_stable=False):
'''
        Checks that the array has at least one minimum and that
        its maximum is only a local one.
'''
min_mask = np.r_[True, w[1:] < w[:-1]] & np.r_[w[:-1] < w[1:], True]
max_mask = np.r_[True, w[1:] > w[:-1]] & np.r_[w[:-1] > w[1:], True]
w_min = w[min_mask]
w_max = w[max_mask]
## if w_max[0] == w[0] or w_max[0] == w[1]:
## '''
## The potentianl comes from +inf, so its not a stable point.
## '''
## raise ValueError()
if len(w_min) < 2 and len(w_max) < 2:
'''
            The function is monotonic. There is no stable point.
'''
self._error_monotonically(exit_if_not_stable)
elif len(w_min) < 1 or len(w_max) < 1:
'''
The function has either a local maximum OR local minimum, but not
both, thus is not stable
'''
self._error_only_one_extremum(exit_if_not_stable)
elif w_max[0] > w_max[1]:
'''
The potential is not closed, there is no Roche limit.
            Matter will extend into infinity.
'''
self._error_no_roche_limit(exit_if_not_stable)
elif self.verbose and len(w_min) > 1 and len(w_max) > 1:
print('Potential is possibly stable')
return 0
def closure_rating_function(self, w, r):
wmin, wmax, rmin, rmax = self.retrieve_extrema(w, r)
int_l = np.where(r == rmax[0])[0][0]
int_r = np.where(w > wmax[0])[0][0]
area_func = abs(w[int_l:int_r] - wmax[-1])
area = np.trapz(area_func)
return area
def _error_monotonically(self, flag):
if flag:
            raise ValueError('Potential not closed, potential is monotonic.')
else:
if self.verbose:
                print('WARNING: Potential not closed, potential is monotonic.')
def _error_only_one_extremum(self, flag):
if flag:
raise ValueError('Potential not closed, only has one extremum.')
else:
if self.verbose:
print('WARNING: Potential not closed, only has one extremum.')
def _error_no_roche_limit(self, flag):
if flag:
raise ValueError('Potential is not closed, matter extends into infinity.')
else:
if self.verbose:
                print('WARNING: Potential not closed, no Roche limit.')
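# --- Illustrative usage sketch (not part of the original module) ---------------
# The potential below is a synthetic toy (two Gaussians plus a linear ramp), not
# a GR effective potential; it simply produces one local maximum followed by a
# local minimum so the stability helpers have something to analyse.
if __name__ == '__main__':
    r = np.linspace(1.0, 20.0, 2000)
    w = np.exp(-0.5 * (r - 4.0) ** 2) - 1.5 * np.exp(-0.5 * (r - 8.0) ** 2) + 0.1 * r
    util = UtilStability(verbose=True)
    util.check_for_stable_point(w)          # prints whether the toy potential looks stable
    print(util.retrieve_extrema(w, r))      # (w_min, w_max, r_min, r_max)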
| 31.567164
| 86
| 0.529787
|
import numpy as np
class UtilInverse():
def __init__(self, verbose=True):
self.verbose = verbose
def find_nearest_ind(self, array, value):
index = []
for ind in range(len(array)-1):
if array[ind] < value and array[ind+1] > value:
index.append(ind)
if array[ind] > value and array[ind+1] < value:
index.append(ind)
return index
def sort_array_by_column(self, array, order=['f0']):
bits = 'i8'+',i8'*(len(array[0])-1)
array.view(bits).sort(order=order, axis=0)
return array
class UtilStability():
def __init__(self, verbose=True):
self.verbose = verbose
def retrieve_extrema(self, w, r):
self.check_for_stable_point(w, self.verbose)
min_mask = np.r_[True, w[1:] < w[:-1]] & np.r_[w[:-1] < w[1:], True]
max_mask = np.r_[True, w[1:] > w[:-1]] & np.r_[w[:-1] > w[1:], True]
w_min = w[min_mask]
r_min = r[min_mask]
w_max = w[max_mask]
r_max = r[max_mask]
try:
if w_min[0] == w[0]:
w_min = np.delete(w_min, 0)
r_min = np.delete(r_min, 0)
if w_max[-1] == w[-1]:
w_max = np.delete(w_max, -1)
r_max = np.delete(r_max, -1)
if self.verbose:
print('Simple extremum analysis: ')
print('- W has maximum/a at w='+str(w_max.tolist()))
print('- W has minimum/a at w='+str(w_min.tolist()))
return w_min.tolist(), w_max.tolist(), r_min.tolist(), r_max.tolist()
except:
return [0], [0], [0], [0]
def check_for_stable_point(self, w, exit_if_not_stable=False):
min_mask = np.r_[True, w[1:] < w[:-1]] & np.r_[w[:-1] < w[1:], True]
max_mask = np.r_[True, w[1:] > w[:-1]] & np.r_[w[:-1] > w[1:], True]
w_min = w[min_mask]
w_max = w[max_mask]
        if len(w_min) < 2 and len(w_max) < 2:
            '''
            The function is monotonic. There is no stable point.
            '''
self._error_monotonically(exit_if_not_stable)
elif len(w_min) < 1 or len(w_max) < 1:
'''
The function has either a local maximum OR local minimum, but not
both, thus is not stable
'''
self._error_only_one_extremum(exit_if_not_stable)
elif w_max[0] > w_max[1]:
'''
The potential is not closed, there is no Roche limit.
            Matter will extend into infinity.
'''
self._error_no_roche_limit(exit_if_not_stable)
elif self.verbose and len(w_min) > 1 and len(w_max) > 1:
print('Potential is possibly stable')
return 0
def closure_rating_function(self, w, r):
wmin, wmax, rmin, rmax = self.retrieve_extrema(w, r)
int_l = np.where(r == rmax[0])[0][0]
int_r = np.where(w > wmax[0])[0][0]
area_func = abs(w[int_l:int_r] - wmax[-1])
area = np.trapz(area_func)
return area
def _error_monotonically(self, flag):
if flag:
            raise ValueError('Potential not closed, potential is monotonic.')
else:
if self.verbose:
                print('WARNING: Potential not closed, potential is monotonic.')
def _error_only_one_extremum(self, flag):
if flag:
raise ValueError('Potential not closed, only has one extremum.')
else:
if self.verbose:
print('WARNING: Potential not closed, only has one extremum.')
def _error_no_roche_limit(self, flag):
if flag:
raise ValueError('Potential is not closed, matter extends into infinity.')
else:
if self.verbose:
                print('WARNING: Potential not closed, no Roche limit.')
| true
| true
|
f718e59a80e86c09e45c4dc013b7a554747f8a69
| 5,489
|
py
|
Python
|
image-viewer.py
|
kuqihanyan/Arduboy-Python-Utilities
|
e4bd7ed0cebfca250b40873bdcc75a4eb70363ef
|
[
"CC0-1.0"
] | 37
|
2018-11-23T03:01:10.000Z
|
2022-02-26T07:46:28.000Z
|
image-viewer.py
|
kuqihanyan/Arduboy-Python-Utilities
|
e4bd7ed0cebfca250b40873bdcc75a4eb70363ef
|
[
"CC0-1.0"
] | 3
|
2021-01-28T01:33:31.000Z
|
2021-07-27T02:18:16.000Z
|
image-viewer.py
|
kuqihanyan/Arduboy-Python-Utilities
|
e4bd7ed0cebfca250b40873bdcc75a4eb70363ef
|
[
"CC0-1.0"
] | 24
|
2018-06-25T00:55:20.000Z
|
2022-02-09T10:50:36.000Z
|
print("\nArduboy image viewer v1.0 by Mr.Blinky Jul.2019\n")
#requires pyserial and PILlow to be installed.
#Use "python -m pip install pyserial" and "python -m pip install pillow" on commandline
import sys
import time
import os
try:
from serial.tools.list_ports import comports
from serial import Serial
except:
print("The pySerial module is required but not installed!")
print("Use 'python -m pip install pyserial' from the commandline to install.")
sys.exit()
try:
from PIL import Image
except:
print("The PILlow module is not installed.")
print("type 'python -m pip install pillow' on commandline to install")
sys.exit()
compatibledevices = [
#Arduboy Leonardo
"VID:PID=2341:0036", "VID:PID=2341:8036",
"VID:PID=2A03:0036", "VID:PID=2A03:8036",
#Arduboy Micro
"VID:PID=2341:0037", "VID:PID=2341:8037",
"VID:PID=2A03:0037", "VID:PID=2A03:8037",
#Genuino Micro
"VID:PID=2341:0237", "VID:PID=2341:8237",
#Sparkfun Pro Micro 5V
"VID:PID=1B4F:9205", "VID:PID=1B4F:9206",
#Adafruit ItsyBitsy 5V
"VID:PID=239A:000E", "VID:PID=239A:800E",
]
bootloader_active = False
def delayedExit():
time.sleep(2)
sys.exit()
def getComPort(verbose):
global bootloader_active
devicelist = list(comports())
for device in devicelist:
for vidpid in compatibledevices:
if vidpid in device[2]:
port=device[0]
bootloader_active = (compatibledevices.index(vidpid) & 1) == 0
if verbose : print("Found {} at port {}".format(device[1],port))
return port
if verbose : print("Arduboy not found.")
def bootloaderStart():
global bootloader
## find and connect to Arduboy in bootloader mode ##
port = getComPort(True)
if port is None : delayedExit()
if not bootloader_active:
print("Selecting bootloader mode...")
bootloader = Serial(port,1200)
bootloader.close()
time.sleep(0.5)
#wait for disconnect and reconnect in bootloader mode
while getComPort(False) == port :
time.sleep(0.1)
if bootloader_active: break
while getComPort(False) is None : time.sleep(0.1)
port = getComPort(True)
sys.stdout.write("Opening port ...")
sys.stdout.flush()
for retries in range(20):
try:
time.sleep(0.1)
bootloader = Serial(port,57600)
break
except:
if retries == 19:
print(" Failed!")
delayedExit()
sys.stdout.write(".")
sys.stdout.flush()
time.sleep(0.4)
print("")
def getVersion():
bootloader.write(b"V")
return int(bootloader.read(2))
def getJedecID():
bootloader.write(b"j")
jedec_id = bootloader.read(3)
time.sleep(0.5)
bootloader.write(b"j")
jedec_id2 = bootloader.read(3)
if jedec_id2 != jedec_id or jedec_id == b'\x00\x00\x00' or jedec_id == b'\xFF\xFF\xFF':
print("No flash cart detected.")
delayedExit()
return bytearray(jedec_id)
def ledControl(b): #Bit 7 set: OLED display off (v1.1) Disable bootloader menu buttons (v1.3+)
bootloader.write(bytearray([ord('x'), b])) #Bit 6 set: RGB Breathing function off (v1.1 to v1.3 only)
bootloader.read(1) #Bit 5 set: RxTx status function off
#Bit 4 set: Rx LED on
#Bit 3 set: Tx LED on
#Bit 2 set: RGB LED green on
#Bit 1 set: RGB LED red on
#Bit 0 set: RGB LED blue on
def readButtons():
bootloader.write(b'v')
buttons = ((ord(bootloader.read(1)) - ord('1') << 2)) | ((ord(bootloader.read(1)) - ord('A') << 4))
return buttons
def display(image):
bootloader.write(b'A\x00\x00')
bootloader.read(1)
bootloader.write(b'B\x04\x00D' + image[0:1024]) #display supports 1K blocks
bootloader.read(1)
def bootloaderExit():
bootloader.write(b"E")
bootloader.read(1)
################################################################################
def usage():
print("\nUSAGE:\n\n{} imagefile".format(os.path.basename(sys.argv[0])))
print()
print("Displays a 128x64 pixel image on Arduboy's display in bootloader mode until a ")
print("button is pressed (Cathy3K bootloader required).")
delayedExit()
################################################################################
if len(sys.argv) < 2 : usage()
#load and convert image
img = Image.open(sys.argv[1]).convert("1")
width, height = img.size
if (width != 128) or (height != 64) :
print("Error: Title screen '{}' is not 128 x 64 pixels.".format(filename))
DelayedExit()
pixels = list(img.getdata())
bytes = bytearray(int((height // 8) * width))
i = 0
b = 0
for y in range (0,height,8):
for x in range (0,width):
for p in range (0,8):
b = b >> 1
if pixels[(y + p) * width + x] > 0:
b |= 0x80
bytes[i] = b
i += 1
#select bootloader mode
bootloaderStart()
if getVersion() < 13:
print("Arduboy requires Cathy3K Bootloader v1.3 or later")
bootloaderExit()
    delayedExit()
ledControl(0x80) #Disable menu buttons
#display image
print("\nDisplaying image on Arduboy. Press any button on Arduboy to end.")
while not readButtons():
display(bytes)
while readButtons():
pass
ledControl(0) #Enable menu buttons
bootloaderExit()
| 31.545977
| 121
| 0.596466
|
print("\nArduboy image viewer v1.0 by Mr.Blinky Jul.2019\n")
import sys
import time
import os
try:
from serial.tools.list_ports import comports
from serial import Serial
except:
print("The pySerial module is required but not installed!")
print("Use 'python -m pip install pyserial' from the commandline to install.")
sys.exit()
try:
from PIL import Image
except:
print("The PILlow module is not installed.")
print("type 'python -m pip install pillow' on commandline to install")
sys.exit()
compatibledevices = [
"VID:PID=2341:0036", "VID:PID=2341:8036",
"VID:PID=2A03:0036", "VID:PID=2A03:8036",
"VID:PID=2341:0037", "VID:PID=2341:8037",
"VID:PID=2A03:0037", "VID:PID=2A03:8037",
"VID:PID=2341:0237", "VID:PID=2341:8237",
"VID:PID=1B4F:9205", "VID:PID=1B4F:9206",
"VID:PID=239A:000E", "VID:PID=239A:800E",
]
bootloader_active = False
def delayedExit():
time.sleep(2)
sys.exit()
def getComPort(verbose):
global bootloader_active
devicelist = list(comports())
for device in devicelist:
for vidpid in compatibledevices:
if vidpid in device[2]:
port=device[0]
bootloader_active = (compatibledevices.index(vidpid) & 1) == 0
if verbose : print("Found {} at port {}".format(device[1],port))
return port
if verbose : print("Arduboy not found.")
def bootloaderStart():
global bootloader
    port = getComPort(True)
    if port is None : delayedExit()
if not bootloader_active:
print("Selecting bootloader mode...")
bootloader = Serial(port,1200)
bootloader.close()
time.sleep(0.5)
while getComPort(False) == port :
time.sleep(0.1)
if bootloader_active: break
while getComPort(False) is None : time.sleep(0.1)
port = getComPort(True)
sys.stdout.write("Opening port ...")
sys.stdout.flush()
for retries in range(20):
try:
time.sleep(0.1)
bootloader = Serial(port,57600)
break
except:
if retries == 19:
print(" Failed!")
delayedExit()
sys.stdout.write(".")
sys.stdout.flush()
time.sleep(0.4)
print("")
def getVersion():
bootloader.write(b"V")
return int(bootloader.read(2))
def getJedecID():
bootloader.write(b"j")
jedec_id = bootloader.read(3)
time.sleep(0.5)
bootloader.write(b"j")
jedec_id2 = bootloader.read(3)
if jedec_id2 != jedec_id or jedec_id == b'\x00\x00\x00' or jedec_id == b'\xFF\xFF\xFF':
print("No flash cart detected.")
delayedExit()
return bytearray(jedec_id)
def ledControl(b):
bootloader.write(bytearray([ord('x'), b]))
bootloader.read(1)
def readButtons():
bootloader.write(b'v')
buttons = ((ord(bootloader.read(1)) - ord('1') << 2)) | ((ord(bootloader.read(1)) - ord('A') << 4))
return buttons
def display(image):
bootloader.write(b'A\x00\x00')
bootloader.read(1)
bootloader.write(b'B\x04\x00D' + image[0:1024])
bootloader.read(1)
def bootloaderExit():
bootloader.write(b"E")
bootloader.read(1)
| true
| true
|
f718e68c5abac3512deb144ea362e1ed01d290af
| 251
|
py
|
Python
|
linebot/models/line_types.py
|
tominaga443/globalbot
|
bbbb8f0e4c5164231656f515f4b889df8f28da86
|
[
"MIT"
] | 2
|
2016-04-17T08:30:03.000Z
|
2016-05-09T02:58:52.000Z
|
linebot/models/line_types.py
|
tominaga443/globalbot
|
bbbb8f0e4c5164231656f515f4b889df8f28da86
|
[
"MIT"
] | null | null | null |
linebot/models/line_types.py
|
tominaga443/globalbot
|
bbbb8f0e4c5164231656f515f4b889df8f28da86
|
[
"MIT"
] | null | null | null |
from enum import Enum
class EventType(Enum):
message = "138311609000106303"
operation = "138311609100106403"
class ContentType(Enum):
text = 1
image = 2
video = 3
audio = 4
location = 7
sticker = 8
contact = 10
| 14.764706
| 36
| 0.625498
|
from enum import Enum
class EventType(Enum):
message = "138311609000106303"
operation = "138311609100106403"
class ContentType(Enum):
text = 1
image = 2
video = 3
audio = 4
location = 7
sticker = 8
contact = 10
| true
| true
|
f718e849c9b0d90bdc2380ce8a6c1c37b4447bdb
| 59,962
|
py
|
Python
|
python/dgl/distributed/dist_graph.py
|
blokhinnv/dgl
|
bcf92f6c21afd4ad48a86d2ee543386099190791
|
[
"Apache-2.0"
] | null | null | null |
python/dgl/distributed/dist_graph.py
|
blokhinnv/dgl
|
bcf92f6c21afd4ad48a86d2ee543386099190791
|
[
"Apache-2.0"
] | null | null | null |
python/dgl/distributed/dist_graph.py
|
blokhinnv/dgl
|
bcf92f6c21afd4ad48a86d2ee543386099190791
|
[
"Apache-2.0"
] | null | null | null |
"""Define distributed graph."""
from collections.abc import MutableMapping
from collections import namedtuple
import os
import numpy as np
from ..heterograph import DGLHeteroGraph
from ..convert import heterograph as dgl_heterograph
from ..convert import graph as dgl_graph
from ..transform import compact_graphs
from .. import heterograph_index
from .. import backend as F
from ..base import NID, EID, NTYPE, ETYPE, ALL, is_all
from .kvstore import KVServer, get_kvstore
from .._ffi.ndarray import empty_shared_mem
from ..frame import infer_scheme
from .partition import load_partition, load_partition_book
from .graph_partition_book import PartitionPolicy, get_shared_mem_partition_book
from .graph_partition_book import HeteroDataName, parse_hetero_data_name
from .graph_partition_book import NodePartitionPolicy, EdgePartitionPolicy
from .shared_mem_utils import _to_shared_mem, _get_ndata_path, _get_edata_path, DTYPE_DICT
from . import rpc
from . import role
from .server_state import ServerState
from .rpc_server import start_server
from .graph_services import find_edges as dist_find_edges
from .graph_services import out_degrees as dist_out_degrees
from .graph_services import in_degrees as dist_in_degrees
from .dist_tensor import DistTensor
INIT_GRAPH = 800001
class InitGraphRequest(rpc.Request):
""" Init graph on the backup servers.
    When the backup servers start, they don't load the graph structure.
This request tells the backup servers that they can map to the graph structure
with shared memory.
"""
def __init__(self, graph_name):
self._graph_name = graph_name
def __getstate__(self):
return self._graph_name
def __setstate__(self, state):
self._graph_name = state
def process_request(self, server_state):
if server_state.graph is None:
server_state.graph = _get_graph_from_shared_mem(self._graph_name)
return InitGraphResponse(self._graph_name)
class InitGraphResponse(rpc.Response):
""" Ack the init graph request
"""
def __init__(self, graph_name):
self._graph_name = graph_name
def __getstate__(self):
return self._graph_name
def __setstate__(self, state):
self._graph_name = state
def _copy_graph_to_shared_mem(g, graph_name, graph_format):
new_g = g.shared_memory(graph_name, formats=graph_format)
# We should share the node/edge data to the client explicitly instead of putting them
# in the KVStore because some of the node/edge data may be duplicated.
new_g.ndata['inner_node'] = _to_shared_mem(g.ndata['inner_node'],
_get_ndata_path(graph_name, 'inner_node'))
new_g.ndata[NID] = _to_shared_mem(g.ndata[NID], _get_ndata_path(graph_name, NID))
new_g.edata['inner_edge'] = _to_shared_mem(g.edata['inner_edge'],
_get_edata_path(graph_name, 'inner_edge'))
new_g.edata[EID] = _to_shared_mem(g.edata[EID], _get_edata_path(graph_name, EID))
new_g.edata[ETYPE] = _to_shared_mem(g.edata[ETYPE], _get_edata_path(graph_name, ETYPE))
return new_g
FIELD_DICT = {'inner_node': F.int32, # A flag indicates whether the node is inside a partition.
'inner_edge': F.int32, # A flag indicates whether the edge is inside a partition.
NID: F.int64,
EID: F.int64,
NTYPE: F.int32,
ETYPE: F.int32}
def _get_shared_mem_ndata(g, graph_name, name):
''' Get shared-memory node data from DistGraph server.
This is called by the DistGraph client to access the node data in the DistGraph server
with shared memory.
'''
shape = (g.number_of_nodes(),)
dtype = FIELD_DICT[name]
dtype = DTYPE_DICT[dtype]
data = empty_shared_mem(_get_ndata_path(graph_name, name), False, shape, dtype)
dlpack = data.to_dlpack()
return F.zerocopy_from_dlpack(dlpack)
def _get_shared_mem_edata(g, graph_name, name):
''' Get shared-memory edge data from DistGraph server.
This is called by the DistGraph client to access the edge data in the DistGraph server
with shared memory.
'''
shape = (g.number_of_edges(),)
dtype = FIELD_DICT[name]
dtype = DTYPE_DICT[dtype]
data = empty_shared_mem(_get_edata_path(graph_name, name), False, shape, dtype)
dlpack = data.to_dlpack()
return F.zerocopy_from_dlpack(dlpack)
def _get_graph_from_shared_mem(graph_name):
''' Get the graph from the DistGraph server.
The DistGraph server puts the graph structure of the local partition in the shared memory.
The client can access the graph structure and some metadata on nodes and edges directly
through shared memory to reduce the overhead of data access.
'''
g, ntypes, etypes = heterograph_index.create_heterograph_from_shared_memory(graph_name)
if g is None:
return None
g = DGLHeteroGraph(g, ntypes, etypes)
g.ndata['inner_node'] = _get_shared_mem_ndata(g, graph_name, 'inner_node')
g.ndata[NID] = _get_shared_mem_ndata(g, graph_name, NID)
g.edata['inner_edge'] = _get_shared_mem_edata(g, graph_name, 'inner_edge')
g.edata[EID] = _get_shared_mem_edata(g, graph_name, EID)
g.edata[ETYPE] = _get_shared_mem_edata(g, graph_name, ETYPE)
return g
NodeSpace = namedtuple('NodeSpace', ['data'])
EdgeSpace = namedtuple('EdgeSpace', ['data'])
class HeteroNodeView(object):
"""A NodeView class to act as G.nodes for a DistGraph."""
__slots__ = ['_graph']
def __init__(self, graph):
self._graph = graph
def __getitem__(self, key):
assert isinstance(key, str)
return NodeSpace(data=NodeDataView(self._graph, key))
class HeteroEdgeView(object):
"""A NodeView class to act as G.nodes for a DistGraph."""
__slots__ = ['_graph']
def __init__(self, graph):
self._graph = graph
def __getitem__(self, key):
assert isinstance(key, str)
return EdgeSpace(data=EdgeDataView(self._graph, key))
class NodeDataView(MutableMapping):
"""The data view class when dist_graph.ndata[...].data is called.
"""
__slots__ = ['_graph', '_data']
def __init__(self, g, ntype=None):
self._graph = g
# When this is created, the server may already load node data. We need to
# initialize the node data in advance.
names = g._get_ndata_names(ntype)
if ntype is None:
self._data = g._ndata_store
else:
if ntype in g._ndata_store:
self._data = g._ndata_store[ntype]
else:
self._data = {}
g._ndata_store[ntype] = self._data
for name in names:
assert name.is_node()
policy = PartitionPolicy(name.policy_str, g.get_partition_book())
dtype, shape, _ = g._client.get_data_meta(str(name))
# We create a wrapper on the existing tensor in the kvstore.
self._data[name.get_name()] = DistTensor(shape, dtype, name.get_name(),
part_policy=policy)
def _get_names(self):
return list(self._data.keys())
def __getitem__(self, key):
return self._data[key]
def __setitem__(self, key, val):
self._data[key] = val
def __delitem__(self, key):
del self._data[key]
def __len__(self):
# The number of node data may change. Let's count it every time we need them.
# It's not called frequently. It should be fine.
return len(self._data)
def __iter__(self):
return iter(self._data)
def __repr__(self):
reprs = {}
for name in self._data:
dtype = F.dtype(self._data[name])
shape = F.shape(self._data[name])
reprs[name] = 'DistTensor(shape={}, dtype={})'.format(str(shape), str(dtype))
return repr(reprs)
class EdgeDataView(MutableMapping):
"""The data view class when G.edges[...].data is called.
"""
__slots__ = ['_graph', '_data']
def __init__(self, g, etype=None):
self._graph = g
# When this is created, the server may already load edge data. We need to
# initialize the edge data in advance.
names = g._get_edata_names(etype)
if etype is None:
self._data = g._edata_store
else:
if etype in g._edata_store:
self._data = g._edata_store[etype]
else:
self._data = {}
g._edata_store[etype] = self._data
for name in names:
assert name.is_edge()
policy = PartitionPolicy(name.policy_str, g.get_partition_book())
dtype, shape, _ = g._client.get_data_meta(str(name))
# We create a wrapper on the existing tensor in the kvstore.
self._data[name.get_name()] = DistTensor(shape, dtype, name.get_name(),
part_policy=policy)
def _get_names(self):
return list(self._data.keys())
def __getitem__(self, key):
return self._data[key]
def __setitem__(self, key, val):
self._data[key] = val
def __delitem__(self, key):
del self._data[key]
def __len__(self):
# The number of edge data may change. Let's count it every time we need them.
# It's not called frequently. It should be fine.
return len(self._data)
def __iter__(self):
return iter(self._data)
def __repr__(self):
reprs = {}
for name in self._data:
dtype = F.dtype(self._data[name])
shape = F.shape(self._data[name])
reprs[name] = 'DistTensor(shape={}, dtype={})'.format(str(shape), str(dtype))
return repr(reprs)
class DistGraphServer(KVServer):
''' The DistGraph server.
This DistGraph server loads the graph data and sets up a service so that trainers and
samplers can read data of a graph partition (graph structure, node data and edge data)
from remote machines. A server is responsible for one graph partition.
Currently, each machine runs only one main server with a set of backup servers to handle
clients' requests. The main server and the backup servers all handle the requests for the same
graph partition. They all share the partition data (graph structure and node/edge data) with
shared memory.
By default, the partition data is shared with the DistGraph clients that run on
    the same machine. However, a user can disable the shared memory option. This is useful for the case
that a user wants to run the server and the client on different machines.
Parameters
----------
server_id : int
The server ID (start from 0).
ip_config : str
Path of IP configuration file.
num_servers : int
Server count on each machine.
num_clients : int
Total number of client nodes.
part_config : string
The path of the config file generated by the partition tool.
disable_shared_mem : bool
Disable shared memory.
graph_format : str or list of str
The graph formats.
'''
def __init__(self, server_id, ip_config, num_servers,
num_clients, part_config, disable_shared_mem=False,
graph_format=('csc', 'coo')):
super(DistGraphServer, self).__init__(server_id=server_id,
ip_config=ip_config,
num_servers=num_servers,
num_clients=num_clients)
self.ip_config = ip_config
self.num_servers = num_servers
# Load graph partition data.
if self.is_backup_server():
            # The backup server doesn't load the graph partition. It'll be initialized afterwards.
self.gpb, graph_name, ntypes, etypes = load_partition_book(part_config, self.part_id)
self.client_g = None
else:
self.client_g, node_feats, edge_feats, self.gpb, graph_name, \
ntypes, etypes = load_partition(part_config, self.part_id)
print('load ' + graph_name)
            # Create the graph formats specified by the users.
self.client_g = self.client_g.formats(graph_format)
self.client_g.create_formats_()
if not disable_shared_mem:
self.client_g = _copy_graph_to_shared_mem(self.client_g, graph_name, graph_format)
if not disable_shared_mem:
self.gpb.shared_memory(graph_name)
assert self.gpb.partid == self.part_id
for ntype in ntypes:
node_name = HeteroDataName(True, ntype, None)
self.add_part_policy(PartitionPolicy(node_name.policy_str, self.gpb))
for etype in etypes:
edge_name = HeteroDataName(False, etype, None)
self.add_part_policy(PartitionPolicy(edge_name.policy_str, self.gpb))
if not self.is_backup_server():
for name in node_feats:
# The feature name has the following format: node_type + "/" + feature_name to avoid
# feature name collision for different node types.
ntype, feat_name = name.split('/')
data_name = HeteroDataName(True, ntype, feat_name)
self.init_data(name=str(data_name), policy_str=data_name.policy_str,
data_tensor=node_feats[name])
for name in edge_feats:
# The feature name has the following format: edge_type + "/" + feature_name to avoid
# feature name collision for different edge types.
etype, feat_name = name.split('/')
data_name = HeteroDataName(False, etype, feat_name)
self.init_data(name=str(data_name), policy_str=data_name.policy_str,
data_tensor=edge_feats[name])
def start(self):
""" Start graph store server.
"""
# start server
server_state = ServerState(kv_store=self, local_g=self.client_g, partition_book=self.gpb)
print('start graph service on server {} for part {}'.format(self.server_id, self.part_id))
start_server(server_id=self.server_id,
ip_config=self.ip_config,
num_servers=self.num_servers,
num_clients=self.num_clients, server_state=server_state)
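# Illustrative launch sketch (not part of the original module); in practice the
# server processes are created by DGL's launch script, and the paths and counts
# below are hypothetical:
#
#   server = DistGraphServer(server_id=0, ip_config='ip_config.txt',
#                            num_servers=1, num_clients=4,
#                            part_config='data/graph_name.json')
#   server.start()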
class DistGraph:
'''The class for accessing a distributed graph.
This class provides a subset of DGLGraph APIs for accessing partitioned graph data in
distributed GNN training and inference. Thus, its main use case is to work with
distributed sampling APIs to generate mini-batches and perform forward and
backward computation on the mini-batches.
The class can run in two modes: the standalone mode and the distributed mode.
* When a user runs the training script normally, ``DistGraph`` will be in the standalone mode.
In this mode, the input data must be constructed by
:py:meth:`~dgl.distributed.partition.partition_graph` with only one partition. This mode is
      used for testing and debugging purposes. In this mode, users have to provide ``part_config``
so that ``DistGraph`` can load the input graph.
* When a user runs the training script with the distributed launch script, ``DistGraph`` will
be set into the distributed mode. This is used for actual distributed training. All data of
partitions are loaded by the ``DistGraph`` servers, which are created by DGL's launch script.
``DistGraph`` connects with the servers to access the partitioned graph data.
Currently, the ``DistGraph`` servers and clients run on the same set of machines
in the distributed mode. ``DistGraph`` uses shared-memory to access the partition data
in the local machine. This gives the best performance for distributed training.
Users may want to run ``DistGraph`` servers and clients on separate sets of machines.
In this case, a user may want to disable shared memory by passing
``disable_shared_mem=True`` when creating ``DistGraphServer``. When shared memory is disabled,
a user has to pass a partition book.
Parameters
----------
graph_name : str
The name of the graph. This name has to be the same as the one used for
partitioning a graph in :py:meth:`dgl.distributed.partition.partition_graph`.
gpb : GraphPartitionBook, optional
The partition book object. Normally, users do not need to provide the partition book.
This argument is necessary only when users want to run server process and trainer
processes on different machines.
part_config : str, optional
The path of partition configuration file generated by
:py:meth:`dgl.distributed.partition.partition_graph`. It's used in the standalone mode.
Examples
--------
The example shows the creation of ``DistGraph`` in the standalone mode.
>>> dgl.distributed.partition_graph(g, 'graph_name', 1, num_hops=1, part_method='metis',
... out_path='output/', reshuffle=True)
>>> g = dgl.distributed.DistGraph('graph_name', part_config='output/graph_name.json')
The example shows the creation of ``DistGraph`` in the distributed mode.
>>> g = dgl.distributed.DistGraph('graph-name')
The code below shows the mini-batch training using ``DistGraph``.
>>> def sample(seeds):
... seeds = th.LongTensor(np.asarray(seeds))
... frontier = dgl.distributed.sample_neighbors(g, seeds, 10)
... return dgl.to_block(frontier, seeds)
>>> dataloader = dgl.distributed.DistDataLoader(dataset=nodes, batch_size=1000,
... collate_fn=sample, shuffle=True)
>>> for block in dataloader:
... feat = g.ndata['features'][block.srcdata[dgl.NID]]
... labels = g.ndata['labels'][block.dstdata[dgl.NID]]
... pred = model(block, feat)
Note
----
DGL's distributed training by default runs server processes and trainer processes on the same
set of machines. If users need to run them on different sets of machines, it requires
manually setting up servers and trainers. The setup is not fully tested yet.
'''
def __init__(self, graph_name, gpb=None, part_config=None):
self.graph_name = graph_name
self._gpb_input = gpb
if os.environ.get('DGL_DIST_MODE', 'standalone') == 'standalone':
assert part_config is not None, \
'When running in the standalone mode, the partition config file is required'
self._client = get_kvstore()
assert self._client is not None, \
'Distributed module is not initialized. Please call dgl.distributed.initialize.'
# Load graph partition data.
g, node_feats, edge_feats, self._gpb, _, _, _ = load_partition(part_config, 0)
assert self._gpb.num_partitions() == 1, \
'The standalone mode can only work with the graph data with one partition'
if self._gpb is None:
self._gpb = gpb
self._g = g
for name in node_feats:
# The feature name has the following format: node_type + "/" + feature_name.
ntype, feat_name = name.split('/')
self._client.add_data(str(HeteroDataName(True, ntype, feat_name)),
node_feats[name],
NodePartitionPolicy(self._gpb, ntype=ntype))
for name in edge_feats:
# The feature name has the following format: edge_type + "/" + feature_name.
etype, feat_name = name.split('/')
self._client.add_data(str(HeteroDataName(False, etype, feat_name)),
edge_feats[name],
EdgePartitionPolicy(self._gpb, etype=etype))
self._client.map_shared_data(self._gpb)
rpc.set_num_client(1)
else:
self._init()
# Tell the backup servers to load the graph structure from shared memory.
for server_id in range(self._client.num_servers):
rpc.send_request(server_id, InitGraphRequest(graph_name))
for server_id in range(self._client.num_servers):
rpc.recv_response()
self._client.barrier()
self._ndata_store = {}
self._edata_store = {}
self._ndata = NodeDataView(self)
self._edata = EdgeDataView(self)
self._num_nodes = 0
self._num_edges = 0
for part_md in self._gpb.metadata():
self._num_nodes += int(part_md['num_nodes'])
self._num_edges += int(part_md['num_edges'])
# When we store node/edge types in a list, they are stored in the order of type IDs.
self._ntype_map = {ntype:i for i, ntype in enumerate(self.ntypes)}
self._etype_map = {etype:i for i, etype in enumerate(self.etypes)}
# Get canonical edge types.
# TODO(zhengda) this requires the server to store the graph with coo format.
eid = []
for etype in self.etypes:
type_eid = F.zeros((1,), F.int64, F.cpu())
eid.append(self._gpb.map_to_homo_eid(type_eid, etype))
eid = F.cat(eid, 0)
src, dst = dist_find_edges(self, eid)
src_tids, _ = self._gpb.map_to_per_ntype(src)
dst_tids, _ = self._gpb.map_to_per_ntype(dst)
self._canonical_etypes = []
etype_ids = F.arange(0, len(self.etypes))
for src_tid, etype_id, dst_tid in zip(src_tids, etype_ids, dst_tids):
src_tid = F.as_scalar(src_tid)
etype_id = F.as_scalar(etype_id)
dst_tid = F.as_scalar(dst_tid)
self._canonical_etypes.append((self.ntypes[src_tid], self.etypes[etype_id],
self.ntypes[dst_tid]))
self._etype2canonical = {}
for src_type, etype, dst_type in self._canonical_etypes:
if etype in self._etype2canonical:
self._etype2canonical[etype] = ()
else:
self._etype2canonical[etype] = (src_type, etype, dst_type)
def _init(self):
self._client = get_kvstore()
assert self._client is not None, \
'Distributed module is not initialized. Please call dgl.distributed.initialize.'
self._g = _get_graph_from_shared_mem(self.graph_name)
self._gpb = get_shared_mem_partition_book(self.graph_name, self._g)
if self._gpb is None:
self._gpb = self._gpb_input
self._client.map_shared_data(self._gpb)
def __getstate__(self):
return self.graph_name, self._gpb, self._canonical_etypes
def __setstate__(self, state):
self.graph_name, self._gpb_input, self._canonical_etypes = state
self._init()
self._etype2canonical = {}
for src_type, etype, dst_type in self._canonical_etypes:
if etype in self._etype2canonical:
self._etype2canonical[etype] = ()
else:
self._etype2canonical[etype] = (src_type, etype, dst_type)
self._ndata_store = {}
self._edata_store = {}
self._ndata = NodeDataView(self)
self._edata = EdgeDataView(self)
self._num_nodes = 0
self._num_edges = 0
for part_md in self._gpb.metadata():
self._num_nodes += int(part_md['num_nodes'])
self._num_edges += int(part_md['num_edges'])
@property
def local_partition(self):
''' Return the local partition on the client
DistGraph provides a global view of the distributed graph. Internally,
it may contain a partition of the graph if it is co-located with
the server. When servers and clients run on separate sets of machines,
this returns None.
Returns
-------
DGLGraph
The local partition
'''
return self._g
@property
def nodes(self):
'''Return a node view
'''
return HeteroNodeView(self)
@property
def edges(self):
'''Return an edge view
'''
return HeteroEdgeView(self)
@property
def ndata(self):
"""Return the data view of all the nodes.
Returns
-------
NodeDataView
The data view in the distributed graph storage.
"""
assert len(self.ntypes) == 1, "ndata only works for a graph with one node type."
return self._ndata
@property
def edata(self):
"""Return the data view of all the edges.
Returns
-------
EdgeDataView
The data view in the distributed graph storage.
"""
assert len(self.etypes) == 1, "edata only works for a graph with one edge type."
return self._edata
@property
def idtype(self):
"""The dtype of graph index
Returns
-------
backend dtype object
th.int32/th.int64 or tf.int32/tf.int64 etc.
See Also
--------
long
int
"""
# TODO(da?): describe when self._g is None and idtype shouldn't be called.
return F.int64
@property
def device(self):
"""Get the device context of this graph.
Examples
--------
The following example uses PyTorch backend.
>>> g = dgl.bipartite(([0, 1, 1, 2], [0, 0, 2, 1]), 'user', 'plays', 'game')
>>> print(g.device)
device(type='cpu')
>>> g = g.to('cuda:0')
>>> print(g.device)
device(type='cuda', index=0)
Returns
-------
Device context object
"""
# TODO(da?): describe when self._g is None and device shouldn't be called.
return F.cpu()
@property
def ntypes(self):
"""Return the list of node types of this graph.
Returns
-------
list of str
Examples
--------
>>> g = DistGraph("test")
>>> g.ntypes
['_U']
"""
return self._gpb.ntypes
@property
def etypes(self):
"""Return the list of edge types of this graph.
Returns
-------
list of str
Examples
--------
>>> g = DistGraph("test")
>>> g.etypes
['_E']
"""
# Currently, we only support a graph with one edge type.
return self._gpb.etypes
@property
def canonical_etypes(self):
"""Return all the canonical edge types in the graph.
A canonical edge type is a string triplet ``(str, str, str)``
for source node type, edge type and destination node type.
Returns
-------
list[(str, str, str)]
All the canonical edge type triplets in a list.
Notes
-----
DGL internally assigns an integer ID for each edge type. The returned
edge type names are sorted according to their IDs.
See Also
--------
etypes
Examples
--------
The following example uses PyTorch backend.
>>> import dgl
>>> import torch
>>> g = DistGraph("test")
>>> g.canonical_etypes
[('user', 'follows', 'user'),
('user', 'follows', 'game'),
('user', 'plays', 'game')]
"""
return self._canonical_etypes
def to_canonical_etype(self, etype):
"""Convert an edge type to the corresponding canonical edge type in the graph.
A canonical edge type is a string triplet ``(str, str, str)``
for source node type, edge type and destination node type.
The function expects that the given edge type name can uniquely identify a canonical edge
type. DGL will raise an error if this is not the case.
Parameters
----------
etype : str or (str, str, str)
If :attr:`etype` is an edge type (str), it returns the corresponding canonical edge
type in the graph. If :attr:`etype` is already a canonical edge type,
it directly returns the input unchanged.
Returns
-------
(str, str, str)
The canonical edge type corresponding to the edge type.
Examples
--------
The following example uses PyTorch backend.
>>> import dgl
>>> import torch
>>> g = DistGraph("test")
>>> g.canonical_etypes
[('user', 'follows', 'user'),
('user', 'follows', 'game'),
('user', 'plays', 'game')]
>>> g.to_canonical_etype('plays')
('user', 'plays', 'game')
>>> g.to_canonical_etype(('user', 'plays', 'game'))
('user', 'plays', 'game')
See Also
--------
canonical_etypes
"""
if etype is None:
if len(self.etypes) != 1:
raise DGLError('Edge type name must be specified if there are more than one '
'edge types.')
etype = self.etypes[0]
if isinstance(etype, tuple):
return etype
else:
ret = self._etype2canonical.get(etype, None)
if ret is None:
raise DGLError('Edge type "{}" does not exist.'.format(etype))
if len(ret) != 3:
raise DGLError('Edge type "{}" is ambiguous. Please use canonical edge type '
'in the form of (srctype, etype, dsttype)'.format(etype))
return ret
def get_ntype_id(self, ntype):
"""Return the ID of the given node type.
ntype can also be None. If so, there should be only one node type in the
graph.
Parameters
----------
ntype : str
Node type
Returns
-------
int
"""
if ntype is None:
if len(self._ntype_map) != 1:
raise DGLError('Node type name must be specified if there are more than one '
'node types.')
return 0
return self._ntype_map[ntype]
def get_etype_id(self, etype):
"""Return the id of the given edge type.
etype can also be None. If so, there should be only one edge type in the
graph.
Parameters
----------
etype : str or tuple of str
Edge type
Returns
-------
int
"""
if etype is None:
if len(self._etype_map) != 1:
raise DGLError('Edge type name must be specified if there are more than one '
'edge types.')
return 0
return self._etype_map[etype]
def number_of_nodes(self, ntype=None):
"""Alias of :func:`num_nodes`"""
return self.num_nodes(ntype)
def number_of_edges(self, etype=None):
"""Alias of :func:`num_edges`"""
return self.num_edges(etype)
def num_nodes(self, ntype=None):
"""Return the total number of nodes in the distributed graph.
Parameters
----------
ntype : str, optional
The node type name. If given, it returns the number of nodes of the
type. If not given (default), it returns the total number of nodes of all types.
Returns
-------
int
The number of nodes
Examples
--------
>>> g = dgl.distributed.DistGraph('ogb-product')
>>> print(g.num_nodes())
2449029
"""
if ntype is None:
if len(self.ntypes) == 1:
return self._gpb._num_nodes(self.ntypes[0])
else:
return sum([self._gpb._num_nodes(ntype) for ntype in self.ntypes])
return self._gpb._num_nodes(ntype)
def num_edges(self, etype=None):
"""Return the total number of edges in the distributed graph.
Parameters
----------
etype : str or (str, str, str), optional
The type name of the edges. The allowed type name formats are:
* ``(str, str, str)`` for source node type, edge type and destination node type.
* or one ``str`` edge type name if the name can uniquely identify a
triplet format in the graph.
If not provided, return the total number of edges regardless of the types
in the graph.
Returns
-------
int
The number of edges
Examples
--------
>>> g = dgl.distributed.DistGraph('ogb-product')
>>> print(g.num_edges())
123718280
"""
if etype is None:
if len(self.etypes) == 1:
return self._gpb._num_edges(self.etypes[0])
else:
return sum([self._gpb._num_edges(etype) for etype in self.etypes])
return self._gpb._num_edges(etype)
def out_degrees(self, u=ALL):
"""Return the out-degree(s) of the given nodes.
It computes the out-degree(s).
It does not support heterogeneous graphs yet.
Parameters
----------
u : node IDs
The node IDs. The allowed formats are:
* ``int``: A single node.
* Int Tensor: Each element is a node ID. The tensor must have the same device type
and ID data type as the graph's.
* iterable[int]: Each element is a node ID.
If not given, return the out-degrees of all the nodes.
Returns
-------
int or Tensor
The out-degree(s) of the node(s) in a Tensor. The i-th element is the out-degree
of the i-th input node. If :attr:`u` is an ``int``, return an ``int`` too.
Examples
--------
The following example uses PyTorch backend.
>>> import dgl
>>> import torch
Query for all nodes.
>>> g.out_degrees()
tensor([2, 2, 0, 0])
Query for nodes 1 and 2.
>>> g.out_degrees(torch.tensor([1, 2]))
tensor([2, 0])
See Also
--------
in_degrees
"""
if is_all(u):
u = F.arange(0, self.number_of_nodes())
return dist_out_degrees(self, u)
def in_degrees(self, v=ALL):
"""Return the in-degree(s) of the given nodes.
It computes the in-degree(s).
It does not support heterogeneous graphs yet.
Parameters
----------
v : node IDs
The node IDs. The allowed formats are:
* ``int``: A single node.
* Int Tensor: Each element is a node ID. The tensor must have the same device type
and ID data type as the graph's.
* iterable[int]: Each element is a node ID.
If not given, return the in-degrees of all the nodes.
Returns
-------
int or Tensor
The in-degree(s) of the node(s) in a Tensor. The i-th element is the in-degree
of the i-th input node. If :attr:`v` is an ``int``, return an ``int`` too.
Examples
--------
The following example uses PyTorch backend.
>>> import dgl
>>> import torch
Query for all nodes.
>>> g.in_degrees()
tensor([0, 2, 1, 1])
Query for nodes 1 and 2.
>>> g.in_degrees(torch.tensor([1, 2]))
tensor([2, 1])
See Also
--------
out_degrees
"""
if is_all(v):
v = F.arange(0, self.number_of_nodes())
return dist_in_degrees(self, v)
def node_attr_schemes(self):
"""Return the node feature schemes.
Each feature scheme is a named tuple that stores the shape and data type
of the node feature.
Returns
-------
dict of str to schemes
The schemes of node feature columns.
Examples
--------
The following uses PyTorch backend.
>>> g.node_attr_schemes()
{'h': Scheme(shape=(4,), dtype=torch.float32)}
See Also
--------
edge_attr_schemes
"""
schemes = {}
for key in self.ndata:
schemes[key] = infer_scheme(self.ndata[key])
return schemes
def edge_attr_schemes(self):
"""Return the edge feature schemes.
Each feature scheme is a named tuple that stores the shape and data type
of the edge feature.
Returns
-------
dict of str to schemes
The schemes of edge feature columns.
Examples
--------
The following uses PyTorch backend.
>>> g.edge_attr_schemes()
{'h': Scheme(shape=(4,), dtype=torch.float32)}
See Also
--------
node_attr_schemes
"""
schemes = {}
for key in self.edata:
schemes[key] = infer_scheme(self.edata[key])
return schemes
def rank(self):
''' The rank of the current DistGraph.
This returns a unique number to identify the DistGraph object among all of
the client processes.
Returns
-------
int
The rank of the current DistGraph.
'''
return role.get_global_rank()
def find_edges(self, edges, etype=None):
""" Given an edge ID array, return the source
and destination node ID array ``s`` and ``d``. ``s[i]`` and ``d[i]``
are source and destination node ID for edge ``eid[i]``.
Parameters
----------
edges : Int Tensor
Each element is an ID. The tensor must have the same device type
and ID data type as the graph's.
etype : str or (str, str, str), optional
The type names of the edges. The allowed type name formats are:
* ``(str, str, str)`` for source node type, edge type and destination node type.
* or one ``str`` edge type name if the name can uniquely identify a
triplet format in the graph.
Can be omitted if the graph has only one type of edges.
Returns
-------
tensor
The source node ID array.
tensor
The destination node ID array.
"""
if etype is None:
assert len(self.etypes) == 1, 'find_edges requires etype for heterogeneous graphs.'
gpb = self.get_partition_book()
if len(gpb.etypes) > 1:
# if etype is a canonical edge type (str, str, str), extract the edge type
if len(etype) == 3:
etype = etype[1]
edges = gpb.map_to_homo_eid(edges, etype)
src, dst = dist_find_edges(self, edges)
if len(gpb.ntypes) > 1:
_, src = gpb.map_to_per_ntype(src)
_, dst = gpb.map_to_per_ntype(dst)
return src, dst
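    # Illustrative usage sketch (editor's addition, not part of the DGL source), assuming an
    # initialized distributed context and a homogeneous DistGraph ``g``:
    #
    #   >>> eids = F.arange(0, 10)
    #   >>> src, dst = g.find_edges(eids)
    #
    # For a heterogeneous graph, pass ``etype`` (or a canonical edge type) so that the edge
    # IDs can first be mapped to the homogeneous ID space.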
def edge_subgraph(self, edges, relabel_nodes=True, store_ids=True):
"""Return a subgraph induced on the given edges.
An edge-induced subgraph is equivalent to creating a new graph using the given
edges. In addition to extracting the subgraph, DGL also copies the features
of the extracted nodes and edges to the resulting graph. The copy is *lazy*
and incurs data movement only when needed.
If the graph is heterogeneous, DGL extracts a subgraph per relation and composes
them as the resulting graph. Thus, the resulting graph has the same set of relations
as the input one.
Parameters
----------
edges : Int Tensor or dict[(str, str, str), Int Tensor]
The edges to form the subgraph. Each element is an edge ID. The tensor must have
the same device type and ID data type as the graph's.
If the graph is homogeneous, one can directly pass an Int Tensor.
Otherwise, the argument must be a dictionary with keys being edge types
and values being the edge IDs in the above formats.
relabel_nodes : bool, optional
If True, it will remove the isolated nodes and relabel the incident nodes in the
extracted subgraph.
store_ids : bool, optional
If True, it will store the raw IDs of the extracted edges in the ``edata`` of the
resulting graph under name ``dgl.EID``; if ``relabel_nodes`` is ``True``, it will
also store the raw IDs of the incident nodes in the ``ndata`` of the resulting
graph under name ``dgl.NID``.
Returns
-------
G : DGLGraph
The subgraph.
"""
if isinstance(edges, dict):
# TODO(zhengda) we need to directly generate subgraph of all relations with
# one invocation.
# Check whether the dict is keyed by canonical edge types (tuples) or by edge type names.
if isinstance(next(iter(edges)), tuple):
subg = {etype: self.find_edges(edges[etype], etype[1]) for etype in edges}
else:
subg = {}
for etype in edges:
assert len(self._etype2canonical[etype]) == 3, \
'the etype in input edges is ambiguous'
subg[self._etype2canonical[etype]] = self.find_edges(edges[etype], etype)
num_nodes = {ntype: self.number_of_nodes(ntype) for ntype in self.ntypes}
subg = dgl_heterograph(subg, num_nodes_dict=num_nodes)
for etype in edges:
subg.edges[etype].data[EID] = edges[etype]
else:
assert len(self.etypes) == 1
subg = self.find_edges(edges)
subg = dgl_graph(subg, num_nodes=self.number_of_nodes())
subg.edata[EID] = edges
if relabel_nodes:
subg = compact_graphs(subg)
assert store_ids, 'edge_subgraph always stores original node/edge IDs.'
return subg
def get_partition_book(self):
"""Get the partition information.
Returns
-------
GraphPartitionBook
Object that stores all graph partition information.
"""
return self._gpb
def get_node_partition_policy(self, ntype):
"""Get the partition policy for a node type.
When creating a new distributed tensor, we need to provide a partition policy
that indicates how to distribute data of the distributed tensor in a cluster
of machines. When we load a distributed graph in the cluster, we have pre-defined
partition policies for each node type and each edge type. By providing
the node type, we can refer to the pre-defined partition policy for the node type.
Parameters
----------
ntype : str
The node type
Returns
-------
PartitionPolicy
The partition policy for the node type.
"""
return NodePartitionPolicy(self.get_partition_book(), ntype)
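    # Illustrative sketch (editor's addition, not part of the DGL source): the returned policy
    # is typically passed to ``DistTensor`` when creating new per-node data, e.g. an embedding
    # of width 16 (the tensor name 'example_emb' below is a placeholder):
    #
    #   >>> policy = g.get_node_partition_policy(g.ntypes[0])
    #   >>> emb = DistTensor((g.number_of_nodes(), 16), F.float32, 'example_emb',
    #   ...                  part_policy=policy)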
def get_edge_partition_policy(self, etype):
"""Get the partition policy for an edge type.
When creating a new distributed tensor, we need to provide a partition policy
that indicates how to distribute data of the distributed tensor in a cluster
of machines. When we load a distributed graph in the cluster, we have pre-defined
partition policies for each node type and each edge type. By providing
the edge type, we can refer to the pre-defined partition policy for the edge type.
Parameters
----------
etype : str
The edge type
Returns
-------
PartitionPolicy
The partition policy for the edge type.
"""
return EdgePartitionPolicy(self.get_partition_book(), etype)
def barrier(self):
'''Barrier for all client nodes.
This API blocks the current process until all the clients invoke this API.
Please use this API with caution.
'''
self._client.barrier()
def _get_ndata_names(self, ntype=None):
''' Get the names of all node data.
'''
names = self._client.gdata_name_list()
ndata_names = []
for name in names:
name = parse_hetero_data_name(name)
right_type = (name.get_type() == ntype) if ntype is not None else True
if name.is_node() and right_type:
ndata_names.append(name)
return ndata_names
def _get_edata_names(self, etype=None):
''' Get the names of all edge data.
'''
names = self._client.gdata_name_list()
edata_names = []
for name in names:
name = parse_hetero_data_name(name)
right_type = (name.get_type() == etype) if etype is not None else True
if name.is_edge() and right_type:
edata_names.append(name)
return edata_names
def _get_overlap(mask_arr, ids):
""" Select the IDs given a boolean mask array.
The boolean mask array indicates all of the IDs to be selected. We want to
find the overlap between the IDs selected by the boolean mask array and
the ID array.
Parameters
----------
mask_arr : 1D tensor
A boolean mask array.
ids : 1D tensor
A vector with IDs.
Returns
-------
1D tensor
The selected IDs.
"""
if isinstance(mask_arr, DistTensor):
masks = mask_arr[ids]
return F.boolean_mask(ids, masks)
else:
masks = F.gather_row(F.tensor(mask_arr), ids)
return F.boolean_mask(ids, masks)
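# Hedged numpy-only sketch (editor's addition, not part of the DGL source): the overlap
# computed above boils down to keeping the candidate IDs whose mask entry is set.
def _example_overlap_selection():
    mask = np.array([0, 1, 1, 0, 1], dtype=bool)   # IDs 1, 2 and 4 are selected overall
    ids = np.array([0, 2, 4])                      # candidate IDs to intersect with
    return ids[mask[ids]]                          # -> array([2, 4])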
def _split_local(partition_book, rank, elements, local_eles):
''' Split the input element list with respect to data locality.
'''
num_clients = role.get_num_trainers()
num_client_per_part = num_clients // partition_book.num_partitions()
if rank is None:
rank = role.get_trainer_rank()
assert rank < num_clients, \
'The input rank ({}) is incorrect. #Trainers: {}'.format(rank, num_clients)
# all ranks of the clients on the same machine are in a contiguous range.
client_id_in_part = rank % num_client_per_part
local_eles = _get_overlap(elements, local_eles)
# get a subset for the local client.
size = len(local_eles) // num_client_per_part
# if this isn't the last client in the partition.
if client_id_in_part + 1 < num_client_per_part:
return local_eles[(size * client_id_in_part):(size * (client_id_in_part + 1))]
else:
return local_eles[(size * client_id_in_part):]
def _even_offset(n, k):
''' Split an array of length n into k segments such that the difference of their lengths is
at most 1. Return the offset of each segment.
'''
eles_per_part = n // k
offset = np.array([0] + [eles_per_part] * k, dtype=int)
offset[1 : n - eles_per_part * k + 1] += 1
return np.cumsum(offset)
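# Worked example (editor's addition, not part of the DGL source): splitting n=10 elements
# into k=3 segments gives segment sizes 4, 3, 3, i.e. cumulative offsets [0, 4, 7, 10];
# segment i is then the slice [offset[i], offset[i + 1]).
def _example_even_offset():
    offsets = _even_offset(10, 3)
    assert list(offsets) == [0, 4, 7, 10]
    return offsets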
def _split_even_to_part(partition_book, elements):
''' Split the input element list evenly.
'''
# here we divide the element list as evenly as possible. If we use range partitioning,
# the split results also respect the data locality. Range partitioning is the default
# strategy.
# TODO(zhengda) we need another way to divide the list for other partitioning strategy.
if isinstance(elements, DistTensor):
nonzero_count = elements.count_nonzero()
else:
elements = F.tensor(elements)
nonzero_count = F.count_nonzero(elements)
# compute the offset of each split and ensure that the difference of the partition sizes
# is at most 1.
offsets = _even_offset(nonzero_count, partition_book.num_partitions())
assert offsets[-1] == nonzero_count
# Get the elements that belong to the partition.
partid = partition_book.partid
left, right = offsets[partid], offsets[partid + 1]
x = y = 0
num_elements = len(elements)
block_size = num_elements // partition_book.num_partitions()
part_eles = None
# compute the nonzero tensor of each partition instead of the whole tensor to save memory
for idx in range(0, num_elements, block_size):
nonzero_block = F.nonzero_1d(elements[idx:min(idx+block_size, num_elements)])
x = y
y += len(nonzero_block)
if y > left and x < right:
start = max(x, left) - x
end = min(y, right) - x
tmp = nonzero_block[start:end] + idx
if part_eles is None:
part_eles = tmp
else:
part_eles = F.cat((part_eles, tmp), 0)
elif x >= right:
break
return part_eles
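# Hedged numpy-only sketch (editor's addition, not part of the DGL source): conceptually,
# partition ``partid`` owns the [left, right) slice of the *nonzero positions* of the mask;
# the block-wise loop above computes exactly that slice without materializing the whole
# nonzero tensor at once.
def _example_even_nonzero_split():
    mask = np.array([1, 0, 1, 1, 0, 1, 1, 0, 0, 1])
    nonzero = np.nonzero(mask)[0]               # positions of the selected elements
    offsets = _even_offset(len(nonzero), 2)     # two partitions -> offsets [0, 3, 6]
    part0 = nonzero[offsets[0]:offsets[1]]      # array([0, 2, 3])
    part1 = nonzero[offsets[1]:offsets[2]]      # array([5, 6, 9])
    return part0, part1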
def _split_random_within_part(partition_book, rank, part_eles):
# If there is more than one client in a partition, we need to randomly select a subset of
# elements in the partition for a client. We have to make sure that the set of elements
# for different clients are disjoint.
num_clients = role.get_num_trainers()
num_client_per_part = num_clients // partition_book.num_partitions()
if num_client_per_part == 1:
return part_eles
if rank is None:
rank = role.get_trainer_rank()
assert rank < num_clients, \
'The input rank ({}) is incorrect. #Trainers: {}'.format(rank, num_clients)
client_id_in_part = rank % num_client_per_part
offset = _even_offset(len(part_eles), num_client_per_part)
# We set the random seed for each partition, so that each process (client) in a partition
# permutes the elements in the partition in the same way, so each process gets a disjoint subset
# of elements.
np.random.seed(partition_book.partid)
rand_idx = np.random.permutation(len(part_eles))
rand_idx = rand_idx[offset[client_id_in_part] : offset[client_id_in_part + 1]]
idx, _ = F.sort_1d(F.tensor(rand_idx))
return F.gather_row(part_eles, idx)
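# Hedged numpy-only sketch (editor's addition, not part of the DGL source): because every
# client in a partition seeds the RNG with the same partition ID, all of them compute the
# same permutation and just take different contiguous slices of it, so the per-client
# subsets are disjoint and together cover the whole partition.
def _example_disjoint_random_split():
    part_eles = np.arange(100, 110)             # 10 elements owned by this partition
    offsets = _even_offset(len(part_eles), 2)   # two clients per partition -> [0, 5, 10]
    np.random.seed(0)                           # both clients use the same seed
    perm = np.random.permutation(len(part_eles))
    client0 = part_eles[np.sort(perm[offsets[0]:offsets[1]])]
    client1 = part_eles[np.sort(perm[offsets[1]:offsets[2]])]
    return client0, client1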
def _split_by_trainer_id(partition_book, part_eles, trainer_id,
num_client_per_part, client_id_in_part):
# TODO(zhengda): MXNet cannot deal with empty tensors, which makes the implementation
# much more difficult. Let's just use numpy for the computation for now. We just
# perform operations on vectors. It shouldn't be too difficult.
trainer_id = F.asnumpy(trainer_id)
part_eles = F.asnumpy(part_eles)
part_id = trainer_id // num_client_per_part
trainer_id = trainer_id % num_client_per_part
local_eles = part_eles[np.nonzero(part_id[part_eles] == partition_book.partid)[0]]
# these are the IDs of the local elements in the partition. The IDs are global IDs.
remote_eles = part_eles[np.nonzero(part_id[part_eles] != partition_book.partid)[0]]
# these are the IDs of the remote elements in the partition. The IDs are global IDs.
local_eles_idx = np.concatenate(
[np.nonzero(trainer_id[local_eles] == i)[0] for i in range(num_client_per_part)],
# trainer_id[local_eles] is the trainer ids of local nodes in the partition and we
# pick out the indices where the node belongs to each trainer i respectively, and
# concatenate them.
axis=0
)
# `local_eles_idx` is used to sort `local_eles` according to `trainer_id`. It is a
# permutation of 0...(len(local_eles)-1)
local_eles = local_eles[local_eles_idx]
# evenly split local nodes to trainers
local_offsets = _even_offset(len(local_eles), num_client_per_part)
# evenly split remote nodes to trainers
remote_offsets = _even_offset(len(remote_eles), num_client_per_part)
client_local_eles = local_eles[
local_offsets[client_id_in_part]:local_offsets[client_id_in_part + 1]]
client_remote_eles = remote_eles[
remote_offsets[client_id_in_part]:remote_offsets[client_id_in_part + 1]]
client_eles = np.concatenate([client_local_eles, client_remote_eles], axis=0)
return F.tensor(client_eles)
def node_split(nodes, partition_book=None, ntype='_N', rank=None, force_even=True,
node_trainer_ids=None):
''' Split nodes and return a subset for the local rank.
This function splits the input nodes based on the partition book and
returns a subset of nodes for the local rank. This method is used for
dividing workloads for distributed training.
The input nodes are stored as a vector of masks. The length of the vector is
the same as the number of nodes in a graph; 1 indicates that the vertex in
the corresponding location exists.
There are two strategies to split the nodes. By default, it splits the nodes
in a way to maximize data locality. That is, all nodes that belong to a process
are returned. If ``force_even`` is set to true, the nodes are split evenly so
that each process gets almost the same number of nodes.
When ``force_even`` is True, the data locality is still preserved if a graph is partitioned
with Metis and the node/edge IDs are shuffled.
In this case, the majority of the nodes returned for a process are the ones that
belong to the process. If node/edge IDs are not shuffled, data locality is not guaranteed.
Parameters
----------
nodes : 1D tensor or DistTensor
A boolean mask vector that indicates input nodes.
partition_book : GraphPartitionBook, optional
The graph partition book
ntype : str, optional
The node type of the input nodes.
rank : int, optional
The rank of a process. If not given, the rank of the current process is used.
force_even : bool, optional
Force the nodes to be split evenly.
node_trainer_ids : 1D tensor or DistTensor, optional
If not None, split the nodes to the trainers on the same machine according to
trainer IDs assigned to each node. Otherwise, split randomly.
Returns
-------
1D-tensor
The vector of node IDs that belong to the rank.
'''
if not isinstance(nodes, DistTensor):
assert partition_book is not None, 'Regular tensor requires a partition book.'
elif partition_book is None:
partition_book = nodes.part_policy.partition_book
assert len(nodes) == partition_book._num_nodes(ntype), \
'The length of boolean mask vector should be the number of nodes in the graph.'
if rank is None:
rank = role.get_trainer_rank()
if force_even:
num_clients = role.get_num_trainers()
num_client_per_part = num_clients // partition_book.num_partitions()
assert num_clients % partition_book.num_partitions() == 0, \
'The total number of clients should be multiple of the number of partitions.'
part_nid = _split_even_to_part(partition_book, nodes)
if num_client_per_part == 1:
return part_nid
elif node_trainer_ids is None:
return _split_random_within_part(partition_book, rank, part_nid)
else:
trainer_id = node_trainer_ids[0:len(node_trainer_ids)]
max_trainer_id = F.as_scalar(F.reduce_max(trainer_id)) + 1
if max_trainer_id > num_clients:
# We hope the partition scheme with trainer_id could be used when the number of
# trainers is less than the `num_trainers_per_machine` previously assigned during
# partitioning.
assert max_trainer_id % num_clients == 0
trainer_id //= (max_trainer_id // num_clients)
client_id_in_part = rank % num_client_per_part
return _split_by_trainer_id(partition_book, part_nid, trainer_id,
num_client_per_part, client_id_in_part)
else:
# Get all nodes that belong to the rank.
local_nids = partition_book.partid2nids(partition_book.partid)
return _split_local(partition_book, rank, nodes, local_nids)
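# Illustrative usage sketch (editor's addition, not part of the DGL source): in a training
# script, a boolean 'train_mask' node feature of a DistGraph ``g`` is typically split so
# that each trainer iterates only over its own share of the training nodes. This assumes
# dgl.distributed.initialize() has been called and that 'train_mask' exists in g.ndata.
def _example_split_train_nodes(g):
    train_nids = node_split(g.ndata['train_mask'], g.get_partition_book(), force_even=True)
    return train_nids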
def edge_split(edges, partition_book=None, etype='_E', rank=None, force_even=True,
edge_trainer_ids=None):
''' Split edges and return a subset for the local rank.
This function splits the input edges based on the partition book and
returns a subset of edges for the local rank. This method is used for
dividing workloads for distributed training.
The input edges can be stored as a vector of masks. The length of the vector is
the same as the number of edges in a graph; 1 indicates that the edge in
the corresponding location exists.
There are two strategies to split the edges. By default, it splits the edges
in a way to maximize data locality. That is, all edges that belong to a process
are returned. If ``force_even`` is set to true, the edges are split evenly so
that each process gets almost the same number of edges.
When ``force_even`` is True, the data locality is still preserved if a graph is partitioned
with Metis and the node/edge IDs are shuffled.
In this case, the majority of the edges returned for a process are the ones that
belong to the process. If node/edge IDs are not shuffled, data locality is not guaranteed.
Parameters
----------
edges : 1D tensor or DistTensor
A boolean mask vector that indicates input edges.
partition_book : GraphPartitionBook, optional
The graph partition book
etype : str, optional
The edge type of the input edges.
rank : int, optional
The rank of a process. If not given, the rank of the current process is used.
force_even : bool, optional
Force the edges to be split evenly.
edge_trainer_ids : 1D tensor or DistTensor, optional
If not None, split the edges to the trainers on the same machine according to
trainer IDs assigned to each edge. Otherwise, split randomly.
Returns
-------
1D-tensor
The vector of edge IDs that belong to the rank.
'''
if not isinstance(edges, DistTensor):
assert partition_book is not None, 'Regular tensor requires a partition book.'
elif partition_book is None:
partition_book = edges.part_policy.partition_book
assert len(edges) == partition_book._num_edges(etype), \
'The length of boolean mask vector should be the number of edges in the graph.'
if rank is None:
rank = role.get_trainer_rank()
if force_even:
num_clients = role.get_num_trainers()
num_client_per_part = num_clients // partition_book.num_partitions()
assert num_clients % partition_book.num_partitions() == 0, \
'The total number of clients should be multiple of the number of partitions.'
part_eid = _split_even_to_part(partition_book, edges)
if num_client_per_part == 1:
return part_eid
elif edge_trainer_ids is None:
return _split_random_within_part(partition_book, rank, part_eid)
else:
trainer_id = edge_trainer_ids[0:len(edge_trainer_ids)]
max_trainer_id = F.as_scalar(F.reduce_max(trainer_id)) + 1
if max_trainer_id > num_clients:
# We hope the partition scheme with trainer_id could be used when the number of
# trainers is less than the `num_trainers_per_machine` previously assigned during
# partitioning.
assert max_trainer_id % num_clients == 0
trainer_id //= (max_trainer_id // num_clients)
client_id_in_part = rank % num_client_per_part
return _split_by_trainer_id(partition_book, part_eid, trainer_id,
num_client_per_part, client_id_in_part)
else:
# Get all edges that belong to the rank.
local_eids = partition_book.partid2eids(partition_book.partid)
return _split_local(partition_book, rank, edges, local_eids)
rpc.register_service(INIT_GRAPH, InitGraphRequest, InitGraphResponse)
| 38.560772
| 100
| 0.622344
|
from collections.abc import MutableMapping
from collections import namedtuple
import os
import numpy as np
from ..heterograph import DGLHeteroGraph
from ..convert import heterograph as dgl_heterograph
from ..convert import graph as dgl_graph
from ..transform import compact_graphs
from .. import heterograph_index
from .. import backend as F
from ..base import NID, EID, NTYPE, ETYPE, ALL, is_all
from .kvstore import KVServer, get_kvstore
from .._ffi.ndarray import empty_shared_mem
from ..frame import infer_scheme
from .partition import load_partition, load_partition_book
from .graph_partition_book import PartitionPolicy, get_shared_mem_partition_book
from .graph_partition_book import HeteroDataName, parse_hetero_data_name
from .graph_partition_book import NodePartitionPolicy, EdgePartitionPolicy
from .shared_mem_utils import _to_shared_mem, _get_ndata_path, _get_edata_path, DTYPE_DICT
from . import rpc
from . import role
from .server_state import ServerState
from .rpc_server import start_server
from .graph_services import find_edges as dist_find_edges
from .graph_services import out_degrees as dist_out_degrees
from .graph_services import in_degrees as dist_in_degrees
from .dist_tensor import DistTensor
INIT_GRAPH = 800001
class InitGraphRequest(rpc.Request):
def __init__(self, graph_name):
self._graph_name = graph_name
def __getstate__(self):
return self._graph_name
def __setstate__(self, state):
self._graph_name = state
def process_request(self, server_state):
if server_state.graph is None:
server_state.graph = _get_graph_from_shared_mem(self._graph_name)
return InitGraphResponse(self._graph_name)
class InitGraphResponse(rpc.Response):
def __init__(self, graph_name):
self._graph_name = graph_name
def __getstate__(self):
return self._graph_name
def __setstate__(self, state):
self._graph_name = state
def _copy_graph_to_shared_mem(g, graph_name, graph_format):
new_g = g.shared_memory(graph_name, formats=graph_format)
new_g.ndata['inner_node'] = _to_shared_mem(g.ndata['inner_node'],
_get_ndata_path(graph_name, 'inner_node'))
new_g.ndata[NID] = _to_shared_mem(g.ndata[NID], _get_ndata_path(graph_name, NID))
new_g.edata['inner_edge'] = _to_shared_mem(g.edata['inner_edge'],
_get_edata_path(graph_name, 'inner_edge'))
new_g.edata[EID] = _to_shared_mem(g.edata[EID], _get_edata_path(graph_name, EID))
new_g.edata[ETYPE] = _to_shared_mem(g.edata[ETYPE], _get_edata_path(graph_name, ETYPE))
return new_g
FIELD_DICT = {'inner_node': F.int32,
'inner_edge': F.int32,
NID: F.int64,
EID: F.int64,
NTYPE: F.int32,
ETYPE: F.int32}
def _get_shared_mem_ndata(g, graph_name, name):
shape = (g.number_of_nodes(),)
dtype = FIELD_DICT[name]
dtype = DTYPE_DICT[dtype]
data = empty_shared_mem(_get_ndata_path(graph_name, name), False, shape, dtype)
dlpack = data.to_dlpack()
return F.zerocopy_from_dlpack(dlpack)
def _get_shared_mem_edata(g, graph_name, name):
shape = (g.number_of_edges(),)
dtype = FIELD_DICT[name]
dtype = DTYPE_DICT[dtype]
data = empty_shared_mem(_get_edata_path(graph_name, name), False, shape, dtype)
dlpack = data.to_dlpack()
return F.zerocopy_from_dlpack(dlpack)
def _get_graph_from_shared_mem(graph_name):
g, ntypes, etypes = heterograph_index.create_heterograph_from_shared_memory(graph_name)
if g is None:
return None
g = DGLHeteroGraph(g, ntypes, etypes)
g.ndata['inner_node'] = _get_shared_mem_ndata(g, graph_name, 'inner_node')
g.ndata[NID] = _get_shared_mem_ndata(g, graph_name, NID)
g.edata['inner_edge'] = _get_shared_mem_edata(g, graph_name, 'inner_edge')
g.edata[EID] = _get_shared_mem_edata(g, graph_name, EID)
g.edata[ETYPE] = _get_shared_mem_edata(g, graph_name, ETYPE)
return g
NodeSpace = namedtuple('NodeSpace', ['data'])
EdgeSpace = namedtuple('EdgeSpace', ['data'])
class HeteroNodeView(object):
__slots__ = ['_graph']
def __init__(self, graph):
self._graph = graph
def __getitem__(self, key):
assert isinstance(key, str)
return NodeSpace(data=NodeDataView(self._graph, key))
class HeteroEdgeView(object):
__slots__ = ['_graph']
def __init__(self, graph):
self._graph = graph
def __getitem__(self, key):
assert isinstance(key, str)
return EdgeSpace(data=EdgeDataView(self._graph, key))
class NodeDataView(MutableMapping):
__slots__ = ['_graph', '_data']
def __init__(self, g, ntype=None):
self._graph = g
names = g._get_ndata_names(ntype)
if ntype is None:
self._data = g._ndata_store
else:
if ntype in g._ndata_store:
self._data = g._ndata_store[ntype]
else:
self._data = {}
g._ndata_store[ntype] = self._data
for name in names:
assert name.is_node()
policy = PartitionPolicy(name.policy_str, g.get_partition_book())
dtype, shape, _ = g._client.get_data_meta(str(name))
self._data[name.get_name()] = DistTensor(shape, dtype, name.get_name(),
part_policy=policy)
def _get_names(self):
return list(self._data.keys())
def __getitem__(self, key):
return self._data[key]
def __setitem__(self, key, val):
self._data[key] = val
def __delitem__(self, key):
del self._data[key]
def __len__(self):
# It's not called frequently. It should be fine.
return len(self._data)
def __iter__(self):
return iter(self._data)
def __repr__(self):
reprs = {}
for name in self._data:
dtype = F.dtype(self._data[name])
shape = F.shape(self._data[name])
reprs[name] = 'DistTensor(shape={}, dtype={})'.format(str(shape), str(dtype))
return repr(reprs)
class EdgeDataView(MutableMapping):
__slots__ = ['_graph', '_data']
def __init__(self, g, etype=None):
self._graph = g
names = g._get_edata_names(etype)
if etype is None:
self._data = g._edata_store
else:
if etype in g._edata_store:
self._data = g._edata_store[etype]
else:
self._data = {}
g._edata_store[etype] = self._data
for name in names:
assert name.is_edge()
policy = PartitionPolicy(name.policy_str, g.get_partition_book())
dtype, shape, _ = g._client.get_data_meta(str(name))
self._data[name.get_name()] = DistTensor(shape, dtype, name.get_name(),
part_policy=policy)
def _get_names(self):
return list(self._data.keys())
def __getitem__(self, key):
return self._data[key]
def __setitem__(self, key, val):
self._data[key] = val
def __delitem__(self, key):
del self._data[key]
def __len__(self):
# It's not called frequently. It should be fine.
return len(self._data)
def __iter__(self):
return iter(self._data)
def __repr__(self):
reprs = {}
for name in self._data:
dtype = F.dtype(self._data[name])
shape = F.shape(self._data[name])
reprs[name] = 'DistTensor(shape={}, dtype={})'.format(str(shape), str(dtype))
return repr(reprs)
class DistGraphServer(KVServer):
def __init__(self, server_id, ip_config, num_servers,
num_clients, part_config, disable_shared_mem=False,
graph_format=('csc', 'coo')):
super(DistGraphServer, self).__init__(server_id=server_id,
ip_config=ip_config,
num_servers=num_servers,
num_clients=num_clients)
self.ip_config = ip_config
self.num_servers = num_servers
if self.is_backup_server():
self.gpb, graph_name, ntypes, etypes = load_partition_book(part_config, self.part_id)
self.client_g = None
else:
self.client_g, node_feats, edge_feats, self.gpb, graph_name, \
ntypes, etypes = load_partition(part_config, self.part_id)
print('load ' + graph_name)
self.client_g = self.client_g.formats(graph_format)
self.client_g.create_formats_()
if not disable_shared_mem:
self.client_g = _copy_graph_to_shared_mem(self.client_g, graph_name, graph_format)
if not disable_shared_mem:
self.gpb.shared_memory(graph_name)
assert self.gpb.partid == self.part_id
for ntype in ntypes:
node_name = HeteroDataName(True, ntype, None)
self.add_part_policy(PartitionPolicy(node_name.policy_str, self.gpb))
for etype in etypes:
edge_name = HeteroDataName(False, etype, None)
self.add_part_policy(PartitionPolicy(edge_name.policy_str, self.gpb))
if not self.is_backup_server():
for name in node_feats:
ntype, feat_name = name.split('/')
data_name = HeteroDataName(True, ntype, feat_name)
self.init_data(name=str(data_name), policy_str=data_name.policy_str,
data_tensor=node_feats[name])
for name in edge_feats:
etype, feat_name = name.split('/')
data_name = HeteroDataName(False, etype, feat_name)
self.init_data(name=str(data_name), policy_str=data_name.policy_str,
data_tensor=edge_feats[name])
def start(self):
server_state = ServerState(kv_store=self, local_g=self.client_g, partition_book=self.gpb)
print('start graph service on server {} for part {}'.format(self.server_id, self.part_id))
start_server(server_id=self.server_id,
ip_config=self.ip_config,
num_servers=self.num_servers,
num_clients=self.num_clients, server_state=server_state)
class DistGraph:
def __init__(self, graph_name, gpb=None, part_config=None):
self.graph_name = graph_name
self._gpb_input = gpb
if os.environ.get('DGL_DIST_MODE', 'standalone') == 'standalone':
assert part_config is not None, \
'When running in the standalone model, the partition config file is required'
self._client = get_kvstore()
assert self._client is not None, \
'Distributed module is not initialized. Please call dgl.distributed.initialize.'
g, node_feats, edge_feats, self._gpb, _, _, _ = load_partition(part_config, 0)
assert self._gpb.num_partitions() == 1, \
'The standalone mode can only work with the graph data with one partition'
if self._gpb is None:
self._gpb = gpb
self._g = g
for name in node_feats:
ntype, feat_name = name.split('/')
self._client.add_data(str(HeteroDataName(True, ntype, feat_name)),
node_feats[name],
NodePartitionPolicy(self._gpb, ntype=ntype))
for name in edge_feats:
etype, feat_name = name.split('/')
self._client.add_data(str(HeteroDataName(False, etype, feat_name)),
edge_feats[name],
EdgePartitionPolicy(self._gpb, etype=etype))
self._client.map_shared_data(self._gpb)
rpc.set_num_client(1)
else:
self._init()
for server_id in range(self._client.num_servers):
rpc.send_request(server_id, InitGraphRequest(graph_name))
for server_id in range(self._client.num_servers):
rpc.recv_response()
self._client.barrier()
self._ndata_store = {}
self._edata_store = {}
self._ndata = NodeDataView(self)
self._edata = EdgeDataView(self)
self._num_nodes = 0
self._num_edges = 0
for part_md in self._gpb.metadata():
self._num_nodes += int(part_md['num_nodes'])
self._num_edges += int(part_md['num_edges'])
self._ntype_map = {ntype:i for i, ntype in enumerate(self.ntypes)}
self._etype_map = {etype:i for i, etype in enumerate(self.etypes)}
eid = []
for etype in self.etypes:
type_eid = F.zeros((1,), F.int64, F.cpu())
eid.append(self._gpb.map_to_homo_eid(type_eid, etype))
eid = F.cat(eid, 0)
src, dst = dist_find_edges(self, eid)
src_tids, _ = self._gpb.map_to_per_ntype(src)
dst_tids, _ = self._gpb.map_to_per_ntype(dst)
self._canonical_etypes = []
etype_ids = F.arange(0, len(self.etypes))
for src_tid, etype_id, dst_tid in zip(src_tids, etype_ids, dst_tids):
src_tid = F.as_scalar(src_tid)
etype_id = F.as_scalar(etype_id)
dst_tid = F.as_scalar(dst_tid)
self._canonical_etypes.append((self.ntypes[src_tid], self.etypes[etype_id],
self.ntypes[dst_tid]))
self._etype2canonical = {}
for src_type, etype, dst_type in self._canonical_etypes:
if etype in self._etype2canonical:
self._etype2canonical[etype] = ()
else:
self._etype2canonical[etype] = (src_type, etype, dst_type)
def _init(self):
self._client = get_kvstore()
assert self._client is not None, \
'Distributed module is not initialized. Please call dgl.distributed.initialize.'
self._g = _get_graph_from_shared_mem(self.graph_name)
self._gpb = get_shared_mem_partition_book(self.graph_name, self._g)
if self._gpb is None:
self._gpb = self._gpb_input
self._client.map_shared_data(self._gpb)
def __getstate__(self):
return self.graph_name, self._gpb, self._canonical_etypes
def __setstate__(self, state):
self.graph_name, self._gpb_input, self._canonical_etypes = state
self._init()
self._etype2canonical = {}
for src_type, etype, dst_type in self._canonical_etypes:
if etype in self._etype2canonical:
self._etype2canonical[etype] = ()
else:
self._etype2canonical[etype] = (src_type, etype, dst_type)
self._ndata_store = {}
self._edata_store = {}
self._ndata = NodeDataView(self)
self._edata = EdgeDataView(self)
self._num_nodes = 0
self._num_edges = 0
for part_md in self._gpb.metadata():
self._num_nodes += int(part_md['num_nodes'])
self._num_edges += int(part_md['num_edges'])
@property
def local_partition(self):
return self._g
@property
def nodes(self):
return HeteroNodeView(self)
@property
def edges(self):
return HeteroEdgeView(self)
@property
def ndata(self):
assert len(self.ntypes) == 1, "ndata only works for a graph with one node type."
return self._ndata
@property
def edata(self):
assert len(self.etypes) == 1, "edata only works for a graph with one edge type."
return self._edata
@property
def idtype(self):
return F.int64
@property
def device(self):
# TODO(da?): describe when self._g is None and device shouldn't be called.
return F.cpu()
@property
def ntypes(self):
return self._gpb.ntypes
@property
def etypes(self):
return self._gpb.etypes
@property
def canonical_etypes(self):
return self._canonical_etypes
def to_canonical_etype(self, etype):
if etype is None:
if len(self.etypes) != 1:
raise DGLError('Edge type name must be specified if there are more than one '
'edge types.')
etype = self.etypes[0]
if isinstance(etype, tuple):
return etype
else:
ret = self._etype2canonical.get(etype, None)
if ret is None:
raise DGLError('Edge type "{}" does not exist.'.format(etype))
if len(ret) != 3:
raise DGLError('Edge type "{}" is ambiguous. Please use canonical edge type '
'in the form of (srctype, etype, dsttype)'.format(etype))
return ret
def get_ntype_id(self, ntype):
if ntype is None:
if len(self._ntype_map) != 1:
raise DGLError('Node type name must be specified if there are more than one '
'node types.')
return 0
return self._ntype_map[ntype]
def get_etype_id(self, etype):
if etype is None:
if len(self._etype_map) != 1:
raise DGLError('Edge type name must be specified if there are more than one '
'edge types.')
return 0
return self._etype_map[etype]
def number_of_nodes(self, ntype=None):
return self.num_nodes(ntype)
def number_of_edges(self, etype=None):
return self.num_edges(etype)
def num_nodes(self, ntype=None):
if ntype is None:
if len(self.ntypes) == 1:
return self._gpb._num_nodes(self.ntypes[0])
else:
return sum([self._gpb._num_nodes(ntype) for ntype in self.ntypes])
return self._gpb._num_nodes(ntype)
def num_edges(self, etype=None):
if etype is None:
if len(self.etypes) == 1:
return self._gpb._num_edges(self.etypes[0])
else:
return sum([self._gpb._num_edges(etype) for etype in self.etypes])
return self._gpb._num_edges(etype)
def out_degrees(self, u=ALL):
if is_all(u):
u = F.arange(0, self.number_of_nodes())
return dist_out_degrees(self, u)
def in_degrees(self, v=ALL):
if is_all(v):
v = F.arange(0, self.number_of_nodes())
return dist_in_degrees(self, v)
def node_attr_schemes(self):
schemes = {}
for key in self.ndata:
schemes[key] = infer_scheme(self.ndata[key])
return schemes
def edge_attr_schemes(self):
schemes = {}
for key in self.edata:
schemes[key] = infer_scheme(self.edata[key])
return schemes
def rank(self):
return role.get_global_rank()
def find_edges(self, edges, etype=None):
if etype is None:
assert len(self.etypes) == 1, 'find_edges requires etype for heterogeneous graphs.'
gpb = self.get_partition_book()
if len(gpb.etypes) > 1:
if len(etype) == 3:
etype = etype[1]
edges = gpb.map_to_homo_eid(edges, etype)
src, dst = dist_find_edges(self, edges)
if len(gpb.ntypes) > 1:
_, src = gpb.map_to_per_ntype(src)
_, dst = gpb.map_to_per_ntype(dst)
return src, dst
def edge_subgraph(self, edges, relabel_nodes=True, store_ids=True):
if isinstance(edges, dict):
if isinstance(edges, tuple):
subg = {etype: self.find_edges(edges[etype], etype[1]) for etype in edges}
else:
subg = {}
for etype in edges:
assert len(self._etype2canonical[etype]) == 3, \
'the etype in input edges is ambiguous'
subg[self._etype2canonical[etype]] = self.find_edges(edges[etype], etype)
num_nodes = {ntype: self.number_of_nodes(ntype) for ntype in self.ntypes}
subg = dgl_heterograph(subg, num_nodes_dict=num_nodes)
for etype in edges:
subg.edges[etype].data[EID] = edges[etype]
else:
assert len(self.etypes) == 1
subg = self.find_edges(edges)
subg = dgl_graph(subg, num_nodes=self.number_of_nodes())
subg.edata[EID] = edges
if relabel_nodes:
subg = compact_graphs(subg)
assert store_ids, 'edge_subgraph always stores original node/edge IDs.'
return subg
def get_partition_book(self):
return self._gpb
def get_node_partition_policy(self, ntype):
return NodePartitionPolicy(self.get_partition_book(), ntype)
def get_edge_partition_policy(self, etype):
return EdgePartitionPolicy(self.get_partition_book(), etype)
def barrier(self):
self._client.barrier()
def _get_ndata_names(self, ntype=None):
names = self._client.gdata_name_list()
ndata_names = []
for name in names:
name = parse_hetero_data_name(name)
right_type = (name.get_type() == ntype) if ntype is not None else True
if name.is_node() and right_type:
ndata_names.append(name)
return ndata_names
def _get_edata_names(self, etype=None):
names = self._client.gdata_name_list()
edata_names = []
for name in names:
name = parse_hetero_data_name(name)
right_type = (name.get_type() == etype) if etype is not None else True
if name.is_edge() and right_type:
edata_names.append(name)
return edata_names
def _get_overlap(mask_arr, ids):
if isinstance(mask_arr, DistTensor):
masks = mask_arr[ids]
return F.boolean_mask(ids, masks)
else:
masks = F.gather_row(F.tensor(mask_arr), ids)
return F.boolean_mask(ids, masks)
def _split_local(partition_book, rank, elements, local_eles):
num_clients = role.get_num_trainers()
num_client_per_part = num_clients // partition_book.num_partitions()
if rank is None:
rank = role.get_trainer_rank()
assert rank < num_clients, \
'The input rank ({}) is incorrect. #Trainers: {}'.format(rank, num_clients)
client_id_in_part = rank % num_client_per_part
local_eles = _get_overlap(elements, local_eles)
size = len(local_eles) // num_client_per_part
if client_id_in_part + 1 < num_client_per_part:
return local_eles[(size * client_id_in_part):(size * (client_id_in_part + 1))]
else:
return local_eles[(size * client_id_in_part):]
def _even_offset(n, k):
eles_per_part = n // k
offset = np.array([0] + [eles_per_part] * k, dtype=int)
offset[1 : n - eles_per_part * k + 1] += 1
return np.cumsum(offset)
def _split_even_to_part(partition_book, elements):
# here we divide the element list as evenly as possible. If we use range partitioning,
# the split results also respect the data locality. Range partitioning is the default
# strategy.
# TODO(zhengda) we need another way to divide the list for other partitioning strategy.
if isinstance(elements, DistTensor):
nonzero_count = elements.count_nonzero()
else:
elements = F.tensor(elements)
nonzero_count = F.count_nonzero(elements)
# compute the offset of each split and ensure that the difference of each partition size
# is 1.
offsets = _even_offset(nonzero_count, partition_book.num_partitions())
assert offsets[-1] == nonzero_count
# Get the elements that belong to the partition.
partid = partition_book.partid
left, right = offsets[partid], offsets[partid + 1]
x = y = 0
num_elements = len(elements)
block_size = num_elements // partition_book.num_partitions()
part_eles = None
# compute the nonzero tensor of each partition instead of whole tensor to save memory
for idx in range(0, num_elements, block_size):
nonzero_block = F.nonzero_1d(elements[idx:min(idx+block_size, num_elements)])
x = y
y += len(nonzero_block)
if y > left and x < right:
start = max(x, left) - x
end = min(y, right) - x
tmp = nonzero_block[start:end] + idx
if part_eles is None:
part_eles = tmp
else:
part_eles = F.cat((part_eles, tmp), 0)
elif x >= right:
break
return part_eles
def _split_random_within_part(partition_book, rank, part_eles):
# If there are more than one client in a partition, we need to randomly select a subset of
# elements in the partition for a client. We have to make sure that the set of elements
# for different clients are disjoint.
num_clients = role.get_num_trainers()
num_client_per_part = num_clients // partition_book.num_partitions()
if num_client_per_part == 1:
return part_eles
if rank is None:
rank = role.get_trainer_rank()
assert rank < num_clients, \
'The input rank ({}) is incorrect. #Trainers: {}'.format(rank, num_clients)
client_id_in_part = rank % num_client_per_part
offset = _even_offset(len(part_eles), num_client_per_part)
# We set the random seed for each partition, so that each process (client) in a partition
# permute the elements in a partition in the same way, so each process gets a disjoint subset
# of elements.
np.random.seed(partition_book.partid)
rand_idx = np.random.permutation(len(part_eles))
rand_idx = rand_idx[offset[client_id_in_part] : offset[client_id_in_part + 1]]
idx, _ = F.sort_1d(F.tensor(rand_idx))
return F.gather_row(part_eles, idx)
def _split_by_trainer_id(partition_book, part_eles, trainer_id,
num_client_per_part, client_id_in_part):
# TODO(zhengda): MXNet cannot deal with empty tensors, which makes the implementation
# much more difficult. Let's just use numpy for the computation for now. We just
trainer_id = F.asnumpy(trainer_id)
part_eles = F.asnumpy(part_eles)
part_id = trainer_id // num_client_per_part
trainer_id = trainer_id % num_client_per_part
local_eles = part_eles[np.nonzero(part_id[part_eles] == partition_book.partid)[0]]
# these are the Ids of the local elements in the partition. The Ids are global Ids.
remote_eles = part_eles[np.nonzero(part_id[part_eles] != partition_book.partid)[0]]
# these are the Ids of the remote nodes in the partition. The Ids are global Ids.
local_eles_idx = np.concatenate(
[np.nonzero(trainer_id[local_eles] == i)[0] for i in range(num_client_per_part)],
# trainer_id[local_eles] is the trainer ids of local nodes in the partition and we
# pick out the indices where the node belongs to each trainer i respectively, and
# concatenate them.
axis=0
)
# `local_eles_idx` is used to sort `local_eles` according to `trainer_id`. It is a
# permutation of 0...(len(local_eles)-1)
local_eles = local_eles[local_eles_idx]
# evenly split local nodes to trainers
local_offsets = _even_offset(len(local_eles), num_client_per_part)
# evenly split remote nodes to trainers
remote_offsets = _even_offset(len(remote_eles), num_client_per_part)
client_local_eles = local_eles[
local_offsets[client_id_in_part]:local_offsets[client_id_in_part + 1]]
client_remote_eles = remote_eles[
remote_offsets[client_id_in_part]:remote_offsets[client_id_in_part + 1]]
client_eles = np.concatenate([client_local_eles, client_remote_eles], axis=0)
return F.tensor(client_eles)
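# Editor's note: hypothetical values illustrating the grouping step above.
# Sorting element ids by the trainer that produced them keeps each client's
# slice aligned with "its own" data before the even split is applied:
#     >>> import numpy as np
#     >>> trainer_of = np.array([1, 0, 0, 1, 1, 0])  # trainer id per element
#     >>> np.concatenate([np.nonzero(trainer_of == i)[0] for i in range(2)])
#     array([1, 2, 5, 0, 3, 4])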
def node_split(nodes, partition_book=None, ntype='_N', rank=None, force_even=True,
node_trainer_ids=None):
if not isinstance(nodes, DistTensor):
assert partition_book is not None, 'Regular tensor requires a partition book.'
elif partition_book is None:
partition_book = nodes.part_policy.partition_book
assert len(nodes) == partition_book._num_nodes(ntype), \
'The length of boolean mask vector should be the number of nodes in the graph.'
if rank is None:
rank = role.get_trainer_rank()
if force_even:
num_clients = role.get_num_trainers()
num_client_per_part = num_clients // partition_book.num_partitions()
assert num_clients % partition_book.num_partitions() == 0, \
'The total number of clients should be multiple of the number of partitions.'
part_nid = _split_even_to_part(partition_book, nodes)
if num_client_per_part == 1:
return part_nid
elif node_trainer_ids is None:
return _split_random_within_part(partition_book, rank, part_nid)
else:
trainer_id = node_trainer_ids[0:len(node_trainer_ids)]
max_trainer_id = F.as_scalar(F.reduce_max(trainer_id)) + 1
if max_trainer_id > num_clients:
# We hope the partition scheme with trainer_id could be used when the number of
# trainers is less than the `num_trainers_per_machine` previously assigned during
# partitioning.
assert max_trainer_id % num_clients == 0
trainer_id //= (max_trainer_id // num_clients)
client_id_in_part = rank % num_client_per_part
return _split_by_trainer_id(partition_book, part_nid, trainer_id,
num_client_per_part, client_id_in_part)
else:
# Get all nodes that belong to the rank.
local_nids = partition_book.partid2nids(partition_book.partid)
return _split_local(partition_book, rank, nodes, local_nids)
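# Editor's note: a typical (hypothetical) call site in a distributed training
# script; names such as `g` (a DistGraph) are assumptions, not part of this
# module:
#     train_nids = node_split(g.ndata['train_mask'], g.get_partition_book(),
#                             force_even=True)
# Each trainer then iterates only over its own disjoint slice of the training
# nodes, and edge_split below plays the same role for edge masks.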
def edge_split(edges, partition_book=None, etype='_E', rank=None, force_even=True,
edge_trainer_ids=None):
if not isinstance(edges, DistTensor):
assert partition_book is not None, 'Regular tensor requires a partition book.'
elif partition_book is None:
partition_book = edges.part_policy.partition_book
assert len(edges) == partition_book._num_edges(etype), \
'The length of boolean mask vector should be the number of edges in the graph.'
if rank is None:
rank = role.get_trainer_rank()
if force_even:
num_clients = role.get_num_trainers()
num_client_per_part = num_clients // partition_book.num_partitions()
assert num_clients % partition_book.num_partitions() == 0, \
'The total number of clients should be multiple of the number of partitions.'
part_eid = _split_even_to_part(partition_book, edges)
if num_client_per_part == 1:
return part_eid
elif edge_trainer_ids is None:
return _split_random_within_part(partition_book, rank, part_eid)
else:
trainer_id = edge_trainer_ids[0:len(edge_trainer_ids)]
max_trainer_id = F.as_scalar(F.reduce_max(trainer_id)) + 1
if max_trainer_id > num_clients:
# We hope the partition scheme with trainer_id could be used when the number of
# trainers is less than the `num_trainers_per_machine` previously assigned during
# partitioning.
assert max_trainer_id % num_clients == 0
trainer_id //= (max_trainer_id // num_clients)
client_id_in_part = rank % num_client_per_part
return _split_by_trainer_id(partition_book, part_eid, trainer_id,
num_client_per_part, client_id_in_part)
else:
# Get all edges that belong to the rank.
local_eids = partition_book.partid2eids(partition_book.partid)
return _split_local(partition_book, rank, edges, local_eids)
rpc.register_service(INIT_GRAPH, InitGraphRequest, InitGraphResponse)
| true
| true
|
f718e88e36241f1a76f15b0b2e64f2b8c425d513
| 1,104
|
py
|
Python
|
OpenPNM/Algorithms/__init__.py
|
khayratk/OpenPNM
|
6c26d27dcc0152b5863e559085754a2183a483c2
|
[
"MIT"
] | null | null | null |
OpenPNM/Algorithms/__init__.py
|
khayratk/OpenPNM
|
6c26d27dcc0152b5863e559085754a2183a483c2
|
[
"MIT"
] | null | null | null |
OpenPNM/Algorithms/__init__.py
|
khayratk/OpenPNM
|
6c26d27dcc0152b5863e559085754a2183a483c2
|
[
"MIT"
] | 1
|
2020-07-02T02:21:10.000Z
|
2020-07-02T02:21:10.000Z
|
r"""
###############################################################################
:mod:`OpenPNM.Algorithms` -- Algorithms on Networks
###############################################################################
Contents
--------
This submodule contains algorithms for performing simulations on pore networks
Classes
-------
.. autoclass:: GenericAlgorithm
:members:
.. autoclass:: Drainage
:members:
.. autoclass:: InvasionPercolation
:members:
.. autoclass:: FickianDiffusion
:members:
.. autoclass:: StokesFlow
:members:
.. autoclass:: OhmicConduction
:members:
.. autoclass:: FourierConduction
:members:
"""
from .__GenericAlgorithm__ import GenericAlgorithm
from .__GenericLinearTransport__ import GenericLinearTransport
from .__FickianDiffusion__ import FickianDiffusion
from .__FourierConduction__ import FourierConduction
from .__OhmicConduction__ import OhmicConduction
from .__StokesFlow__ import StokesFlow
from .__OrdinaryPercolation__ import OrdinaryPercolation
from .__InvasionPercolation__ import InvasionPercolation
from .__Drainage__ import Drainage
| 24.533333
| 79
| 0.677536
|
from .__GenericAlgorithm__ import GenericAlgorithm
from .__GenericLinearTransport__ import GenericLinearTransport
from .__FickianDiffusion__ import FickianDiffusion
from .__FourierConduction__ import FourierConduction
from .__OhmicConduction__ import OhmicConduction
from .__StokesFlow__ import StokesFlow
from .__OrdinaryPercolation__ import OrdinaryPercolation
from .__InvasionPercolation__ import InvasionPercolation
from .__Drainage__ import Drainage
| true
| true
|
f718ea048b0cff1469fdb160a4f667207eea726b
| 8,324
|
py
|
Python
|
tests/test_syllabics_conversion.py
|
eddieantonio/nehiyawewin
|
a161b6ec5441ff56395abc6cc0f548fb81b14562
|
[
"MIT"
] | 6
|
2019-07-16T01:28:28.000Z
|
2021-07-07T15:11:04.000Z
|
tests/test_syllabics_conversion.py
|
eddieantonio/nehiyawewin
|
a161b6ec5441ff56395abc6cc0f548fb81b14562
|
[
"MIT"
] | 16
|
2018-07-16T00:48:17.000Z
|
2018-11-08T18:41:04.000Z
|
tests/test_syllabics_conversion.py
|
eddieantonio/crk_orthography
|
a161b6ec5441ff56395abc6cc0f548fb81b14562
|
[
"MIT"
] | null | null | null |
import pytest # type: ignore
from cree_sro_syllabics import sro2syllabics, syllabics2sro
COMBINING_CIRCUMFLEX = "\u0302"
@pytest.mark.parametrize(
"sro,syllabics",
[
("acimosis", "ᐊᒋᒧᓯᐢ"),
("atahk", "ᐊᑕᕽ"),
("mêriy", "ᒣᕒᐃᐩ"),
("wîstihkêw", "ᐑᐢᑎᐦᑫᐤ"),
("nêhiyawêwin", "ᓀᐦᐃᔭᐍᐏᐣ"),
("tirêyl", "ᑎᕒᐁᐩᓬ"),
("mitêh", "ᒥᑌᐦ"),
],
)
def test_single_words(sro, syllabics):
"""
Test single words with perfect SRO orthography.
"""
# Converting SRO to syllabics should work.
assert sro2syllabics(sro) == syllabics
# Converting syllabics to SRO should work.
assert syllabics2sro(syllabics) == sro
# With "perfect" orthography, each roundtrip should leave the input
# unchanged.
assert sro2syllabics(syllabics2sro(syllabics)) == syllabics
assert syllabics2sro(sro2syllabics(sro)) == sro
@pytest.mark.parametrize(
"sro,syllabics",
[
("Tân'si", "ᑖᓂᓯ"),
("Maskekosihk", "ᒪᐢᑫᑯᓯᕽ"),
],
)
def test_normalize_single_words(sro, syllabics):
"""
Test single word inputs with non-standard orthography.
"""
assert sro2syllabics(sro) == syllabics
def test_unicode_normalization():
"""
Test when the input string is not in NFC-normalized.
"""
water = "nipiy"
leaf = "ni" + COMBINING_CIRCUMFLEX + "piy"
assert water != leaf
assert sro2syllabics(water) != sro2syllabics(leaf)
assert sro2syllabics(water) == "ᓂᐱᐩ"
assert sro2syllabics(leaf) == "ᓃᐱᐩ"
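# Editor's aside (standard-library sketch, not part of the original tests):
# callers who want the two spellings above to compare equal can NFC-normalise
# before converting:
#     >>> import unicodedata
#     >>> unicodedata.normalize("NFC", "ni" + "\u0302" + "piy") == "nîpiy"
#     True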
@pytest.mark.parametrize(
"sro,syllabics",
[
("obviously english text", "obviously english text"),
("write nêhiyawêwin", "write ᓀᐦᐃᔭᐍᐏᐣ"),
("\t namoya tataspêyihtam. ", "\t ᓇᒧᔭ ᑕᑕᐢᐯᔨᐦᑕᒼ᙮ "),
],
)
def test_multiple_words(sro, syllabics):
"""
Test transcoding multiple words. The test inputs here can be trivially
converted back-and-forth.
"""
assert sro2syllabics(sro) == syllabics
assert syllabics2sro(syllabics) == sro
def test_alternate_y_final():
"""
From Wikipedia:
Some Plains Cree communities use a final for y which is different from the
usual western final. This is a superposed dot ᐝ, instead of the usual ᐩ,
as in ᓰᐱᐩ (ᓰᐱᐝ) sīpiy “river". When the dot y-final is placed after a
syllabic which has a w-dot, the two dots combine to form a colon-like
symbol, as in ᓅᐦᑖᐏᐩ (ᓅᐦᑖᐃ᛬) nōhtāwiy “my father".
"""
syllabics = "ᓰᐱᐝ"
sro = "sîpiy"
assert syllabics2sro(syllabics) == sro
@pytest.mark.parametrize(
"sro,syllabics",
[
("yōtinipēstāw", "ᔫᑎᓂᐯᐢᑖᐤ"),
("īkatē", "ᐄᑲᑌ"),
],
)
def test_macrons(sro, syllabics):
"""
Test that macrons can be converted
"""
assert sro2syllabics(sro) == syllabics
assert syllabics2sro(syllabics, produce_macrons=True) == sro
@pytest.mark.parametrize(
"sro,syllabics",
[
# NOTE: the embedded NARROW NO-BREAK SPACE (NNBSP) characters
# in the syllabics transliteration may not render properly in
# fixed-width fonts!
("paskwâwi-mostos", "ᐸᐢᒁᐏ ᒧᐢᑐᐢ"),
("amiskwaciy-waskahikan", "ᐊᒥᐢᑿᒋᐩ ᐘᐢᑲᐦᐃᑲᐣ"),
("kâ-mahihkani-pimohtêt isiyihkâsow", "ᑳ ᒪᐦᐃᐦᑲᓂ ᐱᒧᐦᑌᐟ ᐃᓯᔨᐦᑳᓱᐤ"),
],
)
def test_hyphens(sro, syllabics):
"""
Tests that intraword hyphens are converted to NARROW NO-BREAK SPACE
characters in the transliteration.
"""
assert (
sro2syllabics(sro)
== syllabics
== sro2syllabics(sro, hyphens="\N{NARROW NO-BREAK SPACE}")
)
assert syllabics2sro(syllabics) == sro
@pytest.mark.parametrize(
"sro,syllabics,hyphens,alt_syllabics",
[
("osk-âya", "ᐅᐢᑳᔭ", "", "ᐅᐢᐠᐋᔭ"),
# NOTE: this /still/ might not be the right transliteration, but
# the correct transliteration requires even more phonological knowledge,
# so I'm not even going to go there...
("miyw-âyâw", "ᒥᔼᔮᐤ", "", "ᒥᐩᐤᐋᔮᐤ"),
("pîhc-âyihk", "ᐲᐦᒑᔨᕽ", "", "ᐲᐦᐨᐋᔨᕽ"),
# NOTE: not orthographically correct, but demonstrates Sandhi in th-Cree
("wîhth-owin", "ᐑᐦᖪᐏᐣ", "", "ᐑᐦᙾᐅᐏᐣ"),
],
)
def test_sandhi(sro, syllabics, hyphens, alt_syllabics):
"""
Test that sandhi orthographic rule is applied when converting to
syllabics.
See: Wolvengrey 2001, pp. xxvi–xviii
"""
assert sro2syllabics(sro) == sro2syllabics(sro, sandhi=True) == syllabics
assert sro2syllabics(sro, sandhi=False, hyphens=hyphens) == alt_syllabics
@pytest.mark.parametrize(
"sro,syllabics",
[
("êtî nitisiyihkâson.", "ᐁᑏ ᓂᑎᓯᔨᐦᑳᓱᐣ᙮"),
('She told Dr. Thunder: "ninôhtêhkatân."', 'She told Dr. Thunder: "ᓂᓅᐦᑌᐦᑲᑖᐣ᙮"'),
("tânisi. êtî nitisiyihkâson. ", "ᑖᓂᓯ᙮ ᐁᑏ ᓂᑎᓯᔨᐦᑳᓱᐣ᙮ "),
],
)
def test_full_stop(sro, syllabics):
"""
Tests that full stops in SRO get converted into
<U+166E CANADIAN SYLLABICS FULL STOP>, and vice-versa.
"""
assert sro2syllabics(sro) == syllabics
assert syllabics2sro(syllabics) == sro
@pytest.mark.parametrize(
"original_syllabics,sro,clean_syllabics",
[
("ᐋᐧᐱ ᑭᐦᐃᐤ", "wâpi kihiw", "ᐚᐱ ᑭᐦᐃᐤ"),
("ᐋᐱᐦᑕᐃᐧᑯᓯᓵᐣᐃᐢᑫᐧᐤ", "âpihtawikosisâniskwêw", "ᐋᐱᐦᑕᐏᑯᓯᓵᓂᐢᑵᐤ"),
],
)
def test_final_middle_dot(original_syllabics, sro, clean_syllabics):
"""
Test that final middle dots <U+1427> get converted into their "w" syllabic
    equivalent.
"""
assert syllabics2sro(original_syllabics) == sro
assert sro2syllabics(syllabics2sro(original_syllabics)) == clean_syllabics
@pytest.mark.parametrize(
"erroneous_syllabics,sro,correct_syllabics",
[
("ᐚᐸ\u1466", "wâpam", "ᐚᐸᒼ"), # ᑦ|ᒼ <U+1466 CANADIAN SYLLABICS T>
("ᓂᐲ\u1541", "nipîhk", "ᓂᐲᕽ"), # ᕁ|ᕽ <U+1541 CANADIAN SYLLABICS SAYISI YI>
("ᓂᐱ\u1540", "nipiy", "ᓂᐱᐩ"), # ᕀ|ᐩ <U+1429 CANADIAN SYLLABICS FINAL PLUS>
],
)
def test_syllabics_lookalikes(erroneous_syllabics, sro, correct_syllabics):
assert erroneous_syllabics != correct_syllabics
assert syllabics2sro(erroneous_syllabics) == sro
assert sro2syllabics(syllabics2sro(erroneous_syllabics)) == correct_syllabics
@pytest.mark.parametrize(
"original_sro,syllabics,sro",
[
("tân'si", "ᑖᓂᓯ", "tânisi"),
("tân\N{RIGHT SINGLE QUOTATION MARK}si", "ᑖᓂᓯ", "tânisi"),
],
)
def test_short_i_ellision(original_sro, syllabics, sro):
"""
Test that an apostrophe can be substituted instead of a short-i.
"""
assert sro2syllabics(original_sro) == syllabics
assert syllabics2sro(sro2syllabics(original_sro)) == sro
@pytest.mark.parametrize(
"sro,syllabics",
[
# I've anecdotally noticed that Saskatchewan writers prefer macrons,
        # and th-dialect is primarily spoken in northern Saskatchewan,
# hence, long vowels in this test are written with macrons.
("wīhthēw", "ᐑᐦᖧᐤ"),
("nampithi-sīpīhk", "ᓇᒼᐱᖨ ᓰᐲᕽ"),
("mithomon", "ᒥᖪᒧᐣ"),
("namōtha", "ᓇᒨᖬ"),
("thāhkan", "ᖭᐦᑲᐣ"),
("namēpith", "ᓇᒣᐱᙾ"),
("ikw", "ᐃᐠᐤ"),
("pokw", "ᐳᐠᐤ"),
# Test each syllable.
("thē thi tho tha thī thō thā", "ᖧ ᖨ ᖪ ᖬ ᖩ ᖫ ᖭ"),
],
)
def test_cree_th_dialect(sro, syllabics):
assert sro2syllabics(sro) == syllabics
assert syllabics2sro(syllabics, produce_macrons=True) == sro
def test_rare_nwV_forms():
"""
Not all nwV forms are attested in Western Cree. Only
nwe, nwa, and nwâ exist. However, the UCAS Extended block includes Ojibway
syllabics that fill in the rest of the nwV syllabics. For now, I am NOT
including the Ojibway syllabics; only the syllabics explicitly intended
for Plains Cree.
"""
assert sro2syllabics("nwe nwa nwā") == "ᓊ ᓌ ᓎ"
def test_word_cannot_match_adjacent_vowels():
"""
The word matching should not be able to match adjacent, de-normalized vowels.
"""
assert sro2syllabics("I'm") == "I'm"
@pytest.mark.parametrize(
"sro,syllabics",
[
("âh-ayinânêw", "ᐋᐦᐊᔨᓈᓀᐤ"),
("âh-ayîtaw", "ᐋᐦᐊᔩᑕᐤ"),
("mistah-âya", "ᒥᐢᑕᐦᐋᔭ"),
# This is a fake word, but it tests an edge case:
("atihw-âya", "ᐊᑎᐦᐚᔭ"),
],
)
def test_sandhi_with_h(sro, syllabics):
"""
https://github.com/eddieantonio/cree-sro-syllabics/issues/17
"""
assert sro2syllabics(sro, sandhi=True) == syllabics
| 30.602941
| 88
| 0.641398
|
import pytest
from cree_sro_syllabics import sro2syllabics, syllabics2sro
COMBINING_CIRCUMFLEX = "\u0302"
@pytest.mark.parametrize(
"sro,syllabics",
[
("acimosis", "ᐊᒋᒧᓯᐢ"),
("atahk", "ᐊᑕᕽ"),
("mêriy", "ᒣᕒᐃᐩ"),
("wîstihkêw", "ᐑᐢᑎᐦᑫᐤ"),
("nêhiyawêwin", "ᓀᐦᐃᔭᐍᐏᐣ"),
("tirêyl", "ᑎᕒᐁᐩᓬ"),
("mitêh", "ᒥᑌᐦ"),
],
)
def test_single_words(sro, syllabics):
assert sro2syllabics(sro) == syllabics
assert syllabics2sro(syllabics) == sro
assert sro2syllabics(syllabics2sro(syllabics)) == syllabics
assert syllabics2sro(sro2syllabics(sro)) == sro
@pytest.mark.parametrize(
"sro,syllabics",
[
("Tân'si", "ᑖᓂᓯ"),
("Maskekosihk", "ᒪᐢᑫᑯᓯᕽ"),
],
)
def test_normalize_single_words(sro, syllabics):
assert sro2syllabics(sro) == syllabics
def test_unicode_normalization():
water = "nipiy"
leaf = "ni" + COMBINING_CIRCUMFLEX + "piy"
assert water != leaf
assert sro2syllabics(water) != sro2syllabics(leaf)
assert sro2syllabics(water) == "ᓂᐱᐩ"
assert sro2syllabics(leaf) == "ᓃᐱᐩ"
@pytest.mark.parametrize(
"sro,syllabics",
[
("obviously english text", "obviously english text"),
("write nêhiyawêwin", "write ᓀᐦᐃᔭᐍᐏᐣ"),
("\t namoya tataspêyihtam. ", "\t ᓇᒧᔭ ᑕᑕᐢᐯᔨᐦᑕᒼ᙮ "),
],
)
def test_multiple_words(sro, syllabics):
assert sro2syllabics(sro) == syllabics
assert syllabics2sro(syllabics) == sro
def test_alternate_y_final():
syllabics = "ᓰᐱᐝ"
sro = "sîpiy"
assert syllabics2sro(syllabics) == sro
@pytest.mark.parametrize(
"sro,syllabics",
[
("yōtinipēstāw", "ᔫᑎᓂᐯᐢᑖᐤ"),
("īkatē", "ᐄᑲᑌ"),
],
)
def test_macrons(sro, syllabics):
assert sro2syllabics(sro) == syllabics
assert syllabics2sro(syllabics, produce_macrons=True) == sro
@pytest.mark.parametrize(
"sro,syllabics",
[
# NOTE: the embedded NARROW NO-BREAK SPACE (NNBSP) characters
# in the syllabics transliteration may not render properly in
# fixed-width fonts!
("paskwâwi-mostos", "ᐸᐢᒁᐏ ᒧᐢᑐᐢ"),
("amiskwaciy-waskahikan", "ᐊᒥᐢᑿᒋᐩ ᐘᐢᑲᐦᐃᑲᐣ"),
("kâ-mahihkani-pimohtêt isiyihkâsow", "ᑳ ᒪᐦᐃᐦᑲᓂ ᐱᒧᐦᑌᐟ ᐃᓯᔨᐦᑳᓱᐤ"),
],
)
def test_hyphens(sro, syllabics):
assert (
sro2syllabics(sro)
== syllabics
== sro2syllabics(sro, hyphens="\N{NARROW NO-BREAK SPACE}")
)
assert syllabics2sro(syllabics) == sro
@pytest.mark.parametrize(
"sro,syllabics,hyphens,alt_syllabics",
[
("osk-âya", "ᐅᐢᑳᔭ", "", "ᐅᐢᐠᐋᔭ"),
# NOTE: this /still/ might not be the right transliteration, but
# the correct transliteration requires even more phonological knowledge,
# so I'm not even going to go there...
("miyw-âyâw", "ᒥᔼᔮᐤ", "", "ᒥᐩᐤᐋᔮᐤ"),
("pîhc-âyihk", "ᐲᐦᒑᔨᕽ", "", "ᐲᐦᐨᐋᔨᕽ"),
("wîhth-owin", "ᐑᐦᖪᐏᐣ", "", "ᐑᐦᙾᐅᐏᐣ"),
],
)
def test_sandhi(sro, syllabics, hyphens, alt_syllabics):
assert sro2syllabics(sro) == sro2syllabics(sro, sandhi=True) == syllabics
assert sro2syllabics(sro, sandhi=False, hyphens=hyphens) == alt_syllabics
@pytest.mark.parametrize(
"sro,syllabics",
[
("êtî nitisiyihkâson.", "ᐁᑏ ᓂᑎᓯᔨᐦᑳᓱᐣ᙮"),
('She told Dr. Thunder: "ninôhtêhkatân."', 'She told Dr. Thunder: "ᓂᓅᐦᑌᐦᑲᑖᐣ᙮"'),
("tânisi. êtî nitisiyihkâson. ", "ᑖᓂᓯ᙮ ᐁᑏ ᓂᑎᓯᔨᐦᑳᓱᐣ᙮ "),
],
)
def test_full_stop(sro, syllabics):
assert sro2syllabics(sro) == syllabics
assert syllabics2sro(syllabics) == sro
@pytest.mark.parametrize(
"original_syllabics,sro,clean_syllabics",
[
("ᐋᐧᐱ ᑭᐦᐃᐤ", "wâpi kihiw", "ᐚᐱ ᑭᐦᐃᐤ"),
("ᐋᐱᐦᑕᐃᐧᑯᓯᓵᐣᐃᐢᑫᐧᐤ", "âpihtawikosisâniskwêw", "ᐋᐱᐦᑕᐏᑯᓯᓵᓂᐢᑵᐤ"),
],
)
def test_final_middle_dot(original_syllabics, sro, clean_syllabics):
assert syllabics2sro(original_syllabics) == sro
assert sro2syllabics(syllabics2sro(original_syllabics)) == clean_syllabics
@pytest.mark.parametrize(
"erroneous_syllabics,sro,correct_syllabics",
[
("ᐚᐸ\u1466", "wâpam", "ᐚᐸᒼ"),
("ᓂᐲ\u1541", "nipîhk", "ᓂᐲᕽ"),
("ᓂᐱ\u1540", "nipiy", "ᓂᐱᐩ"),
],
)
def test_syllabics_lookalikes(erroneous_syllabics, sro, correct_syllabics):
assert erroneous_syllabics != correct_syllabics
assert syllabics2sro(erroneous_syllabics) == sro
assert sro2syllabics(syllabics2sro(erroneous_syllabics)) == correct_syllabics
@pytest.mark.parametrize(
"original_sro,syllabics,sro",
[
("tân'si", "ᑖᓂᓯ", "tânisi"),
("tân\N{RIGHT SINGLE QUOTATION MARK}si", "ᑖᓂᓯ", "tânisi"),
],
)
def test_short_i_ellision(original_sro, syllabics, sro):
assert sro2syllabics(original_sro) == syllabics
assert syllabics2sro(sro2syllabics(original_sro)) == sro
@pytest.mark.parametrize(
"sro,syllabics",
[
# I've anecdotally noticed that Saskatchewan writers prefer macrons,
("wīhthēw", "ᐑᐦᖧᐤ"),
("nampithi-sīpīhk", "ᓇᒼᐱᖨ ᓰᐲᕽ"),
("mithomon", "ᒥᖪᒧᐣ"),
("namōtha", "ᓇᒨᖬ"),
("thāhkan", "ᖭᐦᑲᐣ"),
("namēpith", "ᓇᒣᐱᙾ"),
("ikw", "ᐃᐠᐤ"),
("pokw", "ᐳᐠᐤ"),
("thē thi tho tha thī thō thā", "ᖧ ᖨ ᖪ ᖬ ᖩ ᖫ ᖭ"),
],
)
def test_cree_th_dialect(sro, syllabics):
assert sro2syllabics(sro) == syllabics
assert syllabics2sro(syllabics, produce_macrons=True) == sro
def test_rare_nwV_forms():
assert sro2syllabics("nwe nwa nwā") == "ᓊ ᓌ ᓎ"
def test_word_cannot_match_adjacent_vowels():
assert sro2syllabics("I'm") == "I'm"
@pytest.mark.parametrize(
"sro,syllabics",
[
("âh-ayinânêw", "ᐋᐦᐊᔨᓈᓀᐤ"),
("âh-ayîtaw", "ᐋᐦᐊᔩᑕᐤ"),
("mistah-âya", "ᒥᐢᑕᐦᐋᔭ"),
("atihw-âya", "ᐊᑎᐦᐚᔭ"),
],
)
def test_sandhi_with_h(sro, syllabics):
assert sro2syllabics(sro, sandhi=True) == syllabics
| true
| true
|
f718ea668f086785a2ecdd071fd77637573089f4
| 53,142
|
py
|
Python
|
discretisedfield/field.py
|
minrk/discretisedfield
|
251584f8d976a7fafdff5402d16327489407c4dd
|
[
"BSD-3-Clause"
] | null | null | null |
discretisedfield/field.py
|
minrk/discretisedfield
|
251584f8d976a7fafdff5402d16327489407c4dd
|
[
"BSD-3-Clause"
] | null | null | null |
discretisedfield/field.py
|
minrk/discretisedfield
|
251584f8d976a7fafdff5402d16327489407c4dd
|
[
"BSD-3-Clause"
] | null | null | null |
import pyvtk
import struct
import matplotlib
import numpy as np
import mpl_toolkits.axes_grid1
import discretisedfield as df
import ubermagutil.typesystem as ts
import discretisedfield.util as dfu
import matplotlib.pyplot as plt
@ts.typesystem(mesh=ts.Typed(expected_type=df.Mesh),
dim=ts.Scalar(expected_type=int, unsigned=True, const=True),
name=ts.Name(const=True))
class Field:
"""Finite difference field.
This class defines a finite difference field and enables certain
operations for its analysis and visualisation. The field is
defined on a finite difference mesh (`discretisedfield.Mesh`).
Parameters
----------
mesh : discretisedfield.Mesh
Finite difference rectangular mesh.
dim : int, optional
Dimension of the field value. For instance, if `dim=3` the
field is a three-dimensional vector field and for `dim=1`
the field is a scalar field. Defaults to `dim=3`.
value : array_like, callable, optional
Please refer to the `value` property:
:py:func:`~discretisedfield.Field.value`. Defaults to 0,
meaning that if the value is not provided in the
initialisation process, "zero-field" will be defined.
norm : numbers.Real, callable, optional
Please refer to the `norm` property:
:py:func:`~discretisedfield.Field.norm`. Defaults to `None`
(`norm=None` defines no norm).
name : str, optional
Field name (defaults to `'field'`). The field name must be a
valid Python variable name string. More specifically, it must
not contain spaces, or start with underscore or numeric
character.
Examples
--------
1. Creating a uniform three-dimensional vector field on a
nano-sized thin film.
>>> import discretisedfield as df
...
>>> p1 = (-50e-9, -25e-9, 0)
>>> p2 = (50e-9, 25e-9, 5e-9)
>>> cell = (1e-9, 1e-9, 0.1e-9)
>>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
...
>>> dim = 3
>>> value = (0, 0, 1)
>>> field = df.Field(mesh=mesh, dim=dim, value=value)
>>> field
Field(mesh=...)
2. Creating a scalar field.
>>> import discretisedfield as df
...
>>> p1 = (-10, -10, -10)
>>> p2 = (10, 10, 10)
>>> n = (1, 1, 1)
>>> mesh = df.Mesh(p1=p1, p2=p2, n=n)
>>> dim = 1
>>> value = 3.14
>>> field = df.Field(mesh=mesh, dim=dim, value=value)
>>> field
Field(mesh=...)
.. seealso:: :py:func:`~discretisedfield.Mesh`
"""
def __init__(self, mesh, dim=3, value=0, norm=None, name='field'):
self.mesh = mesh
self.dim = dim
self.value = value
self.norm = norm
self.name = name
@property
def value(self):
"""Field value representation.
        This property returns a representation of the field value if
it exists. Otherwise, the `numpy.ndarray` containing all
values from the field is returned.
Parameters
----------
value : 0, array_like, callable
For scalar fields (`dim=1`) `numbers.Real` values are
allowed. In the case of vector fields, "array_like" (list,
tuple, numpy.ndarray) value with length equal to `dim`
should be used. Finally, the value can also be a callable
(e.g. Python function or another field), which for every
coordinate in the mesh returns a valid value. If
`value=0`, all values in the field will be set to zero
independent of the field dimension.
Returns
-------
array_like, callable, numbers.Real
The value used (representation) for setting the field is
returned. However, if the actual value of the field does
not correspond to the initially used value anymore, a
`numpy.ndarray` is returned containing all field values.
Raises
------
ValueError
            If an unsupported type is passed.
Examples
--------
1. Different ways of setting and getting the field value.
>>> import discretisedfield as df
...
>>> p1 = (0, 0, 0)
>>> p2 = (2, 2, 1)
>>> cell = (1, 1, 1)
>>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
>>> value = (0, 0, 1)
>>> # if value is not specified, zero-field is defined
>>> field = df.Field(mesh=mesh, dim=3)
>>> field.value
0
>>> field.value = (0, 0, 1)
>>> field.value
(0, 0, 1)
>>> # Setting the field value using a Python function (callable).
>>> def value_function(pos):
... x, y, z = pos
... if x <= 1:
... return (0, 0, 1)
... else:
... return (0, 0, -1)
>>> field.value = value_function
>>> field.value
<function value_function at ...>
>>> # We now change the value of a single cell so that the
>>> # representation used for initialising field is not valid
>>> # anymore.
>>> field.array[0, 0, 0, :] = (0, 0, 0)
>>> field.value
array(...)
.. seealso:: :py:func:`~discretisedfield.Field.array`
"""
value_array = dfu.as_array(self.mesh, self.dim, self._value)
if np.array_equal(self.array, value_array):
return self._value
else:
return self.array
@value.setter
def value(self, val):
self._value = val
self.array = dfu.as_array(self.mesh, self.dim, val)
@property
def array(self):
"""Numpy array of a field value.
`array` has shape of `(self.mesh.n[0], self.mesh.n[1],
self.mesh.n[2], dim)`.
Parameters
----------
array : numpy.ndarray
Numpy array with dimensions `(self.mesh.n[0],
self.mesh.n[1], self.mesh.n[2], dim)`
Returns
-------
numpy.ndarray
Field values array.
Raises
------
ValueError
If setting the array with wrong type, shape, or value.
Examples
--------
1. Accessing and setting the field array.
>>> import discretisedfield as df
>>> import numpy as np
...
>>> p1 = (0, 0, 0)
>>> p2 = (1, 1, 1)
>>> cell = (0.5, 1, 1)
>>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
>>> value = (0, 0, 1)
>>> field = df.Field(mesh=mesh, dim=3, value=value)
>>> field.array
array(...)
>>> field.array.shape
(2, 1, 1, 3)
>>> field.array = np.ones(field.array.shape)
>>> field.array
array(...)
.. seealso:: :py:func:`~discretisedfield.Field.value`
"""
return self._array
@array.setter
def array(self, val):
if isinstance(val, np.ndarray) and \
val.shape == self.mesh.n + (self.dim,):
self._array = val
else:
msg = (f'Unsupported type(val)={type(val)} '
'or invalid value dimensions.')
raise ValueError(msg)
@property
def norm(self):
"""Norm of a field.
This property computes the norm of the field and returns it as
a `discretisedfield.Field` object with `dim=1`. Norm of a
scalar field cannot be set and `ValueError` is raised.
Parameters
----------
numbers.Real, numpy.ndarray
Norm value
Returns
-------
discretisedfield.Field
Scalar field with norm values.
Raises
------
ValueError
If setting the norm with wrong type, shape, or value. In
addition, if the field is scalar (dim=1) or it contains
zero vector values.
Examples
--------
1. Manipulating the field norm
>>> import discretisedfield as df
...
>>> p1 = (0, 0, 0)
>>> p2 = (1, 1, 1)
>>> cell = (1, 1, 1)
>>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
>>> field = df.Field(mesh=mesh, dim=3, value=(0, 0, 1))
>>> field.norm
Field(...)
>>> field.norm = 2
>>> field.norm
Field(...)
>>> field.value = (1, 0, 0)
>>> field.norm.array
array([[[[1.]]]])
"""
current_norm = np.linalg.norm(self.array, axis=-1)[..., None]
return Field(self.mesh, dim=1, value=current_norm, name='norm')
@norm.setter
def norm(self, val):
if val is not None:
if self.dim == 1:
msg = f'Cannot set norm for field with dim={self.dim}.'
raise ValueError(msg)
if not np.all(self.norm.array):
msg = 'Cannot normalise field with zero values.'
raise ValueError(msg)
self.array /= self.norm.array # normalise to 1
self.array *= dfu.as_array(self.mesh, dim=1, val=val)
@property
def average(self):
"""Field average.
It computes the average of the field over the entire volume of
the mesh.
Returns
-------
tuple
            Field average tuple whose length equals the field's
dimension.
Examples
--------
1. Computing the vector field average.
>>> import discretisedfield as df
...
>>> p1 = (0, 0, 0)
>>> p2 = (5, 5, 5)
>>> cell = (1, 1, 1)
>>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
>>> field1 = df.Field(mesh=mesh, dim=3, value=(0, 0, 1))
>>> field1.average
(0.0, 0.0, 1.0)
>>> field2 = df.Field(mesh=mesh, dim=1, value=55)
>>> field2.average
(55.0,)
"""
return tuple(self.array.mean(axis=(0, 1, 2)))
def __repr__(self):
"""Field representation string.
This method returns the string that can ideally be copied in
another Python script so that exactly the same field object
could be defined. However, this is usually not the case due to
complex values used.
Returns
-------
str
Field representation string.
Example
-------
1. Getting field representation string.
>>> import discretisedfield as df
...
>>> p1 = (0, 0, 0)
>>> p2 = (2, 2, 1)
>>> cell = (1, 1, 1)
>>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
>>> field = df.Field(mesh, dim=1, value=1)
>>> repr(field)
"Field(mesh=...)"
"""
return (f'Field(mesh={repr(self.mesh)}, '
f'dim={self.dim}, name=\'{self.name}\')')
def __call__(self, point):
"""Sample the field at `point`.
        It returns the value of the discretisation cell `point`
belongs to. It always returns a tuple, whose length is the
same as the dimension of the field.
Parameters
----------
point : (3,) array_like
The mesh point coordinate :math:`(p_{x}, p_{y}, p_{z})`.
Returns
-------
tuple
A tuple, whose length is the same as the dimension of the
field.
Example
-------
1. Sampling the field value
>>> import discretisedfield as df
...
>>> p1 = (0, 0, 0)
>>> p2 = (20, 20, 20)
>>> n = (20, 20, 20)
>>> mesh = df.Mesh(p1=p1, p2=p2, n=n)
>>> field = df.Field(mesh, dim=3, value=(1, 3, 4))
>>> point = (10, 2, 3)
>>> field(point)
(1.0, 3.0, 4.0)
"""
value = self.array[self.mesh.point2index(point)]
if self.dim > 1:
value = tuple(value)
return value
def __getattr__(self, name):
"""Extracting the component of the vector field.
If `'x'`, `'y'`, or `'z'` is accessed, a new scalar field of
that component will be returned. This method is effective for
vector fields with dimension 2 or 3.
Returns
-------
discretisedfield.Field
Scalar field with vector field component values.
Examples
--------
1. Accessing the vector field components.
>>> import discretisedfield as df
...
>>> p1 = (0, 0, 0)
>>> p2 = (2, 2, 2)
>>> cell = (1, 1, 1)
>>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
>>> field = df.Field(mesh=mesh, dim=3, value=(0, 0, 1))
>>> field.x
Field(...)
>>> field.y
Field(...)
>>> field.z
Field(...)
>>> field.z.dim
1
"""
if name in list(dfu.axesdict.keys())[:self.dim] and 1 < self.dim <= 3:
# Components x, y, and z make sense only for vector fields
# with typical dimensions 2 and 3.
component_array = self.array[..., dfu.axesdict[name]][..., None]
            fieldname = f'{self.name}-{name}'
return Field(mesh=self.mesh, dim=1,
value=component_array, name=fieldname)
else:
msg = f'{type(self).__name__} object has no attribute {name}.'
            raise AttributeError(msg)
def __dir__(self):
"""Extension of the tab-completion list.
Adds `'x'`, `'y'`, and `'z'`, depending on the dimension of
the field, to the tab-completion list. This is effective in
IPython or Jupyter notebook environment.
"""
if 1 < self.dim <= 3:
extension = list(dfu.axesdict.keys())[:self.dim]
else:
extension = []
return list(self.__dict__.keys()) + extension
def __iter__(self):
"""Generator yielding coordinates and values of all field cells.
The discretisation cell coordinate corresponds to the cell
centre point.
Yields
------
tuple (2,)
The first value is the mesh cell coordinates (`px`, `py`,
`pz`), whereas the second one is the field value.
Examples
--------
1. Iterating through the field coordinates and values
>>> import discretisedfield as df
...
>>> p1 = (0, 0, 0)
>>> p2 = (2, 2, 1)
>>> cell = (1, 1, 1)
>>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
>>> field = df.Field(mesh, dim=3, value=(0, 0, 1))
>>> for coord, value in field:
... print (coord, value)
(0.5, 0.5, 0.5) (0.0, 0.0, 1.0)
(1.5, 0.5, 0.5) (0.0, 0.0, 1.0)
(0.5, 1.5, 0.5) (0.0, 0.0, 1.0)
(1.5, 1.5, 0.5) (0.0, 0.0, 1.0)
.. seealso:: :py:func:`~discretisedfield.Mesh.indices`
"""
for point in self.mesh.coordinates:
yield point, self.__call__(point)
def line(self, p1, p2, n=100):
"""Sampling the field along the line.
Given two points :math:`p_{1}` and :math:`p_{2}`, :math:`n`
position coordinates are generated and the corresponding field
values.
.. math::
\\mathbf{r}_{i} = i\\frac{\\mathbf{p}_{2} -
\\mathbf{p}_{1}}{n-1}
Parameters
----------
p1, p2 : (3,) array_like
Two points between which the line is generated.
n : int
Number of points on the line.
Yields
------
tuple
The first element is the coordinate of the point on the
line, whereas the second one is the value of the field.
Raises
------
ValueError
If `p1` or `p2` is outside the mesh domain.
Examples
--------
1. Sampling the field along the line.
>>> import discretisedfield as df
...
>>> p1 = (0, 0, 0)
>>> p2 = (2, 2, 2)
>>> cell = (1, 1, 1)
>>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
>>> field = df.Field(mesh, dim=2, value=(0, 3))
>>> for coord, value in field.line(p1=(0, 0, 0), p2=(2, 0, 0), n=3):
... print(coord, value)
(0.0, 0.0, 0.0) (0.0, 3.0)
(1.0, 0.0, 0.0) (0.0, 3.0)
(2.0, 0.0, 0.0) (0.0, 3.0)
"""
for point in self.mesh.line(p1=p1, p2=p2, n=n):
yield point, self.__call__(point)
def plane(self, *args, n=None, **kwargs):
"""Slices the field with a plane.
If one of the axes (`'x'`, `'y'`, or `'z'`) is passed as a
string, a plane perpendicular to that axis is generated which
intersects the field at its centre. Alternatively, if a keyword
argument is passed (e.g. `x=1`), a plane perpendicular to the
x-axis and intersecting it at x=1 is generated. The number of
points in two dimensions on the plane can be defined using `n`
(e.g. `n=(10, 15)`). Using the generated plane, a new
"two-dimensional" field is created and returned.
Parameters
----------
n : tuple of length 2
The number of points on the plane in two dimensions
Returns
        -------
discretisedfield.Field
A field obtained as an intersection of mesh and the plane.
Example
-------
1. Intersecting the field with a plane.
>>> import discretisedfield as df
...
>>> p1 = (0, 0, 0)
>>> p2 = (2, 2, 2)
>>> cell = (1, 1, 1)
>>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
>>> field = df.Field(mesh, dim=3)
>>> field.plane(y=1)
Field(mesh=...)
"""
plane_mesh = self.mesh.plane(*args, n=n, **kwargs)
return self.__class__(plane_mesh, dim=self.dim, value=self)
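    # Editor's aside (hypothetical values, consistent with the docstring above):
    # the plane can also be re-sampled when slicing, e.g.
    #     field.plane('z', n=(10, 15))
    # evaluates the field on a 10 x 15 grid of points lying in the central
    # z-plane instead of reusing the original in-plane discretisation.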
def write(self, filename, representation='txt', extend_scalar=False):
"""Write the field in .ovf, .omf, .ohf, or vtk format.
If the extension of the `filename` is `.vtk`, a VTK file is
written
(:py:func:`~discretisedfield.Field._writevtk`). Otherwise, for
`.ovf`, `.omf`, or `.ohf` extensions, an OOMMF file is written
(:py:func:`~discretisedfield.Field._writeovf`). The
representation (`bin4`, 'bin8', or 'txt') is passed using
`representation` argument.
Parameters
----------
filename : str
            Name of the file written. Its extension determines the
            format in which the file is written.
representation : str
In the case of OOMMF files (`.ovf`, `.omf`, or `.ohf`),
representation can be specified (`bin4`, `bin8`, or
`txt`). Defaults to 'txt'.
extend_scalar : bool
If True, a scalar field will be saved as a vector
field. More precisely, if the value at a cell is 3, that
cell will be saved as (3, 0, 0). This is valid only for
the OVF file formats.
Example
-------
1. Write an .omf file and delete it from the disk
>>> import os
>>> import discretisedfield as df
...
>>> p1 = (0, 0, -5)
>>> p2 = (5, 15, 15)
>>> n = (5, 15, 20)
>>> mesh = df.Mesh(p1=p1, p2=p2, n=n)
>>> field = df.Field(mesh, value=(5, 6, 7))
>>> filename = 'mytestfile.omf'
>>> field.write(filename) # write the file
>>> os.remove(filename) # delete the file
.. seealso:: :py:func:`~discretisedfield.Field.fromfile`
"""
if any([filename.endswith(ext) for ext in ['.omf', '.ovf', '.ohf']]):
self._writeovf(filename, representation=representation,
extend_scalar=extend_scalar)
elif filename.endswith('.vtk'):
self._writevtk(filename)
else:
msg = ('Allowed extensions for writing the field are '
'.omf, .ovf, .ohf, and .vtk.')
raise ValueError(msg)
def _writeovf(self, filename, representation='txt', extend_scalar=False):
"""Write the field in .ovf, .omf, or .ohf format.
The extension of the `filename` should be `.ovf`, `.omf`, or
`.ohf`. The representation (`bin4`, 'bin8', or 'txt') is
passed using `representation` argument.
Parameters
----------
filename : str
Name of the file written.
representation : str
Representation of the file (`bin4`, `bin8`, or
`txt`). Defaults to 'txt'.
extend_scalar : bool
If True, a scalar field will be saved as a vector
field. More precisely, if the value at a cell is 3, that
cell will be saved as (3, 0, 0).
Example
-------
1. Write an .omf file and delete it from the disk
>>> import os
>>> import discretisedfield as df
...
>>> p1 = (0, 0, -5)
>>> p2 = (5, 15, 15)
>>> n = (5, 15, 20)
>>> mesh = df.Mesh(p1=p1, p2=p2, n=n)
>>> field = df.Field(mesh, value=(5, 6, 7))
>>> filename = 'mytestfile.omf'
>>> field._writeovf(filename) # write the file
>>> os.remove(filename) # delete the file
"""
if extend_scalar and self.dim == 1:
write_dim = 3
else:
write_dim = self.dim
header = ['OOMMF OVF 2.0',
'',
'Segment count: 1',
'',
'Begin: Segment',
'Begin: Header',
'',
'Title: Field generated omf file',
'Desc: File generated by Field class',
'meshunit: m',
'meshtype: rectangular',
f'xbase: {self.mesh.pmin[0] + self.mesh.cell[0]/2}',
f'ybase: {self.mesh.pmin[1] + self.mesh.cell[1]/2}',
f'zbase: {self.mesh.pmin[2] + self.mesh.cell[2]/2}',
f'xnodes: {self.mesh.n[0]}',
f'ynodes: {self.mesh.n[1]}',
f'znodes: {self.mesh.n[2]}',
f'xstepsize: {self.mesh.cell[0]}',
f'ystepsize: {self.mesh.cell[1]}',
f'zstepsize: {self.mesh.cell[2]}',
f'xmin: {self.mesh.pmin[0]}',
f'ymin: {self.mesh.pmin[1]}',
f'zmin: {self.mesh.pmin[2]}',
f'xmax: {self.mesh.pmax[0]}',
f'ymax: {self.mesh.pmax[1]}',
f'zmax: {self.mesh.pmax[2]}',
f'valuedim: {write_dim}',
f'valuelabels: {self.name}_x {self.name}_y {self.name}_z',
'valueunits: A/m A/m A/m',
'',
'End: Header',
'']
if representation == 'bin4':
header.append('Begin: Data Binary 4')
footer = ['End: Data Binary 4',
'End: Segment']
elif representation == 'bin8':
header.append('Begin: Data Binary 8')
footer = ['End: Data Binary 8',
'End: Segment']
elif representation == 'txt':
header.append('Begin: Data Text')
footer = ['End: Data Text',
'End: Segment']
# Write header lines to the ovf file.
f = open(filename, 'w')
f.write(''.join(map(lambda line: f'# {line}\n', header)))
f.close()
binary_reps = {'bin4': (1234567.0, 'f'),
'bin8': (123456789012345.0, 'd')}
if representation in binary_reps:
# Reopen the file with binary write, appending to the end
# of the file.
f = open(filename, 'ab')
# Add the 8 bit binary check value that OOMMF uses.
packarray = [binary_reps[representation][0]]
# Write data to the ovf file.
for i in self.mesh.indices:
for vi in self.array[i]:
packarray.append(vi)
v_bin = struct.pack(binary_reps[representation][1]*len(packarray),
*packarray)
f.write(v_bin)
f.close()
else:
# Reopen the file for txt representation, appending to the
# file.
f = open(filename, 'a')
for i in self.mesh.indices:
if self.dim == 3:
v = [vi for vi in self.array[i]]
elif self.dim == 1:
if extend_scalar:
v = [self.array[i][0], 0.0, 0.0]
else:
v = [self.array[i][0]]
else:
msg = (f'Cannot write dim={self.dim} field.')
raise TypeError(msg)
for vi in v:
f.write(' ' + str(vi))
f.write('\n')
f.close()
# Write footer lines to OOMMF file.
f = open(filename, 'a')
f.write(''.join(map(lambda line: f'# {line}\n', footer)))
f.close()
def _writevtk(self, filename):
"""Write the field in the VTK format.
The extension of the `filename` should be `.vtk`.
Parameters
----------
filename : str
Name of the file written.
Example
-------
1. Write a .vtk file and delete it from the disk
>>> import os
>>> import discretisedfield as df
...
>>> p1 = (0, 0, -5)
>>> p2 = (5, 15, 15)
>>> n = (5, 15, 20)
>>> mesh = df.Mesh(p1=p1, p2=p2, n=n)
>>> field = df.Field(mesh, value=(5, 6, 7))
>>> filename = 'mytestfile.vtk'
>>> field._writevtk(filename) # write the file
>>> os.remove(filename) # delete the file
"""
grid = [pmini + np.linspace(0, li, ni+1) for pmini, li, ni in
zip(self.mesh.pmin, self.mesh.l, self.mesh.n)]
structure = pyvtk.RectilinearGrid(*grid)
vtkdata = pyvtk.VtkData(structure)
vectors = [self.__call__(coord) for coord in self.mesh.coordinates]
vtkdata.cell_data.append(pyvtk.Vectors(vectors, self.name))
for i, component in enumerate(dfu.axesdict.keys()):
name = f'{self.name}_{component}'
vtkdata.cell_data.append(pyvtk.Scalars(list(zip(*vectors))[i],
name))
vtkdata.tofile(filename)
@classmethod
def fromfile(cls, filename, norm=None, name='field'):
"""Read the field from .ovf, .omf, or .ohf file.
The extension of the `filename` should be `.ovf`, `.omf`, or
`.ohf`. If the field should be normalised, `norm` argument can
be passed. The `name` of the field defaults to `'field'`. This
is a `classmethod` and should be called as
`discretisedfield.Field.fromfile('myfile.omf')`.
Parameters
----------
filename : str
Name of the file to be read.
norm : numbers.Real, numpy.ndarray, callable
For details, refer to :py:func:`~discretisedfield.Field.value`.
name : str
Name of the field read.
Returns
-------
discretisedfield.Field
Example
-------
1. Read a field from the .ovf file
>>> import os
>>> import discretisedfield as df
...
>>> ovffile = os.path.join(os.path.dirname(__file__),
... 'tests', 'test_sample',
... 'mumax-output-linux.ovf')
>>> field = df.Field.fromfile(ovffile)
>>> field
Field(mesh=...)
.. seealso:: :py:func:`~discretisedfield.Field.write`
"""
mdatalist = ['xmin', 'ymin', 'zmin', 'xmax', 'ymax', 'zmax',
'xstepsize', 'ystepsize', 'zstepsize', 'valuedim']
mdatadict = dict()
try:
with open(filename, 'r', encoding='utf-8') as ovffile:
f = ovffile.read()
lines = f.split('\n')
mdatalines = filter(lambda s: s.startswith('#'), lines)
datalines = np.loadtxt(filter(lambda s: not s.startswith('#'),
lines))
for line in mdatalines:
for mdatum in mdatalist:
if mdatum in line:
mdatadict[mdatum] = float(line.split()[-1])
break
except UnicodeDecodeError:
with open(filename, 'rb') as ovffile:
f = ovffile.read()
lines = f.split(b'\n')
mdatalines = filter(lambda s: s.startswith(bytes('#', 'utf-8')),
lines)
for line in mdatalines:
for mdatum in mdatalist:
if bytes(mdatum, 'utf-8') in line:
mdatadict[mdatum] = float(line.split()[-1])
break
header = b'# Begin: Data Binary '
data_start = f.find(header)
header = f[data_start:data_start + len(header) + 1]
data_start += len(b'# Begin: Data Binary 8')
data_end = f.find(b'# End: Data Binary ')
# ordered by length
newlines = [b'\n\r', b'\r\n', b'\n']
for nl in newlines:
if f.startswith(nl, data_start):
data_start += len(nl)
break
if b'4' in header:
formatstr = '@f'
checkvalue = 1234567.0
elif b'8' in header:
formatstr = '@d'
checkvalue = 123456789012345.0
listdata = list(struct.iter_unpack(formatstr,
f[data_start:data_end]))
datalines = np.array(listdata)
if datalines[0] != checkvalue:
# These two lines cannot be accessed via
# tests. Therefore, they are excluded from coverage.
msg = 'Binary Data cannot be read.' # pragma: no cover
raise AssertionError(msg) # pragma: no cover
datalines = datalines[1:] # check value removal
p1 = (mdatadict[key] for key in ['xmin', 'ymin', 'zmin'])
p2 = (mdatadict[key] for key in ['xmax', 'ymax', 'zmax'])
cell = (mdatadict[key] for key in ['xstepsize', 'ystepsize',
'zstepsize'])
dim = int(mdatadict['valuedim'])
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
field = df.Field(mesh, dim=dim, name=name)
r_tuple = tuple(reversed(field.mesh.n)) + (int(mdatadict['valuedim']),)
t_tuple = tuple(reversed(range(3))) + (3,)
field.array = datalines.reshape(r_tuple).transpose(t_tuple)
field.norm = norm # Normalise if norm is passed
return field
def mpl(self, figsize=None):
"""Plots a field plane using matplotlib.
Before the field can be plotted, it must be sliced with a
        plane (e.g. `field.plane('z')`). Otherwise, ValueError is
raised. For vector fields, this method plots both `quiver`
(vector) and `imshow` (scalar) plots. The `imshow` plot
represents the value of the out-of-plane vector component and
the `quiver` plot is not coloured. On the other hand, only
`imshow` is plotted for scalar fields. Where the norm of the
field is zero, no vectors are shown and those `imshow` pixels
are not coloured. In order to use this function inside Jupyter
notebook `%matplotlib inline` must be activated after
`discretisedfield` is imported.
Parameters
----------
figsize : tuple, optional
Length-2 tuple passed to the `matplotlib.figure` function.
Raises
------
ValueError
If the field has not been sliced with a plane.
Example
-------
>>> import discretisedfield as df
...
>>> p1 = (0, 0, 0)
>>> p2 = (100, 100, 100)
>>> n = (10, 10, 10)
>>> mesh = df.Mesh(p1=p1, p2=p2, n=n)
>>> field = df.Field(mesh, dim=3, value=(1, 2, 0))
>>> field.plane(z=50, n=(5, 5)).mpl()
.. seealso:: :py:func:`~discretisedfield.Field.k3d_vectors`
"""
if not hasattr(self.mesh, 'info'):
msg = ('Only sliced field can be plotted using mpl. '
'For instance, field.plane(\'x\').mpl().')
raise ValueError(msg)
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
planeaxis = dfu.raxesdict[self.mesh.info['planeaxis']]
if self.dim > 1:
# Vector field has both quiver and imshow plots.
self.quiver(ax=ax, headwidth=5)
scfield = getattr(self, planeaxis)
coloredplot = scfield.imshow(ax=ax, norm_field=self.norm)
else:
# Scalar field has only imshow.
scfield = self
coloredplot = scfield.imshow(ax=ax, norm_field=None)
# Add colorbar to imshow plot.
cbar = self.colorbar(ax, coloredplot)
# Add labels.
ax.set_xlabel(dfu.raxesdict[self.mesh.info['axis1']])
ax.set_ylabel(dfu.raxesdict[self.mesh.info['axis2']])
if self.dim > 1:
cbar.ax.set_ylabel(planeaxis + ' component')
def imshow(self, ax, norm_field=None, **kwargs):
"""Plots a scalar field plane using `matplotlib.pyplot.imshow`.
Before the field can be plotted, it must be sliced with a
        plane (e.g. `field.plane('y')`) and field must be of dimension
1 (scalar field). Otherwise, ValueError is raised. `imshow`
adds the plot to `matplotlib.axes.Axes` passed via `ax`
argument. If the scalar field plotted is extracted from a
vector field, which has coordinates where the norm of the
field is zero, the norm of that vector field can be passed
using `norm_field` argument, so that pixels at those
coordinates are not coloured. All other parameters accepted by
`matplotlib.pyplot.imshow` can be passed. In order to use this
function inside Jupyter notebook `%matplotlib inline` must be
activated after `discretisedfield` is imported.
Parameters
----------
ax : matplotlib.axes.Axes
Axes object to which the scalar plot will be added.
norm_field : discretisedfield.Field, optional
A (scalar) norm field used for determining whether certain
pixels should be coloured.
Returns
-------
matplotlib.image.AxesImage object
Raises
------
ValueError
If the field has not been sliced with a plane or its
dimension is not 1.
Example
-------
>>> import discretisedfield as df
...
>>> p1 = (0, 0, 0)
>>> p2 = (100, 100, 100)
>>> n = (10, 10, 10)
>>> mesh = df.Mesh(p1=p1, p2=p2, n=n)
>>> field = df.Field(mesh, dim=1, value=2)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> field.plane('y').imshow(ax=ax)
<matplotlib.image.AxesImage object at ...>
.. seealso:: :py:func:`~discretisedfield.Field.quiver`
"""
if not hasattr(self.mesh, 'info'):
msg = ('Only sliced field can be plotted using imshow. '
'For instance, field.plane(\'x\').imshow(ax=ax).')
raise ValueError(msg)
if self.dim > 1:
msg = ('Only scalar (dim=1) fields can be plotted. Consider '
'plotting one component, e.g. field.x.imshow(ax=ax) '
'or norm field.norm.imshow(ax=ax).')
raise ValueError(msg)
points, values = list(zip(*list(self)))
# If norm_field is passed, set values where norm=0 to np.nan,
# so that they are not plotted.
if norm_field is not None:
values = list(values) # make values mutable
for i, point in enumerate(points):
if norm_field(point) == 0:
values[i] = np.nan
# "Unpack" values inside arrays.
values = [v[0] if not np.isnan(v) else v for v in values]
else:
# "Unpack" values inside arrays.
values = list(zip(*values))
points = list(zip(*points))
extent = [self.mesh.pmin[self.mesh.info['axis1']],
self.mesh.pmax[self.mesh.info['axis1']],
self.mesh.pmin[self.mesh.info['axis2']],
self.mesh.pmax[self.mesh.info['axis2']]]
n = (self.mesh.n[self.mesh.info['axis2']],
self.mesh.n[self.mesh.info['axis1']])
imax = ax.imshow(np.array(values).reshape(n), origin='lower',
extent=extent, **kwargs)
return imax
def quiver(self, ax=None, color_field=None, **kwargs):
"""Plots a vector field plane using `matplotlib.pyplot.quiver`.
Before the field can be plotted, it must be sliced with a
        plane (e.g. `field.plane('y')`) and field must be of dimension
3 (vector field). Otherwise, ValueError is raised. `quiver`
adds the plot to `matplotlib.axes.Axes` passed via `ax`
argument. If there are coordinates where the norm of the field
is zero, vectors are not plotted at those coordinates. By
default, plot is not coloured, but by passing a
`discretisedfield.Field` object of dimension 1 as
`color_field`, quiver plot will be coloured based on the
values from the field. All other parameters accepted by
`matplotlib.pyplot.quiver` can be passed. In order to use this
function inside Jupyter notebook `%matplotlib inline` must be
activated after `discretisedfield` is imported.
Parameters
----------
ax : matplotlib.axes.Axes
Axes object to which the quiver plot will be added.
color_field : discretisedfield.Field, optional
A (scalar) field used for determining the colour of the
quiver plot.
Returns
-------
matplotlib.quiver.Quiver object
Raises
------
ValueError
If the field has not been sliced with a plane or its
dimension is not 3.
Example
-------
>>> import discretisedfield as df
...
>>> p1 = (0, 0, 0)
>>> p2 = (100, 100, 100)
>>> n = (10, 10, 10)
>>> mesh = df.Mesh(p1=p1, p2=p2, n=n)
>>> field = df.Field(mesh, dim=3, value=(1, 2, 0))
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> field.plane(z=50).quiver(ax=ax, color_field=field.z)
<matplotlib.quiver.Quiver object at ...>
.. seealso:: :py:func:`~discretisedfield.Field.imshow`
"""
if not hasattr(self.mesh, 'info'):
msg = ('Only sliced field can be plotted using quiver. '
'For instance, field.plane(\'x\').quiver(ax=ax).')
raise ValueError(msg)
if self.dim != 3:
msg = 'Only three-dimensional (dim=3) fields can be plotted.'
raise ValueError(msg)
points, values = list(zip(*list(self)))
# Remove values where norm is 0
points, values = list(points), list(values) # make them mutable
points = [p for p, v in zip(points, values)
if not np.equal(v, 0).all()]
values = [v for v in values if not np.equal(v, 0).all()]
if color_field is not None:
colors = [color_field(p) for p in points]
colors = list(zip(*colors))
# "Unpack" values inside arrays.
points, values = list(zip(*points)), list(zip(*values))
        # If every vector points purely out of the plane (no in-plane
        # components), matplotlib cannot autoscale the quiver plot, so fix the scale.
if not any(values[self.mesh.info['axis1']] +
values[self.mesh.info['axis2']]):
kwargs['scale'] = 1
kwargs['pivot'] = 'mid' # arrow at the centre of the cell
if color_field is None:
# quiver plot is not coloured.
qvax = ax.quiver(points[self.mesh.info['axis1']],
points[self.mesh.info['axis2']],
values[self.mesh.info['axis1']],
values[self.mesh.info['axis2']],
**kwargs)
else:
# quiver plot is coloured.
qvax = ax.quiver(points[self.mesh.info['axis1']],
points[self.mesh.info['axis2']],
values[self.mesh.info['axis1']],
values[self.mesh.info['axis2']],
colors,
**kwargs)
return qvax
def colorbar(self, ax, coloredplot, cax=None, **kwargs):
"""Adds a colorbar to the axes using `matplotlib.pyplot.colorbar`.
Axes to which the colorbar should be added is passed via `ax`
argument. If the colorbar axes are made before the method is
called, they should be passed as `cax`. The plot to which the
colorbar should correspond to is passed via `coloredplot`. All
other parameters accepted by `matplotlib.pyplot.colorbar` can
be passed. In order to use this function inside Jupyter
notebook `%matplotlib inline` must be activated after
`discretisedfield` is imported.
Parameters
----------
ax : matplotlib.axes.Axes
Axes object to which the colorbar will be added.
coloredplot : matplotlib.quiver.Quiver, matplotlib.image.AxesImage
A plot to which the colorbar should correspond
cax : matplotlib.axes.Axes, optional
Colorbar axes.
Returns
-------
matplotlib.colorbar.Colorbar
Example
-------
>>> import discretisedfield as df
...
>>> p1 = (0, 0, 0)
>>> p2 = (100, 100, 100)
>>> n = (10, 10, 10)
>>> mesh = df.Mesh(p1=p1, p2=p2, n=n)
>>> field = df.Field(mesh, dim=3, value=(1, 2, 0))
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> coloredplot = field.plane(z=50).quiver(ax=ax, color_field=field.z)
>>> field.colorbar(ax=ax, coloredplot=coloredplot)
<matplotlib.colorbar.Colorbar object at ...>
"""
if cax is None:
divider = mpl_toolkits.axes_grid1.make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.1)
cbar = plt.colorbar(coloredplot, cax=cax, **kwargs)
return cbar
def k3d_nonzero(self, color=dfu.colormap[0], plot=None, **kwargs):
"""Plots the voxels where the value of a scalar field is nonzero.
All mesh cells where the value of the field is not zero will
be marked using the same color. Only scalar fields can be
plotted. Otherwise, ValueError is raised. Different colour of
voxels can be passed in the RGB format using `color`
parameter. This function is often used to look at the defined
sample in the finite difference mesh, by inspecting its norm
(`field.norm.k3d_nonzero`). If `plot` is passed as a
`k3d.plot.Plot`, plot is added to it. Otherwise, a new k3d
plot is created. All arguments allowed in `k3d.voxels()` can
be passed. This function is to be called in Jupyter notebook.
Parameters
----------
color : int/hex, optional
Voxel color in hexadecimal format.
plot : k3d.plot.Plot, optional
If this argument is passed, plot is added to
it. Otherwise, a new k3d plot is created.
Example
-------
>>> import discretisedfield as df
...
>>> p1 = (-50, -50, -50)
>>> p2 = (50, 50, 50)
>>> n = (10, 10, 10)
>>> mesh = df.Mesh(p1=p1, p2=p2, n=n)
>>> field = df.Field(mesh, dim=3, value=(1, 2, 0))
>>> def normfun(pos):
... x, y, z = pos
... if x**2 + y**2 < 30**2:
... return 1
... else:
... return 0
>>> field.norm = normfun
>>> field.norm.k3d_nonzero()
Plot(...)
.. seealso:: :py:func:`~discretisedfield.Field.k3d_voxels`
"""
if self.dim > 1:
msg = ('Only scalar (dim=1) fields can be plotted. Consider '
'plotting one component, e.g. field.x.k3d_nonzero() '
'or norm field.norm.k3d_nonzero().')
raise ValueError(msg)
plot_array = np.copy(self.array) # make a deep copy
plot_array = np.squeeze(plot_array) # remove an empty dimension
plot_array = np.swapaxes(plot_array, 0, 2) # k3d: arrays are (z, y, x)
plot_array[plot_array != 0] = 1 # all cells have the same colour
# In the case of nano-sized samples, fix the order of
# magnitude of the plot extent to avoid freezing the k3d plot.
if np.any(np.divide(self.mesh.cell, 1e-9) < 1e3):
pmin = np.divide(self.mesh.pmin, 1e-9)
pmax = np.divide(self.mesh.pmax, 1e-9)
else:
pmin = self.mesh.pmin
pmax = self.mesh.pmax
dfu.voxels(plot_array, pmin, pmax, colormap=color,
plot=plot, **kwargs)
def k3d_voxels(self, norm_field=None, plot=None, **kwargs):
"""Plots the scalar field as a coloured `k3d.voxels()` plot.
        At all mesh cells, a voxel will be plotted and coloured
according to its value. If the scalar field plotted is
extracted from a vector field, which has coordinates where the
norm of the field is zero, the norm of that vector field can
be passed using `norm_field` argument, so that voxels at those
        coordinates are not shown. Only scalar fields can be
plotted. Otherwise, ValueError is raised. If `plot` is passed
as a `k3d.plot.Plot`, plot is added to it. Otherwise, a new
k3d plot is created. All arguments allowed in `k3d.voxels()`
can be passed. This function is to be called in Jupyter
notebook.
Parameters
----------
norm_field : discretisedfield.Field, optional
A (scalar) norm field used for determining whether certain
voxels should be plotted.
plot : k3d.plot.Plot, optional
If this argument is passed, plot is added to
it. Otherwise, a new k3d plot is created.
Example
-------
>>> import discretisedfield as df
...
>>> p1 = (-50, -50, -50)
>>> p2 = (50, 50, 50)
>>> n = (10, 10, 10)
>>> mesh = df.Mesh(p1=p1, p2=p2, n=n)
>>> field = df.Field(mesh, dim=3, value=(1, 2, 0))
>>> def normfun(pos):
... x, y, z = pos
... if x**2 + y**2 < 30**2:
... return 1
... else:
... return 0
>>> field.norm = normfun
>>> field.x.k3d_voxels(norm_field=field.norm)
Plot(...)
.. seealso:: :py:func:`~discretisedfield.Field.k3d_vectors`
"""
if self.dim > 1:
msg = ('Only scalar (dim=1) fields can be plotted. Consider '
'plotting one component, e.g. field.x.k3d_nonzero() '
'or norm field.norm.k3d_nonzero().')
raise ValueError(msg)
plot_array = np.copy(self.array) # make a deep copy
plot_array = plot_array[..., 0] # remove an empty dimension
plot_array -= plot_array.min()
# In the case of uniform fields, division by zero can be
# encountered.
if plot_array.max() != 0:
plot_array /= plot_array.max()
plot_array *= 254
plot_array += 1
plot_array = plot_array.round()
plot_array = plot_array.astype(int)
if norm_field is not None:
for index in self.mesh.indices:
if norm_field(self.mesh.index2point(index)) == 0:
plot_array[index] = 0
plot_array = np.swapaxes(plot_array, 0, 2) # k3d: arrays are (z, y, x)
cmap = matplotlib.cm.get_cmap('viridis', 256)
colormap = [dfu.num2hexcolor(i, cmap) for i in range(cmap.N)]
# In the case of nano-sized samples, fix the order of
# magnitude of the plot extent to avoid freezing the k3d plot.
if np.any(np.divide(self.mesh.cell, 1e-9) < 1e3):
pmin = np.divide(self.mesh.pmin, 1e-9)
pmax = np.divide(self.mesh.pmax, 1e-9)
else:
pmin = self.mesh.pmin
pmax = self.mesh.pmax
dfu.voxels(plot_array, pmin, pmax, colormap=colormap,
plot=plot, **kwargs)
def k3d_vectors(self, color_field=None, points=True, plot=None, **kwargs):
"""Plots the vector field as a `k3d.vectors()` plot.
At all mesh cells, a vector will be plotted if its norm is not
zero. Vectors can be coloured according to the values of the
scalar field passed as `color_field`. Only vector fields can
be plotted. Otherwise, ValueError is raised. Points at the
discretisation cell centres can be added by setting
`points=True`. If `plot` is passed as a `k3d.plot.Plot`, plot
is added to it. Otherwise, a new k3d plot is created. All
arguments allowed in `k3d.vectors()` can be passed. This
function is intended to be called in a Jupyter notebook.
Parameters
----------
color_field : discretisedfield.Field, optional
A (scalar) field used for determining the colours of
vectors.
points : bool, optional
If `True`, points will be added to the discretisation cell
centres.
plot : k3d.plot.Plot, optional
If this argument is passed, plot is added to
it. Otherwise, a new k3d plot is created.
Example
-------
1. Plotting an entire vector field.
>>> import discretisedfield as df
...
>>> p1 = (-50, -50, -50)
>>> p2 = (50, 50, 50)
>>> n = (10, 10, 10)
>>> mesh = df.Mesh(p1=p1, p2=p2, n=n)
>>> field = df.Field(mesh, dim=3, value=(1, 2, 0))
>>> field.k3d_vectors(color_field=field.x)
Plot(...)
2. Plotting the slice of a vector field.
>>> import discretisedfield as df
...
>>> p1 = (-50, -50, -50)
>>> p2 = (50, 50, 50)
>>> n = (10, 10, 10)
>>> mesh = df.Mesh(p1=p1, p2=p2, n=n)
>>> field = df.Field(mesh, dim=3, value=(1, 2, 0))
>>> field.plane('x').k3d_vectors(color_field=field.x)
Plot(...)
.. seealso:: :py:func:`~discretisedfield.Field.k3d_voxels`
"""
if self.dim != 3:
msg = 'Only three-dimensional (dim=3) fields can be plotted.'
raise ValueError(msg)
coordinates, vectors, color_values = [], [], []
norm = self.norm # assigned to be computed only once
for coord, value in self:
if norm(coord) > 0:
coordinates.append(coord)
vectors.append(value)
if color_field is not None:
color_values.append(color_field(coord)[0])
coordinates, vectors = np.array(coordinates), np.array(vectors)
# In the case of nano-sized samples, fix the order of
# magnitude of the coordinates to avoid freezing the k3d plot.
if np.any(np.divide(self.mesh.cell, 1e-9) < 1e3):
coordinates /= 1e-9
cell = np.divide(self.mesh.cell, 1e-9)
else:
cell = self.mesh.cell
# Scale the vectors to correspond to the size of cells.
vectors /= vectors.max()
vectors *= 0.8*np.array(cell)
# Middle of the arrow is at the cell centre.
coordinates -= 0.5 * vectors
if color_field is not None:
color_values = np.array(color_values)
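# Normalise the colour values to integers so that they can be used
# as indices into the 256-entry viridis colormap below.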
color_values -= color_values.min()
# In the case of uniform fields, division by zero can be
# encountered.
if color_values.max() != 0:
color_values /= color_values.max()
color_values *= 256
color_values = color_values.round()
color_values = color_values.astype(int)
cmap = matplotlib.cm.get_cmap('viridis', 256)
colors = []
for c in color_values:
color = dfu.num2hexcolor(c, cmap)
colors.append((color, color))
else:
colors = []
plot = dfu.vectors(coordinates, vectors, colors=colors,
plot=plot, **kwargs)
if points:
dfu.points(coordinates + 0.5 * vectors, plot=plot)
| 35.333777
| 79
| 0.527003
|
import pyvtk
import struct
import matplotlib
import numpy as np
import mpl_toolkits.axes_grid1
import discretisedfield as df
import ubermagutil.typesystem as ts
import discretisedfield.util as dfu
import matplotlib.pyplot as plt
@ts.typesystem(mesh=ts.Typed(expected_type=df.Mesh),
dim=ts.Scalar(expected_type=int, unsigned=True, const=True),
name=ts.Name(const=True))
class Field:
def __init__(self, mesh, dim=3, value=0, norm=None, name='field'):
self.mesh = mesh
self.dim = dim
self.value = value
self.norm = norm
self.name = name
@property
def value(self):
value_array = dfu.as_array(self.mesh, self.dim, self._value)
if np.array_equal(self.array, value_array):
return self._value
else:
return self.array
@value.setter
def value(self, val):
self._value = val
self.array = dfu.as_array(self.mesh, self.dim, val)
@property
def array(self):
return self._array
@array.setter
def array(self, val):
if isinstance(val, np.ndarray) and \
val.shape == self.mesh.n + (self.dim,):
self._array = val
else:
msg = (f'Unsupported type(val)={type(val)} '
'or invalid value dimensions.')
raise ValueError(msg)
@property
def norm(self):
current_norm = np.linalg.norm(self.array, axis=-1)[..., None]
return Field(self.mesh, dim=1, value=current_norm, name='norm')
@norm.setter
def norm(self, val):
if val is not None:
if self.dim == 1:
msg = f'Cannot set norm for field with dim={self.dim}.'
raise ValueError(msg)
if not np.all(self.norm.array):
msg = 'Cannot normalise field with zero values.'
raise ValueError(msg)
self.array /= self.norm.array
self.array *= dfu.as_array(self.mesh, dim=1, val=val)
@property
def average(self):
return tuple(self.array.mean(axis=(0, 1, 2)))
def __repr__(self):
return (f'Field(mesh={repr(self.mesh)}, '
f'dim={self.dim}, name=\'{self.name}\')')
def __call__(self, point):
value = self.array[self.mesh.point2index(point)]
if self.dim > 1:
value = tuple(value)
return value
def __getattr__(self, name):
if name in list(dfu.axesdict.keys())[:self.dim] and 1 < self.dim <= 3:
component_array = self.array[..., dfu.axesdict[name]][..., None]
fieldname = f'{self.name}-{name}'
return Field(mesh=self.mesh, dim=1,
value=component_array, name=fieldname)
else:
msg = f'{type(self).__name__} object has no attribute {name}.'
raise AttributeError(msg)
def __dir__(self):
if 1 < self.dim <= 3:
extension = list(dfu.axesdict.keys())[:self.dim]
else:
extension = []
return list(self.__dict__.keys()) + extension
def __iter__(self):
for point in self.mesh.coordinates:
yield point, self.__call__(point)
def line(self, p1, p2, n=100):
for point in self.mesh.line(p1=p1, p2=p2, n=n):
yield point, self.__call__(point)
def plane(self, *args, n=None, **kwargs):
plane_mesh = self.mesh.plane(*args, n=n, **kwargs)
return self.__class__(plane_mesh, dim=self.dim, value=self)
def write(self, filename, representation='txt', extend_scalar=False):
if any([filename.endswith(ext) for ext in ['.omf', '.ovf', '.ohf']]):
self._writeovf(filename, representation=representation,
extend_scalar=extend_scalar)
elif filename.endswith('.vtk'):
self._writevtk(filename)
else:
msg = ('Allowed extensions for writing the field are '
'.omf, .ovf, .ohf, and .vtk.')
raise ValueError(msg)
def _writeovf(self, filename, representation='txt', extend_scalar=False):
if extend_scalar and self.dim == 1:
write_dim = 3
else:
write_dim = self.dim
header = ['OOMMF OVF 2.0',
'',
'Segment count: 1',
'',
'Begin: Segment',
'Begin: Header',
'',
'Title: Field generated omf file',
'Desc: File generated by Field class',
'meshunit: m',
'meshtype: rectangular',
f'xbase: {self.mesh.pmin[0] + self.mesh.cell[0]/2}',
f'ybase: {self.mesh.pmin[1] + self.mesh.cell[1]/2}',
f'zbase: {self.mesh.pmin[2] + self.mesh.cell[2]/2}',
f'xnodes: {self.mesh.n[0]}',
f'ynodes: {self.mesh.n[1]}',
f'znodes: {self.mesh.n[2]}',
f'xstepsize: {self.mesh.cell[0]}',
f'ystepsize: {self.mesh.cell[1]}',
f'zstepsize: {self.mesh.cell[2]}',
f'xmin: {self.mesh.pmin[0]}',
f'ymin: {self.mesh.pmin[1]}',
f'zmin: {self.mesh.pmin[2]}',
f'xmax: {self.mesh.pmax[0]}',
f'ymax: {self.mesh.pmax[1]}',
f'zmax: {self.mesh.pmax[2]}',
f'valuedim: {write_dim}',
f'valuelabels: {self.name}_x {self.name}_y {self.name}_z',
'valueunits: A/m A/m A/m',
'',
'End: Header',
'']
if representation == 'bin4':
header.append('Begin: Data Binary 4')
footer = ['End: Data Binary 4',
'End: Segment']
elif representation == 'bin8':
header.append('Begin: Data Binary 8')
footer = ['End: Data Binary 8',
'End: Segment']
elif representation == 'txt':
header.append('Begin: Data Text')
footer = ['End: Data Text',
'End: Segment']
f = open(filename, 'w')
f.write(''.join(map(lambda line: f'# {line}\n', header)))
f.close()
binary_reps = {'bin4': (1234567.0, 'f'),
'bin8': (123456789012345.0, 'd')}
if representation in binary_reps:
f = open(filename, 'ab')
packarray = [binary_reps[representation][0]]
for i in self.mesh.indices:
for vi in self.array[i]:
packarray.append(vi)
v_bin = struct.pack(binary_reps[representation][1]*len(packarray),
*packarray)
f.write(v_bin)
f.close()
else:
f = open(filename, 'a')
for i in self.mesh.indices:
if self.dim == 3:
v = [vi for vi in self.array[i]]
elif self.dim == 1:
if extend_scalar:
v = [self.array[i][0], 0.0, 0.0]
else:
v = [self.array[i][0]]
else:
msg = (f'Cannot write dim={self.dim} field.')
raise TypeError(msg)
for vi in v:
f.write(' ' + str(vi))
f.write('\n')
f.close()
f = open(filename, 'a')
f.write(''.join(map(lambda line: f'# {line}\n', footer)))
f.close()
def _writevtk(self, filename):
grid = [pmini + np.linspace(0, li, ni+1) for pmini, li, ni in
zip(self.mesh.pmin, self.mesh.l, self.mesh.n)]
structure = pyvtk.RectilinearGrid(*grid)
vtkdata = pyvtk.VtkData(structure)
vectors = [self.__call__(coord) for coord in self.mesh.coordinates]
vtkdata.cell_data.append(pyvtk.Vectors(vectors, self.name))
for i, component in enumerate(dfu.axesdict.keys()):
name = f'{self.name}_{component}'
vtkdata.cell_data.append(pyvtk.Scalars(list(zip(*vectors))[i],
name))
vtkdata.tofile(filename)
@classmethod
def fromfile(cls, filename, norm=None, name='field'):
mdatalist = ['xmin', 'ymin', 'zmin', 'xmax', 'ymax', 'zmax',
'xstepsize', 'ystepsize', 'zstepsize', 'valuedim']
mdatadict = dict()
try:
with open(filename, 'r', encoding='utf-8') as ovffile:
f = ovffile.read()
lines = f.split('\n')
mdatalines = filter(lambda s: s.startswith('#'), lines)
datalines = np.loadtxt(filter(lambda s: not s.startswith('#'),
lines))
for line in mdatalines:
for mdatum in mdatalist:
if mdatum in line:
mdatadict[mdatum] = float(line.split()[-1])
break
except UnicodeDecodeError:
with open(filename, 'rb') as ovffile:
f = ovffile.read()
lines = f.split(b'\n')
mdatalines = filter(lambda s: s.startswith(bytes('#', 'utf-8')),
lines)
for line in mdatalines:
for mdatum in mdatalist:
if bytes(mdatum, 'utf-8') in line:
mdatadict[mdatum] = float(line.split()[-1])
break
header = b'# Begin: Data Binary '
data_start = f.find(header)
header = f[data_start:data_start + len(header) + 1]
data_start += len(b'# Begin: Data Binary 8')
data_end = f.find(b'# End: Data Binary ')
newlines = [b'\n\r', b'\r\n', b'\n']
for nl in newlines:
if f.startswith(nl, data_start):
data_start += len(nl)
break
if b'4' in header:
formatstr = '@f'
checkvalue = 1234567.0
elif b'8' in header:
formatstr = '@d'
checkvalue = 123456789012345.0
listdata = list(struct.iter_unpack(formatstr,
f[data_start:data_end]))
datalines = np.array(listdata)
if datalines[0] != checkvalue:
msg = 'Binary Data cannot be read.'
raise AssertionError(msg)
datalines = datalines[1:]
p1 = (mdatadict[key] for key in ['xmin', 'ymin', 'zmin'])
p2 = (mdatadict[key] for key in ['xmax', 'ymax', 'zmax'])
cell = (mdatadict[key] for key in ['xstepsize', 'ystepsize',
'zstepsize'])
dim = int(mdatadict['valuedim'])
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
field = df.Field(mesh, dim=dim, name=name)
r_tuple = tuple(reversed(field.mesh.n)) + (int(mdatadict['valuedim']),)
t_tuple = tuple(reversed(range(3))) + (3,)
field.array = datalines.reshape(r_tuple).transpose(t_tuple)
field.norm = norm
return field
def mpl(self, figsize=None):
if not hasattr(self.mesh, 'info'):
msg = ('Only sliced field can be plotted using mpl. '
'For instance, field.plane(\'x\').mpl().')
raise ValueError(msg)
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
planeaxis = dfu.raxesdict[self.mesh.info['planeaxis']]
if self.dim > 1:
self.quiver(ax=ax, headwidth=5)
scfield = getattr(self, planeaxis)
coloredplot = scfield.imshow(ax=ax, norm_field=self.norm)
else:
scfield = self
coloredplot = scfield.imshow(ax=ax, norm_field=None)
cbar = self.colorbar(ax, coloredplot)
ax.set_xlabel(dfu.raxesdict[self.mesh.info['axis1']])
ax.set_ylabel(dfu.raxesdict[self.mesh.info['axis2']])
if self.dim > 1:
cbar.ax.set_ylabel(planeaxis + ' component')
def imshow(self, ax, norm_field=None, **kwargs):
if not hasattr(self.mesh, 'info'):
msg = ('Only sliced field can be plotted using imshow. '
'For instance, field.plane(\'x\').imshow(ax=ax).')
raise ValueError(msg)
if self.dim > 1:
msg = ('Only scalar (dim=1) fields can be plotted. Consider '
'plotting one component, e.g. field.x.imshow(ax=ax) '
'or norm field.norm.imshow(ax=ax).')
raise ValueError(msg)
points, values = list(zip(*list(self)))
if norm_field is not None:
values = list(values)
for i, point in enumerate(points):
if norm_field(point) == 0:
values[i] = np.nan
values = [v[0] if not np.isnan(v) else v for v in values]
else:
values = list(zip(*values))
points = list(zip(*points))
extent = [self.mesh.pmin[self.mesh.info['axis1']],
self.mesh.pmax[self.mesh.info['axis1']],
self.mesh.pmin[self.mesh.info['axis2']],
self.mesh.pmax[self.mesh.info['axis2']]]
n = (self.mesh.n[self.mesh.info['axis2']],
self.mesh.n[self.mesh.info['axis1']])
imax = ax.imshow(np.array(values).reshape(n), origin='lower',
extent=extent, **kwargs)
return imax
def quiver(self, ax=None, color_field=None, **kwargs):
if not hasattr(self.mesh, 'info'):
msg = ('Only sliced field can be plotted using quiver. '
'For instance, field.plane(\'x\').quiver(ax=ax).')
raise ValueError(msg)
if self.dim != 3:
msg = 'Only three-dimensional (dim=3) fields can be plotted.'
raise ValueError(msg)
points, values = list(zip(*list(self)))
points, values = list(points), list(values)
points = [p for p, v in zip(points, values)
if not np.equal(v, 0).all()]
values = [v for v in values if not np.equal(v, 0).all()]
if color_field is not None:
colors = [color_field(p) for p in points]
colors = list(zip(*colors))
points, values = list(zip(*points)), list(zip(*values))
if not any(values[self.mesh.info['axis1']] +
values[self.mesh.info['axis2']]):
kwargs['scale'] = 1
kwargs['pivot'] = 'mid'
if color_field is None:
qvax = ax.quiver(points[self.mesh.info['axis1']],
points[self.mesh.info['axis2']],
values[self.mesh.info['axis1']],
values[self.mesh.info['axis2']],
**kwargs)
else:
qvax = ax.quiver(points[self.mesh.info['axis1']],
points[self.mesh.info['axis2']],
values[self.mesh.info['axis1']],
values[self.mesh.info['axis2']],
colors,
**kwargs)
return qvax
def colorbar(self, ax, coloredplot, cax=None, **kwargs):
if cax is None:
divider = mpl_toolkits.axes_grid1.make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.1)
cbar = plt.colorbar(coloredplot, cax=cax, **kwargs)
return cbar
def k3d_nonzero(self, color=dfu.colormap[0], plot=None, **kwargs):
if self.dim > 1:
msg = ('Only scalar (dim=1) fields can be plotted. Consider '
'plotting one component, e.g. field.x.k3d_nonzero() '
'or norm field.norm.k3d_nonzero().')
raise ValueError(msg)
plot_array = np.copy(self.array)
plot_array = np.squeeze(plot_array)
plot_array = np.swapaxes(plot_array, 0, 2)
plot_array[plot_array != 0] = 1
if np.any(np.divide(self.mesh.cell, 1e-9) < 1e3):
pmin = np.divide(self.mesh.pmin, 1e-9)
pmax = np.divide(self.mesh.pmax, 1e-9)
else:
pmin = self.mesh.pmin
pmax = self.mesh.pmax
dfu.voxels(plot_array, pmin, pmax, colormap=color,
plot=plot, **kwargs)
def k3d_voxels(self, norm_field=None, plot=None, **kwargs):
if self.dim > 1:
msg = ('Only scalar (dim=1) fields can be plotted. Consider '
'plotting one component, e.g. field.x.k3d_nonzero() '
'or norm field.norm.k3d_nonzero().')
raise ValueError(msg)
plot_array = np.copy(self.array)
plot_array = plot_array[..., 0]
plot_array -= plot_array.min()
if plot_array.max() != 0:
plot_array /= plot_array.max()
plot_array *= 254
plot_array += 1
plot_array = plot_array.round()
plot_array = plot_array.astype(int)
if norm_field is not None:
for index in self.mesh.indices:
if norm_field(self.mesh.index2point(index)) == 0:
plot_array[index] = 0
plot_array = np.swapaxes(plot_array, 0, 2)
cmap = matplotlib.cm.get_cmap('viridis', 256)
colormap = [dfu.num2hexcolor(i, cmap) for i in range(cmap.N)]
if np.any(np.divide(self.mesh.cell, 1e-9) < 1e3):
pmin = np.divide(self.mesh.pmin, 1e-9)
pmax = np.divide(self.mesh.pmax, 1e-9)
else:
pmin = self.mesh.pmin
pmax = self.mesh.pmax
dfu.voxels(plot_array, pmin, pmax, colormap=colormap,
plot=plot, **kwargs)
def k3d_vectors(self, color_field=None, points=True, plot=None, **kwargs):
if self.dim != 3:
msg = 'Only three-dimensional (dim=3) fields can be plotted.'
raise ValueError(msg)
coordinates, vectors, color_values = [], [], []
norm = self.norm
for coord, value in self:
if norm(coord) > 0:
coordinates.append(coord)
vectors.append(value)
if color_field is not None:
color_values.append(color_field(coord)[0])
coordinates, vectors = np.array(coordinates), np.array(vectors)
if np.any(np.divide(self.mesh.cell, 1e-9) < 1e3):
coordinates /= 1e-9
cell = np.divide(self.mesh.cell, 1e-9)
else:
cell = self.mesh.cell
vectors /= vectors.max()
vectors *= 0.8*np.array(cell)
coordinates -= 0.5 * vectors
if color_field is not None:
color_values = np.array(color_values)
color_values -= color_values.min()
if color_values.max() != 0:
color_values /= color_values.max()
color_values *= 256
color_values = color_values.round()
color_values = color_values.astype(int)
cmap = matplotlib.cm.get_cmap('viridis', 256)
colors = []
for c in color_values:
color = dfu.num2hexcolor(c, cmap)
colors.append((color, color))
else:
colors = []
plot = dfu.vectors(coordinates, vectors, colors=colors,
plot=plot, **kwargs)
if points:
dfu.points(coordinates + 0.5 * vectors, plot=plot)
| true
| true
|
f718eacfba0628440354e96dd089e07772ef3d94
| 11,677
|
py
|
Python
|
docs/conf.py
|
yahgwai/raiden
|
a76809872468890d7f2a66b293876aff93b6ea97
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
yahgwai/raiden
|
a76809872468890d7f2a66b293876aff93b6ea97
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
yahgwai/raiden
|
a76809872468890d7f2a66b293876aff93b6ea97
|
[
"MIT"
] | null | null | null |
import os
import shlex
import subprocess
import sys
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
from sphinx.util import compat
compat.make_admonition = BaseAdmonition
#
# Raiden documentation build configuration file, created by
# sphinx-quickstart2 on Mon Oct 24 10:55:13 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# - Add the Raiden path to sys.path so Sphinx can import the package
sys.path.insert(0, os.path.dirname(os.path.abspath('../raiden/')))
def check_if_nightly(version):
try:
git_version, _ = subprocess.Popen(
shlex.split('git describe --tags --abbrev=8'),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
).communicate()
git_version = git_version.decode()
if git_version.startswith('v'):
git_version = git_version[1:]
git_version = git_version.strip()
# if this has commits after the tag, it's a prerelease:
if git_version.count('-') == 2:
return 'nightly'
elif git_version.count('.') == 2:
return git_version
else:
return version
except BaseException:
return version
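# Illustration (hypothetical `git describe` outputs): a tagged commit such as
# 'v0.100.2' yields '0.100.2', a post-tag commit such as 'v0.100.2-14-gabcdef12'
# yields 'nightly', and anything unparsable falls back to the version passed in.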
# Add customized stylesheet
def setup(app):
app.add_stylesheet('css/custom.css')
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'releases',
'sphinxcontrib.httpdomain',
'sphinxcontrib.httpexample',
'sphinxcontrib.images',
'sphinx.ext.mathjax',
]
# 'releases' (changelog) settings
releases_issue_uri = "https://github.com/raiden-network/raiden/issues/%s"
releases_release_uri = "https://github.com/raiden-network/raiden/releases/tag/v%s"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Raiden Network'
author = 'Raiden Project'
version_string = '0.100.2'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = check_if_nightly(version_string)
# The full version, including alpha/beta/rc tags.
release = version
# Using rst_epilog we can expose any variables from conf.py to each
# rst file
rst_epilog = """
.. |capitalized_version| replace:: {}
""".format(version.capitalize())
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {'logo_only': True, 'display_version': False}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'Raiden vPOC-0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
html_logo = 'raiden.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_show_copyright = False
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Raidendoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Raiden.tex', 'Raiden Network Documentation', 'Raiden Project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
latex_logo = 'raiden.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'raiden', 'Raiden Network Documentation', [author], 1),
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
'Raiden',
'Raiden Network Documentation',
author,
'Raiden',
'One line description of project.',
'Miscellaneous',
),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# Enables 0.x.y releases to not be grouped into feature and bugfix releases
# see: http://releases.readthedocs.io/en/latest/concepts.html#unstable-prehistory-mode
# This needs to be kept enabled even once 1.0 has been reached!
releases_unstable_prehistory = True
| 28.761084
| 91
| 0.701208
|
import os
import shlex
import subprocess
import sys
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
from sphinx.util import compat
compat.make_admonition = BaseAdmonition
sys.path.insert(0, os.path.dirname(os.path.abspath('../raiden/')))
def check_if_nightly(version):
try:
git_version, _ = subprocess.Popen(
shlex.split('git describe --tags --abbrev=8'),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
).communicate()
git_version = git_version.decode()
if git_version.startswith('v'):
git_version = git_version[1:]
git_version = git_version.strip()
if git_version.count('-') == 2:
return 'nightly'
elif git_version.count('.') == 2:
return git_version
else:
return version
except BaseException:
return version
# Add customized stylesheet
def setup(app):
app.add_stylesheet('css/custom.css')
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'releases',
'sphinxcontrib.httpdomain',
'sphinxcontrib.httpexample',
'sphinxcontrib.images',
'sphinx.ext.mathjax',
]
# 'releases' (changelog) settings
releases_issue_uri = "https://github.com/raiden-network/raiden/issues/%s"
releases_release_uri = "https://github.com/raiden-network/raiden/releases/tag/v%s"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Raiden Network'
author = 'Raiden Project'
version_string = '0.100.2'
# The version info for the project you're documenting, acts as replacement for
version = check_if_nightly(version_string)
release = version
rst_epilog = """
.. |capitalized_version| replace:: {}
""".format(version.capitalize())
language = None
exclude_patterns = []
pygments_style = 'sphinx'
todo_include_todos = True
html_theme = 'sphinx_rtd_theme'
html_theme_options = {'logo_only': True, 'display_version': False}
html_logo = 'raiden.png'
html_static_path = ['_static']
html_show_copyright = False
htmlhelp_basename = 'Raidendoc'
latex_elements = {
}
latex_documents = [
(master_doc, 'Raiden.tex', 'Raiden Network Documentation', 'Raiden Project', 'manual'),
]
latex_logo = 'raiden.png'
man_pages = [
(master_doc, 'raiden', 'Raiden Network Documentation', [author], 1),
]
texinfo_documents = [
(
master_doc,
'Raiden',
'Raiden Network Documentation',
author,
'Raiden',
'One line description of project.',
'Miscellaneous',
),
]
#
# texinfo_no_detailmenu = False
# Enables 0.x.y releases to not be grouped into feature and bugfix releases
# see: http://releases.readthedocs.io/en/latest/concepts.html#unstable-prehistory-mode
# This needs to be kept enabled even once 1.0 has been reached!
releases_unstable_prehistory = True
| true
| true
|
f718eb56dd751002a5d5f1dc575330589e32f944
| 1,486
|
py
|
Python
|
agent/get_all_agents_info_for_customer.py
|
acronis/acronis-cyber-platform-python-examples
|
cb7837618e7e70bd4ca29c8128d99f597aba1289
|
[
"MIT"
] | 3
|
2021-12-22T08:26:04.000Z
|
2021-12-28T07:57:50.000Z
|
agent/get_all_agents_info_for_customer.py
|
stas-pavlov/acronis-cyber-platform-python-examples
|
c45ffd9fce517ffc65d573a6e332b8f95ae3c34b
|
[
"MIT"
] | null | null | null |
agent/get_all_agents_info_for_customer.py
|
stas-pavlov/acronis-cyber-platform-python-examples
|
c45ffd9fce517ffc65d573a6e332b8f95ae3c34b
|
[
"MIT"
] | 2
|
2021-12-23T03:47:36.000Z
|
2022-02-14T15:12:19.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ************************************************************
# Copyright © 2019-2021 Acronis International GmbH.
# This source code is distributed under MIT software license.
# ************************************************************
import json # used for manipulating JSON data
import pprint # used for formatting the output of JSON objects received in API responses
import os
import sys
base_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, base_path)
from common.base_operations import Config, Tenant, Acronis
# Initialize config and read all required values from the JSON config,
# API client, and token files
cfg = Config(full=True)
acronis = Acronis(cfg)
customer = Tenant(os.path.join(base_path, 'customer.json'))
# Retrieve the internal (integer) tenant id using the uuid tenant id
response = acronis.get(
f"api/1/groups/{customer.tenant_id}"
)
if response.ok:
customer = Tenant(json_str=response.text)
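# The groups API response carries the internal integer tenant id; the
# customer object is rebuilt from it, presumably because the agent_manager
# query below expects the integer id rather than the uuid.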
# Get the list of all Acronis Agents for the tenant subtree
# where the root tenant is
# a previously created customer
response = acronis.get(
f'api/agent_manager/v2/agents?tenant_id={customer.tenant_id}'
)
if response.ok:
with open(os.path.join(base_path, 'customer_agents.json'), 'w') as outfile:
json.dump(response.json(), outfile)
else:
pprint.pprint(response.json())
else:
pprint.pprint(response.json())
| 30.326531
| 89
| 0.644011
|
import json
import pprint
import os
import sys
base_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, base_path)
from common.base_operations import Config, Tenant, Acronis
cfg = Config(full=True)
acronis = Acronis(cfg)
customer = Tenant(os.path.join(base_path, 'customer.json'))
response = acronis.get(
f"api/1/groups/{customer.tenant_id}"
)
if response.ok:
customer = Tenant(json_str=response.text)
response = acronis.get(
f'api/agent_manager/v2/agents?tenant_id={customer.tenant_id}'
)
if response.ok:
with open(os.path.join(base_path, 'customer_agents.json'), 'w') as outfile:
json.dump(response.json(), outfile)
else:
pprint.pprint(response.json())
else:
pprint.pprint(response.json())
| true
| true
|
f718ebcec01f5ba7dce969708019e02a04ab848a
| 4,378
|
py
|
Python
|
server/online/models.py
|
Jesterboxboy/mahjong-portal
|
c09362d69a81e81ed30c9159f3a35f9e9def4ac3
|
[
"MIT"
] | 10
|
2018-02-12T10:30:22.000Z
|
2020-06-29T21:06:15.000Z
|
server/online/models.py
|
Jesterboxboy/mahjong-portal
|
c09362d69a81e81ed30c9159f3a35f9e9def4ac3
|
[
"MIT"
] | 62
|
2018-01-05T04:52:38.000Z
|
2021-04-10T07:14:45.000Z
|
server/online/models.py
|
MahjongRepository/mahjong-leaderboard
|
77dfd26cb812c12fa7c2b11e862bb80a9135ccb0
|
[
"MIT"
] | 8
|
2018-05-11T11:05:41.000Z
|
2021-03-10T08:10:50.000Z
|
from django.db import models
from mahjong_portal.models import BaseModel
from tournament.models import Tournament
class TournamentStatus(BaseModel):
tournament = models.ForeignKey(Tournament, on_delete=models.CASCADE)
current_round = models.PositiveSmallIntegerField(null=True, blank=True)
end_break_time = models.DateTimeField(null=True, blank=True)
registration_closed = models.BooleanField(default=False)
def __unicode__(self):
return self.tournament.name
class TournamentPlayers(BaseModel):
tournament = models.ForeignKey(Tournament, on_delete=models.CASCADE)
telegram_username = models.CharField(max_length=32, null=True, blank=True)
discord_username = models.CharField(max_length=32, null=True, blank=True)
tenhou_username = models.CharField(max_length=8)
pantheon_id = models.PositiveIntegerField(null=True, blank=True)
# was user info synced with pantheon or not
added_to_pantheon = models.BooleanField(default=False)
# is user added to the pantheon seating or not
enabled_in_pantheon = models.BooleanField(default=True)
# affects user scores (replacements get -30000 per game)
is_replacement = models.BooleanField(default=False)
team_name = models.CharField(max_length=1000, null=True, blank=True)
team_number = models.PositiveIntegerField(null=True, blank=True)
def __unicode__(self):
return self.tenhou_username
class TournamentGame(BaseModel):
NEW = 0
STARTED = 1
FAILED_TO_START = 2
FINISHED = 3
STATUSES = [[NEW, "New"], [STARTED, "Started"], [FAILED_TO_START, "Failed to start"], [FINISHED, "Finished"]]
tournament = models.ForeignKey(Tournament, on_delete=models.CASCADE)
tournament_round = models.PositiveSmallIntegerField(null=True, blank=True)
log_id = models.CharField(max_length=32, null=True, blank=True)
game_index = models.PositiveSmallIntegerField(default=0)
status = models.PositiveSmallIntegerField(choices=STATUSES, default=NEW)
class Meta:
ordering = ["-tournament", "-tournament_round", "status"]
def __unicode__(self):
return "{}, {}, {}".format(self.id, self.get_status_display(), self.tournament_round)
class TournamentGamePlayer(BaseModel):
player = models.ForeignKey(TournamentPlayers, on_delete=models.CASCADE)
game = models.ForeignKey(TournamentGame, on_delete=models.CASCADE, related_name="game_players")
wind = models.PositiveSmallIntegerField(null=True, blank=True, default=None)
def __unicode__(self):
return "{}, {}, {}, {}".format(
self.player.__unicode__(), self.wind, self.player.pantheon_id, self.player.team_name
)
class TournamentNotification(BaseModel):
TELEGRAM = 0
DISCORD = 1
DESTINATIONS = [[TELEGRAM, "Telegram"], [DISCORD, "Discord"]]
GAME_STARTED = "game_started"
GAME_FAILED = "game_failed"
GAME_FAILED_NO_MEMBERS = "game_failed_no_members"
GAME_ENDED = "game_ended"
GAMES_PREPARED = "games_prepared"
CONFIRMATION_STARTED = "confirmation_started"
CONFIRMATION_ENDED = "confirmation_ended"
ROUND_FINISHED = "round_finished"
TOURNAMENT_FINISHED = "tournament_finished"
GAME_PRE_ENDED = "game_pre_ended"
GAME_PENALTY = "game_penalty"
GAME_LOG_REMINDER = "game_log_reminder"
NOTIFICATION_TYPES = [
[GAME_STARTED, GAME_STARTED],
[GAME_FAILED, GAME_FAILED],
[GAME_FAILED_NO_MEMBERS, GAME_FAILED_NO_MEMBERS],
[GAME_ENDED, GAME_ENDED],
[CONFIRMATION_STARTED, CONFIRMATION_STARTED],
[CONFIRMATION_ENDED, CONFIRMATION_ENDED],
[GAMES_PREPARED, GAMES_PREPARED],
[ROUND_FINISHED, ROUND_FINISHED],
[TOURNAMENT_FINISHED, TOURNAMENT_FINISHED],
[GAME_PRE_ENDED, GAME_PRE_ENDED],
[GAME_PENALTY, GAME_PENALTY],
]
tournament = models.ForeignKey(Tournament, on_delete=models.CASCADE)
notification_type = models.CharField(choices=NOTIFICATION_TYPES, max_length=300)
message_kwargs = models.JSONField(blank=True)
destination = models.PositiveSmallIntegerField(choices=DESTINATIONS)
is_processed = models.BooleanField(default=False)
failed = models.BooleanField(default=False)
class Meta:
ordering = ["-created_on"]
def __unicode__(self):
return f"{self.tournament.name} - {self.get_notification_type_display()}"
| 37.101695
| 113
| 0.731156
|
from django.db import models
from mahjong_portal.models import BaseModel
from tournament.models import Tournament
class TournamentStatus(BaseModel):
tournament = models.ForeignKey(Tournament, on_delete=models.CASCADE)
current_round = models.PositiveSmallIntegerField(null=True, blank=True)
end_break_time = models.DateTimeField(null=True, blank=True)
registration_closed = models.BooleanField(default=False)
def __unicode__(self):
return self.tournament.name
class TournamentPlayers(BaseModel):
tournament = models.ForeignKey(Tournament, on_delete=models.CASCADE)
telegram_username = models.CharField(max_length=32, null=True, blank=True)
discord_username = models.CharField(max_length=32, null=True, blank=True)
tenhou_username = models.CharField(max_length=8)
pantheon_id = models.PositiveIntegerField(null=True, blank=True)
added_to_pantheon = models.BooleanField(default=False)
enabled_in_pantheon = models.BooleanField(default=True)
is_replacement = models.BooleanField(default=False)
team_name = models.CharField(max_length=1000, null=True, blank=True)
team_number = models.PositiveIntegerField(null=True, blank=True)
def __unicode__(self):
return self.tenhou_username
class TournamentGame(BaseModel):
NEW = 0
STARTED = 1
FAILED_TO_START = 2
FINISHED = 3
STATUSES = [[NEW, "New"], [STARTED, "Started"], [FAILED_TO_START, "Failed to start"], [FINISHED, "Finished"]]
tournament = models.ForeignKey(Tournament, on_delete=models.CASCADE)
tournament_round = models.PositiveSmallIntegerField(null=True, blank=True)
log_id = models.CharField(max_length=32, null=True, blank=True)
game_index = models.PositiveSmallIntegerField(default=0)
status = models.PositiveSmallIntegerField(choices=STATUSES, default=NEW)
class Meta:
ordering = ["-tournament", "-tournament_round", "status"]
def __unicode__(self):
return "{}, {}, {}".format(self.id, self.get_status_display(), self.tournament_round)
class TournamentGamePlayer(BaseModel):
player = models.ForeignKey(TournamentPlayers, on_delete=models.CASCADE)
game = models.ForeignKey(TournamentGame, on_delete=models.CASCADE, related_name="game_players")
wind = models.PositiveSmallIntegerField(null=True, blank=True, default=None)
def __unicode__(self):
return "{}, {}, {}, {}".format(
self.player.__unicode__(), self.wind, self.player.pantheon_id, self.player.team_name
)
class TournamentNotification(BaseModel):
TELEGRAM = 0
DISCORD = 1
DESTINATIONS = [[TELEGRAM, "Telegram"], [DISCORD, "Discord"]]
GAME_STARTED = "game_started"
GAME_FAILED = "game_failed"
GAME_FAILED_NO_MEMBERS = "game_failed_no_members"
GAME_ENDED = "game_ended"
GAMES_PREPARED = "games_prepared"
CONFIRMATION_STARTED = "confirmation_started"
CONFIRMATION_ENDED = "confirmation_ended"
ROUND_FINISHED = "round_finished"
TOURNAMENT_FINISHED = "tournament_finished"
GAME_PRE_ENDED = "game_pre_ended"
GAME_PENALTY = "game_penalty"
GAME_LOG_REMINDER = "game_log_reminder"
NOTIFICATION_TYPES = [
[GAME_STARTED, GAME_STARTED],
[GAME_FAILED, GAME_FAILED],
[GAME_FAILED_NO_MEMBERS, GAME_FAILED_NO_MEMBERS],
[GAME_ENDED, GAME_ENDED],
[CONFIRMATION_STARTED, CONFIRMATION_STARTED],
[CONFIRMATION_ENDED, CONFIRMATION_ENDED],
[GAMES_PREPARED, GAMES_PREPARED],
[ROUND_FINISHED, ROUND_FINISHED],
[TOURNAMENT_FINISHED, TOURNAMENT_FINISHED],
[GAME_PRE_ENDED, GAME_PRE_ENDED],
[GAME_PENALTY, GAME_PENALTY],
]
tournament = models.ForeignKey(Tournament, on_delete=models.CASCADE)
notification_type = models.CharField(choices=NOTIFICATION_TYPES, max_length=300)
message_kwargs = models.JSONField(blank=True)
destination = models.PositiveSmallIntegerField(choices=DESTINATIONS)
is_processed = models.BooleanField(default=False)
failed = models.BooleanField(default=False)
class Meta:
ordering = ["-created_on"]
def __unicode__(self):
return f"{self.tournament.name} - {self.get_notification_type_display()}"
| true
| true
|
f718ebeaad41e74835f2322198fe180518fc5225
| 321
|
py
|
Python
|
osf/migrations/0171_merge_20190827_1908.py
|
tsukaeru/RDM-osf.io
|
2dc3e539322b6110e51772f8bd25ebdeb8e12d0e
|
[
"Apache-2.0"
] | 11
|
2018-12-11T16:39:40.000Z
|
2022-02-26T09:51:32.000Z
|
osf/migrations/0171_merge_20190827_1908.py
|
tsukaeru/RDM-osf.io
|
2dc3e539322b6110e51772f8bd25ebdeb8e12d0e
|
[
"Apache-2.0"
] | 52
|
2018-04-13T05:03:21.000Z
|
2022-03-22T02:56:19.000Z
|
osf/migrations/0171_merge_20190827_1908.py
|
tsukaeru/RDM-osf.io
|
2dc3e539322b6110e51772f8bd25ebdeb8e12d0e
|
[
"Apache-2.0"
] | 16
|
2018-07-09T01:44:51.000Z
|
2021-06-30T01:57:16.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-08-27 19:08
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0141_mapsync'),
('osf', '0170_merge_20190807_0515'),
]
operations = [
]
| 18.882353
| 49
| 0.64486
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0141_mapsync'),
('osf', '0170_merge_20190807_0515'),
]
operations = [
]
| true
| true
|
f718ed1d545bbd1402bdebf7470241d3c6aa4d2f
| 6,981
|
py
|
Python
|
bs4/builder/_lxml.py
|
joepie91/beautifulsoup
|
f3675639173ebacf212a552362fbdcb1709c98c9
|
[
"MIT"
] | 1
|
2020-01-21T13:08:01.000Z
|
2020-01-21T13:08:01.000Z
|
bs4/builder/_lxml.py
|
joepie91/beautifulsoup
|
f3675639173ebacf212a552362fbdcb1709c98c9
|
[
"MIT"
] | null | null | null |
bs4/builder/_lxml.py
|
joepie91/beautifulsoup
|
f3675639173ebacf212a552362fbdcb1709c98c9
|
[
"MIT"
] | null | null | null |
__all__ = [
'LXMLTreeBuilderForXML',
'LXMLTreeBuilder',
]
from StringIO import StringIO
import collections
from lxml import etree
from bs4.element import Comment, Doctype, NamespacedAttribute
from bs4.builder import (
FAST,
HTML,
HTMLTreeBuilder,
PERMISSIVE,
TreeBuilder,
XML)
from bs4.dammit import UnicodeDammit
LXML = 'lxml'
class LXMLTreeBuilderForXML(TreeBuilder):
DEFAULT_PARSER_CLASS = etree.XMLParser
is_xml = True
# Well, it's permissive by XML parser standards.
features = [LXML, XML, FAST, PERMISSIVE]
CHUNK_SIZE = 512
# This namespace mapping is specified in the XML Namespace
# standard.
DEFAULT_NSMAPS = {'http://www.w3.org/XML/1998/namespace' : "xml"}
@property
def default_parser(self):
# This can either return a parser object or a class, which
# will be instantiated with default arguments.
return etree.XMLParser(target=self, strip_cdata=False, recover=True)
def __init__(self, parser=None, empty_element_tags=None):
if empty_element_tags is not None:
self.empty_element_tags = set(empty_element_tags)
if parser is None:
# Use the default parser.
parser = self.default_parser
if isinstance(parser, collections.Callable):
# Instantiate the parser with default arguments
parser = parser(target=self, strip_cdata=False)
self.parser = parser
self.soup = None
self.nsmaps = [self.DEFAULT_NSMAPS]
def _getNsTag(self, tag):
# Split the namespace URL out of a fully-qualified lxml tag
# name. Copied from lxml's src/lxml/sax.py.
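# For example, '{http://www.w3.org/1999/xhtml}body' is split into
# ('http://www.w3.org/1999/xhtml', 'body'), while a plain 'body'
# comes back as (None, 'body').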
if tag[0] == '{':
return tuple(tag[1:].split('}', 1))
else:
return (None, tag)
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None):
"""
:return: A 3-tuple (markup, original encoding, encoding
declared within markup).
"""
if isinstance(markup, unicode):
return markup, None, None, False
try_encodings = [user_specified_encoding, document_declared_encoding]
dammit = UnicodeDammit(markup, try_encodings, is_html=True)
return (dammit.markup, dammit.original_encoding,
dammit.declared_html_encoding,
dammit.contains_replacement_characters)
def feed(self, markup):
if isinstance(markup, basestring):
markup = StringIO(markup)
# Call feed() at least once, even if the markup is empty,
# or the parser won't be initialized.
data = markup.read(self.CHUNK_SIZE)
self.parser.feed(data)
while data != '':
# Now call feed() on the rest of the data, chunk by chunk.
data = markup.read(self.CHUNK_SIZE)
if data != '':
self.parser.feed(data)
self.parser.close()
def close(self):
self.nsmaps = [self.DEFAULT_NSMAPS]
def start(self, name, attrs, nsmap={}):
# Make sure attrs is a mutable dict--lxml may send an immutable dictproxy.
attrs = dict(attrs)
nsprefix = None
# Invert each namespace map as it comes in.
if len(self.nsmaps) > 1:
# There are no new namespaces for this tag, but
# non-default namespaces are in play, so we need a
# separate tag stack to know when they end.
self.nsmaps.append(None)
elif len(nsmap) > 0:
# A new namespace mapping has come into play.
inverted_nsmap = dict((value, key) for key, value in nsmap.items())
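# e.g. an lxml nsmap {'svg': 'http://www.w3.org/2000/svg'} becomes
# {'http://www.w3.org/2000/svg': 'svg'}, so the active prefix can later
# be looked up by namespace URL.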
self.nsmaps.append(inverted_nsmap)
# Also treat the namespace mapping as a set of attributes on the
# tag, so we can recreate it later.
attrs = attrs.copy()
for prefix, namespace in nsmap.items():
attribute = NamespacedAttribute(
"xmlns", prefix, "http://www.w3.org/2000/xmlns/")
attrs[attribute] = namespace
# Namespaces are in play. Find any attributes that came in
# from lxml with namespaces attached to their names, and
# turn them into NamespacedAttribute objects.
new_attrs = {}
for attr, value in attrs.items():
namespace, attr = self._getNsTag(attr)
if namespace is None:
new_attrs[attr] = value
else:
nsprefix = self._prefix_for_namespace(namespace)
attr = NamespacedAttribute(nsprefix, attr, namespace)
new_attrs[attr] = value
attrs = new_attrs
namespace, name = self._getNsTag(name)
nsprefix = self._prefix_for_namespace(namespace)
self.soup.handle_starttag(name, namespace, nsprefix, attrs)
def _prefix_for_namespace(self, namespace):
"""Find the currently active prefix for the given namespace."""
if namespace is None:
return None
for inverted_nsmap in reversed(self.nsmaps):
if inverted_nsmap is not None and namespace in inverted_nsmap:
return inverted_nsmap[namespace]
return None
def end(self, name):
self.soup.endData()
completed_tag = self.soup.tagStack[-1]
namespace, name = self._getNsTag(name)
nsprefix = None
if namespace is not None:
for inverted_nsmap in reversed(self.nsmaps):
if inverted_nsmap is not None and namespace in inverted_nsmap:
nsprefix = inverted_nsmap[namespace]
break
self.soup.handle_endtag(name, nsprefix)
if len(self.nsmaps) > 1:
# This tag, or one of its parents, introduced a namespace
# mapping, so pop it off the stack.
self.nsmaps.pop()
def pi(self, target, data):
pass
def data(self, content):
self.soup.handle_data(content)
def doctype(self, name, pubid, system):
self.soup.endData()
doctype = Doctype.for_name_and_ids(name, pubid, system)
self.soup.object_was_parsed(doctype)
def comment(self, content):
"Handle comments as Comment objects."
self.soup.endData()
self.soup.handle_data(content)
self.soup.endData(Comment)
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return u'<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment
class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
features = [LXML, HTML, FAST, PERMISSIVE]
is_xml = False
@property
def default_parser(self):
return etree.HTMLParser
def feed(self, markup):
self.parser.feed(markup)
self.parser.close()
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return u'<html><body>%s</body></html>' % fragment
| 35.436548
| 82
| 0.618966
|
__all__ = [
'LXMLTreeBuilderForXML',
'LXMLTreeBuilder',
]
from StringIO import StringIO
import collections
from lxml import etree
from bs4.element import Comment, Doctype, NamespacedAttribute
from bs4.builder import (
FAST,
HTML,
HTMLTreeBuilder,
PERMISSIVE,
TreeBuilder,
XML)
from bs4.dammit import UnicodeDammit
LXML = 'lxml'
class LXMLTreeBuilderForXML(TreeBuilder):
DEFAULT_PARSER_CLASS = etree.XMLParser
is_xml = True
features = [LXML, XML, FAST, PERMISSIVE]
CHUNK_SIZE = 512
# This namespace mapping is specified in the XML Namespace
# standard.
DEFAULT_NSMAPS = {'http://www.w3.org/XML/1998/namespace' : "xml"}
@property
def default_parser(self):
# This can either return a parser object or a class, which
# will be instantiated with default arguments.
return etree.XMLParser(target=self, strip_cdata=False, recover=True)
def __init__(self, parser=None, empty_element_tags=None):
if empty_element_tags is not None:
self.empty_element_tags = set(empty_element_tags)
if parser is None:
# Use the default parser.
parser = self.default_parser
if isinstance(parser, collections.Callable):
# Instantiate the parser with default arguments
parser = parser(target=self, strip_cdata=False)
self.parser = parser
self.soup = None
self.nsmaps = [self.DEFAULT_NSMAPS]
def _getNsTag(self, tag):
# Split the namespace URL out of a fully-qualified lxml tag
# name. Copied from lxml's src/lxml/sax.py.
if tag[0] == '{':
return tuple(tag[1:].split('}', 1))
else:
return (None, tag)
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None):
if isinstance(markup, unicode):
return markup, None, None, False
try_encodings = [user_specified_encoding, document_declared_encoding]
dammit = UnicodeDammit(markup, try_encodings, is_html=True)
return (dammit.markup, dammit.original_encoding,
dammit.declared_html_encoding,
dammit.contains_replacement_characters)
def feed(self, markup):
if isinstance(markup, basestring):
markup = StringIO(markup)
data = markup.read(self.CHUNK_SIZE)
self.parser.feed(data)
while data != '':
# Now call feed() on the rest of the data, chunk by chunk.
data = markup.read(self.CHUNK_SIZE)
if data != '':
self.parser.feed(data)
self.parser.close()
def close(self):
self.nsmaps = [self.DEFAULT_NSMAPS]
def start(self, name, attrs, nsmap={}):
# Make sure attrs is a mutable dict--lxml may send an immutable dictproxy.
attrs = dict(attrs)
nsprefix = None
# Invert each namespace map as it comes in.
if len(self.nsmaps) > 1:
# There are no new namespaces for this tag, but
# non-default namespaces are in play, so we need a
# separate tag stack to know when they end.
self.nsmaps.append(None)
elif len(nsmap) > 0:
# A new namespace mapping has come into play.
inverted_nsmap = dict((value, key) for key, value in nsmap.items())
self.nsmaps.append(inverted_nsmap)
# Also treat the namespace mapping as a set of attributes on the
# tag, so we can recreate it later.
attrs = attrs.copy()
for prefix, namespace in nsmap.items():
attribute = NamespacedAttribute(
"xmlns", prefix, "http://www.w3.org/2000/xmlns/")
attrs[attribute] = namespace
# Namespaces are in play. Find any attributes that came in
# from lxml with namespaces attached to their names, and
# turn them into NamespacedAttribute objects.
new_attrs = {}
for attr, value in attrs.items():
namespace, attr = self._getNsTag(attr)
if namespace is None:
new_attrs[attr] = value
else:
nsprefix = self._prefix_for_namespace(namespace)
attr = NamespacedAttribute(nsprefix, attr, namespace)
new_attrs[attr] = value
attrs = new_attrs
namespace, name = self._getNsTag(name)
nsprefix = self._prefix_for_namespace(namespace)
self.soup.handle_starttag(name, namespace, nsprefix, attrs)
def _prefix_for_namespace(self, namespace):
if namespace is None:
return None
for inverted_nsmap in reversed(self.nsmaps):
if inverted_nsmap is not None and namespace in inverted_nsmap:
return inverted_nsmap[namespace]
return None
def end(self, name):
self.soup.endData()
completed_tag = self.soup.tagStack[-1]
namespace, name = self._getNsTag(name)
nsprefix = None
if namespace is not None:
for inverted_nsmap in reversed(self.nsmaps):
if inverted_nsmap is not None and namespace in inverted_nsmap:
nsprefix = inverted_nsmap[namespace]
break
self.soup.handle_endtag(name, nsprefix)
if len(self.nsmaps) > 1:
# This tag, or one of its parents, introduced a namespace
# mapping, so pop it off the stack.
self.nsmaps.pop()
def pi(self, target, data):
pass
def data(self, content):
self.soup.handle_data(content)
def doctype(self, name, pubid, system):
self.soup.endData()
doctype = Doctype.for_name_and_ids(name, pubid, system)
self.soup.object_was_parsed(doctype)
def comment(self, content):
self.soup.endData()
self.soup.handle_data(content)
self.soup.endData(Comment)
def test_fragment_to_document(self, fragment):
return u'<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment
class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
features = [LXML, HTML, FAST, PERMISSIVE]
is_xml = False
@property
def default_parser(self):
return etree.HTMLParser
def feed(self, markup):
self.parser.feed(markup)
self.parser.close()
def test_fragment_to_document(self, fragment):
return u'<html><body>%s</body></html>' % fragment
| true
| true
|
f718ef5cb741f308a91aa8e5b6d292f61e222651
| 225
|
py
|
Python
|
test/test_models/test_palm.py
|
Fei-Wang/dl-pytorch
|
a7672603e2de7824d0ff7e97b69dedad3fd9d476
|
[
"MIT"
] | null | null | null |
test/test_models/test_palm.py
|
Fei-Wang/dl-pytorch
|
a7672603e2de7824d0ff7e97b69dedad3fd9d476
|
[
"MIT"
] | null | null | null |
test/test_models/test_palm.py
|
Fei-Wang/dl-pytorch
|
a7672603e2de7824d0ff7e97b69dedad3fd9d476
|
[
"MIT"
] | null | null | null |
import torch
from luffy.models.palm import *
def test_palm_tony():
model = PaLMTony(num_tokens=20000)
tokens = torch.randint(0, 20000, (1, 2048))
feat = model(tokens)
assert feat.shape == (1, 2048, 20000)
| 18.75
| 47
| 0.666667
|
import torch
from luffy.models.palm import *
def test_palm_tony():
model = PaLMTony(num_tokens=20000)
tokens = torch.randint(0, 20000, (1, 2048))
feat = model(tokens)
assert feat.shape == (1, 2048, 20000)
| true
| true
|
f718efd56f2b4db5ce4e2364a546b8f1827f61ad
| 11,130
|
py
|
Python
|
dask/diagnostics/profile.py
|
Jeremaiha-xmetix/dask
|
361a2f5472932cb26667216f0200b565ac67487e
|
[
"BSD-3-Clause"
] | null | null | null |
dask/diagnostics/profile.py
|
Jeremaiha-xmetix/dask
|
361a2f5472932cb26667216f0200b565ac67487e
|
[
"BSD-3-Clause"
] | null | null | null |
dask/diagnostics/profile.py
|
Jeremaiha-xmetix/dask
|
361a2f5472932cb26667216f0200b565ac67487e
|
[
"BSD-3-Clause"
] | null | null | null |
from collections import namedtuple
from itertools import starmap
from multiprocessing import Pipe, Process, current_process
from time import sleep
from timeit import default_timer
from ..callbacks import Callback
from ..utils import import_required
# Stores execution data for each task
TaskData = namedtuple(
"TaskData", ("key", "task", "start_time", "end_time", "worker_id")
)
class Profiler(Callback):
"""A profiler for dask execution at the task level.
Records the following information for each task:
1. Key
2. Task
3. Start time in seconds since the epoch
4. Finish time in seconds since the epoch
5. Worker id
Examples
--------
>>> from operator import add, mul
>>> from dask.threaded import get
>>> from dask.diagnostics import Profiler
>>> dsk = {'x': 1, 'y': (add, 'x', 10), 'z': (mul, 'y', 2)}
>>> with Profiler() as prof:
... get(dsk, 'z')
22
>>> prof.results # doctest: +SKIP
[TaskData(key='y', task=(add, 'x', 10), start_time=..., end_time=..., worker_id=...),
TaskData(key='z', task=(mul, 'y', 2), start_time=..., end_time=..., worker_id=...)]
These results can be visualized in a bokeh plot using the ``visualize``
method. Note that this requires bokeh to be installed.
>>> prof.visualize() # doctest: +SKIP
You can activate the profiler globally
>>> prof.register()
If you use the profiler globally you will need to clear out old results
manually.
>>> prof.clear()
"""
def __init__(self):
self._results = {}
self.results = []
self._dsk = {}
def __enter__(self):
self.clear()
return super().__enter__()
def _start(self, dsk):
self._dsk.update(dsk)
def _pretask(self, key, dsk, state):
start = default_timer()
self._results[key] = (key, dsk[key], start)
def _posttask(self, key, value, dsk, state, id):
end = default_timer()
self._results[key] += (end, id)
def _finish(self, dsk, state, failed):
results = {k: v for k, v in self._results.items() if len(v) == 5}
self.results += list(starmap(TaskData, results.values()))
self._results.clear()
def _plot(self, **kwargs):
from .profile_visualize import plot_tasks
return plot_tasks(self.results, self._dsk, **kwargs)
def visualize(self, **kwargs):
"""Visualize the profiling run in a bokeh plot.
See also
--------
dask.diagnostics.profile_visualize.visualize
"""
from .profile_visualize import visualize
return visualize(self, **kwargs)
def clear(self):
"""Clear out old results from profiler"""
self._results.clear()
del self.results[:]
self._dsk = {}
ResourceData = namedtuple("ResourceData", ("time", "mem", "cpu"))
class ResourceProfiler(Callback):
"""A profiler for resource use.
    Records the following at each timestep
1. Time in seconds since the epoch
2. Memory usage in MB
3. % CPU usage
Examples
--------
>>> from operator import add, mul
>>> from dask.threaded import get
>>> dsk = {'x': 1, 'y': (add, 'x', 10), 'z': (mul, 'y', 2)}
>>> with ResourceProfiler() as prof:
... get(dsk, 'z')
22
These results can be visualized in a bokeh plot using the ``visualize``
method. Note that this requires bokeh to be installed.
>>> prof.visualize() # doctest: +SKIP
You can activate the profiler globally
>>> prof.register()
If you use the profiler globally you will need to clear out old results
manually.
>>> prof.clear()
Note that when used as a context manager data will be collected throughout
the duration of the enclosed block. In contrast, when registered globally
data will only be collected while a dask scheduler is active.
"""
def __init__(self, dt=1):
self._dt = dt
self._entered = False
self._tracker = None
self.results = []
def _is_running(self):
return self._tracker is not None and self._tracker.is_alive()
def _start_collect(self):
if not self._is_running():
self._tracker = _Tracker(self._dt)
self._tracker.start()
self._tracker.parent_conn.send("collect")
def _stop_collect(self):
if self._is_running():
self._tracker.parent_conn.send("send_data")
self.results.extend(starmap(ResourceData, self._tracker.parent_conn.recv()))
def __enter__(self):
self._entered = True
self.clear()
self._start_collect()
return super().__enter__()
def __exit__(self, *args):
self._entered = False
self._stop_collect()
self.close()
super().__exit__(*args)
def _start(self, dsk):
self._start_collect()
def _finish(self, dsk, state, failed):
if not self._entered:
self._stop_collect()
def close(self):
"""Shutdown the resource tracker process"""
if self._is_running():
self._tracker.shutdown()
self._tracker = None
__del__ = close
def clear(self):
self.results = []
def _plot(self, **kwargs):
from .profile_visualize import plot_resources
return plot_resources(self.results, **kwargs)
def visualize(self, **kwargs):
"""Visualize the profiling run in a bokeh plot.
See also
--------
dask.diagnostics.profile_visualize.visualize
"""
from .profile_visualize import visualize
return visualize(self, **kwargs)
class _Tracker(Process):
"""Background process for tracking resource usage"""
def __init__(self, dt=1):
super().__init__()
self.daemon = True
self.dt = dt
self.parent_pid = current_process().pid
self.parent_conn, self.child_conn = Pipe()
def shutdown(self):
if not self.parent_conn.closed:
self.parent_conn.send("shutdown")
self.parent_conn.close()
self.join()
def _update_pids(self, pid):
return [self.parent] + [
p for p in self.parent.children() if p.pid != pid and p.status() != "zombie"
]
def run(self):
psutil = import_required(
"psutil", "Tracking resource usage requires `psutil` to be installed"
)
self.parent = psutil.Process(self.parent_pid)
pid = current_process()
data = []
while True:
try:
msg = self.child_conn.recv()
except KeyboardInterrupt:
continue
if msg == "shutdown":
break
elif msg == "collect":
ps = self._update_pids(pid)
while not data or not self.child_conn.poll():
tic = default_timer()
mem = cpu = 0
for p in ps:
try:
mem2 = p.memory_info().rss
cpu2 = p.cpu_percent()
except Exception: # could be a few different exceptions
pass
else:
# Only increment if both were successful
mem += mem2
cpu += cpu2
data.append((tic, mem / 1e6, cpu))
sleep(self.dt)
elif msg == "send_data":
self.child_conn.send(data)
data = []
self.child_conn.close()
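# Protocol note (descriptive summary added for clarity; it introduces no new
# behaviour): the parent process drives _Tracker over the Pipe created in
# __init__. It sends "collect" to start sampling, "send_data" to receive the
# accumulated (time, mem_in_MB, cpu_percent) tuples, and "shutdown" to stop
# the process, as implemented in run() above.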
CacheData = namedtuple(
"CacheData", ("key", "task", "metric", "cache_time", "free_time")
)
class CacheProfiler(Callback):
"""A profiler for dask execution at the scheduler cache level.
Records the following information for each task:
1. Key
2. Task
3. Size metric
4. Cache entry time in seconds since the epoch
5. Cache exit time in seconds since the epoch
Examples
--------
>>> from operator import add, mul
>>> from dask.threaded import get
>>> from dask.diagnostics import CacheProfiler
>>> dsk = {'x': 1, 'y': (add, 'x', 10), 'z': (mul, 'y', 2)}
>>> with CacheProfiler() as prof:
... get(dsk, 'z')
22
>>> prof.results # doctest: +SKIP
[CacheData(key='y', task=(add, 'x', 10), metric=1, cache_time=..., free_time=...),
CacheData(key='z', task=(mul, 'y', 2), metric=1, cache_time=..., free_time=...)]
The default is to count each task (``metric`` is 1 for all tasks). Other
    functions may be used as a metric instead through the ``metric`` keyword. For
example, the ``nbytes`` function found in ``cachey`` can be used to measure
the number of bytes in the cache.
>>> from cachey import nbytes
>>> with CacheProfiler(metric=nbytes) as prof:
... get(dsk, 'z')
22
The profiling results can be visualized in a bokeh plot using the
``visualize`` method. Note that this requires bokeh to be installed.
>>> prof.visualize() # doctest: +SKIP
You can activate the profiler globally
>>> prof.register()
If you use the profiler globally you will need to clear out old results
manually.
>>> prof.clear()
"""
def __init__(self, metric=None, metric_name=None):
self.clear()
self._metric = metric if metric else lambda value: 1
if metric_name:
self._metric_name = metric_name
elif metric:
self._metric_name = metric.__name__
else:
self._metric_name = "count"
def __enter__(self):
self.clear()
return super().__enter__()
def _start(self, dsk):
self._dsk.update(dsk)
if not self._start_time:
self._start_time = default_timer()
def _posttask(self, key, value, dsk, state, id):
t = default_timer()
self._cache[key] = (self._metric(value), t)
for k in state["released"] & self._cache.keys():
metric, start = self._cache.pop(k)
self.results.append(CacheData(k, dsk[k], metric, start, t))
def _finish(self, dsk, state, failed):
t = default_timer()
for k, (metric, start) in self._cache.items():
self.results.append(CacheData(k, dsk[k], metric, start, t))
self._cache.clear()
def _plot(self, **kwargs):
from .profile_visualize import plot_cache
return plot_cache(
self.results, self._dsk, self._start_time, self._metric_name, **kwargs
)
def visualize(self, **kwargs):
"""Visualize the profiling run in a bokeh plot.
See also
--------
dask.diagnostics.profile_visualize.visualize
"""
from .profile_visualize import visualize
return visualize(self, **kwargs)
def clear(self):
"""Clear out old results from profiler"""
self.results = []
self._cache = {}
self._dsk = {}
self._start_time = None
| 28.984375
| 89
| 0.579784
|
from collections import namedtuple
from itertools import starmap
from multiprocessing import Pipe, Process, current_process
from time import sleep
from timeit import default_timer
from ..callbacks import Callback
from ..utils import import_required
TaskData = namedtuple(
"TaskData", ("key", "task", "start_time", "end_time", "worker_id")
)
class Profiler(Callback):
def __init__(self):
self._results = {}
self.results = []
self._dsk = {}
def __enter__(self):
self.clear()
return super().__enter__()
def _start(self, dsk):
self._dsk.update(dsk)
def _pretask(self, key, dsk, state):
start = default_timer()
self._results[key] = (key, dsk[key], start)
def _posttask(self, key, value, dsk, state, id):
end = default_timer()
self._results[key] += (end, id)
def _finish(self, dsk, state, failed):
results = {k: v for k, v in self._results.items() if len(v) == 5}
self.results += list(starmap(TaskData, results.values()))
self._results.clear()
def _plot(self, **kwargs):
from .profile_visualize import plot_tasks
return plot_tasks(self.results, self._dsk, **kwargs)
def visualize(self, **kwargs):
from .profile_visualize import visualize
return visualize(self, **kwargs)
def clear(self):
self._results.clear()
del self.results[:]
self._dsk = {}
ResourceData = namedtuple("ResourceData", ("time", "mem", "cpu"))
class ResourceProfiler(Callback):
def __init__(self, dt=1):
self._dt = dt
self._entered = False
self._tracker = None
self.results = []
def _is_running(self):
return self._tracker is not None and self._tracker.is_alive()
def _start_collect(self):
if not self._is_running():
self._tracker = _Tracker(self._dt)
self._tracker.start()
self._tracker.parent_conn.send("collect")
def _stop_collect(self):
if self._is_running():
self._tracker.parent_conn.send("send_data")
self.results.extend(starmap(ResourceData, self._tracker.parent_conn.recv()))
def __enter__(self):
self._entered = True
self.clear()
self._start_collect()
return super().__enter__()
def __exit__(self, *args):
self._entered = False
self._stop_collect()
self.close()
super().__exit__(*args)
def _start(self, dsk):
self._start_collect()
def _finish(self, dsk, state, failed):
if not self._entered:
self._stop_collect()
def close(self):
if self._is_running():
self._tracker.shutdown()
self._tracker = None
__del__ = close
def clear(self):
self.results = []
def _plot(self, **kwargs):
from .profile_visualize import plot_resources
return plot_resources(self.results, **kwargs)
def visualize(self, **kwargs):
from .profile_visualize import visualize
return visualize(self, **kwargs)
class _Tracker(Process):
def __init__(self, dt=1):
super().__init__()
self.daemon = True
self.dt = dt
self.parent_pid = current_process().pid
self.parent_conn, self.child_conn = Pipe()
def shutdown(self):
if not self.parent_conn.closed:
self.parent_conn.send("shutdown")
self.parent_conn.close()
self.join()
def _update_pids(self, pid):
return [self.parent] + [
p for p in self.parent.children() if p.pid != pid and p.status() != "zombie"
]
def run(self):
psutil = import_required(
"psutil", "Tracking resource usage requires `psutil` to be installed"
)
self.parent = psutil.Process(self.parent_pid)
pid = current_process()
data = []
while True:
try:
msg = self.child_conn.recv()
except KeyboardInterrupt:
continue
if msg == "shutdown":
break
elif msg == "collect":
ps = self._update_pids(pid)
while not data or not self.child_conn.poll():
tic = default_timer()
mem = cpu = 0
for p in ps:
try:
mem2 = p.memory_info().rss
cpu2 = p.cpu_percent()
except Exception:
pass
else:
mem += mem2
cpu += cpu2
data.append((tic, mem / 1e6, cpu))
sleep(self.dt)
elif msg == "send_data":
self.child_conn.send(data)
data = []
self.child_conn.close()
CacheData = namedtuple(
"CacheData", ("key", "task", "metric", "cache_time", "free_time")
)
class CacheProfiler(Callback):
def __init__(self, metric=None, metric_name=None):
self.clear()
self._metric = metric if metric else lambda value: 1
if metric_name:
self._metric_name = metric_name
elif metric:
self._metric_name = metric.__name__
else:
self._metric_name = "count"
def __enter__(self):
self.clear()
return super().__enter__()
def _start(self, dsk):
self._dsk.update(dsk)
if not self._start_time:
self._start_time = default_timer()
def _posttask(self, key, value, dsk, state, id):
t = default_timer()
self._cache[key] = (self._metric(value), t)
for k in state["released"] & self._cache.keys():
metric, start = self._cache.pop(k)
self.results.append(CacheData(k, dsk[k], metric, start, t))
def _finish(self, dsk, state, failed):
t = default_timer()
for k, (metric, start) in self._cache.items():
self.results.append(CacheData(k, dsk[k], metric, start, t))
self._cache.clear()
def _plot(self, **kwargs):
from .profile_visualize import plot_cache
return plot_cache(
self.results, self._dsk, self._start_time, self._metric_name, **kwargs
)
def visualize(self, **kwargs):
from .profile_visualize import visualize
return visualize(self, **kwargs)
def clear(self):
self.results = []
self._cache = {}
self._dsk = {}
self._start_time = None
| true
| true
|
f718f00471bdf766f909f135516dc1fb334ea3c9
| 61,188
|
py
|
Python
|
nova/db/api.py
|
linets/nova
|
936d0a49594e04e3ec08c7a2115784d072e61dee
|
[
"Apache-2.0"
] | null | null | null |
nova/db/api.py
|
linets/nova
|
936d0a49594e04e3ec08c7a2115784d072e61dee
|
[
"Apache-2.0"
] | null | null | null |
nova/db/api.py
|
linets/nova
|
936d0a49594e04e3ec08c7a2115784d072e61dee
|
[
"Apache-2.0"
] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Defines interface for DB access.
The underlying driver is loaded as a :class:`LazyPluggable`.
Functions in this module are imported into the nova.db namespace. Call these
functions from nova.db namespace, not the nova.db.api namespace.
All functions in this module return objects that implement a dictionary-like
interface. Currently, many of these objects are sqlalchemy objects that
implement a dictionary interface. However, a future goal is to have all of
these objects be simple dictionaries.
**Related Flags**
:db_backend: string to lookup in the list of LazyPluggable backends.
`sqlalchemy` is the only supported backend right now.
:sql_connection: string specifying the sqlalchemy connection to use, like:
`sqlite:///var/lib/nova/nova.sqlite`.
:enable_new_services:  when adding a new service to the database, whether it is
    placed in the pool of available hardware (Default: True)
"""
from nova import exception
from nova import flags
from nova.openstack.common import cfg
from nova import utils
db_opts = [
cfg.StrOpt('db_backend',
default='sqlalchemy',
help='The backend to use for db'),
cfg.BoolOpt('enable_new_services',
default=True,
help='Services to be added to the available pool on create'),
cfg.StrOpt('instance_name_template',
default='instance-%08x',
help='Template string to be used to generate instance names'),
cfg.StrOpt('volume_name_template',
default='volume-%s',
help='Template string to be used to generate instance names'),
cfg.StrOpt('snapshot_name_template',
default='snapshot-%s',
help='Template string to be used to generate snapshot names'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(db_opts)
IMPL = utils.LazyPluggable('db_backend',
sqlalchemy='nova.db.sqlalchemy.api')
class NoMoreNetworks(exception.NovaException):
"""No more available networks."""
pass
class NoMoreTargets(exception.NovaException):
"""No more available targets"""
pass
###################
def constraint(**conditions):
"""Return a constraint object suitable for use with some updates."""
return IMPL.constraint(**conditions)
def equal_any(*values):
"""Return an equality condition object suitable for use in a constraint.
Equal_any conditions require that a model object's attribute equal any
one of the given values.
"""
return IMPL.equal_any(*values)
def not_equal(*values):
"""Return an inequality condition object suitable for use in a constraint.
Not_equal conditions require that a model object's attribute differs from
all of the given values.
"""
return IMPL.not_equal(*values)
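# Usage sketch (an assumption added for illustration; the authoritative call
# sites live in the compute layer and the sqlalchemy backend): the condition
# helpers above are combined into a constraint and passed to an update or
# destroy call, for example:
#
#     cons = constraint(host=equal_any('host1'),
#                       task_state=not_equal('deleting'))
#     instance_destroy(context, instance_uuid, constraint=cons)
#
# The destroy only proceeds if the instance still satisfies the constraint.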
###################
def service_destroy(context, instance_id):
"""Destroy the service or raise if it does not exist."""
return IMPL.service_destroy(context, instance_id)
def service_get(context, service_id):
"""Get a service or raise if it does not exist."""
return IMPL.service_get(context, service_id)
def service_get_by_host_and_topic(context, host, topic):
"""Get a service by host it's on and topic it listens to."""
return IMPL.service_get_by_host_and_topic(context, host, topic)
def service_get_all(context, disabled=None):
"""Get all services."""
return IMPL.service_get_all(context, disabled)
def service_get_all_by_topic(context, topic):
"""Get all services for a given topic."""
return IMPL.service_get_all_by_topic(context, topic)
def service_get_all_by_host(context, host):
"""Get all services for a given host."""
return IMPL.service_get_all_by_host(context, host)
def service_get_all_compute_by_host(context, host):
"""Get all compute services for a given host."""
return IMPL.service_get_all_compute_by_host(context, host)
def service_get_all_compute_sorted(context):
"""Get all compute services sorted by instance count.
:returns: a list of (Service, instance_count) tuples.
"""
return IMPL.service_get_all_compute_sorted(context)
def service_get_all_volume_sorted(context):
"""Get all volume services sorted by volume count.
:returns: a list of (Service, volume_count) tuples.
"""
return IMPL.service_get_all_volume_sorted(context)
def service_get_by_args(context, host, binary):
"""Get the state of a service by node name and binary."""
return IMPL.service_get_by_args(context, host, binary)
def service_create(context, values):
"""Create a service from the values dictionary."""
return IMPL.service_create(context, values)
def service_update(context, service_id, values):
"""Set the given properties on a service and update it.
Raises NotFound if service does not exist.
"""
return IMPL.service_update(context, service_id, values)
###################
def compute_node_get(context, compute_id):
"""Get a computeNode."""
return IMPL.compute_node_get(context, compute_id)
def compute_node_get_all(context):
"""Get all computeNodes."""
return IMPL.compute_node_get_all(context)
def compute_node_search_by_hypervisor(context, hypervisor_match):
"""Get computeNodes given a hypervisor hostname match string."""
return IMPL.compute_node_search_by_hypervisor(context, hypervisor_match)
def compute_node_create(context, values):
"""Create a computeNode from the values dictionary."""
return IMPL.compute_node_create(context, values)
def compute_node_update(context, compute_id, values, prune_stats=False):
"""Set the given properties on a computeNode and update it.
Raises NotFound if computeNode does not exist.
"""
return IMPL.compute_node_update(context, compute_id, values, prune_stats)
def compute_node_get_by_host(context, host):
return IMPL.compute_node_get_by_host(context, host)
def compute_node_statistics(context):
return IMPL.compute_node_statistics(context)
###################
def certificate_create(context, values):
"""Create a certificate from the values dictionary."""
return IMPL.certificate_create(context, values)
def certificate_get_all_by_project(context, project_id):
"""Get all certificates for a project."""
return IMPL.certificate_get_all_by_project(context, project_id)
def certificate_get_all_by_user(context, user_id):
"""Get all certificates for a user."""
return IMPL.certificate_get_all_by_user(context, user_id)
def certificate_get_all_by_user_and_project(context, user_id, project_id):
"""Get all certificates for a user and project."""
return IMPL.certificate_get_all_by_user_and_project(context,
user_id,
project_id)
###################
def floating_ip_get(context, id):
return IMPL.floating_ip_get(context, id)
def floating_ip_get_pools(context):
"""Returns a list of floating ip pools"""
return IMPL.floating_ip_get_pools(context)
def floating_ip_allocate_address(context, project_id, pool):
"""Allocate free floating ip from specified pool and return the address.
Raises if one is not available.
"""
return IMPL.floating_ip_allocate_address(context, project_id, pool)
def floating_ip_bulk_create(context, ips):
"""Create a lot of floating ips from the values dictionary."""
return IMPL.floating_ip_bulk_create(context, ips)
def floating_ip_create(context, values):
"""Create a floating ip from the values dictionary."""
return IMPL.floating_ip_create(context, values)
def floating_ip_count_by_project(context, project_id, session=None):
"""Count floating ips used by project."""
return IMPL.floating_ip_count_by_project(context, project_id,
session=session)
def floating_ip_deallocate(context, address):
"""Deallocate a floating ip by address."""
return IMPL.floating_ip_deallocate(context, address)
def floating_ip_destroy(context, address):
"""Destroy the floating_ip or raise if it does not exist."""
return IMPL.floating_ip_destroy(context, address)
def floating_ip_disassociate(context, address):
"""Disassociate a floating ip from a fixed ip by address.
:returns: the address of the existing fixed ip.
"""
return IMPL.floating_ip_disassociate(context, address)
def floating_ip_fixed_ip_associate(context, floating_address,
fixed_address, host):
"""Associate a floating ip to a fixed_ip by address."""
return IMPL.floating_ip_fixed_ip_associate(context,
floating_address,
fixed_address,
host)
def floating_ip_get_all(context):
"""Get all floating ips."""
return IMPL.floating_ip_get_all(context)
def floating_ip_get_all_by_host(context, host):
"""Get all floating ips by host."""
return IMPL.floating_ip_get_all_by_host(context, host)
def floating_ip_get_all_by_project(context, project_id):
"""Get all floating ips by project."""
return IMPL.floating_ip_get_all_by_project(context, project_id)
def floating_ip_get_by_address(context, address):
"""Get a floating ip by address or raise if it doesn't exist."""
return IMPL.floating_ip_get_by_address(context, address)
def floating_ip_get_by_fixed_address(context, fixed_address):
"""Get a floating ips by fixed address"""
return IMPL.floating_ip_get_by_fixed_address(context, fixed_address)
def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id):
"""Get a floating ips by fixed address"""
return IMPL.floating_ip_get_by_fixed_ip_id(context, fixed_ip_id)
def floating_ip_update(context, address, values):
"""Update a floating ip by address or raise if it doesn't exist."""
return IMPL.floating_ip_update(context, address, values)
def floating_ip_set_auto_assigned(context, address):
"""Set auto_assigned flag to floating ip"""
return IMPL.floating_ip_set_auto_assigned(context, address)
def dnsdomain_list(context):
"""Get a list of all zones in our database, public and private."""
return IMPL.dnsdomain_list(context)
def dnsdomain_register_for_zone(context, fqdomain, zone):
"""Associated a DNS domain with an availability zone"""
return IMPL.dnsdomain_register_for_zone(context, fqdomain, zone)
def dnsdomain_register_for_project(context, fqdomain, project):
"""Associated a DNS domain with a project id"""
return IMPL.dnsdomain_register_for_project(context, fqdomain, project)
def dnsdomain_unregister(context, fqdomain):
"""Purge associations for the specified DNS zone"""
return IMPL.dnsdomain_unregister(context, fqdomain)
def dnsdomain_get(context, fqdomain):
"""Get the db record for the specified domain."""
return IMPL.dnsdomain_get(context, fqdomain)
####################
def migration_update(context, id, values):
"""Update a migration instance."""
return IMPL.migration_update(context, id, values)
def migration_create(context, values):
"""Create a migration record."""
return IMPL.migration_create(context, values)
def migration_get(context, migration_id):
"""Finds a migration by the id."""
return IMPL.migration_get(context, migration_id)
def migration_get_by_instance_and_status(context, instance_uuid, status):
"""Finds a migration by the instance uuid its migrating."""
return IMPL.migration_get_by_instance_and_status(context, instance_uuid,
status)
def migration_get_unconfirmed_by_dest_compute(context, confirm_window,
dest_compute):
"""
Finds all unconfirmed migrations within the confirmation window for
a specific destination compute host.
"""
return IMPL.migration_get_unconfirmed_by_dest_compute(context,
confirm_window, dest_compute)
####################
def fixed_ip_associate(context, address, instance_uuid, network_id=None,
reserved=False):
"""Associate fixed ip to instance.
Raises if fixed ip is not available.
"""
return IMPL.fixed_ip_associate(context, address, instance_uuid, network_id,
reserved)
def fixed_ip_associate_pool(context, network_id, instance_uuid=None,
host=None):
"""Find free ip in network and associate it to instance or host.
Raises if one is not available.
"""
return IMPL.fixed_ip_associate_pool(context, network_id,
instance_uuid, host)
def fixed_ip_create(context, values):
"""Create a fixed ip from the values dictionary."""
return IMPL.fixed_ip_create(context, values)
def fixed_ip_bulk_create(context, ips):
"""Create a lot of fixed ips from the values dictionary."""
return IMPL.fixed_ip_bulk_create(context, ips)
def fixed_ip_disassociate(context, address):
"""Disassociate a fixed ip from an instance by address."""
return IMPL.fixed_ip_disassociate(context, address)
def fixed_ip_disassociate_all_by_timeout(context, host, time):
"""Disassociate old fixed ips from host."""
return IMPL.fixed_ip_disassociate_all_by_timeout(context, host, time)
def fixed_ip_get(context, id):
"""Get fixed ip by id or raise if it does not exist."""
return IMPL.fixed_ip_get(context, id)
def fixed_ip_get_all(context):
"""Get all defined fixed ips."""
return IMPL.fixed_ip_get_all(context)
def fixed_ip_get_by_address(context, address):
"""Get a fixed ip by address or raise if it does not exist."""
return IMPL.fixed_ip_get_by_address(context, address)
def fixed_ip_get_by_instance(context, instance_uuid):
"""Get fixed ips by instance or raise if none exist."""
return IMPL.fixed_ip_get_by_instance(context, instance_uuid)
def fixed_ip_get_by_network_host(context, network_uuid, host):
"""Get fixed ip for a host in a network."""
return IMPL.fixed_ip_get_by_network_host(context, network_uuid, host)
def fixed_ips_by_virtual_interface(context, vif_id):
"""Get fixed ips by virtual interface or raise if none exist."""
return IMPL.fixed_ips_by_virtual_interface(context, vif_id)
def fixed_ip_get_network(context, address):
"""Get a network for a fixed ip by address."""
return IMPL.fixed_ip_get_network(context, address)
def fixed_ip_update(context, address, values):
"""Create a fixed ip from the values dictionary."""
return IMPL.fixed_ip_update(context, address, values)
####################
def virtual_interface_create(context, values):
"""Create a virtual interface record in the database."""
return IMPL.virtual_interface_create(context, values)
def virtual_interface_get(context, vif_id):
"""Gets a virtual interface from the table,"""
return IMPL.virtual_interface_get(context, vif_id)
def virtual_interface_get_by_address(context, address):
"""Gets a virtual interface from the table filtering on address."""
return IMPL.virtual_interface_get_by_address(context, address)
def virtual_interface_get_by_uuid(context, vif_uuid):
"""Gets a virtual interface from the table filtering on vif uuid."""
return IMPL.virtual_interface_get_by_uuid(context, vif_uuid)
def virtual_interface_get_by_instance(context, instance_id):
"""Gets all virtual_interfaces for instance."""
return IMPL.virtual_interface_get_by_instance(context, instance_id)
def virtual_interface_get_by_instance_and_network(context, instance_id,
network_id):
"""Gets all virtual interfaces for instance."""
return IMPL.virtual_interface_get_by_instance_and_network(context,
instance_id,
network_id)
def virtual_interface_delete(context, vif_id):
"""Delete virtual interface record from the database."""
return IMPL.virtual_interface_delete(context, vif_id)
def virtual_interface_delete_by_instance(context, instance_id):
"""Delete virtual interface records associated with instance."""
return IMPL.virtual_interface_delete_by_instance(context, instance_id)
def virtual_interface_get_all(context):
"""Gets all virtual interfaces from the table"""
return IMPL.virtual_interface_get_all(context)
####################
def instance_create(context, values):
"""Create an instance from the values dictionary."""
return IMPL.instance_create(context, values)
def instance_data_get_for_project(context, project_id, session=None):
"""Get (instance_count, total_cores, total_ram) for project."""
return IMPL.instance_data_get_for_project(context, project_id,
session=session)
def instance_destroy(context, instance_uuid, constraint=None):
"""Destroy the instance or raise if it does not exist."""
return IMPL.instance_destroy(context, instance_uuid, constraint)
def instance_get_by_uuid(context, uuid):
"""Get an instance or raise if it does not exist."""
return IMPL.instance_get_by_uuid(context, uuid)
def instance_get(context, instance_id):
"""Get an instance or raise if it does not exist."""
return IMPL.instance_get(context, instance_id)
def instance_get_all(context, columns_to_join=None):
"""Get all instances."""
return IMPL.instance_get_all(context, columns_to_join=columns_to_join)
def instance_get_all_by_filters(context, filters, sort_key='created_at',
sort_dir='desc'):
"""Get all instances that match all filters."""
return IMPL.instance_get_all_by_filters(context, filters, sort_key,
sort_dir)
def instance_get_active_by_window(context, begin, end=None, project_id=None,
host=None):
"""Get instances active during a certain time window.
Specifying a project_id will filter for a certain project.
Specifying a host will filter for instances on a given compute host.
"""
return IMPL.instance_get_active_by_window(context, begin, end,
project_id, host)
def instance_get_active_by_window_joined(context, begin, end=None,
project_id=None, host=None):
"""Get instances and joins active during a certain time window.
Specifying a project_id will filter for a certain project.
Specifying a host will filter for instances on a given compute host.
"""
return IMPL.instance_get_active_by_window_joined(context, begin, end,
project_id, host)
def instance_get_all_by_project(context, project_id):
"""Get all instances belonging to a project."""
return IMPL.instance_get_all_by_project(context, project_id)
def instance_get_all_by_host(context, host):
"""Get all instances belonging to a host."""
return IMPL.instance_get_all_by_host(context, host)
def instance_get_all_by_host_and_not_type(context, host, type_id=None):
"""Get all instances belonging to a host with a different type_id."""
return IMPL.instance_get_all_by_host_and_not_type(context, host, type_id)
def instance_get_all_by_reservation(context, reservation_id):
"""Get all instances belonging to a reservation."""
return IMPL.instance_get_all_by_reservation(context, reservation_id)
def instance_get_floating_address(context, instance_id):
"""Get the first floating ip address of an instance."""
return IMPL.instance_get_floating_address(context, instance_id)
def instance_get_all_hung_in_rebooting(context, reboot_window):
"""Get all instances stuck in a rebooting state."""
return IMPL.instance_get_all_hung_in_rebooting(context, reboot_window)
def instance_test_and_set(context, instance_uuid, attr, ok_states,
new_state):
"""Atomically check if an instance is in a valid state, and if it is, set
the instance into a new state.
"""
return IMPL.instance_test_and_set(context, instance_uuid, attr,
ok_states, new_state)
def instance_update(context, instance_uuid, values):
"""Set the given properties on an instance and update it.
Raises NotFound if instance does not exist.
"""
return IMPL.instance_update(context, instance_uuid, values)
def instance_update_and_get_original(context, instance_uuid, values):
"""Set the given properties on an instance and update it. Return
a shallow copy of the original instance reference, as well as the
updated one.
:param context: = request context object
:param instance_id: = instance id or uuid
:param values: = dict containing column values
:returns: a tuple of the form (old_instance_ref, new_instance_ref)
Raises NotFound if instance does not exist.
"""
return IMPL.instance_update_and_get_original(context, instance_uuid,
values)
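# Usage sketch (an assumption added for illustration): callers typically use
# the returned pair to detect concurrent state changes, for example:
#
#     old_ref, new_ref = instance_update_and_get_original(
#         context, instance_uuid, {'task_state': 'rebooting'})
#     if old_ref['task_state'] != expected_task_state:
#         pass  # another request changed the instance; handle the race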
def instance_add_security_group(context, instance_id, security_group_id):
"""Associate the given security group with the given instance."""
return IMPL.instance_add_security_group(context, instance_id,
security_group_id)
def instance_remove_security_group(context, instance_id, security_group_id):
"""Disassociate the given security group from the given instance."""
return IMPL.instance_remove_security_group(context, instance_id,
security_group_id)
###################
def instance_info_cache_create(context, values):
"""Create a new instance cache record in the table.
:param context: = request context object
:param values: = dict containing column values
"""
return IMPL.instance_info_cache_create(context, values)
def instance_info_cache_get(context, instance_uuid):
"""Gets an instance info cache from the table.
:param instance_uuid: = uuid of the info cache's instance
"""
return IMPL.instance_info_cache_get(context, instance_uuid)
def instance_info_cache_update(context, instance_uuid, values):
"""Update an instance info cache record in the table.
:param instance_uuid: = uuid of info cache's instance
:param values: = dict containing column values to update
"""
return IMPL.instance_info_cache_update(context, instance_uuid, values)
def instance_info_cache_delete(context, instance_uuid):
"""Deletes an existing instance_info_cache record
:param instance_uuid: = uuid of the instance tied to the cache record
"""
return IMPL.instance_info_cache_delete(context, instance_uuid)
###################
def key_pair_create(context, values):
"""Create a key_pair from the values dictionary."""
return IMPL.key_pair_create(context, values)
def key_pair_destroy(context, user_id, name):
"""Destroy the key_pair or raise if it does not exist."""
return IMPL.key_pair_destroy(context, user_id, name)
def key_pair_destroy_all_by_user(context, user_id):
"""Destroy all key_pairs by user."""
return IMPL.key_pair_destroy_all_by_user(context, user_id)
def key_pair_get(context, user_id, name):
"""Get a key_pair or raise if it does not exist."""
return IMPL.key_pair_get(context, user_id, name)
def key_pair_get_all_by_user(context, user_id):
"""Get all key_pairs by user."""
return IMPL.key_pair_get_all_by_user(context, user_id)
def key_pair_count_by_user(context, user_id):
"""Count number of key pairs for the given user ID."""
return IMPL.key_pair_count_by_user(context, user_id)
####################
def network_associate(context, project_id, network_id=None, force=False):
"""Associate a free network to a project."""
return IMPL.network_associate(context, project_id, network_id, force)
def network_count(context):
"""Return the number of networks."""
return IMPL.network_count(context)
def network_count_reserved_ips(context, network_id):
"""Return the number of reserved ips in the network."""
return IMPL.network_count_reserved_ips(context, network_id)
def network_create_safe(context, values):
"""Create a network from the values dict.
The network is only returned if the create succeeds. If the create violates
constraints because the network already exists, no exception is raised.
"""
return IMPL.network_create_safe(context, values)
def network_delete_safe(context, network_id):
"""Delete network with key network_id.
This method assumes that the network is not associated with any project
"""
return IMPL.network_delete_safe(context, network_id)
def network_create_fixed_ips(context, network_id, num_vpn_clients):
"""Create the ips for the network, reserving sepecified ips."""
return IMPL.network_create_fixed_ips(context, network_id, num_vpn_clients)
def network_disassociate(context, network_id):
"""Disassociate the network from project or raise if it does not exist."""
return IMPL.network_disassociate(context, network_id)
def network_get(context, network_id):
"""Get a network or raise if it does not exist."""
return IMPL.network_get(context, network_id)
def network_get_all(context):
"""Return all defined networks."""
return IMPL.network_get_all(context)
def network_get_all_by_uuids(context, network_uuids, project_id=None):
"""Return networks by ids."""
return IMPL.network_get_all_by_uuids(context, network_uuids, project_id)
# pylint: disable=C0103
def network_get_associated_fixed_ips(context, network_id, host=None):
"""Get all network's ips that have been associated."""
return IMPL.network_get_associated_fixed_ips(context, network_id, host)
def network_get_by_bridge(context, bridge):
"""Get a network by bridge or raise if it does not exist."""
return IMPL.network_get_by_bridge(context, bridge)
def network_get_by_uuid(context, uuid):
"""Get a network by uuid or raise if it does not exist."""
return IMPL.network_get_by_uuid(context, uuid)
def network_get_by_cidr(context, cidr):
"""Get a network by cidr or raise if it does not exist"""
return IMPL.network_get_by_cidr(context, cidr)
def network_get_by_instance(context, instance_id):
"""Get a network by instance id or raise if it does not exist."""
return IMPL.network_get_by_instance(context, instance_id)
def network_get_all_by_instance(context, instance_id):
"""Get all networks by instance id or raise if none exist."""
return IMPL.network_get_all_by_instance(context, instance_id)
def network_get_all_by_host(context, host):
"""All networks for which the given host is the network host."""
return IMPL.network_get_all_by_host(context, host)
def network_get_index(context, network_id):
"""Get non-conflicting index for network."""
return IMPL.network_get_index(context, network_id)
def network_set_cidr(context, network_id, cidr):
"""Set the Classless Inner Domain Routing for the network."""
return IMPL.network_set_cidr(context, network_id, cidr)
def network_set_host(context, network_id, host_id):
"""Safely set the host for network."""
return IMPL.network_set_host(context, network_id, host_id)
def network_update(context, network_id, values):
"""Set the given properties on a network and update it.
Raises NotFound if network does not exist.
"""
return IMPL.network_update(context, network_id, values)
###################
def iscsi_target_count_by_host(context, host):
"""Return count of export devices."""
return IMPL.iscsi_target_count_by_host(context, host)
def iscsi_target_create_safe(context, values):
"""Create an iscsi_target from the values dictionary.
The device is not returned. If the create violates the unique
constraints because the iscsi_target and host already exist,
no exception is raised.
"""
return IMPL.iscsi_target_create_safe(context, values)
###############
def quota_create(context, project_id, resource, limit):
"""Create a quota for the given project and resource."""
return IMPL.quota_create(context, project_id, resource, limit)
def quota_get(context, project_id, resource):
"""Retrieve a quota or raise if it does not exist."""
return IMPL.quota_get(context, project_id, resource)
def quota_get_all_by_project(context, project_id):
"""Retrieve all quotas associated with a given project."""
return IMPL.quota_get_all_by_project(context, project_id)
def quota_update(context, project_id, resource, limit):
"""Update a quota or raise if it does not exist."""
return IMPL.quota_update(context, project_id, resource, limit)
def quota_destroy(context, project_id, resource):
"""Destroy the quota or raise if it does not exist."""
return IMPL.quota_destroy(context, project_id, resource)
###################
def quota_class_create(context, class_name, resource, limit):
"""Create a quota class for the given name and resource."""
return IMPL.quota_class_create(context, class_name, resource, limit)
def quota_class_get(context, class_name, resource):
"""Retrieve a quota class or raise if it does not exist."""
return IMPL.quota_class_get(context, class_name, resource)
def quota_class_get_all_by_name(context, class_name):
"""Retrieve all quotas associated with a given quota class."""
return IMPL.quota_class_get_all_by_name(context, class_name)
def quota_class_update(context, class_name, resource, limit):
"""Update a quota class or raise if it does not exist."""
return IMPL.quota_class_update(context, class_name, resource, limit)
def quota_class_destroy(context, class_name, resource):
"""Destroy the quota class or raise if it does not exist."""
return IMPL.quota_class_destroy(context, class_name, resource)
def quota_class_destroy_all_by_name(context, class_name):
"""Destroy all quotas associated with a given quota class."""
return IMPL.quota_class_destroy_all_by_name(context, class_name)
###################
def quota_usage_create(context, project_id, resource, in_use, reserved,
until_refresh):
"""Create a quota usage for the given project and resource."""
return IMPL.quota_usage_create(context, project_id, resource,
in_use, reserved, until_refresh)
def quota_usage_get(context, project_id, resource):
"""Retrieve a quota usage or raise if it does not exist."""
return IMPL.quota_usage_get(context, project_id, resource)
def quota_usage_get_all_by_project(context, project_id):
"""Retrieve all usage associated with a given resource."""
return IMPL.quota_usage_get_all_by_project(context, project_id)
def quota_usage_update(context, project_id, resource, in_use, reserved,
until_refresh):
"""Update a quota usage or raise if it does not exist."""
return IMPL.quota_usage_update(context, project_id, resource,
in_use, reserved, until_refresh)
def quota_usage_destroy(context, project_id, resource):
"""Destroy the quota usage or raise if it does not exist."""
return IMPL.quota_usage_destroy(context, project_id, resource)
###################
def reservation_create(context, uuid, usage, project_id, resource, delta,
expire):
"""Create a reservation for the given project and resource."""
return IMPL.reservation_create(context, uuid, usage, project_id,
resource, delta, expire)
def reservation_get(context, uuid):
"""Retrieve a reservation or raise if it does not exist."""
return IMPL.reservation_get(context, uuid)
def reservation_get_all_by_project(context, project_id):
"""Retrieve all reservations associated with a given project."""
return IMPL.reservation_get_all_by_project(context, project_id)
def reservation_destroy(context, uuid):
"""Destroy the reservation or raise if it does not exist."""
return IMPL.reservation_destroy(context, uuid)
###################
def quota_reserve(context, resources, quotas, deltas, expire,
until_refresh, max_age):
"""Check quotas and create appropriate reservations."""
return IMPL.quota_reserve(context, resources, quotas, deltas, expire,
until_refresh, max_age)
def reservation_commit(context, reservations):
"""Commit quota reservations."""
return IMPL.reservation_commit(context, reservations)
def reservation_rollback(context, reservations):
"""Roll back quota reservations."""
return IMPL.reservation_rollback(context, reservations)
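# Flow sketch (an assumption added for illustration): quota changes are made
# in two phases, reserve first and then commit on success or roll back on
# failure, for example:
#
#     reservations = quota_reserve(context, resources, quotas,
#                                  {'instances': 1, 'cores': 2}, expire,
#                                  until_refresh=None, max_age=0)
#     try:
#         pass  # do the work that consumes the quota
#     except Exception:
#         reservation_rollback(context, reservations)
#     else:
#         reservation_commit(context, reservations)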
def quota_destroy_all_by_project(context, project_id):
"""Destroy all quotas associated with a given project."""
return IMPL.quota_destroy_all_by_project(context, project_id)
def reservation_expire(context):
"""Roll back any expired reservations."""
return IMPL.reservation_expire(context)
###################
def volume_allocate_iscsi_target(context, volume_id, host):
"""Atomically allocate a free iscsi_target from the pool."""
return IMPL.volume_allocate_iscsi_target(context, volume_id, host)
def volume_attached(context, volume_id, instance_id, mountpoint):
"""Ensure that a volume is set as attached."""
return IMPL.volume_attached(context, volume_id, instance_id, mountpoint)
def volume_create(context, values):
"""Create a volume from the values dictionary."""
return IMPL.volume_create(context, values)
def volume_data_get_for_project(context, project_id, session=None):
"""Get (volume_count, gigabytes) for project."""
return IMPL.volume_data_get_for_project(context, project_id,
session=session)
def volume_destroy(context, volume_id):
"""Destroy the volume or raise if it does not exist."""
return IMPL.volume_destroy(context, volume_id)
def volume_detached(context, volume_id):
"""Ensure that a volume is set as detached."""
return IMPL.volume_detached(context, volume_id)
def volume_get(context, volume_id):
"""Get a volume or raise if it does not exist."""
return IMPL.volume_get(context, volume_id)
def volume_get_all(context):
"""Get all volumes."""
return IMPL.volume_get_all(context)
def volume_get_all_by_host(context, host):
"""Get all volumes belonging to a host."""
return IMPL.volume_get_all_by_host(context, host)
def volume_get_all_by_instance_uuid(context, instance_uuid):
"""Get all volumes belonging to an instance."""
return IMPL.volume_get_all_by_instance_uuid(context, instance_uuid)
def volume_get_all_by_project(context, project_id):
"""Get all volumes belonging to a project."""
return IMPL.volume_get_all_by_project(context, project_id)
def volume_get_by_ec2_id(context, ec2_id):
"""Get a volume by ec2 id."""
return IMPL.volume_get_by_ec2_id(context, ec2_id)
def volume_get_iscsi_target_num(context, volume_id):
"""Get the target num (tid) allocated to the volume."""
return IMPL.volume_get_iscsi_target_num(context, volume_id)
def volume_update(context, volume_id, values):
"""Set the given properties on a volume and update it.
Raises NotFound if volume does not exist.
"""
return IMPL.volume_update(context, volume_id, values)
def get_ec2_volume_id_by_uuid(context, volume_id):
return IMPL.get_ec2_volume_id_by_uuid(context, volume_id)
def get_volume_uuid_by_ec2_id(context, ec2_id):
return IMPL.get_volume_uuid_by_ec2_id(context, ec2_id)
def ec2_volume_create(context, volume_id, forced_id=None):
return IMPL.ec2_volume_create(context, volume_id, forced_id)
def get_snapshot_uuid_by_ec2_id(context, ec2_id):
return IMPL.get_snapshot_uuid_by_ec2_id(context, ec2_id)
def get_ec2_snapshot_id_by_uuid(context, snapshot_id):
return IMPL.get_ec2_snapshot_id_by_uuid(context, snapshot_id)
def ec2_snapshot_create(context, snapshot_id, forced_id=None):
return IMPL.ec2_snapshot_create(context, snapshot_id, forced_id)
####################
def snapshot_create(context, values):
"""Create a snapshot from the values dictionary."""
return IMPL.snapshot_create(context, values)
def snapshot_destroy(context, snapshot_id):
"""Destroy the snapshot or raise if it does not exist."""
return IMPL.snapshot_destroy(context, snapshot_id)
def snapshot_get(context, snapshot_id):
"""Get a snapshot or raise if it does not exist."""
return IMPL.snapshot_get(context, snapshot_id)
def snapshot_get_all(context):
"""Get all snapshots."""
return IMPL.snapshot_get_all(context)
def snapshot_get_all_by_project(context, project_id):
"""Get all snapshots belonging to a project."""
return IMPL.snapshot_get_all_by_project(context, project_id)
def snapshot_get_all_for_volume(context, volume_id):
"""Get all snapshots for a volume."""
return IMPL.snapshot_get_all_for_volume(context, volume_id)
def snapshot_update(context, snapshot_id, values):
"""Set the given properties on a snapshot and update it.
Raises NotFound if snapshot does not exist.
"""
return IMPL.snapshot_update(context, snapshot_id, values)
####################
def block_device_mapping_create(context, values):
"""Create an entry of block device mapping"""
return IMPL.block_device_mapping_create(context, values)
def block_device_mapping_update(context, bdm_id, values):
"""Update an entry of block device mapping"""
return IMPL.block_device_mapping_update(context, bdm_id, values)
def block_device_mapping_update_or_create(context, values):
"""Update an entry of block device mapping.
    If it does not exist, create a new entry."""
return IMPL.block_device_mapping_update_or_create(context, values)
def block_device_mapping_get_all_by_instance(context, instance_uuid):
"""Get all block device mapping belonging to an instance"""
return IMPL.block_device_mapping_get_all_by_instance(context,
instance_uuid)
def block_device_mapping_destroy(context, bdm_id):
"""Destroy the block device mapping."""
return IMPL.block_device_mapping_destroy(context, bdm_id)
def block_device_mapping_destroy_by_instance_and_device(context, instance_uuid,
device_name):
"""Destroy the block device mapping."""
return IMPL.block_device_mapping_destroy_by_instance_and_device(
context, instance_uuid, device_name)
def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid,
volume_id):
"""Destroy the block device mapping."""
return IMPL.block_device_mapping_destroy_by_instance_and_volume(
context, instance_uuid, volume_id)
####################
def security_group_get_all(context):
"""Get all security groups."""
return IMPL.security_group_get_all(context)
def security_group_get(context, security_group_id):
"""Get security group by its id."""
return IMPL.security_group_get(context, security_group_id)
def security_group_get_by_name(context, project_id, group_name):
"""Returns a security group with the specified name from a project."""
return IMPL.security_group_get_by_name(context, project_id, group_name)
def security_group_get_by_project(context, project_id):
"""Get all security groups belonging to a project."""
return IMPL.security_group_get_by_project(context, project_id)
def security_group_get_by_instance(context, instance_id):
"""Get security groups to which the instance is assigned."""
return IMPL.security_group_get_by_instance(context, instance_id)
def security_group_exists(context, project_id, group_name):
"""Indicates if a group name exists in a project."""
return IMPL.security_group_exists(context, project_id, group_name)
def security_group_in_use(context, group_id):
"""Indicates if a security group is currently in use."""
return IMPL.security_group_in_use(context, group_id)
def security_group_create(context, values):
"""Create a new security group."""
return IMPL.security_group_create(context, values)
def security_group_ensure_default(context):
"""Ensure default security group exists for a project_id."""
return IMPL.security_group_ensure_default(context)
def security_group_destroy(context, security_group_id):
"""Deletes a security group."""
return IMPL.security_group_destroy(context, security_group_id)
def security_group_count_by_project(context, project_id, session=None):
"""Count number of security groups in a project."""
return IMPL.security_group_count_by_project(context, project_id,
session=session)
####################
def security_group_rule_create(context, values):
"""Create a new security group."""
return IMPL.security_group_rule_create(context, values)
def security_group_rule_get_by_security_group(context, security_group_id):
"""Get all rules for a given security group."""
return IMPL.security_group_rule_get_by_security_group(context,
security_group_id)
def security_group_rule_get_by_security_group_grantee(context,
security_group_id):
"""Get all rules that grant access to the given security group."""
return IMPL.security_group_rule_get_by_security_group_grantee(context,
security_group_id)
def security_group_rule_destroy(context, security_group_rule_id):
"""Deletes a security group rule."""
return IMPL.security_group_rule_destroy(context, security_group_rule_id)
def security_group_rule_get(context, security_group_rule_id):
"""Gets a security group rule."""
return IMPL.security_group_rule_get(context, security_group_rule_id)
def security_group_rule_count_by_group(context, security_group_id):
"""Count rules in a given security group."""
return IMPL.security_group_rule_count_by_group(context, security_group_id)
###################
def provider_fw_rule_create(context, rule):
"""Add a firewall rule at the provider level (all hosts & instances)."""
return IMPL.provider_fw_rule_create(context, rule)
def provider_fw_rule_get_all(context):
"""Get all provider-level firewall rules."""
return IMPL.provider_fw_rule_get_all(context)
def provider_fw_rule_destroy(context, rule_id):
"""Delete a provider firewall rule from the database."""
return IMPL.provider_fw_rule_destroy(context, rule_id)
###################
def project_get_networks(context, project_id, associate=True):
"""Return the network associated with the project.
If associate is true, it will attempt to associate a new
network if one is not found, otherwise it returns None.
"""
return IMPL.project_get_networks(context, project_id, associate)
###################
def console_pool_create(context, values):
"""Create console pool."""
return IMPL.console_pool_create(context, values)
def console_pool_get_by_host_type(context, compute_host, proxy_host,
console_type):
"""Fetch a console pool for a given proxy host, compute host, and type."""
return IMPL.console_pool_get_by_host_type(context,
compute_host,
proxy_host,
console_type)
def console_pool_get_all_by_host_type(context, host, console_type):
"""Fetch all pools for given proxy host and type."""
return IMPL.console_pool_get_all_by_host_type(context,
host,
console_type)
def console_create(context, values):
"""Create a console."""
return IMPL.console_create(context, values)
def console_delete(context, console_id):
"""Delete a console."""
return IMPL.console_delete(context, console_id)
def console_get_by_pool_instance(context, pool_id, instance_uuid):
"""Get console entry for a given instance and pool."""
return IMPL.console_get_by_pool_instance(context, pool_id, instance_uuid)
def console_get_all_by_instance(context, instance_uuid):
"""Get consoles for a given instance."""
return IMPL.console_get_all_by_instance(context, instance_uuid)
def console_get(context, console_id, instance_uuid=None):
"""Get a specific console (possibly on a given instance)."""
return IMPL.console_get(context, console_id, instance_uuid)
##################
def instance_type_create(context, values):
"""Create a new instance type."""
return IMPL.instance_type_create(context, values)
def instance_type_get_all(context, inactive=False, filters=None):
"""Get all instance types."""
return IMPL.instance_type_get_all(
context, inactive=inactive, filters=filters)
def instance_type_get(context, id):
"""Get instance type by id."""
return IMPL.instance_type_get(context, id)
def instance_type_get_by_name(context, name):
"""Get instance type by name."""
return IMPL.instance_type_get_by_name(context, name)
def instance_type_get_by_flavor_id(context, id):
"""Get instance type by flavor id."""
return IMPL.instance_type_get_by_flavor_id(context, id)
def instance_type_destroy(context, name):
"""Delete an instance type."""
return IMPL.instance_type_destroy(context, name)
def instance_type_access_get_by_flavor_id(context, flavor_id):
"""Get flavor access by flavor id."""
return IMPL.instance_type_access_get_by_flavor_id(context, flavor_id)
def instance_type_access_add(context, flavor_id, project_id):
"""Add flavor access for project."""
return IMPL.instance_type_access_add(context, flavor_id, project_id)
def instance_type_access_remove(context, flavor_id, project_id):
"""Remove flavor access for project."""
return IMPL.instance_type_access_remove(context, flavor_id, project_id)
####################
def instance_metadata_get(context, instance_uuid):
"""Get all metadata for an instance."""
return IMPL.instance_metadata_get(context, instance_uuid)
def instance_metadata_delete(context, instance_uuid, key):
"""Delete the given metadata item."""
IMPL.instance_metadata_delete(context, instance_uuid, key)
def instance_metadata_update(context, instance_uuid, metadata, delete):
"""Update metadata if it exists, otherwise create it."""
return IMPL.instance_metadata_update(context, instance_uuid,
metadata, delete)
####################
def instance_system_metadata_get(context, instance_uuid):
"""Get all system metadata for an instance."""
return IMPL.instance_system_metadata_get(context, instance_uuid)
def instance_system_metadata_delete(context, instance_uuid, key):
"""Delete the given system metadata item."""
IMPL.instance_system_metadata_delete(context, instance_uuid, key)
def instance_system_metadata_update(context, instance_uuid, metadata, delete):
"""Update metadata if it exists, otherwise create it."""
IMPL.instance_system_metadata_update(
context, instance_uuid, metadata, delete)
####################
def agent_build_create(context, values):
"""Create a new agent build entry."""
return IMPL.agent_build_create(context, values)
def agent_build_get_by_triple(context, hypervisor, os, architecture):
"""Get agent build by hypervisor/OS/architecture triple."""
return IMPL.agent_build_get_by_triple(context, hypervisor, os,
architecture)
def agent_build_get_all(context):
"""Get all agent builds."""
return IMPL.agent_build_get_all(context)
def agent_build_destroy(context, agent_update_id):
"""Destroy agent build entry."""
IMPL.agent_build_destroy(context, agent_update_id)
def agent_build_update(context, agent_build_id, values):
"""Update agent build entry."""
IMPL.agent_build_update(context, agent_build_id, values)
####################
def bw_usage_get_by_uuids(context, uuids, start_period):
"""Return bw usages for instance(s) in a given audit period."""
return IMPL.bw_usage_get_by_uuids(context, uuids, start_period)
def bw_usage_update(context, uuid, mac, start_period, bw_in, bw_out,
last_refreshed=None):
"""Update cached bandwidth usage for an instance's network based on mac
address. Creates new record if needed.
"""
return IMPL.bw_usage_update(context, uuid, mac, start_period, bw_in,
bw_out, last_refreshed=last_refreshed)
####################
def instance_type_extra_specs_get(context, flavor_id):
"""Get all extra specs for an instance type."""
return IMPL.instance_type_extra_specs_get(context, flavor_id)
def instance_type_extra_specs_delete(context, flavor_id, key):
"""Delete the given extra specs item."""
IMPL.instance_type_extra_specs_delete(context, flavor_id, key)
def instance_type_extra_specs_update_or_create(context, flavor_id,
extra_specs):
"""Create or update instance type extra specs. This adds or modifies the
key/value pairs specified in the extra specs dict argument"""
IMPL.instance_type_extra_specs_update_or_create(context, flavor_id,
extra_specs)
##################
def volume_metadata_get(context, volume_id):
"""Get all metadata for a volume."""
return IMPL.volume_metadata_get(context, volume_id)
def volume_metadata_delete(context, volume_id, key):
"""Delete the given metadata item."""
IMPL.volume_metadata_delete(context, volume_id, key)
def volume_metadata_update(context, volume_id, metadata, delete):
"""Update metadata if it exists, otherwise create it."""
IMPL.volume_metadata_update(context, volume_id, metadata, delete)
##################
def volume_type_create(context, values):
"""Create a new volume type."""
return IMPL.volume_type_create(context, values)
def volume_type_get_all(context, inactive=False):
"""Get all volume types."""
return IMPL.volume_type_get_all(context, inactive)
def volume_type_get(context, id):
"""Get volume type by id."""
return IMPL.volume_type_get(context, id)
def volume_type_get_by_name(context, name):
"""Get volume type by name."""
return IMPL.volume_type_get_by_name(context, name)
def volume_type_destroy(context, name):
"""Delete a volume type."""
return IMPL.volume_type_destroy(context, name)
def volume_get_active_by_window(context, begin, end=None, project_id=None):
"""Get all the volumes inside the window.
Specifying a project_id will filter for a certain project."""
return IMPL.volume_get_active_by_window(context, begin, end, project_id)
####################
def volume_type_extra_specs_get(context, volume_type_id):
"""Get all extra specs for a volume type."""
return IMPL.volume_type_extra_specs_get(context, volume_type_id)
def volume_type_extra_specs_delete(context, volume_type_id, key):
"""Delete the given extra specs item."""
IMPL.volume_type_extra_specs_delete(context, volume_type_id, key)
def volume_type_extra_specs_update_or_create(context, volume_type_id,
extra_specs):
"""Create or update volume type extra specs. This adds or modifies the
key/value pairs specified in the extra specs dict argument"""
IMPL.volume_type_extra_specs_update_or_create(context, volume_type_id,
extra_specs)
###################
def s3_image_get(context, image_id):
"""Find local s3 image represented by the provided id"""
return IMPL.s3_image_get(context, image_id)
def s3_image_get_by_uuid(context, image_uuid):
"""Find local s3 image represented by the provided uuid"""
return IMPL.s3_image_get_by_uuid(context, image_uuid)
def s3_image_create(context, image_uuid):
"""Create local s3 image represented by provided uuid"""
return IMPL.s3_image_create(context, image_uuid)
####################
def sm_backend_conf_create(context, values):
"""Create a new SM Backend Config entry."""
return IMPL.sm_backend_conf_create(context, values)
def sm_backend_conf_update(context, sm_backend_conf_id, values):
"""Update a SM Backend Config entry."""
return IMPL.sm_backend_conf_update(context, sm_backend_conf_id, values)
def sm_backend_conf_delete(context, sm_backend_conf_id):
"""Delete a SM Backend Config."""
return IMPL.sm_backend_conf_delete(context, sm_backend_conf_id)
def sm_backend_conf_get(context, sm_backend_conf_id):
"""Get a specific SM Backend Config."""
return IMPL.sm_backend_conf_get(context, sm_backend_conf_id)
def sm_backend_conf_get_by_sr(context, sr_uuid):
"""Get a specific SM Backend Config."""
return IMPL.sm_backend_conf_get_by_sr(context, sr_uuid)
def sm_backend_conf_get_all(context):
"""Get all SM Backend Configs."""
return IMPL.sm_backend_conf_get_all(context)
####################
def sm_flavor_create(context, values):
"""Create a new SM Flavor entry."""
return IMPL.sm_flavor_create(context, values)
def sm_flavor_update(context, sm_flavor_id, values):
"""Update a SM Flavor entry."""
return IMPL.sm_flavor_update(context, sm_flavor_id, values)
def sm_flavor_delete(context, sm_flavor_id):
"""Delete a SM Flavor."""
return IMPL.sm_flavor_delete(context, sm_flavor_id)
def sm_flavor_get(context, sm_flavor_id):
"""Get a specific SM Flavor."""
return IMPL.sm_flavor_get(context, sm_flavor_id)
def sm_flavor_get_all(context):
"""Get all SM Flavors."""
return IMPL.sm_flavor_get_all(context)
def sm_flavor_get_by_label(context, sm_flavor_label):
"""Get a specific SM Flavor given label."""
return IMPL.sm_flavor_get_by_label(context, sm_flavor_label)
####################
def sm_volume_create(context, values):
"""Create a new child Zone entry."""
return IMPL.sm_volume_create(context, values)
def sm_volume_update(context, volume_id, values):
    """Update a SM volume entry."""
    return IMPL.sm_volume_update(context, volume_id, values)
def sm_volume_delete(context, volume_id):
"""Delete a child Zone."""
return IMPL.sm_volume_delete(context, volume_id)
def sm_volume_get(context, volume_id):
"""Get a specific child Zone."""
return IMPL.sm_volume_get(context, volume_id)
def sm_volume_get_all(context):
"""Get all child Zones."""
return IMPL.sm_volume_get_all(context)
####################
def aggregate_create(context, values, metadata=None):
"""Create a new aggregate with metadata."""
return IMPL.aggregate_create(context, values, metadata)
def aggregate_get(context, aggregate_id):
"""Get a specific aggregate by id."""
return IMPL.aggregate_get(context, aggregate_id)
def aggregate_get_by_host(context, host, key=None):
"""Get a list of aggregates that host belongs to"""
return IMPL.aggregate_get_by_host(context, host, key)
def aggregate_metadata_get_by_host(context, host, key=None):
"""Get metadata for all aggregates that host belongs to.
    Returns a dictionary where each value is a set; this covers the case
    where two aggregates have different values for the same key.
    Optional key filter."""
return IMPL.aggregate_metadata_get_by_host(context, host, key)
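# Illustrative note (added for clarity, not part of the original module): the
# return value is a dict mapping each metadata key to a *set* of values, since
# two aggregates containing the same host may disagree on a key. The host name
# and values below are hypothetical.
#
#     meta = aggregate_metadata_get_by_host(ctxt, 'compute-01')
#     # e.g. {'availability_zone': set(['az1', 'az2']), 'ssd': set(['true'])}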
def aggregate_update(context, aggregate_id, values):
"""Update the attributes of an aggregates. If values contains a metadata
key, it updates the aggregate metadata too."""
return IMPL.aggregate_update(context, aggregate_id, values)
def aggregate_delete(context, aggregate_id):
"""Delete an aggregate."""
return IMPL.aggregate_delete(context, aggregate_id)
def aggregate_get_all(context):
"""Get all aggregates."""
return IMPL.aggregate_get_all(context)
def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False):
"""Add/update metadata. If set_delete=True, it adds only."""
IMPL.aggregate_metadata_add(context, aggregate_id, metadata, set_delete)
def aggregate_metadata_get(context, aggregate_id):
"""Get metadata for the specified aggregate."""
return IMPL.aggregate_metadata_get(context, aggregate_id)
def aggregate_metadata_delete(context, aggregate_id, key):
"""Delete the given metadata key."""
IMPL.aggregate_metadata_delete(context, aggregate_id, key)
def aggregate_host_add(context, aggregate_id, host):
"""Add host to the aggregate."""
IMPL.aggregate_host_add(context, aggregate_id, host)
def aggregate_host_get_all(context, aggregate_id):
"""Get hosts for the specified aggregate."""
return IMPL.aggregate_host_get_all(context, aggregate_id)
def aggregate_host_delete(context, aggregate_id, host):
"""Delete the given host from the aggregate."""
IMPL.aggregate_host_delete(context, aggregate_id, host)
####################
def instance_fault_create(context, values):
"""Create a new Instance Fault."""
return IMPL.instance_fault_create(context, values)
def instance_fault_get_by_instance_uuids(context, instance_uuids):
"""Get all instance faults for the provided instance_uuids."""
return IMPL.instance_fault_get_by_instance_uuids(context, instance_uuids)
####################
def get_ec2_instance_id_by_uuid(context, instance_id):
"""Get ec2 id through uuid from instance_id_mappings table"""
return IMPL.get_ec2_instance_id_by_uuid(context, instance_id)
def get_instance_uuid_by_ec2_id(context, ec2_id):
"""Get uuid through ec2 id from instance_id_mappings table"""
return IMPL.get_instance_uuid_by_ec2_id(context, ec2_id)
def ec2_instance_create(context, instance_ec2_id):
"""Create the ec2 id to instance uuid mapping on demand"""
return IMPL.ec2_instance_create(context, instance_ec2_id)
####################
def task_log_end_task(context, task_name,
period_beginning,
period_ending,
host,
errors,
message=None,
session=None):
"""Mark a task as complete for a given host/time period"""
return IMPL.task_log_end_task(context, task_name,
period_beginning,
period_ending,
host,
errors,
message,
session)
def task_log_begin_task(context, task_name,
period_beginning,
period_ending,
host,
task_items=None,
message=None,
session=None):
"""Mark a task as started for a given host/time period"""
return IMPL.task_log_begin_task(context, task_name,
period_beginning,
period_ending,
host,
task_items,
message,
session)
def task_log_get_all(context, task_name, period_beginning,
period_ending, host=None, state=None, session=None):
return IMPL.task_log_get_all(context, task_name, period_beginning,
period_ending, host, state, session)
def task_log_get(context, task_name, period_beginning,
period_ending, host, state=None, session=None):
return IMPL.task_log_get(context, task_name, period_beginning,
period_ending, host, state, session)
| 32.119685
| 79
| 0.708064
|
from nova import exception
from nova import flags
from nova.openstack.common import cfg
from nova import utils
db_opts = [
cfg.StrOpt('db_backend',
default='sqlalchemy',
help='The backend to use for db'),
cfg.BoolOpt('enable_new_services',
default=True,
help='Services to be added to the available pool on create'),
cfg.StrOpt('instance_name_template',
default='instance-%08x',
help='Template string to be used to generate instance names'),
cfg.StrOpt('volume_name_template',
default='volume-%s',
               help='Template string to be used to generate volume names'),
cfg.StrOpt('snapshot_name_template',
default='snapshot-%s',
help='Template string to be used to generate snapshot names'),
]
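# Illustrative note (added for clarity, not part of the original module): the
# *_template options above are plain Python %-format strings, e.g. with the
# defaults shown:
#
#     'instance-%08x' % 123        # -> 'instance-0000007b'
#     'snapshot-%s' % snapshot_id  # -> 'snapshot-<id>' (snapshot_id is hypothetical)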
FLAGS = flags.FLAGS
FLAGS.register_opts(db_opts)
IMPL = utils.LazyPluggable('db_backend',
sqlalchemy='nova.db.sqlalchemy.api')
class NoMoreNetworks(exception.NovaException):
pass
class NoMoreTargets(exception.NovaException):
pass
IMPL.not_equal(*values)
text, service_id)
def service_get_by_host_and_topic(context, host, topic):
return IMPL.service_get_by_host_and_topic(context, host, topic)
def service_get_all(context, disabled=None):
return IMPL.service_get_all(context, disabled)
def service_get_all_by_topic(context, topic):
return IMPL.service_get_all_by_topic(context, topic)
def service_get_all_by_host(context, host):
return IMPL.service_get_all_by_host(context, host)
def service_get_all_compute_by_host(context, host):
return IMPL.service_get_all_compute_by_host(context, host)
def service_get_all_compute_sorted(context):
return IMPL.service_get_all_compute_sorted(context)
def service_get_all_volume_sorted(context):
return IMPL.service_get_all_volume_sorted(context)
def service_get_by_args(context, host, binary):
return IMPL.service_get_by_args(context, host, binary)
def service_create(context, values):
return IMPL.service_create(context, values)
def service_update(context, service_id, values):
return IMPL.service_update(context, service_id, values)
ll(context)
def compute_node_search_by_hypervisor(context, hypervisor_match):
return IMPL.compute_node_search_by_hypervisor(context, hypervisor_match)
def compute_node_create(context, values):
return IMPL.compute_node_create(context, values)
def compute_node_update(context, compute_id, values, prune_stats=False):
return IMPL.compute_node_update(context, compute_id, values, prune_stats)
def compute_node_get_by_host(context, host):
return IMPL.compute_node_get_by_host(context, host)
def compute_node_statistics(context):
return IMPL.compute_node_statistics(context)
certificate_get_all_by_project(context, project_id)
def certificate_get_all_by_user(context, user_id):
return IMPL.certificate_get_all_by_user(context, user_id)
def certificate_get_all_by_user_and_project(context, user_id, project_id):
return IMPL.certificate_get_all_by_user_and_project(context,
user_id,
project_id)
floating_ip_allocate_address(context, project_id, pool):
return IMPL.floating_ip_allocate_address(context, project_id, pool)
def floating_ip_bulk_create(context, ips):
return IMPL.floating_ip_bulk_create(context, ips)
def floating_ip_create(context, values):
return IMPL.floating_ip_create(context, values)
def floating_ip_count_by_project(context, project_id, session=None):
return IMPL.floating_ip_count_by_project(context, project_id,
session=session)
def floating_ip_deallocate(context, address):
return IMPL.floating_ip_deallocate(context, address)
def floating_ip_destroy(context, address):
return IMPL.floating_ip_destroy(context, address)
def floating_ip_disassociate(context, address):
return IMPL.floating_ip_disassociate(context, address)
def floating_ip_fixed_ip_associate(context, floating_address,
fixed_address, host):
return IMPL.floating_ip_fixed_ip_associate(context,
floating_address,
fixed_address,
host)
def floating_ip_get_all(context):
return IMPL.floating_ip_get_all(context)
def floating_ip_get_all_by_host(context, host):
return IMPL.floating_ip_get_all_by_host(context, host)
def floating_ip_get_all_by_project(context, project_id):
return IMPL.floating_ip_get_all_by_project(context, project_id)
def floating_ip_get_by_address(context, address):
return IMPL.floating_ip_get_by_address(context, address)
def floating_ip_get_by_fixed_address(context, fixed_address):
return IMPL.floating_ip_get_by_fixed_address(context, fixed_address)
def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id):
return IMPL.floating_ip_get_by_fixed_ip_id(context, fixed_ip_id)
def floating_ip_update(context, address, values):
return IMPL.floating_ip_update(context, address, values)
def floating_ip_set_auto_assigned(context, address):
return IMPL.floating_ip_set_auto_assigned(context, address)
def dnsdomain_list(context):
return IMPL.dnsdomain_list(context)
def dnsdomain_register_for_zone(context, fqdomain, zone):
return IMPL.dnsdomain_register_for_zone(context, fqdomain, zone)
def dnsdomain_register_for_project(context, fqdomain, project):
return IMPL.dnsdomain_register_for_project(context, fqdomain, project)
def dnsdomain_unregister(context, fqdomain):
return IMPL.dnsdomain_unregister(context, fqdomain)
def dnsdomain_get(context, fqdomain):
return IMPL.dnsdomain_get(context, fqdomain)
def migration_get(context, migration_id):
return IMPL.migration_get(context, migration_id)
def migration_get_by_instance_and_status(context, instance_uuid, status):
return IMPL.migration_get_by_instance_and_status(context, instance_uuid,
status)
def migration_get_unconfirmed_by_dest_compute(context, confirm_window,
dest_compute):
return IMPL.migration_get_unconfirmed_by_dest_compute(context,
confirm_window, dest_compute)
k_id,
reserved)
def fixed_ip_associate_pool(context, network_id, instance_uuid=None,
host=None):
return IMPL.fixed_ip_associate_pool(context, network_id,
instance_uuid, host)
def fixed_ip_create(context, values):
return IMPL.fixed_ip_create(context, values)
def fixed_ip_bulk_create(context, ips):
return IMPL.fixed_ip_bulk_create(context, ips)
def fixed_ip_disassociate(context, address):
return IMPL.fixed_ip_disassociate(context, address)
def fixed_ip_disassociate_all_by_timeout(context, host, time):
return IMPL.fixed_ip_disassociate_all_by_timeout(context, host, time)
def fixed_ip_get(context, id):
return IMPL.fixed_ip_get(context, id)
def fixed_ip_get_all(context):
return IMPL.fixed_ip_get_all(context)
def fixed_ip_get_by_address(context, address):
return IMPL.fixed_ip_get_by_address(context, address)
def fixed_ip_get_by_instance(context, instance_uuid):
return IMPL.fixed_ip_get_by_instance(context, instance_uuid)
def fixed_ip_get_by_network_host(context, network_uuid, host):
return IMPL.fixed_ip_get_by_network_host(context, network_uuid, host)
def fixed_ips_by_virtual_interface(context, vif_id):
return IMPL.fixed_ips_by_virtual_interface(context, vif_id)
def fixed_ip_get_network(context, address):
return IMPL.fixed_ip_get_network(context, address)
def fixed_ip_update(context, address, values):
return IMPL.fixed_ip_update(context, address, values)
t(context, vif_id)
def virtual_interface_get_by_address(context, address):
return IMPL.virtual_interface_get_by_address(context, address)
def virtual_interface_get_by_uuid(context, vif_uuid):
return IMPL.virtual_interface_get_by_uuid(context, vif_uuid)
def virtual_interface_get_by_instance(context, instance_id):
return IMPL.virtual_interface_get_by_instance(context, instance_id)
def virtual_interface_get_by_instance_and_network(context, instance_id,
network_id):
return IMPL.virtual_interface_get_by_instance_and_network(context,
instance_id,
network_id)
def virtual_interface_delete(context, vif_id):
return IMPL.virtual_interface_delete(context, vif_id)
def virtual_interface_delete_by_instance(context, instance_id):
return IMPL.virtual_interface_delete_by_instance(context, instance_id)
def virtual_interface_get_all(context):
return IMPL.virtual_interface_get_all(context)
a_get_for_project(context, project_id,
session=session)
def instance_destroy(context, instance_uuid, constraint=None):
return IMPL.instance_destroy(context, instance_uuid, constraint)
def instance_get_by_uuid(context, uuid):
return IMPL.instance_get_by_uuid(context, uuid)
def instance_get(context, instance_id):
return IMPL.instance_get(context, instance_id)
def instance_get_all(context, columns_to_join=None):
return IMPL.instance_get_all(context, columns_to_join=columns_to_join)
def instance_get_all_by_filters(context, filters, sort_key='created_at',
sort_dir='desc'):
return IMPL.instance_get_all_by_filters(context, filters, sort_key,
sort_dir)
def instance_get_active_by_window(context, begin, end=None, project_id=None,
host=None):
return IMPL.instance_get_active_by_window(context, begin, end,
project_id, host)
def instance_get_active_by_window_joined(context, begin, end=None,
project_id=None, host=None):
return IMPL.instance_get_active_by_window_joined(context, begin, end,
project_id, host)
def instance_get_all_by_project(context, project_id):
return IMPL.instance_get_all_by_project(context, project_id)
def instance_get_all_by_host(context, host):
return IMPL.instance_get_all_by_host(context, host)
def instance_get_all_by_host_and_not_type(context, host, type_id=None):
return IMPL.instance_get_all_by_host_and_not_type(context, host, type_id)
def instance_get_all_by_reservation(context, reservation_id):
return IMPL.instance_get_all_by_reservation(context, reservation_id)
def instance_get_floating_address(context, instance_id):
return IMPL.instance_get_floating_address(context, instance_id)
def instance_get_all_hung_in_rebooting(context, reboot_window):
return IMPL.instance_get_all_hung_in_rebooting(context, reboot_window)
def instance_test_and_set(context, instance_uuid, attr, ok_states,
new_state):
return IMPL.instance_test_and_set(context, instance_uuid, attr,
ok_states, new_state)
def instance_update(context, instance_uuid, values):
return IMPL.instance_update(context, instance_uuid, values)
def instance_update_and_get_original(context, instance_uuid, values):
return IMPL.instance_update_and_get_original(context, instance_uuid,
values)
def instance_add_security_group(context, instance_id, security_group_id):
return IMPL.instance_add_security_group(context, instance_id,
security_group_id)
def instance_remove_security_group(context, instance_id, security_group_id):
return IMPL.instance_remove_security_group(context, instance_id,
security_group_id)
return IMPL.instance_info_cache_get(context, instance_uuid)
def instance_info_cache_update(context, instance_uuid, values):
return IMPL.instance_info_cache_update(context, instance_uuid, values)
def instance_info_cache_delete(context, instance_uuid):
return IMPL.instance_info_cache_delete(context, instance_uuid)
context, user_id, name)
def key_pair_destroy_all_by_user(context, user_id):
return IMPL.key_pair_destroy_all_by_user(context, user_id)
def key_pair_get(context, user_id, name):
return IMPL.key_pair_get(context, user_id, name)
def key_pair_get_all_by_user(context, user_id):
return IMPL.key_pair_get_all_by_user(context, user_id)
def key_pair_count_by_user(context, user_id):
return IMPL.key_pair_count_by_user(context, user_id)
n IMPL.network_count(context)
def network_count_reserved_ips(context, network_id):
return IMPL.network_count_reserved_ips(context, network_id)
def network_create_safe(context, values):
return IMPL.network_create_safe(context, values)
def network_delete_safe(context, network_id):
return IMPL.network_delete_safe(context, network_id)
def network_create_fixed_ips(context, network_id, num_vpn_clients):
return IMPL.network_create_fixed_ips(context, network_id, num_vpn_clients)
def network_disassociate(context, network_id):
return IMPL.network_disassociate(context, network_id)
def network_get(context, network_id):
return IMPL.network_get(context, network_id)
def network_get_all(context):
return IMPL.network_get_all(context)
def network_get_all_by_uuids(context, network_uuids, project_id=None):
return IMPL.network_get_all_by_uuids(context, network_uuids, project_id)
def network_get_associated_fixed_ips(context, network_id, host=None):
return IMPL.network_get_associated_fixed_ips(context, network_id, host)
def network_get_by_bridge(context, bridge):
return IMPL.network_get_by_bridge(context, bridge)
def network_get_by_uuid(context, uuid):
return IMPL.network_get_by_uuid(context, uuid)
def network_get_by_cidr(context, cidr):
return IMPL.network_get_by_cidr(context, cidr)
def network_get_by_instance(context, instance_id):
return IMPL.network_get_by_instance(context, instance_id)
def network_get_all_by_instance(context, instance_id):
return IMPL.network_get_all_by_instance(context, instance_id)
def network_get_all_by_host(context, host):
return IMPL.network_get_all_by_host(context, host)
def network_get_index(context, network_id):
return IMPL.network_get_index(context, network_id)
def network_set_cidr(context, network_id, cidr):
return IMPL.network_set_cidr(context, network_id, cidr)
def network_set_host(context, network_id, host_id):
return IMPL.network_set_host(context, network_id, host_id)
def network_update(context, network_id, values):
return IMPL.network_update(context, network_id, values)
L.iscsi_target_create_safe(context, values)
id, resource, limit)
def quota_get(context, project_id, resource):
return IMPL.quota_get(context, project_id, resource)
def quota_get_all_by_project(context, project_id):
return IMPL.quota_get_all_by_project(context, project_id)
def quota_update(context, project_id, resource, limit):
return IMPL.quota_update(context, project_id, resource, limit)
def quota_destroy(context, project_id, resource):
return IMPL.quota_destroy(context, project_id, resource)
ass_name, resource):
return IMPL.quota_class_get(context, class_name, resource)
def quota_class_get_all_by_name(context, class_name):
return IMPL.quota_class_get_all_by_name(context, class_name)
def quota_class_update(context, class_name, resource, limit):
return IMPL.quota_class_update(context, class_name, resource, limit)
def quota_class_destroy(context, class_name, resource):
return IMPL.quota_class_destroy(context, class_name, resource)
def quota_class_destroy_all_by_name(context, class_name):
return IMPL.quota_class_destroy_all_by_name(context, class_name)
esource,
in_use, reserved, until_refresh)
def quota_usage_get(context, project_id, resource):
return IMPL.quota_usage_get(context, project_id, resource)
def quota_usage_get_all_by_project(context, project_id):
return IMPL.quota_usage_get_all_by_project(context, project_id)
def quota_usage_update(context, project_id, resource, in_use, reserved,
until_refresh):
return IMPL.quota_usage_update(context, project_id, resource,
in_use, reserved, until_refresh)
def quota_usage_destroy(context, project_id, resource):
return IMPL.quota_usage_destroy(context, project_id, resource)
ct_id,
resource, delta, expire)
def reservation_get(context, uuid):
return IMPL.reservation_get(context, uuid)
def reservation_get_all_by_project(context, project_id):
return IMPL.reservation_get_all_by_project(context, project_id)
def reservation_destroy(context, uuid):
return IMPL.reservation_destroy(context, uuid)
s, expire,
until_refresh, max_age)
def reservation_commit(context, reservations):
return IMPL.reservation_commit(context, reservations)
def reservation_rollback(context, reservations):
return IMPL.reservation_rollback(context, reservations)
def quota_destroy_all_by_project(context, project_id):
return IMPL.quota_destroy_all_by_project(context, project_id)
def reservation_expire(context):
return IMPL.reservation_expire(context)
_id, instance_id, mountpoint):
return IMPL.volume_attached(context, volume_id, instance_id, mountpoint)
def volume_create(context, values):
return IMPL.volume_create(context, values)
def volume_data_get_for_project(context, project_id, session=None):
return IMPL.volume_data_get_for_project(context, project_id,
session=session)
def volume_destroy(context, volume_id):
return IMPL.volume_destroy(context, volume_id)
def volume_detached(context, volume_id):
return IMPL.volume_detached(context, volume_id)
def volume_get(context, volume_id):
return IMPL.volume_get(context, volume_id)
def volume_get_all(context):
return IMPL.volume_get_all(context)
def volume_get_all_by_host(context, host):
return IMPL.volume_get_all_by_host(context, host)
def volume_get_all_by_instance_uuid(context, instance_uuid):
return IMPL.volume_get_all_by_instance_uuid(context, instance_uuid)
def volume_get_all_by_project(context, project_id):
return IMPL.volume_get_all_by_project(context, project_id)
def volume_get_by_ec2_id(context, ec2_id):
return IMPL.volume_get_by_ec2_id(context, ec2_id)
def volume_get_iscsi_target_num(context, volume_id):
return IMPL.volume_get_iscsi_target_num(context, volume_id)
def volume_update(context, volume_id, values):
return IMPL.volume_update(context, volume_id, values)
def get_ec2_volume_id_by_uuid(context, volume_id):
return IMPL.get_ec2_volume_id_by_uuid(context, volume_id)
def get_volume_uuid_by_ec2_id(context, ec2_id):
return IMPL.get_volume_uuid_by_ec2_id(context, ec2_id)
def ec2_volume_create(context, volume_id, forced_id=None):
return IMPL.ec2_volume_create(context, volume_id, forced_id)
def get_snapshot_uuid_by_ec2_id(context, ec2_id):
return IMPL.get_snapshot_uuid_by_ec2_id(context, ec2_id)
def get_ec2_snapshot_id_by_uuid(context, snapshot_id):
return IMPL.get_ec2_snapshot_id_by_uuid(context, snapshot_id)
def ec2_snapshot_create(context, snapshot_id, forced_id=None):
return IMPL.ec2_snapshot_create(context, snapshot_id, forced_id)
def snapshot_get(context, snapshot_id):
return IMPL.snapshot_get(context, snapshot_id)
def snapshot_get_all(context):
return IMPL.snapshot_get_all(context)
def snapshot_get_all_by_project(context, project_id):
return IMPL.snapshot_get_all_by_project(context, project_id)
def snapshot_get_all_for_volume(context, volume_id):
return IMPL.snapshot_get_all_for_volume(context, volume_id)
def snapshot_update(context, snapshot_id, values):
return IMPL.snapshot_update(context, snapshot_id, values)
block_device_mapping_update(context, bdm_id, values)
def block_device_mapping_update_or_create(context, values):
return IMPL.block_device_mapping_update_or_create(context, values)
def block_device_mapping_get_all_by_instance(context, instance_uuid):
return IMPL.block_device_mapping_get_all_by_instance(context,
instance_uuid)
def block_device_mapping_destroy(context, bdm_id):
return IMPL.block_device_mapping_destroy(context, bdm_id)
def block_device_mapping_destroy_by_instance_and_device(context, instance_uuid,
device_name):
return IMPL.block_device_mapping_destroy_by_instance_and_device(
context, instance_uuid, device_name)
def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid,
volume_id):
return IMPL.block_device_mapping_destroy_by_instance_and_volume(
context, instance_uuid, volume_id)
rity_group_id)
def security_group_get_by_name(context, project_id, group_name):
return IMPL.security_group_get_by_name(context, project_id, group_name)
def security_group_get_by_project(context, project_id):
return IMPL.security_group_get_by_project(context, project_id)
def security_group_get_by_instance(context, instance_id):
return IMPL.security_group_get_by_instance(context, instance_id)
def security_group_exists(context, project_id, group_name):
return IMPL.security_group_exists(context, project_id, group_name)
def security_group_in_use(context, group_id):
return IMPL.security_group_in_use(context, group_id)
def security_group_create(context, values):
return IMPL.security_group_create(context, values)
def security_group_ensure_default(context):
return IMPL.security_group_ensure_default(context)
def security_group_destroy(context, security_group_id):
return IMPL.security_group_destroy(context, security_group_id)
def security_group_count_by_project(context, project_id, session=None):
return IMPL.security_group_count_by_project(context, project_id,
session=session)
return IMPL.security_group_rule_get_by_security_group(context,
security_group_id)
def security_group_rule_get_by_security_group_grantee(context,
security_group_id):
return IMPL.security_group_rule_get_by_security_group_grantee(context,
security_group_id)
def security_group_rule_destroy(context, security_group_rule_id):
return IMPL.security_group_rule_destroy(context, security_group_rule_id)
def security_group_rule_get(context, security_group_rule_id):
return IMPL.security_group_rule_get(context, security_group_rule_id)
def security_group_rule_count_by_group(context, security_group_id):
return IMPL.security_group_rule_count_by_group(context, security_group_id)
rule_get_all(context)
def provider_fw_rule_destroy(context, rule_id):
return IMPL.provider_fw_rule_destroy(context, rule_id)
nsole_type):
return IMPL.console_pool_get_by_host_type(context,
compute_host,
proxy_host,
console_type)
def console_pool_get_all_by_host_type(context, host, console_type):
return IMPL.console_pool_get_all_by_host_type(context,
host,
console_type)
def console_create(context, values):
return IMPL.console_create(context, values)
def console_delete(context, console_id):
return IMPL.console_delete(context, console_id)
def console_get_by_pool_instance(context, pool_id, instance_uuid):
return IMPL.console_get_by_pool_instance(context, pool_id, instance_uuid)
def console_get_all_by_instance(context, instance_uuid):
return IMPL.console_get_all_by_instance(context, instance_uuid)
def console_get(context, console_id, instance_uuid=None):
return IMPL.console_get(context, console_id, instance_uuid)
filters=None):
return IMPL.instance_type_get_all(
context, inactive=inactive, filters=filters)
def instance_type_get(context, id):
return IMPL.instance_type_get(context, id)
def instance_type_get_by_name(context, name):
return IMPL.instance_type_get_by_name(context, name)
def instance_type_get_by_flavor_id(context, id):
return IMPL.instance_type_get_by_flavor_id(context, id)
def instance_type_destroy(context, name):
return IMPL.instance_type_destroy(context, name)
def instance_type_access_get_by_flavor_id(context, flavor_id):
return IMPL.instance_type_access_get_by_flavor_id(context, flavor_id)
def instance_type_access_add(context, flavor_id, project_id):
return IMPL.instance_type_access_add(context, flavor_id, project_id)
def instance_type_access_remove(context, flavor_id, project_id):
return IMPL.instance_type_access_remove(context, flavor_id, project_id)
ance_metadata_delete(context, instance_uuid, key)
def instance_metadata_update(context, instance_uuid, metadata, delete):
return IMPL.instance_metadata_update(context, instance_uuid,
metadata, delete)
, key):
IMPL.instance_system_metadata_delete(context, instance_uuid, key)
def instance_system_metadata_update(context, instance_uuid, metadata, delete):
IMPL.instance_system_metadata_update(
context, instance_uuid, metadata, delete)
build_get_by_triple(context, hypervisor, os,
architecture)
def agent_build_get_all(context):
return IMPL.agent_build_get_all(context)
def agent_build_destroy(context, agent_update_id):
IMPL.agent_build_destroy(context, agent_update_id)
def agent_build_update(context, agent_build_id, values):
IMPL.agent_build_update(context, agent_build_id, values)
bw_out,
last_refreshed=None):
return IMPL.bw_usage_update(context, uuid, mac, start_period, bw_in,
bw_out, last_refreshed=last_refreshed)
IMPL.instance_type_extra_specs_delete(context, flavor_id, key)
def instance_type_extra_specs_update_or_create(context, flavor_id,
extra_specs):
IMPL.instance_type_extra_specs_update_or_create(context, flavor_id,
extra_specs)
key):
IMPL.volume_metadata_delete(context, volume_id, key)
def volume_metadata_update(context, volume_id, metadata, delete):
IMPL.volume_metadata_update(context, volume_id, metadata, delete)
eturn IMPL.volume_type_get_all(context, inactive)
def volume_type_get(context, id):
return IMPL.volume_type_get(context, id)
def volume_type_get_by_name(context, name):
return IMPL.volume_type_get_by_name(context, name)
def volume_type_destroy(context, name):
return IMPL.volume_type_destroy(context, name)
def volume_get_active_by_window(context, begin, end=None, project_id=None):
return IMPL.volume_get_active_by_window(context, begin, end, project_id)
, key):
IMPL.volume_type_extra_specs_delete(context, volume_type_id, key)
def volume_type_extra_specs_update_or_create(context, volume_type_id,
extra_specs):
IMPL.volume_type_extra_specs_update_or_create(context, volume_type_id,
extra_specs)
id(context, image_uuid)
def s3_image_create(context, image_uuid):
return IMPL.s3_image_create(context, image_uuid)
backend_conf_update(context, sm_backend_conf_id, values)
def sm_backend_conf_delete(context, sm_backend_conf_id):
return IMPL.sm_backend_conf_delete(context, sm_backend_conf_id)
def sm_backend_conf_get(context, sm_backend_conf_id):
return IMPL.sm_backend_conf_get(context, sm_backend_conf_id)
def sm_backend_conf_get_by_sr(context, sr_uuid):
return IMPL.sm_backend_conf_get_by_sr(context, sr_uuid)
def sm_backend_conf_get_all(context):
return IMPL.sm_backend_conf_get_all(context)
m_flavor_id, values)
def sm_flavor_delete(context, sm_flavor_id):
return IMPL.sm_flavor_delete(context, sm_flavor_id)
def sm_flavor_get(context, sm_flavor_id):
return IMPL.sm_flavor_get(context, sm_flavor_id)
def sm_flavor_get_all(context):
return IMPL.sm_flavor_get_all(context)
def sm_flavor_get_by_label(context, sm_flavor_label):
return IMPL.sm_flavor_get_by_label(context, sm_flavor_label)
es)
def sm_volume_delete(context, volume_id):
return IMPL.sm_volume_delete(context, volume_id)
def sm_volume_get(context, volume_id):
return IMPL.sm_volume_get(context, volume_id)
def sm_volume_get_all(context):
return IMPL.sm_volume_get_all(context)
(context, aggregate_id)
def aggregate_get_by_host(context, host, key=None):
return IMPL.aggregate_get_by_host(context, host, key)
def aggregate_metadata_get_by_host(context, host, key=None):
return IMPL.aggregate_metadata_get_by_host(context, host, key)
def aggregate_update(context, aggregate_id, values):
return IMPL.aggregate_update(context, aggregate_id, values)
def aggregate_delete(context, aggregate_id):
return IMPL.aggregate_delete(context, aggregate_id)
def aggregate_get_all(context):
return IMPL.aggregate_get_all(context)
def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False):
IMPL.aggregate_metadata_add(context, aggregate_id, metadata, set_delete)
def aggregate_metadata_get(context, aggregate_id):
return IMPL.aggregate_metadata_get(context, aggregate_id)
def aggregate_metadata_delete(context, aggregate_id, key):
IMPL.aggregate_metadata_delete(context, aggregate_id, key)
def aggregate_host_add(context, aggregate_id, host):
IMPL.aggregate_host_add(context, aggregate_id, host)
def aggregate_host_get_all(context, aggregate_id):
return IMPL.aggregate_host_get_all(context, aggregate_id)
def aggregate_host_delete(context, aggregate_id, host):
IMPL.aggregate_host_delete(context, aggregate_id, host)
tance_fault_get_by_instance_uuids(context, instance_uuids)
L.get_instance_uuid_by_ec2_id(context, ec2_id)
def ec2_instance_create(context, instance_ec2_id):
return IMPL.ec2_instance_create(context, instance_ec2_id)
message=None,
session=None):
return IMPL.task_log_end_task(context, task_name,
period_beginning,
period_ending,
host,
errors,
message,
session)
def task_log_begin_task(context, task_name,
period_beginning,
period_ending,
host,
task_items=None,
message=None,
session=None):
return IMPL.task_log_begin_task(context, task_name,
period_beginning,
period_ending,
host,
task_items,
message,
session)
def task_log_get_all(context, task_name, period_beginning,
period_ending, host=None, state=None, session=None):
return IMPL.task_log_get_all(context, task_name, period_beginning,
period_ending, host, state, session)
def task_log_get(context, task_name, period_beginning,
period_ending, host, state=None, session=None):
return IMPL.task_log_get(context, task_name, period_beginning,
period_ending, host, state, session)
| true
| true
|
f718f09f6947d861bde6f19e620f9dea6e20e15e
| 1,128
|
py
|
Python
|
codes/webdav/app.py
|
e7217/control-tv
|
85533786528fe6379fd20700612d23de545111ab
|
[
"MIT"
] | null | null | null |
codes/webdav/app.py
|
e7217/control-tv
|
85533786528fe6379fd20700612d23de545111ab
|
[
"MIT"
] | null | null | null |
codes/webdav/app.py
|
e7217/control-tv
|
85533786528fe6379fd20700612d23de545111ab
|
[
"MIT"
] | null | null | null |
import os
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
class Target:
watchDir = os.getcwd()
    # watchDir specifies the directory to watch.
def __init__(self):
        self.observer = Observer()  # create an Observer object
def run(self):
event_handler = Handler()
self.observer.schedule(event_handler, self.watchDir,
recursive=True)
self.observer.start()
try:
while True:
time.sleep(1)
except:
self.observer.stop()
print("Error")
self.observer.join()
class Handler(FileSystemEventHandler):
    # Inherits from FileSystemEventHandler.
    # Overrides the handler methods below.
    # Runs when a file or directory is moved or renamed.
def on_moved(self, event):
print(event)
    def on_created(self, event):  # runs when a file or directory is created
print(event)
    def on_deleted(self, event):  # runs when a file or directory is deleted
print(event)
    def on_modified(self, event):  # runs when a file or directory is modified
print(event)
if __name__ == '__main__':  # run only when this file is executed directly
w = Target()
w.run()
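# Notes (added for clarity, not part of the original script): watchdog is a
# third-party package (pip install watchdog), and a single file save can emit
# several events (e.g. more than one on_modified), so consumers may want to
# de-duplicate or debounce them.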
| 24.521739
| 60
| 0.606383
|
import os
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
class Target:
watchDir = os.getcwd()
def __init__(self):
self.observer = Observer()
def run(self):
event_handler = Handler()
self.observer.schedule(event_handler, self.watchDir,
recursive=True)
self.observer.start()
try:
while True:
time.sleep(1)
except:
self.observer.stop()
print("Error")
self.observer.join()
class Handler(FileSystemEventHandler):
def on_moved(self, event):
print(event)
def on_created(self, event):
print(event)
def on_deleted(self, event):
print(event)
def on_modified(self, event):
print(event)
if __name__ == '__main__':
w = Target()
w.run()
| false
| true
|
f718f27b7eda35619be65225a957b0ca51a830f4
| 122
|
py
|
Python
|
dist/Basilisk/fswAlgorithms/sunSafeACS/__init__.py
|
ian-cooke/basilisk_mag
|
a8b1e37c31c1287549d6fd4d71fcaa35b6fc3f14
|
[
"0BSD"
] | null | null | null |
dist/Basilisk/fswAlgorithms/sunSafeACS/__init__.py
|
ian-cooke/basilisk_mag
|
a8b1e37c31c1287549d6fd4d71fcaa35b6fc3f14
|
[
"0BSD"
] | 1
|
2019-03-13T20:52:22.000Z
|
2019-03-13T20:52:22.000Z
|
dist/Basilisk/fswAlgorithms/sunSafeACS/__init__.py
|
ian-cooke/basilisk_mag
|
a8b1e37c31c1287549d6fd4d71fcaa35b6fc3f14
|
[
"0BSD"
] | null | null | null |
# This __init__.py file for the sunSafeACS package is automatically generated by the build system
from sunSafeACS import *
| 61
| 97
| 0.827869
|
from sunSafeACS import *
| true
| true
|
f718f27c1be88d449be28eb8f385613384671af6
| 17,084
|
py
|
Python
|
pysnmp-with-texts/CADANT-REMOTE-QUERY-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 8
|
2019-05-09T17:04:00.000Z
|
2021-06-09T06:50:51.000Z
|
pysnmp-with-texts/CADANT-REMOTE-QUERY-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 4
|
2019-05-31T16:42:59.000Z
|
2020-01-31T21:57:17.000Z
|
pysnmp-with-texts/CADANT-REMOTE-QUERY-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module CADANT-REMOTE-QUERY-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CADANT-REMOTE-QUERY-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:46:22 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueSizeConstraint, ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueSizeConstraint", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion")
cadIfCmtsCmStatusEntry, = mibBuilder.importSymbols("CADANT-CMTS-MAC-MIB", "cadIfCmtsCmStatusEntry")
cadCmRemoteQuery, = mibBuilder.importSymbols("CADANT-PRODUCTS-MIB", "cadCmRemoteQuery")
TenthdBmV, TenthdB = mibBuilder.importSymbols("DOCS-IF-MIB", "TenthdBmV", "TenthdB")
InetAddress, InetAddressType = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddress", "InetAddressType")
NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ObjectGroup", "ModuleCompliance")
NotificationType, ModuleIdentity, Counter64, iso, MibIdentifier, Unsigned32, Integer32, Gauge32, Bits, ObjectIdentity, Counter32, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "ModuleIdentity", "Counter64", "iso", "MibIdentifier", "Unsigned32", "Integer32", "Gauge32", "Bits", "ObjectIdentity", "Counter32", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks")
TimeStamp, TextualConvention, DisplayString, MacAddress, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "TimeStamp", "TextualConvention", "DisplayString", "MacAddress", "TruthValue")
cadCmRemoteQueryMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1))
cadCmRemoteQueryMib.setRevisions(('2004-08-31 00:00', '2006-09-27 00:00', '2009-01-15 00:00', '2009-01-20 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: cadCmRemoteQueryMib.setRevisionsDescriptions(('Initial version of this MIB. ', 'Added IPaddressType and IPaddress. ', 'Fixed start/stop time for upTime rollover. ', 'Fixed poll time for upTime rollover. ',))
if mibBuilder.loadTexts: cadCmRemoteQueryMib.setLastUpdated('200901200000Z')
if mibBuilder.loadTexts: cadCmRemoteQueryMib.setOrganization('Arris International')
if mibBuilder.loadTexts: cadCmRemoteQueryMib.setContactInfo(' Arris Support Postal: ARRIS Phone: +1 770 622 8530 E-mail: support@arrisi.com')
if mibBuilder.loadTexts: cadCmRemoteQueryMib.setDescription('This MIB module provides the management of the Cadant C4 Cable Modem Termination Systems (CMTS) Remote Query feature. This feature, implemented on a CMTS, facilitates SNMP polling of remote cable modems (CMs). This MIB includes the configuration and status objects of the CMTS CM Poller and remote CMs that are polled by the CMTS CM Poller')
cadCmRemoteQueryPoller = MibIdentifier((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 1))
cadCmRemoteQueryPollerEnable = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 1, 1), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cadCmRemoteQueryPollerEnable.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQueryPollerEnable.setDescription('An indication of whether the CMTS CM Poller is running. When the poller is enabled, it polls the remote CMs periodically specified in the cadCmRemoteQueryPollerInterval mib object. The operation can be expensive depending on how many CMs that the poller would be polling. It is suggested to have it disabled when not needed.')
cadCmRemoteQueryPollerInterval = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(30, 86400)).clone(1800)).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: cadCmRemoteQueryPollerInterval.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQueryPollerInterval.setDescription("An interval between two polling cycles of the CMTS CM Poller. The poller will not start the next cycle until it finished polling for the last CM , even though the time has expired. If the cadCmRemoteQueryPollerInterval is too small with a large number of CMs, the poller would tie up the CPU and resources and possibly degrade the system's performance.")
cadCmRemoteQueryPollerStartTime = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 1, 3), Unsigned32()).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmRemoteQueryPollerStartTime.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQueryPollerStartTime.setDescription("The value of sysUpTime in seconds when the last polling cycle started. cadCmRemoteQueryPollerStartTime is set to 0 when the CMTS is first restarted and doesn't get reset after the poller is disabled.")
cadCmRemoteQueryPollerStopTime = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 1, 4), Unsigned32()).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmRemoteQueryPollerStopTime.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQueryPollerStopTime.setDescription("The value of sysUpTime in seconds when the last polling cycle finished. cadCmRemoteQueryPollerStopTime is set to 0 when the CMTS is first restarted and doesn't get reset after the poller is disabled.")
cadCmRemoteQueryPollerCommunity = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 32)).clone('public')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cadCmRemoteQueryPollerCommunity.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQueryPollerCommunity.setDescription('The read community string is used for polling the CMs. Any change in the cadCmRemoteQueryPollerCommunity may not be reflected if the poller has already been enabled.')
cadCmRemoteQueryMacAddress = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 1, 6), MacAddress().clone(hexValue="000000000000")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cadCmRemoteQueryMacAddress.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQueryMacAddress.setDescription('The object is used to trigger an immediate poll of a specific CM. This object always reads back as a value of 0. The value of sysUpTime should be read and saved before writing this object. Then, the value of cadCmRemoteQueryPollTime can be read and compared to the saved sysUpTime to determine when the immediate poll of remote query data has been completed.')
cadCmRemoteQuerySourceIpAddrType = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 1, 7), InetAddressType().clone('ipv4')).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmRemoteQuerySourceIpAddrType.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQuerySourceIpAddrType.setDescription('The type of internet address of cadCmRemoteQuerySourceIpAddr.')
cadCmRemoteQuerySourceIpAddr = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 1, 8), InetAddress().clone(hexValue="00000000")).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmRemoteQuerySourceIpAddr.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQuerySourceIpAddr.setDescription('The source IP address of the SNMP remote queries to the CMs. This value is changed from the Poller when the ifIndex of the cadSysSourceInterfaceRemoteQuery in the CADANT-CMTS-SYSTEM-MIB is changed due to using the cli command .. configure cable modem remote-query source-interface ..')
cadCmRemoteQueryStatusTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 2), )
if mibBuilder.loadTexts: cadCmRemoteQueryStatusTable.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQueryStatusTable.setDescription('This table contains the status of the cable modems that are polled by the CMTS CM Poller. The information will be overwritten when a new polling cycle starts. Depending on how many CMs that the poller is polling, polling this table constantly can be quite expensive; Excessive polling could degrade performance.')
cadCmRemoteQueryStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 2, 1), )
cadIfCmtsCmStatusEntry.registerAugmentions(("CADANT-REMOTE-QUERY-MIB", "cadCmRemoteQueryStatusEntry"))
cadCmRemoteQueryStatusEntry.setIndexNames(*cadIfCmtsCmStatusEntry.getIndexNames())
if mibBuilder.loadTexts: cadCmRemoteQueryStatusEntry.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQueryStatusEntry.setDescription("A list of the cable modem's attributes that are polled by a CMTS. ")
cadCmRemoteQueryPollTime = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 2, 1, 1), Unsigned32()).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmRemoteQueryPollTime.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQueryPollTime.setDescription('The value of sysUpTime when this CM entry was polled. If this value is larger than the cadCmRemoteQueryPollerStopTime, it indicates that the status has already been overwritten by a new polling cycle. To avoid this happening, the NMS can increase the cadCmRemoteQueryPollerInterval so that the cadCmRemoteQueryPollTime would fall in between the cadCmRemoteQueryPollerStartTime and the cadCmRemoteQueryPollerStopTime.')
cadCmRemoteQueryDownChannelPower = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 2, 1, 2), TenthdBmV()).setUnits('dBmV').setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmRemoteQueryDownChannelPower.setReference('DOCSIS Radio Frequency Interface Specification, Table 4-12 and Table 4-13.')
if mibBuilder.loadTexts: cadCmRemoteQueryDownChannelPower.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQueryDownChannelPower.setDescription("The CM's received power level. This object may be set to zero if the CM does not support power level measurement. If the CM downstream interface is down, this object either returns the most current value or the value of 0.")
cadCmRemoteQueryStatusTxPower = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 2, 1, 3), TenthdBmV()).setUnits('dBmV').setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmRemoteQueryStatusTxPower.setReference('DOCSIS Radio Frequency Interface specification, Section 4.2.8.')
if mibBuilder.loadTexts: cadCmRemoteQueryStatusTxPower.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQueryStatusTxPower.setDescription('The operational transmit power for the CM upstream channel.')
cadCmRemoteQueryUpChnlTxTimingOffset = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 2, 1, 4), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmRemoteQueryUpChnlTxTimingOffset.setReference('DOCSIS Radio Frequency Interface Specification, Section 6.5.')
if mibBuilder.loadTexts: cadCmRemoteQueryUpChnlTxTimingOffset.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQueryUpChnlTxTimingOffset.setDescription('A measure of the current round trip time. Used for timing of CM upstream transmissions to ensure synchronized arrivals at the CMTS.')
cadCmRemoteQuerySigQSignalNoise = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 2, 1, 5), TenthdB()).setUnits('dB').setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmRemoteQuerySigQSignalNoise.setReference('DOCSIS Radio Frequency Interface specification, Table 2-1 and 2-2')
if mibBuilder.loadTexts: cadCmRemoteQuerySigQSignalNoise.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQuerySigQSignalNoise.setDescription('Signal/Noise ratio as perceived for the CM downstream channel.')
cadCmRemoteQuerySigQMicroreflections = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 2, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setUnits('dBc').setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmRemoteQuerySigQMicroreflections.setReference('DOCSIS Radio Frequency Interface specification, Table 2-1 and 2-2')
if mibBuilder.loadTexts: cadCmRemoteQuerySigQMicroreflections.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQuerySigQMicroreflections.setDescription('Total microreflections including in-channel response as perceived on the CM downstream, measured in dBc below the signal level. This object is not assumed to return an absolutely accurate value, but is meant to give a rough indication of microreflections received on this interface. It is up to the implementor to provide information as accurate as possible.')
cadCmRemoteQuerySysDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 2, 1, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmRemoteQuerySysDescr.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQuerySysDescr.setDescription('A textual description of the entity.')
cadCmRemoteQueryConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 3))
cadCmRemoteQueryDocsRemoteQueryCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 3, 1))
cadCmRemoteQueryDocsRemoteQueryGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 3, 2))
cadCmRemoteQueryDocsRemoteQueryCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 3, 1, 1)).setObjects(("CADANT-REMOTE-QUERY-MIB", "cadCmRemoteQueryPollerGroup"), ("CADANT-REMOTE-QUERY-MIB", "cadCmRemoteQueryStatusGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cadCmRemoteQueryDocsRemoteQueryCompliance = cadCmRemoteQueryDocsRemoteQueryCompliance.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQueryDocsRemoteQueryCompliance.setDescription('The compliance statement for entities which implement the Remote Query MIB')
cadCmRemoteQueryPollerGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 3, 2, 1)).setObjects(("CADANT-REMOTE-QUERY-MIB", "cadCmRemoteQueryPollerEnable"), ("CADANT-REMOTE-QUERY-MIB", "cadCmRemoteQueryPollerInterval"), ("CADANT-REMOTE-QUERY-MIB", "cadCmRemoteQueryPollerStartTime"), ("CADANT-REMOTE-QUERY-MIB", "cadCmRemoteQueryPollerStopTime"), ("CADANT-REMOTE-QUERY-MIB", "cadCmRemoteQueryPollerCommunity"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cadCmRemoteQueryPollerGroup = cadCmRemoteQueryPollerGroup.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQueryPollerGroup.setDescription('Group of objects implemented in Cable Modem Termination Systems (CMTS) for configuring and monitoring the CMTS CM Poller.')
cadCmRemoteQueryStatusGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 3, 2, 2)).setObjects(("CADANT-REMOTE-QUERY-MIB", "cadCmRemoteQueryPollTime"), ("CADANT-REMOTE-QUERY-MIB", "cadCmRemoteQueryDownChannelPower"), ("CADANT-REMOTE-QUERY-MIB", "cadCmRemoteQueryStatusTxPower"), ("CADANT-REMOTE-QUERY-MIB", "cadCmRemoteQueryUpChnlTxTimingOffset"), ("CADANT-REMOTE-QUERY-MIB", "cadCmRemoteQuerySigQSignalNoise"), ("CADANT-REMOTE-QUERY-MIB", "cadCmRemoteQuerySigQMicroreflections"), ("CADANT-REMOTE-QUERY-MIB", "cadCmRemoteQuerySysDescr"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cadCmRemoteQueryStatusGroup = cadCmRemoteQueryStatusGroup.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQueryStatusGroup.setDescription('Group of objects implemented in Cable Modem Termination Systems (CMTS) for monitoring cable modems via CMTS CM Poller.')
mibBuilder.exportSymbols("CADANT-REMOTE-QUERY-MIB", cadCmRemoteQueryConformance=cadCmRemoteQueryConformance, cadCmRemoteQueryStatusTxPower=cadCmRemoteQueryStatusTxPower, cadCmRemoteQueryPollerStopTime=cadCmRemoteQueryPollerStopTime, cadCmRemoteQueryPollerInterval=cadCmRemoteQueryPollerInterval, cadCmRemoteQueryMacAddress=cadCmRemoteQueryMacAddress, cadCmRemoteQueryUpChnlTxTimingOffset=cadCmRemoteQueryUpChnlTxTimingOffset, cadCmRemoteQueryMib=cadCmRemoteQueryMib, cadCmRemoteQuerySigQSignalNoise=cadCmRemoteQuerySigQSignalNoise, PYSNMP_MODULE_ID=cadCmRemoteQueryMib, cadCmRemoteQueryPoller=cadCmRemoteQueryPoller, cadCmRemoteQuerySigQMicroreflections=cadCmRemoteQuerySigQMicroreflections, cadCmRemoteQuerySourceIpAddr=cadCmRemoteQuerySourceIpAddr, cadCmRemoteQueryStatusGroup=cadCmRemoteQueryStatusGroup, cadCmRemoteQueryDocsRemoteQueryGroups=cadCmRemoteQueryDocsRemoteQueryGroups, cadCmRemoteQueryPollerGroup=cadCmRemoteQueryPollerGroup, cadCmRemoteQueryPollerCommunity=cadCmRemoteQueryPollerCommunity, cadCmRemoteQueryStatusEntry=cadCmRemoteQueryStatusEntry, cadCmRemoteQueryDocsRemoteQueryCompliances=cadCmRemoteQueryDocsRemoteQueryCompliances, cadCmRemoteQueryPollTime=cadCmRemoteQueryPollTime, cadCmRemoteQueryPollerEnable=cadCmRemoteQueryPollerEnable, cadCmRemoteQueryPollerStartTime=cadCmRemoteQueryPollerStartTime, cadCmRemoteQueryDownChannelPower=cadCmRemoteQueryDownChannelPower, cadCmRemoteQueryStatusTable=cadCmRemoteQueryStatusTable, cadCmRemoteQuerySourceIpAddrType=cadCmRemoteQuerySourceIpAddrType, cadCmRemoteQuerySysDescr=cadCmRemoteQuerySysDescr, cadCmRemoteQueryDocsRemoteQueryCompliance=cadCmRemoteQueryDocsRemoteQueryCompliance)
| 165.864078
| 1,652
| 0.808242
|
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueSizeConstraint, ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueSizeConstraint", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion")
cadIfCmtsCmStatusEntry, = mibBuilder.importSymbols("CADANT-CMTS-MAC-MIB", "cadIfCmtsCmStatusEntry")
cadCmRemoteQuery, = mibBuilder.importSymbols("CADANT-PRODUCTS-MIB", "cadCmRemoteQuery")
TenthdBmV, TenthdB = mibBuilder.importSymbols("DOCS-IF-MIB", "TenthdBmV", "TenthdB")
InetAddress, InetAddressType = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddress", "InetAddressType")
NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ObjectGroup", "ModuleCompliance")
NotificationType, ModuleIdentity, Counter64, iso, MibIdentifier, Unsigned32, Integer32, Gauge32, Bits, ObjectIdentity, Counter32, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "ModuleIdentity", "Counter64", "iso", "MibIdentifier", "Unsigned32", "Integer32", "Gauge32", "Bits", "ObjectIdentity", "Counter32", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks")
TimeStamp, TextualConvention, DisplayString, MacAddress, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "TimeStamp", "TextualConvention", "DisplayString", "MacAddress", "TruthValue")
cadCmRemoteQueryMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1))
cadCmRemoteQueryMib.setRevisions(('2004-08-31 00:00', '2006-09-27 00:00', '2009-01-15 00:00', '2009-01-20 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: cadCmRemoteQueryMib.setRevisionsDescriptions(('Initial version of this MIB. ', 'Added IPaddressType and IPaddress. ', 'Fixed start/stop time for upTime rollover. ', 'Fixed poll time for upTime rollover. ',))
if mibBuilder.loadTexts: cadCmRemoteQueryMib.setLastUpdated('200901200000Z')
if mibBuilder.loadTexts: cadCmRemoteQueryMib.setOrganization('Arris International')
if mibBuilder.loadTexts: cadCmRemoteQueryMib.setContactInfo(' Arris Support Postal: ARRIS Phone: +1 770 622 8530 E-mail: support@arrisi.com')
if mibBuilder.loadTexts: cadCmRemoteQueryMib.setDescription('This MIB module provides the management of the Cadant C4 Cable Modem Termination Systems (CMTS) Remote Query feature. This feature, implemented on a CMTS, facilitates SNMP polling of remote cable modems (CMs). This MIB includes the configuration and status objects of the CMTS CM Poller and remote CMs that are polled by the CMTS CM Poller.')
cadCmRemoteQueryPoller = MibIdentifier((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 1))
cadCmRemoteQueryPollerEnable = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 1, 1), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cadCmRemoteQueryPollerEnable.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQueryPollerEnable.setDescription('An indication of whether the CMTS CM Poller is running. When the poller is enabled, it polls the remote CMs periodically specified in the cadCmRemoteQueryPollerInterval mib object. The operation can be expensive depending on how many CMs that the poller would be polling. It is suggested to have it disabled when not needed.')
cadCmRemoteQueryPollerInterval = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(30, 86400)).clone(1800)).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: cadCmRemoteQueryPollerInterval.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQueryPollerInterval.setDescription("An interval between two polling cycles of the CMTS CM Poller. The poller will not start the next cycle until it has finished polling the last CM, even though the time has expired. If the cadCmRemoteQueryPollerInterval is too small with a large number of CMs, the poller would tie up the CPU and resources and possibly degrade the system's performance.")
cadCmRemoteQueryPollerStartTime = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 1, 3), Unsigned32()).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmRemoteQueryPollerStartTime.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQueryPollerStartTime.setDescription("The value of sysUpTime in seconds when the last polling cycle started. cadCmRemoteQueryPollerStartTime is set to 0 when the CMTS is first restarted and doesn't get reset after the poller is disabled.")
cadCmRemoteQueryPollerStopTime = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 1, 4), Unsigned32()).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmRemoteQueryPollerStopTime.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQueryPollerStopTime.setDescription("The value of sysUpTime in seconds when the last polling cycle finished. cadCmRemoteQueryPollerStopTime is set to 0 when the CMTS is first restarted and doesn't get reset after the poller is disabled.")
cadCmRemoteQueryPollerCommunity = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 32)).clone('public')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cadCmRemoteQueryPollerCommunity.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQueryPollerCommunity.setDescription('The read community string is used for polling the CMs. Any change in the cadCmRemoteQueryPollerCommunity may not be reflected if the poller has already been enabled.')
cadCmRemoteQueryMacAddress = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 1, 6), MacAddress().clone(hexValue="000000000000")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cadCmRemoteQueryMacAddress.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQueryMacAddress.setDescription('The object is used to trigger an immediate poll of a specific CM. This object always reads back as a value of 0. The value of sysUpTime should be read and saved before writing this object. Then, the value of cadCmRemoteQueryPollTime can be read and compared to the saved sysUpTime to determine when the immediate poll of remote query data has been completed.')
cadCmRemoteQuerySourceIpAddrType = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 1, 7), InetAddressType().clone('ipv4')).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmRemoteQuerySourceIpAddrType.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQuerySourceIpAddrType.setDescription('The type of internet address of cadCmRemoteQuerySourceIpAddr.')
cadCmRemoteQuerySourceIpAddr = MibScalar((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 1, 8), InetAddress().clone(hexValue="00000000")).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmRemoteQuerySourceIpAddr.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQuerySourceIpAddr.setDescription('The source IP address of the SNMP remote queries to the CMs. This value is changed from the Poller when the ifIndex of the cadSysSourceInterfaceRemoteQuery in the CADANT-CMTS-SYSTEM-MIB is changed due to using the cli command .. configure cable modem remote-query source-interface ..')
cadCmRemoteQueryStatusTable = MibTable((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 2), )
if mibBuilder.loadTexts: cadCmRemoteQueryStatusTable.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQueryStatusTable.setDescription('This table contains the status of the cable modems that are polled by the CMTS CM Poller. The information will be overwritten when a new polling cycle starts. Depending on how many CMs that the poller is polling, polling this table constantly can be quite expensive; Excessive polling could degrade performance.')
cadCmRemoteQueryStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 2, 1), )
cadIfCmtsCmStatusEntry.registerAugmentions(("CADANT-REMOTE-QUERY-MIB", "cadCmRemoteQueryStatusEntry"))
cadCmRemoteQueryStatusEntry.setIndexNames(*cadIfCmtsCmStatusEntry.getIndexNames())
if mibBuilder.loadTexts: cadCmRemoteQueryStatusEntry.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQueryStatusEntry.setDescription("A list of the cable modem's attributes that are polled by a CMTS. ")
cadCmRemoteQueryPollTime = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 2, 1, 1), Unsigned32()).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmRemoteQueryPollTime.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQueryPollTime.setDescription('The value of sysUpTime when this CM entry was polled. If this value is larger than the cadCmRemoteQueryPollerStopTime, it indicates that the status has already been overwritten by a new polling cycle. To avoid this happening, the NMS can increase the cadCmRemoteQueryPollInterval so that the cadCmRemoteQueryPollTime would fall in between the cadCmRemoteQueryPollerStartTime and the cadCmRemoteQueryPollerStopTime.')
cadCmRemoteQueryDownChannelPower = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 2, 1, 2), TenthdBmV()).setUnits('dBmV').setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmRemoteQueryDownChannelPower.setReference('DOCSIS Radio Frequency Interface Specification, Table 4-12 and Table 4-13.')
if mibBuilder.loadTexts: cadCmRemoteQueryDownChannelPower.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQueryDownChannelPower.setDescription("The CM's received power level. This object may be set to zero if the CM does not support power level measurement. If the CM downstream interface is down, this object either returns the most current value or the value of 0.")
cadCmRemoteQueryStatusTxPower = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 2, 1, 3), TenthdBmV()).setUnits('dBmV').setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmRemoteQueryStatusTxPower.setReference('DOCSIS Radio Frequency Interface specification, Section 4.2.8.')
if mibBuilder.loadTexts: cadCmRemoteQueryStatusTxPower.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQueryStatusTxPower.setDescription('The operational transmit power for the CM upstream channel.')
cadCmRemoteQueryUpChnlTxTimingOffset = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 2, 1, 4), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmRemoteQueryUpChnlTxTimingOffset.setReference('DOCSIS Radio Frequency Interface Specification, Section 6.5.')
if mibBuilder.loadTexts: cadCmRemoteQueryUpChnlTxTimingOffset.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQueryUpChnlTxTimingOffset.setDescription('A measure of the current round trip time. Used for timing of CM upstream transmissions to ensure synchronized arrivals at the CMTS.')
cadCmRemoteQuerySigQSignalNoise = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 2, 1, 5), TenthdB()).setUnits('dB').setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmRemoteQuerySigQSignalNoise.setReference('DOCSIS Radio Frequency Interface specification, Table 2-1 and 2-2')
if mibBuilder.loadTexts: cadCmRemoteQuerySigQSignalNoise.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQuerySigQSignalNoise.setDescription('Signal/Noise ratio as perceived for the CM downstream channel.')
cadCmRemoteQuerySigQMicroreflections = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 2, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setUnits('dBc').setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmRemoteQuerySigQMicroreflections.setReference('DOCSIS Radio Frequency Interface specification, Table 2-1 and 2-2')
if mibBuilder.loadTexts: cadCmRemoteQuerySigQMicroreflections.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQuerySigQMicroreflections.setDescription('Total microreflections including in-channel response as perceived on the CM downstream, measured in dBc below the signal level. This object is not assumed to return an absolutely accurate value, but is meant to give a rough indication of microreflections received on this interface. It is up to the implementor to provide information as accurate as possible.')
cadCmRemoteQuerySysDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 2, 1, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cadCmRemoteQuerySysDescr.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQuerySysDescr.setDescription('A textual description of the entity.')
cadCmRemoteQueryConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 3))
cadCmRemoteQueryDocsRemoteQueryCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 3, 1))
cadCmRemoteQueryDocsRemoteQueryGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 3, 2))
cadCmRemoteQueryDocsRemoteQueryCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 3, 1, 1)).setObjects(("CADANT-REMOTE-QUERY-MIB", "cadCmRemoteQueryPollerGroup"), ("CADANT-REMOTE-QUERY-MIB", "cadCmRemoteQueryStatusGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cadCmRemoteQueryDocsRemoteQueryCompliance = cadCmRemoteQueryDocsRemoteQueryCompliance.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQueryDocsRemoteQueryCompliance.setDescription('The compliance statement for entities which implement the Remote Query MIB')
cadCmRemoteQueryPollerGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 3, 2, 1)).setObjects(("CADANT-REMOTE-QUERY-MIB", "cadCmRemoteQueryPollerEnable"), ("CADANT-REMOTE-QUERY-MIB", "cadCmRemoteQueryPollerInterval"), ("CADANT-REMOTE-QUERY-MIB", "cadCmRemoteQueryPollerStartTime"), ("CADANT-REMOTE-QUERY-MIB", "cadCmRemoteQueryPollerStopTime"), ("CADANT-REMOTE-QUERY-MIB", "cadCmRemoteQueryPollerCommunity"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cadCmRemoteQueryPollerGroup = cadCmRemoteQueryPollerGroup.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQueryPollerGroup.setDescription('Group of objects implemented in Cable Modem Termination Systems (CMTS) for configuring and monitoring the CMTS CM Poller.')
cadCmRemoteQueryStatusGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4998, 1, 1, 55, 1, 3, 2, 2)).setObjects(("CADANT-REMOTE-QUERY-MIB", "cadCmRemoteQueryPollTime"), ("CADANT-REMOTE-QUERY-MIB", "cadCmRemoteQueryDownChannelPower"), ("CADANT-REMOTE-QUERY-MIB", "cadCmRemoteQueryStatusTxPower"), ("CADANT-REMOTE-QUERY-MIB", "cadCmRemoteQueryUpChnlTxTimingOffset"), ("CADANT-REMOTE-QUERY-MIB", "cadCmRemoteQuerySigQSignalNoise"), ("CADANT-REMOTE-QUERY-MIB", "cadCmRemoteQuerySigQMicroreflections"), ("CADANT-REMOTE-QUERY-MIB", "cadCmRemoteQuerySysDescr"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cadCmRemoteQueryStatusGroup = cadCmRemoteQueryStatusGroup.setStatus('current')
if mibBuilder.loadTexts: cadCmRemoteQueryStatusGroup.setDescription('Group of objects implemented in Cable Modem Termination Systems (CMTS) for monitoring cable modems via CMTS CM Poller.')
mibBuilder.exportSymbols("CADANT-REMOTE-QUERY-MIB", cadCmRemoteQueryConformance=cadCmRemoteQueryConformance, cadCmRemoteQueryStatusTxPower=cadCmRemoteQueryStatusTxPower, cadCmRemoteQueryPollerStopTime=cadCmRemoteQueryPollerStopTime, cadCmRemoteQueryPollerInterval=cadCmRemoteQueryPollerInterval, cadCmRemoteQueryMacAddress=cadCmRemoteQueryMacAddress, cadCmRemoteQueryUpChnlTxTimingOffset=cadCmRemoteQueryUpChnlTxTimingOffset, cadCmRemoteQueryMib=cadCmRemoteQueryMib, cadCmRemoteQuerySigQSignalNoise=cadCmRemoteQuerySigQSignalNoise, PYSNMP_MODULE_ID=cadCmRemoteQueryMib, cadCmRemoteQueryPoller=cadCmRemoteQueryPoller, cadCmRemoteQuerySigQMicroreflections=cadCmRemoteQuerySigQMicroreflections, cadCmRemoteQuerySourceIpAddr=cadCmRemoteQuerySourceIpAddr, cadCmRemoteQueryStatusGroup=cadCmRemoteQueryStatusGroup, cadCmRemoteQueryDocsRemoteQueryGroups=cadCmRemoteQueryDocsRemoteQueryGroups, cadCmRemoteQueryPollerGroup=cadCmRemoteQueryPollerGroup, cadCmRemoteQueryPollerCommunity=cadCmRemoteQueryPollerCommunity, cadCmRemoteQueryStatusEntry=cadCmRemoteQueryStatusEntry, cadCmRemoteQueryDocsRemoteQueryCompliances=cadCmRemoteQueryDocsRemoteQueryCompliances, cadCmRemoteQueryPollTime=cadCmRemoteQueryPollTime, cadCmRemoteQueryPollerEnable=cadCmRemoteQueryPollerEnable, cadCmRemoteQueryPollerStartTime=cadCmRemoteQueryPollerStartTime, cadCmRemoteQueryDownChannelPower=cadCmRemoteQueryDownChannelPower, cadCmRemoteQueryStatusTable=cadCmRemoteQueryStatusTable, cadCmRemoteQuerySourceIpAddrType=cadCmRemoteQuerySourceIpAddrType, cadCmRemoteQuerySysDescr=cadCmRemoteQuerySysDescr, cadCmRemoteQueryDocsRemoteQueryCompliance=cadCmRemoteQueryDocsRemoteQueryCompliance)
| true
| true
|
f718f34ad9ae971fc9eb04e25793ee4524e4cf8e
| 9,062
|
py
|
Python
|
src/generator.py
|
GovernorGecko/ObjFile
|
1b7555447a25655a0436484d1f915f4fb759fb73
|
[
"MIT"
] | null | null | null |
src/generator.py
|
GovernorGecko/ObjFile
|
1b7555447a25655a0436484d1f915f4fb759fb73
|
[
"MIT"
] | null | null | null |
src/generator.py
|
GovernorGecko/ObjFile
|
1b7555447a25655a0436484d1f915f4fb759fb73
|
[
"MIT"
] | null | null | null |
"""
obj generator
Order of faces position/texcoord/normal
https://www.loc.gov/preservation/digital/formats/fdd/fdd000508.shtml
Ka: specifies ambient color, to account for light that is scattered about
the entire scene.
Kd: specifies diffuse color, which typically contributes most of the color
to an object.
Ks: specifies specular color, the color seen where the surface is shiny
and mirror-like.
Ns: defines the focus of specular highlights in the material. Ns values
normally range from 0 to 1000, with a high value resulting in a tight,
concentrated highlight.
Ni: defines the optical density (aka index of refraction) in the current
material. The values can range from 0.001 to 10. A value of 1.0 means that
light does not bend as it passes through an object.
d: specifies a factor for dissolve, how much this material dissolves into
the background. A factor of 1.0 is fully opaque. A factor of 0.0 is
completely transparent.
illum: specifies an illumination model, using a numeric value.
The value 0 represents the simplest illumination model, relying on the Kd
for the material modified by a texture map specified in a map_Kd statement
if present. The compilers of this resource believe that the choice of
illumination model is irrelevant for 3D printing use and is ignored on
import by some software applications. For example, the MTL Loader in
the threejs Javascript library appears to ignore illum statements.
map_Kd: specifies a color texture file to be applied to the diffuse
reflectivity of the material. During rendering, map_Kd values are
multiplied by the Kd values to derive the RGB components.
"""
import os
from shutil import copyfile
from .MultiD.src.triangle import Triangle
from .MultiD.src.vector import Vector3
class Generator():
"""
parameters:
(required)
string name of the Generator, used for saving.
(optional)
string path to image to use for texcoords
string name of the image to use for texcoords
"""
__slots__ = [
"__name", "__image_path", "__image_name",
"__ka", "__kd", "__ks", "__illum", "__ns", "__ni", "__d",
"__faces", "__v", "__vn", "__vt"
]
def __init__(
self, name, image_path="./", image_name=None
):
if not isinstance(name, str):
raise ValueError("Name must be a string.")
elif (
image_name is not None and
not isinstance(image_name, str)
):
raise ValueError("Image Name must be a string.")
elif (
not isinstance(image_path, str) or
not os.path.exists(image_path)
):
raise ValueError("Image Path must exist.")
# Name is used for storing the obj/mtl files
self.__name = name
# Face/Vertex Defaults
self.__faces = []
self.__v = []
self.__vn = []
self.__vt = []
# Store Image
self.__image_name = image_name
self.__image_path = image_path
# Set Material Defaults
self.__ka = Vector3(1.0, 1.0, 1.0)
self.__kd = Vector3(1.0, 1.0, 1.0)
self.__ks = Vector3(0.0, 0.0, 0.0)
self.__ns = 1.0
self.__ni = 1.0
self.__d = 1.0
self.__illum = 1
def __str__(self):
"""
returns:
string representing our obj data.
"""
return (
f"{self.get_obj_as_string()}\n\n"
f"{self.get_mtl_as_string()}"
)
def add_triangle(self, positions, texcoords=None):
"""
parameters:
(required)
List[List[Float, Float, Float]] for Positions
(optional)
List[List[Float, Float]] for Texcoords
"""
# Create a Triangle
triangle = Triangle(positions, texcoords=texcoords)
# Face Data to add to our faces
face_data = []
        # Triangles are made up of three pieces of vertex data
for i in range(0, 3):
# List of Vertexes to Face Indexes
vertex_face_indexes = []
# Positions
vertex_face_indexes.append(
self.__get_index_of_vector_in_list(
triangle.get_positions()[i],
self.__v
)
)
# TexCoords
if texcoords is not None:
vertex_face_indexes.append(
self.__get_index_of_vector_in_list(
triangle.get_texcoords()[i],
self.__vt
)
)
else:
vertex_face_indexes.append('')
# Normals
vertex_face_indexes.append(
self.__get_index_of_vector_in_list(
triangle.get_normals(),
self.__vn
)
)
# Add to Face Data
face_data.append(vertex_face_indexes)
# Add to Faces
self.__faces.append(face_data)
def __get_index_of_vector_in_list(self, vector_data, vector_list):
"""
parameters:
Vector2 or Vector3
List of Vector2s or Vector3s
returns:
int of which face this Vector2 or Vector3 is at.
"""
if not type(vector_data).__name__ == "Vector":
raise ValueError("Vector Data must be a Vector.")
elif not isinstance(vector_list, list):
raise ValueError("Vector List must be a list.")
face_data = None
if vector_data in vector_list:
face_data = vector_list.index(vector_data)
else:
face_data = len(vector_list)
vector_list.append(vector_data)
return face_data + 1
def get_mtl_as_string(self):
"""
returns
string representing our Mtl
"""
# We have an image name?
if self.__image_name is None:
return ""
# Base mtl
mtl_as_string = "newmtl material0\n"
# Ambience
mtl_as_string += "Ka " + " ".join(
map(str, self.__ka.get_list())
) + "\n"
# Diffuse
mtl_as_string += "Kd " + " ".join(
map(str, self.__kd.get_list())
) + "\n"
# Specular
mtl_as_string += "Ks " + " ".join(
map(str, self.__ks.get_list())
) + "\n"
# Return!
return (
f"{mtl_as_string}"
f"Ns {self.__ns}\n"
f"Ni {self.__ni}\n"
f"d {self.__d}\n"
f"illum {self.__illum}\n"
f"map_Kd {self.__image_name}"
)
def get_obj_as_string(self):
"""
returns:
string representing our Obj.
"""
# Base Obj
obj_as_string = (
f"o {self.__name}\n"
)
if self.__image_name is not None:
obj_as_string += f"\nmtllib {self.__name}.mtl\n"
# Positions
obj_as_string += "\nv " + "\nv ".join(
" ".join(map(str, v.get_list())) for v in self.__v
)
# Normals
obj_as_string += "\nvn " + "\nvn ".join(
" ".join(map(str, vn.get_list())) for vn in self.__vn
)
# TexCoords?
if len(self.__vt):
obj_as_string += "\nvt " + "\nvt ".join(
" ".join(map(str, vt.get_list())) for vt in self.__vt
)
# Add Material
obj_as_string += "\n\n"
if self.__image_name is not None:
obj_as_string += "usemtl material0"
else:
obj_as_string += "usemtl Default"
obj_as_string += "\n"
# Iterate Faces
obj_as_string += "f " + "\nf ".join(
[" ".join(
["/".join(map(str, k)) for k in j]
) for j in self.__faces]
)
# Return
return obj_as_string
# def update_mtl(self, ks)
def save(self, path):
"""
parameters:
string path to store the obj file.
"""
# String and path exists?
if (
not isinstance(path, str) or
not os.path.exists(path)
):
raise ValueError(f"{path} doesn't exist!")
# File Path
with open(os.path.join(path, self.__name + ".obj"), "w") as file:
file.writelines(self.get_obj_as_string())
# Have an image?
if self.__image_name is not None:
# Copy the image to our path.
copyfile(
os.path.join(
self.__image_path,
self.__image_name
),
os.path.join(
path,
self.__image_name
)
)
# Create the mtl file
with open(os.path.join(path, self.__name + ".mtl"), "w") as file:
file.writelines(self.get_mtl_as_string())
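# Illustrative usage sketch (not part of the original module); assumes MultiD's
# Triangle accepts three [x, y, z] float lists, and uses no texture:
#
#   gen = Generator("triangle")
#   gen.add_triangle([
#       [0.0, 0.0, 0.0],
#       [1.0, 0.0, 0.0],
#       [0.0, 1.0, 0.0],
#   ])
#   gen.save("./")  # writes ./triangle.obj only (no .mtl without an image)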
| 29.517915
| 78
| 0.543258
|
import os
from shutil import copyfile
from .MultiD.src.triangle import Triangle
from .MultiD.src.vector import Vector3
class Generator():
__slots__ = [
"__name", "__image_path", "__image_name",
"__ka", "__kd", "__ks", "__illum", "__ns", "__ni", "__d",
"__faces", "__v", "__vn", "__vt"
]
def __init__(
self, name, image_path="./", image_name=None
):
if not isinstance(name, str):
raise ValueError("Name must be a string.")
elif (
image_name is not None and
not isinstance(image_name, str)
):
raise ValueError("Image Name must be a string.")
elif (
not isinstance(image_path, str) or
not os.path.exists(image_path)
):
raise ValueError("Image Path must exist.")
self.__name = name
self.__faces = []
self.__v = []
self.__vn = []
self.__vt = []
self.__image_name = image_name
self.__image_path = image_path
self.__ka = Vector3(1.0, 1.0, 1.0)
self.__kd = Vector3(1.0, 1.0, 1.0)
self.__ks = Vector3(0.0, 0.0, 0.0)
self.__ns = 1.0
self.__ni = 1.0
self.__d = 1.0
self.__illum = 1
def __str__(self):
return (
f"{self.get_obj_as_string()}\n\n"
f"{self.get_mtl_as_string()}"
)
def add_triangle(self, positions, texcoords=None):
triangle = Triangle(positions, texcoords=texcoords)
face_data = []
for i in range(0, 3):
vertex_face_indexes = []
vertex_face_indexes.append(
self.__get_index_of_vector_in_list(
triangle.get_positions()[i],
self.__v
)
)
if texcoords is not None:
vertex_face_indexes.append(
self.__get_index_of_vector_in_list(
triangle.get_texcoords()[i],
self.__vt
)
)
else:
vertex_face_indexes.append('')
vertex_face_indexes.append(
self.__get_index_of_vector_in_list(
triangle.get_normals(),
self.__vn
)
)
face_data.append(vertex_face_indexes)
self.__faces.append(face_data)
def __get_index_of_vector_in_list(self, vector_data, vector_list):
if not type(vector_data).__name__ == "Vector":
raise ValueError("Vector Data must be a Vector.")
elif not isinstance(vector_list, list):
raise ValueError("Vector List must be a list.")
face_data = None
if vector_data in vector_list:
face_data = vector_list.index(vector_data)
else:
face_data = len(vector_list)
vector_list.append(vector_data)
return face_data + 1
def get_mtl_as_string(self):
if self.__image_name is None:
return ""
mtl_as_string = "newmtl material0\n"
mtl_as_string += "Ka " + " ".join(
map(str, self.__ka.get_list())
) + "\n"
mtl_as_string += "Kd " + " ".join(
map(str, self.__kd.get_list())
) + "\n"
mtl_as_string += "Ks " + " ".join(
map(str, self.__ks.get_list())
) + "\n"
return (
f"{mtl_as_string}"
f"Ns {self.__ns}\n"
f"Ni {self.__ni}\n"
f"d {self.__d}\n"
f"illum {self.__illum}\n"
f"map_Kd {self.__image_name}"
)
def get_obj_as_string(self):
obj_as_string = (
f"o {self.__name}\n"
)
if self.__image_name is not None:
obj_as_string += f"\nmtllib {self.__name}.mtl\n"
obj_as_string += "\nv " + "\nv ".join(
" ".join(map(str, v.get_list())) for v in self.__v
)
obj_as_string += "\nvn " + "\nvn ".join(
" ".join(map(str, vn.get_list())) for vn in self.__vn
)
if len(self.__vt):
obj_as_string += "\nvt " + "\nvt ".join(
" ".join(map(str, vt.get_list())) for vt in self.__vt
)
obj_as_string += "\n\n"
if self.__image_name is not None:
obj_as_string += "usemtl material0"
else:
obj_as_string += "usemtl Default"
obj_as_string += "\n"
obj_as_string += "f " + "\nf ".join(
[" ".join(
["/".join(map(str, k)) for k in j]
) for j in self.__faces]
)
return obj_as_string
def save(self, path):
if (
not isinstance(path, str) or
not os.path.exists(path)
):
raise ValueError(f"{path} doesn't exist!")
# File Path
with open(os.path.join(path, self.__name + ".obj"), "w") as file:
file.writelines(self.get_obj_as_string())
# Have an image?
if self.__image_name is not None:
# Copy the image to our path.
copyfile(
os.path.join(
self.__image_path,
self.__image_name
),
os.path.join(
path,
self.__image_name
)
)
# Create the mtl file
with open(os.path.join(path, self.__name + ".mtl"), "w") as file:
file.writelines(self.get_mtl_as_string())
| true
| true
|
f718f363b4dff8ac31c80d174fd8a06173a11675
| 10,934
|
py
|
Python
|
frankly/websocket/http.py
|
franklyinc-public/frankly-python
|
50a1d2dead3a55e63d105ddf9e5b0cdc233c4b4e
|
[
"MIT"
] | 1
|
2018-08-31T16:12:39.000Z
|
2018-08-31T16:12:39.000Z
|
frankly/websocket/http.py
|
franklyinc-public/frankly-python
|
50a1d2dead3a55e63d105ddf9e5b0cdc233c4b4e
|
[
"MIT"
] | null | null | null |
frankly/websocket/http.py
|
franklyinc-public/frankly-python
|
50a1d2dead3a55e63d105ddf9e5b0cdc233c4b4e
|
[
"MIT"
] | null | null | null |
##
# The MIT License (MIT)
#
# Copyright (c) 2015 Frankly Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
##
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from http_parser.http import HttpStream
from http_parser.http import NoMoreData
from http_parser.http import HTTP_REQUEST
from http_parser.http import HTTP_RESPONSE
from http_parser.parser import HttpParser
from http_parser.reader import SocketReader
from http_parser.util import IOrderedDict as HttpFields
from http_parser.util import status_reasons as reasons
import six
from six.moves.urllib.parse import urlencode
from six.moves.urllib.parse import urlparse
from six.moves.urllib.parse import urlunparse
from six.moves.urllib.parse import parse_qs
from wsgiref.util import guess_scheme
from . import net
__all__ = [
'HTTP_10',
'HTTP_11',
'HttpSocket',
'HttpConnection',
'HttpClient',
'HttpFields',
'HttpServer',
'bind',
'connect',
'reasons',
]
HTTP_10 = 'HTTP/1.0'
HTTP_11 = 'HTTP/1.1'
def parse_query_value(value):
if isinstance(value, list):
if len(value) == 0: return None
if len(value) == 1: return parse_query_value(value[0])
return [parse_query_value(x) for x in value]
if value == '': return None
if value == 'true': return True
if value == 'false': return False
return value
def format_query_value(value):
if value is None: return ''
if value is True: return 'true'
if value is False: return 'false'
if isinstance(value, list): return [format_query_value(x) for x in value]
if isinstance(value, tuple): return [format_query_value(x) for x in value]
return value
class HttpSocket(object):
def __init__(self, socket):
self.socket = socket
def __enter__(self):
return self
def __exit__(self, *args):
try:
self.close()
except Exception as e:
pass
def close(self):
if self.socket is not None:
try:
self.socket.close()
finally:
self.socket = None
def detach(self):
socket, self.socket = self.socket, None
return socket
def fileno(self):
return -1 if self.socket is None else self.socket.fileno()
@property
def timeout(self):
return self.socket.gettimeout()
@timeout.setter
def timeout(self, value):
self.socket.settimeout(value)
class HttpConnection(HttpSocket):
def __init__(self, socket=None, host=None):
HttpSocket.__init__(self, socket)
self.version = HTTP_11
self.host = host
self.reader = None if socket is None else SocketReader(self.socket)
def connect(self, host, port='http', timeout=None, secure=None, **kwargs):
assert self.socket is None, "http connection already established"
if secure is None and port == 'https':
secure = True
self.socket = net.connect(host, port, timeout=timeout, secure=secure, **kwargs)
self.reader = SocketReader(self.socket)
self.host = host
if port not in ('http', 'https'):
self.host += ':%s' % port
def close(self):
try:
if self.reader is not None:
self.reader.close()
except Exception:
pass
finally:
self.reader = None
self.host = None
HttpSocket.close(self)
def shutdown(self):
self.socket.shutdown()
def recv(self):
try:
stream = HttpStream(self.reader, kind=HTTP_RESPONSE, parser_class=HttpParser, decompress=True)
status = stream.status_code()
version = stream.version()
fields = stream.headers()
content = stream.body_file()
self.version = 'HTTP/%s.%s' % version
return status, fields, content
except NoMoreData:
pass
def send(self, method, path, query=None, fragment=None, fields=None, content=None, version=None):
if fields is None:
fields = HttpFields()
elif not isinstance(fields, HttpFields):
fields = HttpFields(fields)
if query is None:
query = { }
if content is None:
content = b''
if version is None:
version = self.version
assert version in (HTTP_10, HTTP_11), "invalid http version: %s" % version
fields.setdefault('Content-Length', six.text_type(len(content)))
fields.setdefault('Host', self.host)
for k, v in six.iteritems(dict(query)):
query[k] = format_query_value(v)
if six.PY3:
query = urlencode(query, encoding='utf-8')
else:
query = urlencode(query)
header = ''
header += method
header += ' '
header += urlunparse(('', '', path, '', query, fragment if bool(fragment) else ''))
header += ' %s\r\n' % version
header += ''.join('%s: %s\r\n' % (k, v) for k, v in six.iteritems(fields))
header += '\r\n'
header = header.encode('utf-8')
self.socket.sendall(header + content)
def request(self, *args, **kwargs):
self.send(*args, **kwargs)
return self.recv()
def delete(self, *args, **kwargs):
return self.request('DELETE', *args, **kwargs)
def get(self, *args, **kwargs):
return self.request('GET', *args, **kwargs)
def head(self, *args, **kwargs):
return self.request('HEAD', *args, **kwargs)
def options(self, *args, **kwargs):
return self.request('OPTIONS', *args, **kwargs)
def post(self, *args, **kwargs):
return self.request('POST', *args, **kwargs)
def put(self, *args, **kwargs):
return self.request('PUT', *args, **kwargs)
class HttpClient(HttpSocket):
def __init__(self, socket, address, server):
HttpSocket.__init__(self, socket)
self.address = address
self.reader = SocketReader(self.socket)
self.server = server
self.version = HTTP_11
def __iter__(self):
while True:
request = self.recv()
if request is None:
break
yield request
def iter_wsgi(self):
while True:
environ = self.recv(True)
if environ is None:
break
yield environ
def close(self):
try:
if self.reader is not None:
self.reader.close()
except Exception:
pass
finally:
self.reader = None
HttpSocket.close(self)
def recv(self, wsgi=False):
try:
stream = HttpStream(self.reader, kind=HTTP_REQUEST, parser_class=HttpParser, decompress=True)
if bool(wsgi):
environ = stream.wsgi_environ()
environ['wsgi.url_scheme'] = guess_scheme(environ)
environ['wsgi.input'] = stream.body_file()
environ['wsgi.socket'] = self.socket
return environ
# BUG:
            # http-parser has an issue here: if we call 'method' before 'headers',
            # an invalid method name is returned...
fields = stream.headers()
method = stream.method()
url = stream.url()
version = stream.version()
content = stream.body_file()
url = urlparse(url)
path = url.path
query = parse_qs(url.query, keep_blank_values=True)
fragment = url.fragment
for k, v in six.iteritems(dict(query)):
query[k] = parse_query_value(v)
self.version = 'HTTP/%s.%s' % version
return method, path, query, fragment, fields, content
except NoMoreData:
pass
def send(self, status, fields=None, content=None, version=None):
if fields is None:
fields = { }
elif not isinstance(fields, HttpFields):
fields = HttpFields(fields)
if content is None:
content = b''
if version is None:
version = self.version
assert version in (HTTP_10, HTTP_11), "invalid http version: %s" % version
fields.setdefault('Content-Length', six.text_type(len(content)))
if self.server is not None:
fields.setdefault('Server', self.server)
if isinstance(status, int):
status = '%s %s' % (status, reasons[status])
header = ''
header += '%s %s\r\n' % (version, status)
header += ''.join('%s: %s\r\n' % (k, v) for k, v in six.iteritems(fields))
header += '\r\n'
header = header.encode('utf-8')
return self.socket.sendall(header + content)
class HttpServer(HttpSocket):
def __init__(self, socket=None, server='maestro-http'):
HttpSocket.__init__(self, socket)
self.server = server
def __iter__(self):
while True:
yield self.accept()
def accept(self):
assert self.socket is not None, "http server not bound to any network interface"
socket, address = self.socket.accept()
socket.settimeout(self.socket.gettimeout())
return HttpClient(socket, address, self.server)
def bind(self, *args, **kwargs):
assert self.socket is None, "http server already bound to network interface"
self.socket = net.bind(*args, **kwargs)
def bind(*args, **kwargs):
server = HttpServer()
try:
server.bind(*args, **kwargs)
except:
server.close()
raise
return server
def connect(*args, **kwargs):
conn = HttpConnection()
try:
conn.connect(*args, **kwargs)
except:
conn.close()
raise
return conn
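# Illustrative usage sketch (not part of the original module): a minimal client
# round trip with the helpers above, against a hypothetical host; recv() may
# return None if the peer closes the connection without sending data:
#
#   conn = connect('example.com', 'https')
#   status, fields, body = conn.get('/', fields={'Accept': 'text/html'})
#   print(status, fields.get('Content-Type'))
#   conn.close()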
| 30.541899
| 107
| 0.605908
|
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from http_parser.http import HttpStream
from http_parser.http import NoMoreData
from http_parser.http import HTTP_REQUEST
from http_parser.http import HTTP_RESPONSE
from http_parser.parser import HttpParser
from http_parser.reader import SocketReader
from http_parser.util import IOrderedDict as HttpFields
from http_parser.util import status_reasons as reasons
import six
from six.moves.urllib.parse import urlencode
from six.moves.urllib.parse import urlparse
from six.moves.urllib.parse import urlunparse
from six.moves.urllib.parse import parse_qs
from wsgiref.util import guess_scheme
from . import net
__all__ = [
'HTTP_10',
'HTTP_11',
'HttpSocket',
'HttpConnection',
'HttpClient',
'HttpFields',
'HttpServer',
'bind',
'connect',
'reasons',
]
HTTP_10 = 'HTTP/1.0'
HTTP_11 = 'HTTP/1.1'
def parse_query_value(value):
if isinstance(value, list):
if len(value) == 0: return None
if len(value) == 1: return parse_query_value(value[0])
return [parse_query_value(x) for x in value]
if value == '': return None
if value == 'true': return True
if value == 'false': return False
return value
def format_query_value(value):
if value is None: return ''
if value is True: return 'true'
if value is False: return 'false'
if isinstance(value, list): return [format_query_value(x) for x in value]
if isinstance(value, tuple): return [format_query_value(x) for x in value]
return value
class HttpSocket(object):
def __init__(self, socket):
self.socket = socket
def __enter__(self):
return self
def __exit__(self, *args):
try:
self.close()
except Exception as e:
pass
def close(self):
if self.socket is not None:
try:
self.socket.close()
finally:
self.socket = None
def detach(self):
socket, self.socket = self.socket, None
return socket
def fileno(self):
return -1 if self.socket is None else self.socket.fileno()
@property
def timeout(self):
return self.socket.gettimeout()
@timeout.setter
def timeout(self, value):
self.socket.settimeout(value)
class HttpConnection(HttpSocket):
def __init__(self, socket=None, host=None):
HttpSocket.__init__(self, socket)
self.version = HTTP_11
self.host = host
self.reader = None if socket is None else SocketReader(self.socket)
def connect(self, host, port='http', timeout=None, secure=None, **kwargs):
assert self.socket is None, "http connection already established"
if secure is None and port == 'https':
secure = True
self.socket = net.connect(host, port, timeout=timeout, secure=secure, **kwargs)
self.reader = SocketReader(self.socket)
self.host = host
if port not in ('http', 'https'):
self.host += ':%s' % port
def close(self):
try:
if self.reader is not None:
self.reader.close()
except Exception:
pass
finally:
self.reader = None
self.host = None
HttpSocket.close(self)
def shutdown(self):
self.socket.shutdown()
def recv(self):
try:
stream = HttpStream(self.reader, kind=HTTP_RESPONSE, parser_class=HttpParser, decompress=True)
status = stream.status_code()
version = stream.version()
fields = stream.headers()
content = stream.body_file()
self.version = 'HTTP/%s.%s' % version
return status, fields, content
except NoMoreData:
pass
def send(self, method, path, query=None, fragment=None, fields=None, content=None, version=None):
if fields is None:
fields = HttpFields()
elif not isinstance(fields, HttpFields):
fields = HttpFields(fields)
if query is None:
query = { }
if content is None:
content = b''
if version is None:
version = self.version
assert version in (HTTP_10, HTTP_11), "invalid http version: %s" % version
fields.setdefault('Content-Length', six.text_type(len(content)))
fields.setdefault('Host', self.host)
for k, v in six.iteritems(dict(query)):
query[k] = format_query_value(v)
if six.PY3:
query = urlencode(query, encoding='utf-8')
else:
query = urlencode(query)
header = ''
header += method
header += ' '
header += urlunparse(('', '', path, '', query, fragment if bool(fragment) else ''))
header += ' %s\r\n' % version
header += ''.join('%s: %s\r\n' % (k, v) for k, v in six.iteritems(fields))
header += '\r\n'
header = header.encode('utf-8')
self.socket.sendall(header + content)
def request(self, *args, **kwargs):
self.send(*args, **kwargs)
return self.recv()
def delete(self, *args, **kwargs):
return self.request('DELETE', *args, **kwargs)
def get(self, *args, **kwargs):
return self.request('GET', *args, **kwargs)
def head(self, *args, **kwargs):
return self.request('HEAD', *args, **kwargs)
def options(self, *args, **kwargs):
return self.request('OPTIONS', *args, **kwargs)
def post(self, *args, **kwargs):
return self.request('POST', *args, **kwargs)
def put(self, *args, **kwargs):
return self.request('PUT', *args, **kwargs)
class HttpClient(HttpSocket):
def __init__(self, socket, address, server):
HttpSocket.__init__(self, socket)
self.address = address
self.reader = SocketReader(self.socket)
self.server = server
self.version = HTTP_11
def __iter__(self):
while True:
request = self.recv()
if request is None:
break
yield request
def iter_wsgi(self):
while True:
environ = self.recv(True)
if environ is None:
break
yield environ
def close(self):
try:
if self.reader is not None:
self.reader.close()
except Exception:
pass
finally:
self.reader = None
HttpSocket.close(self)
def recv(self, wsgi=False):
try:
stream = HttpStream(self.reader, kind=HTTP_REQUEST, parser_class=HttpParser, decompress=True)
if bool(wsgi):
environ = stream.wsgi_environ()
environ['wsgi.url_scheme'] = guess_scheme(environ)
environ['wsgi.input'] = stream.body_file()
environ['wsgi.socket'] = self.socket
return environ
fields = stream.headers()
method = stream.method()
url = stream.url()
version = stream.version()
content = stream.body_file()
url = urlparse(url)
path = url.path
query = parse_qs(url.query, keep_blank_values=True)
fragment = url.fragment
for k, v in six.iteritems(dict(query)):
query[k] = parse_query_value(v)
self.version = 'HTTP/%s.%s' % version
return method, path, query, fragment, fields, content
except NoMoreData:
pass
def send(self, status, fields=None, content=None, version=None):
if fields is None:
fields = { }
elif not isinstance(fields, HttpFields):
fields = HttpFields(fields)
if content is None:
content = b''
if version is None:
version = self.version
assert version in (HTTP_10, HTTP_11), "invalid http version: %s" % version
fields.setdefault('Content-Length', six.text_type(len(content)))
if self.server is not None:
fields.setdefault('Server', self.server)
if isinstance(status, int):
status = '%s %s' % (status, reasons[status])
header = ''
header += '%s %s\r\n' % (version, status)
header += ''.join('%s: %s\r\n' % (k, v) for k, v in six.iteritems(fields))
header += '\r\n'
header = header.encode('utf-8')
return self.socket.sendall(header + content)
class HttpServer(HttpSocket):
def __init__(self, socket=None, server='maestro-http'):
HttpSocket.__init__(self, socket)
self.server = server
def __iter__(self):
while True:
yield self.accept()
def accept(self):
assert self.socket is not None, "http server not bound to any network interface"
socket, address = self.socket.accept()
socket.settimeout(self.socket.gettimeout())
return HttpClient(socket, address, self.server)
def bind(self, *args, **kwargs):
assert self.socket is None, "http server already bound to network interface"
self.socket = net.bind(*args, **kwargs)
def bind(*args, **kwargs):
server = HttpServer()
try:
server.bind(*args, **kwargs)
except:
server.close()
raise
return server
def connect(*args, **kwargs):
conn = HttpConnection()
try:
conn.connect(*args, **kwargs)
except:
conn.close()
raise
return conn
| true
| true
|