hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c34abec34bf7691a079a5f3bedd254cf3a0d57e | 842 | py | Python | main.py | AaronDewes/lnd-lnurl | 8d641d9076ac2faca3244bafcd6c63e8574c1943 | [
"MIT"
] | 6 | 2021-12-05T17:29:00.000Z | 2022-01-14T21:58:44.000Z | main.py | AaronDewes/lnd-lnurl | 8d641d9076ac2faca3244bafcd6c63e8574c1943 | [
"MIT"
] | 4 | 2021-12-05T18:41:35.000Z | 2022-02-10T12:10:00.000Z | main.py | AaronDewes/lnd-lnurl | 8d641d9076ac2faca3244bafcd6c63e8574c1943 | [
"MIT"
] | 3 | 2021-12-05T17:29:52.000Z | 2022-01-25T18:30:17.000Z | import argparse
import sys
import configparser
from lndlnurl import LndLnurl
config = configparser.ConfigParser()
def main():
argument_parser = get_argument_parser()
arguments = argument_parser.parse_args()
try:
with open(arguments.configfile) as f:
config.read_file(f)
except IOError:
raise ValueError('No configuration found')
return LndLnurl(config, arguments).run()
def get_argument_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
dest="LNURL",
help="The LNURL to parse",
)
parser.add_argument(
"--config",
"-c",
default="lndlnurl.conf",
dest="configfile",
help="location of configuration file"
)
return parser
try:
success = main()
except Exception as e:
print("Error: %s" % e)
| 22.157895 | 50 | 0.643705 | import argparse
import sys
import configparser
from lndlnurl import LndLnurl
config = configparser.ConfigParser()
def main():
argument_parser = get_argument_parser()
arguments = argument_parser.parse_args()
try:
with open(arguments.configfile) as f:
config.read_file(f)
except IOError:
raise ValueError('No configuration found')
return LndLnurl(config, arguments).run()
def get_argument_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
dest="LNURL",
help="The LNURL to parse",
)
parser.add_argument(
"--config",
"-c",
default="lndlnurl.conf",
dest="configfile",
help="location of configuration file"
)
return parser
try:
success = main()
except Exception as e:
print("Error: %s" % e)
| true | true |
1c34abed7ed01b108334ab107f73a52326de0062 | 1,334 | py | Python | src/__main__.py | gbmarc1/ExploranceAnonymizer | ff3616ef929269b5c8420266b3c32225cfa4b4a3 | [
"MIT"
] | null | null | null | src/__main__.py | gbmarc1/ExploranceAnonymizer | ff3616ef929269b5c8420266b3c32225cfa4b4a3 | [
"MIT"
] | null | null | null | src/__main__.py | gbmarc1/ExploranceAnonymizer | ff3616ef929269b5c8420266b3c32225cfa4b4a3 | [
"MIT"
] | null | null | null | from argparse import ArgumentParser
from pathlib import Path
import pandas as pd
import spacy
from tqdm import tqdm
def get_parser():
parser = ArgumentParser()
parser.add_argument("--file", type=Path)
return parser
def replace_entities(text, ents):
new_text = ''
start = 0
for e in sorted(ents, key=lambda x: x.start):
new_text += text[start:e.start_char] + '<' + e.label_ + '>'
start = e.end_char
new_text += text[start:]
return new_text
def anonymize(data):
nlp = spacy.load("en_core_web_sm")
anonimized_data, entities, raw_data = [], [], []
valid_entities = ['PERSON']
for d in tqdm(data.iloc[:, 0]):
raw_data.append(d)
doc = nlp(d, disable=["tagger", "parser"])
ents = [e for e in doc.ents if e.label_ in valid_entities]
anonimized_data.append(replace_entities(d, ents))
entities.append(str([((e.start, e.end), e.text+'->'+e.label_) for e in ents]))
return pd.DataFrame(
{"Raw": raw_data, "Anonymized": anonimized_data, "Entities": entities}
)
def main(args):
args = get_parser().parse_args(args)
data = pd.read_csv(args.file, header=None)
data = anonymize(data)
data.to_csv(args.file.parent/('anonymized_'+args.file.name), index=False)
if __name__ == "__main__":
main(None)
| 25.169811 | 86 | 0.63943 | from argparse import ArgumentParser
from pathlib import Path
import pandas as pd
import spacy
from tqdm import tqdm
def get_parser():
parser = ArgumentParser()
parser.add_argument("--file", type=Path)
return parser
def replace_entities(text, ents):
new_text = ''
start = 0
for e in sorted(ents, key=lambda x: x.start):
new_text += text[start:e.start_char] + '<' + e.label_ + '>'
start = e.end_char
new_text += text[start:]
return new_text
def anonymize(data):
nlp = spacy.load("en_core_web_sm")
anonimized_data, entities, raw_data = [], [], []
valid_entities = ['PERSON']
for d in tqdm(data.iloc[:, 0]):
raw_data.append(d)
doc = nlp(d, disable=["tagger", "parser"])
ents = [e for e in doc.ents if e.label_ in valid_entities]
anonimized_data.append(replace_entities(d, ents))
entities.append(str([((e.start, e.end), e.text+'->'+e.label_) for e in ents]))
return pd.DataFrame(
{"Raw": raw_data, "Anonymized": anonimized_data, "Entities": entities}
)
def main(args):
args = get_parser().parse_args(args)
data = pd.read_csv(args.file, header=None)
data = anonymize(data)
data.to_csv(args.file.parent/('anonymized_'+args.file.name), index=False)
if __name__ == "__main__":
main(None)
| true | true |
1c34abf6ef901b6d9bcbef4062f98bddec85587d | 15,640 | py | Python | scripts/slave/recipe_modules/archive/api.py | yjbanov/chromium_build | 22e3872f14dbf367cd787caa638f3ac948eac7d7 | [
"BSD-3-Clause"
] | null | null | null | scripts/slave/recipe_modules/archive/api.py | yjbanov/chromium_build | 22e3872f14dbf367cd787caa638f3ac948eac7d7 | [
"BSD-3-Clause"
] | null | null | null | scripts/slave/recipe_modules/archive/api.py | yjbanov/chromium_build | 22e3872f14dbf367cd787caa638f3ac948eac7d7 | [
"BSD-3-Clause"
] | 1 | 2020-07-23T11:05:06.000Z | 2020-07-23T11:05:06.000Z | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
from recipe_engine import recipe_api
# TODO(machenbach): Chromium specific data should move out of the archive
# module, into e.g. the chromium test configs.
EXCLUDED_FILES_ALL_PLATFORMS = [
'.landmines',
'.ninja_deps',
'.ninja_log',
'gen',
'obj',
]
# Excluded files on specific platforms.
EXCLUDED_FILES = {
'win': set(EXCLUDED_FILES_ALL_PLATFORMS + [
'cfinstaller_archive',
'installer_archive',
'lib',
]),
'mac': set(EXCLUDED_FILES_ALL_PLATFORMS + [
'.deps',
'App Shim Socket',
# We copy the framework into the app bundle, we don't need the second
# copy outside the app.
# TODO(mark): Since r28431, the copy in the build directory is actually
# used by tests. Putting two copies in the .zip isn't great, so maybe
# we can find another workaround.
# 'Chromium Framework.framework',
# 'Google Chrome Framework.framework',
# We copy the Helper into the app bundle, we don't need the second
# copy outside the app.
'Chromium Helper.app',
'Google Chrome Helper.app',
# We don't need the arm bits v8 builds.
'd8_arm',
'v8_shell_arm',
'lib',
'obj.host',
'obj.target',
# pdfsqueeze is a build helper, no need to copy it to testers.
'pdfsqueeze',
]),
'linux': set(EXCLUDED_FILES_ALL_PLATFORMS + [
'.deps',
# Scons build cruft.
'.sconsign.dblite',
# Intermediate build directories (full of .o, .d, etc.).
'appcache',
'glue',
'lib.host',
# Build helper, not needed on testers.
'mksnapshot',
'obj.host',
'obj.target',
'src',
]),
}
# Pattern for excluded files on specific platforms.
EXCLUDED_FILES_PATTERN = {
'win': re.compile(r'^.+\.(obj|lib|pch|exp)$'),
'mac': re.compile(r'^.+\.(a)$'),
'linux': re.compile(r'^.+\.(o|a|d)$'),
}
# Regular expression to identify a Git hash.
GIT_COMMIT_HASH_RE = re.compile(r'[a-zA-Z0-9]{40}')
# The Google Storage metadata key for the full commit position.
GS_COMMIT_POSITION_KEY = 'Cr-Commit-Position'
# The Google Storage metadata key for the commit position number.
GS_COMMIT_POSITION_NUMBER_KEY = 'Cr-Commit-Position-Number'
# The Google Storage metadata key for the Git commit hash.
GS_GIT_COMMIT_KEY = 'Cr-Git-Commit'
class ArchiveApi(recipe_api.RecipeApi):
"""Chromium specific module for zipping, uploading and downloading build
artifacts implemented as a wrapper around zip_build.py script.
If you need to upload or download build artifacts (or any other files) for
something other than Chromium flavor, consider using 'zip' + 'gsutil' or
'isolate' modules instead.
"""
def zip_and_upload_build(
self, step_name, target, build_url=None, src_dir=None,
build_revision=None, cros_board=None, package_dsym_files=False,
exclude_files=None, **kwargs):
"""Returns a step invoking zip_build.py to zip up a Chromium build.
If build_url is specified, also uploads the build."""
args = ['--target', target]
if build_url:
args.extend(['--build-url', build_url])
if build_revision:
args.extend(['--build_revision', build_revision])
elif src_dir:
args.extend(['--src-dir', src_dir])
if cros_board:
args.extend(['--cros-board', cros_board])
if package_dsym_files:
args.append('--package-dsym-files')
if exclude_files:
args.extend(['--exclude-files', exclude_files])
if 'gs_acl' in self.m.properties:
args.extend(['--gs-acl', self.m.properties['gs_acl']])
properties_json = self.m.json.dumps(self.m.properties.legacy())
args.extend(['--factory-properties', properties_json,
'--build-properties', properties_json])
kwargs['allow_subannotations'] = True
self.m.python(
step_name,
self.m.path['build'].join('scripts', 'slave', 'zip_build.py'),
args,
infra_step=True,
**kwargs
)
def _cf_should_package_file(self, filename):
"""Returns true if the file should be a part of the resulting archive."""
if EXCLUDED_FILES_PATTERN[self.m.platform.name].match(filename):
return False
# Skip files that we don't care about. Mostly directories.
if filename in EXCLUDED_FILES[self.m.platform.name]:
return False
return True
def _get_commit_position(self, update_properties, primary_project):
"""Returns the commit position of the project (or the specified primary
project).
"""
if primary_project:
key = 'got_%s_revision_cp' % primary_project
else:
key = 'got_revision_cp'
return update_properties[key]
def _get_git_commit(self, update_properties, primary_project):
"""Returns: (str/None) the git commit hash for a given project.
Attempts to identify the git commit hash for a given project. If
'primary_project' is None, or if there is no git commit hash for the
specified primary project, the checkout-wide commit hash will be used.
If none of the candidate configurations are present, the value None will be
returned.
"""
if primary_project:
commit = update_properties.get('got_%s_revision_git' % primary_project)
if commit:
return commit
commit = update_properties.get('got_%s_revision' % primary_project)
if commit and GIT_COMMIT_HASH_RE.match(commit):
return commit
commit = update_properties.get('got_revision_git')
if commit:
return commit
commit = update_properties.get('got_revision')
if commit and GIT_COMMIT_HASH_RE.match(commit):
return commit
return None
def _get_comparable_upload_path_for_sort_key(self, branch, number):
"""Returns a sortable string corresponding to the commit position."""
if branch and branch != 'refs/heads/master':
branch = branch.replace('/', '_')
return '%s-%s' % (branch, number)
return str(number)
def clusterfuzz_archive(
self, build_dir, update_properties, gs_bucket,
archive_prefix, archive_subdir_suffix='', gs_acl=None,
revision_dir=None, primary_project=None,
fixed_staging_dir=False, **kwargs):
# TODO(machenbach): Merge revision_dir and primary_project. The
# revision_dir is only used for building the archive name while the
# primary_project is authoritative for the commit position.
"""Archives and uploads a build to google storage.
The build is filtered by a list of file exclusions and then zipped. It is
uploaded to google storage with some metadata about the commit position
and revision attached. The zip file follows the naming pattern used by
clusterfuzz. The file pattern is:
<archive name>-<platform>-<target><optional component>-<sort-key>.zip
Example: cool-project-linux-release-refs_heads_b1-12345.zip
The archive name is "cool-project" and there's no component build. The
commit is on a branch called b1 at commit position number 12345.
Example: cool-project-mac-debug-x10-component-234.zip
The archive name is "cool-project" and the component's name is "x10". The
component is checked out in branch master with commit position number 234.
Args:
build_dir: The absolute path to the build output directory, e.g.
[slave-build]/src/out/Release
update_properties: The properties from the bot_update step (containing
commit information)
gs_bucket: Name of the google storage bucket to upload to
archive_prefix: Prefix of the archive zip file
archive_subdir_suffix: Optional suffix to the google storage subdirectory
name that contains the archive files
gs_acl: ACL used for the file on google storage
revision_dir: Optional component name if the main revision for this
archive is a component revision
primary_project: Optional project name for specifying the revision of the
checkout
fixed_staging_dir: Use a fixed directory on the same drive rather than a
temp dir, which can lead to problems on windows.
"""
target = self.m.path.split(build_dir)[-1]
commit_position = self._get_commit_position(
update_properties, primary_project)
cp_branch, cp_number = self.m.commit_position.parse(commit_position)
build_git_commit = self._get_git_commit(update_properties, primary_project)
if fixed_staging_dir:
staging_dir = self.m.path['slave_build'].join('chrome_staging')
self.m.file.rmtree('purge staging dir', staging_dir)
self.m.file.makedirs('create staging dir', staging_dir)
else:
staging_dir = self.m.path.mkdtemp('chrome_staging')
# Build the list of files to archive.
zip_file_list = [f for f in self.m.file.listdir('build_dir', build_dir)
if self._cf_should_package_file(f)]
# Use the legacy platform name as Clusterfuzz has some expectations on
# this (it only affects Windows, where it replace 'win' by 'win32').
pieces = [self.legacy_platform_name(), target.lower()]
if archive_subdir_suffix:
pieces.append(archive_subdir_suffix)
subdir = '-'.join(pieces)
# Components like v8 get a <name>-v8-component-<revision> infix.
component = ''
if revision_dir:
component = '-%s-component' % revision_dir
sortkey_path = self._get_comparable_upload_path_for_sort_key(
cp_branch, cp_number)
zip_file_base_name = '%s-%s-%s%s-%s' % (archive_prefix,
self.legacy_platform_name(),
target.lower(),
component,
sortkey_path)
zip_file_name = '%s.zip' % zip_file_base_name
self.m.python(
'zipping',
self.resource('zip_archive.py'),
[
staging_dir,
zip_file_base_name,
self.m.json.input(zip_file_list),
build_dir,
],
infra_step=True,
**kwargs
)
zip_file = staging_dir.join(zip_file_name)
gs_metadata = {
GS_COMMIT_POSITION_NUMBER_KEY: cp_number,
}
if commit_position:
gs_metadata[GS_COMMIT_POSITION_KEY] = commit_position
if build_git_commit:
gs_metadata[GS_GIT_COMMIT_KEY] = build_git_commit
gs_args = []
if gs_acl:
gs_args.extend(['-a', gs_acl])
self.m.gsutil.upload(
zip_file,
gs_bucket,
"/".join([subdir, zip_file_name]),
args=gs_args,
metadata=gs_metadata,
use_retry_wrapper=False,
)
self.m.file.remove(zip_file_name, zip_file)
def download_and_unzip_build(
self, step_name, target, build_url, src_dir=None,
build_revision=None, build_archive_url=None, **kwargs):
"""Returns a step invoking extract_build.py to download and unzip
a Chromium build."""
args = ['--target', target]
if build_archive_url:
args.extend(['--build-archive-url', build_archive_url])
else:
args.extend(['--build-url', build_url])
if build_revision:
args.extend(['--build_revision', build_revision])
elif src_dir:
args.extend(['--src-dir', src_dir])
properties = (
('mastername', '--master-name'),
('buildnumber', '--build-number'),
('parent_builddir', '--parent-build-dir'),
('parentname', '--parent-builder-name'),
('parentslavename', '--parent-slave-name'),
('parent_buildnumber', '--parent-build-number'),
('webkit_dir', '--webkit-dir'),
('revision_dir', '--revision-dir'),
)
for property_name, switch_name in properties:
if self.m.properties.get(property_name):
args.extend([switch_name, self.m.properties[property_name]])
# TODO(phajdan.jr): Always halt on missing build.
if self.m.properties.get('halt_on_missing_build'): # pragma: no cover
args.append('--halt-on-missing-build')
self.m.python(
step_name,
self.m.path['build'].join('scripts', 'slave', 'extract_build.py'),
args,
infra_step=True,
**kwargs
)
def legacy_platform_name(self):
"""Replicates the behavior of PlatformName() in chromium_utils.py."""
if self.m.platform.is_win:
return 'win32'
return self.m.platform.name
def _legacy_url(self, is_download, gs_bucket_name, extra_url_components):
"""Computes a build_url suitable for uploading a zipped Chromium
build to Google Storage.
The reason this is named 'legacy' is that there are a large number
of dependencies on the exact form of this URL. The combination of
zip_build.py, extract_build.py, slave_utils.py, and runtest.py
require that:
* The platform name be exactly one of 'win32', 'mac', or 'linux'
* The upload URL only name the directory on GS into which the
build goes (zip_build.py computes the name of the file)
* The download URL contain the unversioned name of the zip archive
* The revision on the builder and tester machines be exactly the
same
There were too many dependencies to tease apart initially, so this
function simply emulates the form of the URL computed by the
underlying scripts.
extra_url_components, if specified, should be a string without a
trailing '/' which is inserted in the middle of the URL.
The builder_name, or parent_buildername, is always automatically
inserted into the URL."""
result = ('gs://' + gs_bucket_name)
if extra_url_components:
result += ('/' + extra_url_components)
if is_download:
result += ('/' + self.m.properties['parent_buildername'] + '/' +
'full-build-' + self.legacy_platform_name() +
'.zip')
else:
result += '/' + self.m.properties['buildername']
return result
def legacy_upload_url(self, gs_bucket_name, extra_url_components=None):
"""Returns a url suitable for uploading a Chromium build to Google
Storage.
extra_url_components, if specified, should be a string without a
trailing '/' which is inserted in the middle of the URL.
The builder_name, or parent_buildername, is always automatically
inserted into the URL."""
return self._legacy_url(False, gs_bucket_name, extra_url_components)
def legacy_download_url(self, gs_bucket_name, extra_url_components=None):
"""Returns a url suitable for downloading a Chromium build from
Google Storage.
extra_url_components, if specified, should be a string without a
trailing '/' which is inserted in the middle of the URL.
The builder_name, or parent_buildername, is always automatically
inserted into the URL."""
return self._legacy_url(True, gs_bucket_name, extra_url_components)
def archive_dependencies(
self, step_name, target, master, builder, build, **kwargs):
"""Returns a step invoking archive_dependencies.py to zip up and upload
build dependency information for the build."""
try:
script = self.m.path['build'].join('scripts',
'slave',
'archive_dependencies.py')
args = []
args.extend(['--src-dir', self.m.path['checkout']])
args.extend(['--target', target])
args.extend(['--master', master])
args.extend(['--builder', builder])
args.extend(['--build', build])
self.m.python(step_name, script, args, infra_step=True, **kwargs)
except self.m.step.StepFailure:
pass
| 37.326969 | 79 | 0.670588 |
import re
from recipe_engine import recipe_api
EXCLUDED_FILES_ALL_PLATFORMS = [
'.landmines',
'.ninja_deps',
'.ninja_log',
'gen',
'obj',
]
EXCLUDED_FILES = {
'win': set(EXCLUDED_FILES_ALL_PLATFORMS + [
'cfinstaller_archive',
'installer_archive',
'lib',
]),
'mac': set(EXCLUDED_FILES_ALL_PLATFORMS + [
'.deps',
'App Shim Socket',
# copy outside the app.
# TODO(mark): Since r28431, the copy in the build directory is actually
# used by tests. Putting two copies in the .zip isn't great, so maybe
# copy outside the app.
'Chromium Helper.app',
'Google Chrome Helper.app',
# We don't need the arm bits v8 builds.
'd8_arm',
'v8_shell_arm',
'lib',
'obj.host',
'obj.target',
'pdfsqueeze',
]),
'linux': set(EXCLUDED_FILES_ALL_PLATFORMS + [
'.deps',
'.sconsign.dblite',
'appcache',
'glue',
'lib.host',
'mksnapshot',
'obj.host',
'obj.target',
'src',
]),
}
EXCLUDED_FILES_PATTERN = {
'win': re.compile(r'^.+\.(obj|lib|pch|exp)$'),
'mac': re.compile(r'^.+\.(a)$'),
'linux': re.compile(r'^.+\.(o|a|d)$'),
}
GIT_COMMIT_HASH_RE = re.compile(r'[a-zA-Z0-9]{40}')
GS_COMMIT_POSITION_KEY = 'Cr-Commit-Position'
GS_COMMIT_POSITION_NUMBER_KEY = 'Cr-Commit-Position-Number'
GS_GIT_COMMIT_KEY = 'Cr-Git-Commit'
class ArchiveApi(recipe_api.RecipeApi):
def zip_and_upload_build(
self, step_name, target, build_url=None, src_dir=None,
build_revision=None, cros_board=None, package_dsym_files=False,
exclude_files=None, **kwargs):
args = ['--target', target]
if build_url:
args.extend(['--build-url', build_url])
if build_revision:
args.extend(['--build_revision', build_revision])
elif src_dir:
args.extend(['--src-dir', src_dir])
if cros_board:
args.extend(['--cros-board', cros_board])
if package_dsym_files:
args.append('--package-dsym-files')
if exclude_files:
args.extend(['--exclude-files', exclude_files])
if 'gs_acl' in self.m.properties:
args.extend(['--gs-acl', self.m.properties['gs_acl']])
properties_json = self.m.json.dumps(self.m.properties.legacy())
args.extend(['--factory-properties', properties_json,
'--build-properties', properties_json])
kwargs['allow_subannotations'] = True
self.m.python(
step_name,
self.m.path['build'].join('scripts', 'slave', 'zip_build.py'),
args,
infra_step=True,
**kwargs
)
def _cf_should_package_file(self, filename):
if EXCLUDED_FILES_PATTERN[self.m.platform.name].match(filename):
return False
if filename in EXCLUDED_FILES[self.m.platform.name]:
return False
return True
def _get_commit_position(self, update_properties, primary_project):
if primary_project:
key = 'got_%s_revision_cp' % primary_project
else:
key = 'got_revision_cp'
return update_properties[key]
def _get_git_commit(self, update_properties, primary_project):
if primary_project:
commit = update_properties.get('got_%s_revision_git' % primary_project)
if commit:
return commit
commit = update_properties.get('got_%s_revision' % primary_project)
if commit and GIT_COMMIT_HASH_RE.match(commit):
return commit
commit = update_properties.get('got_revision_git')
if commit:
return commit
commit = update_properties.get('got_revision')
if commit and GIT_COMMIT_HASH_RE.match(commit):
return commit
return None
def _get_comparable_upload_path_for_sort_key(self, branch, number):
if branch and branch != 'refs/heads/master':
branch = branch.replace('/', '_')
return '%s-%s' % (branch, number)
return str(number)
def clusterfuzz_archive(
self, build_dir, update_properties, gs_bucket,
archive_prefix, archive_subdir_suffix='', gs_acl=None,
revision_dir=None, primary_project=None,
fixed_staging_dir=False, **kwargs):
# TODO(machenbach): Merge revision_dir and primary_project. The
# revision_dir is only used for building the archive name while the
# primary_project is authoritative for the commit position.
target = self.m.path.split(build_dir)[-1]
commit_position = self._get_commit_position(
update_properties, primary_project)
cp_branch, cp_number = self.m.commit_position.parse(commit_position)
build_git_commit = self._get_git_commit(update_properties, primary_project)
if fixed_staging_dir:
staging_dir = self.m.path['slave_build'].join('chrome_staging')
self.m.file.rmtree('purge staging dir', staging_dir)
self.m.file.makedirs('create staging dir', staging_dir)
else:
staging_dir = self.m.path.mkdtemp('chrome_staging')
# Build the list of files to archive.
zip_file_list = [f for f in self.m.file.listdir('build_dir', build_dir)
if self._cf_should_package_file(f)]
# Use the legacy platform name as Clusterfuzz has some expectations on
# this (it only affects Windows, where it replace 'win' by 'win32').
pieces = [self.legacy_platform_name(), target.lower()]
if archive_subdir_suffix:
pieces.append(archive_subdir_suffix)
subdir = '-'.join(pieces)
# Components like v8 get a <name>-v8-component-<revision> infix.
component = ''
if revision_dir:
component = '-%s-component' % revision_dir
sortkey_path = self._get_comparable_upload_path_for_sort_key(
cp_branch, cp_number)
zip_file_base_name = '%s-%s-%s%s-%s' % (archive_prefix,
self.legacy_platform_name(),
target.lower(),
component,
sortkey_path)
zip_file_name = '%s.zip' % zip_file_base_name
self.m.python(
'zipping',
self.resource('zip_archive.py'),
[
staging_dir,
zip_file_base_name,
self.m.json.input(zip_file_list),
build_dir,
],
infra_step=True,
**kwargs
)
zip_file = staging_dir.join(zip_file_name)
gs_metadata = {
GS_COMMIT_POSITION_NUMBER_KEY: cp_number,
}
if commit_position:
gs_metadata[GS_COMMIT_POSITION_KEY] = commit_position
if build_git_commit:
gs_metadata[GS_GIT_COMMIT_KEY] = build_git_commit
gs_args = []
if gs_acl:
gs_args.extend(['-a', gs_acl])
self.m.gsutil.upload(
zip_file,
gs_bucket,
"/".join([subdir, zip_file_name]),
args=gs_args,
metadata=gs_metadata,
use_retry_wrapper=False,
)
self.m.file.remove(zip_file_name, zip_file)
def download_and_unzip_build(
self, step_name, target, build_url, src_dir=None,
build_revision=None, build_archive_url=None, **kwargs):
args = ['--target', target]
if build_archive_url:
args.extend(['--build-archive-url', build_archive_url])
else:
args.extend(['--build-url', build_url])
if build_revision:
args.extend(['--build_revision', build_revision])
elif src_dir:
args.extend(['--src-dir', src_dir])
properties = (
('mastername', '--master-name'),
('buildnumber', '--build-number'),
('parent_builddir', '--parent-build-dir'),
('parentname', '--parent-builder-name'),
('parentslavename', '--parent-slave-name'),
('parent_buildnumber', '--parent-build-number'),
('webkit_dir', '--webkit-dir'),
('revision_dir', '--revision-dir'),
)
for property_name, switch_name in properties:
if self.m.properties.get(property_name):
args.extend([switch_name, self.m.properties[property_name]])
# TODO(phajdan.jr): Always halt on missing build.
if self.m.properties.get('halt_on_missing_build'): # pragma: no cover
args.append('--halt-on-missing-build')
self.m.python(
step_name,
self.m.path['build'].join('scripts', 'slave', 'extract_build.py'),
args,
infra_step=True,
**kwargs
)
def legacy_platform_name(self):
if self.m.platform.is_win:
return 'win32'
return self.m.platform.name
def _legacy_url(self, is_download, gs_bucket_name, extra_url_components):
result = ('gs://' + gs_bucket_name)
if extra_url_components:
result += ('/' + extra_url_components)
if is_download:
result += ('/' + self.m.properties['parent_buildername'] + '/' +
'full-build-' + self.legacy_platform_name() +
'.zip')
else:
result += '/' + self.m.properties['buildername']
return result
def legacy_upload_url(self, gs_bucket_name, extra_url_components=None):
return self._legacy_url(False, gs_bucket_name, extra_url_components)
def legacy_download_url(self, gs_bucket_name, extra_url_components=None):
return self._legacy_url(True, gs_bucket_name, extra_url_components)
def archive_dependencies(
self, step_name, target, master, builder, build, **kwargs):
try:
script = self.m.path['build'].join('scripts',
'slave',
'archive_dependencies.py')
args = []
args.extend(['--src-dir', self.m.path['checkout']])
args.extend(['--target', target])
args.extend(['--master', master])
args.extend(['--builder', builder])
args.extend(['--build', build])
self.m.python(step_name, script, args, infra_step=True, **kwargs)
except self.m.step.StepFailure:
pass
| true | true |
1c34acd2e5e79e60abec7fa52794f10ca1591c74 | 753 | py | Python | textWindow.py | TitaniumHocker/Slip-Out | dffc5b99207ae025901284cfa1c9fefedc282e09 | [
"MIT"
] | null | null | null | textWindow.py | TitaniumHocker/Slip-Out | dffc5b99207ae025901284cfa1c9fefedc282e09 | [
"MIT"
] | null | null | null | textWindow.py | TitaniumHocker/Slip-Out | dffc5b99207ae025901284cfa1c9fefedc282e09 | [
"MIT"
] | 4 | 2019-05-16T22:57:20.000Z | 2019-06-08T01:09:07.000Z | # -*- coding: utf-8 -*-
from pygame.sprite import Sprite
from pygame import Surface
from pygame import image
from pygame import display
from pygame import SRCALPHA
from pygame import Color
class TextWindow(Sprite):
def __init__(self, img):
Sprite.__init__(self)
self.surface = Surface((1280, 720), SRCALPHA)
self.image = image.load(img).convert_alpha()
self.surface.blit(self.image, (0, 0))
self.name = 'TextWindow'
def upload(self, img):
self.image = image.load(img).convert_alpha()
self.surface = Surface((1280, 720), SRCALPHA)
self.surface.blit(self.image, (0, 0))
def update(self):
pass
def draw(self, surface):
surface.blit(self.surface, (0, 0))
| 25.965517 | 53 | 0.645418 |
from pygame.sprite import Sprite
from pygame import Surface
from pygame import image
from pygame import display
from pygame import SRCALPHA
from pygame import Color
class TextWindow(Sprite):
def __init__(self, img):
Sprite.__init__(self)
self.surface = Surface((1280, 720), SRCALPHA)
self.image = image.load(img).convert_alpha()
self.surface.blit(self.image, (0, 0))
self.name = 'TextWindow'
def upload(self, img):
self.image = image.load(img).convert_alpha()
self.surface = Surface((1280, 720), SRCALPHA)
self.surface.blit(self.image, (0, 0))
def update(self):
pass
def draw(self, surface):
surface.blit(self.surface, (0, 0))
| true | true |
1c34acd4a569d395db08b152cb5f398552cf3fa5 | 1,956 | py | Python | tests/functional/test_templates.py | ChrisLi0329/warehouse | 0241ccb3f2974a235815b759065b0c8a9c326009 | [
"Apache-2.0"
] | 4 | 2017-12-07T17:45:12.000Z | 2021-11-15T11:14:44.000Z | tests/functional/test_templates.py | ChrisLi0329/warehouse | 0241ccb3f2974a235815b759065b0c8a9c326009 | [
"Apache-2.0"
] | null | null | null | tests/functional/test_templates.py | ChrisLi0329/warehouse | 0241ccb3f2974a235815b759065b0c8a9c326009 | [
"Apache-2.0"
] | 2 | 2017-12-07T17:45:15.000Z | 2019-11-25T23:47:20.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warehouse
from jinja2 import Environment, FileSystemLoader
def test_templates_for_empty_titles():
    """
    Test if all HTML templates have defined the title block. See
    https://github.com/pypa/warehouse/issues/784
    """
    template_root = os.path.join(os.path.dirname(warehouse.__file__), 'templates')
    env = Environment(
        loader=FileSystemLoader(template_root),
        extensions=[],
        cache_size=0,
    )
    # Register the filter names the templates reference; the values are
    # placeholder strings — presumably only the names matter at compile time.
    env.filters.update({
        "format_date": "warehouse.i18n.filters:format_date",
        "format_datetime": "warehouse.i18n.filters:format_datetime",
        "format_rfc822_datetime":
            "warehouse.i18n.filters:format_rfc822_datetime",
        "format_tags": "warehouse.filters:format_tags",
        "json": "warehouse.filters:tojson",
        "readme": "warehouse.filters:readme",
        "shorten_number": "warehouse.filters:shorten_number",
        "urlparse": "warehouse.filters:urlparse",
    })
    for current_dir, _, file_names in os.walk(template_root):
        # Includes and legacy templates are exempt from the title rule.
        if "/includes" in current_dir or "/legacy" in current_dir:
            continue
        for file_name in file_names:
            if not file_name.endswith(".html"):
                continue
            rel_dir = os.path.relpath(current_dir, template_root)
            template = env.get_template(os.path.join(rel_dir, file_name))
            assert 'title' in template.blocks
| 36.222222 | 77 | 0.665644 |
import os
import warehouse
from jinja2 import Environment, FileSystemLoader
def test_templates_for_empty_titles():
    """Every non-include, non-legacy HTML template must define a title block."""
    root = os.path.join(os.path.dirname(warehouse.__file__), 'templates')
    env = Environment(loader=FileSystemLoader(root), extensions=[], cache_size=0)
    # Placeholder filter registrations so templates referencing them compile.
    filter_stubs = {
        "format_date": "warehouse.i18n.filters:format_date",
        "format_datetime": "warehouse.i18n.filters:format_datetime",
        "format_rfc822_datetime":
            "warehouse.i18n.filters:format_rfc822_datetime",
        "format_tags": "warehouse.filters:format_tags",
        "json": "warehouse.filters:tojson",
        "readme": "warehouse.filters:readme",
        "shorten_number": "warehouse.filters:shorten_number",
        "urlparse": "warehouse.filters:urlparse",
    }
    env.filters.update(filter_stubs)
    for folder, _, names in os.walk(root):
        if "/includes" in folder or "/legacy" in folder:
            # These template families are exempt from the title requirement.
            continue
        html_names = (n for n in names if n.endswith(".html"))
        for name in html_names:
            rel_path = os.path.join(os.path.relpath(folder, root), name)
            assert 'title' in env.get_template(rel_path).blocks
| true | true |
1c34ad1a177fb2f1e3912e3283617e2717c429b1 | 508 | py | Python | PSET6/mario_more.py | jimcs1/CS50-1 | 1ae26f14070718fb7d98afbb5416f97693b6ae68 | [
"MIT"
] | 5 | 2018-04-23T22:38:17.000Z | 2020-09-10T20:54:30.000Z | PSET6/mario_more.py | jimcs1/CS50-1 | 1ae26f14070718fb7d98afbb5416f97693b6ae68 | [
"MIT"
] | 3 | 2017-02-12T13:52:30.000Z | 2017-02-15T15:18:27.000Z | PSET6/mario_more.py | jimcs1/CS50-1 | 1ae26f14070718fb7d98afbb5416f97693b6ae68 | [
"MIT"
] | 11 | 2018-07-03T08:56:29.000Z | 2022-02-02T14:23:55.000Z | #!/usr/bin/env python3
def main():
    """Ask the user for a pyramid height, then draw the double half-pyramid."""
    print__double_half_pyramid(get_height())
def get_height():
    """Prompt until the user types an integer between 1 and 23, then return it."""
    while True:
        try:
            value = int(input("Height: "))
        except ValueError:
            continue  # non-numeric input: ask again
        if 0 < value < 24:
            return value
def print__double_half_pyramid(height):
    """Print two facing half-pyramids of the given height.

    Row i holds i hashes right-aligned and i hashes left-aligned, each side
    padded to *height* columns, with a fixed 2-space gutter between them
    (so every row keeps its trailing spaces).
    """
    for row in range(1, height + 1):
        blocks = "#" * row
        print(f"{blocks:>{height}}  {blocks:<{height}}")
# Entry point guard: run only when executed directly, not when imported.
if __name__ == "__main__":
    main()
| 16.933333 | 80 | 0.55315 |
def main():
    """Read a validated height from the user and print the double half-pyramid."""
    height = get_height()
    print__double_half_pyramid(height)
def get_height():
    """Prompt repeatedly until the user enters an integer strictly between 0 and 24."""
    while True:
        try:
            height = int(input("Height: "))
        except ValueError:
            # Non-numeric input: prompt again.
            continue
        if 0 < height < 24:
            break
    return height
def print__double_half_pyramid(height):
    """Print two facing half-pyramids separated by a 2-space gutter."""
    for i in range(1, height + 1):
        # Left side right-aligned, right side left-aligned, each padded to
        # `height` columns; the "{1:2}" field renders as two spaces.
        print("{0:>{width}}{1:2}{0:<{width}}".format("#" * i, "", width=height))
# Entry point guard: run only when executed directly, not when imported.
if __name__ == "__main__":
    main()
| true | true |
1c34b01861f826daeba5ddd70b5dfe81126c4a1a | 1,065 | py | Python | python/09_queue/array_queue.py | shipan3452/algo | 0494cc0d8f5daf108daf4358c4531a29279dd380 | [
"Apache-2.0"
] | 22,028 | 2018-09-27T05:55:19.000Z | 2022-03-30T10:44:46.000Z | python/09_queue/array_queue.py | wangjing013/algo | b2c1228ff915287ad7ebeae4355fa26854ea1557 | [
"Apache-2.0"
] | 164 | 2018-10-06T15:11:08.000Z | 2022-03-28T10:04:34.000Z | python/09_queue/array_queue.py | wangjing013/algo | b2c1228ff915287ad7ebeae4355fa26854ea1557 | [
"Apache-2.0"
] | 7,250 | 2018-09-30T00:45:25.000Z | 2022-03-31T20:15:33.000Z | """
Queue based upon array
用数组实现的队列
Author: Wenru
"""
from typing import Optional
class ArrayQueue:
    """A bounded FIFO queue of strings backed by a Python list.

    ``_head``/``_tail`` index into ``_items``; when the tail reaches capacity,
    live elements are compacted to the front so dequeued slots are reused.

    Fix: the original used ``list.insert()`` after compaction, which *adds* a
    slot on every wrap-around, letting the backing list grow without bound
    past the declared capacity. Reclaimed slots are now overwritten in place.
    """

    def __init__(self, capacity: int):
        self._items: list = []
        self._capacity = capacity  # maximum number of queued items
        self._head = 0  # index of the next item to dequeue
        self._tail = 0  # index one past the last enqueued item

    def enqueue(self, item: str) -> bool:
        """Append *item* to the queue; return False when the queue is full."""
        if self._tail == self._capacity:
            if self._head == 0:
                # No dequeued slots to reclaim: genuinely full.
                return False
            # Shift live elements to the front, reclaiming dequeued slots.
            for i in range(0, self._tail - self._head):
                self._items[i] = self._items[i + self._head]
            self._tail = self._tail - self._head
            self._head = 0
        if self._tail < len(self._items):
            # Overwrite a stale (already dequeued) slot instead of inserting,
            # keeping len(_items) bounded by the capacity.
            self._items[self._tail] = item
        else:
            self._items.append(item)
        self._tail += 1
        return True

    def dequeue(self) -> Optional[str]:
        """Remove and return the oldest item, or None when the queue is empty."""
        if self._head == self._tail:
            return None
        item = self._items[self._head]
        self._head += 1
        return item

    def __repr__(self) -> str:
        # Only the live window [head, tail) belongs to the queue.
        return " ".join(item for item in self._items[self._head : self._tail])
| 24.767442 | 78 | 0.514554 |
from typing import Optional
class ArrayQueue:
    """Bounded FIFO queue of strings backed by a plain Python list."""
    def __init__(self, capacity: int):
        self._items = []
        self._capacity = capacity
        self._head = 0
        self._tail = 0
    def enqueue(self, item: str) -> bool:
        """Append *item*; return False when the queue is full."""
        if self._tail == self._capacity:
            if self._head == 0:
                # Full: no dequeued slots available to reclaim.
                return False
            else:
                # Compact live elements to the front, reclaiming dequeued slots.
                for i in range(0, self._tail - self._head):
                    self._items[i] = self._items[i + self._head]
                self._tail = self._tail - self._head
                self._head = 0
        # NOTE(review): insert() adds a slot on each wrap-around, so the
        # backing list can grow past _capacity (stale entries linger after
        # the tail) — consider overwriting the reclaimed slot instead.
        self._items.insert(self._tail, item)
        self._tail += 1
        return True
    def dequeue(self) -> Optional[str]:
        """Remove and return the oldest item, or None when empty."""
        if self._head != self._tail:
            item = self._items[self._head]
            self._head += 1
            return item
        else:
            return None
    def __repr__(self) -> str:
        # Only the live window [head, tail) is part of the queue.
        return " ".join(item for item in self._items[self._head : self._tail])
| true | true |
1c34b0ec85debbe97a11b8bf53e0dd66861058ab | 282 | py | Python | app/errors.py | lawr3nc/artwork_prov | b6a61e586e28c3afafaa75bb4681bb723fcaaf36 | [
"MIT"
] | null | null | null | app/errors.py | lawr3nc/artwork_prov | b6a61e586e28c3afafaa75bb4681bb723fcaaf36 | [
"MIT"
] | null | null | null | app/errors.py | lawr3nc/artwork_prov | b6a61e586e28c3afafaa75bb4681bb723fcaaf36 | [
"MIT"
] | null | null | null | from flask import render_template
from app import app, db
@app.errorhandler(404)
def page_not_found(error):
    """Serve the custom 404 page, preserving the 404 status code."""
    return render_template('404.html'), 404
@app.errorhandler(500)
def internal_server_error(error):
db.session.rollback()
return render_template('500.html'), 500 | 21.692308 | 43 | 0.758865 | from flask import render_template
from app import app, db
@app.errorhandler(404)
def page_not_found(error):
    """Serve the custom 404 page, preserving the 404 status code."""
    return render_template('404.html'), 404
@app.errorhandler(500)
def internal_server_error(error):
db.session.rollback()
return render_template('500.html'), 500 | true | true |
1c34b0f1c3bffce28462e151e7a163199cb82efe | 3,799 | py | Python | route_66/model.py | Casper-Smet/turing-route-66 | b797485586c3491ddbcd76367aff88b7672d8d9a | [
"MIT"
] | 3 | 2019-12-03T09:47:02.000Z | 2019-12-03T09:47:51.000Z | route_66/model.py | Casper-Smet/turing-route-66 | b797485586c3491ddbcd76367aff88b7672d8d9a | [
"MIT"
] | null | null | null | route_66/model.py | Casper-Smet/turing-route-66 | b797485586c3491ddbcd76367aff88b7672d8d9a | [
"MIT"
] | null | null | null | import numpy as np
from mesa import Model
from mesa.datacollection import DataCollector
from mesa.space import SingleGrid
from mesa.time import StagedActivation
from route_66.agent import CarAgent, TrafficLight
def get_average_velocity(model):
    """
    Gets the total average velocity over all the agents
    :param model: The model (environment) where the agents exist
    :return: The total average velocity over all the agents
    """
    agent_frame = model.datacollector.get_agent_vars_dataframe().reset_index()
    return agent_frame["Velocity"].mean()
def get_standard_deviation_velocity(model):
    """
    Gets the total standard deviation of the velocity over all agents
    :param model: The model (environment) where the agents exist
    :return: The total standard deviation over all agents
    """
    agent_frame = model.datacollector.get_agent_vars_dataframe().reset_index()
    return agent_frame["Velocity"].std()
def get_on_ramp_queue(model):
    """Gets the total queue on the on ramp after the simulation"""
    traffic_light = model.traffic_light
    return traffic_light.on_ramp_queue
def get_waiting_queue(model):
    """Gets the total queue on the waiting ramp after the simulation"""
    traffic_light = model.traffic_light
    return traffic_light.wait_queue
class RoadModel(Model):
    """
    A model with a number of cars, Nagel-Schreckenberg

    :param N: number of CarAgents created at start-up
    :param length: grid length (cells) of the torus road
    :param lanes: number of lanes (grid height)
    :param timer: timer value passed to the TrafficLight
    """
    def __init__(self, N, length=100, lanes=1, timer=3):
        self.num_agents = N
        self.grid = SingleGrid(length, lanes, torus=True)
        model_stages = ["acceleration", "braking", "randomisation", "move", "delete"]
        self.schedule = StagedActivation(self, stage_list=model_stages)
        # Create agent
        for i in range(self.num_agents):
            agent = CarAgent(i, self, False)
            # Add to schedule
            self.schedule.add(agent)
            # Add to grid (randomly)
            self.grid.position_agent(agent)
        # Add the traffic light
        # NOTE(review): the two trailing 20s are positional TrafficLight
        # arguments whose meaning isn't visible here — confirm in agent.py.
        self.traffic_light = TrafficLight(0, self, timer, 20, 20)
        self.average_velocity = CarAgent.init_velocity
        self.datacollector = DataCollector(agent_reporters={
            "Position": "pos",
            "Velocity": "velocity"},
            model_reporters={
            "Average Velocity": "average_velocity",
            "Amount of cars": "agent_count",
            "On Ramp Queue": get_on_ramp_queue,
            "Waiting Queue": get_waiting_queue})
        self.running = True
    def step(self):
        """
        The model takes a new step and updates
        """
        # Calculate amount of agents
        self.agent_count = len(self.schedule.agents)
        # Calculate average velocity
        self.average_velocity = np.mean([a.velocity for a in self.schedule.agents])
        # Collect data
        self.datacollector.collect(self)
        # Run a step of the traffic light
        self.traffic_light.step()
        # Run next step
        self.schedule.step()
    def add_agent(self, label, x_corr):
        """
        Adds an agent to the scheduler and model on a particular coordinate
        :param label: The label of the agents that gets created
        :param x_corr: The x-coordinate of where the agent will be spawned
        """
        # Create agent
        agent = CarAgent(label, self, True)
        # Add to schedule
        self.schedule.add(agent)
        # Add to grid on a certain position
        self.grid.position_agent(agent, x_corr, 0)
    def delete_agent(self, agent):
        """
        Deletes an agent from the scheduler and model
        :param agent: The agents that gets deleted
        """
        # remove from schedule
        self.schedule.remove(agent)
        # remove from grid
        self.grid.remove_agent(agent)
| 31.139344 | 85 | 0.646486 | import numpy as np
from mesa import Model
from mesa.datacollection import DataCollector
from mesa.space import SingleGrid
from mesa.time import StagedActivation
from route_66.agent import CarAgent, TrafficLight
def get_average_velocity(model):
    """Return the mean of the 'Velocity' column across all collected agent data."""
    df = model.datacollector.get_agent_vars_dataframe()
    df.reset_index(inplace=True)
    velocities = df["Velocity"]
    return velocities.mean()
def get_standard_deviation_velocity(model):
    """Return the standard deviation of the 'Velocity' column over all agent data."""
    df = model.datacollector.get_agent_vars_dataframe()
    df.reset_index(inplace=True)
    velocities = df["Velocity"]
    return velocities.std()
def get_on_ramp_queue(model):
    # Total on-ramp queue accumulated by the model's traffic light.
    return model.traffic_light.on_ramp_queue
def get_waiting_queue(model):
    # Total waiting queue accumulated by the model's traffic light.
    return model.traffic_light.wait_queue
class RoadModel(Model):
    """Nagel-Schreckenberg road model with N cars on a torus grid."""
    def __init__(self, N, length=100, lanes=1, timer=3):
        self.num_agents = N
        self.grid = SingleGrid(length, lanes, torus=True)
        model_stages = ["acceleration", "braking", "randomisation", "move", "delete"]
        self.schedule = StagedActivation(self, stage_list=model_stages)
        # Create the initial cars and place them randomly on the grid.
        for i in range(self.num_agents):
            agent = CarAgent(i, self, False)
            self.schedule.add(agent)
            self.grid.position_agent(agent)
        # NOTE(review): trailing 20, 20 are positional TrafficLight args —
        # confirm their meaning in agent.py.
        self.traffic_light = TrafficLight(0, self, timer, 20, 20)
        self.average_velocity = CarAgent.init_velocity
        self.datacollector = DataCollector(agent_reporters={
            "Position": "pos",
            "Velocity": "velocity"},
            model_reporters={
            "Average Velocity": "average_velocity",
            "Amount of cars": "agent_count",
            "On Ramp Queue": get_on_ramp_queue,
            "Waiting Queue": get_waiting_queue})
        self.running = True
    def step(self):
        """Advance the model one tick: refresh stats, collect data, step agents."""
        self.agent_count = len(self.schedule.agents)
        self.average_velocity = np.mean([a.velocity for a in self.schedule.agents])
        self.datacollector.collect(self)
        self.traffic_light.step()
        self.schedule.step()
    def add_agent(self, label, x_corr):
        """Create a CarAgent and place it at (x_corr, 0)."""
        agent = CarAgent(label, self, True)
        self.schedule.add(agent)
        self.grid.position_agent(agent, x_corr, 0)
    def delete_agent(self, agent):
        """Remove an agent from both the scheduler and the grid."""
        self.schedule.remove(agent)
        self.grid.remove_agent(agent)
| true | true |
1c34b127406f89dabe2bc19eed6d0ab68bc5c09a | 25,008 | py | Python | server/views_admin.py | sampsontan/virtual-clinic | 5d8cbb199fb7ff1658c8d1245d8756da7d4e2d34 | [
"MIT"
] | 50 | 2019-02-04T12:33:49.000Z | 2022-03-14T04:56:20.000Z | server/views_admin.py | sampsontan/virtual-clinic | 5d8cbb199fb7ff1658c8d1245d8756da7d4e2d34 | [
"MIT"
] | 6 | 2020-05-23T09:34:21.000Z | 2021-06-08T19:11:57.000Z | server/views_admin.py | sampsontan/virtual-clinic | 5d8cbb199fb7ff1658c8d1245d8756da7d4e2d34 | [
"MIT"
] | 27 | 2018-04-04T03:16:00.000Z | 2022-03-30T11:08:35.000Z | from csv import QUOTE_MINIMAL, writer
import re
import sqlite3
import sys
from django.core.management import call_command
from django.core import serializers
from django.shortcuts import render,redirect
from django.http import HttpResponseRedirect, HttpResponse
from django.db.utils import IntegrityError
from server.forms import SpecialityForm, SymptomForm, EmployeeRegistrationForm, ImportForm, ExportForm, HospitalForm, StatisticsForm
from server.models import Speciality, Account, Action, Hospital, Location, Statistics, Symptom, Profile, Appointment, Message, Prescription, MedicalInfo, MedicalTest
from server import logger
from server import views
def parse_speciality_delete(request):
    """Handle POST deletion of a Speciality, then redirect to the list page.

    Fix: on a failed lookup the original executed a bare ``return``, making
    the view return None (which Django rejects); it now redirects with an
    error message stored in the session instead.
    """
    # Authentication check.
    authentication_result = views.authentication_check(request, [Account.ACCOUNT_ADMIN])
    if authentication_result is not None:
        return authentication_result
    # Get the template data from the session
    template_data = views.parse_session(request)
    # Proceed with the rest of the view
    if request.method == 'POST':
        if 'delete' in request.POST and 'pk' in request.POST:
            pk = request.POST['pk']
            try:
                speciality = Speciality.objects.get(pk=pk)
            except Exception:
                request.session['alert_danger'] = "Unable to cancel the speciality. Please try again later."
                return HttpResponseRedirect('/admin/speciality/')
            speciality.delete()
            logger.log(Action.ACTION_ADMIN, 'Speciality cancelled', request.user.account)
            template_data['alert_success'] = "The speciality has been deleted."
    return HttpResponseRedirect('/admin/speciality/')
def parse_symptom_delete(request):
    """Handle POST deletion of a Symptom, then redirect to the symptom list.

    Fix: on a failed lookup the original executed a bare ``return``, making
    the view return None (which Django rejects); it now redirects with an
    error message stored in the session instead.
    """
    # Authentication check.
    authentication_result = views.authentication_check(request, [Account.ACCOUNT_ADMIN])
    if authentication_result is not None:
        return authentication_result
    # Get the template data from the session
    template_data = views.parse_session(request)
    # Proceed with the rest of the view
    if request.method == 'POST':
        if 'delete' in request.POST and 'pk' in request.POST:
            pk = request.POST['pk']
            try:
                symptom = Symptom.objects.get(pk=pk)
            except Exception:
                request.session['alert_danger'] = "Unable to delete the symptom. Please try again later."
                return HttpResponseRedirect('/admin/symptom/')
            symptom.delete()
            logger.log(Action.ACTION_ADMIN, 'Symptom cancelled', request.user.account)
            template_data['alert_success'] = "The symptom has been deleted."
    return HttpResponseRedirect('/admin/symptom/')
def user_archive(request):
    """Soft-delete a user by setting its ``archive`` flag, then redirect.

    Fix: on a failed lookup the original executed a bare ``return``, making
    the view return None (which Django rejects); it now redirects with an
    error message stored in the session instead.
    """
    # Authentication check.
    authentication_result = views.authentication_check(request, [Account.ACCOUNT_ADMIN])
    if authentication_result is not None:
        return authentication_result
    # Get the template data from the session
    template_data = views.parse_session(request)
    # Proceed with the rest of the view
    if request.method == 'POST':
        if 'delete' in request.POST and 'pk' in request.POST:
            pk = request.POST['pk']
            try:
                user = Account.objects.get(pk=pk)
            except Exception:
                request.session['alert_danger'] = "Unable to delete the user. Please try again later"
                return HttpResponseRedirect('/admin/users')
            user.archive = True
            user.save()
            template_data['alert_success'] = "The user has been deleted."
    return HttpResponseRedirect('/admin/users')
def view_archived_users(request):
    """List every soft-deleted (archived) account for the admin."""
    auth_redirect = views.authentication_check(request, [Account.ACCOUNT_ADMIN])
    if auth_redirect is not None:
        return auth_redirect
    template_data = views.parse_session(request)
    template_data['query'] = Account.objects.filter(archive=True)
    return render(request, 'virtualclinic/admin/archived_users.html', template_data)
def restore_user(request):
    """Un-archive a previously archived user account (POST only)."""
    auth_redirect = views.authentication_check(request, [Account.ACCOUNT_ADMIN])
    if auth_redirect is not None:
        return auth_redirect
    template_data = views.parse_session(request)
    if request.method == 'POST' and 'restore' in request.POST and 'pk' in request.POST:
        try:
            user = Account.objects.get(pk=request.POST['pk'])
        except Exception:
            template_data['alert_danger'] = "Unable to delete the user. Please try again later"
            return HttpResponseRedirect('/admin/users')
        user.archive = False
        user.save()
        logger.log(Action.ACTION_ADMIN, 'Admin restored the user', user)
        template_data['alert_success'] = "The user has been restored."
    # All paths end up back on the user list.
    return HttpResponseRedirect('/admin/users')
def users_view(request):
    """Admin user list; a POST updates one user's role.

    Fixes: ``Account.objects.get`` raises ``DoesNotExist`` instead of
    returning None, so the old ``is not None`` guard could never fire —
    replaced with ``filter().first()``. Also guards against missing POST
    keys and restores the missing space in the success message.
    """
    # Authentication check.
    authentication_result = views.authentication_check(request, [Account.ACCOUNT_ADMIN])
    if authentication_result is not None:
        return authentication_result
    # Get the template data from the session
    template_data = views.parse_session(request)
    # Proceed with the rest of the view
    if request.method == 'POST' and 'pk' in request.POST and 'role' in request.POST:
        pk = request.POST['pk']
        role = request.POST['role']
        # first() returns None when no row matches, unlike get().
        account = Account.objects.filter(pk=pk).first()
        if account is not None:
            account.role = role
            account.save()
            logger.log(Action.ACTION_ADMIN, 'Admin modified ' + account.user.username + "'s role", request.user.account)
            template_data['alert_success'] = "Updated " + account.user.username + "'s role!"
    # Parse search sorting
    template_data['query'] = Account.objects.filter(archive=False).order_by('-role')
    return render(request, 'virtualclinic/admin/users.html', template_data)
def activity_view(request):
    """Show the full admin audit log, newest entries first."""
    auth_redirect = views.authentication_check(request, [Account.ACCOUNT_ADMIN])
    if auth_redirect is not None:
        return auth_redirect
    template_data = views.parse_session(request)
    template_data['query'] = Action.objects.all().order_by('-timePerformed')
    return render(request, 'virtualclinic/admin/activity.html', template_data)
def view_speciality(request):
    """List every registered Speciality for the admin."""
    auth_redirect = views.authentication_check(request, [Account.ACCOUNT_ADMIN])
    if auth_redirect is not None:
        return auth_redirect
    template_data = views.parse_session(request)
    template_data['query'] = Speciality.objects.all()
    return render(request, 'virtualclinic/admin/speciality.html', template_data)
def view_symptom(request):
    """List every registered Symptom for the admin."""
    auth_redirect = views.authentication_check(request, [Account.ACCOUNT_ADMIN])
    if auth_redirect is not None:
        return auth_redirect
    template_data = views.parse_session(request)
    template_data['query'] = Symptom.objects.all()
    return render(request, 'virtualclinic/admin/symptoms.html', template_data)
def add_speciality(request):
    """Render and process the 'Add Speciality' form."""
    auth_redirect = views.authentication_check(request, [Account.ACCOUNT_ADMIN])
    if auth_redirect is not None:
        return auth_redirect
    template_data = views.parse_session(
        request,
        {'form_button': "Add Speciality"}
    )
    if request.method == 'POST':
        form = SpecialityForm(request.POST)
        if form.is_valid():
            new_speciality = Speciality(
                name=form.cleaned_data['name'],
                description=form.cleaned_data['description']
            )
            new_speciality.save()
            template_data['alert_success'] = "Successfully added the Speciality!"
            logger.log(Action.ACTION_ADMIN, 'Admin added ' + new_speciality.name, request.user.account)
            return HttpResponseRedirect('/admin/speciality')
        # Invalid submission: re-render with the bound form so errors show.
    else:
        form = SpecialityForm()
    template_data['form'] = form
    return render(request, 'virtualclinic/admin/add_speciality.html', template_data)
def add_symptom(request):
    """Render and process the 'Add Symptom' form."""
    auth_redirect = views.authentication_check(request, [Account.ACCOUNT_ADMIN])
    if auth_redirect is not None:
        return auth_redirect
    template_data = views.parse_session(
        request,
        {'form_button': "Add Symptom"}
    )
    if request.method == 'POST':
        form = SymptomForm(request.POST)
        if form.is_valid():
            new_symptom = Symptom(
                name=form.cleaned_data['name'],
                description=form.cleaned_data['description']
            )
            new_symptom.save()
            template_data['alert_success'] = "Successfully added the Symptom!"
            logger.log(Action.ACTION_ADMIN, 'Admin added ' + new_symptom.name, request.user.account)
            return HttpResponseRedirect('/admin/symptom')
        # Invalid submission: re-render with the bound form so errors show.
    else:
        form = SymptomForm()
    template_data['form'] = form
    return render(request, 'virtualclinic/admin/add_symptom.html', template_data)
def add_hospital_view(request):
    """Render and process the 'Add Hospital' form.

    Fix: the original did ``location = location(...)`` — calling the
    not-yet-bound local name instead of the ``Location`` model — which
    raised UnboundLocalError on every valid submission.
    """
    # Authentication check.
    authentication_result = views.authentication_check(
        request,
        [Account.ACCOUNT_ADMIN]
    )
    if authentication_result is not None:
        return authentication_result
    # Get the template data from the session
    template_data = views.parse_session(
        request,
        {'form_button': "Add Hospital"}
    )
    # Proceed with the rest of the view
    if request.method == 'POST':
        form = HospitalForm(request.POST)
        if form.is_valid():
            location = Location(
                city=form.cleaned_data['city'],
                zip=form.cleaned_data['zip'],
                state=form.cleaned_data['state'],
                address=form.cleaned_data['address']
            )
            location.save()
            hospital = Hospital(
                name=form.cleaned_data['name'],
                phone=form.cleaned_data['phone'],
                location=location,
            )
            hospital.save()
            form = HospitalForm()  # Clean the form when the page is redisplayed
            template_data['alert_success'] = "Successfully added the hospital!"
    else:
        form = HospitalForm()
    template_data['form'] = form
    return render(request, 'virtualclinic/admin/add_hospital.html', template_data)
def createemployee_view(request):
    """Admin-only form for registering a new employee (doctor/lab/etc.) account."""
    # Authentication check.
    authentication_result = views.authentication_check(request, [Account.ACCOUNT_ADMIN])
    if authentication_result is not None: return authentication_result
    # Get the template data from the session
    template_data = views.parse_session(request,{'form_button':"Register"})
    # Proceed with the rest of the view
    if request.method == 'POST':
        form = EmployeeRegistrationForm(request.POST)
        if form.is_valid():
            # NOTE(review): register_user is called elsewhere with
            # (username, password, first, last, role); here 'employee' and
            # 'speciality' are presumably the role and speciality — confirm
            # against views.register_user's signature.
            user = views.register_user(
                form.cleaned_data['email'],
                form.cleaned_data['password_first'],
                form.cleaned_data['firstname'],
                form.cleaned_data['lastname'],
                form.cleaned_data['employee'],
                form.cleaned_data['speciality']
            )
            logger.log(Action.ACTION_ADMIN, 'Admin registered '+ user.username, request.user.account)
            request.session['alert_success'] = "Successfully created new employee account"
            return HttpResponseRedirect('/admin/users/')
    else:
        form = EmployeeRegistrationForm()
    template_data['form'] = form
    return render(request,'virtualclinic/admin/createemployee.html', template_data)
def statistic_view(request):
    """Show activity counts (logins, logouts, appointments, ...) over a date range.

    On an invalid/empty form the counters are zeroed and form errors are
    deliberately cleared so the page renders without validation messages.
    """
    # Authentication check.
    authentication_result = views.authentication_check(request, [Account.ACCOUNT_ADMIN])
    if authentication_result is not None: return authentication_result
    # Get the template data from the session
    template_data = views.parse_session(request, {'form_button': "Get Statistics"})
    # Proceed with the rest of the view
    default = {}
    # NOTE(review): QueryDicts are immutable by default; this flips the
    # private _mutable flag only to apply an (empty) default update —
    # presumably a leftover hook for injecting default form values.
    request.POST._mutable = True
    request.POST.update(default)
    predate_filter = Action.objects.all()
    template_data['pre_filter'] = predate_filter.count()
    form = StatisticsForm(request.POST)
    if request.method == 'POST':
        if form.is_valid():
            statistics = Statistics(
                startDate = form.cleaned_data['startDate'],
                endDate = form.cleaned_data['endDate'],
            )
            date_filter = Action.objects.all().filter(timePerformed__range = (statistics.startDate,statistics.endDate))
            template_data['temp'] = date_filter.count()
            template_data['start'] = statistics.startDate
            template_data['end'] = statistics.endDate
            # Each counter matches actions by the description text written by logger.log.
            template_data['total_logins'] = Action.objects.filter(description__icontains="Account login",timePerformed__range = (statistics.startDate, statistics.endDate) ).count()
            template_data['total_logouts'] = Action.objects.filter(description__icontains="Account logout",timePerformed__range = (statistics.startDate, statistics.endDate)).count()
            template_data['total_appointments'] = Action.objects.filter(description__icontains="Appointment created",timePerformed__range = (statistics.startDate, statistics.endDate)).count()
            template_data['total_med_tests'] = Action.objects.filter(description__icontains="Medical Test created",timePerformed__range = (statistics.startDate, statistics.endDate)).count()
            template_data['total_registered'] = Action.objects.filter(description__icontains="registered",timePerformed__range = (statistics.startDate, statistics.endDate)).count()
        else:
            # Invalid dates: clear the errors and render zeroed statistics.
            form._errors = {}
            statistics = Statistics(
                startDate = 0,
                endDate = 0,
            )
            errdate_filter = Action.objects.all()
            template_data['errdate_filter'] = errdate_filter.count()
            template_data['start'] = statistics.startDate
            template_data['end'] = statistics.endDate
            template_data['total_logins'] = 0
            template_data['total_logouts'] = 0
            template_data['total_appointments'] = 0
            template_data['total_med_tests'] = 0
            template_data['total_registered'] = 0
    template_data['form'] =form
    return render(request,'virtualclinic/admin/statistics.html', template_data)
def csv_import_view(request):
    """Upload a CSV of users or hospitals and dispatch to the matching handler.

    Fix: the original sniffed the header by iterating the uploaded file and
    then handed the already-advanced iterator to the handler; since each
    handler skips its own first row, the first *data* row was silently
    dropped. Materialising the rows first avoids the double skip.
    """
    # Authentication check.
    authentication_result = views.authentication_check(request, [Account.ACCOUNT_ADMIN])
    if authentication_result is not None:
        return authentication_result
    template_data = views.parse_session(request, {'form_button': "Submit"})
    if request.method == 'POST':
        form = ImportForm(request.POST, request.FILES)
        if form.is_valid():
            file = request.FILES['upload']
            rows = list(file)  # read all byte rows so sniffing doesn't consume data
            if rows:
                first_word = re.split('[,]', rows[0].decode("utf-8").strip())[0].lower()
                if first_word == 'firstname':
                    count = handle_user_csv(rows)
                    m = str(count[0]) + ' users are successfully uploaded, ' + str(count[1]) + ' duplicate accounts.'
                    if count[0] == 0:
                        template_data['alert_danger'] = m
                    else:
                        template_data['alert_success'] = m
                elif first_word == 'name':
                    count = handle_hospital_csv(rows)
                    m = str(count[0]) + ' hospitals are successfully uploaded, ' + str(count[1]) + ' duplicate hospitals.'
                    if count[0] == 0:
                        template_data['alert_danger'] = m
                    else:
                        template_data['alert_success'] = m
                else:
                    template_data['alert_danger'] = "Invalid CSV format."
            template_data['form'] = form
            return render(request, 'virtualclinic/admin/import.html', template_data)
        else:
            template_data['alert_danger'] = "Please choose a file to upload"
    form = ImportForm()
    template_data['form'] = form
    return render(request, 'virtualclinic/admin/import.html', template_data)
def handle_user_csv(f):
    """
    Handles a CSV containing User information.
    The first row should contain the following information
    FirstName,LastName,Account,Username,Email,Hospital
    with the following lines containing information about zero or more Users.
    :param f: An iterable of raw CSV byte rows (file object or list of lines)
    :return: The # of successes and failures
    """
    # Map the CSV role column to the Account role constant; anything
    # unrecognised falls back to a patient account, matching the original
    # if/elif chain's final else branch.
    role_constants = {
        "doctor": Account.ACCOUNT_DOCTOR,
        "admin": Account.ACCOUNT_ADMIN,
        "lab": Account.ACCOUNT_LAB,
        "chemist": Account.ACCOUNT_CHEMIST,
    }
    success = 0
    fail = 0
    is_first = True
    for row in f:
        if is_first:
            is_first = False
            continue  # skip the header row
        line = re.split('[,]', row.decode("utf-8").strip())
        if not line[0]:
            continue  # blank first field: ignore the row, as before
        f_name = line[0]
        l_name = line[1]
        role = line[2].lower()
        username = line[3].lower()
        try:
            views.register_user(
                username, 'password', f_name, l_name,
                role_constants.get(role, Account.ACCOUNT_PATIENT),
            )
            success += 1
        except (IntegrityError, ValueError):
            # Duplicate username or malformed data: count as a failure.
            fail += 1
    return success, fail
def handle_hospital_csv(f):
    """
    Handles a CSV containing Hospital information.
    The first row should contain the header:
    Name,Address,City,State,Zip,Phone
    with the following lines describing zero or more Hospitals.
    :param f: The file (or iterable of byte rows) containing the CSV
    :return: The # of successes and failures
    """
    success, fail = 0, 0
    header_seen = False
    for row in f:
        if not header_seen:
            header_seen = True
            continue  # skip the header row
        fields = re.split('[,]', row.decode("utf-8").strip())
        if not fields[0]:
            continue  # blank name field: ignore the row
        try:
            loc = Location(
                city=fields[2],
                zip=fields[4],
                state=fields[3],
                address=fields[1]
            )
            loc.save()
            new_hospital = Hospital(
                name=fields[0],
                phone=fields[5],
                location=loc,
            )
            new_hospital.save()
            success += 1
        except (IntegrityError, ValueError):
            # Duplicate or malformed record: count as a failure.
            fail += 1
    return success, fail
def csv_export_view(request):
    """Serve a CSV download of either all hospitals or all users."""
    auth_redirect = views.authentication_check(request, [Account.ACCOUNT_ADMIN])
    if auth_redirect is not None:
        return auth_redirect
    template_data = views.parse_session(request, {'form_button': "Submit"})
    if request.method == 'POST':
        choice = request.POST['export']
        if choice == 'hospitals':
            return generate_hospital_csv()
        if choice == 'users':
            return generate_user_csv()
        template_data['alert_danger'] = 'Please choose a file to download'
    template_data['form'] = ExportForm()
    return render(request, 'virtualclinic/admin/export.html', template_data)
def generate_user_csv():
    """Build an HTTP response containing every account as a CSV attachment."""
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="users.csv"'
    write = writer(response, delimiter=',', quotechar='"', quoting=QUOTE_MINIMAL)
    write.writerow(['FirstName', 'LastName', 'Role', 'Username'])
    # Numeric role codes as checked by the original if/elif chain.
    role_names = {10: 'Patient', 20: 'Doctor', 30: 'Admin', 40: 'Lab', 50: 'Chemist'}
    for a in Account.objects.all():
        write.writerow([
            a.profile.firstname,
            a.profile.lastname,
            role_names.get(a.role, 'Unknown'),
            a.user.username,
        ])
    return response
def generate_hospital_csv():
    """Build an HTTP response containing every hospital as a CSV attachment."""
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="hospitals.csv"'
    csv_out = writer(response, delimiter=',', quotechar='"', quoting=QUOTE_MINIMAL)
    csv_out.writerow(['Name', 'Address', 'City', 'State', 'Zip', 'Phone'])
    for hospital in Hospital.objects.all():
        loc = hospital.location
        csv_out.writerow([hospital.name, loc.address, loc.city, loc.state, loc.zip, hospital.phone])
    return response
def backup_data(request):
    """Dump every model table to a JSON fixture under ``backups/``.

    Replaces thirteen copy-pasted serialize/open/write/close stanzas with a
    single loop, and uses ``with`` so each file handle is closed even if a
    write fails.
    """
    # Authentication check.
    authentication_result = views.authentication_check(request, [Account.ACCOUNT_ADMIN])
    if authentication_result is not None:
        return authentication_result
    # Get the template data from the session (kept for parity with other views).
    template_data = views.parse_session(request)
    # (model, backup file stem) pairs, in the original dump order.
    exports = [
        (Speciality, "speciality"),
        (Symptom, "symptom"),
        (Location, "location"),
        (Hospital, "hospital"),
        (Profile, "profile"),
        (Account, "account"),
        (Action, "action"),
        (Appointment, "appointment"),
        (Message, "message"),
        (Prescription, "prescription"),
        (MedicalInfo, "medical_info"),
        (MedicalTest, "medical_test"),
        (Statistics, "statistics"),
    ]
    for model, stem in exports:
        data = serializers.serialize("json", model.objects.all())
        with open("backups/%s.json" % stem, "w") as out:
            out.write(data)
    return HttpResponseRedirect('/admin/activity')
| 40.996721 | 191 | 0.636356 | from csv import QUOTE_MINIMAL, writer
import re
import sqlite3
import sys
from django.core.management import call_command
from django.core import serializers
from django.shortcuts import render,redirect
from django.http import HttpResponseRedirect, HttpResponse
from django.db.utils import IntegrityError
from server.forms import SpecialityForm, SymptomForm, EmployeeRegistrationForm, ImportForm, ExportForm, HospitalForm, StatisticsForm
from server.models import Speciality, Account, Action, Hospital, Location, Statistics, Symptom, Profile, Appointment, Message, Prescription, MedicalInfo, MedicalTest
from server import logger
from server import views
def parse_speciality_delete(request):
authentication_result = views.authentication_check(request, [Account.ACCOUNT_ADMIN])
if authentication_result is not None:
return authentication_result
template_data = views.parse_session(request)
if request.method == 'POST':
if 'delete' in request.POST and 'pk' in request.POST:
pk = request.POST['pk']
try:
speciality = Speciality.objects.get(pk=pk)
except Exception:
template_data['alert_danger'] = "Unable to cancel the speciality. Please try again later."
return
speciality.delete()
logger.log(Action.ACTION_ADMIN, 'Speciality cancelled', request.user.account)
template_data['alert_success'] = "The speciality has been deleted."
return HttpResponseRedirect('/admin/speciality/')
def parse_symptom_delete(request):
authentication_result = views.authentication_check(request, [Account.ACCOUNT_ADMIN])
if authentication_result is not None:
return authentication_result
template_data = views.parse_session(request)
if request.method == 'POST':
if 'delete' in request.POST and 'pk' in request.POST:
pk = request.POST['pk']
try:
symptom = Symptom.objects.get(pk=pk)
except Exception:
template_data['alert_danger'] = "Unable to delete the symptom. Please try again later."
return
symptom.delete()
logger.log(Action.ACTION_ADMIN, 'Symptom cancelled', request.user.account)
template_data['alert_success'] = "The symptom has been deleted."
return HttpResponseRedirect('/admin/symptom/')
def user_archive(request):
authentication_result = views.authentication_check(request, [Account.ACCOUNT_ADMIN])
if authentication_result is not None:
return authentication_result
template_data = views.parse_session(request)
if request.method == 'POST':
if 'delete' in request.POST and 'pk' in request.POST:
pk = request.POST['pk']
try:
user = Account.objects.get(pk=pk)
except Exception:
template_data['alert_danger'] = "Unable to delete the user. Please try again later"
return
user.archive = True
user.save()
template_data['alert_success'] = "The user has been deleted."
return HttpResponseRedirect('/admin/users')
def view_archived_users(request):
authentication_result = views.authentication_check(request, [Account.ACCOUNT_ADMIN])
if authentication_result is not None:
return authentication_result
template_data = views.parse_session(request)
template_data['query'] = Account.objects.filter(archive=True)
return render(request, 'virtualclinic/admin/archived_users.html', template_data)
def restore_user(request):
authentication_result = views.authentication_check(request, [Account.ACCOUNT_ADMIN])
if authentication_result is not None:
return authentication_result
template_data = views.parse_session(request)
if request.method == 'POST':
if 'restore' in request.POST and 'pk' in request.POST:
pk = request.POST['pk']
try:
user = Account.objects.get(pk=pk)
except Exception:
template_data['alert_danger'] = "Unable to delete the user. Please try again later"
return HttpResponseRedirect('/admin/users')
user.archive = False
user.save()
logger.log(Action.ACTION_ADMIN, 'Admin restored the user',user)
template_data['alert_success'] = "The user has been restored."
return HttpResponseRedirect('/admin/users')
return HttpResponseRedirect('/admin/users')
def users_view(request):
authentication_result = views.authentication_check(request, [Account.ACCOUNT_ADMIN])
if authentication_result is not None:
return authentication_result
template_data = views.parse_session(request)
if request.method == 'POST':
pk = request.POST['pk']
role = request.POST['role']
account = Account.objects.get(pk=pk)
if account is not None:
account.role = role
account.save()
logger.log(Action.ACTION_ADMIN, 'Admin modified ' + account.user.username + "'s role", request.user.account)
template_data['alert_success'] = "Updated" + account.user.username + "'s role!"
template_data['query'] = Account.objects.filter(archive=False).order_by('-role')
return render(request,'virtualclinic/admin/users.html', template_data)
def activity_view(request):
authentication_result = views.authentication_check(request, [Account.ACCOUNT_ADMIN])
if authentication_result is not None: return authentication_result
template_data = views.parse_session(request)
template_data['query'] = Action.objects.all().order_by('-timePerformed')
return render(request,'virtualclinic/admin/activity.html',template_data)
def view_speciality(request):
authentication_result = views.authentication_check(request, [Account.ACCOUNT_ADMIN])
if authentication_result is not None: return authentication_result
template_data = views.parse_session(request)
template_data['query'] = Speciality.objects.all()
return render(request, 'virtualclinic/admin/speciality.html', template_data)
def view_symptom(request):
authentication_result = views.authentication_check(request, [Account.ACCOUNT_ADMIN])
if authentication_result is not None: return authentication_result
template_data = views.parse_session(request)
template_data['query'] = Symptom.objects.all()
return render(request, 'virtualclinic/admin/symptoms.html', template_data)
def add_speciality(request):
authentication_result = views.authentication_check(request, [Account.ACCOUNT_ADMIN])
if authentication_result is not None:
return authentication_result
template_data = views.parse_session(
request,
{'form_button': "Add Speciality"}
)
'POST':
form = SpecialityForm(request.POST)
if form.is_valid():
speciality = Speciality(
name=form.cleaned_data['name'],
description=form.cleaned_data['description']
)
speciality.save()
form = SpecialityForm()
template_data['alert_success'] = "Successfully added the Speciality!"
logger.log(Action.ACTION_ADMIN, 'Admin added ' + speciality.name, request.user.account)
return HttpResponseRedirect('/admin/speciality')
else:
form = SpecialityForm()
template_data['form'] = form
return render(request, 'virtualclinic/admin/add_speciality.html', template_data)
def add_symptom(request):
authentication_result = views.authentication_check(request, [Account.ACCOUNT_ADMIN])
if authentication_result is not None:
return authentication_result
template_data = views.parse_session(
request,
{'form_button': "Add Symptom"}
)
'POST':
form = SymptomForm(request.POST)
if form.is_valid():
symptom = Symptom(
name=form.cleaned_data['name'],
description=form.cleaned_data['description']
)
symptom.save()
form = SymptomForm()
template_data['alert_success'] = "Successfully added the Symptom!"
logger.log(Action.ACTION_ADMIN, 'Admin added ' + symptom.name, request.user.account)
return HttpResponseRedirect('/admin/symptom')
else:
form = SymptomForm()
template_data['form'] = form
return render(request, 'virtualclinic/admin/add_symptom.html', template_data)
def add_hospital_view(request):
authentication_result = views.authentication_check(
request,
[Account.ACCOUNT_ADMIN]
)
if authentication_result is not None: return authentication_result
template_data = views.parse_session(
request,
{'form_button':"Add Hospital"}
)
if request.method == 'POST':
form = HospitalForm(request.POST)
if form.is_valid():
location = location(
city = form.cleaned_data['city'],
zip = form.cleaned_data['zip'],
state = form.cleaned_data['state'],
address = form.cleaned_data['address']
)
location.save()
hospital = Hospital(
name = form.cleaned_data['name'],
phone = form.cleaned_data['phone'],
location = location,
)
hospital.save()
form = HospitalForm()
template_data['alert_success'] = "Successfully added the hospital!"
else:
form = HospitalForm()
template_data['form'] = form
return render(request,'virtualclinic/admin/add_hospital.html', template_data)
def createemployee_view(request):
authentication_result = views.authentication_check(request, [Account.ACCOUNT_ADMIN])
if authentication_result is not None: return authentication_result
template_data = views.parse_session(request,{'form_button':"Register"})
if request.method == 'POST':
form = EmployeeRegistrationForm(request.POST)
if form.is_valid():
user = views.register_user(
form.cleaned_data['email'],
form.cleaned_data['password_first'],
form.cleaned_data['firstname'],
form.cleaned_data['lastname'],
form.cleaned_data['employee'],
form.cleaned_data['speciality']
)
logger.log(Action.ACTION_ADMIN, 'Admin registered '+ user.username, request.user.account)
request.session['alert_success'] = "Successfully created new employee account"
return HttpResponseRedirect('/admin/users/')
else:
form = EmployeeRegistrationForm()
template_data['form'] = form
return render(request,'virtualclinic/admin/createemployee.html', template_data)
def statistic_view(request):
authentication_result = views.authentication_check(request, [Account.ACCOUNT_ADMIN])
if authentication_result is not None: return authentication_result
template_data = views.parse_session(request, {'form_button': "Get Statistics"})
default = {}
request.POST._mutable = True
request.POST.update(default)
predate_filter = Action.objects.all()
template_data['pre_filter'] = predate_filter.count()
form = StatisticsForm(request.POST)
if request.method == 'POST':
if form.is_valid():
statistics = Statistics(
startDate = form.cleaned_data['startDate'],
endDate = form.cleaned_data['endDate'],
)
date_filter = Action.objects.all().filter(timePerformed__range = (statistics.startDate,statistics.endDate))
template_data['temp'] = date_filter.count()
template_data['start'] = statistics.startDate
template_data['end'] = statistics.endDate
template_data['total_logins'] = Action.objects.filter(description__icontains="Account login",timePerformed__range = (statistics.startDate, statistics.endDate) ).count()
template_data['total_logouts'] = Action.objects.filter(description__icontains="Account logout",timePerformed__range = (statistics.startDate, statistics.endDate)).count()
template_data['total_appointments'] = Action.objects.filter(description__icontains="Appointment created",timePerformed__range = (statistics.startDate, statistics.endDate)).count()
template_data['total_med_tests'] = Action.objects.filter(description__icontains="Medical Test created",timePerformed__range = (statistics.startDate, statistics.endDate)).count()
template_data['total_registered'] = Action.objects.filter(description__icontains="registered",timePerformed__range = (statistics.startDate, statistics.endDate)).count()
else:
form._errors = {}
statistics = Statistics(
startDate = 0,
endDate = 0,
)
errdate_filter = Action.objects.all()
template_data['errdate_filter'] = errdate_filter.count()
template_data['start'] = statistics.startDate
template_data['end'] = statistics.endDate
template_data['total_logins'] = 0
template_data['total_logouts'] = 0
template_data['total_appointments'] = 0
template_data['total_med_tests'] = 0
template_data['total_registered'] = 0
template_data['form'] =form
return render(request,'virtualclinic/admin/statistics.html', template_data)
def csv_import_view(request):
authentication_result = views.authentication_check(request, [Account.ACCOUNT_ADMIN])
if authentication_result is not None: return authentication_result
template_data = views.parse_session(request, {'form_button': "Submit"})
if request.method=='POST':
form = ImportForm(request.POST, request.FILES)
if form.is_valid():
file = request.FILES['upload']
for line in file:
first_word = re.split('[,]',line.decode("utf-8").strip())[0].lower()
if first_word == 'firstname':
count = handle_user_csv(file)
m = str(count[0])+' users are successfully uploaded, '+str(count[1])+' duplicate accounts.'
if count[0] == 0:
template_data['alert_danger'] = m
else:
template_data['alert_success'] = m
elif first_word=='name':
count = handle_hospital_csv(file)
m = str(count[0])+' hospitals are successfully uploaded, '+str(count[1])+' duplicate hospitals.'
if count[0] == 0:
template_data['alert_danger'] = m
else:
template_data['alert_success'] = m
else:
template_data['alert_danger'] = "Invalid CSV format."
template_data['form'] = form
return render(request,'virtualclinic/admin/import.html', template_data)
else:
template_data['alert_danger'] = "Please choose a file to upload"
form = ImportForm()
template_data['form'] = form
return render(request,'virtualclinic/admin/import.html',template_data)
def handle_user_csv(f):
success = 0
fail = 0
is_first = True
for row in f:
if is_first:
is_first=False
continue
line = re.split('[,]',row.decode("utf-8").strip())
if line[0]:
f_name = line[0]
l_name = line[1]
role = line[2].lower()
username = line[3].lower()
try:
if role== "doctor":
views.register_user(
username,'password',f_name,l_name,
Account.ACCOUNT_DOCTOR,
)
elif role == "admin":
views.register_user(
username, 'password', f_name, l_name,
Account.ACCOUNT_ADMIN,
)
elif role == "lab":
views.register_user(
username, 'password', f_name, l_name,
Account.ACCOUNT_LAB,
)
elif role == "chemist":
views.register_user(
username, 'password', f_name, l_name,
Account.ACCOUNT_CHEMIST,
)
else:
views.register_user(
username, 'password', f_name, l_name,
Account.ACCOUNT_PATIENT,
)
success+=1
except (IntegrityError, ValueError):
fail+=1
continue
return success,fail
def handle_hospital_csv(f):
success = 0
fail = 0
is_first = True
for row in f:
if is_first:
is_first = False
continue
line = re.split('[,]',row.decode("utf-8").strip())
if line[0]:
hosp = line[0]
address = line[1]
city = line[2]
state = line[3]
zip = line[4]
phone = line[5]
try:
location = Location(
city = city,
zip = zip,
state = state,
address = address
)
location.save()
hospital = Hospital(
name = hosp,
phone = phone,
location = location,
)
hospital.save()
success+=1
except (IntegrityError, ValueError):
fail+=1
continue
return success, fail
def csv_export_view(request):
authentication_result = views.authentication_check(request,[Account.ACCOUNT_ADMIN])
if authentication_result is not None:
return authentication_result
template_data = views.parse_session(request,{'form_button':"Submit"})
if request.method == 'POST':
if request.POST['export'] == 'hospitals':
return generate_hospital_csv()
elif request.POST['export'] == 'users':
return generate_user_csv()
else:
template_data['alert_danger'] = 'Please choose a file to download'
template_data['form'] = ExportForm()
return render(request,'virtualclinic/admin/export.html', template_data)
def generate_user_csv():
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="users.csv"'
write = writer(response, delimiter=',', quotechar='"', quoting=QUOTE_MINIMAL)
write.writerow(['FirstName', 'LastName', 'Role', 'Username'])
for a in Account.objects.all():
firstname = a.profile.firstname
lastname = a.profile.lastname
role = a.role
username = a.user.username
if role== 10:
role = 'Patient'
elif role == 20:
role='Doctor'
elif role== 30:
role='Admin'
elif role == 40:
role='Lab'
elif role == 50:
role='Chemist'
else:
role='Unknown'
write.writerow([firstname,lastname,role,username])
return response
def generate_hospital_csv():
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="hospitals.csv"'
write = writer(response,delimiter=',', quotechar='"',quoting=QUOTE_MINIMAL)
write.writerow(['Name','Address','City','State','Zip','Phone'])
for h in Hospital.objects.all():
write.writerow([h.name,h.location.address,h.location.city,h.location.state,h.location.zip,h.phone])
return response
def backup_data(request):
authentication_result = views.authentication_check(request, [Account.ACCOUNT_ADMIN])
if authentication_result is not None: return authentication_result
template_data = views.parse_session(request)
data = serializers.serialize("json", Speciality.objects.all())
out = open("backups/speciality.json", "w")
out.write(data)
out.close()
data = serializers.serialize("json", Symptom.objects.all())
out = open("backups/symptom.json", "w")
out.write(data)
out.close()
data = serializers.serialize("json", Location.objects.all())
out = open("backups/location.json", "w")
out.write(data)
out.close()
data = serializers.serialize("json", Hospital.objects.all())
out = open("backups/hospital.json", "w")
out.write(data)
out.close()
data = serializers.serialize("json", Profile.objects.all())
out = open("backups/profile.json", "w")
out.write(data)
out.close()
data = serializers.serialize("json", Account.objects.all())
out = open("backups/account.json", "w")
out.write(data)
out.close()
data = serializers.serialize("json", Action.objects.all())
out = open("backups/action.json", "w")
out.write(data)
out.close()
data = serializers.serialize("json", Appointment.objects.all())
out = open("backups/appointment.json", "w")
out.write(data)
out.close()
data = serializers.serialize("json", Message.objects.all())
out = open("backups/message.json", "w")
out.write(data)
out.close()
data = serializers.serialize("json", Prescription.objects.all())
out = open("backups/prescription.json", "w")
out.write(data)
out.close()
data = serializers.serialize("json", MedicalInfo.objects.all())
out = open("backups/medical_info.json", "w")
out.write(data)
out.close()
data = serializers.serialize("json", MedicalTest.objects.all())
out = open("backups/medical_test.json", "w")
out.write(data)
out.close()
data = serializers.serialize("json", Statistics.objects.all())
out = open("backups/statistics.json", "w")
out.write(data)
out.close()
return HttpResponseRedirect('/admin/activity')
| true | true |
1c34b152f6690e26ed80dd650bad60f1f8282b89 | 3,838 | py | Python | tests/test_create_wish.py | Zeebrow/wish | 9a0efeb70e1646ed12cac03b2419cbeca10e3c1c | [
"MIT"
] | null | null | null | tests/test_create_wish.py | Zeebrow/wish | 9a0efeb70e1646ed12cac03b2419cbeca10e3c1c | [
"MIT"
] | 3 | 2021-09-26T11:33:24.000Z | 2021-10-16T01:39:19.000Z | tests/test_create_wish.py | Zeebrow/wish | 9a0efeb70e1646ed12cac03b2419cbeca10e3c1c | [
"MIT"
] | null | null | null | import unittest
from random import randint
import shutil
from os.path import basename
from os import PathLike
from pathlib import Path
from wishlist import Wish
class TestCreateWish(unittest.TestCase):
def gen_temp_wishlist(self, identifier: str):
"""
Generates a temporary wishlist (prj-skel directory and wishlist.md)
by copying a template
"""
tempdir_name = f"{identifier}_{randint(1000,9999)}_repo"
# for when run from wish repo's home
basedir = Path(__file__).parent.resolve()
newdir = basedir / tempdir_name
shutil.copytree(Path(basedir/"fixture_repo"), newdir)
return newdir
def setUp(self):
""" """
self.this_repo = self.gen_temp_wishlist(identifier="test_create")
self.this_wishlist = self.this_repo / "wishlist.md"
self.w1 = Wish("test1", repo_path=self.this_repo)
self.w2 = Wish("test2", repo_path=self.this_repo)
self.w3 = Wish("test3", repo_path=self.this_repo)
self.w4 = Wish("test4", repo_path=self.this_repo)
def tearDown(self):
shutil.rmtree(self.this_repo)
def test_wish_doesnt_exist_until_create(self):
"""Not an attribute test, since depends on success of create()"""
new_w5 = Wish("new_wish_5", repo_path=self.this_repo)
self.assertFalse(new_w5.exists)
new_w5.create()
self.assertTrue(new_w5.exists)
def test_create_raises_on_fail(self):
"""TODO
Decide what constitutes 'failure to create wish' - fail to write file?
fail to git commit?
???
"""
pass
def test_create_wish_name_is_configurable(self):
"""this might be a frivolous test"""
rand_wishname = f"new_wish_{randint(1000,9999)}"
new_w5 = Wish(rand_wishname, repo_path=self.this_repo)
self.assertEqual(new_w5.name, rand_wishname)
def test_wish_attributes(self):
return
"""TODO: not exclusive to 'Wish().create()', needs to move"""
new_w5 = Wish("new_wish_5", repo_path=self.this_repo)
self.assertEqual(new_w5.repo_path, self.this_repo)
self.assertEqual(new_w5.prj_path, self.wish_prj_base_dir)
self.assertEqual(new_w5.readme, self.wish_readme)
self.assertIsInstance(new_w5.prj_path, PathLike)
def test_create_wish_name_equals_prj_skel_dir_name(self):
"""
Changes to how directories and files are named should fail tests
"""
new_w5 = Wish("new_wish_5", repo_path=self.this_repo)
new_w5.create()
self.assertEqual(new_w5.name, basename(new_w5.prj_path))
def test_create_wish_creates_prj_skel(self):
"""Test for README.md"""
wishname = "new_wish_5"
new_w5 = Wish(wishname, repo_path=self.this_repo)
self.assertFalse(Path(self.this_repo / "prj-skel" / wishname / "README.md").exists())
new_w5.create()
self.assertTrue(Path(self.this_repo / "prj-skel" / wishname / "README.md").exists())
def test_created_wish_block_equals_prj_readme(self):
new_w5 = Wish("new_wish_5", repo_path=self.this_repo)
new_w5.create()
with open(new_w5.readme, 'r') as md:
self.assertEqual(new_w5.block, md.read())
def test_create_wish_appends_to_wishlist_non_destructively(self):
with open(self.this_wishlist, 'r') as wl:
before_create = wl.read()
new_w5 = Wish("new_wish_5", repo_path=self.this_repo)
new_w5.create()
with open(self.this_wishlist, 'r') as wl:
after_create = wl.read()
self.assertEqual(len(before_create), len(after_create) - len(new_w5.block))
# def test_create_on_existing_wish_throws(self):
# self.assertTrue(False)
if __name__ == '__main__':
unittest.main()
| 38 | 93 | 0.657634 | import unittest
from random import randint
import shutil
from os.path import basename
from os import PathLike
from pathlib import Path
from wishlist import Wish
class TestCreateWish(unittest.TestCase):
def gen_temp_wishlist(self, identifier: str):
tempdir_name = f"{identifier}_{randint(1000,9999)}_repo"
basedir = Path(__file__).parent.resolve()
newdir = basedir / tempdir_name
shutil.copytree(Path(basedir/"fixture_repo"), newdir)
return newdir
def setUp(self):
self.this_repo = self.gen_temp_wishlist(identifier="test_create")
self.this_wishlist = self.this_repo / "wishlist.md"
self.w1 = Wish("test1", repo_path=self.this_repo)
self.w2 = Wish("test2", repo_path=self.this_repo)
self.w3 = Wish("test3", repo_path=self.this_repo)
self.w4 = Wish("test4", repo_path=self.this_repo)
def tearDown(self):
shutil.rmtree(self.this_repo)
def test_wish_doesnt_exist_until_create(self):
new_w5 = Wish("new_wish_5", repo_path=self.this_repo)
self.assertFalse(new_w5.exists)
new_w5.create()
self.assertTrue(new_w5.exists)
def test_create_raises_on_fail(self):
pass
def test_create_wish_name_is_configurable(self):
rand_wishname = f"new_wish_{randint(1000,9999)}"
new_w5 = Wish(rand_wishname, repo_path=self.this_repo)
self.assertEqual(new_w5.name, rand_wishname)
def test_wish_attributes(self):
return
new_w5 = Wish("new_wish_5", repo_path=self.this_repo)
self.assertEqual(new_w5.repo_path, self.this_repo)
self.assertEqual(new_w5.prj_path, self.wish_prj_base_dir)
self.assertEqual(new_w5.readme, self.wish_readme)
self.assertIsInstance(new_w5.prj_path, PathLike)
def test_create_wish_name_equals_prj_skel_dir_name(self):
new_w5 = Wish("new_wish_5", repo_path=self.this_repo)
new_w5.create()
self.assertEqual(new_w5.name, basename(new_w5.prj_path))
def test_create_wish_creates_prj_skel(self):
wishname = "new_wish_5"
new_w5 = Wish(wishname, repo_path=self.this_repo)
self.assertFalse(Path(self.this_repo / "prj-skel" / wishname / "README.md").exists())
new_w5.create()
self.assertTrue(Path(self.this_repo / "prj-skel" / wishname / "README.md").exists())
def test_created_wish_block_equals_prj_readme(self):
new_w5 = Wish("new_wish_5", repo_path=self.this_repo)
new_w5.create()
with open(new_w5.readme, 'r') as md:
self.assertEqual(new_w5.block, md.read())
def test_create_wish_appends_to_wishlist_non_destructively(self):
with open(self.this_wishlist, 'r') as wl:
before_create = wl.read()
new_w5 = Wish("new_wish_5", repo_path=self.this_repo)
new_w5.create()
with open(self.this_wishlist, 'r') as wl:
after_create = wl.read()
self.assertEqual(len(before_create), len(after_create) - len(new_w5.block))
# def test_create_on_existing_wish_throws(self):
# self.assertTrue(False)
if __name__ == '__main__':
unittest.main()
| true | true |
1c34b16a4fd6526b3ce139414ebb3d023c1737ec | 761 | py | Python | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/numtrees_100/rule_16.py | apcarrik/kaggle | 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf | [
"MIT"
] | null | null | null | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/numtrees_100/rule_16.py | apcarrik/kaggle | 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf | [
"MIT"
] | null | null | null | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/numtrees_100/rule_16.py | apcarrik/kaggle | 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf | [
"MIT"
] | null | null | null | def findDecision(obj): #obj[0]: Driving_to, obj[1]: Passanger, obj[2]: Weather, obj[3]: Temperature, obj[4]: Time, obj[5]: Coupon, obj[6]: Coupon_validity, obj[7]: Gender, obj[8]: Age, obj[9]: Maritalstatus, obj[10]: Children, obj[11]: Education, obj[12]: Occupation, obj[13]: Income, obj[14]: Bar, obj[15]: Coffeehouse, obj[16]: Carryaway, obj[17]: Restaurantlessthan20, obj[18]: Restaurant20to50, obj[19]: Direction_same, obj[20]: Distance
# {"feature": "Children", "instances": 10, "metric_value": 0.8813, "depth": 1}
if obj[10]<=0:
return 'True'
elif obj[10]>0:
# {"feature": "Gender", "instances": 4, "metric_value": 0.8113, "depth": 2}
if obj[7]>0:
return 'False'
elif obj[7]<=0:
return 'True'
else: return 'True'
else: return 'False'
| 58.538462 | 441 | 0.65703 | def findDecision(obj):
if obj[10]<=0:
return 'True'
elif obj[10]>0:
if obj[7]>0:
return 'False'
elif obj[7]<=0:
return 'True'
else: return 'True'
else: return 'False'
| true | true |
1c34b18830edf23d2222dc9e79344ee1793436e5 | 33,667 | py | Python | sdk/python/pulumi_azure_nextgen/web/v20200901/list_web_app_auth_settings.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/web/v20200901/list_web_app_auth_settings.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/web/v20200901/list_web_app_auth_settings.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'ListWebAppAuthSettingsResult',
'AwaitableListWebAppAuthSettingsResult',
'list_web_app_auth_settings',
]
@pulumi.output_type
class ListWebAppAuthSettingsResult:
    """
    Configuration settings for the Azure App Service Authentication / Authorization feature.
    """
    def __init__(__self__, aad_claims_authorization=None, additional_login_params=None, allowed_audiences=None, allowed_external_redirect_urls=None, auth_file_path=None, client_id=None, client_secret=None, client_secret_certificate_thumbprint=None, client_secret_setting_name=None, default_provider=None, enabled=None, facebook_app_id=None, facebook_app_secret=None, facebook_app_secret_setting_name=None, facebook_o_auth_scopes=None, git_hub_client_id=None, git_hub_client_secret=None, git_hub_client_secret_setting_name=None, git_hub_o_auth_scopes=None, google_client_id=None, google_client_secret=None, google_client_secret_setting_name=None, google_o_auth_scopes=None, id=None, is_auth_from_file=None, issuer=None, kind=None, microsoft_account_client_id=None, microsoft_account_client_secret=None, microsoft_account_client_secret_setting_name=None, microsoft_account_o_auth_scopes=None, name=None, runtime_version=None, system_data=None, token_refresh_extension_hours=None, token_store_enabled=None, twitter_consumer_key=None, twitter_consumer_secret=None, twitter_consumer_secret_setting_name=None, type=None, unauthenticated_client_action=None, validate_issuer=None):
        """Store the raw invoke result fields on this output-type instance.

        Each argument is type-checked only when truthy (falsy values such as
        None skip the isinstance guard) and then recorded via ``pulumi.set``
        so the ``@pulumi.getter`` properties below can retrieve it.
        """
        if aad_claims_authorization and not isinstance(aad_claims_authorization, str):
            raise TypeError("Expected argument 'aad_claims_authorization' to be a str")
        pulumi.set(__self__, "aad_claims_authorization", aad_claims_authorization)
        if additional_login_params and not isinstance(additional_login_params, list):
            raise TypeError("Expected argument 'additional_login_params' to be a list")
        pulumi.set(__self__, "additional_login_params", additional_login_params)
        if allowed_audiences and not isinstance(allowed_audiences, list):
            raise TypeError("Expected argument 'allowed_audiences' to be a list")
        pulumi.set(__self__, "allowed_audiences", allowed_audiences)
        if allowed_external_redirect_urls and not isinstance(allowed_external_redirect_urls, list):
            raise TypeError("Expected argument 'allowed_external_redirect_urls' to be a list")
        pulumi.set(__self__, "allowed_external_redirect_urls", allowed_external_redirect_urls)
        if auth_file_path and not isinstance(auth_file_path, str):
            raise TypeError("Expected argument 'auth_file_path' to be a str")
        pulumi.set(__self__, "auth_file_path", auth_file_path)
        if client_id and not isinstance(client_id, str):
            raise TypeError("Expected argument 'client_id' to be a str")
        pulumi.set(__self__, "client_id", client_id)
        if client_secret and not isinstance(client_secret, str):
            raise TypeError("Expected argument 'client_secret' to be a str")
        pulumi.set(__self__, "client_secret", client_secret)
        if client_secret_certificate_thumbprint and not isinstance(client_secret_certificate_thumbprint, str):
            raise TypeError("Expected argument 'client_secret_certificate_thumbprint' to be a str")
        pulumi.set(__self__, "client_secret_certificate_thumbprint", client_secret_certificate_thumbprint)
        if client_secret_setting_name and not isinstance(client_secret_setting_name, str):
            raise TypeError("Expected argument 'client_secret_setting_name' to be a str")
        pulumi.set(__self__, "client_secret_setting_name", client_secret_setting_name)
        if default_provider and not isinstance(default_provider, str):
            raise TypeError("Expected argument 'default_provider' to be a str")
        pulumi.set(__self__, "default_provider", default_provider)
        if enabled and not isinstance(enabled, bool):
            raise TypeError("Expected argument 'enabled' to be a bool")
        pulumi.set(__self__, "enabled", enabled)
        if facebook_app_id and not isinstance(facebook_app_id, str):
            raise TypeError("Expected argument 'facebook_app_id' to be a str")
        pulumi.set(__self__, "facebook_app_id", facebook_app_id)
        if facebook_app_secret and not isinstance(facebook_app_secret, str):
            raise TypeError("Expected argument 'facebook_app_secret' to be a str")
        pulumi.set(__self__, "facebook_app_secret", facebook_app_secret)
        if facebook_app_secret_setting_name and not isinstance(facebook_app_secret_setting_name, str):
            raise TypeError("Expected argument 'facebook_app_secret_setting_name' to be a str")
        pulumi.set(__self__, "facebook_app_secret_setting_name", facebook_app_secret_setting_name)
        if facebook_o_auth_scopes and not isinstance(facebook_o_auth_scopes, list):
            raise TypeError("Expected argument 'facebook_o_auth_scopes' to be a list")
        pulumi.set(__self__, "facebook_o_auth_scopes", facebook_o_auth_scopes)
        if git_hub_client_id and not isinstance(git_hub_client_id, str):
            raise TypeError("Expected argument 'git_hub_client_id' to be a str")
        pulumi.set(__self__, "git_hub_client_id", git_hub_client_id)
        if git_hub_client_secret and not isinstance(git_hub_client_secret, str):
            raise TypeError("Expected argument 'git_hub_client_secret' to be a str")
        pulumi.set(__self__, "git_hub_client_secret", git_hub_client_secret)
        if git_hub_client_secret_setting_name and not isinstance(git_hub_client_secret_setting_name, str):
            raise TypeError("Expected argument 'git_hub_client_secret_setting_name' to be a str")
        pulumi.set(__self__, "git_hub_client_secret_setting_name", git_hub_client_secret_setting_name)
        if git_hub_o_auth_scopes and not isinstance(git_hub_o_auth_scopes, list):
            raise TypeError("Expected argument 'git_hub_o_auth_scopes' to be a list")
        pulumi.set(__self__, "git_hub_o_auth_scopes", git_hub_o_auth_scopes)
        if google_client_id and not isinstance(google_client_id, str):
            raise TypeError("Expected argument 'google_client_id' to be a str")
        pulumi.set(__self__, "google_client_id", google_client_id)
        if google_client_secret and not isinstance(google_client_secret, str):
            raise TypeError("Expected argument 'google_client_secret' to be a str")
        pulumi.set(__self__, "google_client_secret", google_client_secret)
        if google_client_secret_setting_name and not isinstance(google_client_secret_setting_name, str):
            raise TypeError("Expected argument 'google_client_secret_setting_name' to be a str")
        pulumi.set(__self__, "google_client_secret_setting_name", google_client_secret_setting_name)
        if google_o_auth_scopes and not isinstance(google_o_auth_scopes, list):
            raise TypeError("Expected argument 'google_o_auth_scopes' to be a list")
        pulumi.set(__self__, "google_o_auth_scopes", google_o_auth_scopes)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if is_auth_from_file and not isinstance(is_auth_from_file, str):
            raise TypeError("Expected argument 'is_auth_from_file' to be a str")
        pulumi.set(__self__, "is_auth_from_file", is_auth_from_file)
        if issuer and not isinstance(issuer, str):
            raise TypeError("Expected argument 'issuer' to be a str")
        pulumi.set(__self__, "issuer", issuer)
        if kind and not isinstance(kind, str):
            raise TypeError("Expected argument 'kind' to be a str")
        pulumi.set(__self__, "kind", kind)
        if microsoft_account_client_id and not isinstance(microsoft_account_client_id, str):
            raise TypeError("Expected argument 'microsoft_account_client_id' to be a str")
        pulumi.set(__self__, "microsoft_account_client_id", microsoft_account_client_id)
        if microsoft_account_client_secret and not isinstance(microsoft_account_client_secret, str):
            raise TypeError("Expected argument 'microsoft_account_client_secret' to be a str")
        pulumi.set(__self__, "microsoft_account_client_secret", microsoft_account_client_secret)
        if microsoft_account_client_secret_setting_name and not isinstance(microsoft_account_client_secret_setting_name, str):
            raise TypeError("Expected argument 'microsoft_account_client_secret_setting_name' to be a str")
        pulumi.set(__self__, "microsoft_account_client_secret_setting_name", microsoft_account_client_secret_setting_name)
        if microsoft_account_o_auth_scopes and not isinstance(microsoft_account_o_auth_scopes, list):
            raise TypeError("Expected argument 'microsoft_account_o_auth_scopes' to be a list")
        pulumi.set(__self__, "microsoft_account_o_auth_scopes", microsoft_account_o_auth_scopes)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if runtime_version and not isinstance(runtime_version, str):
            raise TypeError("Expected argument 'runtime_version' to be a str")
        pulumi.set(__self__, "runtime_version", runtime_version)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if token_refresh_extension_hours and not isinstance(token_refresh_extension_hours, float):
            raise TypeError("Expected argument 'token_refresh_extension_hours' to be a float")
        pulumi.set(__self__, "token_refresh_extension_hours", token_refresh_extension_hours)
        if token_store_enabled and not isinstance(token_store_enabled, bool):
            raise TypeError("Expected argument 'token_store_enabled' to be a bool")
        pulumi.set(__self__, "token_store_enabled", token_store_enabled)
        if twitter_consumer_key and not isinstance(twitter_consumer_key, str):
            raise TypeError("Expected argument 'twitter_consumer_key' to be a str")
        pulumi.set(__self__, "twitter_consumer_key", twitter_consumer_key)
        if twitter_consumer_secret and not isinstance(twitter_consumer_secret, str):
            raise TypeError("Expected argument 'twitter_consumer_secret' to be a str")
        pulumi.set(__self__, "twitter_consumer_secret", twitter_consumer_secret)
        if twitter_consumer_secret_setting_name and not isinstance(twitter_consumer_secret_setting_name, str):
            raise TypeError("Expected argument 'twitter_consumer_secret_setting_name' to be a str")
        pulumi.set(__self__, "twitter_consumer_secret_setting_name", twitter_consumer_secret_setting_name)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
        if unauthenticated_client_action and not isinstance(unauthenticated_client_action, str):
            raise TypeError("Expected argument 'unauthenticated_client_action' to be a str")
        pulumi.set(__self__, "unauthenticated_client_action", unauthenticated_client_action)
        if validate_issuer and not isinstance(validate_issuer, bool):
            raise TypeError("Expected argument 'validate_issuer' to be a bool")
        pulumi.set(__self__, "validate_issuer", validate_issuer)
    @property
    @pulumi.getter(name="aadClaimsAuthorization")
    def aad_claims_authorization(self) -> Optional[str]:
        """
        Gets a JSON string containing the Azure AD Acl settings.
        """
        return pulumi.get(self, "aad_claims_authorization")
    @property
    @pulumi.getter(name="additionalLoginParams")
    def additional_login_params(self) -> Optional[Sequence[str]]:
        """
        Login parameters to send to the OpenID Connect authorization endpoint when
        a user logs in. Each parameter must be in the form "key=value".
        """
        return pulumi.get(self, "additional_login_params")
    @property
    @pulumi.getter(name="allowedAudiences")
    def allowed_audiences(self) -> Optional[Sequence[str]]:
        """
        Allowed audience values to consider when validating JWTs issued by
        Azure Active Directory. Note that the <code>ClientID</code> value is always considered an
        allowed audience, regardless of this setting.
        """
        return pulumi.get(self, "allowed_audiences")
    @property
    @pulumi.getter(name="allowedExternalRedirectUrls")
    def allowed_external_redirect_urls(self) -> Optional[Sequence[str]]:
        """
        External URLs that can be redirected to as part of logging in or logging out of the app. Note that the query string part of the URL is ignored.
        This is an advanced setting typically only needed by Windows Store application backends.
        Note that URLs within the current domain are always implicitly allowed.
        """
        return pulumi.get(self, "allowed_external_redirect_urls")
    @property
    @pulumi.getter(name="authFilePath")
    def auth_file_path(self) -> Optional[str]:
        """
        The path of the config file containing auth settings.
        If the path is relative, base will the site's root directory.
        """
        return pulumi.get(self, "auth_file_path")
    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> Optional[str]:
        """
        The Client ID of this relying party application, known as the client_id.
        This setting is required for enabling OpenID Connection authentication with Azure Active Directory or
        other 3rd party OpenID Connect providers.
        More information on OpenID Connect: http://openid.net/specs/openid-connect-core-1_0.html
        """
        return pulumi.get(self, "client_id")
    @property
    @pulumi.getter(name="clientSecret")
    def client_secret(self) -> Optional[str]:
        """
        The Client Secret of this relying party application (in Azure Active Directory, this is also referred to as the Key).
        This setting is optional. If no client secret is configured, the OpenID Connect implicit auth flow is used to authenticate end users.
        Otherwise, the OpenID Connect Authorization Code Flow is used to authenticate end users.
        More information on OpenID Connect: http://openid.net/specs/openid-connect-core-1_0.html
        """
        return pulumi.get(self, "client_secret")
    @property
    @pulumi.getter(name="clientSecretCertificateThumbprint")
    def client_secret_certificate_thumbprint(self) -> Optional[str]:
        """
        An alternative to the client secret, that is the thumbprint of a certificate used for signing purposes. This property acts as
        a replacement for the Client Secret. It is also optional.
        """
        return pulumi.get(self, "client_secret_certificate_thumbprint")
    @property
    @pulumi.getter(name="clientSecretSettingName")
    def client_secret_setting_name(self) -> Optional[str]:
        """
        The app setting name that contains the client secret of the relying party application.
        """
        return pulumi.get(self, "client_secret_setting_name")
    @property
    @pulumi.getter(name="defaultProvider")
    def default_provider(self) -> Optional[str]:
        """
        The default authentication provider to use when multiple providers are configured.
        This setting is only needed if multiple providers are configured and the unauthenticated client
        action is set to "RedirectToLoginPage".
        """
        return pulumi.get(self, "default_provider")
    @property
    @pulumi.getter
    def enabled(self) -> Optional[bool]:
        """
        <code>true</code> if the Authentication / Authorization feature is enabled for the current app; otherwise, <code>false</code>.
        """
        return pulumi.get(self, "enabled")
    @property
    @pulumi.getter(name="facebookAppId")
    def facebook_app_id(self) -> Optional[str]:
        """
        The App ID of the Facebook app used for login.
        This setting is required for enabling Facebook Login.
        Facebook Login documentation: https://developers.facebook.com/docs/facebook-login
        """
        return pulumi.get(self, "facebook_app_id")
    @property
    @pulumi.getter(name="facebookAppSecret")
    def facebook_app_secret(self) -> Optional[str]:
        """
        The App Secret of the Facebook app used for Facebook Login.
        This setting is required for enabling Facebook Login.
        Facebook Login documentation: https://developers.facebook.com/docs/facebook-login
        """
        return pulumi.get(self, "facebook_app_secret")
    @property
    @pulumi.getter(name="facebookAppSecretSettingName")
    def facebook_app_secret_setting_name(self) -> Optional[str]:
        """
        The app setting name that contains the app secret used for Facebook Login.
        """
        return pulumi.get(self, "facebook_app_secret_setting_name")
    @property
    @pulumi.getter(name="facebookOAuthScopes")
    def facebook_o_auth_scopes(self) -> Optional[Sequence[str]]:
        """
        The OAuth 2.0 scopes that will be requested as part of Facebook Login authentication.
        This setting is optional.
        Facebook Login documentation: https://developers.facebook.com/docs/facebook-login
        """
        return pulumi.get(self, "facebook_o_auth_scopes")
    @property
    @pulumi.getter(name="gitHubClientId")
    def git_hub_client_id(self) -> Optional[str]:
        """
        The Client Id of the GitHub app used for login.
        This setting is required for enabling Github login
        """
        return pulumi.get(self, "git_hub_client_id")
    @property
    @pulumi.getter(name="gitHubClientSecret")
    def git_hub_client_secret(self) -> Optional[str]:
        """
        The Client Secret of the GitHub app used for Github Login.
        This setting is required for enabling Github login.
        """
        return pulumi.get(self, "git_hub_client_secret")
    @property
    @pulumi.getter(name="gitHubClientSecretSettingName")
    def git_hub_client_secret_setting_name(self) -> Optional[str]:
        """
        The app setting name that contains the client secret of the Github
        app used for GitHub Login.
        """
        return pulumi.get(self, "git_hub_client_secret_setting_name")
    @property
    @pulumi.getter(name="gitHubOAuthScopes")
    def git_hub_o_auth_scopes(self) -> Optional[Sequence[str]]:
        """
        The OAuth 2.0 scopes that will be requested as part of GitHub Login authentication.
        This setting is optional
        """
        return pulumi.get(self, "git_hub_o_auth_scopes")
    @property
    @pulumi.getter(name="googleClientId")
    def google_client_id(self) -> Optional[str]:
        """
        The OpenID Connect Client ID for the Google web application.
        This setting is required for enabling Google Sign-In.
        Google Sign-In documentation: https://developers.google.com/identity/sign-in/web/
        """
        return pulumi.get(self, "google_client_id")
    @property
    @pulumi.getter(name="googleClientSecret")
    def google_client_secret(self) -> Optional[str]:
        """
        The client secret associated with the Google web application.
        This setting is required for enabling Google Sign-In.
        Google Sign-In documentation: https://developers.google.com/identity/sign-in/web/
        """
        return pulumi.get(self, "google_client_secret")
    @property
    @pulumi.getter(name="googleClientSecretSettingName")
    def google_client_secret_setting_name(self) -> Optional[str]:
        """
        The app setting name that contains the client secret associated with
        the Google web application.
        """
        return pulumi.get(self, "google_client_secret_setting_name")
    @property
    @pulumi.getter(name="googleOAuthScopes")
    def google_o_auth_scopes(self) -> Optional[Sequence[str]]:
        """
        The OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication.
        This setting is optional. If not specified, "openid", "profile", and "email" are used as default scopes.
        Google Sign-In documentation: https://developers.google.com/identity/sign-in/web/
        """
        return pulumi.get(self, "google_o_auth_scopes")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource Id.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="isAuthFromFile")
    def is_auth_from_file(self) -> Optional[str]:
        """
        "true" if the auth config settings should be read from a file,
        "false" otherwise
        """
        return pulumi.get(self, "is_auth_from_file")
    @property
    @pulumi.getter
    def issuer(self) -> Optional[str]:
        """
        The OpenID Connect Issuer URI that represents the entity which issues access tokens for this application.
        When using Azure Active Directory, this value is the URI of the directory tenant, e.g. https://sts.windows.net/{tenant-guid}/.
        This URI is a case-sensitive identifier for the token issuer.
        More information on OpenID Connect Discovery: http://openid.net/specs/openid-connect-discovery-1_0.html
        """
        return pulumi.get(self, "issuer")
    @property
    @pulumi.getter
    def kind(self) -> Optional[str]:
        """
        Kind of resource.
        """
        return pulumi.get(self, "kind")
    @property
    @pulumi.getter(name="microsoftAccountClientId")
    def microsoft_account_client_id(self) -> Optional[str]:
        """
        The OAuth 2.0 client ID that was created for the app used for authentication.
        This setting is required for enabling Microsoft Account authentication.
        Microsoft Account OAuth documentation: https://dev.onedrive.com/auth/msa_oauth.htm
        """
        return pulumi.get(self, "microsoft_account_client_id")
    @property
    @pulumi.getter(name="microsoftAccountClientSecret")
    def microsoft_account_client_secret(self) -> Optional[str]:
        """
        The OAuth 2.0 client secret that was created for the app used for authentication.
        This setting is required for enabling Microsoft Account authentication.
        Microsoft Account OAuth documentation: https://dev.onedrive.com/auth/msa_oauth.htm
        """
        return pulumi.get(self, "microsoft_account_client_secret")
    @property
    @pulumi.getter(name="microsoftAccountClientSecretSettingName")
    def microsoft_account_client_secret_setting_name(self) -> Optional[str]:
        """
        The app setting name containing the OAuth 2.0 client secret that was created for the
        app used for authentication.
        """
        return pulumi.get(self, "microsoft_account_client_secret_setting_name")
    @property
    @pulumi.getter(name="microsoftAccountOAuthScopes")
    def microsoft_account_o_auth_scopes(self) -> Optional[Sequence[str]]:
        """
        The OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication.
        This setting is optional. If not specified, "wl.basic" is used as the default scope.
        Microsoft Account Scopes and permissions documentation: https://msdn.microsoft.com/en-us/library/dn631845.aspx
        """
        return pulumi.get(self, "microsoft_account_o_auth_scopes")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource Name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="runtimeVersion")
    def runtime_version(self) -> Optional[str]:
        """
        The RuntimeVersion of the Authentication / Authorization feature in use for the current app.
        The setting in this value can control the behavior of certain features in the Authentication / Authorization module.
        """
        return pulumi.get(self, "runtime_version")
    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        The system metadata relating to this resource.
        """
        return pulumi.get(self, "system_data")
    @property
    @pulumi.getter(name="tokenRefreshExtensionHours")
    def token_refresh_extension_hours(self) -> Optional[float]:
        """
        The number of hours after session token expiration that a session token can be used to
        call the token refresh API. The default is 72 hours.
        """
        return pulumi.get(self, "token_refresh_extension_hours")
    @property
    @pulumi.getter(name="tokenStoreEnabled")
    def token_store_enabled(self) -> Optional[bool]:
        """
        <code>true</code> to durably store platform-specific security tokens that are obtained during login flows; otherwise, <code>false</code>.
         The default is <code>false</code>.
        """
        return pulumi.get(self, "token_store_enabled")
    @property
    @pulumi.getter(name="twitterConsumerKey")
    def twitter_consumer_key(self) -> Optional[str]:
        """
        The OAuth 1.0a consumer key of the Twitter application used for sign-in.
        This setting is required for enabling Twitter Sign-In.
        Twitter Sign-In documentation: https://dev.twitter.com/web/sign-in
        """
        return pulumi.get(self, "twitter_consumer_key")
    @property
    @pulumi.getter(name="twitterConsumerSecret")
    def twitter_consumer_secret(self) -> Optional[str]:
        """
        The OAuth 1.0a consumer secret of the Twitter application used for sign-in.
        This setting is required for enabling Twitter Sign-In.
        Twitter Sign-In documentation: https://dev.twitter.com/web/sign-in
        """
        return pulumi.get(self, "twitter_consumer_secret")
    @property
    @pulumi.getter(name="twitterConsumerSecretSettingName")
    def twitter_consumer_secret_setting_name(self) -> Optional[str]:
        """
        The app setting name that contains the OAuth 1.0a consumer secret of the Twitter
        application used for sign-in.
        """
        return pulumi.get(self, "twitter_consumer_secret_setting_name")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="unauthenticatedClientAction")
    def unauthenticated_client_action(self) -> Optional[str]:
        """
        The action to take when an unauthenticated client attempts to access the app.
        """
        return pulumi.get(self, "unauthenticated_client_action")
    @property
    @pulumi.getter(name="validateIssuer")
    def validate_issuer(self) -> Optional[bool]:
        """
        Gets a value indicating whether the issuer should be a valid HTTPS url and be validated as such.
        """
        return pulumi.get(self, "validate_issuer")
class AwaitableListWebAppAuthSettingsResult(ListWebAppAuthSettingsResult):
    """Awaitable flavor of the result: ``await``-ing an instance yields a
    plain ListWebAppAuthSettingsResult carrying the same field values."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable yield makes __await__ a generator, which is the
        # iterator protocol ``await`` expects; the return value below becomes
        # the result of the await expression.
        if False:
            yield self
        field_names = (
            'aad_claims_authorization', 'additional_login_params',
            'allowed_audiences', 'allowed_external_redirect_urls',
            'auth_file_path', 'client_id', 'client_secret',
            'client_secret_certificate_thumbprint', 'client_secret_setting_name',
            'default_provider', 'enabled', 'facebook_app_id',
            'facebook_app_secret', 'facebook_app_secret_setting_name',
            'facebook_o_auth_scopes', 'git_hub_client_id',
            'git_hub_client_secret', 'git_hub_client_secret_setting_name',
            'git_hub_o_auth_scopes', 'google_client_id', 'google_client_secret',
            'google_client_secret_setting_name', 'google_o_auth_scopes', 'id',
            'is_auth_from_file', 'issuer', 'kind', 'microsoft_account_client_id',
            'microsoft_account_client_secret',
            'microsoft_account_client_secret_setting_name',
            'microsoft_account_o_auth_scopes', 'name', 'runtime_version',
            'system_data', 'token_refresh_extension_hours',
            'token_store_enabled', 'twitter_consumer_key',
            'twitter_consumer_secret', 'twitter_consumer_secret_setting_name',
            'type', 'unauthenticated_client_action', 'validate_issuer',
        )
        # Copy every property into a fresh non-awaitable result instance.
        return ListWebAppAuthSettingsResult(
            **{field: getattr(self, field) for field in field_names})
def list_web_app_auth_settings(name: Optional[str] = None,
                               resource_group_name: Optional[str] = None,
                               opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListWebAppAuthSettingsResult:
    """
    Retrieve the Authentication / Authorization configuration settings for a web app.

    :param str name: Name of the app.
    :param str resource_group_name: Name of the resource group to which the resource belongs.
    """
    invoke_args = {
        'name': name,
        'resourceGroupName': resource_group_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Call through to the provider; the typed result exposes one attribute
    # per auth setting, mirrored onto the awaitable wrapper below.
    raw = pulumi.runtime.invoke('azure-nextgen:web/v20200901:listWebAppAuthSettings', invoke_args, opts=opts, typ=ListWebAppAuthSettingsResult).value
    field_names = (
        'aad_claims_authorization', 'additional_login_params',
        'allowed_audiences', 'allowed_external_redirect_urls',
        'auth_file_path', 'client_id', 'client_secret',
        'client_secret_certificate_thumbprint', 'client_secret_setting_name',
        'default_provider', 'enabled', 'facebook_app_id',
        'facebook_app_secret', 'facebook_app_secret_setting_name',
        'facebook_o_auth_scopes', 'git_hub_client_id',
        'git_hub_client_secret', 'git_hub_client_secret_setting_name',
        'git_hub_o_auth_scopes', 'google_client_id', 'google_client_secret',
        'google_client_secret_setting_name', 'google_o_auth_scopes', 'id',
        'is_auth_from_file', 'issuer', 'kind', 'microsoft_account_client_id',
        'microsoft_account_client_secret',
        'microsoft_account_client_secret_setting_name',
        'microsoft_account_o_auth_scopes', 'name', 'runtime_version',
        'system_data', 'token_refresh_extension_hours',
        'token_store_enabled', 'twitter_consumer_key',
        'twitter_consumer_secret', 'twitter_consumer_secret_setting_name',
        'type', 'unauthenticated_client_action', 'validate_issuer',
    )
    return AwaitableListWebAppAuthSettingsResult(
        **{field: getattr(raw, field) for field in field_names})
| 51.715822 | 1,173 | 0.716221 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'ListWebAppAuthSettingsResult',
'AwaitableListWebAppAuthSettingsResult',
'list_web_app_auth_settings',
]
@pulumi.output_type
class ListWebAppAuthSettingsResult:
    """
    Authentication/Authorization settings of an App Service app, as returned
    by the ``listWebAppAuthSettings`` provider invoke.

    NOTE(review): this module follows the Pulumi code-generator layout — the
    constructor type-checks every field and stores it via ``pulumi.set``; the
    properties below read the stored values back via ``pulumi.get`` under the
    wire-format (camelCase) names given to ``@pulumi.getter``.
    """
    def __init__(__self__, aad_claims_authorization=None, additional_login_params=None, allowed_audiences=None, allowed_external_redirect_urls=None, auth_file_path=None, client_id=None, client_secret=None, client_secret_certificate_thumbprint=None, client_secret_setting_name=None, default_provider=None, enabled=None, facebook_app_id=None, facebook_app_secret=None, facebook_app_secret_setting_name=None, facebook_o_auth_scopes=None, git_hub_client_id=None, git_hub_client_secret=None, git_hub_client_secret_setting_name=None, git_hub_o_auth_scopes=None, google_client_id=None, google_client_secret=None, google_client_secret_setting_name=None, google_o_auth_scopes=None, id=None, is_auth_from_file=None, issuer=None, kind=None, microsoft_account_client_id=None, microsoft_account_client_secret=None, microsoft_account_client_secret_setting_name=None, microsoft_account_o_auth_scopes=None, name=None, runtime_version=None, system_data=None, token_refresh_extension_hours=None, token_store_enabled=None, twitter_consumer_key=None, twitter_consumer_secret=None, twitter_consumer_secret_setting_name=None, type=None, unauthenticated_client_action=None, validate_issuer=None):
        """Validate each argument's runtime type and store it on the instance.

        Note the checks are ``if value and not isinstance(...)`` — falsy
        values (None, "", [], False, 0) skip validation by design.
        """
        if aad_claims_authorization and not isinstance(aad_claims_authorization, str):
            raise TypeError("Expected argument 'aad_claims_authorization' to be a str")
        pulumi.set(__self__, "aad_claims_authorization", aad_claims_authorization)
        if additional_login_params and not isinstance(additional_login_params, list):
            raise TypeError("Expected argument 'additional_login_params' to be a list")
        pulumi.set(__self__, "additional_login_params", additional_login_params)
        if allowed_audiences and not isinstance(allowed_audiences, list):
            raise TypeError("Expected argument 'allowed_audiences' to be a list")
        pulumi.set(__self__, "allowed_audiences", allowed_audiences)
        if allowed_external_redirect_urls and not isinstance(allowed_external_redirect_urls, list):
            raise TypeError("Expected argument 'allowed_external_redirect_urls' to be a list")
        pulumi.set(__self__, "allowed_external_redirect_urls", allowed_external_redirect_urls)
        if auth_file_path and not isinstance(auth_file_path, str):
            raise TypeError("Expected argument 'auth_file_path' to be a str")
        pulumi.set(__self__, "auth_file_path", auth_file_path)
        if client_id and not isinstance(client_id, str):
            raise TypeError("Expected argument 'client_id' to be a str")
        pulumi.set(__self__, "client_id", client_id)
        if client_secret and not isinstance(client_secret, str):
            raise TypeError("Expected argument 'client_secret' to be a str")
        pulumi.set(__self__, "client_secret", client_secret)
        if client_secret_certificate_thumbprint and not isinstance(client_secret_certificate_thumbprint, str):
            raise TypeError("Expected argument 'client_secret_certificate_thumbprint' to be a str")
        pulumi.set(__self__, "client_secret_certificate_thumbprint", client_secret_certificate_thumbprint)
        if client_secret_setting_name and not isinstance(client_secret_setting_name, str):
            raise TypeError("Expected argument 'client_secret_setting_name' to be a str")
        pulumi.set(__self__, "client_secret_setting_name", client_secret_setting_name)
        if default_provider and not isinstance(default_provider, str):
            raise TypeError("Expected argument 'default_provider' to be a str")
        pulumi.set(__self__, "default_provider", default_provider)
        if enabled and not isinstance(enabled, bool):
            raise TypeError("Expected argument 'enabled' to be a bool")
        pulumi.set(__self__, "enabled", enabled)
        if facebook_app_id and not isinstance(facebook_app_id, str):
            raise TypeError("Expected argument 'facebook_app_id' to be a str")
        pulumi.set(__self__, "facebook_app_id", facebook_app_id)
        if facebook_app_secret and not isinstance(facebook_app_secret, str):
            raise TypeError("Expected argument 'facebook_app_secret' to be a str")
        pulumi.set(__self__, "facebook_app_secret", facebook_app_secret)
        if facebook_app_secret_setting_name and not isinstance(facebook_app_secret_setting_name, str):
            raise TypeError("Expected argument 'facebook_app_secret_setting_name' to be a str")
        pulumi.set(__self__, "facebook_app_secret_setting_name", facebook_app_secret_setting_name)
        if facebook_o_auth_scopes and not isinstance(facebook_o_auth_scopes, list):
            raise TypeError("Expected argument 'facebook_o_auth_scopes' to be a list")
        pulumi.set(__self__, "facebook_o_auth_scopes", facebook_o_auth_scopes)
        if git_hub_client_id and not isinstance(git_hub_client_id, str):
            raise TypeError("Expected argument 'git_hub_client_id' to be a str")
        pulumi.set(__self__, "git_hub_client_id", git_hub_client_id)
        if git_hub_client_secret and not isinstance(git_hub_client_secret, str):
            raise TypeError("Expected argument 'git_hub_client_secret' to be a str")
        pulumi.set(__self__, "git_hub_client_secret", git_hub_client_secret)
        if git_hub_client_secret_setting_name and not isinstance(git_hub_client_secret_setting_name, str):
            raise TypeError("Expected argument 'git_hub_client_secret_setting_name' to be a str")
        pulumi.set(__self__, "git_hub_client_secret_setting_name", git_hub_client_secret_setting_name)
        if git_hub_o_auth_scopes and not isinstance(git_hub_o_auth_scopes, list):
            raise TypeError("Expected argument 'git_hub_o_auth_scopes' to be a list")
        pulumi.set(__self__, "git_hub_o_auth_scopes", git_hub_o_auth_scopes)
        if google_client_id and not isinstance(google_client_id, str):
            raise TypeError("Expected argument 'google_client_id' to be a str")
        pulumi.set(__self__, "google_client_id", google_client_id)
        if google_client_secret and not isinstance(google_client_secret, str):
            raise TypeError("Expected argument 'google_client_secret' to be a str")
        pulumi.set(__self__, "google_client_secret", google_client_secret)
        if google_client_secret_setting_name and not isinstance(google_client_secret_setting_name, str):
            raise TypeError("Expected argument 'google_client_secret_setting_name' to be a str")
        pulumi.set(__self__, "google_client_secret_setting_name", google_client_secret_setting_name)
        if google_o_auth_scopes and not isinstance(google_o_auth_scopes, list):
            raise TypeError("Expected argument 'google_o_auth_scopes' to be a list")
        pulumi.set(__self__, "google_o_auth_scopes", google_o_auth_scopes)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if is_auth_from_file and not isinstance(is_auth_from_file, str):
            raise TypeError("Expected argument 'is_auth_from_file' to be a str")
        pulumi.set(__self__, "is_auth_from_file", is_auth_from_file)
        if issuer and not isinstance(issuer, str):
            raise TypeError("Expected argument 'issuer' to be a str")
        pulumi.set(__self__, "issuer", issuer)
        if kind and not isinstance(kind, str):
            raise TypeError("Expected argument 'kind' to be a str")
        pulumi.set(__self__, "kind", kind)
        if microsoft_account_client_id and not isinstance(microsoft_account_client_id, str):
            raise TypeError("Expected argument 'microsoft_account_client_id' to be a str")
        pulumi.set(__self__, "microsoft_account_client_id", microsoft_account_client_id)
        if microsoft_account_client_secret and not isinstance(microsoft_account_client_secret, str):
            raise TypeError("Expected argument 'microsoft_account_client_secret' to be a str")
        pulumi.set(__self__, "microsoft_account_client_secret", microsoft_account_client_secret)
        if microsoft_account_client_secret_setting_name and not isinstance(microsoft_account_client_secret_setting_name, str):
            raise TypeError("Expected argument 'microsoft_account_client_secret_setting_name' to be a str")
        pulumi.set(__self__, "microsoft_account_client_secret_setting_name", microsoft_account_client_secret_setting_name)
        if microsoft_account_o_auth_scopes and not isinstance(microsoft_account_o_auth_scopes, list):
            raise TypeError("Expected argument 'microsoft_account_o_auth_scopes' to be a list")
        pulumi.set(__self__, "microsoft_account_o_auth_scopes", microsoft_account_o_auth_scopes)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if runtime_version and not isinstance(runtime_version, str):
            raise TypeError("Expected argument 'runtime_version' to be a str")
        pulumi.set(__self__, "runtime_version", runtime_version)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if token_refresh_extension_hours and not isinstance(token_refresh_extension_hours, float):
            raise TypeError("Expected argument 'token_refresh_extension_hours' to be a float")
        pulumi.set(__self__, "token_refresh_extension_hours", token_refresh_extension_hours)
        if token_store_enabled and not isinstance(token_store_enabled, bool):
            raise TypeError("Expected argument 'token_store_enabled' to be a bool")
        pulumi.set(__self__, "token_store_enabled", token_store_enabled)
        if twitter_consumer_key and not isinstance(twitter_consumer_key, str):
            raise TypeError("Expected argument 'twitter_consumer_key' to be a str")
        pulumi.set(__self__, "twitter_consumer_key", twitter_consumer_key)
        if twitter_consumer_secret and not isinstance(twitter_consumer_secret, str):
            raise TypeError("Expected argument 'twitter_consumer_secret' to be a str")
        pulumi.set(__self__, "twitter_consumer_secret", twitter_consumer_secret)
        if twitter_consumer_secret_setting_name and not isinstance(twitter_consumer_secret_setting_name, str):
            raise TypeError("Expected argument 'twitter_consumer_secret_setting_name' to be a str")
        pulumi.set(__self__, "twitter_consumer_secret_setting_name", twitter_consumer_secret_setting_name)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
        if unauthenticated_client_action and not isinstance(unauthenticated_client_action, str):
            raise TypeError("Expected argument 'unauthenticated_client_action' to be a str")
        pulumi.set(__self__, "unauthenticated_client_action", unauthenticated_client_action)
        if validate_issuer and not isinstance(validate_issuer, bool):
            raise TypeError("Expected argument 'validate_issuer' to be a bool")
        pulumi.set(__self__, "validate_issuer", validate_issuer)
    # Read-only accessors; each @pulumi.getter name is the camelCase wire key.
    @property
    @pulumi.getter(name="aadClaimsAuthorization")
    def aad_claims_authorization(self) -> Optional[str]:
        return pulumi.get(self, "aad_claims_authorization")
    @property
    @pulumi.getter(name="additionalLoginParams")
    def additional_login_params(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "additional_login_params")
    @property
    @pulumi.getter(name="allowedAudiences")
    def allowed_audiences(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "allowed_audiences")
    @property
    @pulumi.getter(name="allowedExternalRedirectUrls")
    def allowed_external_redirect_urls(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "allowed_external_redirect_urls")
    @property
    @pulumi.getter(name="authFilePath")
    def auth_file_path(self) -> Optional[str]:
        return pulumi.get(self, "auth_file_path")
    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> Optional[str]:
        return pulumi.get(self, "client_id")
    @property
    @pulumi.getter(name="clientSecret")
    def client_secret(self) -> Optional[str]:
        return pulumi.get(self, "client_secret")
    @property
    @pulumi.getter(name="clientSecretCertificateThumbprint")
    def client_secret_certificate_thumbprint(self) -> Optional[str]:
        return pulumi.get(self, "client_secret_certificate_thumbprint")
    @property
    @pulumi.getter(name="clientSecretSettingName")
    def client_secret_setting_name(self) -> Optional[str]:
        return pulumi.get(self, "client_secret_setting_name")
    @property
    @pulumi.getter(name="defaultProvider")
    def default_provider(self) -> Optional[str]:
        return pulumi.get(self, "default_provider")
    @property
    @pulumi.getter
    def enabled(self) -> Optional[bool]:
        return pulumi.get(self, "enabled")
    @property
    @pulumi.getter(name="facebookAppId")
    def facebook_app_id(self) -> Optional[str]:
        return pulumi.get(self, "facebook_app_id")
    @property
    @pulumi.getter(name="facebookAppSecret")
    def facebook_app_secret(self) -> Optional[str]:
        return pulumi.get(self, "facebook_app_secret")
    @property
    @pulumi.getter(name="facebookAppSecretSettingName")
    def facebook_app_secret_setting_name(self) -> Optional[str]:
        return pulumi.get(self, "facebook_app_secret_setting_name")
    @property
    @pulumi.getter(name="facebookOAuthScopes")
    def facebook_o_auth_scopes(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "facebook_o_auth_scopes")
    @property
    @pulumi.getter(name="gitHubClientId")
    def git_hub_client_id(self) -> Optional[str]:
        return pulumi.get(self, "git_hub_client_id")
    @property
    @pulumi.getter(name="gitHubClientSecret")
    def git_hub_client_secret(self) -> Optional[str]:
        return pulumi.get(self, "git_hub_client_secret")
    @property
    @pulumi.getter(name="gitHubClientSecretSettingName")
    def git_hub_client_secret_setting_name(self) -> Optional[str]:
        return pulumi.get(self, "git_hub_client_secret_setting_name")
    @property
    @pulumi.getter(name="gitHubOAuthScopes")
    def git_hub_o_auth_scopes(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "git_hub_o_auth_scopes")
    @property
    @pulumi.getter(name="googleClientId")
    def google_client_id(self) -> Optional[str]:
        return pulumi.get(self, "google_client_id")
    @property
    @pulumi.getter(name="googleClientSecret")
    def google_client_secret(self) -> Optional[str]:
        return pulumi.get(self, "google_client_secret")
    @property
    @pulumi.getter(name="googleClientSecretSettingName")
    def google_client_secret_setting_name(self) -> Optional[str]:
        return pulumi.get(self, "google_client_secret_setting_name")
    @property
    @pulumi.getter(name="googleOAuthScopes")
    def google_o_auth_scopes(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "google_o_auth_scopes")
    @property
    @pulumi.getter
    def id(self) -> str:
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="isAuthFromFile")
    def is_auth_from_file(self) -> Optional[str]:
        return pulumi.get(self, "is_auth_from_file")
    @property
    @pulumi.getter
    def issuer(self) -> Optional[str]:
        return pulumi.get(self, "issuer")
    @property
    @pulumi.getter
    def kind(self) -> Optional[str]:
        return pulumi.get(self, "kind")
    @property
    @pulumi.getter(name="microsoftAccountClientId")
    def microsoft_account_client_id(self) -> Optional[str]:
        return pulumi.get(self, "microsoft_account_client_id")
    @property
    @pulumi.getter(name="microsoftAccountClientSecret")
    def microsoft_account_client_secret(self) -> Optional[str]:
        return pulumi.get(self, "microsoft_account_client_secret")
    @property
    @pulumi.getter(name="microsoftAccountClientSecretSettingName")
    def microsoft_account_client_secret_setting_name(self) -> Optional[str]:
        return pulumi.get(self, "microsoft_account_client_secret_setting_name")
    @property
    @pulumi.getter(name="microsoftAccountOAuthScopes")
    def microsoft_account_o_auth_scopes(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "microsoft_account_o_auth_scopes")
    @property
    @pulumi.getter
    def name(self) -> str:
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="runtimeVersion")
    def runtime_version(self) -> Optional[str]:
        return pulumi.get(self, "runtime_version")
    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        return pulumi.get(self, "system_data")
    @property
    @pulumi.getter(name="tokenRefreshExtensionHours")
    def token_refresh_extension_hours(self) -> Optional[float]:
        return pulumi.get(self, "token_refresh_extension_hours")
    @property
    @pulumi.getter(name="tokenStoreEnabled")
    def token_store_enabled(self) -> Optional[bool]:
        return pulumi.get(self, "token_store_enabled")
    @property
    @pulumi.getter(name="twitterConsumerKey")
    def twitter_consumer_key(self) -> Optional[str]:
        return pulumi.get(self, "twitter_consumer_key")
    @property
    @pulumi.getter(name="twitterConsumerSecret")
    def twitter_consumer_secret(self) -> Optional[str]:
        return pulumi.get(self, "twitter_consumer_secret")
    @property
    @pulumi.getter(name="twitterConsumerSecretSettingName")
    def twitter_consumer_secret_setting_name(self) -> Optional[str]:
        return pulumi.get(self, "twitter_consumer_secret_setting_name")
    @property
    @pulumi.getter
    def type(self) -> str:
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="unauthenticatedClientAction")
    def unauthenticated_client_action(self) -> Optional[str]:
        return pulumi.get(self, "unauthenticated_client_action")
    @property
    @pulumi.getter(name="validateIssuer")
    def validate_issuer(self) -> Optional[bool]:
        return pulumi.get(self, "validate_issuer")
class AwaitableListWebAppAuthSettingsResult(ListWebAppAuthSettingsResult):
    """
    Awaitable shim around the result type: ``__await__`` is a generator that
    never actually yields (the ``if False`` guard) and immediately returns a
    plain ``ListWebAppAuthSettingsResult`` copy of this object's fields, so
    the invoke result works both with and without ``await``.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return ListWebAppAuthSettingsResult(
            aad_claims_authorization=self.aad_claims_authorization,
            additional_login_params=self.additional_login_params,
            allowed_audiences=self.allowed_audiences,
            allowed_external_redirect_urls=self.allowed_external_redirect_urls,
            auth_file_path=self.auth_file_path,
            client_id=self.client_id,
            client_secret=self.client_secret,
            client_secret_certificate_thumbprint=self.client_secret_certificate_thumbprint,
            client_secret_setting_name=self.client_secret_setting_name,
            default_provider=self.default_provider,
            enabled=self.enabled,
            facebook_app_id=self.facebook_app_id,
            facebook_app_secret=self.facebook_app_secret,
            facebook_app_secret_setting_name=self.facebook_app_secret_setting_name,
            facebook_o_auth_scopes=self.facebook_o_auth_scopes,
            git_hub_client_id=self.git_hub_client_id,
            git_hub_client_secret=self.git_hub_client_secret,
            git_hub_client_secret_setting_name=self.git_hub_client_secret_setting_name,
            git_hub_o_auth_scopes=self.git_hub_o_auth_scopes,
            google_client_id=self.google_client_id,
            google_client_secret=self.google_client_secret,
            google_client_secret_setting_name=self.google_client_secret_setting_name,
            google_o_auth_scopes=self.google_o_auth_scopes,
            id=self.id,
            is_auth_from_file=self.is_auth_from_file,
            issuer=self.issuer,
            kind=self.kind,
            microsoft_account_client_id=self.microsoft_account_client_id,
            microsoft_account_client_secret=self.microsoft_account_client_secret,
            microsoft_account_client_secret_setting_name=self.microsoft_account_client_secret_setting_name,
            microsoft_account_o_auth_scopes=self.microsoft_account_o_auth_scopes,
            name=self.name,
            runtime_version=self.runtime_version,
            system_data=self.system_data,
            token_refresh_extension_hours=self.token_refresh_extension_hours,
            token_store_enabled=self.token_store_enabled,
            twitter_consumer_key=self.twitter_consumer_key,
            twitter_consumer_secret=self.twitter_consumer_secret,
            twitter_consumer_secret_setting_name=self.twitter_consumer_secret_setting_name,
            type=self.type,
            unauthenticated_client_action=self.unauthenticated_client_action,
            validate_issuer=self.validate_issuer)
def list_web_app_auth_settings(name: Optional[str] = None,
                               resource_group_name: Optional[str] = None,
                               opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListWebAppAuthSettingsResult:
    """
    Invoke ``azure-nextgen:web/v20200901:listWebAppAuthSettings`` and return
    the app's Authentication/Authorization settings.

    :param name: App name (sent as the ``name`` invoke argument).
    :param resource_group_name: Resource group name (sent as
        ``resourceGroupName``).
    :param opts: Invoke options; a fresh ``InvokeOptions`` is created when
        omitted.  NOTE(review): a caller-supplied ``opts`` with an unset
        ``version`` is mutated in place — generated-code behavior, kept as is.
    """
    __args__ = dict()
    __args__['name'] = name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous provider call; .value is the typed result object.
    __ret__ = pulumi.runtime.invoke('azure-nextgen:web/v20200901:listWebAppAuthSettings', __args__, opts=opts, typ=ListWebAppAuthSettingsResult).value
    return AwaitableListWebAppAuthSettingsResult(
        aad_claims_authorization=__ret__.aad_claims_authorization,
        additional_login_params=__ret__.additional_login_params,
        allowed_audiences=__ret__.allowed_audiences,
        allowed_external_redirect_urls=__ret__.allowed_external_redirect_urls,
        auth_file_path=__ret__.auth_file_path,
        client_id=__ret__.client_id,
        client_secret=__ret__.client_secret,
        client_secret_certificate_thumbprint=__ret__.client_secret_certificate_thumbprint,
        client_secret_setting_name=__ret__.client_secret_setting_name,
        default_provider=__ret__.default_provider,
        enabled=__ret__.enabled,
        facebook_app_id=__ret__.facebook_app_id,
        facebook_app_secret=__ret__.facebook_app_secret,
        facebook_app_secret_setting_name=__ret__.facebook_app_secret_setting_name,
        facebook_o_auth_scopes=__ret__.facebook_o_auth_scopes,
        git_hub_client_id=__ret__.git_hub_client_id,
        git_hub_client_secret=__ret__.git_hub_client_secret,
        git_hub_client_secret_setting_name=__ret__.git_hub_client_secret_setting_name,
        git_hub_o_auth_scopes=__ret__.git_hub_o_auth_scopes,
        google_client_id=__ret__.google_client_id,
        google_client_secret=__ret__.google_client_secret,
        google_client_secret_setting_name=__ret__.google_client_secret_setting_name,
        google_o_auth_scopes=__ret__.google_o_auth_scopes,
        id=__ret__.id,
        is_auth_from_file=__ret__.is_auth_from_file,
        issuer=__ret__.issuer,
        kind=__ret__.kind,
        microsoft_account_client_id=__ret__.microsoft_account_client_id,
        microsoft_account_client_secret=__ret__.microsoft_account_client_secret,
        microsoft_account_client_secret_setting_name=__ret__.microsoft_account_client_secret_setting_name,
        microsoft_account_o_auth_scopes=__ret__.microsoft_account_o_auth_scopes,
        name=__ret__.name,
        runtime_version=__ret__.runtime_version,
        system_data=__ret__.system_data,
        token_refresh_extension_hours=__ret__.token_refresh_extension_hours,
        token_store_enabled=__ret__.token_store_enabled,
        twitter_consumer_key=__ret__.twitter_consumer_key,
        twitter_consumer_secret=__ret__.twitter_consumer_secret,
        twitter_consumer_secret_setting_name=__ret__.twitter_consumer_secret_setting_name,
        type=__ret__.type,
        unauthenticated_client_action=__ret__.unauthenticated_client_action,
        validate_issuer=__ret__.validate_issuer)
| true | true |
1c34b472a4717165764bc88060bf3b13180e200d | 128 | py | Python | examples/examples/urls.py | moccu/django-adminjournal | 20501affd904026025841de07d4c9f12d9a64e1d | [
"MIT"
] | 1 | 2019-11-25T23:03:14.000Z | 2019-11-25T23:03:14.000Z | examples/examples/urls.py | moccu/django-adminjournal | 20501affd904026025841de07d4c9f12d9a64e1d | [
"MIT"
] | 2 | 2019-08-23T10:48:33.000Z | 2021-06-10T21:03:42.000Z | examples/examples/urls.py | moccu/django-adminjournal | 20501affd904026025841de07d4c9f12d9a64e1d | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.urls import include, path
urlpatterns = [
    # Only route in this example project: the Django admin site.
    path('admin/', admin.site.urls),
]
| 16 | 37 | 0.71875 | from django.contrib import admin
from django.urls import include, path
urlpatterns = [
    # Only route in this example project: the Django admin site.
    path('admin/', admin.site.urls),
]
| true | true |
1c34b5c8e5475042fc481e1d8dbf825169c80fe4 | 23,511 | py | Python | pandas/tests/scalar/timestamp/test_timestamp.py | Arghya-Banerjee/pandas | 9a4fcea8de798938a434fcaf67a0aa5a46b76b5b | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2022-03-04T10:09:24.000Z | 2022-03-04T10:09:24.000Z | pandas/tests/scalar/timestamp/test_timestamp.py | Arghya-Banerjee/pandas | 9a4fcea8de798938a434fcaf67a0aa5a46b76b5b | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/tests/scalar/timestamp/test_timestamp.py | Arghya-Banerjee/pandas | 9a4fcea8de798938a434fcaf67a0aa5a46b76b5b | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | """ test the scalar Timestamp """
import calendar
from datetime import (
datetime,
timedelta,
)
import locale
import pickle
import unicodedata
from dateutil.tz import tzutc
import numpy as np
import pytest
import pytz
from pytz import (
timezone,
utc,
)
from pandas._libs.tslibs.timezones import (
dateutil_gettz as gettz,
get_timezone,
)
import pandas.util._test_decorators as td
from pandas import (
NaT,
Timedelta,
Timestamp,
)
import pandas._testing as tm
from pandas.tseries import offsets
class TestTimestampProperties:
    """Attribute/property behavior of scalar ``Timestamp`` objects
    (fields, start/end flags, names, leap years, week-of-year, resolution)."""
    def test_freq_deprecation(self):
        """Passing ``freq`` to Timestamp warns at construction and lookup."""
        # GH#41586
        msg = "The 'freq' argument in Timestamp is deprecated"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            # warning issued at construction
            ts = Timestamp("2021-06-01", freq="D")
            ts2 = Timestamp("2021-06-01", freq="B")
        msg = "Timestamp.freq is deprecated"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            # warning issued at attribute lookup
            ts.freq
        for per in ["month", "quarter", "year"]:
            for side in ["start", "end"]:
                attr = f"is_{per}_{side}"
                with tm.assert_produces_warning(FutureWarning, match=msg):
                    getattr(ts2, attr)
                # is_(month|quarter|year)_(start|end) does _not_ issue a warning
                # with freq="D" bc the result will be unaffected by the deprecation
                with tm.assert_produces_warning(None):
                    getattr(ts, attr)
    @pytest.mark.filterwarnings("ignore:The 'freq' argument:FutureWarning")
    @pytest.mark.filterwarnings("ignore:Timestamp.freq is deprecated:FutureWarning")
    def test_properties_business(self):
        """Business-day freq makes weekend dates not month/quarter bounds."""
        ts = Timestamp("2017-10-01", freq="B")
        control = Timestamp("2017-10-01")
        assert ts.dayofweek == 6
        assert ts.day_of_week == 6
        assert not ts.is_month_start  # not a weekday
        assert not ts.freq.is_month_start(ts)
        assert ts.freq.is_month_start(ts + Timedelta(days=1))
        assert not ts.is_quarter_start  # not a weekday
        assert not ts.freq.is_quarter_start(ts)
        assert ts.freq.is_quarter_start(ts + Timedelta(days=1))
        # Control case: non-business is month/qtr start
        assert control.is_month_start
        assert control.is_quarter_start
        ts = Timestamp("2017-09-30", freq="B")
        control = Timestamp("2017-09-30")
        assert ts.dayofweek == 5
        assert ts.day_of_week == 5
        assert not ts.is_month_end  # not a weekday
        assert not ts.freq.is_month_end(ts)
        assert ts.freq.is_month_end(ts - Timedelta(days=1))
        assert not ts.is_quarter_end  # not a weekday
        assert not ts.freq.is_quarter_end(ts)
        assert ts.freq.is_quarter_end(ts - Timedelta(days=1))
        # Control case: non-business is month/qtr start
        assert control.is_month_end
        assert control.is_quarter_end
    @pytest.mark.parametrize(
        "attr, expected",
        [
            ["year", 2014],
            ["month", 12],
            ["day", 31],
            ["hour", 23],
            ["minute", 59],
            ["second", 0],
            ["microsecond", 0],
            ["nanosecond", 0],
            ["dayofweek", 2],
            ["day_of_week", 2],
            ["quarter", 4],
            ["dayofyear", 365],
            ["day_of_year", 365],
            ["week", 1],
            ["daysinmonth", 31],
        ],
    )
    @pytest.mark.parametrize("tz", [None, "US/Eastern"])
    def test_fields(self, attr, expected, tz):
        """Each datetime field returns the expected plain-int value."""
        # GH 10050
        # GH 13303
        ts = Timestamp("2014-12-31 23:59:00", tz=tz)
        result = getattr(ts, attr)
        # that we are int like
        assert isinstance(result, int)
        assert result == expected
    @pytest.mark.parametrize("tz", [None, "US/Eastern"])
    def test_millisecond_raises(self, tz):
        """Timestamp deliberately exposes no ``millisecond`` attribute."""
        ts = Timestamp("2014-12-31 23:59:00", tz=tz)
        msg = "'Timestamp' object has no attribute 'millisecond'"
        with pytest.raises(AttributeError, match=msg):
            ts.millisecond
    @pytest.mark.parametrize(
        "start", ["is_month_start", "is_quarter_start", "is_year_start"]
    )
    @pytest.mark.parametrize("tz", [None, "US/Eastern"])
    def test_is_start(self, start, tz):
        """Jan 1 midnight is a month/quarter/year start, tz-aware or not."""
        ts = Timestamp("2014-01-01 00:00:00", tz=tz)
        assert getattr(ts, start)
    @pytest.mark.parametrize("end", ["is_month_end", "is_year_end", "is_quarter_end"])
    @pytest.mark.parametrize("tz", [None, "US/Eastern"])
    def test_is_end(self, end, tz):
        """Dec 31 is a month/quarter/year end, tz-aware or not."""
        ts = Timestamp("2014-12-31 23:59:59", tz=tz)
        assert getattr(ts, end)
    # GH 12806
    @pytest.mark.parametrize(
        "data",
        [Timestamp("2017-08-28 23:00:00"), Timestamp("2017-08-28 23:00:00", tz="EST")],
    )
    # error: Unsupported operand types for + ("List[None]" and "List[str]")
    @pytest.mark.parametrize(
        "time_locale", [None] + (tm.get_locales() or [])  # type: ignore[operator]
    )
    def test_names(self, data, time_locale):
        """day_name/month_name honor the requested locale; NaT gives NaN."""
        # GH 17354
        # Test .day_name(), .month_name
        if time_locale is None:
            expected_day = "Monday"
            expected_month = "August"
        else:
            with tm.set_locale(time_locale, locale.LC_TIME):
                expected_day = calendar.day_name[0].capitalize()
                expected_month = calendar.month_name[8].capitalize()
        result_day = data.day_name(time_locale)
        result_month = data.month_name(time_locale)
        # Work around https://github.com/pandas-dev/pandas/issues/22342
        # different normalizations
        expected_day = unicodedata.normalize("NFD", expected_day)
        expected_month = unicodedata.normalize("NFD", expected_month)
        result_day = unicodedata.normalize("NFD", result_day)
        result_month = unicodedata.normalize("NFD", result_month)
        assert result_day == expected_day
        assert result_month == expected_month
        # Test NaT
        nan_ts = Timestamp(NaT)
        assert np.isnan(nan_ts.day_name(time_locale))
        assert np.isnan(nan_ts.month_name(time_locale))
    def test_is_leap_year(self, tz_naive_fixture):
        """is_leap_year is a real bool; 2100 is not a leap year.

        ``tz_naive_fixture`` is a pytest fixture — presumably defined in the
        suite's conftest, supplying a range of tz arguments (verify there).
        """
        tz = tz_naive_fixture
        # GH 13727
        dt = Timestamp("2000-01-01 00:00:00", tz=tz)
        assert dt.is_leap_year
        assert isinstance(dt.is_leap_year, bool)
        dt = Timestamp("1999-01-01 00:00:00", tz=tz)
        assert not dt.is_leap_year
        dt = Timestamp("2004-01-01 00:00:00", tz=tz)
        assert dt.is_leap_year
        dt = Timestamp("2100-01-01 00:00:00", tz=tz)
        assert not dt.is_leap_year
    def test_woy_boundary(self):
        """Week-of-year follows the ISO 8601 convention at year boundaries."""
        # make sure weeks at year boundaries are correct
        d = datetime(2013, 12, 31)
        result = Timestamp(d).week
        expected = 1  # ISO standard
        assert result == expected
        d = datetime(2008, 12, 28)
        result = Timestamp(d).week
        expected = 52  # ISO standard
        assert result == expected
        d = datetime(2009, 12, 31)
        result = Timestamp(d).week
        expected = 53  # ISO standard
        assert result == expected
        d = datetime(2010, 1, 1)
        result = Timestamp(d).week
        expected = 53  # ISO standard
        assert result == expected
        d = datetime(2010, 1, 3)
        result = Timestamp(d).week
        expected = 53  # ISO standard
        assert result == expected
        result = np.array(
            [
                Timestamp(datetime(*args)).week
                for args in [(2000, 1, 1), (2000, 1, 2), (2005, 1, 1), (2005, 1, 2)]
            ]
        )
        assert (result == [52, 52, 53, 53]).all()
    def test_resolution(self):
        """Resolution is 1ns, on instances and on the class itself."""
        # GH#21336, GH#21365
        dt = Timestamp("2100-01-01 00:00:00")
        assert dt.resolution == Timedelta(nanoseconds=1)
        # Check that the attribute is available on the class, mirroring
        # the stdlib datetime behavior
        assert Timestamp.resolution == Timedelta(nanoseconds=1)
class TestTimestamp:
def test_tz(self):
tstr = "2014-02-01 09:00"
ts = Timestamp(tstr)
local = ts.tz_localize("Asia/Tokyo")
assert local.hour == 9
assert local == Timestamp(tstr, tz="Asia/Tokyo")
conv = local.tz_convert("US/Eastern")
assert conv == Timestamp("2014-01-31 19:00", tz="US/Eastern")
assert conv.hour == 19
# preserves nanosecond
ts = Timestamp(tstr) + offsets.Nano(5)
local = ts.tz_localize("Asia/Tokyo")
assert local.hour == 9
assert local.nanosecond == 5
conv = local.tz_convert("US/Eastern")
assert conv.nanosecond == 5
assert conv.hour == 19
    def test_utc_z_designator(self):
        # A trailing "Z" (Zulu) suffix in the string parses as UTC; identity
        # check against the pytz ``utc`` singleton via ``get_timezone``.
        assert get_timezone(Timestamp("2014-11-02 01:00Z").tzinfo) is utc
def test_asm8(self):
np.random.seed(7_960_929)
ns = [Timestamp.min.value, Timestamp.max.value, 1000]
for n in ns:
assert (
Timestamp(n).asm8.view("i8") == np.datetime64(n, "ns").view("i8") == n
)
assert Timestamp("nat").asm8.view("i8") == np.datetime64("nat", "ns").view("i8")
    def test_class_ops_pytz(self):
        """Class-method constructors (now/today/utcnow/fromtimestamp/combine)
        agree with their stdlib ``datetime`` counterparts under pytz zones."""
        def compare(x, y):
            # Same instant to within one second (difference truncated to 0s).
            assert int((Timestamp(x).value - Timestamp(y).value) / 1e9) == 0
        compare(Timestamp.now(), datetime.now())
        compare(Timestamp.now("UTC"), datetime.now(timezone("UTC")))
        compare(Timestamp.utcnow(), datetime.utcnow())
        compare(Timestamp.today(), datetime.today())
        current_time = calendar.timegm(datetime.now().utctimetuple())
        msg = "timezone-aware Timestamp with UTC"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            # GH#22451
            ts_utc = Timestamp.utcfromtimestamp(current_time)
        compare(
            ts_utc,
            datetime.utcfromtimestamp(current_time),
        )
        compare(
            Timestamp.fromtimestamp(current_time), datetime.fromtimestamp(current_time)
        )
        compare(
            # Support tz kwarg in Timestamp.fromtimestamp
            Timestamp.fromtimestamp(current_time, "UTC"),
            datetime.fromtimestamp(current_time, utc),
        )
        compare(
            # Support tz kwarg in Timestamp.fromtimestamp
            Timestamp.fromtimestamp(current_time, tz="UTC"),
            datetime.fromtimestamp(current_time, utc),
        )
        date_component = datetime.utcnow()
        time_component = (date_component + timedelta(minutes=10)).time()
        compare(
            Timestamp.combine(date_component, time_component),
            datetime.combine(date_component, time_component),
        )
    def test_class_ops_dateutil(self):
        """Same class-method constructors checked against stdlib ``datetime``,
        but using dateutil's ``tzutc`` instead of pytz."""
        def compare(x, y):
            # Same instant to within one second (rounded, unlike the pytz
            # variant above which truncates).
            assert (
                int(
                    np.round(Timestamp(x).value / 1e9)
                    - np.round(Timestamp(y).value / 1e9)
                )
                == 0
            )
        compare(Timestamp.now(), datetime.now())
        compare(Timestamp.now("UTC"), datetime.now(tzutc()))
        compare(Timestamp.utcnow(), datetime.utcnow())
        compare(Timestamp.today(), datetime.today())
        current_time = calendar.timegm(datetime.now().utctimetuple())
        msg = "timezone-aware Timestamp with UTC"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            # GH#22451
            ts_utc = Timestamp.utcfromtimestamp(current_time)
        compare(
            ts_utc,
            datetime.utcfromtimestamp(current_time),
        )
        compare(
            Timestamp.fromtimestamp(current_time), datetime.fromtimestamp(current_time)
        )
        date_component = datetime.utcnow()
        time_component = (date_component + timedelta(minutes=10)).time()
        compare(
            Timestamp.combine(date_component, time_component),
            datetime.combine(date_component, time_component),
        )
def test_basics_nanos(self):
val = np.int64(946_684_800_000_000_000).view("M8[ns]")
stamp = Timestamp(val.view("i8") + 500)
assert stamp.year == 2000
assert stamp.month == 1
assert stamp.microsecond == 0
assert stamp.nanosecond == 500
# GH 14415
val = np.iinfo(np.int64).min + 80_000_000_000_000
stamp = Timestamp(val)
assert stamp.year == 1677
assert stamp.month == 9
assert stamp.day == 21
assert stamp.microsecond == 145224
assert stamp.nanosecond == 192
@pytest.mark.parametrize(
"value, check_kwargs",
[
[946688461000000000, {}],
[946688461000000000 / 1000, {"unit": "us"}],
[946688461000000000 / 1_000_000, {"unit": "ms"}],
[946688461000000000 / 1_000_000_000, {"unit": "s"}],
[10957, {"unit": "D", "h": 0}],
[
(946688461000000000 + 500000) / 1000000000,
{"unit": "s", "us": 499, "ns": 964},
],
[
(946688461000000000 + 500000000) / 1000000000,
{"unit": "s", "us": 500000},
],
[(946688461000000000 + 500000) / 1000000, {"unit": "ms", "us": 500}],
[(946688461000000000 + 500000) / 1000, {"unit": "us", "us": 500}],
[(946688461000000000 + 500000000) / 1000000, {"unit": "ms", "us": 500000}],
[946688461000000000 / 1000.0 + 5, {"unit": "us", "us": 5}],
[946688461000000000 / 1000.0 + 5000, {"unit": "us", "us": 5000}],
[946688461000000000 / 1000000.0 + 0.5, {"unit": "ms", "us": 500}],
[946688461000000000 / 1000000.0 + 0.005, {"unit": "ms", "us": 5, "ns": 5}],
[946688461000000000 / 1000000000.0 + 0.5, {"unit": "s", "us": 500000}],
[10957 + 0.5, {"unit": "D", "h": 12}],
],
)
def test_unit(self, value, check_kwargs):
def check(value, unit=None, h=1, s=1, us=0, ns=0):
stamp = Timestamp(value, unit=unit)
assert stamp.year == 2000
assert stamp.month == 1
assert stamp.day == 1
assert stamp.hour == h
if unit != "D":
assert stamp.minute == 1
assert stamp.second == s
assert stamp.microsecond == us
else:
assert stamp.minute == 0
assert stamp.second == 0
assert stamp.microsecond == 0
assert stamp.nanosecond == ns
check(value, **check_kwargs)
def test_roundtrip(self):
# test value to string and back conversions
# further test accessors
base = Timestamp("20140101 00:00:00")
result = Timestamp(base.value + Timedelta("5ms").value)
assert result == Timestamp(f"{base}.005000")
assert result.microsecond == 5000
result = Timestamp(base.value + Timedelta("5us").value)
assert result == Timestamp(f"{base}.000005")
assert result.microsecond == 5
result = Timestamp(base.value + Timedelta("5ns").value)
assert result == Timestamp(f"{base}.000000005")
assert result.nanosecond == 5
assert result.microsecond == 0
result = Timestamp(base.value + Timedelta("6ms 5us").value)
assert result == Timestamp(f"{base}.006005")
assert result.microsecond == 5 + 6 * 1000
result = Timestamp(base.value + Timedelta("200ms 5us").value)
assert result == Timestamp(f"{base}.200005")
assert result.microsecond == 5 + 200 * 1000
def test_hash_equivalent(self):
d = {datetime(2011, 1, 1): 5}
stamp = Timestamp(datetime(2011, 1, 1))
assert d[stamp] == 5
def test_tz_conversion_freq(self, tz_naive_fixture):
# GH25241
with tm.assert_produces_warning(FutureWarning, match="freq"):
t1 = Timestamp("2019-01-01 10:00", freq="H")
assert t1.tz_localize(tz=tz_naive_fixture).freq == t1.freq
with tm.assert_produces_warning(FutureWarning, match="freq"):
t2 = Timestamp("2019-01-02 12:00", tz="UTC", freq="T")
assert t2.tz_convert(tz="UTC").freq == t2.freq
def test_pickle_freq_no_warning(self):
# GH#41949 we don't want a warning on unpickling
with tm.assert_produces_warning(FutureWarning, match="freq"):
ts = Timestamp("2019-01-01 10:00", freq="H")
out = pickle.dumps(ts)
with tm.assert_produces_warning(None):
res = pickle.loads(out)
assert res._freq == ts._freq
class TestTimestampNsOperations:
def test_nanosecond_string_parsing(self):
ts = Timestamp("2013-05-01 07:15:45.123456789")
# GH 7878
expected_repr = "2013-05-01 07:15:45.123456789"
expected_value = 1_367_392_545_123_456_789
assert ts.value == expected_value
assert expected_repr in repr(ts)
ts = Timestamp("2013-05-01 07:15:45.123456789+09:00", tz="Asia/Tokyo")
assert ts.value == expected_value - 9 * 3600 * 1_000_000_000
assert expected_repr in repr(ts)
ts = Timestamp("2013-05-01 07:15:45.123456789", tz="UTC")
assert ts.value == expected_value
assert expected_repr in repr(ts)
ts = Timestamp("2013-05-01 07:15:45.123456789", tz="US/Eastern")
assert ts.value == expected_value + 4 * 3600 * 1_000_000_000
assert expected_repr in repr(ts)
# GH 10041
ts = Timestamp("20130501T071545.123456789")
assert ts.value == expected_value
assert expected_repr in repr(ts)
def test_nanosecond_timestamp(self):
# GH 7610
expected = 1_293_840_000_000_000_005
t = Timestamp("2011-01-01") + offsets.Nano(5)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
assert t.value == expected
assert t.nanosecond == 5
t = Timestamp(t)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
assert t.value == expected
assert t.nanosecond == 5
t = Timestamp("2011-01-01 00:00:00.000000005")
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
assert t.value == expected
assert t.nanosecond == 5
expected = 1_293_840_000_000_000_010
t = t + offsets.Nano(5)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
assert t.value == expected
assert t.nanosecond == 10
t = Timestamp(t)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
assert t.value == expected
assert t.nanosecond == 10
t = Timestamp("2011-01-01 00:00:00.000000010")
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
assert t.value == expected
assert t.nanosecond == 10
class TestTimestampToJulianDate:
def test_compare_1700(self):
r = Timestamp("1700-06-23").to_julian_date()
assert r == 2_342_145.5
def test_compare_2000(self):
r = Timestamp("2000-04-12").to_julian_date()
assert r == 2_451_646.5
def test_compare_2100(self):
r = Timestamp("2100-08-12").to_julian_date()
assert r == 2_488_292.5
def test_compare_hour01(self):
r = Timestamp("2000-08-12T01:00:00").to_julian_date()
assert r == 2_451_768.5416666666666666
def test_compare_hour13(self):
r = Timestamp("2000-08-12T13:00:00").to_julian_date()
assert r == 2_451_769.0416666666666666
class TestTimestampConversion:
def test_conversion(self):
# GH#9255
ts = Timestamp("2000-01-01")
result = ts.to_pydatetime()
expected = datetime(2000, 1, 1)
assert result == expected
assert type(result) == type(expected)
result = ts.to_datetime64()
expected = np.datetime64(ts.value, "ns")
assert result == expected
assert type(result) == type(expected)
assert result.dtype == expected.dtype
def test_to_pydatetime_nonzero_nano(self):
ts = Timestamp("2011-01-01 9:00:00.123456789")
# Warn the user of data loss (nanoseconds).
with tm.assert_produces_warning(UserWarning):
expected = datetime(2011, 1, 1, 9, 0, 0, 123456)
result = ts.to_pydatetime()
assert result == expected
def test_timestamp_to_datetime(self):
stamp = Timestamp("20090415", tz="US/Eastern")
dtval = stamp.to_pydatetime()
assert stamp == dtval
assert stamp.tzinfo == dtval.tzinfo
def test_timestamp_to_datetime_dateutil(self):
stamp = Timestamp("20090415", tz="dateutil/US/Eastern")
dtval = stamp.to_pydatetime()
assert stamp == dtval
assert stamp.tzinfo == dtval.tzinfo
def test_timestamp_to_datetime_explicit_pytz(self):
stamp = Timestamp("20090415", tz=pytz.timezone("US/Eastern"))
dtval = stamp.to_pydatetime()
assert stamp == dtval
assert stamp.tzinfo == dtval.tzinfo
@td.skip_if_windows
def test_timestamp_to_datetime_explicit_dateutil(self):
stamp = Timestamp("20090415", tz=gettz("US/Eastern"))
dtval = stamp.to_pydatetime()
assert stamp == dtval
assert stamp.tzinfo == dtval.tzinfo
def test_to_datetime_bijective(self):
# Ensure that converting to datetime and back only loses precision
# by going from nanoseconds to microseconds.
exp_warning = None if Timestamp.max.nanosecond == 0 else UserWarning
with tm.assert_produces_warning(exp_warning):
pydt_max = Timestamp.max.to_pydatetime()
assert Timestamp(pydt_max).value / 1000 == Timestamp.max.value / 1000
exp_warning = None if Timestamp.min.nanosecond == 0 else UserWarning
with tm.assert_produces_warning(exp_warning):
pydt_min = Timestamp.min.to_pydatetime()
# The next assertion can be enabled once GH#39221 is merged
# assert pydt_min < Timestamp.min # this is bc nanos are dropped
tdus = timedelta(microseconds=1)
assert pydt_min + tdus > Timestamp.min
assert Timestamp(pydt_min + tdus).value / 1000 == Timestamp.min.value / 1000
def test_to_period_tz_warning(self):
# GH#21333 make sure a warning is issued when timezone
# info is lost
ts = Timestamp("2009-04-15 16:17:18", tz="US/Eastern")
with tm.assert_produces_warning(UserWarning):
# warning that timezone info will be lost
ts.to_period("D")
def test_to_numpy_alias(self):
# GH 24653: alias .to_numpy() for scalars
ts = Timestamp(datetime.now())
assert ts.to_datetime64() == ts.to_numpy()
# GH#44460
msg = "dtype and copy arguments are ignored"
with pytest.raises(ValueError, match=msg):
ts.to_numpy("M8[s]")
with pytest.raises(ValueError, match=msg):
ts.to_numpy(copy=True)
class SubDatetime(datetime):
pass
@pytest.mark.parametrize(
"lh,rh",
[
(SubDatetime(2000, 1, 1), Timedelta(hours=1)),
(Timedelta(hours=1), SubDatetime(2000, 1, 1)),
],
)
def test_dt_subclass_add_timedelta(lh, rh):
# GH#25851
# ensure that subclassed datetime works for
# Timedelta operations
result = lh + rh
expected = SubDatetime(2000, 1, 1, 1)
assert result == expected
| 35.568835 | 88 | 0.59704 |
import calendar
from datetime import (
datetime,
timedelta,
)
import locale
import pickle
import unicodedata
from dateutil.tz import tzutc
import numpy as np
import pytest
import pytz
from pytz import (
timezone,
utc,
)
from pandas._libs.tslibs.timezones import (
dateutil_gettz as gettz,
get_timezone,
)
import pandas.util._test_decorators as td
from pandas import (
NaT,
Timedelta,
Timestamp,
)
import pandas._testing as tm
from pandas.tseries import offsets
class TestTimestampProperties:
def test_freq_deprecation(self):
msg = "The 'freq' argument in Timestamp is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
ts = Timestamp("2021-06-01", freq="D")
ts2 = Timestamp("2021-06-01", freq="B")
msg = "Timestamp.freq is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
ts.freq
for per in ["month", "quarter", "year"]:
for side in ["start", "end"]:
attr = f"is_{per}_{side}"
with tm.assert_produces_warning(FutureWarning, match=msg):
getattr(ts2, attr)
with tm.assert_produces_warning(None):
getattr(ts, attr)
@pytest.mark.filterwarnings("ignore:The 'freq' argument:FutureWarning")
@pytest.mark.filterwarnings("ignore:Timestamp.freq is deprecated:FutureWarning")
def test_properties_business(self):
ts = Timestamp("2017-10-01", freq="B")
control = Timestamp("2017-10-01")
assert ts.dayofweek == 6
assert ts.day_of_week == 6
assert not ts.is_month_start
assert not ts.freq.is_month_start(ts)
assert ts.freq.is_month_start(ts + Timedelta(days=1))
assert not ts.is_quarter_start
assert not ts.freq.is_quarter_start(ts)
assert ts.freq.is_quarter_start(ts + Timedelta(days=1))
assert control.is_month_start
assert control.is_quarter_start
ts = Timestamp("2017-09-30", freq="B")
control = Timestamp("2017-09-30")
assert ts.dayofweek == 5
assert ts.day_of_week == 5
assert not ts.is_month_end
assert not ts.freq.is_month_end(ts)
assert ts.freq.is_month_end(ts - Timedelta(days=1))
assert not ts.is_quarter_end
assert not ts.freq.is_quarter_end(ts)
assert ts.freq.is_quarter_end(ts - Timedelta(days=1))
assert control.is_month_end
assert control.is_quarter_end
@pytest.mark.parametrize(
"attr, expected",
[
["year", 2014],
["month", 12],
["day", 31],
["hour", 23],
["minute", 59],
["second", 0],
["microsecond", 0],
["nanosecond", 0],
["dayofweek", 2],
["day_of_week", 2],
["quarter", 4],
["dayofyear", 365],
["day_of_year", 365],
["week", 1],
["daysinmonth", 31],
],
)
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_fields(self, attr, expected, tz):
ts = Timestamp("2014-12-31 23:59:00", tz=tz)
result = getattr(ts, attr)
assert isinstance(result, int)
assert result == expected
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_millisecond_raises(self, tz):
ts = Timestamp("2014-12-31 23:59:00", tz=tz)
msg = "'Timestamp' object has no attribute 'millisecond'"
with pytest.raises(AttributeError, match=msg):
ts.millisecond
@pytest.mark.parametrize(
"start", ["is_month_start", "is_quarter_start", "is_year_start"]
)
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_is_start(self, start, tz):
ts = Timestamp("2014-01-01 00:00:00", tz=tz)
assert getattr(ts, start)
@pytest.mark.parametrize("end", ["is_month_end", "is_year_end", "is_quarter_end"])
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_is_end(self, end, tz):
ts = Timestamp("2014-12-31 23:59:59", tz=tz)
assert getattr(ts, end)
@pytest.mark.parametrize(
"data",
[Timestamp("2017-08-28 23:00:00"), Timestamp("2017-08-28 23:00:00", tz="EST")],
)
@pytest.mark.parametrize(
"time_locale", [None] + (tm.get_locales() or [])
)
def test_names(self, data, time_locale):
if time_locale is None:
expected_day = "Monday"
expected_month = "August"
else:
with tm.set_locale(time_locale, locale.LC_TIME):
expected_day = calendar.day_name[0].capitalize()
expected_month = calendar.month_name[8].capitalize()
result_day = data.day_name(time_locale)
result_month = data.month_name(time_locale)
expected_day = unicodedata.normalize("NFD", expected_day)
expected_month = unicodedata.normalize("NFD", expected_month)
result_day = unicodedata.normalize("NFD", result_day)
result_month = unicodedata.normalize("NFD", result_month)
assert result_day == expected_day
assert result_month == expected_month
nan_ts = Timestamp(NaT)
assert np.isnan(nan_ts.day_name(time_locale))
assert np.isnan(nan_ts.month_name(time_locale))
def test_is_leap_year(self, tz_naive_fixture):
tz = tz_naive_fixture
dt = Timestamp("2000-01-01 00:00:00", tz=tz)
assert dt.is_leap_year
assert isinstance(dt.is_leap_year, bool)
dt = Timestamp("1999-01-01 00:00:00", tz=tz)
assert not dt.is_leap_year
dt = Timestamp("2004-01-01 00:00:00", tz=tz)
assert dt.is_leap_year
dt = Timestamp("2100-01-01 00:00:00", tz=tz)
assert not dt.is_leap_year
def test_woy_boundary(self):
d = datetime(2013, 12, 31)
result = Timestamp(d).week
expected = 1
assert result == expected
d = datetime(2008, 12, 28)
result = Timestamp(d).week
expected = 52
assert result == expected
d = datetime(2009, 12, 31)
result = Timestamp(d).week
expected = 53
assert result == expected
d = datetime(2010, 1, 1)
result = Timestamp(d).week
expected = 53
assert result == expected
d = datetime(2010, 1, 3)
result = Timestamp(d).week
expected = 53
assert result == expected
result = np.array(
[
Timestamp(datetime(*args)).week
for args in [(2000, 1, 1), (2000, 1, 2), (2005, 1, 1), (2005, 1, 2)]
]
)
assert (result == [52, 52, 53, 53]).all()
def test_resolution(self):
p("2100-01-01 00:00:00")
assert dt.resolution == Timedelta(nanoseconds=1)
assert Timestamp.resolution == Timedelta(nanoseconds=1)
class TestTimestamp:
def test_tz(self):
tstr = "2014-02-01 09:00"
ts = Timestamp(tstr)
local = ts.tz_localize("Asia/Tokyo")
assert local.hour == 9
assert local == Timestamp(tstr, tz="Asia/Tokyo")
conv = local.tz_convert("US/Eastern")
assert conv == Timestamp("2014-01-31 19:00", tz="US/Eastern")
assert conv.hour == 19
ts = Timestamp(tstr) + offsets.Nano(5)
local = ts.tz_localize("Asia/Tokyo")
assert local.hour == 9
assert local.nanosecond == 5
conv = local.tz_convert("US/Eastern")
assert conv.nanosecond == 5
assert conv.hour == 19
def test_utc_z_designator(self):
assert get_timezone(Timestamp("2014-11-02 01:00Z").tzinfo) is utc
def test_asm8(self):
np.random.seed(7_960_929)
ns = [Timestamp.min.value, Timestamp.max.value, 1000]
for n in ns:
assert (
Timestamp(n).asm8.view("i8") == np.datetime64(n, "ns").view("i8") == n
)
assert Timestamp("nat").asm8.view("i8") == np.datetime64("nat", "ns").view("i8")
def test_class_ops_pytz(self):
def compare(x, y):
assert int((Timestamp(x).value - Timestamp(y).value) / 1e9) == 0
compare(Timestamp.now(), datetime.now())
compare(Timestamp.now("UTC"), datetime.now(timezone("UTC")))
compare(Timestamp.utcnow(), datetime.utcnow())
compare(Timestamp.today(), datetime.today())
current_time = calendar.timegm(datetime.now().utctimetuple())
msg = "timezone-aware Timestamp with UTC"
with tm.assert_produces_warning(FutureWarning, match=msg):
ts_utc = Timestamp.utcfromtimestamp(current_time)
compare(
ts_utc,
datetime.utcfromtimestamp(current_time),
)
compare(
Timestamp.fromtimestamp(current_time), datetime.fromtimestamp(current_time)
)
compare(
Timestamp.fromtimestamp(current_time, "UTC"),
datetime.fromtimestamp(current_time, utc),
)
compare(
Timestamp.fromtimestamp(current_time, tz="UTC"),
datetime.fromtimestamp(current_time, utc),
)
date_component = datetime.utcnow()
time_component = (date_component + timedelta(minutes=10)).time()
compare(
Timestamp.combine(date_component, time_component),
datetime.combine(date_component, time_component),
)
def test_class_ops_dateutil(self):
def compare(x, y):
assert (
int(
np.round(Timestamp(x).value / 1e9)
- np.round(Timestamp(y).value / 1e9)
)
== 0
)
compare(Timestamp.now(), datetime.now())
compare(Timestamp.now("UTC"), datetime.now(tzutc()))
compare(Timestamp.utcnow(), datetime.utcnow())
compare(Timestamp.today(), datetime.today())
current_time = calendar.timegm(datetime.now().utctimetuple())
msg = "timezone-aware Timestamp with UTC"
with tm.assert_produces_warning(FutureWarning, match=msg):
ts_utc = Timestamp.utcfromtimestamp(current_time)
compare(
ts_utc,
datetime.utcfromtimestamp(current_time),
)
compare(
Timestamp.fromtimestamp(current_time), datetime.fromtimestamp(current_time)
)
date_component = datetime.utcnow()
time_component = (date_component + timedelta(minutes=10)).time()
compare(
Timestamp.combine(date_component, time_component),
datetime.combine(date_component, time_component),
)
def test_basics_nanos(self):
val = np.int64(946_684_800_000_000_000).view("M8[ns]")
stamp = Timestamp(val.view("i8") + 500)
assert stamp.year == 2000
assert stamp.month == 1
assert stamp.microsecond == 0
assert stamp.nanosecond == 500
val = np.iinfo(np.int64).min + 80_000_000_000_000
stamp = Timestamp(val)
assert stamp.year == 1677
assert stamp.month == 9
assert stamp.day == 21
assert stamp.microsecond == 145224
assert stamp.nanosecond == 192
@pytest.mark.parametrize(
"value, check_kwargs",
[
[946688461000000000, {}],
[946688461000000000 / 1000, {"unit": "us"}],
[946688461000000000 / 1_000_000, {"unit": "ms"}],
[946688461000000000 / 1_000_000_000, {"unit": "s"}],
[10957, {"unit": "D", "h": 0}],
[
(946688461000000000 + 500000) / 1000000000,
{"unit": "s", "us": 499, "ns": 964},
],
[
(946688461000000000 + 500000000) / 1000000000,
{"unit": "s", "us": 500000},
],
[(946688461000000000 + 500000) / 1000000, {"unit": "ms", "us": 500}],
[(946688461000000000 + 500000) / 1000, {"unit": "us", "us": 500}],
[(946688461000000000 + 500000000) / 1000000, {"unit": "ms", "us": 500000}],
[946688461000000000 / 1000.0 + 5, {"unit": "us", "us": 5}],
[946688461000000000 / 1000.0 + 5000, {"unit": "us", "us": 5000}],
[946688461000000000 / 1000000.0 + 0.5, {"unit": "ms", "us": 500}],
[946688461000000000 / 1000000.0 + 0.005, {"unit": "ms", "us": 5, "ns": 5}],
[946688461000000000 / 1000000000.0 + 0.5, {"unit": "s", "us": 500000}],
[10957 + 0.5, {"unit": "D", "h": 12}],
],
)
def test_unit(self, value, check_kwargs):
def check(value, unit=None, h=1, s=1, us=0, ns=0):
stamp = Timestamp(value, unit=unit)
assert stamp.year == 2000
assert stamp.month == 1
assert stamp.day == 1
assert stamp.hour == h
if unit != "D":
assert stamp.minute == 1
assert stamp.second == s
assert stamp.microsecond == us
else:
assert stamp.minute == 0
assert stamp.second == 0
assert stamp.microsecond == 0
assert stamp.nanosecond == ns
check(value, **check_kwargs)
def test_roundtrip(self):
base = Timestamp("20140101 00:00:00")
result = Timestamp(base.value + Timedelta("5ms").value)
assert result == Timestamp(f"{base}.005000")
assert result.microsecond == 5000
result = Timestamp(base.value + Timedelta("5us").value)
assert result == Timestamp(f"{base}.000005")
assert result.microsecond == 5
result = Timestamp(base.value + Timedelta("5ns").value)
assert result == Timestamp(f"{base}.000000005")
assert result.nanosecond == 5
assert result.microsecond == 0
result = Timestamp(base.value + Timedelta("6ms 5us").value)
assert result == Timestamp(f"{base}.006005")
assert result.microsecond == 5 + 6 * 1000
result = Timestamp(base.value + Timedelta("200ms 5us").value)
assert result == Timestamp(f"{base}.200005")
assert result.microsecond == 5 + 200 * 1000
def test_hash_equivalent(self):
d = {datetime(2011, 1, 1): 5}
stamp = Timestamp(datetime(2011, 1, 1))
assert d[stamp] == 5
def test_tz_conversion_freq(self, tz_naive_fixture):
with tm.assert_produces_warning(FutureWarning, match="freq"):
t1 = Timestamp("2019-01-01 10:00", freq="H")
assert t1.tz_localize(tz=tz_naive_fixture).freq == t1.freq
with tm.assert_produces_warning(FutureWarning, match="freq"):
t2 = Timestamp("2019-01-02 12:00", tz="UTC", freq="T")
assert t2.tz_convert(tz="UTC").freq == t2.freq
def test_pickle_freq_no_warning(self):
ureWarning, match="freq"):
ts = Timestamp("2019-01-01 10:00", freq="H")
out = pickle.dumps(ts)
with tm.assert_produces_warning(None):
res = pickle.loads(out)
assert res._freq == ts._freq
class TestTimestampNsOperations:
def test_nanosecond_string_parsing(self):
ts = Timestamp("2013-05-01 07:15:45.123456789")
# GH 7878
expected_repr = "2013-05-01 07:15:45.123456789"
expected_value = 1_367_392_545_123_456_789
assert ts.value == expected_value
assert expected_repr in repr(ts)
ts = Timestamp("2013-05-01 07:15:45.123456789+09:00", tz="Asia/Tokyo")
assert ts.value == expected_value - 9 * 3600 * 1_000_000_000
assert expected_repr in repr(ts)
ts = Timestamp("2013-05-01 07:15:45.123456789", tz="UTC")
assert ts.value == expected_value
assert expected_repr in repr(ts)
ts = Timestamp("2013-05-01 07:15:45.123456789", tz="US/Eastern")
assert ts.value == expected_value + 4 * 3600 * 1_000_000_000
assert expected_repr in repr(ts)
# GH 10041
ts = Timestamp("20130501T071545.123456789")
assert ts.value == expected_value
assert expected_repr in repr(ts)
def test_nanosecond_timestamp(self):
# GH 7610
expected = 1_293_840_000_000_000_005
t = Timestamp("2011-01-01") + offsets.Nano(5)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
assert t.value == expected
assert t.nanosecond == 5
t = Timestamp(t)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
assert t.value == expected
assert t.nanosecond == 5
t = Timestamp("2011-01-01 00:00:00.000000005")
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
assert t.value == expected
assert t.nanosecond == 5
expected = 1_293_840_000_000_000_010
t = t + offsets.Nano(5)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
assert t.value == expected
assert t.nanosecond == 10
t = Timestamp(t)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
assert t.value == expected
assert t.nanosecond == 10
t = Timestamp("2011-01-01 00:00:00.000000010")
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000010')"
assert t.value == expected
assert t.nanosecond == 10
class TestTimestampToJulianDate:
def test_compare_1700(self):
r = Timestamp("1700-06-23").to_julian_date()
assert r == 2_342_145.5
def test_compare_2000(self):
r = Timestamp("2000-04-12").to_julian_date()
assert r == 2_451_646.5
def test_compare_2100(self):
r = Timestamp("2100-08-12").to_julian_date()
assert r == 2_488_292.5
def test_compare_hour01(self):
r = Timestamp("2000-08-12T01:00:00").to_julian_date()
assert r == 2_451_768.5416666666666666
def test_compare_hour13(self):
r = Timestamp("2000-08-12T13:00:00").to_julian_date()
assert r == 2_451_769.0416666666666666
class TestTimestampConversion:
def test_conversion(self):
# GH#9255
ts = Timestamp("2000-01-01")
result = ts.to_pydatetime()
expected = datetime(2000, 1, 1)
assert result == expected
assert type(result) == type(expected)
result = ts.to_datetime64()
expected = np.datetime64(ts.value, "ns")
assert result == expected
assert type(result) == type(expected)
assert result.dtype == expected.dtype
def test_to_pydatetime_nonzero_nano(self):
ts = Timestamp("2011-01-01 9:00:00.123456789")
# Warn the user of data loss (nanoseconds).
with tm.assert_produces_warning(UserWarning):
expected = datetime(2011, 1, 1, 9, 0, 0, 123456)
result = ts.to_pydatetime()
assert result == expected
def test_timestamp_to_datetime(self):
stamp = Timestamp("20090415", tz="US/Eastern")
dtval = stamp.to_pydatetime()
assert stamp == dtval
assert stamp.tzinfo == dtval.tzinfo
def test_timestamp_to_datetime_dateutil(self):
stamp = Timestamp("20090415", tz="dateutil/US/Eastern")
dtval = stamp.to_pydatetime()
assert stamp == dtval
assert stamp.tzinfo == dtval.tzinfo
def test_timestamp_to_datetime_explicit_pytz(self):
stamp = Timestamp("20090415", tz=pytz.timezone("US/Eastern"))
dtval = stamp.to_pydatetime()
assert stamp == dtval
assert stamp.tzinfo == dtval.tzinfo
@td.skip_if_windows
def test_timestamp_to_datetime_explicit_dateutil(self):
stamp = Timestamp("20090415", tz=gettz("US/Eastern"))
dtval = stamp.to_pydatetime()
assert stamp == dtval
assert stamp.tzinfo == dtval.tzinfo
def test_to_datetime_bijective(self):
# Ensure that converting to datetime and back only loses precision
# by going from nanoseconds to microseconds.
exp_warning = None if Timestamp.max.nanosecond == 0 else UserWarning
with tm.assert_produces_warning(exp_warning):
pydt_max = Timestamp.max.to_pydatetime()
assert Timestamp(pydt_max).value / 1000 == Timestamp.max.value / 1000
exp_warning = None if Timestamp.min.nanosecond == 0 else UserWarning
with tm.assert_produces_warning(exp_warning):
pydt_min = Timestamp.min.to_pydatetime()
# The next assertion can be enabled once GH#39221 is merged
# assert pydt_min < Timestamp.min # this is bc nanos are dropped
tdus = timedelta(microseconds=1)
assert pydt_min + tdus > Timestamp.min
assert Timestamp(pydt_min + tdus).value / 1000 == Timestamp.min.value / 1000
def test_to_period_tz_warning(self):
# GH#21333 make sure a warning is issued when timezone
# info is lost
ts = Timestamp("2009-04-15 16:17:18", tz="US/Eastern")
with tm.assert_produces_warning(UserWarning):
# warning that timezone info will be lost
ts.to_period("D")
def test_to_numpy_alias(self):
# GH 24653: alias .to_numpy() for scalars
ts = Timestamp(datetime.now())
assert ts.to_datetime64() == ts.to_numpy()
# GH#44460
msg = "dtype and copy arguments are ignored"
with pytest.raises(ValueError, match=msg):
ts.to_numpy("M8[s]")
with pytest.raises(ValueError, match=msg):
ts.to_numpy(copy=True)
class SubDatetime(datetime):
pass
@pytest.mark.parametrize(
"lh,rh",
[
(SubDatetime(2000, 1, 1), Timedelta(hours=1)),
(Timedelta(hours=1), SubDatetime(2000, 1, 1)),
],
)
def test_dt_subclass_add_timedelta(lh, rh):
# GH#25851
# ensure that subclassed datetime works for
# Timedelta operations
result = lh + rh
expected = SubDatetime(2000, 1, 1, 1)
assert result == expected
| true | true |
1c34b62e278a58f9d788040bddf7a3c887fc8d63 | 231 | py | Python | NotIncluded/Depreciated/_Check_SystemOfEquation.py | MattArran/GEMMES | ed48ebef08fdf740ed28248c65ed7d8239ab19c4 | [
"MIT"
] | 4 | 2021-06-28T07:11:34.000Z | 2022-01-11T13:43:17.000Z | NotIncluded/Depreciated/_Check_SystemOfEquation.py | DaluS/GEMMES | 10d4a062004ce5b7fd26eb8c4937d940b7d097d5 | [
"MIT"
] | 167 | 2021-06-28T07:10:21.000Z | 2022-03-18T17:30:40.000Z | NotIncluded/Depreciated/_Check_SystemOfEquation.py | MattArran/GEMMES | ed48ebef08fdf740ed28248c65ed7d8239ab19c4 | [
"MIT"
] | 3 | 2021-06-28T07:19:12.000Z | 2022-03-03T02:44:15.000Z | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 7 17:34:54 2021
@author: Paul Valcke
"""
"""
FindTheRightOrderOfResolution():
FindParametersInModel():
CheckIfVariavblesArePrepared():
CheckIfParametersArePrepared():
""" | 14.4375 | 35 | 0.683983 | true | true | |
1c34b6e1813f6fa08d384d75af43ab21cbe63c47 | 1,336 | py | Python | run.py | quadraticmuffin/nd_maze | 8f41d923d1839c0d6fb34c02f165b247f181743b | [
"MIT"
] | null | null | null | run.py | quadraticmuffin/nd_maze | 8f41d923d1839c0d6fb34c02f165b247f181743b | [
"MIT"
] | null | null | null | run.py | quadraticmuffin/nd_maze | 8f41d923d1839c0d6fb34c02f165b247f181743b | [
"MIT"
] | null | null | null | import numpy as np
class NdMaze():
'''
'''
def __init__(self, nd, size, start=None):
self.board = np.zeros(shape=[size]*nd)
self.nd = nd
self.size = size
self.pos = start
if start is None:
self.pos = [0]*nd
self.dimr = 0
self.dimc = 1
def move(self, dim, amt):
if (self.pos[dim] + amt) not in range(self.size):
raise ValueError('Attempted to move out of bounds')
temp_pos = list(self.pos)
temp_pos[dim] += amt
self.pos = tuple(temp_pos)
return self.get2d()
def seeDim(self, newdim):
if newdim not in range(self.nd):
raise ValueError(f'Dimension {newdim} does not exist in board of {self.nd} dimensions.')
self.dimr = self.dimc
self.dimc = newdim
return self.get2d()
def get2d(self):
idx = list(self.pos)
idx[self.dimr] = slice(None)
idx[self.dimc] = slice(None)
ret = self.board[tuple(idx)]
if self.dimr > self.dimc:
return ret.T
else:
return ret
def __repr__(self):
return str(self.board)
def __str__(self):
return str(self.get2d())
if __name__ == '__main__':
game = NdMaze(5, 2)
game.board = np.arange(32).reshape([2]*5) | 25.692308 | 100 | 0.539671 | import numpy as np
class NdMaze():
def __init__(self, nd, size, start=None):
self.board = np.zeros(shape=[size]*nd)
self.nd = nd
self.size = size
self.pos = start
if start is None:
self.pos = [0]*nd
self.dimr = 0
self.dimc = 1
def move(self, dim, amt):
if (self.pos[dim] + amt) not in range(self.size):
raise ValueError('Attempted to move out of bounds')
temp_pos = list(self.pos)
temp_pos[dim] += amt
self.pos = tuple(temp_pos)
return self.get2d()
def seeDim(self, newdim):
if newdim not in range(self.nd):
raise ValueError(f'Dimension {newdim} does not exist in board of {self.nd} dimensions.')
self.dimr = self.dimc
self.dimc = newdim
return self.get2d()
def get2d(self):
idx = list(self.pos)
idx[self.dimr] = slice(None)
idx[self.dimc] = slice(None)
ret = self.board[tuple(idx)]
if self.dimr > self.dimc:
return ret.T
else:
return ret
def __repr__(self):
return str(self.board)
def __str__(self):
return str(self.get2d())
if __name__ == '__main__':
game = NdMaze(5, 2)
game.board = np.arange(32).reshape([2]*5) | true | true |
1c34b7457ad8f2d9d0041328a63a00fffed3739b | 53,584 | py | Python | Python_Files/gw_driver.py | xyt556/HydroSAR | 2142c300e4cf48065626832fdeb9c4aa472627dc | [
"MIT"
] | 1 | 2022-03-01T08:46:46.000Z | 2022-03-01T08:46:46.000Z | Python_Files/gw_driver.py | xyt556/HydroSAR | 2142c300e4cf48065626832fdeb9c4aa472627dc | [
"MIT"
] | null | null | null | Python_Files/gw_driver.py | xyt556/HydroSAR | 2142c300e4cf48065626832fdeb9c4aa472627dc | [
"MIT"
] | null | null | null | # Author: Sayantan Majumdar
# Email: smxnv@mst.edu
import pandas as pd
from Python_Files.hydrolibs import rasterops as rops
from Python_Files.hydrolibs import vectorops as vops
from Python_Files.hydrolibs import data_download as dd
from Python_Files.hydrolibs.sysops import makedirs, make_proper_dir_name, copy_files
from Python_Files.hydrolibs import random_forest_regressor as rfr
from Python_Files.hydrolibs import model_analysis as ma
from glob import glob
class HydroML:
    def __init__(self, input_dir, file_dir, output_dir, output_shp_dir, output_gw_raster_dir,
                 input_state_file, gdal_path, input_ts_dir=None, input_subsidence_dir=None, input_gw_boundary_file=None,
                 input_ama_ina_file=None, input_watershed_file=None, input_gw_basin=None,
                 ssebop_link=None, sed_thick_csv=None, cdl_year=None):
        """
        Constructor for initializing class variables
        :param input_dir: Input data directory
        :param file_dir: Directory for storing intermediate files
        :param output_dir: Output directory
        :param output_shp_dir: Output shapefile directory
        :param output_gw_raster_dir: Output GW raster directory
        :param input_state_file: Input state shapefile (must be in WGS84)
        :param gdal_path: GDAL directory path, in Windows replace with OSGeo4W directory path, e.g. '/usr/bin/gdal/' on
        Linux or Mac and 'C:/OSGeo4W64/' on Windows
        :param input_ts_dir: Input directory containing the time series data, set None to automatically download data
        :param input_subsidence_dir: Input directory containing the ADWR subsidence rasters, consumed later by
        organize_subsidence_rasters
        :param input_gw_boundary_file: Input GMD shapefile for Kansas or Well Registry shapefile for Arizona
        :param input_ama_ina_file: The file path to the AMA/INA shapefile required for Arizona.
        :param input_watershed_file: The file path to the Arizona surface watershed shapefile.
        :param input_gw_basin: Groundwater basin shapefile path for Arizona
        :param ssebop_link: SSEBop data download link. SSEBop data are not downloaded if its set to None.
        :param sed_thick_csv: USGS Sediment Thickness CSV file path
        :param cdl_year: Set CDL year for using a single year for the entire model. If set to None, all available CDL
        data for the years in year_list will be downloaded (Note: for years before 2008, CDL 2008 will be replicated if
        cdl_year is None).
        """
        # Normalize user-supplied paths so each ends with a directory separator.
        self.input_dir = make_proper_dir_name(input_dir)
        self.file_dir = make_proper_dir_name(file_dir)
        self.output_dir = make_proper_dir_name(output_dir)
        self.output_shp_dir = make_proper_dir_name(output_shp_dir)
        self.output_gw_raster_dir = make_proper_dir_name(output_gw_raster_dir)
        self.gdal_path = make_proper_dir_name(gdal_path)
        # NOTE(review): input_ts_dir/input_subsidence_dir may be None here -
        # assumes make_proper_dir_name passes None through unchanged; confirm.
        self.input_ts_dir = make_proper_dir_name(input_ts_dir)
        self.input_subsidence_dir = make_proper_dir_name(input_subsidence_dir)
        # Raw input vector/data files used by the pipeline stages below.
        self.input_gw_boundary_file = input_gw_boundary_file
        self.input_ama_ina_file = input_ama_ina_file
        self.input_watershed_file = input_watershed_file
        self.input_gw_basin = input_gw_basin
        self.input_state_file = input_state_file
        self.ssebop_link = ssebop_link
        # The attributes below are populated lazily by the corresponding
        # pipeline methods (reproject_shapefiles, download_data, etc.);
        # they stay None until that stage has run.
        self.input_gw_boundary_reproj_file = None
        self.input_ama_ina_reproj_file = None
        self.input_state_reproj_file = None
        self.input_watershed_reproj_file = None
        self.input_gw_basin_reproj_file = None
        self.final_gw_dir = None
        self.actual_gw_dir = None
        self.ref_raster = None
        self.raster_reproj_dir = None
        self.well_reg_raster_file = None
        self.crop_coeff_dir = None
        self.crop_coeff_reproj_dir = None
        self.crop_coeff_mask_dir = None
        self.cdl_reclass_dir = None
        self.raster_mask_dir = None
        self.land_use_dir_list = None
        self.rf_data_dir = None
        self.pred_data_dir = None
        self.lu_mask_dir = None
        self.ssebop_file_dir = None
        self.cdl_file_dir = None
        self.cdl_reproj_dir = None
        self.ssebop_reproj_dir = None
        self.ws_ssebop_file_dir = None
        self.ws_ssebop_reproj_dir = None
        self.data_year_list = None
        self.data_start_month = None
        self.data_end_month = None
        self.ws_year_list = None
        self.ws_start_month = None
        self.ws_end_month = None
        self.ws_data_dir = None
        self.ws_data_reproj_dir = None
        self.converted_subsidence_dir = None
        self.pred_out_dir = None
        self.subsidence_pred_gw_dir = None
        self.well_reg_dir = None
        self.well_reg_mask_dir = None
        self.well_reg_flt_file = None
        self.well_reg_flt_dir = None
        self.well_reg_reproj_dir = None
        self.sed_thick_csv = sed_thick_csv
        self.sed_thick_dir = None
        self.sed_thick_shp_file = None
        self.sed_thick_raster_file = None
        self.sed_thick_reproj_dir = None
        self.gw_basin_raster_dir = None
        self.gw_basin_raster_reproj_dir = None
        self.cdl_year = cdl_year
        # Create the top-level output directories up front.
        makedirs([self.output_dir, self.output_gw_raster_dir, self.output_shp_dir])
    def download_data(self, year_list, start_month, end_month, already_downloaded=False, already_extracted=False):
        """
        Download, extract, and preprocess GEE, CDL, and SSEBop data
        :param year_list: List of years %yyyy format
        :param start_month: Start month in %m format
        :param end_month: End month in %m format
        :param already_downloaded: Set True to disable downloading
        :param already_extracted: Set True to disable extraction
        :return: None
        """
        self.data_year_list = year_list
        self.data_start_month = start_month
        self.data_end_month = end_month
        # GEE data are only downloaded when no precomputed time-series
        # directory was supplied to the constructor.
        gee_data_flag = False
        if self.input_ts_dir is None:
            self.input_ts_dir = self.input_dir + 'Downloaded_Data/'
            gee_data_flag = True
        gee_zip_dir = self.input_ts_dir + 'GEE_Data/'
        self.cdl_file_dir = self.input_ts_dir + 'CDL/'
        ssebop_zip_dir = self.input_ts_dir + 'SSEBop_Data/'
        self.ssebop_file_dir = ssebop_zip_dir + 'SSEBop_Files/'
        if not already_downloaded:
            if gee_data_flag:
                makedirs([gee_zip_dir])
                dd.download_gee_data(year_list, start_month=start_month, end_month=end_month,
                                     aoi_shp_file=self.input_state_file, outdir=gee_zip_dir)
            makedirs([self.cdl_file_dir])
            dd.download_cropland_data(self.input_state_file, year_list=year_list, output_dir=self.cdl_file_dir,
                                      cdl_year=self.cdl_year)
            makedirs([ssebop_zip_dir])
            dd.download_ssebop_data(self.ssebop_link, year_list, start_month, end_month, ssebop_zip_dir)
        # After download, the GEE time-series directory points at the
        # extracted files, not the zip archives.
        if gee_data_flag:
            self.input_ts_dir = gee_zip_dir + 'GEE_Files/'
        if not already_extracted:
            if gee_data_flag:
                makedirs([self.input_ts_dir])
                dd.extract_data(gee_zip_dir, out_dir=self.input_ts_dir, rename_extracted_files=True)
            # SSEBop archives are extracted regardless of the GEE flag.
            makedirs([self.ssebop_file_dir])
            dd.extract_data(ssebop_zip_dir, self.ssebop_file_dir)
        print('CDL, GEE, and SSEBop data downloaded and extracted...')
def download_ws_data(self, year_list, start_month, end_month, already_downloaded=False, already_extracted=False):
"""
Download SSEBop and P data for water stress index computation
:param year_list: List of years %yyyy format
:param start_month: Start month in %m format
:param end_month: End month in %m format
:param already_downloaded: Set True to disable downloading
:param already_extracted: Set True to disable extraction
:return: None
"""
self.ws_year_list = year_list
self.ws_start_month = start_month
self.ws_end_month = end_month
self.ws_data_dir = self.input_dir + 'WS_Data/'
ws_gee_dir = self.ws_data_dir + 'WS_GEE/'
ws_ssebop_dir = self.ws_data_dir + 'WS_SSEBop/'
self.ws_ssebop_file_dir = ws_ssebop_dir + 'WS_SSEBop_Files/'
if not already_downloaded:
makedirs([ws_gee_dir, ws_ssebop_dir])
dd.download_ssebop_data(self.ssebop_link, year_list, start_month, end_month, ws_ssebop_dir)
dd.download_gee_data(year_list, start_month=start_month, end_month=end_month,
aoi_shp_file=self.input_state_file, outdir=ws_gee_dir)
if not already_extracted:
makedirs([self.ws_ssebop_file_dir])
dd.extract_data(ws_ssebop_dir, out_dir=self.ws_ssebop_file_dir)
dd.extract_data(ws_gee_dir, out_dir=self.ws_data_dir, rename_extracted_files=True)
print('Data for WS metric downloaded...')
def preprocess_gw_csv(self, input_gw_csv_dir, fill_attr='AF Pumped', filter_attr=None,
filter_attr_value='OUTSIDE OF AMA OR INA', use_only_ama_ina=False, already_preprocessed=False,
**kwargs):
"""
Preprocess the well registry file to add GW pumping from each CSV file. That is, add an attribute present in the
GW csv file to the Well Registry shape files (yearwise) based on matching ids given in kwargs.
By default, the GW withdrawal is added. The csv ids must include: csv_well_id, csv_mov_id, csv_water_id,
movement_type, water_type, The shp id must include shp_well_id. For the Arizona datasets, csv_well_id='Well Id',
csv_mov_id='Movement Type', csv_water_id='Water Type', movement_type='WITHDRAWAL', water_type='GROUNDWATER', and
shp_well_id='REGISTRY_I' by default. For changing, pass appropriate kwargs.
:param input_gw_csv_dir: Input GW csv directory
:param fill_attr: Attribute present in the CSV file to add to Well Registry.
:param filter_attr: Remove specific wells based on this attribute. Set None to disable filtering.
:param filter_attr_value: Value for filter_attr
:param use_only_ama_ina: Set True to use only AMA/INA for model training
:param already_preprocessed: Set True to disable preprocessing
:return: None
"""
if not already_preprocessed:
input_gw_csv_dir = make_proper_dir_name(input_gw_csv_dir)
vops.add_attribute_well_reg_multiple(input_well_reg_file=self.input_gw_boundary_file,
input_gw_csv_dir=input_gw_csv_dir, out_gw_shp_dir=self.output_shp_dir,
fill_attr=fill_attr, filter_attr=filter_attr,
filter_attr_value=filter_attr_value, use_only_ama_ina=use_only_ama_ina,
**kwargs)
def extract_shp_from_gdb(self, input_gdb_dir, year_list, attr_name='AF_USED', already_extracted=False):
"""
Extract shapefiles from geodatabase (GDB)
:param input_gdb_dir: Input GDB directory
:param year_list: List of years to extract
:param attr_name: Attribute name for shapefile
:param already_extracted: Set True to disable extraction
:return: None
"""
if not already_extracted:
print('Extracting GW data from GDB...')
vops.extract_gdb_data(input_gdb_dir, attr_name=attr_name, year_list=year_list, outdir=self.output_shp_dir)
else:
print("GW shapefiles already extracted")
    def reproject_shapefiles(self, already_reprojected=False):
        """
        Reproject GMD/Well Registry and state shapefiles
        :param already_reprojected: Set True to disable reprojection
        :return: None
        """
        gw_boundary_reproj_dir = make_proper_dir_name(self.file_dir + 'gw_boundary/reproj')
        gw_ama_ina_reproj_dir = make_proper_dir_name(self.file_dir + 'gw_ama_ina/reproj')
        watershed_reproj_dir = make_proper_dir_name(self.file_dir + 'watershed/reproj')
        state_reproj_dir = make_proper_dir_name(self.file_dir + 'state/reproj')
        gw_basin_reproj_dir = make_proper_dir_name(self.file_dir + 'GW_Basin/reproj')
        # Output paths are recorded on self even when reprojection is
        # skipped, so later stages can find previously generated files.
        self.input_gw_boundary_reproj_file = gw_boundary_reproj_dir + 'input_boundary_reproj.shp'
        if self.input_ama_ina_file:
            self.input_ama_ina_reproj_file = gw_ama_ina_reproj_dir + 'input_ama_ina_reproj.shp'
        if self.input_watershed_file:
            self.input_watershed_reproj_file = watershed_reproj_dir + 'input_watershed_reproj.shp'
        if self.input_gw_basin:
            self.input_gw_basin_reproj_file = gw_basin_reproj_dir + 'input_gw_basin_reproj.shp'
        self.input_state_reproj_file = state_reproj_dir + 'input_state_reproj.shp'
        if not already_reprojected:
            print('Reprojecting Boundary/State/AMA_INA/Watershed shapefiles...')
            makedirs([gw_boundary_reproj_dir, state_reproj_dir])
            # The first GW shapefile produced earlier defines the target CRS.
            ref_shp = glob(self.output_shp_dir + '*.shp')[0]
            vops.reproject_vector(self.input_gw_boundary_file, outfile_path=self.input_gw_boundary_reproj_file,
                                  ref_file=ref_shp, raster=False)
            # AMA/INA, watershed, and basin layers are optional (Arizona only).
            if self.input_ama_ina_file:
                makedirs([gw_ama_ina_reproj_dir])
                vops.reproject_vector(self.input_ama_ina_file, outfile_path=self.input_ama_ina_reproj_file,
                                      ref_file=ref_shp, raster=False)
            if self.input_watershed_file:
                makedirs([watershed_reproj_dir])
                vops.reproject_vector(self.input_watershed_file, outfile_path=self.input_watershed_reproj_file,
                                      ref_file=ref_shp, raster=False)
            if self.input_gw_basin:
                makedirs([gw_basin_reproj_dir])
                vops.reproject_vector(self.input_gw_basin, outfile_path=self.input_gw_basin_reproj_file,
                                      ref_file=ref_shp, raster=False)
            vops.reproject_vector(self.input_state_file, outfile_path=self.input_state_reproj_file, ref_file=ref_shp,
                                  raster=False)
        else:
            print('Boundary/State/AMA_INA shapefiles are already reprojected')
def clip_gw_shpfiles(self, new_clip_file=None, already_clipped=False, extent_clip=True):
"""
Clip GW shapefiles based on GMD extent
:param new_clip_file: Input clip file for clipping GW shapefiles (e.g, it could be a watershed shapefile),
required only if you don't want to clip using GMD extent. Should be in the same projection system
:param already_clipped: Set False to re-clip shapefiles
:param extent_clip: Set False to clip by cutline, if shapefile consists of multiple polygons, then this won't
work
:return: None
"""
clip_file = self.input_gw_boundary_reproj_file
if new_clip_file:
clip_file = new_clip_file
clip_shp_dir = make_proper_dir_name(self.output_shp_dir + 'Clipped')
if not already_clipped:
print('Clipping GW shapefiles...')
makedirs([clip_shp_dir])
vops.clip_vectors(self.output_shp_dir, clip_file=clip_file, outdir=clip_shp_dir, gdal_path=self.gdal_path,
extent_clip=extent_clip)
else:
print('GW Shapefiles already clipped')
self.output_shp_dir = clip_shp_dir
    def crop_gw_rasters(self, ext_mask=True, use_ama_ina=False, already_cropped=False):
        """
        Crop GW rasters based on a mask, should be called after GW rasters have been created.
        :param ext_mask: Set True to crop by cutline, if shapefile consists of multiple polygons, then this won't
        work and appropriate AMA/INA should be set
        :param use_ama_ina: Use AMA/INA shapefile for cropping (Set True for Arizona).
        :param already_cropped: Set True to disable cropping
        :return: None
        """
        cropped_dir = make_proper_dir_name(self.output_gw_raster_dir + 'Cropped')
        if not already_cropped:
            makedirs([cropped_dir])
            multi_poly = False
            # Crop to the state boundary by default; AMA/INA regions are
            # multi-polygon, so flag that for the cropping routine.
            raster_mask = self.input_state_reproj_file
            if use_ama_ina:
                raster_mask = self.input_ama_ina_reproj_file
                multi_poly = True
            rops.crop_rasters(self.final_gw_dir, outdir=cropped_dir, input_mask_file=raster_mask, ext_mask=ext_mask,
                              gdal_path=self.gdal_path, multi_poly=multi_poly)
        else:
            print('GW rasters already cropped')
        self.final_gw_dir = cropped_dir
        if not use_ama_ina:
            # Without AMA/INA masking, negative well values can remain after
            # cropping; fix them into a separate directory.
            self.final_gw_dir = make_proper_dir_name(cropped_dir + 'Well_Fixed')
            makedirs([self.final_gw_dir])
            rops.fix_gw_raster_values(cropped_dir, outdir=self.final_gw_dir, fix_only_negative=True)
        # actual_gw_dir keeps the unfixed cropped rasters for prediction use.
        self.actual_gw_dir = cropped_dir
def create_well_registry_raster(self, xres=5000., yres=5000., already_created=False):
"""
Create well registry raster for Arizona
:param xres: X-Resolution (map unit)
:param yres: Y-Resolution (map unit)
:param already_created: Set False to re-compute GW pumping rasters
:return: None
"""
self.well_reg_dir = make_proper_dir_name(self.file_dir + 'Well_Reg_Rasters')
if not already_created:
print('Creating well registry raster...')
makedirs([self.well_reg_dir])
self.well_reg_raster_file = self.well_reg_dir + 'well_reg.tif'
vops.shp2raster(self.input_gw_boundary_file, self.well_reg_raster_file, xres=xres, yres=yres, smoothing=0,
burn_value=1.0, gdal_path=self.gdal_path, gridding=False)
print('Well registry raster created...')
def create_gw_basin_raster(self, xres=5000., yres=5000., already_created=False):
"""
Create GW basin raster for Arizona
:param xres: X-Resolution (map unit)
:param yres: Y-Resolution (map unit)
:param already_created: Set True if raster already exists
:return: None
"""
self.gw_basin_raster_dir = make_proper_dir_name(self.file_dir + 'GW_Basin_Raster')
if not already_created:
print('Creating GW Basin raster...')
makedirs([self.gw_basin_raster_dir])
gw_basin_raster_file = self.gw_basin_raster_dir + 'GW_Basin.tif'
vops.shp2raster(self.input_gw_basin_reproj_file, gw_basin_raster_file, xres=xres, yres=yres,
smoothing=0, value_field='OBJECTID', add_value=False, gdal_path=self.gdal_path,
gridding=False)
print('GW Basin raster created...')
    def create_gw_rasters(self, xres=5000., yres=5000., max_gw=1000., value_field=None, value_field_pos=0,
                          convert_units=True, already_created=True):
        """
        Create GW rasters from shapefiles
        :param xres: X-Resolution (map unit)
        :param yres: Y-Resolution (map unit)
        :param max_gw: Maximum GW pumping in mm. Any value higher than this will be set to no data
        :param value_field: Name of the value attribute. Set None to use value_field_pos
        :param value_field_pos: Value field position (zero indexing)
        :param convert_units: If true, converts GW pumping values in acreft to mm
        :param already_created: Set False to re-compute GW pumping rasters
        :return: None
        """
        fixed_dir = make_proper_dir_name(self.output_gw_raster_dir + 'Fixed')
        converted_dir = make_proper_dir_name(self.output_gw_raster_dir + 'Converted')
        if not already_created:
            print('Converting SHP to TIF...')
            makedirs([fixed_dir])
            vops.shps2rasters(self.output_shp_dir, self.output_gw_raster_dir, xres=xres, yres=yres, smoothing=0,
                              value_field=value_field, value_field_pos=value_field_pos, gdal_path=self.gdal_path,
                              gridding=False)
            if convert_units:
                # max_gw is given in mm but the rasters still hold acre-ft at
                # this point, so scale the threshold by pixel area / 1.233e+6
                # (presumably mm-to-acre-ft over one pixel - TODO confirm).
                max_gw *= xres * yres / 1.233e+6
            rops.fix_gw_raster_values(self.output_gw_raster_dir, max_threshold=max_gw, outdir=fixed_dir)
            if convert_units:
                print('Changing GW units from acreft to mm')
                makedirs([converted_dir])
                rops.convert_gw_data(fixed_dir, converted_dir)
        else:
            print('GW pumping rasters already created')
        # Later stages read from final_gw_dir / actual_gw_dir.
        if convert_units:
            self.final_gw_dir = converted_dir
        else:
            self.final_gw_dir = fixed_dir
        self.actual_gw_dir = self.final_gw_dir
def create_crop_coeff_raster(self, already_created=False):
"""
Create crop coefficient raster based on the NASS CDL file
:param already_created: Set True to disable raster creation
:return: None
"""
self.crop_coeff_dir = make_proper_dir_name(self.file_dir + 'Crop_Coeff')
if not already_created:
print('Creating crop coefficient raster...')
makedirs([self.crop_coeff_dir])
rops.create_crop_coeff_raster(self.cdl_file_dir, output_dir=self.crop_coeff_dir)
def create_mean_crop_coeff_raster(self, already_created=False):
"""
Create mean crop coefficient raster based on the annual CDL files which are already reprojected
:param already_created: Set True to disable raster creation
:return: None
"""
if not already_created:
print('Creating mean crop coefficient raster...')
rops.create_mean_crop_coeff_raster(self.crop_coeff_reproj_dir, self.crop_coeff_reproj_dir)
    def create_sed_thickness_raster(self, xres=5000., yres=5000., already_converted=False, already_clipped=False,
                                    already_created=False):
        """
        Create sediment thickness raster for Arizona
        :param xres: X-Resolution (map unit)
        :param yres: Y-Resolution (map unit)
        :param already_converted: Set True if CSV has already been converted to SHP
        :param already_clipped: Set True if shapefile has already been reprojected and clipped
        :param already_created: Set False to re-compute GW pumping rasters
        :return: None
        """
        # Three-stage pipeline: CSV -> point shapefile -> reproject/clip ->
        # raster; each stage can be skipped independently via its flag.
        self.sed_thick_dir = make_proper_dir_name(self.file_dir + 'Sed_Thick')
        self.sed_thick_shp_file = self.sed_thick_dir + 'Sed_Thick.shp'
        self.sed_thick_raster_file = self.sed_thick_dir + 'Sed_Thick.tif'
        sed_thick_shp = self.sed_thick_dir + 'Sed_Thick_All.shp'
        if not already_converted:
            print('Creating sediment thickness shapefile...')
            makedirs([self.sed_thick_dir])
            # Columns 0 and 1 of the USGS CSV hold longitude and latitude.
            vops.csv2shp(self.sed_thick_csv, sed_thick_shp, long_lat_pos=(0, 1))
        if not already_clipped:
            print('Reprojecting sediment thickness shapefile...')
            # Reprojection is done in place (same input and output path).
            vops.reproject_vector(sed_thick_shp, sed_thick_shp, self.input_state_reproj_file,
                                  raster=False)
            print('Clipping sediment thickness shapefile...')
            vops.clip_vector(sed_thick_shp, self.input_state_reproj_file, self.sed_thick_shp_file,
                             gdal_path=self.gdal_path, extent_clip=False)
        if not already_created:
            print('Creating sediment thickness raster...')
            rops.create_sed_thickness_raster(self.sed_thick_shp_file, self.sed_thick_raster_file, self.gdal_path,
                                             xres, yres)
            print('Sediment thickness raster created...')
def reclassify_cdl(self, reclass_dict, pattern='*.tif', already_reclassified=False):
"""
Reclassify raster
:param reclass_dict: Dictionary where key values are tuples representing the interval for reclassification, the
dictionary values represent the new class
:param pattern: File pattern required for reprojection
:param already_reclassified: Set True to disable reclassification
:return: None
"""
self.cdl_reclass_dir = make_proper_dir_name(self.file_dir + 'Reclass')
self.ref_raster = glob(self.actual_gw_dir + pattern)[0]
if not already_reclassified:
makedirs([self.cdl_reclass_dir])
rops.reclassify_cdl_files(self.cdl_file_dir, self.cdl_reclass_dir, reclass_dict, self.ref_raster,
self.gdal_path)
else:
print('Already reclassified')
def organize_subsidence_rasters(self, decorrelated_value=-10000, verbose=False, already_organized=False):
"""
Organize ADWR subsidence rasters and then create resampled subsidence rasters
:param decorrelated_value: Decorrelated pixel value for subsidence rasters, these would be set to no data
:param verbose: Set True to get additional info
:param already_organized: Set True to disable organizing subsidence rasters
:return: None
"""
self.converted_subsidence_dir = self.file_dir + 'Converted_Subsidence_Rasters/'
if not already_organized:
print('Organizing subsidence rasters...')
makedirs([self.converted_subsidence_dir])
rops.organize_subsidence_data(self.input_subsidence_dir, output_dir=self.converted_subsidence_dir,
ref_raster=self.ref_raster, gdal_path=self.gdal_path,
decorrelated_value=decorrelated_value, verbose=verbose)
print('Organized and created subsidence rasters...')
    def reproject_rasters(self, pattern='*.tif', already_reprojected=False):
        """
        Reproject rasters based on GW as reference raster
        :param pattern: File pattern to look for
        :param already_reprojected: Set True to disable raster reprojection
        :return: None
        """
        # Output directory paths are always recorded on self, even when the
        # actual reprojection is skipped.
        self.raster_reproj_dir = self.file_dir + 'Reproj_Rasters/'
        self.ssebop_reproj_dir = self.ssebop_file_dir + 'SSEBop_Reproj/'
        self.ws_data_reproj_dir = self.file_dir + 'WS_Reproj_Rasters/'
        self.ws_ssebop_reproj_dir = self.file_dir + 'WS_SSEBop_Reproj_Rasters/'
        self.crop_coeff_reproj_dir = self.crop_coeff_dir + 'Crop_Coeff_Reproj/'
        self.well_reg_reproj_dir = self.well_reg_dir + 'Well_Reg_Reproj/'
        self.sed_thick_reproj_dir = self.sed_thick_dir + 'Reproj/'
        self.gw_basin_raster_reproj_dir = self.gw_basin_raster_dir + 'Reproj/'
        if not already_reprojected:
            print('Reprojecting rasters...')
            makedirs([self.raster_reproj_dir, self.crop_coeff_reproj_dir, self.well_reg_reproj_dir,
                      self.sed_thick_reproj_dir, self.gw_basin_raster_reproj_dir])
            # Each thematic raster set is reprojected to the GW reference grid.
            rops.reproject_rasters(self.input_ts_dir, ref_raster=self.ref_raster, outdir=self.raster_reproj_dir,
                                   pattern=pattern, gdal_path=self.gdal_path)
            rops.reproject_rasters(self.crop_coeff_dir, ref_raster=self.ref_raster, outdir=self.crop_coeff_reproj_dir,
                                   pattern=pattern, gdal_path=self.gdal_path)
            rops.reproject_rasters(self.well_reg_dir, ref_raster=self.ref_raster,
                                   outdir=self.well_reg_reproj_dir, pattern=pattern, gdal_path=self.gdal_path)
            rops.reproject_rasters(self.sed_thick_dir, ref_raster=self.ref_raster, outdir=self.sed_thick_reproj_dir,
                                   pattern='Sed_Thick.tif', gdal_path=self.gdal_path)
            rops.reproject_rasters(self.gw_basin_raster_dir, ref_raster=self.ref_raster,
                                   outdir=self.gw_basin_raster_reproj_dir, pattern=pattern, gdal_path=self.gdal_path)
            # SSEBop handling only applies when a download link was configured.
            if self.ssebop_link:
                makedirs([self.ssebop_reproj_dir, self.ws_ssebop_reproj_dir, self.ws_data_reproj_dir])
                rops.reproject_rasters(self.ssebop_file_dir, ref_raster=self.ref_raster, outdir=self.ssebop_reproj_dir,
                                       pattern=pattern, gdal_path=self.gdal_path)
                # Cumulative ET over the configured monthly window per year.
                rops.generate_cummulative_ssebop(self.ssebop_reproj_dir, year_list=self.data_year_list,
                                                 start_month=self.data_start_month, end_month=self.data_end_month,
                                                 out_dir=self.raster_reproj_dir)
                # Water-stress SSEBop/P data require download_ws_data to have run.
                if self.ws_year_list is not None:
                    rops.reproject_rasters(self.ws_ssebop_file_dir, ref_raster=self.ref_raster,
                                           outdir=self.ws_ssebop_reproj_dir, pattern=pattern, gdal_path=self.gdal_path)
                    rops.generate_cummulative_ssebop(self.ws_ssebop_reproj_dir, year_list=self.ws_year_list,
                                                     start_month=self.ws_start_month, end_month=self.ws_end_month,
                                                     out_dir=self.ws_data_reproj_dir)
                    rops.reproject_rasters(self.ws_data_dir, ref_raster=self.ref_raster, outdir=self.ws_data_reproj_dir,
                                           pattern=pattern, gdal_path=self.gdal_path)
        else:
            print('All rasters already reprojected')
def create_land_use_rasters(self, class_values=(1, 2, 3), class_labels=('AGRI', 'SW', 'URBAN'),
smoothing_factors=(3, 5, 3), already_created=False, post_process=False,
out_mean_flt_rasters=True):
"""
Create land use rasters from the reclassified raster
:param class_values: List of land use class values to consider for creating separate rasters
:param class_labels: List of class_labels ordered according to land_uses
:param smoothing_factors: Smoothing factor (sigma value for Gaussian filter) to use while smoothing
:param already_created: Set True to disable land use raster generation
:param post_process: Set False to disable post processing based on well registry raster
:param out_mean_flt_rasters: Set True to output mean AGRI, URBAN, and SW filtered rasters
:return: None
"""
self.land_use_dir_list = [make_proper_dir_name(self.file_dir + class_label) for class_label in class_labels]
self.well_reg_flt_dir = make_proper_dir_name(self.well_reg_dir + 'Flt')
makedirs([self.well_reg_flt_dir])
self.well_reg_flt_file = self.well_reg_flt_dir + 'Well_Reg_Flt.tif'
if not already_created:
well_reg_raster = glob(self.well_reg_reproj_dir + '*.tif')[0]
rops.filter_nans(well_reg_raster, self.ref_raster, outfile_path=self.well_reg_flt_file)
is_cdl_ts = self.cdl_year is None
rops.create_land_use_rasters(self.land_use_dir_list, self.cdl_reclass_dir, class_values, class_labels,
smoothing_factors, self.ref_raster, self.well_reg_flt_file, post_process,
is_cdl_ts, out_mean_flt_rasters)
else:
print('Land use rasters already created')
def create_water_stress_index_rasters(self, pattern_list=('P*.tif', 'SSEBop*.tif', 'AGRI*.tif', 'URBAN*.tif'),
already_created=False, normalize=False):
"""
Create water stress index rasters based on P, ET, and landuse
:param pattern_list: Raster pattern list ordered by P, ET (or SSEBop), AGRI, and URBAN
:param already_created: Set True to disable water stress raster creation
:param normalize: Set True to normalize water stress index
:return: None
"""
ws_out_dir = make_proper_dir_name(self.file_dir + 'WS_Rasters')
makedirs([ws_out_dir])
if not already_created:
input_raster_dir_list = [self.ws_data_reproj_dir] * 2 + [self.land_use_dir_list[0],
self.land_use_dir_list[2]]
rops.compute_water_stress_index_rasters(self.input_watershed_reproj_file, pattern_list=pattern_list,
input_raster_dir_list=input_raster_dir_list, output_dir=ws_out_dir,
gdal_path=self.gdal_path, normalize=normalize)
rops.reproject_rasters(ws_out_dir, ref_raster=self.ref_raster, outdir=self.raster_reproj_dir,
pattern='*.tif', gdal_path=self.gdal_path)
else:
print('Water stress rasters already created')
    def mask_rasters(self, pattern='*.tif', already_masked=False):
        """
        Mask rasters based on reference GW raster
        :param pattern: File pattern to look for
        :param already_masked: Set True to disable raster masking
        :return: None
        """
        # Switch the reference raster to the cropped/fixed GW raster so masks
        # follow the final GW extent.
        self.ref_raster = glob(self.final_gw_dir + pattern)[0]
        self.raster_mask_dir = make_proper_dir_name(self.file_dir + 'Masked_Rasters')
        self.lu_mask_dir = make_proper_dir_name(self.raster_mask_dir + 'Masked_LU')
        self.crop_coeff_mask_dir = make_proper_dir_name(self.raster_mask_dir + 'Masked_Crop_Coeff')
        self.well_reg_mask_dir = make_proper_dir_name(self.well_reg_dir + 'Masked')
        if not already_masked:
            print('Masking rasters...')
            makedirs([self.raster_mask_dir, self.lu_mask_dir, self.crop_coeff_mask_dir, self.well_reg_mask_dir])
            rops.mask_rasters(self.raster_reproj_dir, ref_raster=self.ref_raster, outdir=self.raster_mask_dir,
                              pattern=pattern)
            rops.mask_rasters(self.crop_coeff_reproj_dir, ref_raster=self.ref_raster, outdir=self.crop_coeff_mask_dir,
                              pattern=pattern)
            rops.mask_rasters(self.well_reg_reproj_dir, ref_raster=self.ref_raster, outdir=self.well_reg_mask_dir,
                              pattern=pattern)
            # All land-use classes share one masked output directory.
            for lu_dir in self.land_use_dir_list:
                rops.mask_rasters(lu_dir, ref_raster=self.ref_raster, outdir=self.lu_mask_dir, pattern=pattern)
        else:
            print('All rasters already masked')
    def create_dataframe(self, year_list, column_names=None, ordering=False, load_df=False, exclude_vars=(),
                         exclude_years=(2019, ), pattern='*.tif', verbose=False, remove_na=True, load_gw_info=False):
        """
        Create dataframe from preprocessed files
        :param year_list: List of years for which the dataframe will be created
        :param column_names: Dataframe column names, these must be df headers
        :param ordering: Set True to order dataframe column names
        :param load_df: Set true to load existing dataframe
        :param exclude_vars: Exclude these variables from the dataframe
        :param exclude_years: List of years to exclude from dataframe
        :param pattern: File pattern
        :param verbose: Get extra information if set to True
        :param remove_na: Set False to disable NA removal
        :param load_gw_info: Set True to load previously created GWinfo raster containing the name of the Arizona
        AMA/INA regions
        :return: Pandas dataframe object
        """
        self.rf_data_dir = make_proper_dir_name(self.file_dir + 'RF_Data')
        self.pred_data_dir = make_proper_dir_name(self.file_dir + 'Pred_Data')
        df_file = self.output_dir + 'raster_df.csv'
        if load_df:
            print('Getting dataframe...')
            return pd.read_csv(df_file, dtype={'GW_NAME': 'string'})
        else:
            print('Copying files...')
            makedirs([self.rf_data_dir, self.pred_data_dir])
            # Training data (masked rasters) go into rf_data_dir.
            input_dir_list = [self.final_gw_dir] + [self.raster_mask_dir]
            pattern_list = [pattern] * len(input_dir_list)
            copy_files(input_dir_list, target_dir=self.rf_data_dir, year_list=year_list, pattern_list=pattern_list,
                       verbose=verbose)
            copy_files([self.crop_coeff_mask_dir], target_dir=self.rf_data_dir, year_list=year_list,
                       pattern_list=[pattern], verbose=verbose)
            copy_files([self.lu_mask_dir], target_dir=self.rf_data_dir, year_list=year_list,
                       pattern_list=[pattern], verbose=verbose)
            # rep=True replicates the single well-registry raster per year.
            copy_files([self.well_reg_mask_dir], target_dir=self.rf_data_dir, year_list=year_list,
                       pattern_list=[pattern], rep=True, verbose=verbose)
            # Full-extent (unmasked) data for prediction go into pred_data_dir.
            input_dir_list = [self.actual_gw_dir] + [self.raster_reproj_dir]
            pattern_list = [pattern] * len(input_dir_list)
            copy_files(input_dir_list, target_dir=self.pred_data_dir, year_list=year_list, pattern_list=pattern_list,
                       verbose=verbose)
            pattern_list = [pattern] * len(self.land_use_dir_list)
            copy_files(self.land_use_dir_list, target_dir=self.pred_data_dir, year_list=year_list,
                       pattern_list=pattern_list, verbose=verbose)
            copy_files([self.crop_coeff_reproj_dir], target_dir=self.pred_data_dir, year_list=year_list,
                       pattern_list=[pattern], verbose=verbose)
            copy_files([self.well_reg_flt_dir], target_dir=self.pred_data_dir, year_list=year_list,
                       pattern_list=[pattern], rep=True, verbose=verbose)
            print('Creating dataframe...')
            # GW_NAME labels come from the AMA/INA polygons' NAME_ABBR field.
            gw_file = self.input_ama_ina_reproj_file
            label_attr = 'NAME_ABBR'
            df = rfr.create_dataframe(self.rf_data_dir, input_gw_file=gw_file, output_dir=self.output_dir,
                                      label_attr=label_attr, column_names=column_names, make_year_col=True,
                                      exclude_vars=exclude_vars, exclude_years=exclude_years, ordering=ordering,
                                      load_gw_info=load_gw_info, remove_na=remove_na)
            return df
    def build_model(self, df, n_estimators=100, random_state=0, bootstrap=True, max_features=3, test_size=None,
                    pred_attr='GW', shuffle=True, plot_graphs=False, plot_3d=False, drop_attrs=(), test_year=(2012,),
                    test_gw=('DIN',), use_gw=False, split_attribute=True, load_model=False, calc_perm_imp=False,
                    spatio_temporal=False):
        """
        Build random forest model
        :param df: Input pandas dataframe object
        :param n_estimators: RF hyperparameter
        :param random_state: RF hyperparameter
        :param bootstrap: RF hyperparameter
        :param max_features: RF hyperparameter
        :param test_size: Required only if split_yearly=False
        :param pred_attr: Prediction attribute name in the dataframe
        :param shuffle: Set False to stop data shuffling
        :param plot_graphs: Plot Actual vs Prediction graph
        :param plot_3d: Plot pairwise 3D partial dependence plots
        :param drop_attrs: Drop these specified attributes
        :param test_year: Build test data from only this year(s).
        :param test_gw: Build test data from only this AMA/INA (Arizona) region, use_gw must be set to
        True.
        :param use_gw: Set True to build test data from only test_gw
        :param split_attribute: Split train test data based on years
        :param load_model: Load an earlier pre-trained RF model
        :param calc_perm_imp: Set True to get permutation importances on train and test data
        :param spatio_temporal: Set True to build test from both test_years and test_gws
        :return: Fitted RandomForestRegressor object
        """
        print('Building RF Model...')
        # Partial dependence plot data are stored under the output directory.
        plot_dir = make_proper_dir_name(self.output_dir + 'Partial_Plots/PDP_Data')
        makedirs([plot_dir])
        rf_model = rfr.rf_regressor(df, self.output_dir, n_estimators=n_estimators, random_state=random_state,
                                    pred_attr=pred_attr, drop_attrs=drop_attrs, test_year=test_year, test_gw=test_gw,
                                    use_gw=use_gw, shuffle=shuffle, plot_graphs=plot_graphs, plot_3d=plot_3d,
                                    split_attribute=split_attribute, bootstrap=bootstrap, plot_dir=plot_dir,
                                    max_features=max_features, load_model=load_model, test_size=test_size,
                                    calc_perm_imp=calc_perm_imp, spatio_temporal=spatio_temporal)
        return rf_model
def get_predictions(self, rf_model, pred_years, column_names=None, ordering=False, pred_attr='GW',
only_pred=False, exclude_vars=(), exclude_years=(2019,), drop_attrs=(), use_full_extent=False,
post_process=True):
"""
Get prediction results and/or rasters
:param rf_model: Fitted RandomForestRegressor model
:param pred_years: Predict for these years
:param column_names: Dataframe column names, these must be df headers
:param ordering: Set True to order dataframe column names
:param pred_attr: Prediction attribute name in the dataframe
:param only_pred: Set True to disable prediction raster generation
:param exclude_vars: Exclude these variables from the model prediction analysis
:param exclude_years: List of years to exclude from dataframe
:param drop_attrs: Drop these specified attributes
:param use_full_extent: Set True to predict over entire region
:param post_process: Set False to disable postprocessing
:return: Actual and Predicted raster directory paths
"""
print('Predicting...')
self.pred_out_dir = make_proper_dir_name(self.output_dir + 'Predicted_Rasters')
makedirs([self.pred_out_dir])
actual_raster_dir = self.rf_data_dir
if use_full_extent:
actual_raster_dir = self.pred_data_dir
rfr.predict_rasters(rf_model, pred_years=pred_years, drop_attrs=drop_attrs, out_dir=self.pred_out_dir,
actual_raster_dir=actual_raster_dir, pred_attr=pred_attr, only_pred=only_pred,
exclude_vars=exclude_vars, exclude_years=exclude_years, column_names=column_names,
ordering=ordering)
if post_process:
output_dir = make_proper_dir_name(self.pred_out_dir + 'Postprocessed')
makedirs([output_dir])
well_mask = glob(self.well_reg_mask_dir + '*.tif')[0]
if use_full_extent:
well_mask = self.well_reg_flt_file
rops.postprocess_rasters(self.pred_out_dir, output_dir, well_mask)
self.pred_out_dir = output_dir
return actual_raster_dir, self.pred_out_dir
def create_subsidence_pred_gw_rasters(self, scale_to_cm=False, verbose=False, already_created=False):
"""
Create total predicted GW withdrawal rasters based on subsidence years
:param scale_to_cm: Set False to disable scaling GW and subsidence data to cm, default GW is in mm and
subsidence is in m. If False, subsidence will be converted to mm
:param verbose: Set True to get additional info
:param already_created: Set True to disable creating these rasters
:return: None
"""
self.subsidence_pred_gw_dir = make_proper_dir_name(self.output_dir + 'Subsidence_Analysis')
if not already_created:
makedirs([self.subsidence_pred_gw_dir])
sed_thick_raster = glob(self.sed_thick_reproj_dir + '*.tif')[0]
watershed_raster = glob(self.gw_basin_raster_reproj_dir + '*.tif')[0]
rops.create_subsidence_pred_gw_rasters(self.pred_out_dir, self.converted_subsidence_dir, sed_thick_raster,
watershed_raster, self.subsidence_pred_gw_dir,
scale_to_cm=scale_to_cm, verbose=verbose)
print('Subsidence and total predicted GW rasters created!')
def crop_final_gw_rasters(self, actual_gw_dir, pred_gw_dir, test_years, already_cropped=False):
"""
Crop actual and predicted GW rasters based on the Arizona AMA/INA mask, should be called after
predicted GW rasters have been created.
:param actual_gw_dir: Actual GW raster directory
:param pred_gw_dir: Predicted GW raster directory
:param test_years: List of test years
:param already_cropped: Set True to disable cropping
:return: Actual and Predicted cropped GW directories a tuple
"""
cropped_dir = make_proper_dir_name(self.output_dir + 'Final_GW_Cropped')
makedirs([cropped_dir])
actual_gw_dir, pred_gw_dir = rops.crop_final_gw_rasters(actual_gw_dir, pred_gw_dir,
raster_mask=self.input_ama_ina_reproj_file,
output_dir=cropped_dir, gdal_path=self.gdal_path,
already_cropped=already_cropped, test_years=test_years)
return actual_gw_dir, pred_gw_dir
def run_gw(analyze_only=False, load_files=True, load_rf_model=False, load_df=False, subsidence_analysis=False,
           ama_ina_train=False):
    """
    Main function for running the project for Arizona, some variables require to be hardcoded
    :param analyze_only: Set True to just produce analysis results, all required files must be present
    :param load_files: Set True to load existing files, needed only if analyze_only=False
    :param load_rf_model: Set True to load existing Random Forest model, needed only if analyze_only=False
    :param load_df: Set True to load existing dataframe from CSV
    :param subsidence_analysis: Set True to analyze total subsidence and total groundwater withdrawals in a
    specified period; requires analyze_only=False so that the model and prediction rasters are (re)built
    :param ama_ina_train: Set True to train and test on specific AMA/INA regions
    :return: None
    """
    # ---- Hardcoded paths; gee_data[2] selects the 'Annual/' aggregation for input/output dirs ----
    gee_data = ['Apr_Sept/', 'Apr_Aug/', 'Annual/']
    input_dir = '../Inputs/Data/Arizona_GW/'
    input_subsidence_dir = input_dir + 'Subsidence/Subsidence_Rasters/'
    file_dir = '../Inputs/Files_AZ_' + gee_data[2]
    output_dir = '../Outputs/Output_AZ_' + gee_data[2]
    output_shp_dir = file_dir + 'GW_Shapefiles/'
    output_gw_raster_dir = file_dir + 'GW_Rasters/'
    input_well_reg_file = input_dir + 'Well_Registry/WellRegistry.shp'
    input_ama_ina_file = input_dir + 'Boundary/AMA_and_INA.shp'
    input_watershed_file = input_dir + 'Watersheds/Surface_Watershed.shp'
    input_gw_basin = input_dir + 'GW_Basin/Groundwater_Basin.shp'
    input_gw_csv_dir = input_dir + 'GW_Data/'
    input_state_file = input_dir + 'Arizona/Arizona.shp'
    gdal_path = 'C:/OSGeo4W64/'
    actual_gw_dir = file_dir + 'RF_Data/'
    pred_gw_dir = output_dir + 'Predicted_Rasters/'
    grace_csv = input_dir + 'GRACE/TWS_GRACE.csv'
    ssebop_link = 'https://edcintl.cr.usgs.gov/downloads/sciweb1/shared/uswem/web/conus/eta/modis_eta/monthly/' \
                  'downloads/'
    sed_thick_csv = input_dir + 'Sediment_Thickness/Sedthick_LLz.csv'
    # ---- Temporal coverage of the main data set and of the water-stress (WS) metric ----
    data_year_list = range(2002, 2021)
    data_start_month = 1
    data_end_month = 12
    ws_start_month = 10
    ws_end_month = 5
    # CDL value ranges reclassified to 1, 2, 3, and 0 (see reclassify_cdl class labels)
    az_class_dict = {(0, 59.5): 1,
                     (66.5, 77.5): 1,
                     (203.5, 255): 1,
                     (110.5, 111.5): 2,
                     (111.5, 112.5): 0,
                     (120.5, 124.5): 3,
                     (59.5, 61.5): 0,
                     (130.5, 195.5): 0
                     }
    drop_attrs = ('YEAR', 'AGRI_flt', 'URBAN_flt', 'SW_flt', 'CC',)
    test_years = range(2010, 2021)
    exclude_vars = ('ET', 'WS_PT', 'WS_PT_ET')
    pred_attr = 'GW'
    fill_attr = 'AF Pumped'
    filter_attr = None
    test_ama_ina = ()
    if ama_ina_train:
        test_ama_ina = ('HAR',)
    xres, yres = 2000, 2000
    cdl_year = None
    ws_stress_dict = {
        'spatial': ('P*.tif', 'SSEBop*.tif', 'AGRI_flt*.tif', 'URBAN_flt*.tif'),
        'temporal': ('P*.tif', 'SSEBop*.tif', 'AGRI_Mean*.tif', 'URBAN_Mean*.tif')
    }
    # Land-use smoothing factors to iterate over (currently just sf=4)
    sf_flt_list = list(range(4, 5))
    if not analyze_only:
        gw = HydroML(input_dir, file_dir, output_dir, output_shp_dir, output_gw_raster_dir,
                     input_state_file, gdal_path, input_subsidence_dir=input_subsidence_dir,
                     input_gw_boundary_file=input_well_reg_file, input_ama_ina_file=input_ama_ina_file,
                     input_watershed_file=input_watershed_file, input_gw_basin=input_gw_basin, ssebop_link=ssebop_link,
                     sed_thick_csv=sed_thick_csv, cdl_year=cdl_year)
        # One-time downloads and preprocessing; each step is skipped when load_files is True.
        gw.download_data(year_list=data_year_list, start_month=data_start_month, end_month=data_end_month,
                         already_downloaded=load_files, already_extracted=load_files)
        gw.download_ws_data(year_list=data_year_list, start_month=ws_start_month, end_month=ws_end_month,
                            already_downloaded=load_files, already_extracted=load_files)
        gw.preprocess_gw_csv(input_gw_csv_dir, fill_attr=fill_attr, filter_attr=filter_attr, use_only_ama_ina=False,
                             already_preprocessed=load_files)
        gw.reproject_shapefiles(already_reprojected=load_files)
        gw.create_gw_rasters(already_created=load_files, value_field=fill_attr, xres=xres, yres=yres, max_gw=3000)
        gw.create_well_registry_raster(xres=xres, yres=yres, already_created=load_files)
        gw.create_sed_thickness_raster(xres=xres, yres=yres, already_converted=True, already_clipped=True,
                                       already_created=load_files)
        gw.crop_gw_rasters(use_ama_ina=False, already_cropped=load_files)
        gw.reclassify_cdl(az_class_dict, already_reclassified=load_files)
        gw.create_crop_coeff_raster(already_created=load_files)
        gw.create_gw_basin_raster(xres=xres, yres=yres, already_created=load_files)
        gw.reproject_rasters(already_reprojected=load_files)
        gw.create_mean_crop_coeff_raster(already_created=load_files)
        # GW_NAME info is loaded from disk on every smoothing-factor iteration.
        # (The original 'if idx > 0: load_gw_info = True' at the loop bottom was a no-op and was removed.)
        load_gw_info = True
        for sf in sf_flt_list:
            gw.create_land_use_rasters(already_created=load_files, smoothing_factors=(sf, sf, sf))
            # NOTE(review): the original code re-assigned ws_stress_dict['temporal'] when ama_ina_train was
            # True, which was a no-op; the 'spatial' pattern list is currently unused. Confirm whether one
            # of the two cases was meant to use 'spatial'.
            ws_pattern_list = ws_stress_dict['temporal']
            gw.create_water_stress_index_rasters(already_created=load_files, normalize=False,
                                                 pattern_list=ws_pattern_list)
            if subsidence_analysis:
                gw.organize_subsidence_rasters(already_organized=load_files)
            gw.mask_rasters(already_masked=load_files)
            df = gw.create_dataframe(year_list=range(2002, 2021), exclude_vars=exclude_vars, exclude_years=(),
                                     load_df=load_df, load_gw_info=load_gw_info)
            # GW_NAME is only a label column and must not be used as a predictor.
            dattr = list(drop_attrs) + ['GW_NAME']
            rf_model = gw.build_model(df, n_estimators=500, test_year=test_years, drop_attrs=dattr,
                                      pred_attr=pred_attr, load_model=load_rf_model, max_features=5,
                                      plot_graphs=False, use_gw=ama_ina_train, test_gw=test_ama_ina,
                                      spatio_temporal=False, shuffle=False, random_state=0)
            actual_gw_dir, pred_gw_dir = gw.get_predictions(rf_model=rf_model, pred_years=range(2002, 2021),
                                                            drop_attrs=drop_attrs, pred_attr=pred_attr,
                                                            exclude_vars=exclude_vars, exclude_years=(),
                                                            only_pred=False, use_full_extent=subsidence_analysis,
                                                            post_process=False)
            if subsidence_analysis:
                gw.create_subsidence_pred_gw_rasters(already_created=False, verbose=False, scale_to_cm=False)
            # Run the AMA/INA-level analysis only when a single smoothing factor is used.
            if len(sf_flt_list) == 1:
                input_gw_file = file_dir + 'gw_ama_ina/reproj/input_ama_ina_reproj.shp'
                ma.run_analysis(actual_gw_dir, pred_gw_dir, grace_csv, use_gws=True, input_gw_file=input_gw_file,
                                out_dir=output_dir, test_years=test_years, forecast_years=(), show_plots=True,
                                ama_ina_list=test_ama_ina)
            actual_gw_dir, pred_gw_dir = gw.crop_final_gw_rasters(actual_gw_dir, pred_gw_dir,
                                                                  already_cropped=load_rf_model,
                                                                  test_years=test_years)
            if len(sf_flt_list) == 1:
                ma.run_analysis(actual_gw_dir, pred_gw_dir, grace_csv, use_gws=False, out_dir=output_dir,
                                test_years=test_years, forecast_years=())
            ma.generate_feature_plots(output_dir + 'raster_df.csv', feature_list=('SSEBop', 'P'), test_years=test_years)
# Entry point: run the full Arizona pipeline, reusing previously downloaded files, the saved
# RF model, and the cached dataframe, and include the subsidence analysis.
if __name__ == '__main__':
    run_gw(analyze_only=False, load_files=True, load_rf_model=True, subsidence_analysis=True, load_df=True,
           ama_ina_train=False)
| 58.180239 | 120 | 0.659675 |
import pandas as pd
from Python_Files.hydrolibs import rasterops as rops
from Python_Files.hydrolibs import vectorops as vops
from Python_Files.hydrolibs import data_download as dd
from Python_Files.hydrolibs.sysops import makedirs, make_proper_dir_name, copy_files
from Python_Files.hydrolibs import random_forest_regressor as rfr
from Python_Files.hydrolibs import model_analysis as ma
from glob import glob
class HydroML:
def __init__(self, input_dir, file_dir, output_dir, output_shp_dir, output_gw_raster_dir,
input_state_file, gdal_path, input_ts_dir=None, input_subsidence_dir=None, input_gw_boundary_file=None,
input_ama_ina_file=None, input_watershed_file=None, input_gw_basin=None,
ssebop_link=None, sed_thick_csv=None, cdl_year=None):
self.input_dir = make_proper_dir_name(input_dir)
self.file_dir = make_proper_dir_name(file_dir)
self.output_dir = make_proper_dir_name(output_dir)
self.output_shp_dir = make_proper_dir_name(output_shp_dir)
self.output_gw_raster_dir = make_proper_dir_name(output_gw_raster_dir)
self.gdal_path = make_proper_dir_name(gdal_path)
self.input_ts_dir = make_proper_dir_name(input_ts_dir)
self.input_subsidence_dir = make_proper_dir_name(input_subsidence_dir)
self.input_gw_boundary_file = input_gw_boundary_file
self.input_ama_ina_file = input_ama_ina_file
self.input_watershed_file = input_watershed_file
self.input_gw_basin = input_gw_basin
self.input_state_file = input_state_file
self.ssebop_link = ssebop_link
self.input_gw_boundary_reproj_file = None
self.input_ama_ina_reproj_file = None
self.input_state_reproj_file = None
self.input_watershed_reproj_file = None
self.input_gw_basin_reproj_file = None
self.final_gw_dir = None
self.actual_gw_dir = None
self.ref_raster = None
self.raster_reproj_dir = None
self.well_reg_raster_file = None
self.crop_coeff_dir = None
self.crop_coeff_reproj_dir = None
self.crop_coeff_mask_dir = None
self.cdl_reclass_dir = None
self.raster_mask_dir = None
self.land_use_dir_list = None
self.rf_data_dir = None
self.pred_data_dir = None
self.lu_mask_dir = None
self.ssebop_file_dir = None
self.cdl_file_dir = None
self.cdl_reproj_dir = None
self.ssebop_reproj_dir = None
self.ws_ssebop_file_dir = None
self.ws_ssebop_reproj_dir = None
self.data_year_list = None
self.data_start_month = None
self.data_end_month = None
self.ws_year_list = None
self.ws_start_month = None
self.ws_end_month = None
self.ws_data_dir = None
self.ws_data_reproj_dir = None
self.converted_subsidence_dir = None
self.pred_out_dir = None
self.subsidence_pred_gw_dir = None
self.well_reg_dir = None
self.well_reg_mask_dir = None
self.well_reg_flt_file = None
self.well_reg_flt_dir = None
self.well_reg_reproj_dir = None
self.sed_thick_csv = sed_thick_csv
self.sed_thick_dir = None
self.sed_thick_shp_file = None
self.sed_thick_raster_file = None
self.sed_thick_reproj_dir = None
self.gw_basin_raster_dir = None
self.gw_basin_raster_reproj_dir = None
self.cdl_year = cdl_year
makedirs([self.output_dir, self.output_gw_raster_dir, self.output_shp_dir])
def download_data(self, year_list, start_month, end_month, already_downloaded=False, already_extracted=False):
self.data_year_list = year_list
self.data_start_month = start_month
self.data_end_month = end_month
gee_data_flag = False
if self.input_ts_dir is None:
self.input_ts_dir = self.input_dir + 'Downloaded_Data/'
gee_data_flag = True
gee_zip_dir = self.input_ts_dir + 'GEE_Data/'
self.cdl_file_dir = self.input_ts_dir + 'CDL/'
ssebop_zip_dir = self.input_ts_dir + 'SSEBop_Data/'
self.ssebop_file_dir = ssebop_zip_dir + 'SSEBop_Files/'
if not already_downloaded:
if gee_data_flag:
makedirs([gee_zip_dir])
dd.download_gee_data(year_list, start_month=start_month, end_month=end_month,
aoi_shp_file=self.input_state_file, outdir=gee_zip_dir)
makedirs([self.cdl_file_dir])
dd.download_cropland_data(self.input_state_file, year_list=year_list, output_dir=self.cdl_file_dir,
cdl_year=self.cdl_year)
makedirs([ssebop_zip_dir])
dd.download_ssebop_data(self.ssebop_link, year_list, start_month, end_month, ssebop_zip_dir)
if gee_data_flag:
self.input_ts_dir = gee_zip_dir + 'GEE_Files/'
if not already_extracted:
if gee_data_flag:
makedirs([self.input_ts_dir])
dd.extract_data(gee_zip_dir, out_dir=self.input_ts_dir, rename_extracted_files=True)
makedirs([self.ssebop_file_dir])
dd.extract_data(ssebop_zip_dir, self.ssebop_file_dir)
print('CDL, GEE, and SSEBop data downloaded and extracted...')
def download_ws_data(self, year_list, start_month, end_month, already_downloaded=False, already_extracted=False):
self.ws_year_list = year_list
self.ws_start_month = start_month
self.ws_end_month = end_month
self.ws_data_dir = self.input_dir + 'WS_Data/'
ws_gee_dir = self.ws_data_dir + 'WS_GEE/'
ws_ssebop_dir = self.ws_data_dir + 'WS_SSEBop/'
self.ws_ssebop_file_dir = ws_ssebop_dir + 'WS_SSEBop_Files/'
if not already_downloaded:
makedirs([ws_gee_dir, ws_ssebop_dir])
dd.download_ssebop_data(self.ssebop_link, year_list, start_month, end_month, ws_ssebop_dir)
dd.download_gee_data(year_list, start_month=start_month, end_month=end_month,
aoi_shp_file=self.input_state_file, outdir=ws_gee_dir)
if not already_extracted:
makedirs([self.ws_ssebop_file_dir])
dd.extract_data(ws_ssebop_dir, out_dir=self.ws_ssebop_file_dir)
dd.extract_data(ws_gee_dir, out_dir=self.ws_data_dir, rename_extracted_files=True)
print('Data for WS metric downloaded...')
def preprocess_gw_csv(self, input_gw_csv_dir, fill_attr='AF Pumped', filter_attr=None,
filter_attr_value='OUTSIDE OF AMA OR INA', use_only_ama_ina=False, already_preprocessed=False,
**kwargs):
if not already_preprocessed:
input_gw_csv_dir = make_proper_dir_name(input_gw_csv_dir)
vops.add_attribute_well_reg_multiple(input_well_reg_file=self.input_gw_boundary_file,
input_gw_csv_dir=input_gw_csv_dir, out_gw_shp_dir=self.output_shp_dir,
fill_attr=fill_attr, filter_attr=filter_attr,
filter_attr_value=filter_attr_value, use_only_ama_ina=use_only_ama_ina,
**kwargs)
def extract_shp_from_gdb(self, input_gdb_dir, year_list, attr_name='AF_USED', already_extracted=False):
if not already_extracted:
print('Extracting GW data from GDB...')
vops.extract_gdb_data(input_gdb_dir, attr_name=attr_name, year_list=year_list, outdir=self.output_shp_dir)
else:
print("GW shapefiles already extracted")
def reproject_shapefiles(self, already_reprojected=False):
gw_boundary_reproj_dir = make_proper_dir_name(self.file_dir + 'gw_boundary/reproj')
gw_ama_ina_reproj_dir = make_proper_dir_name(self.file_dir + 'gw_ama_ina/reproj')
watershed_reproj_dir = make_proper_dir_name(self.file_dir + 'watershed/reproj')
state_reproj_dir = make_proper_dir_name(self.file_dir + 'state/reproj')
gw_basin_reproj_dir = make_proper_dir_name(self.file_dir + 'GW_Basin/reproj')
self.input_gw_boundary_reproj_file = gw_boundary_reproj_dir + 'input_boundary_reproj.shp'
if self.input_ama_ina_file:
self.input_ama_ina_reproj_file = gw_ama_ina_reproj_dir + 'input_ama_ina_reproj.shp'
if self.input_watershed_file:
self.input_watershed_reproj_file = watershed_reproj_dir + 'input_watershed_reproj.shp'
if self.input_gw_basin:
self.input_gw_basin_reproj_file = gw_basin_reproj_dir + 'input_gw_basin_reproj.shp'
self.input_state_reproj_file = state_reproj_dir + 'input_state_reproj.shp'
if not already_reprojected:
print('Reprojecting Boundary/State/AMA_INA/Watershed shapefiles...')
makedirs([gw_boundary_reproj_dir, state_reproj_dir])
ref_shp = glob(self.output_shp_dir + '*.shp')[0]
vops.reproject_vector(self.input_gw_boundary_file, outfile_path=self.input_gw_boundary_reproj_file,
ref_file=ref_shp, raster=False)
if self.input_ama_ina_file:
makedirs([gw_ama_ina_reproj_dir])
vops.reproject_vector(self.input_ama_ina_file, outfile_path=self.input_ama_ina_reproj_file,
ref_file=ref_shp, raster=False)
if self.input_watershed_file:
makedirs([watershed_reproj_dir])
vops.reproject_vector(self.input_watershed_file, outfile_path=self.input_watershed_reproj_file,
ref_file=ref_shp, raster=False)
if self.input_gw_basin:
makedirs([gw_basin_reproj_dir])
vops.reproject_vector(self.input_gw_basin, outfile_path=self.input_gw_basin_reproj_file,
ref_file=ref_shp, raster=False)
vops.reproject_vector(self.input_state_file, outfile_path=self.input_state_reproj_file, ref_file=ref_shp,
raster=False)
else:
print('Boundary/State/AMA_INA shapefiles are already reprojected')
def clip_gw_shpfiles(self, new_clip_file=None, already_clipped=False, extent_clip=True):
clip_file = self.input_gw_boundary_reproj_file
if new_clip_file:
clip_file = new_clip_file
clip_shp_dir = make_proper_dir_name(self.output_shp_dir + 'Clipped')
if not already_clipped:
print('Clipping GW shapefiles...')
makedirs([clip_shp_dir])
vops.clip_vectors(self.output_shp_dir, clip_file=clip_file, outdir=clip_shp_dir, gdal_path=self.gdal_path,
extent_clip=extent_clip)
else:
print('GW Shapefiles already clipped')
self.output_shp_dir = clip_shp_dir
def crop_gw_rasters(self, ext_mask=True, use_ama_ina=False, already_cropped=False):
cropped_dir = make_proper_dir_name(self.output_gw_raster_dir + 'Cropped')
if not already_cropped:
makedirs([cropped_dir])
multi_poly = False
raster_mask = self.input_state_reproj_file
if use_ama_ina:
raster_mask = self.input_ama_ina_reproj_file
multi_poly = True
rops.crop_rasters(self.final_gw_dir, outdir=cropped_dir, input_mask_file=raster_mask, ext_mask=ext_mask,
gdal_path=self.gdal_path, multi_poly=multi_poly)
else:
print('GW rasters already cropped')
self.final_gw_dir = cropped_dir
if not use_ama_ina:
self.final_gw_dir = make_proper_dir_name(cropped_dir + 'Well_Fixed')
makedirs([self.final_gw_dir])
rops.fix_gw_raster_values(cropped_dir, outdir=self.final_gw_dir, fix_only_negative=True)
self.actual_gw_dir = cropped_dir
def create_well_registry_raster(self, xres=5000., yres=5000., already_created=False):
self.well_reg_dir = make_proper_dir_name(self.file_dir + 'Well_Reg_Rasters')
if not already_created:
print('Creating well registry raster...')
makedirs([self.well_reg_dir])
self.well_reg_raster_file = self.well_reg_dir + 'well_reg.tif'
vops.shp2raster(self.input_gw_boundary_file, self.well_reg_raster_file, xres=xres, yres=yres, smoothing=0,
burn_value=1.0, gdal_path=self.gdal_path, gridding=False)
print('Well registry raster created...')
def create_gw_basin_raster(self, xres=5000., yres=5000., already_created=False):
self.gw_basin_raster_dir = make_proper_dir_name(self.file_dir + 'GW_Basin_Raster')
if not already_created:
print('Creating GW Basin raster...')
makedirs([self.gw_basin_raster_dir])
gw_basin_raster_file = self.gw_basin_raster_dir + 'GW_Basin.tif'
vops.shp2raster(self.input_gw_basin_reproj_file, gw_basin_raster_file, xres=xres, yres=yres,
smoothing=0, value_field='OBJECTID', add_value=False, gdal_path=self.gdal_path,
gridding=False)
print('GW Basin raster created...')
def create_gw_rasters(self, xres=5000., yres=5000., max_gw=1000., value_field=None, value_field_pos=0,
convert_units=True, already_created=True):
fixed_dir = make_proper_dir_name(self.output_gw_raster_dir + 'Fixed')
converted_dir = make_proper_dir_name(self.output_gw_raster_dir + 'Converted')
if not already_created:
print('Converting SHP to TIF...')
makedirs([fixed_dir])
vops.shps2rasters(self.output_shp_dir, self.output_gw_raster_dir, xres=xres, yres=yres, smoothing=0,
value_field=value_field, value_field_pos=value_field_pos, gdal_path=self.gdal_path,
gridding=False)
if convert_units:
max_gw *= xres * yres / 1.233e+6
rops.fix_gw_raster_values(self.output_gw_raster_dir, max_threshold=max_gw, outdir=fixed_dir)
if convert_units:
print('Changing GW units from acreft to mm')
makedirs([converted_dir])
rops.convert_gw_data(fixed_dir, converted_dir)
else:
print('GW pumping rasters already created')
if convert_units:
self.final_gw_dir = converted_dir
else:
self.final_gw_dir = fixed_dir
self.actual_gw_dir = self.final_gw_dir
def create_crop_coeff_raster(self, already_created=False):
self.crop_coeff_dir = make_proper_dir_name(self.file_dir + 'Crop_Coeff')
if not already_created:
print('Creating crop coefficient raster...')
makedirs([self.crop_coeff_dir])
rops.create_crop_coeff_raster(self.cdl_file_dir, output_dir=self.crop_coeff_dir)
def create_mean_crop_coeff_raster(self, already_created=False):
if not already_created:
print('Creating mean crop coefficient raster...')
rops.create_mean_crop_coeff_raster(self.crop_coeff_reproj_dir, self.crop_coeff_reproj_dir)
def create_sed_thickness_raster(self, xres=5000., yres=5000., already_converted=False, already_clipped=False,
already_created=False):
self.sed_thick_dir = make_proper_dir_name(self.file_dir + 'Sed_Thick')
self.sed_thick_shp_file = self.sed_thick_dir + 'Sed_Thick.shp'
self.sed_thick_raster_file = self.sed_thick_dir + 'Sed_Thick.tif'
sed_thick_shp = self.sed_thick_dir + 'Sed_Thick_All.shp'
if not already_converted:
print('Creating sediment thickness shapefile...')
makedirs([self.sed_thick_dir])
vops.csv2shp(self.sed_thick_csv, sed_thick_shp, long_lat_pos=(0, 1))
if not already_clipped:
print('Reprojecting sediment thickness shapefile...')
vops.reproject_vector(sed_thick_shp, sed_thick_shp, self.input_state_reproj_file,
raster=False)
print('Clipping sediment thickness shapefile...')
vops.clip_vector(sed_thick_shp, self.input_state_reproj_file, self.sed_thick_shp_file,
gdal_path=self.gdal_path, extent_clip=False)
if not already_created:
print('Creating sediment thickness raster...')
rops.create_sed_thickness_raster(self.sed_thick_shp_file, self.sed_thick_raster_file, self.gdal_path,
xres, yres)
print('Sediment thickness raster created...')
def reclassify_cdl(self, reclass_dict, pattern='*.tif', already_reclassified=False):
self.cdl_reclass_dir = make_proper_dir_name(self.file_dir + 'Reclass')
self.ref_raster = glob(self.actual_gw_dir + pattern)[0]
if not already_reclassified:
makedirs([self.cdl_reclass_dir])
rops.reclassify_cdl_files(self.cdl_file_dir, self.cdl_reclass_dir, reclass_dict, self.ref_raster,
self.gdal_path)
else:
print('Already reclassified')
def organize_subsidence_rasters(self, decorrelated_value=-10000, verbose=False, already_organized=False):
self.converted_subsidence_dir = self.file_dir + 'Converted_Subsidence_Rasters/'
if not already_organized:
print('Organizing subsidence rasters...')
makedirs([self.converted_subsidence_dir])
rops.organize_subsidence_data(self.input_subsidence_dir, output_dir=self.converted_subsidence_dir,
ref_raster=self.ref_raster, gdal_path=self.gdal_path,
decorrelated_value=decorrelated_value, verbose=verbose)
print('Organized and created subsidence rasters...')
def reproject_rasters(self, pattern='*.tif', already_reprojected=False):
self.raster_reproj_dir = self.file_dir + 'Reproj_Rasters/'
self.ssebop_reproj_dir = self.ssebop_file_dir + 'SSEBop_Reproj/'
self.ws_data_reproj_dir = self.file_dir + 'WS_Reproj_Rasters/'
self.ws_ssebop_reproj_dir = self.file_dir + 'WS_SSEBop_Reproj_Rasters/'
self.crop_coeff_reproj_dir = self.crop_coeff_dir + 'Crop_Coeff_Reproj/'
self.well_reg_reproj_dir = self.well_reg_dir + 'Well_Reg_Reproj/'
self.sed_thick_reproj_dir = self.sed_thick_dir + 'Reproj/'
self.gw_basin_raster_reproj_dir = self.gw_basin_raster_dir + 'Reproj/'
if not already_reprojected:
print('Reprojecting rasters...')
makedirs([self.raster_reproj_dir, self.crop_coeff_reproj_dir, self.well_reg_reproj_dir,
self.sed_thick_reproj_dir, self.gw_basin_raster_reproj_dir])
rops.reproject_rasters(self.input_ts_dir, ref_raster=self.ref_raster, outdir=self.raster_reproj_dir,
pattern=pattern, gdal_path=self.gdal_path)
rops.reproject_rasters(self.crop_coeff_dir, ref_raster=self.ref_raster, outdir=self.crop_coeff_reproj_dir,
pattern=pattern, gdal_path=self.gdal_path)
rops.reproject_rasters(self.well_reg_dir, ref_raster=self.ref_raster,
outdir=self.well_reg_reproj_dir, pattern=pattern, gdal_path=self.gdal_path)
rops.reproject_rasters(self.sed_thick_dir, ref_raster=self.ref_raster, outdir=self.sed_thick_reproj_dir,
pattern='Sed_Thick.tif', gdal_path=self.gdal_path)
rops.reproject_rasters(self.gw_basin_raster_dir, ref_raster=self.ref_raster,
outdir=self.gw_basin_raster_reproj_dir, pattern=pattern, gdal_path=self.gdal_path)
if self.ssebop_link:
makedirs([self.ssebop_reproj_dir, self.ws_ssebop_reproj_dir, self.ws_data_reproj_dir])
rops.reproject_rasters(self.ssebop_file_dir, ref_raster=self.ref_raster, outdir=self.ssebop_reproj_dir,
pattern=pattern, gdal_path=self.gdal_path)
rops.generate_cummulative_ssebop(self.ssebop_reproj_dir, year_list=self.data_year_list,
start_month=self.data_start_month, end_month=self.data_end_month,
out_dir=self.raster_reproj_dir)
if self.ws_year_list is not None:
rops.reproject_rasters(self.ws_ssebop_file_dir, ref_raster=self.ref_raster,
outdir=self.ws_ssebop_reproj_dir, pattern=pattern, gdal_path=self.gdal_path)
rops.generate_cummulative_ssebop(self.ws_ssebop_reproj_dir, year_list=self.ws_year_list,
start_month=self.ws_start_month, end_month=self.ws_end_month,
out_dir=self.ws_data_reproj_dir)
rops.reproject_rasters(self.ws_data_dir, ref_raster=self.ref_raster, outdir=self.ws_data_reproj_dir,
pattern=pattern, gdal_path=self.gdal_path)
else:
print('All rasters already reprojected')
def create_land_use_rasters(self, class_values=(1, 2, 3), class_labels=('AGRI', 'SW', 'URBAN'),
smoothing_factors=(3, 5, 3), already_created=False, post_process=False,
out_mean_flt_rasters=True):
self.land_use_dir_list = [make_proper_dir_name(self.file_dir + class_label) for class_label in class_labels]
self.well_reg_flt_dir = make_proper_dir_name(self.well_reg_dir + 'Flt')
makedirs([self.well_reg_flt_dir])
self.well_reg_flt_file = self.well_reg_flt_dir + 'Well_Reg_Flt.tif'
if not already_created:
well_reg_raster = glob(self.well_reg_reproj_dir + '*.tif')[0]
rops.filter_nans(well_reg_raster, self.ref_raster, outfile_path=self.well_reg_flt_file)
is_cdl_ts = self.cdl_year is None
rops.create_land_use_rasters(self.land_use_dir_list, self.cdl_reclass_dir, class_values, class_labels,
smoothing_factors, self.ref_raster, self.well_reg_flt_file, post_process,
is_cdl_ts, out_mean_flt_rasters)
else:
print('Land use rasters already created')
def create_water_stress_index_rasters(self, pattern_list=('P*.tif', 'SSEBop*.tif', 'AGRI*.tif', 'URBAN*.tif'),
already_created=False, normalize=False):
ws_out_dir = make_proper_dir_name(self.file_dir + 'WS_Rasters')
makedirs([ws_out_dir])
if not already_created:
input_raster_dir_list = [self.ws_data_reproj_dir] * 2 + [self.land_use_dir_list[0],
self.land_use_dir_list[2]]
rops.compute_water_stress_index_rasters(self.input_watershed_reproj_file, pattern_list=pattern_list,
input_raster_dir_list=input_raster_dir_list, output_dir=ws_out_dir,
gdal_path=self.gdal_path, normalize=normalize)
rops.reproject_rasters(ws_out_dir, ref_raster=self.ref_raster, outdir=self.raster_reproj_dir,
pattern='*.tif', gdal_path=self.gdal_path)
else:
print('Water stress rasters already created')
def mask_rasters(self, pattern='*.tif', already_masked=False):
self.ref_raster = glob(self.final_gw_dir + pattern)[0]
self.raster_mask_dir = make_proper_dir_name(self.file_dir + 'Masked_Rasters')
self.lu_mask_dir = make_proper_dir_name(self.raster_mask_dir + 'Masked_LU')
self.crop_coeff_mask_dir = make_proper_dir_name(self.raster_mask_dir + 'Masked_Crop_Coeff')
self.well_reg_mask_dir = make_proper_dir_name(self.well_reg_dir + 'Masked')
if not already_masked:
print('Masking rasters...')
makedirs([self.raster_mask_dir, self.lu_mask_dir, self.crop_coeff_mask_dir, self.well_reg_mask_dir])
rops.mask_rasters(self.raster_reproj_dir, ref_raster=self.ref_raster, outdir=self.raster_mask_dir,
pattern=pattern)
rops.mask_rasters(self.crop_coeff_reproj_dir, ref_raster=self.ref_raster, outdir=self.crop_coeff_mask_dir,
pattern=pattern)
rops.mask_rasters(self.well_reg_reproj_dir, ref_raster=self.ref_raster, outdir=self.well_reg_mask_dir,
pattern=pattern)
for lu_dir in self.land_use_dir_list:
rops.mask_rasters(lu_dir, ref_raster=self.ref_raster, outdir=self.lu_mask_dir, pattern=pattern)
else:
print('All rasters already masked')
def create_dataframe(self, year_list, column_names=None, ordering=False, load_df=False, exclude_vars=(),
exclude_years=(2019, ), pattern='*.tif', verbose=False, remove_na=True, load_gw_info=False):
self.rf_data_dir = make_proper_dir_name(self.file_dir + 'RF_Data')
self.pred_data_dir = make_proper_dir_name(self.file_dir + 'Pred_Data')
df_file = self.output_dir + 'raster_df.csv'
if load_df:
print('Getting dataframe...')
return pd.read_csv(df_file, dtype={'GW_NAME': 'string'})
else:
print('Copying files...')
makedirs([self.rf_data_dir, self.pred_data_dir])
input_dir_list = [self.final_gw_dir] + [self.raster_mask_dir]
pattern_list = [pattern] * len(input_dir_list)
copy_files(input_dir_list, target_dir=self.rf_data_dir, year_list=year_list, pattern_list=pattern_list,
verbose=verbose)
copy_files([self.crop_coeff_mask_dir], target_dir=self.rf_data_dir, year_list=year_list,
pattern_list=[pattern], verbose=verbose)
copy_files([self.lu_mask_dir], target_dir=self.rf_data_dir, year_list=year_list,
pattern_list=[pattern], verbose=verbose)
copy_files([self.well_reg_mask_dir], target_dir=self.rf_data_dir, year_list=year_list,
pattern_list=[pattern], rep=True, verbose=verbose)
input_dir_list = [self.actual_gw_dir] + [self.raster_reproj_dir]
pattern_list = [pattern] * len(input_dir_list)
copy_files(input_dir_list, target_dir=self.pred_data_dir, year_list=year_list, pattern_list=pattern_list,
verbose=verbose)
pattern_list = [pattern] * len(self.land_use_dir_list)
copy_files(self.land_use_dir_list, target_dir=self.pred_data_dir, year_list=year_list,
pattern_list=pattern_list, verbose=verbose)
copy_files([self.crop_coeff_reproj_dir], target_dir=self.pred_data_dir, year_list=year_list,
pattern_list=[pattern], verbose=verbose)
copy_files([self.well_reg_flt_dir], target_dir=self.pred_data_dir, year_list=year_list,
pattern_list=[pattern], rep=True, verbose=verbose)
print('Creating dataframe...')
gw_file = self.input_ama_ina_reproj_file
label_attr = 'NAME_ABBR'
df = rfr.create_dataframe(self.rf_data_dir, input_gw_file=gw_file, output_dir=self.output_dir,
label_attr=label_attr, column_names=column_names, make_year_col=True,
exclude_vars=exclude_vars, exclude_years=exclude_years, ordering=ordering,
load_gw_info=load_gw_info, remove_na=remove_na)
return df
def build_model(self, df, n_estimators=100, random_state=0, bootstrap=True, max_features=3, test_size=None,
pred_attr='GW', shuffle=True, plot_graphs=False, plot_3d=False, drop_attrs=(), test_year=(2012,),
test_gw=('DIN',), use_gw=False, split_attribute=True, load_model=False, calc_perm_imp=False,
spatio_temporal=False):
print('Building RF Model...')
plot_dir = make_proper_dir_name(self.output_dir + 'Partial_Plots/PDP_Data')
makedirs([plot_dir])
rf_model = rfr.rf_regressor(df, self.output_dir, n_estimators=n_estimators, random_state=random_state,
pred_attr=pred_attr, drop_attrs=drop_attrs, test_year=test_year, test_gw=test_gw,
use_gw=use_gw, shuffle=shuffle, plot_graphs=plot_graphs, plot_3d=plot_3d,
split_attribute=split_attribute, bootstrap=bootstrap, plot_dir=plot_dir,
max_features=max_features, load_model=load_model, test_size=test_size,
calc_perm_imp=calc_perm_imp, spatio_temporal=spatio_temporal)
return rf_model
def get_predictions(self, rf_model, pred_years, column_names=None, ordering=False, pred_attr='GW',
only_pred=False, exclude_vars=(), exclude_years=(2019,), drop_attrs=(), use_full_extent=False,
post_process=True):
print('Predicting...')
self.pred_out_dir = make_proper_dir_name(self.output_dir + 'Predicted_Rasters')
makedirs([self.pred_out_dir])
actual_raster_dir = self.rf_data_dir
if use_full_extent:
actual_raster_dir = self.pred_data_dir
rfr.predict_rasters(rf_model, pred_years=pred_years, drop_attrs=drop_attrs, out_dir=self.pred_out_dir,
actual_raster_dir=actual_raster_dir, pred_attr=pred_attr, only_pred=only_pred,
exclude_vars=exclude_vars, exclude_years=exclude_years, column_names=column_names,
ordering=ordering)
if post_process:
output_dir = make_proper_dir_name(self.pred_out_dir + 'Postprocessed')
makedirs([output_dir])
well_mask = glob(self.well_reg_mask_dir + '*.tif')[0]
if use_full_extent:
well_mask = self.well_reg_flt_file
rops.postprocess_rasters(self.pred_out_dir, output_dir, well_mask)
self.pred_out_dir = output_dir
return actual_raster_dir, self.pred_out_dir
def create_subsidence_pred_gw_rasters(self, scale_to_cm=False, verbose=False, already_created=False):
self.subsidence_pred_gw_dir = make_proper_dir_name(self.output_dir + 'Subsidence_Analysis')
if not already_created:
makedirs([self.subsidence_pred_gw_dir])
sed_thick_raster = glob(self.sed_thick_reproj_dir + '*.tif')[0]
watershed_raster = glob(self.gw_basin_raster_reproj_dir + '*.tif')[0]
rops.create_subsidence_pred_gw_rasters(self.pred_out_dir, self.converted_subsidence_dir, sed_thick_raster,
watershed_raster, self.subsidence_pred_gw_dir,
scale_to_cm=scale_to_cm, verbose=verbose)
print('Subsidence and total predicted GW rasters created!')
def crop_final_gw_rasters(self, actual_gw_dir, pred_gw_dir, test_years, already_cropped=False):
cropped_dir = make_proper_dir_name(self.output_dir + 'Final_GW_Cropped')
makedirs([cropped_dir])
actual_gw_dir, pred_gw_dir = rops.crop_final_gw_rasters(actual_gw_dir, pred_gw_dir,
raster_mask=self.input_ama_ina_reproj_file,
output_dir=cropped_dir, gdal_path=self.gdal_path,
already_cropped=already_cropped, test_years=test_years)
return actual_gw_dir, pred_gw_dir
def run_gw(analyze_only=False, load_files=True, load_rf_model=False, load_df=False, subsidence_analysis=False,
           ama_ina_train=False):
    """
    Drive the Arizona groundwater-pumping prediction workflow end to end:
    data download, preprocessing, raster creation, RF training, prediction,
    and (optionally) subsidence analysis and result plotting.

    :param analyze_only: NOTE(review): all work, including the analysis calls,
                         currently sits inside the `not analyze_only` branch, so
                         passing True makes this function a no-op — confirm intent.
    :param load_files: Reuse already downloaded/preprocessed intermediate files.
    :param load_rf_model: Load a previously trained RF model instead of retraining.
    :param load_df: Load the previously built raster dataframe from CSV.
    :param subsidence_analysis: Also build subsidence vs. predicted-GW rasters
                                (forces full-extent predictions).
    :param ama_ina_train: Hold out the 'HAR' AMA/INA region for testing and
                          split train/test by region.
    """
    # --- Path configuration -------------------------------------------------
    # gee_data selects the seasonal aggregation of the GEE-derived rasters;
    # index 2 selects 'Annual/'.
    gee_data = ['Apr_Sept/', 'Apr_Aug/', 'Annual/']
    input_dir = '../Inputs/Data/Arizona_GW/'
    input_subsidence_dir = input_dir + 'Subsidence/Subsidence_Rasters/'
    file_dir = '../Inputs/Files_AZ_' + gee_data[2]
    output_dir = '../Outputs/Output_AZ_' + gee_data[2]
    output_shp_dir = file_dir + 'GW_Shapefiles/'
    output_gw_raster_dir = file_dir + 'GW_Rasters/'
    input_well_reg_file = input_dir + 'Well_Registry/WellRegistry.shp'
    input_ama_ina_file = input_dir + 'Boundary/AMA_and_INA.shp'
    input_watershed_file = input_dir + 'Watersheds/Surface_Watershed.shp'
    input_gw_basin = input_dir + 'GW_Basin/Groundwater_Basin.shp'
    input_gw_csv_dir = input_dir + 'GW_Data/'
    input_state_file = input_dir + 'Arizona/Arizona.shp'
    # Windows OSGeo4W install location; machine-specific.
    gdal_path = 'C:/OSGeo4W64/'
    actual_gw_dir = file_dir + 'RF_Data/'
    pred_gw_dir = output_dir + 'Predicted_Rasters/'
    grace_csv = input_dir + 'GRACE/TWS_GRACE.csv'
    ssebop_link = 'https://edcintl.cr.usgs.gov/downloads/sciweb1/shared/uswem/web/conus/eta/modis_eta/monthly/' \
                  'downloads/'
    sed_thick_csv = input_dir + 'Sediment_Thickness/Sedthick_LLz.csv'
    # --- Temporal configuration ---------------------------------------------
    data_year_list = range(2002, 2021)
    data_start_month = 1
    data_end_month = 12
    # Water-stress window spans Oct-May (wraps the calendar year).
    ws_start_month = 10
    ws_end_month = 5
    # CDL pixel-value intervals mapped to reclassified land-use codes; the
    # interval semantics are interpreted by gw.reclassify_cdl.
    az_class_dict = {(0, 59.5): 1,
                     (66.5, 77.5): 1,
                     (203.5, 255): 1,
                     (110.5, 111.5): 2,
                     (111.5, 112.5): 0,
                     (120.5, 124.5): 3,
                     (59.5, 61.5): 0,
                     (130.5, 195.5): 0
                     }
    # Attributes dropped from model training / prediction inputs.
    drop_attrs = ('YEAR', 'AGRI_flt', 'URBAN_flt', 'SW_flt', 'CC',)
    test_years = range(2010, 2021)
    exclude_vars = ('ET', 'WS_PT', 'WS_PT_ET')
    pred_attr = 'GW'
    fill_attr = 'AF Pumped'
    filter_attr = None
    test_ama_ina = ()
    if ama_ina_train:
        test_ama_ina = ('HAR',)
    xres, yres = 2000, 2000
    cdl_year = None
    # File-name patterns feeding the water-stress index, keyed by split mode.
    ws_stress_dict = {
        'spatial': ('P*.tif', 'SSEBop*.tif', 'AGRI_flt*.tif', 'URBAN_flt*.tif'),
        'temporal': ('P*.tif', 'SSEBop*.tif', 'AGRI_Mean*.tif', 'URBAN_Mean*.tif')
    }
    # Land-use smoothing factors to sweep; currently a single value (4).
    sf_flt_list = list(range(4, 5))
    if not analyze_only:
        gw = HydroML(input_dir, file_dir, output_dir, output_shp_dir, output_gw_raster_dir,
                     input_state_file, gdal_path, input_subsidence_dir=input_subsidence_dir,
                     input_gw_boundary_file=input_well_reg_file, input_ama_ina_file=input_ama_ina_file,
                     input_watershed_file=input_watershed_file, input_gw_basin=input_gw_basin, ssebop_link=ssebop_link,
                     sed_thick_csv=sed_thick_csv, cdl_year=cdl_year)
        # --- Download and preprocess all inputs (skipped when cached) -------
        gw.download_data(year_list=data_year_list, start_month=data_start_month, end_month=data_end_month,
                         already_downloaded=load_files, already_extracted=load_files)
        gw.download_ws_data(year_list=data_year_list, start_month=ws_start_month, end_month=ws_end_month,
                            already_downloaded=load_files, already_extracted=load_files)
        gw.preprocess_gw_csv(input_gw_csv_dir, fill_attr=fill_attr, filter_attr=filter_attr, use_only_ama_ina=False,
                             already_preprocessed=load_files)
        gw.reproject_shapefiles(already_reprojected=load_files)
        gw.create_gw_rasters(already_created=load_files, value_field=fill_attr, xres=xres, yres=yres, max_gw=3000)
        gw.create_well_registry_raster(xres=xres, yres=yres, already_created=load_files)
        gw.create_sed_thickness_raster(xres=xres, yres=yres, already_converted=True, already_clipped=True,
                                       already_created=load_files)
        gw.crop_gw_rasters(use_ama_ina=False, already_cropped=load_files)
        gw.reclassify_cdl(az_class_dict, already_reclassified=load_files)
        gw.create_crop_coeff_raster(already_created=load_files)
        gw.create_gw_basin_raster(xres=xres, yres=yres, already_created=load_files)
        gw.reproject_rasters(already_reprojected=load_files)
        gw.create_mean_crop_coeff_raster(already_created=load_files)
        load_gw_info = True
        # --- One full train/predict/analyze pass per smoothing factor -------
        for idx, sf in enumerate(sf_flt_list):
            gw.create_land_use_rasters(already_created=load_files, smoothing_factors=(sf, sf, sf))
            ws_pattern_list = ws_stress_dict['temporal']
            # NOTE(review): both branches select 'temporal'; the 'spatial'
            # patterns are never used — was one branch meant to be 'spatial'?
            if ama_ina_train:
                ws_pattern_list = ws_stress_dict['temporal']
            gw.create_water_stress_index_rasters(already_created=load_files, normalize=False,
                                                 pattern_list=ws_pattern_list)
            if subsidence_analysis:
                gw.organize_subsidence_rasters(already_organized=load_files)
            gw.mask_rasters(already_masked=load_files)
            # NOTE(review): load_gw_info is already True above, so this
            # reassignment is a no-op — possibly meant to toggle it.
            if idx > 0:
                load_gw_info = True
            df = gw.create_dataframe(year_list=range(2002, 2021), exclude_vars=exclude_vars, exclude_years=(),
                                     load_df=load_df, load_gw_info=load_gw_info)
            # GW_NAME is only a grouping label, never a model feature.
            dattr = list(drop_attrs) + ['GW_NAME']
            rf_model = gw.build_model(df, n_estimators=500, test_year=test_years, drop_attrs=dattr,
                                      pred_attr=pred_attr, load_model=load_rf_model, max_features=5,
                                      plot_graphs=False, use_gw=ama_ina_train, test_gw=test_ama_ina,
                                      spatio_temporal=False, shuffle=False, random_state=0)
            actual_gw_dir, pred_gw_dir = gw.get_predictions(rf_model=rf_model, pred_years=range(2002, 2021),
                                                            drop_attrs=drop_attrs, pred_attr=pred_attr,
                                                            exclude_vars=exclude_vars, exclude_years=(),
                                                            only_pred=False, use_full_extent=subsidence_analysis,
                                                            post_process=False)
            if subsidence_analysis:
                gw.create_subsidence_pred_gw_rasters(already_created=False, verbose=False, scale_to_cm=False)
            # Run per-AMA/INA and overall analyses only for a single-factor sweep.
            if len(sf_flt_list) == 1:
                input_gw_file = file_dir + 'gw_ama_ina/reproj/input_ama_ina_reproj.shp'
                ma.run_analysis(actual_gw_dir, pred_gw_dir, grace_csv, use_gws=True, input_gw_file=input_gw_file,
                                out_dir=output_dir, test_years=test_years, forecast_years=(), show_plots=True,
                                ama_ina_list=test_ama_ina)
            actual_gw_dir, pred_gw_dir = gw.crop_final_gw_rasters(actual_gw_dir, pred_gw_dir,
                                                                  already_cropped=load_rf_model,
                                                                  test_years=test_years)
            if len(sf_flt_list) == 1:
                ma.run_analysis(actual_gw_dir, pred_gw_dir, grace_csv, use_gws=False, out_dir=output_dir,
                                test_years=test_years, forecast_years=())
            ma.generate_feature_plots(output_dir + 'raster_df.csv', feature_list=('SSEBop', 'P'), test_years=test_years)
# Script entry point: run the full pipeline reusing all cached intermediates
# (downloads, dataframe, and trained RF model) and with subsidence analysis on.
if __name__ == '__main__':
    run_gw(analyze_only=False, load_files=True, load_rf_model=True, subsidence_analysis=True, load_df=True,
           ama_ina_train=False)
| true | true |
1c34b8088b642cdd20af9135beb848737bb80eec | 3,217 | py | Python | src/programy/parser/template/nodes/condtype2.py | hiitsme123/python | e08309fe61fd5ed88cfb39e9f402613dd7e39269 | [
"MIT"
] | 5 | 2017-02-03T07:38:45.000Z | 2022-01-06T11:29:29.000Z | src/programy/parser/template/nodes/condtype2.py | hiitsme123/python | e08309fe61fd5ed88cfb39e9f402613dd7e39269 | [
"MIT"
] | 8 | 2017-02-03T06:59:03.000Z | 2017-04-28T14:23:46.000Z | src/programy/parser/template/nodes/condtype2.py | hiitsme123/python | e08309fe61fd5ed88cfb39e9f402613dd7e39269 | [
"MIT"
] | 8 | 2017-02-02T15:12:12.000Z | 2017-04-02T13:35:03.000Z | """
Copyright (c) 2016 Keith Sterling
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
from programy.parser.template.nodes.condchild import TemplateConditionNodeWithChildren
class TemplateType2ConditionNode(TemplateConditionNodeWithChildren):
    """
    Type-2 (multi-branch) AIML <condition> node: a single predicate name is
    compared against the value of each non-default <li> child in turn; the
    first match is resolved, the default <li> (if any) handles the fallback.
    """

    def __init__(self, name, local=False):
        TemplateConditionNodeWithChildren.__init__(self)
        self.name = name      # predicate (or local variable) name to test
        self.local = local    # True -> 'var' scope, False -> 'name' scope

    def _join_resolved(self, li_node, bot, clientid):
        # Resolve every child of the given <li> node and join with single spaces.
        return " ".join(child.resolve(bot, clientid) for child in li_node.children)

    def resolve(self, bot, clientid):
        """Resolve this condition; any failure is logged and yields ''."""
        try:
            value = self._get_predicate_value(bot, clientid, self.name, self.local)
            for condition in self.children:
                # Preserve the original strict `is False` check on is_default().
                if condition.is_default() is not False:
                    continue
                candidate = condition.value.resolve(bot, clientid)
                # Condition comparison is always case insensitive.
                if value.upper() != candidate.upper():
                    continue
                resolved = self._join_resolved(condition, bot, clientid)
                logging.debug("[%s] resolved to [%s]", self.to_string(), resolved)
                if condition.loop is True:
                    # <loop/> re-evaluates the whole condition and appends the result.
                    resolved = resolved.strip() + " " + self.resolve(bot, clientid)
                return resolved
            default = self.get_default()
            if default is None:
                resolved = ""
            else:
                resolved = self._join_resolved(default, bot, clientid)
                if default.loop is True:
                    resolved = resolved.strip() + " " + self.resolve(bot, clientid)
            logging.debug("[%s] resolved to [%s]", self.to_string(), resolved)
            return resolved
        except Exception as excep:
            logging.exception(excep)
            return ""

    def to_string(self):
        return "[CONDITION2(%s)]" % self.name

    def to_xml(self, bot, clientid):
        # 'var' marks a local variable condition, 'name' a global predicate.
        attr = 'var' if self.local is True else 'name'
        parts = ['<condition %s="%s">' % (attr, self.name)]
        parts.extend(child.to_xml(bot, clientid) for child in self.children)
        parts.append("</condition>")
        return "".join(parts)
| 41.779221 | 126 | 0.639105 |
import logging
from programy.parser.template.nodes.condchild import TemplateConditionNodeWithChildren
class TemplateType2ConditionNode(TemplateConditionNodeWithChildren):
def __init__(self, name, local=False):
TemplateConditionNodeWithChildren.__init__(self)
self.name = name
self.local = local
def resolve(self, bot, clientid):
try:
value = self._get_predicate_value(bot, clientid, self.name, self.local)
for condition in self.children:
if condition.is_default() is False:
condition_value = condition.value.resolve(bot, clientid)
if value.upper() == condition_value.upper():
resolved = " ".join([child_node.resolve(bot, clientid) for child_node in condition.children])
logging.debug("[%s] resolved to [%s]", self.to_string(), resolved)
if condition.loop is True:
resolved = resolved.strip() + " " + self.resolve(bot, clientid)
return resolved
default = self.get_default()
if default is not None:
resolved = " ".join([child_node.resolve(bot, clientid) for child_node in default.children])
if default.loop is True:
resolved = resolved.strip() + " " + self.resolve(bot, clientid)
else:
resolved = ""
logging.debug("[%s] resolved to [%s]", self.to_string(), resolved)
return resolved
except Exception as excep:
logging.exception(excep)
return ""
def to_string(self):
return "[CONDITION2(%s)]" % self.name
def to_xml(self, bot, clientid):
xml = "<condition"
if self.local is True:
xml += ' var="%s"' % self.name
else:
xml += ' name="%s"' % self.name
xml += ">"
for child in self.children:
xml += child.to_xml(bot, clientid)
xml += "</condition>"
return xml
| true | true |
1c34b80edbdc2a9c8bfd0ca9cd1d5e6a6c0a2f82 | 850 | py | Python | prog/process_drug.py | clussier/DeepCDR | 011f155c0ffb1abf61ae403bf3b9247398676ac7 | [
"MIT"
] | 45 | 2020-02-23T20:49:45.000Z | 2022-03-09T09:19:31.000Z | prog/process_drug.py | clussier/DeepCDR | 011f155c0ffb1abf61ae403bf3b9247398676ac7 | [
"MIT"
] | null | null | null | prog/process_drug.py | clussier/DeepCDR | 011f155c0ffb1abf61ae403bf3b9247398676ac7 | [
"MIT"
] | 11 | 2020-07-15T15:39:46.000Z | 2022-01-28T19:16:43.000Z | #get drug features using Deepchem library
# Extract per-drug molecular graph features with DeepChem's ConvMolFeaturizer
# and persist them (atom features, adjacency list, degree list) as hickle files.
import os
import deepchem as dc
from rdkit import Chem
import numpy as np
import hickle as hkl

drug_smiles_file = '../data/223drugs_pubchem_smiles.txt'
save_dir = '../data/GDSC/drug_graph_feat'

# Map PubChem CID -> SMILES; the input is tab-separated (cid<TAB>smiles).
# A context manager closes the handle deterministically (the original
# open(...).readlines() leaked it), and iterating the file avoids
# materialising all lines first.
with open(drug_smiles_file) as smiles_file:
    pubchemid2smile = {line.split('\t')[0]: line.split('\t')[1].strip() for line in smiles_file}

# exist_ok avoids the check-then-create race of exists()/makedirs().
os.makedirs(save_dir, exist_ok=True)

# The featurizer carries no per-molecule state, so build it once instead of
# re-instantiating it on every loop iteration as the original did. The dead
# pre-loop `molecules = []` accumulator has been removed.
featurizer = dc.feat.graph_features.ConvMolFeaturizer()
for pubchem_id, smiles in pubchemid2smile.items():
    print(pubchem_id)
    mol_object = featurizer.featurize(mols=[Chem.MolFromSmiles(smiles)])
    features = mol_object[0].atom_features
    degree_list = mol_object[0].deg_list
    adj_list = mol_object[0].canon_adj_list
    # One hickle file per drug: [atom features, adjacency list, degree list].
    hkl.dump([features, adj_list, degree_list], '%s/%s.hkl' % (save_dir, pubchem_id))
import os
import deepchem as dc
from rdkit import Chem
import numpy as np
import hickle as hkl
drug_smiles_file='../data/223drugs_pubchem_smiles.txt'
save_dir='../data/GDSC/drug_graph_feat'
pubchemid2smile = {item.split('\t')[0]:item.split('\t')[1].strip() for item in open(drug_smiles_file).readlines()}
if not os.path.exists(save_dir):
os.makedirs(save_dir)
molecules = []
for each in pubchemid2smile.keys():
print(each)
molecules=[]
molecules.append(Chem.MolFromSmiles(pubchemid2smile[each]))
featurizer = dc.feat.graph_features.ConvMolFeaturizer()
mol_object = featurizer.featurize(mols=molecules)
features = mol_object[0].atom_features
degree_list = mol_object[0].deg_list
adj_list = mol_object[0].canon_adj_list
hkl.dump([features,adj_list,degree_list],'%s/%s.hkl'%(save_dir,each))
| true | true |
1c34b840812f9fa777c8737d8869531bef0599d6 | 19,134 | py | Python | test/functional/dip3-deterministicmns.py | lokalnode/LokalCoin | 55572130202013aef57d310bdf8b1f0700ec2168 | [
"MIT"
] | null | null | null | test/functional/dip3-deterministicmns.py | lokalnode/LokalCoin | 55572130202013aef57d310bdf8b1f0700ec2168 | [
"MIT"
] | null | null | null | test/functional/dip3-deterministicmns.py | lokalnode/LokalCoin | 55572130202013aef57d310bdf8b1f0700ec2168 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2020 The Dash Core developers
# Copyright (c) 2021 The Lokal Coin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test deterministic masternodes
#
import sys
from test_framework.blocktools import create_block, create_coinbase, get_masternode_payment
from test_framework.mininode import CTransaction, ToHex, FromHex, CTxOut, COIN, CCbTx
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class Masternode(object):
    """Plain attribute bag for per-masternode test state; fields (idx, alias,
    keys, collateral txid/vout, node handle, ...) are attached ad hoc by the
    test's prepare/register/start helpers."""
    pass
class DIP3Test(BitcoinTestFramework):
def set_test_params(self):
self.num_initial_mn = 11 # Should be >= 11 to make sure quorums are not always the same MNs
self.num_nodes = 1 + self.num_initial_mn + 2 # +1 for controller, +1 for mn-qt, +1 for mn created after dip3 activation
self.setup_clean_chain = True
self.extra_args = ["-budgetparams=10:10:10"]
self.extra_args += ["-sporkkey=cP4EKFyJsHT39LDqgdcB43Y3YXjNyjb5Fuas1GQSeAtjnZWmZEQK"]
self.extra_args += ["-dip3params=135:150"]
def setup_network(self):
self.disable_mocktime()
self.add_nodes(1)
self.start_controller_node()
def start_controller_node(self):
self.log.info("starting controller node")
self.start_node(0, extra_args=self.extra_args)
for i in range(1, self.num_nodes):
if i < len(self.nodes) and self.nodes[i] is not None and self.nodes[i].process is not None:
connect_nodes_bi(self.nodes, 0, i)
def stop_controller_node(self):
self.log.info("stopping controller node")
self.stop_node(0)
def restart_controller_node(self):
self.stop_controller_node()
self.start_controller_node()
def run_test(self):
self.log.info("funding controller node")
while self.nodes[0].getbalance() < (self.num_initial_mn + 3) * 1000:
self.nodes[0].generate(1) # generate enough for collaterals
self.log.info("controller node has {} lokal".format(self.nodes[0].getbalance()))
# Make sure we're below block 135 (which activates dip3)
self.log.info("testing rejection of ProTx before dip3 activation")
assert(self.nodes[0].getblockchaininfo()['blocks'] < 135)
mns = []
# prepare mn which should still be accepted later when dip3 activates
self.log.info("creating collateral for mn-before-dip3")
before_dip3_mn = self.prepare_mn(self.nodes[0], 1, 'mn-before-dip3')
self.create_mn_collateral(self.nodes[0], before_dip3_mn)
mns.append(before_dip3_mn)
# block 150 starts enforcing DIP3 MN payments
while self.nodes[0].getblockcount() < 150:
self.nodes[0].generate(1)
self.log.info("mining final block for DIP3 activation")
self.nodes[0].generate(1)
# We have hundreds of blocks to sync here, give it more time
self.log.info("syncing blocks for all nodes")
sync_blocks(self.nodes, timeout=120)
# DIP3 is fully enforced here
self.register_mn(self.nodes[0], before_dip3_mn)
self.start_mn(before_dip3_mn)
self.log.info("registering MNs")
for i in range(0, self.num_initial_mn):
mn = self.prepare_mn(self.nodes[0], i + 2, "mn-%d" % i)
mns.append(mn)
# start a few MNs before they are registered and a few after they are registered
start = (i % 3) == 0
if start:
self.start_mn(mn)
# let a few of the protx MNs refer to the existing collaterals
fund = (i % 2) == 0
if fund:
self.log.info("register_fund %s" % mn.alias)
self.register_fund_mn(self.nodes[0], mn)
else:
self.log.info("create_collateral %s" % mn.alias)
self.create_mn_collateral(self.nodes[0], mn)
self.log.info("register %s" % mn.alias)
self.register_mn(self.nodes[0], mn)
self.nodes[0].generate(1)
if not start:
self.start_mn(mn)
self.sync_all()
self.assert_mnlists(mns)
self.log.info("test that MNs disappear from the list when the ProTx collateral is spent")
spend_mns_count = 3
mns_tmp = [] + mns
dummy_txins = []
for i in range(spend_mns_count):
dummy_txin = self.spend_mn_collateral(mns[i], with_dummy_input_output=True)
dummy_txins.append(dummy_txin)
self.nodes[0].generate(1)
self.sync_all()
mns_tmp.remove(mns[i])
self.assert_mnlists(mns_tmp)
self.log.info("test that reverting the blockchain on a single node results in the mnlist to be reverted as well")
for i in range(spend_mns_count):
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
mns_tmp.append(mns[spend_mns_count - 1 - i])
self.assert_mnlist(self.nodes[0], mns_tmp)
self.log.info("cause a reorg with a double spend and check that mnlists are still correct on all nodes")
self.mine_double_spend(self.nodes[0], dummy_txins, self.nodes[0].getnewaddress(), use_mnmerkleroot_from_tip=True)
self.nodes[0].generate(spend_mns_count)
self.sync_all()
self.assert_mnlists(mns_tmp)
self.log.info("test mn payment enforcement with deterministic MNs")
for i in range(20):
node = self.nodes[i % len(self.nodes)]
self.test_invalid_mn_payment(node)
self.nodes[0].generate(1)
self.sync_all()
self.log.info("testing ProUpServTx")
for mn in mns:
self.test_protx_update_service(mn)
self.log.info("testing P2SH/multisig for payee addresses")
multisig = self.nodes[0].createmultisig(1, [self.nodes[0].getnewaddress(), self.nodes[0].getnewaddress()])['address']
self.update_mn_payee(mns[0], multisig)
found_multisig_payee = False
for i in range(len(mns)):
bt = self.nodes[0].getblocktemplate()
expected_payee = bt['masternode'][0]['payee']
expected_amount = bt['masternode'][0]['amount']
self.nodes[0].generate(1)
self.sync_all()
if expected_payee == multisig:
block = self.nodes[0].getblock(self.nodes[0].getbestblockhash())
cbtx = self.nodes[0].getrawtransaction(block['tx'][0], 1)
for out in cbtx['vout']:
if 'addresses' in out['scriptPubKey']:
if expected_payee in out['scriptPubKey']['addresses'] and out['valueSat'] == expected_amount:
found_multisig_payee = True
assert(found_multisig_payee)
self.log.info("testing reusing of collaterals for replaced MNs")
for i in range(0, 5):
mn = mns[i]
# a few of these will actually refer to old ProRegTx internal collaterals,
# which should work the same as external collaterals
new_mn = self.prepare_mn(self.nodes[0], mn.idx, mn.alias)
new_mn.collateral_address = mn.collateral_address
new_mn.collateral_txid = mn.collateral_txid
new_mn.collateral_vout = mn.collateral_vout
self.register_mn(self.nodes[0], new_mn)
mns[i] = new_mn
self.nodes[0].generate(1)
self.sync_all()
self.assert_mnlists(mns)
self.log.info("restarting MN %s" % new_mn.alias)
self.stop_node(new_mn.idx)
self.start_mn(new_mn)
self.sync_all()
self.log.info("testing masternode status updates")
# change voting address and see if changes are reflected in `masternode status` rpc output
mn = mns[0]
node = self.nodes[0]
old_dmnState = mn.node.masternode("status")["dmnState"]
old_voting_address = old_dmnState["votingAddress"]
new_voting_address = node.getnewaddress()
assert(old_voting_address != new_voting_address)
# also check if funds from payout address are used when no fee source address is specified
node.sendtoaddress(mn.rewards_address, 0.001)
node.protx('update_registrar', mn.protx_hash, "", new_voting_address, "")
node.generate(1)
self.sync_all()
new_dmnState = mn.node.masternode("status")["dmnState"]
new_voting_address_from_rpc = new_dmnState["votingAddress"]
assert(new_voting_address_from_rpc == new_voting_address)
# make sure payoutAddress is the same as before
assert(old_dmnState["payoutAddress"] == new_dmnState["payoutAddress"])
def prepare_mn(self, node, idx, alias):
mn = Masternode()
mn.idx = idx
mn.alias = alias
mn.is_protx = True
mn.p2p_port = p2p_port(mn.idx)
blsKey = node.bls('generate')
mn.fundsAddr = node.getnewaddress()
mn.ownerAddr = node.getnewaddress()
mn.operatorAddr = blsKey['public']
mn.votingAddr = mn.ownerAddr
mn.blsMnkey = blsKey['secret']
return mn
def create_mn_collateral(self, node, mn):
mn.collateral_address = node.getnewaddress()
mn.collateral_txid = node.sendtoaddress(mn.collateral_address, 1000)
mn.collateral_vout = -1
node.generate(1)
rawtx = node.getrawtransaction(mn.collateral_txid, 1)
for txout in rawtx['vout']:
if txout['value'] == Decimal(1000):
mn.collateral_vout = txout['n']
break
assert(mn.collateral_vout != -1)
# register a protx MN and also fund it (using collateral inside ProRegTx)
def register_fund_mn(self, node, mn):
node.sendtoaddress(mn.fundsAddr, 1000.001)
mn.collateral_address = node.getnewaddress()
mn.rewards_address = node.getnewaddress()
mn.protx_hash = node.protx('register_fund', mn.collateral_address, '127.0.0.1:%d' % mn.p2p_port, mn.ownerAddr, mn.operatorAddr, mn.votingAddr, 0, mn.rewards_address, mn.fundsAddr)
mn.collateral_txid = mn.protx_hash
mn.collateral_vout = -1
rawtx = node.getrawtransaction(mn.collateral_txid, 1)
for txout in rawtx['vout']:
if txout['value'] == Decimal(1000):
mn.collateral_vout = txout['n']
break
assert(mn.collateral_vout != -1)
# create a protx MN which refers to an existing collateral
def register_mn(self, node, mn):
node.sendtoaddress(mn.fundsAddr, 0.001)
mn.rewards_address = node.getnewaddress()
mn.protx_hash = node.protx('register', mn.collateral_txid, mn.collateral_vout, '127.0.0.1:%d' % mn.p2p_port, mn.ownerAddr, mn.operatorAddr, mn.votingAddr, 0, mn.rewards_address, mn.fundsAddr)
node.generate(1)
def start_mn(self, mn):
while len(self.nodes) <= mn.idx:
self.add_nodes(1)
extra_args = ['-masternodeblsprivkey=%s' % mn.blsMnkey]
self.start_node(mn.idx, extra_args = self.extra_args + extra_args)
force_finish_mnsync(self.nodes[mn.idx])
for i in range(0, len(self.nodes)):
if i < len(self.nodes) and self.nodes[i] is not None and self.nodes[i].process is not None and i != mn.idx:
connect_nodes_bi(self.nodes, mn.idx, i)
mn.node = self.nodes[mn.idx]
self.sync_all()
def spend_mn_collateral(self, mn, with_dummy_input_output=False):
return self.spend_input(mn.collateral_txid, mn.collateral_vout, 1000, with_dummy_input_output)
def update_mn_payee(self, mn, payee):
self.nodes[0].sendtoaddress(mn.fundsAddr, 0.001)
self.nodes[0].protx('update_registrar', mn.protx_hash, '', '', payee, mn.fundsAddr)
self.nodes[0].generate(1)
self.sync_all()
info = self.nodes[0].protx('info', mn.protx_hash)
assert(info['state']['payoutAddress'] == payee)
def test_protx_update_service(self, mn):
self.nodes[0].sendtoaddress(mn.fundsAddr, 0.001)
self.nodes[0].protx('update_service', mn.protx_hash, '127.0.0.2:%d' % mn.p2p_port, mn.blsMnkey, "", mn.fundsAddr)
self.nodes[0].generate(1)
self.sync_all()
for node in self.nodes:
protx_info = node.protx('info', mn.protx_hash)
mn_list = node.masternode('list')
assert_equal(protx_info['state']['service'], '127.0.0.2:%d' % mn.p2p_port)
assert_equal(mn_list['%s-%d' % (mn.collateral_txid, mn.collateral_vout)]['address'], '127.0.0.2:%d' % mn.p2p_port)
# undo
self.nodes[0].protx('update_service', mn.protx_hash, '127.0.0.1:%d' % mn.p2p_port, mn.blsMnkey, "", mn.fundsAddr)
self.nodes[0].generate(1)
def assert_mnlists(self, mns):
for node in self.nodes:
self.assert_mnlist(node, mns)
def assert_mnlist(self, node, mns):
if not self.compare_mnlist(node, mns):
expected = []
for mn in mns:
expected.append('%s-%d' % (mn.collateral_txid, mn.collateral_vout))
self.log.error('mnlist: ' + str(node.masternode('list', 'status')))
self.log.error('expected: ' + str(expected))
raise AssertionError("mnlists does not match provided mns")
def compare_mnlist(self, node, mns):
mnlist = node.masternode('list', 'status')
for mn in mns:
s = '%s-%d' % (mn.collateral_txid, mn.collateral_vout)
in_list = s in mnlist
if not in_list:
return False
mnlist.pop(s, None)
if len(mnlist) != 0:
return False
return True
def spend_input(self, txid, vout, amount, with_dummy_input_output=False):
# with_dummy_input_output is useful if you want to test reorgs with double spends of the TX without touching the actual txid/vout
address = self.nodes[0].getnewaddress()
txins = [
{'txid': txid, 'vout': vout}
]
targets = {address: amount}
dummy_txin = None
if with_dummy_input_output:
dummyaddress = self.nodes[0].getnewaddress()
unspent = self.nodes[0].listunspent(110)
for u in unspent:
if u['amount'] > Decimal(1):
dummy_txin = {'txid': u['txid'], 'vout': u['vout']}
txins.append(dummy_txin)
targets[dummyaddress] = float(u['amount'] - Decimal(0.0001))
break
rawtx = self.nodes[0].createrawtransaction(txins, targets)
rawtx = self.nodes[0].fundrawtransaction(rawtx)['hex']
rawtx = self.nodes[0].signrawtransaction(rawtx)['hex']
new_txid = self.nodes[0].sendrawtransaction(rawtx)
return dummy_txin
def mine_block(self, node, vtx=[], miner_address=None, mn_payee=None, mn_amount=None, use_mnmerkleroot_from_tip=False, expected_error=None):
bt = node.getblocktemplate()
height = bt['height']
tip_hash = bt['previousblockhash']
tip_block = node.getblock(tip_hash)
coinbasevalue = bt['coinbasevalue']
if miner_address is None:
miner_address = self.nodes[0].getnewaddress()
if mn_payee is None:
if isinstance(bt['masternode'], list):
mn_payee = bt['masternode'][0]['payee']
else:
mn_payee = bt['masternode']['payee']
# we can't take the masternode payee amount from the template here as we might have additional fees in vtx
# calculate fees that the block template included (we'll have to remove it from the coinbase as we won't
# include the template's transactions
bt_fees = 0
for tx in bt['transactions']:
bt_fees += tx['fee']
new_fees = 0
for tx in vtx:
in_value = 0
out_value = 0
for txin in tx.vin:
txout = node.gettxout("%064x" % txin.prevout.hash, txin.prevout.n, False)
in_value += int(txout['value'] * COIN)
for txout in tx.vout:
out_value += txout.nValue
new_fees += in_value - out_value
# fix fees
coinbasevalue -= bt_fees
coinbasevalue += new_fees
if mn_amount is None:
mn_amount = get_masternode_payment(height, coinbasevalue)
miner_amount = coinbasevalue - mn_amount
outputs = {miner_address: str(Decimal(miner_amount) / COIN)}
if mn_amount > 0:
outputs[mn_payee] = str(Decimal(mn_amount) / COIN)
coinbase = FromHex(CTransaction(), node.createrawtransaction([], outputs))
coinbase.vin = create_coinbase(height).vin
# We can't really use this one as it would result in invalid merkle roots for masternode lists
if len(bt['coinbase_payload']) != 0:
cbtx = FromHex(CCbTx(version=1), bt['coinbase_payload'])
if use_mnmerkleroot_from_tip:
if 'cbTx' in tip_block:
cbtx.merkleRootMNList = int(tip_block['cbTx']['merkleRootMNList'], 16)
else:
cbtx.merkleRootMNList = 0
coinbase.nVersion = 3
coinbase.nType = 5 # CbTx
coinbase.vExtraPayload = cbtx.serialize()
coinbase.calc_sha256()
block = create_block(int(tip_hash, 16), coinbase)
block.vtx += vtx
# Add quorum commitments from template
for tx in bt['transactions']:
tx2 = FromHex(CTransaction(), tx['data'])
if tx2.nType == 6:
block.vtx.append(tx2)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
result = node.submitblock(ToHex(block))
if expected_error is not None and result != expected_error:
raise AssertionError('mining the block should have failed with error %s, but submitblock returned %s' % (expected_error, result))
elif expected_error is None and result is not None:
raise AssertionError('submitblock returned %s' % (result))
def mine_double_spend(self, node, txins, target_address, use_mnmerkleroot_from_tip=False):
amount = Decimal(0)
for txin in txins:
txout = node.gettxout(txin['txid'], txin['vout'], False)
amount += txout['value']
amount -= Decimal("0.001") # fee
rawtx = node.createrawtransaction(txins, {target_address: amount})
rawtx = node.signrawtransaction(rawtx)['hex']
tx = FromHex(CTransaction(), rawtx)
self.mine_block(node, [tx], use_mnmerkleroot_from_tip=use_mnmerkleroot_from_tip)
def test_invalid_mn_payment(self, node):
mn_payee = self.nodes[0].getnewaddress()
self.mine_block(node, mn_payee=mn_payee, expected_error='bad-cb-payee')
self.mine_block(node, mn_amount=1, expected_error='bad-cb-payee')
if __name__ == '__main__':
DIP3Test().main()
| 42.425721 | 199 | 0.621616 |
import sys
from test_framework.blocktools import create_block, create_coinbase, get_masternode_payment
from test_framework.mininode import CTransaction, ToHex, FromHex, CTxOut, COIN, CCbTx
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class Masternode(object):
pass
class DIP3Test(BitcoinTestFramework):
def set_test_params(self):
self.num_initial_mn = 11
self.num_nodes = 1 + self.num_initial_mn + 2
self.setup_clean_chain = True
self.extra_args = ["-budgetparams=10:10:10"]
self.extra_args += ["-sporkkey=cP4EKFyJsHT39LDqgdcB43Y3YXjNyjb5Fuas1GQSeAtjnZWmZEQK"]
self.extra_args += ["-dip3params=135:150"]
def setup_network(self):
self.disable_mocktime()
self.add_nodes(1)
self.start_controller_node()
def start_controller_node(self):
self.log.info("starting controller node")
self.start_node(0, extra_args=self.extra_args)
for i in range(1, self.num_nodes):
if i < len(self.nodes) and self.nodes[i] is not None and self.nodes[i].process is not None:
connect_nodes_bi(self.nodes, 0, i)
def stop_controller_node(self):
self.log.info("stopping controller node")
self.stop_node(0)
def restart_controller_node(self):
self.stop_controller_node()
self.start_controller_node()
def run_test(self):
self.log.info("funding controller node")
while self.nodes[0].getbalance() < (self.num_initial_mn + 3) * 1000:
self.nodes[0].generate(1)
self.log.info("controller node has {} lokal".format(self.nodes[0].getbalance()))
self.log.info("testing rejection of ProTx before dip3 activation")
assert(self.nodes[0].getblockchaininfo()['blocks'] < 135)
mns = []
# prepare mn which should still be accepted later when dip3 activates
self.log.info("creating collateral for mn-before-dip3")
before_dip3_mn = self.prepare_mn(self.nodes[0], 1, 'mn-before-dip3')
self.create_mn_collateral(self.nodes[0], before_dip3_mn)
mns.append(before_dip3_mn)
# block 150 starts enforcing DIP3 MN payments
while self.nodes[0].getblockcount() < 150:
self.nodes[0].generate(1)
self.log.info("mining final block for DIP3 activation")
self.nodes[0].generate(1)
# We have hundreds of blocks to sync here, give it more time
self.log.info("syncing blocks for all nodes")
sync_blocks(self.nodes, timeout=120)
# DIP3 is fully enforced here
self.register_mn(self.nodes[0], before_dip3_mn)
self.start_mn(before_dip3_mn)
self.log.info("registering MNs")
for i in range(0, self.num_initial_mn):
mn = self.prepare_mn(self.nodes[0], i + 2, "mn-%d" % i)
mns.append(mn)
# start a few MNs before they are registered and a few after they are registered
start = (i % 3) == 0
if start:
self.start_mn(mn)
# let a few of the protx MNs refer to the existing collaterals
fund = (i % 2) == 0
if fund:
self.log.info("register_fund %s" % mn.alias)
self.register_fund_mn(self.nodes[0], mn)
else:
self.log.info("create_collateral %s" % mn.alias)
self.create_mn_collateral(self.nodes[0], mn)
self.log.info("register %s" % mn.alias)
self.register_mn(self.nodes[0], mn)
self.nodes[0].generate(1)
if not start:
self.start_mn(mn)
self.sync_all()
self.assert_mnlists(mns)
self.log.info("test that MNs disappear from the list when the ProTx collateral is spent")
spend_mns_count = 3
mns_tmp = [] + mns
dummy_txins = []
for i in range(spend_mns_count):
dummy_txin = self.spend_mn_collateral(mns[i], with_dummy_input_output=True)
dummy_txins.append(dummy_txin)
self.nodes[0].generate(1)
self.sync_all()
mns_tmp.remove(mns[i])
self.assert_mnlists(mns_tmp)
self.log.info("test that reverting the blockchain on a single node results in the mnlist to be reverted as well")
for i in range(spend_mns_count):
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
mns_tmp.append(mns[spend_mns_count - 1 - i])
self.assert_mnlist(self.nodes[0], mns_tmp)
self.log.info("cause a reorg with a double spend and check that mnlists are still correct on all nodes")
self.mine_double_spend(self.nodes[0], dummy_txins, self.nodes[0].getnewaddress(), use_mnmerkleroot_from_tip=True)
self.nodes[0].generate(spend_mns_count)
self.sync_all()
self.assert_mnlists(mns_tmp)
self.log.info("test mn payment enforcement with deterministic MNs")
for i in range(20):
node = self.nodes[i % len(self.nodes)]
self.test_invalid_mn_payment(node)
self.nodes[0].generate(1)
self.sync_all()
self.log.info("testing ProUpServTx")
for mn in mns:
self.test_protx_update_service(mn)
self.log.info("testing P2SH/multisig for payee addresses")
multisig = self.nodes[0].createmultisig(1, [self.nodes[0].getnewaddress(), self.nodes[0].getnewaddress()])['address']
self.update_mn_payee(mns[0], multisig)
found_multisig_payee = False
for i in range(len(mns)):
bt = self.nodes[0].getblocktemplate()
expected_payee = bt['masternode'][0]['payee']
expected_amount = bt['masternode'][0]['amount']
self.nodes[0].generate(1)
self.sync_all()
if expected_payee == multisig:
block = self.nodes[0].getblock(self.nodes[0].getbestblockhash())
cbtx = self.nodes[0].getrawtransaction(block['tx'][0], 1)
for out in cbtx['vout']:
if 'addresses' in out['scriptPubKey']:
if expected_payee in out['scriptPubKey']['addresses'] and out['valueSat'] == expected_amount:
found_multisig_payee = True
assert(found_multisig_payee)
self.log.info("testing reusing of collaterals for replaced MNs")
for i in range(0, 5):
mn = mns[i]
# a few of these will actually refer to old ProRegTx internal collaterals,
# which should work the same as external collaterals
new_mn = self.prepare_mn(self.nodes[0], mn.idx, mn.alias)
new_mn.collateral_address = mn.collateral_address
new_mn.collateral_txid = mn.collateral_txid
new_mn.collateral_vout = mn.collateral_vout
self.register_mn(self.nodes[0], new_mn)
mns[i] = new_mn
self.nodes[0].generate(1)
self.sync_all()
self.assert_mnlists(mns)
self.log.info("restarting MN %s" % new_mn.alias)
self.stop_node(new_mn.idx)
self.start_mn(new_mn)
self.sync_all()
self.log.info("testing masternode status updates")
# change voting address and see if changes are reflected in `masternode status` rpc output
mn = mns[0]
node = self.nodes[0]
old_dmnState = mn.node.masternode("status")["dmnState"]
old_voting_address = old_dmnState["votingAddress"]
new_voting_address = node.getnewaddress()
assert(old_voting_address != new_voting_address)
# also check if funds from payout address are used when no fee source address is specified
node.sendtoaddress(mn.rewards_address, 0.001)
node.protx('update_registrar', mn.protx_hash, "", new_voting_address, "")
node.generate(1)
self.sync_all()
new_dmnState = mn.node.masternode("status")["dmnState"]
new_voting_address_from_rpc = new_dmnState["votingAddress"]
assert(new_voting_address_from_rpc == new_voting_address)
# make sure payoutAddress is the same as before
assert(old_dmnState["payoutAddress"] == new_dmnState["payoutAddress"])
def prepare_mn(self, node, idx, alias):
mn = Masternode()
mn.idx = idx
mn.alias = alias
mn.is_protx = True
mn.p2p_port = p2p_port(mn.idx)
blsKey = node.bls('generate')
mn.fundsAddr = node.getnewaddress()
mn.ownerAddr = node.getnewaddress()
mn.operatorAddr = blsKey['public']
mn.votingAddr = mn.ownerAddr
mn.blsMnkey = blsKey['secret']
return mn
def create_mn_collateral(self, node, mn):
mn.collateral_address = node.getnewaddress()
mn.collateral_txid = node.sendtoaddress(mn.collateral_address, 1000)
mn.collateral_vout = -1
node.generate(1)
rawtx = node.getrawtransaction(mn.collateral_txid, 1)
for txout in rawtx['vout']:
if txout['value'] == Decimal(1000):
mn.collateral_vout = txout['n']
break
assert(mn.collateral_vout != -1)
# register a protx MN and also fund it (using collateral inside ProRegTx)
def register_fund_mn(self, node, mn):
node.sendtoaddress(mn.fundsAddr, 1000.001)
mn.collateral_address = node.getnewaddress()
mn.rewards_address = node.getnewaddress()
mn.protx_hash = node.protx('register_fund', mn.collateral_address, '127.0.0.1:%d' % mn.p2p_port, mn.ownerAddr, mn.operatorAddr, mn.votingAddr, 0, mn.rewards_address, mn.fundsAddr)
mn.collateral_txid = mn.protx_hash
mn.collateral_vout = -1
rawtx = node.getrawtransaction(mn.collateral_txid, 1)
for txout in rawtx['vout']:
if txout['value'] == Decimal(1000):
mn.collateral_vout = txout['n']
break
assert(mn.collateral_vout != -1)
# create a protx MN which refers to an existing collateral
def register_mn(self, node, mn):
node.sendtoaddress(mn.fundsAddr, 0.001)
mn.rewards_address = node.getnewaddress()
mn.protx_hash = node.protx('register', mn.collateral_txid, mn.collateral_vout, '127.0.0.1:%d' % mn.p2p_port, mn.ownerAddr, mn.operatorAddr, mn.votingAddr, 0, mn.rewards_address, mn.fundsAddr)
node.generate(1)
def start_mn(self, mn):
while len(self.nodes) <= mn.idx:
self.add_nodes(1)
extra_args = ['-masternodeblsprivkey=%s' % mn.blsMnkey]
self.start_node(mn.idx, extra_args = self.extra_args + extra_args)
force_finish_mnsync(self.nodes[mn.idx])
for i in range(0, len(self.nodes)):
if i < len(self.nodes) and self.nodes[i] is not None and self.nodes[i].process is not None and i != mn.idx:
connect_nodes_bi(self.nodes, mn.idx, i)
mn.node = self.nodes[mn.idx]
self.sync_all()
def spend_mn_collateral(self, mn, with_dummy_input_output=False):
return self.spend_input(mn.collateral_txid, mn.collateral_vout, 1000, with_dummy_input_output)
def update_mn_payee(self, mn, payee):
self.nodes[0].sendtoaddress(mn.fundsAddr, 0.001)
self.nodes[0].protx('update_registrar', mn.protx_hash, '', '', payee, mn.fundsAddr)
self.nodes[0].generate(1)
self.sync_all()
info = self.nodes[0].protx('info', mn.protx_hash)
assert(info['state']['payoutAddress'] == payee)
def test_protx_update_service(self, mn):
self.nodes[0].sendtoaddress(mn.fundsAddr, 0.001)
self.nodes[0].protx('update_service', mn.protx_hash, '127.0.0.2:%d' % mn.p2p_port, mn.blsMnkey, "", mn.fundsAddr)
self.nodes[0].generate(1)
self.sync_all()
for node in self.nodes:
protx_info = node.protx('info', mn.protx_hash)
mn_list = node.masternode('list')
assert_equal(protx_info['state']['service'], '127.0.0.2:%d' % mn.p2p_port)
assert_equal(mn_list['%s-%d' % (mn.collateral_txid, mn.collateral_vout)]['address'], '127.0.0.2:%d' % mn.p2p_port)
# undo
self.nodes[0].protx('update_service', mn.protx_hash, '127.0.0.1:%d' % mn.p2p_port, mn.blsMnkey, "", mn.fundsAddr)
self.nodes[0].generate(1)
def assert_mnlists(self, mns):
for node in self.nodes:
self.assert_mnlist(node, mns)
def assert_mnlist(self, node, mns):
if not self.compare_mnlist(node, mns):
expected = []
for mn in mns:
expected.append('%s-%d' % (mn.collateral_txid, mn.collateral_vout))
self.log.error('mnlist: ' + str(node.masternode('list', 'status')))
self.log.error('expected: ' + str(expected))
raise AssertionError("mnlists does not match provided mns")
def compare_mnlist(self, node, mns):
mnlist = node.masternode('list', 'status')
for mn in mns:
s = '%s-%d' % (mn.collateral_txid, mn.collateral_vout)
in_list = s in mnlist
if not in_list:
return False
mnlist.pop(s, None)
if len(mnlist) != 0:
return False
return True
def spend_input(self, txid, vout, amount, with_dummy_input_output=False):
# with_dummy_input_output is useful if you want to test reorgs with double spends of the TX without touching the actual txid/vout
address = self.nodes[0].getnewaddress()
txins = [
{'txid': txid, 'vout': vout}
]
targets = {address: amount}
dummy_txin = None
if with_dummy_input_output:
dummyaddress = self.nodes[0].getnewaddress()
unspent = self.nodes[0].listunspent(110)
for u in unspent:
if u['amount'] > Decimal(1):
dummy_txin = {'txid': u['txid'], 'vout': u['vout']}
txins.append(dummy_txin)
targets[dummyaddress] = float(u['amount'] - Decimal(0.0001))
break
rawtx = self.nodes[0].createrawtransaction(txins, targets)
rawtx = self.nodes[0].fundrawtransaction(rawtx)['hex']
rawtx = self.nodes[0].signrawtransaction(rawtx)['hex']
new_txid = self.nodes[0].sendrawtransaction(rawtx)
return dummy_txin
def mine_block(self, node, vtx=[], miner_address=None, mn_payee=None, mn_amount=None, use_mnmerkleroot_from_tip=False, expected_error=None):
bt = node.getblocktemplate()
height = bt['height']
tip_hash = bt['previousblockhash']
tip_block = node.getblock(tip_hash)
coinbasevalue = bt['coinbasevalue']
if miner_address is None:
miner_address = self.nodes[0].getnewaddress()
if mn_payee is None:
if isinstance(bt['masternode'], list):
mn_payee = bt['masternode'][0]['payee']
else:
mn_payee = bt['masternode']['payee']
# we can't take the masternode payee amount from the template here as we might have additional fees in vtx
bt_fees = 0
for tx in bt['transactions']:
bt_fees += tx['fee']
new_fees = 0
for tx in vtx:
in_value = 0
out_value = 0
for txin in tx.vin:
txout = node.gettxout("%064x" % txin.prevout.hash, txin.prevout.n, False)
in_value += int(txout['value'] * COIN)
for txout in tx.vout:
out_value += txout.nValue
new_fees += in_value - out_value
# fix fees
coinbasevalue -= bt_fees
coinbasevalue += new_fees
if mn_amount is None:
mn_amount = get_masternode_payment(height, coinbasevalue)
miner_amount = coinbasevalue - mn_amount
outputs = {miner_address: str(Decimal(miner_amount) / COIN)}
if mn_amount > 0:
outputs[mn_payee] = str(Decimal(mn_amount) / COIN)
coinbase = FromHex(CTransaction(), node.createrawtransaction([], outputs))
coinbase.vin = create_coinbase(height).vin
# We can't really use this one as it would result in invalid merkle roots for masternode lists
if len(bt['coinbase_payload']) != 0:
cbtx = FromHex(CCbTx(version=1), bt['coinbase_payload'])
if use_mnmerkleroot_from_tip:
if 'cbTx' in tip_block:
cbtx.merkleRootMNList = int(tip_block['cbTx']['merkleRootMNList'], 16)
else:
cbtx.merkleRootMNList = 0
coinbase.nVersion = 3
coinbase.nType = 5
coinbase.vExtraPayload = cbtx.serialize()
coinbase.calc_sha256()
block = create_block(int(tip_hash, 16), coinbase)
block.vtx += vtx
for tx in bt['transactions']:
tx2 = FromHex(CTransaction(), tx['data'])
if tx2.nType == 6:
block.vtx.append(tx2)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
result = node.submitblock(ToHex(block))
if expected_error is not None and result != expected_error:
raise AssertionError('mining the block should have failed with error %s, but submitblock returned %s' % (expected_error, result))
elif expected_error is None and result is not None:
raise AssertionError('submitblock returned %s' % (result))
def mine_double_spend(self, node, txins, target_address, use_mnmerkleroot_from_tip=False):
amount = Decimal(0)
for txin in txins:
txout = node.gettxout(txin['txid'], txin['vout'], False)
amount += txout['value']
amount -= Decimal("0.001")
rawtx = node.createrawtransaction(txins, {target_address: amount})
rawtx = node.signrawtransaction(rawtx)['hex']
tx = FromHex(CTransaction(), rawtx)
self.mine_block(node, [tx], use_mnmerkleroot_from_tip=use_mnmerkleroot_from_tip)
def test_invalid_mn_payment(self, node):
mn_payee = self.nodes[0].getnewaddress()
self.mine_block(node, mn_payee=mn_payee, expected_error='bad-cb-payee')
self.mine_block(node, mn_amount=1, expected_error='bad-cb-payee')
if __name__ == '__main__':
DIP3Test().main()
| true | true |
1c34b930ea02d98164f851ca88de83a3ba581616 | 4,415 | py | Python | spotify_dl/spotify_dl.py | Harsh14901/spotify-dl | 368bc4c842e496fa468160d5422add87c359f19d | [
"MIT"
] | null | null | null | spotify_dl/spotify_dl.py | Harsh14901/spotify-dl | 368bc4c842e496fa468160d5422add87c359f19d | [
"MIT"
] | null | null | null | spotify_dl/spotify_dl.py | Harsh14901/spotify-dl | 368bc4c842e496fa468160d5422add87c359f19d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
from itertools import product
import json
import os
import sys
from logging import DEBUG
from pathlib import Path, PurePath
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
from constants import VERSION
from models import db, Song
from scaffold import log, check_for_tokens
from spotify import fetch_tracks, parse_spotify_url, validate_spotify_url, get_item_name
from youtube import download_songs, default_filename, playlist_num_filename
from dotenv import load_dotenv
from os import getenv
from multiprocessing import Pool
load_dotenv()
def spotify_dl():
    """Command-line entry point.

    Parses arguments (optionally overridden by ~/.spotify_dl_settings),
    resolves the Spotify URL to a track list, and downloads each track from
    YouTube in parallel via a process pool.  Exits the process on missing
    arguments, missing API tokens, or an invalid Spotify URL.
    """
    parser = argparse.ArgumentParser(prog='spotify_dl')
    parser.add_argument('-l', '--url', action="store",
                        help="Spotify Playlist link URL", type=str, required=False)
    parser.add_argument('-o', '--output', type=str, action='store',
                        help='Specify download directory.', required=True)
    # NOTE(review): with action='store_true' and default=True this flag can
    # never be switched off from the command line; kept as-is for
    # backward compatibility.
    parser.add_argument('-d', '--download', action='store_true',
                        help='Download using youtube-dl', default=True)
    parser.add_argument('-c', '--collection', action='store_true',
                        help='Download saved collection', default=False, dest="collection")
    parser.add_argument('-f', '--format_str', type=str, action='store',
                        help='Specify youtube-dl format string.',
                        default='bestaudio/best')
    parser.add_argument('-k', '--keep_playlist_order', default=False,
                        action='store_true',
                        help='Whether to keep original playlist ordering or not.')
    parser.add_argument('-m', '--skip_mp3', action='store_true',
                        help='Don\'t convert downloaded songs to mp3')
    # NOTE(review): --scrape is parsed but never read in this function.
    parser.add_argument('-s', '--scrape', action="store",
                        help="Use HTML Scraper for YouTube Search", default=True)
    # Bug fix: 'what''s' was two adjacent literals implicitly concatenating
    # to "whats happening" -- the apostrophe was lost.
    parser.add_argument('-V', '--verbose', action='store_true',
                        help="Show more information on what's happening.")
    parser.add_argument('-v', '--version', action='store_true',
                        help='Shows current version of the program')
    args = parser.parse_args()

    if args.version:
        print("spotify_dl v{}".format(VERSION))
        sys.exit(0)

    if args.collection:
        # Special pseudo-URL handled by the fetch code for saved tracks.
        args.url = "https://open.spotify.com/collection/tracks"
    elif args.url is None:
        log.fatal("Required parameter: -l <URL>")
        sys.exit(-1)

    db.connect()
    db.create_tables([Song])

    # Optional per-user settings file: each entry overrides the parsed args;
    # the strings 'true'/'t' (any case) become boolean True.
    if os.path.isfile(os.path.expanduser('~/.spotify_dl_settings')):
        with open(os.path.expanduser('~/.spotify_dl_settings')) as file:
            config = json.loads(file.read())
        for key, value in config.items():
            if value and (value.lower() == 'true' or value.lower() == 't'):
                setattr(args, key, True)
            else:
                setattr(args, key, value)

    if args.verbose:
        log.setLevel(DEBUG)
    log.info('Starting spotify_dl')
    log.debug('Setting debug mode on spotify_dl')

    if not check_for_tokens():
        sys.exit(1)

    # Public playlists only need client credentials; the saved-tracks
    # collection requires a user token from the environment.
    if not args.collection:
        sp = spotipy.Spotify(auth_manager=SpotifyClientCredentials())
    else:
        token = getenv('TOKEN')
        if token is None:
            log.fatal("Required token in .env")
            sys.exit(-1)
        sp = spotipy.Spotify(auth=token)

    log.debug('Arguments: {}'.format(args))

    if args.url:
        valid_item = validate_spotify_url(args.url)
        if not valid_item:
            sys.exit(1)

    if args.output:
        item_type, item_id = parse_spotify_url(args.url)
        directory_name = get_item_name(sp, item_type, item_id)
        save_path = Path(PurePath.joinpath(
            Path(args.output), Path(directory_name)))
        save_path.mkdir(parents=True, exist_ok=True)
        log.info("Saving songs to: {}".format(directory_name))

    songs = fetch_tracks(sp, item_type, args.url)
    if args.download is True:
        file_name_f = default_filename
        if args.keep_playlist_order:
            file_name_f = playlist_num_filename
        # Fan the downloads out over a process pool; product() pairs every
        # song with the same fixed set of options.
        with Pool() as pool:
            pool.starmap(download_songs,
                         product(songs, [save_path], [args.format_str],
                                 [args.skip_mp3], [args.keep_playlist_order],
                                 [file_name_f]))
    print("Done!")
# Script entry point.
if __name__ == '__main__':
    spotify_dl()
| 36.791667 | 147 | 0.63171 |
import argparse
from itertools import product
import json
import os
import sys
from logging import DEBUG
from pathlib import Path, PurePath
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
from constants import VERSION
from models import db, Song
from scaffold import log, check_for_tokens
from spotify import fetch_tracks, parse_spotify_url, validate_spotify_url, get_item_name
from youtube import download_songs, default_filename, playlist_num_filename
from dotenv import load_dotenv
from os import getenv
from multiprocessing import Pool
load_dotenv()
def spotify_dl():
parser = argparse.ArgumentParser(prog='spotify_dl')
parser.add_argument('-l', '--url', action="store",
help="Spotify Playlist link URL", type=str, required=False)
parser.add_argument('-o', '--output', type=str, action='store',
help='Specify download directory.', required=True)
parser.add_argument('-d', '--download', action='store_true',
help='Download using youtube-dl', default=True)
parser.add_argument('-c', '--collection', action='store_true',
help='Download saved collection', default=False, dest="collection")
parser.add_argument('-f', '--format_str', type=str, action='store',
help='Specify youtube-dl format string.',
default='bestaudio/best')
parser.add_argument('-k', '--keep_playlist_order', default=False,
action='store_true',
help='Whether to keep original playlist ordering or not.')
parser.add_argument('-m', '--skip_mp3', action='store_true',
help='Don\'t convert downloaded songs to mp3')
parser.add_argument('-s', '--scrape', action="store",
help="Use HTML Scraper for YouTube Search", default=True)
parser.add_argument('-V', '--verbose', action='store_true',
help='Show more information on what''s happening.')
parser.add_argument('-v', '--version', action='store_true',
help='Shows current version of the program')
args = parser.parse_args()
if args.version:
print("spotify_dl v{}".format(VERSION))
exit(0)
if args.collection:
args.url = "https://open.spotify.com/collection/tracks"
elif args.url is None:
log.fatal("Required parameter: -l <URL>")
exit(-1)
db.connect()
db.create_tables([Song])
if os.path.isfile(os.path.expanduser('~/.spotify_dl_settings')):
with open(os.path.expanduser('~/.spotify_dl_settings')) as file:
config = json.loads(file.read())
for key, value in config.items():
if value and (value.lower() == 'true' or value.lower() == 't'):
setattr(args, key, True)
else:
setattr(args, key, value)
if args.verbose:
log.setLevel(DEBUG)
log.info('Starting spotify_dl')
log.debug('Setting debug mode on spotify_dl')
if not check_for_tokens():
exit(1)
if not args.collection:
sp = spotipy.Spotify(auth_manager=SpotifyClientCredentials())
else:
token = getenv('TOKEN')
if token is None:
log.fatal("Required token in .env")
exit(-1)
sp = spotipy.Spotify(auth=token)
log.debug('Arguments: {}'.format(args))
if args.url:
valid_item = validate_spotify_url(args.url)
if not valid_item:
sys.exit(1)
if args.output:
item_type, item_id = parse_spotify_url(args.url)
directory_name = get_item_name(sp, item_type, item_id)
save_path = Path(PurePath.joinpath(
Path(args.output), Path(directory_name)))
save_path.mkdir(parents=True, exist_ok=True)
log.info("Saving songs to: {}".format(directory_name))
songs = fetch_tracks(sp, item_type, args.url)
if args.download is True:
file_name_f = default_filename
if args.keep_playlist_order:
file_name_f = playlist_num_filename
with Pool() as pool:
pool.starmap(download_songs, product(songs, [save_path], [args.format_str],[args.skip_mp3], [args.keep_playlist_order], [file_name_f]))
print("Done!")
if __name__ == '__main__':
spotify_dl()
| true | true |
1c34ba5fd330238a1f0935eb0fc70d42440fb0ff | 15,773 | py | Python | rpython/memory/gctransform/shadowstack.py | m4sterchain/mesapy | ed546d59a21b36feb93e2309d5c6b75aa0ad95c9 | [
"Apache-2.0",
"OpenSSL"
] | 381 | 2018-08-18T03:37:22.000Z | 2022-02-06T23:57:36.000Z | rpython/memory/gctransform/shadowstack.py | m4sterchain/mesapy | ed546d59a21b36feb93e2309d5c6b75aa0ad95c9 | [
"Apache-2.0",
"OpenSSL"
] | 16 | 2018-09-22T18:12:47.000Z | 2022-02-22T20:03:59.000Z | rpython/memory/gctransform/shadowstack.py | m4sterchain/mesapy | ed546d59a21b36feb93e2309d5c6b75aa0ad95c9 | [
"Apache-2.0",
"OpenSSL"
] | 30 | 2018-08-20T03:16:34.000Z | 2022-01-12T17:39:22.000Z | from rpython.annotator import model as annmodel
from rpython.rtyper.llannotation import SomePtr
from rpython.rlib.debug import ll_assert
from rpython.rlib.nonconst import NonConstant
from rpython.rlib import rgc
from rpython.rlib.objectmodel import specialize
from rpython.rtyper import rmodel
from rpython.rtyper.annlowlevel import llhelper
from rpython.rtyper.lltypesystem import lltype, llmemory
from rpython.rtyper.llannotation import SomeAddress
from rpython.memory.gctransform.framework import (
BaseFrameworkGCTransformer, BaseRootWalker, sizeofaddr)
from rpython.rtyper.rbuiltin import gen_cast
from rpython.memory.gctransform.log import log
class ShadowStackFrameworkGCTransformer(BaseFrameworkGCTransformer):
    """Framework GC transformer keeping GC roots on a 'shadow stack':
    an explicit stack of root addresses (gcdata.root_stack_base/top)
    maintained alongside the normal C stack.
    """

    def annotate_walker_functions(self, getfn):
        # Turn the root walker's stack bump/unbump helpers into inlined
        # low-level graph functions of type (int) -> address.
        self.incr_stack_ptr = getfn(self.root_walker.incr_stack,
                                    [annmodel.SomeInteger()],
                                    SomeAddress(),
                                    inline = True)
        self.decr_stack_ptr = getfn(self.root_walker.decr_stack,
                                    [annmodel.SomeInteger()],
                                    SomeAddress(),
                                    inline = True)

    def build_root_walker(self):
        # One walker instance drives all shadow-stack traversals.
        return ShadowStackRootWalker(self)

    def push_roots(self, hop, keep_current_args=False):
        # Emit 'gc_push_roots' saving the live GC variables around the
        # current operation; the same list is handed back to pop_roots.
        livevars = self.get_livevars_for_roots(hop, keep_current_args)
        self.num_pushs += len(livevars)
        hop.genop("gc_push_roots", livevars)
        return livevars

    def pop_roots(self, hop, livevars):
        hop.genop("gc_pop_roots", livevars)
        # NB. we emit it even if len(livevars) == 0; this is needed for
        # shadowcolor.move_pushes_earlier()
@specialize.call_location()
def walk_stack_root(invoke, arg0, arg1, start, addr, is_minor):
    """Walk one shadow stack from its top ('addr') down to 'start',
    calling invoke(arg0, arg1, word_address) for every regular GC pointer.

    Each word is either NULL (ignored), an even non-zero value (a regular
    pointer, reported via 'invoke'), or an odd integer acting as a skip
    bitmask whose bits tell which of the following words to skip.  During
    a minor walk a positive (unmarked) bitmask is negated in place as a
    mark, and hitting an already-marked (negative) bitmask stops the walk
    early -- the deeper part of the stack was already seen.
    """
    skip = 0
    while addr != start:
        addr -= sizeofaddr
        #XXX reintroduce support for tagged values?
        #if gc.points_to_valid_gc_object(addr):
        #    callback(gc, addr)

        if skip & 1 == 0:
            content = addr.address[0]
            n = llmemory.cast_adr_to_int(content)
            if n & 1 == 0:
                if content:   # non-0, non-odd: a regular ptr
                    invoke(arg0, arg1, addr)
            else:
                # odd number: a skip bitmask
                if n > 0:     # initially, an unmarked value
                    if is_minor:
                        newcontent = llmemory.cast_int_to_adr(-n)
                        addr.address[0] = newcontent   # mark
                        skip = n
                else:
                    # a marked value
                    if is_minor:
                        return
                    skip = -n
        # consume one bit of the current skip mask per word walked
        skip >>= 1
class ShadowStackRootWalker(BaseRootWalker):
    """Root walker for the shadow-stack strategy.

    Maintains gcdata.root_stack_base/top, walks the stack words via
    walk_stack_root(), and (when threads or stacklets are enabled) swaps
    whole shadow stacks in and out through a ShadowStackPool.
    """

    def __init__(self, gctransformer):
        BaseRootWalker.__init__(self, gctransformer)
        # NB. 'self' is frozen, but we can use self.gcdata to store state
        gcdata = self.gcdata
        gcdata.can_look_at_partial_stack = True

        def incr_stack(n):
            # Reserve n words on the shadow stack; returns the old top.
            top = gcdata.root_stack_top
            gcdata.root_stack_top = top + n*sizeofaddr
            return top
        self.incr_stack = incr_stack

        def decr_stack(n):
            # Release n words from the shadow stack; returns the new top.
            top = gcdata.root_stack_top - n*sizeofaddr
            gcdata.root_stack_top = top
            return top
        self.decr_stack = decr_stack

        self.invoke_collect_stack_root = specialize.call_location()(
            lambda arg0, arg1, addr: arg0(self.gc, addr))

        self.shadow_stack_pool = ShadowStackPool(gcdata)
        rsd = gctransformer.root_stack_depth
        if rsd is not None:
            self.shadow_stack_pool.root_stack_depth = rsd

    def push_stack(self, addr):
        # Push one root address on the shadow stack.
        top = self.incr_stack(1)
        top.address[0] = addr

    def pop_stack(self):
        # Pop and return the topmost root address.
        top = self.decr_stack(1)
        return top.address[0]

    def setup_root_walker(self):
        self.shadow_stack_pool.initial_setup()
        BaseRootWalker.setup_root_walker(self)

    def walk_stack_roots(self, collect_stack_root, is_minor=False):
        # Note that if we're the first minor collection after a thread
        # switch, then we also need to disable the 'is_minor'
        # optimization.  The reason is subtle: we need to walk the whole
        # stack because otherwise, we can be in the middle of an
        # incremental major collection, and the new stack was just moved
        # off a ShadowStackRef object (gctransform/shadowstack.py) which
        # was not seen yet.  We might completely miss some old objects
        # from the parts of that stack that are skipped by this is_minor
        # optimization.
        gcdata = self.gcdata
        if is_minor and not gcdata.can_look_at_partial_stack:
            is_minor = False
            gcdata.can_look_at_partial_stack = True
        walk_stack_root(self.invoke_collect_stack_root, collect_stack_root,
                        None, gcdata.root_stack_base, gcdata.root_stack_top,
                        is_minor=is_minor)

    def need_thread_support(self, gctransformer, getfn):
        """Install per-thread shadow-stack switching helpers."""
        from rpython.rlib import rthread    # xxx fish
        gcdata = self.gcdata
        # the interfacing between the threads and the GC is done via
        # two completely ad-hoc operations at the moment:
        # gc_thread_run and gc_thread_die.  See docstrings below.

        shadow_stack_pool = self.shadow_stack_pool
        SHADOWSTACKREF = get_shadowstackref(self, gctransformer)

        # this is a dict {tid: SHADOWSTACKREF}, where the tid for the
        # current thread may be missing so far
        gcdata.thread_stacks = None

        # Return the thread identifier, as an integer.
        get_tid = rthread.get_ident

        def thread_setup():
            tid = get_tid()
            gcdata.main_tid = tid
            gcdata.active_tid = tid

        def thread_run():
            """Called whenever the current thread (re-)acquired the GIL.
            This should ensure that the shadow stack installed in
            gcdata.root_stack_top/root_stack_base is the one corresponding
            to the current thread.
            No GC operation here, e.g. no mallocs or storing in a dict!

            Note that here specifically we don't call rthread.get_ident(),
            but rthread.get_or_make_ident().  We are possibly in a fresh
            new thread, so we need to be careful.
            """
            tid = rthread.get_or_make_ident()
            if gcdata.active_tid != tid:
                switch_shadow_stacks(tid)

        def thread_die():
            """Called just before the final GIL release done by a dying
            thread.  After a thread_die(), no more gc operation should
            occur in this thread.
            """
            tid = get_tid()
            if tid == gcdata.main_tid:
                return   # ignore calls to thread_die() in the main thread
                         # (which can occur after a fork()).
            # we need to switch somewhere else, so go to main_tid
            gcdata.active_tid = gcdata.main_tid
            thread_stacks = gcdata.thread_stacks
            new_ref = thread_stacks[gcdata.active_tid]
            try:
                del thread_stacks[tid]
            except KeyError:
                pass
            # no more GC operation from here -- switching shadowstack!
            shadow_stack_pool.forget_current_state()
            shadow_stack_pool.restore_state_from(new_ref)

        def switch_shadow_stacks(new_tid):
            # we have the wrong shadowstack right now, but it should not matter
            thread_stacks = gcdata.thread_stacks
            try:
                if thread_stacks is None:
                    gcdata.thread_stacks = thread_stacks = {}
                    raise KeyError
                new_ref = thread_stacks[new_tid]
            except KeyError:
                new_ref = lltype.nullptr(SHADOWSTACKREF)
            try:
                old_ref = thread_stacks[gcdata.active_tid]
            except KeyError:
                # first time we ask for a SHADOWSTACKREF for this active_tid
                old_ref = shadow_stack_pool.allocate(SHADOWSTACKREF)
                thread_stacks[gcdata.active_tid] = old_ref
            #
            # no GC operation from here -- switching shadowstack!
            shadow_stack_pool.save_current_state_away(old_ref)
            if new_ref:
                shadow_stack_pool.restore_state_from(new_ref)
            else:
                shadow_stack_pool.start_fresh_new_state()
            # done
            #
            gcdata.active_tid = new_tid
        switch_shadow_stacks._dont_inline_ = True

        def thread_after_fork(result_of_fork, opaqueaddr):
            # we don't need a thread_before_fork in this case, so
            # opaqueaddr == NULL.  This is called after fork().
            if result_of_fork == 0:
                # We are in the child process.  Assumes that only the
                # current thread survived, so frees the shadow stacks
                # of all the other ones.
                gcdata.thread_stacks = None
                # Finally, reset the stored thread IDs, in case it
                # changed because of fork().  Also change the main
                # thread to the current one (because there is not any
                # other left).
                tid = get_tid()
                gcdata.main_tid = tid
                gcdata.active_tid = tid

        self.thread_setup = thread_setup
        self.thread_run_ptr = getfn(thread_run, [], annmodel.s_None,
                                    minimal_transform=False)
        self.thread_die_ptr = getfn(thread_die, [], annmodel.s_None,
                                    minimal_transform=False)
        # no thread_before_fork_ptr here
        self.thread_after_fork_ptr = getfn(thread_after_fork,
                                           [annmodel.SomeInteger(),
                                            SomeAddress()],
                                           annmodel.s_None,
                                           minimal_transform=False)

    def need_stacklet_support(self, gctransformer, getfn):
        # Stacklets can rearrange shadow stacks behind our back, so the
        # helper below forces the next minor walk to scan the full stack.
        from rpython.rlib import _stacklet_shadowstack
        _stacklet_shadowstack.complete_destrptr(gctransformer)

        gcdata = self.gcdata
        def gc_modified_shadowstack():
            gcdata.can_look_at_partial_stack = False

        self.gc_modified_shadowstack_ptr = getfn(gc_modified_shadowstack,
                                                 [], annmodel.s_None)

    def postprocess_graph(self, gct, graph, any_inlining):
        # Run the shadowcolor optimization over the graph; graphs that end
        # up using push/pop roots cannot be inlined later.
        from rpython.memory.gctransform import shadowcolor
        if any_inlining:
            shadowcolor.postprocess_inlining(graph)
        use_push_pop = shadowcolor.postprocess_graph(graph, gct.c_const_gcdata)
        if use_push_pop and graph in gct.graphs_to_inline:
            log.WARNING("%r is marked for later inlining, "
                        "but is using push/pop roots. Disabled" % (graph,))
            del gct.graphs_to_inline[graph]
# ____________________________________________________________
class ShadowStackPool(object):
"""Manages a pool of shadowstacks.
"""
_alloc_flavor_ = "raw"
root_stack_depth = 163840
def __init__(self, gcdata):
self.unused_full_stack = llmemory.NULL
self.gcdata = gcdata
def initial_setup(self):
self._prepare_unused_stack()
self.start_fresh_new_state()
def allocate(self, SHADOWSTACKREF):
"""Allocate an empty SHADOWSTACKREF object."""
return lltype.malloc(SHADOWSTACKREF, zero=True)
def save_current_state_away(self, shadowstackref):
"""Save the current state away into 'shadowstackref'.
This either works, or raise MemoryError and nothing is done.
To do a switch, first call save_current_state_away() or
forget_current_state(), and then call restore_state_from()
or start_fresh_new_state().
"""
self._prepare_unused_stack()
shadowstackref.base = self.gcdata.root_stack_base
shadowstackref.top = self.gcdata.root_stack_top
ll_assert(shadowstackref.base <= shadowstackref.top,
"save_current_state_away: broken shadowstack")
#
# cannot use llop.gc_writebarrier() here, because
# we are in a minimally-transformed GC helper :-/
gc = self.gcdata.gc
if hasattr(gc.__class__, 'write_barrier'):
shadowstackadr = llmemory.cast_ptr_to_adr(shadowstackref)
gc.write_barrier(shadowstackadr)
#
self.gcdata.root_stack_top = llmemory.NULL # to detect missing restore
def forget_current_state(self):
ll_assert(self.gcdata.root_stack_base == self.gcdata.root_stack_top,
"forget_current_state: shadowstack not empty!")
if self.unused_full_stack:
llmemory.raw_free(self.unused_full_stack)
self.unused_full_stack = self.gcdata.root_stack_base
self.gcdata.root_stack_top = llmemory.NULL # to detect missing restore
def restore_state_from(self, shadowstackref):
ll_assert(bool(shadowstackref.base), "empty shadowstackref!")
ll_assert(shadowstackref.base <= shadowstackref.top,
"restore_state_from: broken shadowstack")
self.gcdata.root_stack_base = shadowstackref.base
self.gcdata.root_stack_top = shadowstackref.top
self.gcdata.can_look_at_partial_stack = False
self._cleanup(shadowstackref)
def start_fresh_new_state(self):
self.gcdata.root_stack_base = self.unused_full_stack
self.gcdata.root_stack_top = self.unused_full_stack
self.unused_full_stack = llmemory.NULL
def _cleanup(self, shadowstackref):
shadowstackref.base = llmemory.NULL
shadowstackref.top = llmemory.NULL
def _prepare_unused_stack(self):
if self.unused_full_stack == llmemory.NULL:
root_stack_size = sizeofaddr * self.root_stack_depth
self.unused_full_stack = llmemory.raw_malloc(root_stack_size)
if self.unused_full_stack == llmemory.NULL:
raise MemoryError
def get_shadowstackref(root_walker, gctransformer):
if hasattr(gctransformer, '_SHADOWSTACKREF'):
return gctransformer._SHADOWSTACKREF
SHADOWSTACKREFPTR = lltype.Ptr(lltype.GcForwardReference())
SHADOWSTACKREF = lltype.GcStruct('ShadowStackRef',
('base', llmemory.Address),
('top', llmemory.Address),
rtti=True)
SHADOWSTACKREFPTR.TO.become(SHADOWSTACKREF)
def customtrace(gc, obj, callback, arg):
obj = llmemory.cast_adr_to_ptr(obj, SHADOWSTACKREFPTR)
walk_stack_root(gc._trace_callback, callback, arg, obj.base, obj.top,
is_minor=False) # xxx optimize?
gc = gctransformer.gcdata.gc
assert not hasattr(gc, 'custom_trace_dispatcher')
# ^^^ create_custom_trace_funcs() must not run before this
gctransformer.translator.rtyper.custom_trace_funcs.append(
(SHADOWSTACKREF, customtrace))
def shadowstack_destructor(shadowstackref):
base = shadowstackref.base
shadowstackref.base = llmemory.NULL
shadowstackref.top = llmemory.NULL
llmemory.raw_free(base)
destrptr = gctransformer.annotate_helper(shadowstack_destructor,
[SHADOWSTACKREFPTR], lltype.Void)
lltype.attachRuntimeTypeInfo(SHADOWSTACKREF, destrptr=destrptr)
gctransformer._SHADOWSTACKREF = SHADOWSTACKREF
return SHADOWSTACKREF
| 41.949468 | 79 | 0.623153 | from rpython.annotator import model as annmodel
from rpython.rtyper.llannotation import SomePtr
from rpython.rlib.debug import ll_assert
from rpython.rlib.nonconst import NonConstant
from rpython.rlib import rgc
from rpython.rlib.objectmodel import specialize
from rpython.rtyper import rmodel
from rpython.rtyper.annlowlevel import llhelper
from rpython.rtyper.lltypesystem import lltype, llmemory
from rpython.rtyper.llannotation import SomeAddress
from rpython.memory.gctransform.framework import (
BaseFrameworkGCTransformer, BaseRootWalker, sizeofaddr)
from rpython.rtyper.rbuiltin import gen_cast
from rpython.memory.gctransform.log import log
class ShadowStackFrameworkGCTransformer(BaseFrameworkGCTransformer):
def annotate_walker_functions(self, getfn):
self.incr_stack_ptr = getfn(self.root_walker.incr_stack,
[annmodel.SomeInteger()],
SomeAddress(),
inline = True)
self.decr_stack_ptr = getfn(self.root_walker.decr_stack,
[annmodel.SomeInteger()],
SomeAddress(),
inline = True)
def build_root_walker(self):
return ShadowStackRootWalker(self)
def push_roots(self, hop, keep_current_args=False):
livevars = self.get_livevars_for_roots(hop, keep_current_args)
self.num_pushs += len(livevars)
hop.genop("gc_push_roots", livevars)
return livevars
def pop_roots(self, hop, livevars):
hop.genop("gc_pop_roots", livevars)
@specialize.call_location()
def walk_stack_root(invoke, arg0, arg1, start, addr, is_minor):
skip = 0
while addr != start:
addr -= sizeofaddr
if skip & 1 == 0:
content = addr.address[0]
n = llmemory.cast_adr_to_int(content)
if n & 1 == 0:
if content:
invoke(arg0, arg1, addr)
else:
if n > 0:
if is_minor:
newcontent = llmemory.cast_int_to_adr(-n)
addr.address[0] = newcontent
skip = n
else:
if is_minor:
return
skip = -n
skip >>= 1
class ShadowStackRootWalker(BaseRootWalker):
def __init__(self, gctransformer):
BaseRootWalker.__init__(self, gctransformer)
gcdata = self.gcdata
gcdata.can_look_at_partial_stack = True
def incr_stack(n):
top = gcdata.root_stack_top
gcdata.root_stack_top = top + n*sizeofaddr
return top
self.incr_stack = incr_stack
def decr_stack(n):
top = gcdata.root_stack_top - n*sizeofaddr
gcdata.root_stack_top = top
return top
self.decr_stack = decr_stack
self.invoke_collect_stack_root = specialize.call_location()(
lambda arg0, arg1, addr: arg0(self.gc, addr))
self.shadow_stack_pool = ShadowStackPool(gcdata)
rsd = gctransformer.root_stack_depth
if rsd is not None:
self.shadow_stack_pool.root_stack_depth = rsd
def push_stack(self, addr):
top = self.incr_stack(1)
top.address[0] = addr
def pop_stack(self):
top = self.decr_stack(1)
return top.address[0]
def setup_root_walker(self):
self.shadow_stack_pool.initial_setup()
BaseRootWalker.setup_root_walker(self)
def walk_stack_roots(self, collect_stack_root, is_minor=False):
# switch, then we also need to disable the 'is_minor'
# optimization. The reason is subtle: we need to walk the whole
# stack because otherwise, we can be in the middle of an
# incremental major collection, and the new stack was just moved
# off a ShadowStackRef object (gctransform/shadowstack.py) which
# was not seen yet. We might completely miss some old objects
# from the parts of that stack that are skipped by this is_minor
# optimization.
gcdata = self.gcdata
if is_minor and not gcdata.can_look_at_partial_stack:
is_minor = False
gcdata.can_look_at_partial_stack = True
walk_stack_root(self.invoke_collect_stack_root, collect_stack_root,
None, gcdata.root_stack_base, gcdata.root_stack_top,
is_minor=is_minor)
def need_thread_support(self, gctransformer, getfn):
from rpython.rlib import rthread # xxx fish
gcdata = self.gcdata
# the interfacing between the threads and the GC is done via
# two completely ad-hoc operations at the moment:
# gc_thread_run and gc_thread_die. See docstrings below.
shadow_stack_pool = self.shadow_stack_pool
SHADOWSTACKREF = get_shadowstackref(self, gctransformer)
# this is a dict {tid: SHADOWSTACKREF}, where the tid for the
# current thread may be missing so far
gcdata.thread_stacks = None
# Return the thread identifier, as an integer.
get_tid = rthread.get_ident
def thread_setup():
tid = get_tid()
gcdata.main_tid = tid
gcdata.active_tid = tid
def thread_run():
tid = rthread.get_or_make_ident()
if gcdata.active_tid != tid:
switch_shadow_stacks(tid)
def thread_die():
tid = get_tid()
if tid == gcdata.main_tid:
return # ignore calls to thread_die() in the main thread
# (which can occur after a fork()).
# we need to switch somewhere else, so go to main_tid
gcdata.active_tid = gcdata.main_tid
thread_stacks = gcdata.thread_stacks
new_ref = thread_stacks[gcdata.active_tid]
try:
del thread_stacks[tid]
except KeyError:
pass
# no more GC operation from here -- switching shadowstack!
shadow_stack_pool.forget_current_state()
shadow_stack_pool.restore_state_from(new_ref)
def switch_shadow_stacks(new_tid):
# we have the wrong shadowstack right now, but it should not matter
thread_stacks = gcdata.thread_stacks
try:
if thread_stacks is None:
gcdata.thread_stacks = thread_stacks = {}
raise KeyError
new_ref = thread_stacks[new_tid]
except KeyError:
new_ref = lltype.nullptr(SHADOWSTACKREF)
try:
old_ref = thread_stacks[gcdata.active_tid]
except KeyError:
# first time we ask for a SHADOWSTACKREF for this active_tid
old_ref = shadow_stack_pool.allocate(SHADOWSTACKREF)
thread_stacks[gcdata.active_tid] = old_ref
#
# no GC operation from here -- switching shadowstack!
shadow_stack_pool.save_current_state_away(old_ref)
if new_ref:
shadow_stack_pool.restore_state_from(new_ref)
else:
shadow_stack_pool.start_fresh_new_state()
# done
#
gcdata.active_tid = new_tid
switch_shadow_stacks._dont_inline_ = True
def thread_after_fork(result_of_fork, opaqueaddr):
# we don't need a thread_before_fork in this case, so
if result_of_fork == 0:
gcdata.thread_stacks = None
tid = get_tid()
gcdata.main_tid = tid
gcdata.active_tid = tid
self.thread_setup = thread_setup
self.thread_run_ptr = getfn(thread_run, [], annmodel.s_None,
minimal_transform=False)
self.thread_die_ptr = getfn(thread_die, [], annmodel.s_None,
minimal_transform=False)
self.thread_after_fork_ptr = getfn(thread_after_fork,
[annmodel.SomeInteger(),
SomeAddress()],
annmodel.s_None,
minimal_transform=False)
def need_stacklet_support(self, gctransformer, getfn):
from rpython.rlib import _stacklet_shadowstack
_stacklet_shadowstack.complete_destrptr(gctransformer)
gcdata = self.gcdata
def gc_modified_shadowstack():
gcdata.can_look_at_partial_stack = False
self.gc_modified_shadowstack_ptr = getfn(gc_modified_shadowstack,
[], annmodel.s_None)
def postprocess_graph(self, gct, graph, any_inlining):
from rpython.memory.gctransform import shadowcolor
if any_inlining:
shadowcolor.postprocess_inlining(graph)
use_push_pop = shadowcolor.postprocess_graph(graph, gct.c_const_gcdata)
if use_push_pop and graph in gct.graphs_to_inline:
log.WARNING("%r is marked for later inlining, "
"but is using push/pop roots. Disabled" % (graph,))
del gct.graphs_to_inline[graph]
class ShadowStackPool(object):
_alloc_flavor_ = "raw"
root_stack_depth = 163840
def __init__(self, gcdata):
self.unused_full_stack = llmemory.NULL
self.gcdata = gcdata
def initial_setup(self):
self._prepare_unused_stack()
self.start_fresh_new_state()
def allocate(self, SHADOWSTACKREF):
return lltype.malloc(SHADOWSTACKREF, zero=True)
def save_current_state_away(self, shadowstackref):
self._prepare_unused_stack()
shadowstackref.base = self.gcdata.root_stack_base
shadowstackref.top = self.gcdata.root_stack_top
ll_assert(shadowstackref.base <= shadowstackref.top,
"save_current_state_away: broken shadowstack")
gc = self.gcdata.gc
if hasattr(gc.__class__, 'write_barrier'):
shadowstackadr = llmemory.cast_ptr_to_adr(shadowstackref)
gc.write_barrier(shadowstackadr)
self.gcdata.root_stack_top = llmemory.NULL
def forget_current_state(self):
ll_assert(self.gcdata.root_stack_base == self.gcdata.root_stack_top,
"forget_current_state: shadowstack not empty!")
if self.unused_full_stack:
llmemory.raw_free(self.unused_full_stack)
self.unused_full_stack = self.gcdata.root_stack_base
self.gcdata.root_stack_top = llmemory.NULL
def restore_state_from(self, shadowstackref):
ll_assert(bool(shadowstackref.base), "empty shadowstackref!")
ll_assert(shadowstackref.base <= shadowstackref.top,
"restore_state_from: broken shadowstack")
self.gcdata.root_stack_base = shadowstackref.base
self.gcdata.root_stack_top = shadowstackref.top
self.gcdata.can_look_at_partial_stack = False
self._cleanup(shadowstackref)
def start_fresh_new_state(self):
self.gcdata.root_stack_base = self.unused_full_stack
self.gcdata.root_stack_top = self.unused_full_stack
self.unused_full_stack = llmemory.NULL
def _cleanup(self, shadowstackref):
shadowstackref.base = llmemory.NULL
shadowstackref.top = llmemory.NULL
def _prepare_unused_stack(self):
if self.unused_full_stack == llmemory.NULL:
root_stack_size = sizeofaddr * self.root_stack_depth
self.unused_full_stack = llmemory.raw_malloc(root_stack_size)
if self.unused_full_stack == llmemory.NULL:
raise MemoryError
def get_shadowstackref(root_walker, gctransformer):
if hasattr(gctransformer, '_SHADOWSTACKREF'):
return gctransformer._SHADOWSTACKREF
SHADOWSTACKREFPTR = lltype.Ptr(lltype.GcForwardReference())
SHADOWSTACKREF = lltype.GcStruct('ShadowStackRef',
('base', llmemory.Address),
('top', llmemory.Address),
rtti=True)
SHADOWSTACKREFPTR.TO.become(SHADOWSTACKREF)
def customtrace(gc, obj, callback, arg):
obj = llmemory.cast_adr_to_ptr(obj, SHADOWSTACKREFPTR)
walk_stack_root(gc._trace_callback, callback, arg, obj.base, obj.top,
is_minor=False)
gc = gctransformer.gcdata.gc
assert not hasattr(gc, 'custom_trace_dispatcher')
gctransformer.translator.rtyper.custom_trace_funcs.append(
(SHADOWSTACKREF, customtrace))
def shadowstack_destructor(shadowstackref):
base = shadowstackref.base
shadowstackref.base = llmemory.NULL
shadowstackref.top = llmemory.NULL
llmemory.raw_free(base)
destrptr = gctransformer.annotate_helper(shadowstack_destructor,
[SHADOWSTACKREFPTR], lltype.Void)
lltype.attachRuntimeTypeInfo(SHADOWSTACKREF, destrptr=destrptr)
gctransformer._SHADOWSTACKREF = SHADOWSTACKREF
return SHADOWSTACKREF
| true | true |
1c34bb5490445702128bd5163553e4461749611f | 774 | py | Python | Python/7/RowWeights/test_row_weight.py | hwakabh/codewars | 7afce5a7424d35abc55c350301ac134f2d3edd3d | [
"MIT"
] | null | null | null | Python/7/RowWeights/test_row_weight.py | hwakabh/codewars | 7afce5a7424d35abc55c350301ac134f2d3edd3d | [
"MIT"
] | 6 | 2020-02-21T17:01:59.000Z | 2021-05-04T07:04:41.000Z | Python/7/RowWeights/test_row_weight.py | hwakabh/codewars | 7afce5a7424d35abc55c350301ac134f2d3edd3d | [
"MIT"
] | null | null | null | from unittest import TestCase
from unittest import main
from row_weights import row_weights
class TestRowWeights(TestCase):
def test_row_weights(self):
test_patterns = [
([80], (80,0)),
([100,50], (100,50)),
([50,60,70,80], (120,140)),
([13,27,49], (62,27)),
([70,58,75,34,91], (236,92)),
([29,83,67,53,19,28,96], (211,164)),
([0], (0,0)),
([100,51,50,100], (150,151)),
([39,84,74,18,59,72,35,61], (207,235)),
([0,1,0], (0,1)),
]
for num, exp in test_patterns:
with self.subTest(num=num, exp=exp):
self.assertEqual(row_weights(array=num), exp)
if __name__ == "__main__":
main(verbosity=2)
| 27.642857 | 61 | 0.494832 | from unittest import TestCase
from unittest import main
from row_weights import row_weights
class TestRowWeights(TestCase):
def test_row_weights(self):
test_patterns = [
([80], (80,0)),
([100,50], (100,50)),
([50,60,70,80], (120,140)),
([13,27,49], (62,27)),
([70,58,75,34,91], (236,92)),
([29,83,67,53,19,28,96], (211,164)),
([0], (0,0)),
([100,51,50,100], (150,151)),
([39,84,74,18,59,72,35,61], (207,235)),
([0,1,0], (0,1)),
]
for num, exp in test_patterns:
with self.subTest(num=num, exp=exp):
self.assertEqual(row_weights(array=num), exp)
if __name__ == "__main__":
main(verbosity=2)
| true | true |
1c34bc22a433fc208d0e12b536b1c70a65be1446 | 2,802 | py | Python | anti_sybil/tests/test2.2.py | abramsymons/BrightID-AntiSybil | 86161afe654f46475944ebdb3b5453a1723180d9 | [
"ISC"
] | null | null | null | anti_sybil/tests/test2.2.py | abramsymons/BrightID-AntiSybil | 86161afe654f46475944ebdb3b5453a1723180d9 | [
"ISC"
] | null | null | null | anti_sybil/tests/test2.2.py | abramsymons/BrightID-AntiSybil | 86161afe654f46475944ebdb3b5453a1723180d9 | [
"ISC"
] | 1 | 2020-03-28T05:05:19.000Z | 2020-03-28T05:05:19.000Z | # Test if GroupSybilRank works better than SybilRank
import sys
sys.path.append('..')
import os
import copy
import algorithms
import graphs
from utils import *
OUTPUT_FOLDER = './outputs/tests2.2/'
main_graph_params = {
'num_seed_groups': 0,
'max_known_ratio': 1,
'avg_known_ratio': .5,
'min_known_ratio': .2,
'num_attacker_to_num_honest': .1,
'num_sybil_to_num_attacker': 2,
'sybil_to_attackers_con': .2,
}
algorithm_params = {
'min_degree': 5,
'accumulative': False,
'weaken_under_min': False,
'nonlinear_distribution': True,
'group_edge_weight': 2,
}
main_graph_params['num_groups'] = 50
main_graph_params['min_group_nodes'] = 5
main_graph_params['max_group_nodes'] = 15
est_num_nodes = main_graph_params['num_groups'] * (main_graph_params['min_group_nodes'] + main_graph_params['min_group_nodes']) / 2
main_graph_params['num_seed_nodes'] = int(.1 * est_num_nodes)
main_graph_params['num_joint_node'] = est_num_nodes
main_graph_params['num_inter_group_con'] = est_num_nodes
graph_params = copy.copy(main_graph_params)
graph = graphs.generators.group_based.generate(graph_params)
algorithms.GroupSybilRank(graph, algorithm_params).rank()
output2 = generate_output(graph)
draw_graph(graph, os.path.join(OUTPUT_FOLDER, '1.html'))
reset_ranks(graph)
graph_params = copy.copy(main_graph_params)
graph_params['num_joint_node'] = graph_params['num_joint_node'] / 10
graph_params['num_inter_group_con'] = graph_params['num_inter_group_con'] / 10
graph = graphs.generators.group_based.generate(graph_params)
algorithms.GroupSybilRank(graph, algorithm_params).rank()
output4 = generate_output(graph)
draw_graph(graph, os.path.join(OUTPUT_FOLDER, '2.html'))
reset_ranks(graph)
graph_params = copy.copy(main_graph_params)
graph_params['num_seed_nodes'] = graph_params['num_seed_nodes'] * 4
graph = graphs.generators.group_based.generate(graph_params)
algorithms.GroupSybilRank(graph, algorithm_params).rank()
output6 = generate_output(graph)
draw_graph(graph, os.path.join(OUTPUT_FOLDER, '3.html'))
reset_ranks(graph)
graph_params = copy.copy(main_graph_params)
graph_params['sybil_to_attackers_con'] = .7
graph = graphs.generators.group_based.generate(graph_params)
algorithms.GroupSybilRank(graph, algorithm_params).rank()
output8 = generate_output(graph)
draw_graph(graph, os.path.join(OUTPUT_FOLDER, '4.html'))
reset_ranks(graph)
graph_params = copy.copy(main_graph_params)
graph_params['num_seed_groups'] = 5
graph = graphs.generators.group_based.generate(graph_params)
algorithms.GroupSybilRank(graph, algorithm_params).rank()
output10 = generate_output(graph)
draw_graph(graph, os.path.join(OUTPUT_FOLDER, '5.html'))
reset_ranks(graph)
write_output_file([output2, output4, output6, output8, output10], os.path.join(OUTPUT_FOLDER, 'result.csv'))
| 35.025 | 131 | 0.785867 |
import sys
sys.path.append('..')
import os
import copy
import algorithms
import graphs
from utils import *
OUTPUT_FOLDER = './outputs/tests2.2/'
main_graph_params = {
'num_seed_groups': 0,
'max_known_ratio': 1,
'avg_known_ratio': .5,
'min_known_ratio': .2,
'num_attacker_to_num_honest': .1,
'num_sybil_to_num_attacker': 2,
'sybil_to_attackers_con': .2,
}
algorithm_params = {
'min_degree': 5,
'accumulative': False,
'weaken_under_min': False,
'nonlinear_distribution': True,
'group_edge_weight': 2,
}
main_graph_params['num_groups'] = 50
main_graph_params['min_group_nodes'] = 5
main_graph_params['max_group_nodes'] = 15
est_num_nodes = main_graph_params['num_groups'] * (main_graph_params['min_group_nodes'] + main_graph_params['min_group_nodes']) / 2
main_graph_params['num_seed_nodes'] = int(.1 * est_num_nodes)
main_graph_params['num_joint_node'] = est_num_nodes
main_graph_params['num_inter_group_con'] = est_num_nodes
graph_params = copy.copy(main_graph_params)
graph = graphs.generators.group_based.generate(graph_params)
algorithms.GroupSybilRank(graph, algorithm_params).rank()
output2 = generate_output(graph)
draw_graph(graph, os.path.join(OUTPUT_FOLDER, '1.html'))
reset_ranks(graph)
graph_params = copy.copy(main_graph_params)
graph_params['num_joint_node'] = graph_params['num_joint_node'] / 10
graph_params['num_inter_group_con'] = graph_params['num_inter_group_con'] / 10
graph = graphs.generators.group_based.generate(graph_params)
algorithms.GroupSybilRank(graph, algorithm_params).rank()
output4 = generate_output(graph)
draw_graph(graph, os.path.join(OUTPUT_FOLDER, '2.html'))
reset_ranks(graph)
graph_params = copy.copy(main_graph_params)
graph_params['num_seed_nodes'] = graph_params['num_seed_nodes'] * 4
graph = graphs.generators.group_based.generate(graph_params)
algorithms.GroupSybilRank(graph, algorithm_params).rank()
output6 = generate_output(graph)
draw_graph(graph, os.path.join(OUTPUT_FOLDER, '3.html'))
reset_ranks(graph)
graph_params = copy.copy(main_graph_params)
graph_params['sybil_to_attackers_con'] = .7
graph = graphs.generators.group_based.generate(graph_params)
algorithms.GroupSybilRank(graph, algorithm_params).rank()
output8 = generate_output(graph)
draw_graph(graph, os.path.join(OUTPUT_FOLDER, '4.html'))
reset_ranks(graph)
graph_params = copy.copy(main_graph_params)
graph_params['num_seed_groups'] = 5
graph = graphs.generators.group_based.generate(graph_params)
algorithms.GroupSybilRank(graph, algorithm_params).rank()
output10 = generate_output(graph)
draw_graph(graph, os.path.join(OUTPUT_FOLDER, '5.html'))
reset_ranks(graph)
write_output_file([output2, output4, output6, output8, output10], os.path.join(OUTPUT_FOLDER, 'result.csv'))
| true | true |
1c34bd37fbb2b9caffb2adb5ac2f06077b9b84ba | 152 | py | Python | authlib/repo/provider/sqlite/__init__.py | jmrafael/Streamlit-Authentication | 208e9d8f116d25d420f41b6d01c13f063052805b | [
"MIT"
] | 25 | 2021-08-25T12:51:14.000Z | 2022-03-29T22:56:21.000Z | authlib/repo/provider/sqlite/__init__.py | jmrafael/Streamlit-Authentication | 208e9d8f116d25d420f41b6d01c13f063052805b | [
"MIT"
] | 6 | 2021-09-09T23:09:51.000Z | 2022-03-11T11:11:23.000Z | authlib/repo/provider/sqlite/__init__.py | jmrafael/Streamlit-Authentication | 208e9d8f116d25d420f41b6d01c13f063052805b | [
"MIT"
] | 7 | 2021-09-04T08:06:21.000Z | 2022-03-07T23:40:02.000Z | from .. import base_provider, const, trace_activity, AppError, DatabaseError
from .. import tnow_iso , tnow_iso_str, dt_from_str, dt_from_ts, dt_to_str
| 50.666667 | 76 | 0.809211 | from .. import base_provider, const, trace_activity, AppError, DatabaseError
from .. import tnow_iso , tnow_iso_str, dt_from_str, dt_from_ts, dt_to_str
| true | true |
1c34be80c60fd18a9c5653a8384deddadaa3addd | 9,704 | py | Python | docs/conf.py | anosillus/cookiecutter-poetry | 11b2e238e0c2c61d0a7cc6e199787b1bd7e2f413 | [
"MIT"
] | 6 | 2021-01-07T15:39:49.000Z | 2022-03-25T10:06:45.000Z | docs/conf.py | anosillus/cookiecutter-poetry | 11b2e238e0c2c61d0a7cc6e199787b1bd7e2f413 | [
"MIT"
] | 16 | 2020-02-24T11:42:21.000Z | 2021-08-31T14:22:21.000Z | docs/conf.py | anosillus/cookiecutter-poetry | 11b2e238e0c2c61d0a7cc6e199787b1bd7e2f413 | [
"MIT"
] | 14 | 2020-05-17T15:59:01.000Z | 2022-03-12T03:19:17.000Z | #!/usr/bin/env python3
#
# cookiecutter-poetry documentation build configuration file, created by
# sphinx-quickstart on Sun Dec 13 09:13:01 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import re
from datetime import date
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.viewcode",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "cookiecutter-poetry"
copyright = f"2019 {date.today().year}, Johan Vergeer"
author = "Johan Vergeer"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = "0.1.0"
# The short X.Y version.
version = re.match(r"^([0-9]+\.){2}[0-9]+", release).group(0)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "cookiecutter-poetrydoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"cookiecutter-poetry.tex",
"cookiecutter-poetry Documentation",
"Johan Vergeer",
"manual",
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"cookiecutter-poetry",
"cookiecutter-poetry Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"cookiecutter-poetry",
"cookiecutter-poetry Documentation",
author,
"cookiecutter-poetry",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
| 32.13245 | 79 | 0.701979 |
import re
from datetime import date
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.viewcode",
]
templates_path = ["_templates"]
source_suffix = ".rst"
master_doc = "index"
project = "cookiecutter-poetry"
copyright = f"2019 {date.today().year}, Johan Vergeer"
author = "Johan Vergeer"
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = "0.1.0"
# The short X.Y version.
version = re.match(r"^([0-9]+\.){2}[0-9]+", release).group(0)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "cookiecutter-poetrydoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"cookiecutter-poetry.tex",
"cookiecutter-poetry Documentation",
"Johan Vergeer",
"manual",
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"cookiecutter-poetry",
"cookiecutter-poetry Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"cookiecutter-poetry",
"cookiecutter-poetry Documentation",
author,
"cookiecutter-poetry",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
| true | true |
1c34bea001d0ca24b933ea842753d3ca852efb64 | 1,555 | py | Python | main_app/urls.py | ncbentley/capstone | bbaa441f362f9ca203435724d07616dd4a43ab94 | [
"PostgreSQL",
"MIT"
] | null | null | null | main_app/urls.py | ncbentley/capstone | bbaa441f362f9ca203435724d07616dd4a43ab94 | [
"PostgreSQL",
"MIT"
] | 7 | 2021-04-08T21:25:57.000Z | 2022-03-12T00:42:19.000Z | main_app/urls.py | ncbentley/capstone | bbaa441f362f9ca203435724d07616dd4a43ab94 | [
"PostgreSQL",
"MIT"
] | null | null | null | from django.urls import path
from . import views
from django.contrib.staticfiles.storage import staticfiles_storage
from django.views.generic.base import RedirectView
urlpatterns = [
path('', views.home, name="home"),
path('login/', views.login, name="login"),
path('signup/', views.signup, name="signup"),
path('logout/', views.logout, name="logout"),
path('attach/<int:project_id>/', views.attach, name="attach"),
path('image/<int:wireframe_id>/', views.image, name="image"),
path('profile/<int:profile_id>/', views.profile, name="profile"),
path('projects/', views.projects, name="projects"),
path('projects/<int:project_id>/', views.project, name="project"),
path('projects/<int:project_id>/sprints/', views.sprints, name="sprints"),
path('projects/<int:project_id>/sprints/<int:sprint_id>/', views.sprint, name="sprint"),
path('projects/<int:project_id>/sprints/<int:sprint_id>/tasks', views.tasks, name='tasks'),
path('projects/<int:project_id>/sprints/<int:sprint_id>/tasks/<int:task_id>', views.task, name='task'),
path('projects/<int:project_id>/pages/', views.pages, name="pages"),
path('projects/<int:project_id>/pages/<int:page_id>/', views.page, name="page"),
path('projects/<int:project_id>/pages/<int:page_id>/wireframes/', views.wireframes, name="wireframes"),
path('projects/<int:project_id>/pages/<int:page_id>/wireframe/<int:wireframe_id>/', views.wireframe, name="wireframe"),
path('togglecomplete/<int:task_id>/', views.toggle_complete, name="togglecomplete")
] | 62.2 | 123 | 0.700965 | from django.urls import path
from . import views
from django.contrib.staticfiles.storage import staticfiles_storage
from django.views.generic.base import RedirectView
urlpatterns = [
path('', views.home, name="home"),
path('login/', views.login, name="login"),
path('signup/', views.signup, name="signup"),
path('logout/', views.logout, name="logout"),
path('attach/<int:project_id>/', views.attach, name="attach"),
path('image/<int:wireframe_id>/', views.image, name="image"),
path('profile/<int:profile_id>/', views.profile, name="profile"),
path('projects/', views.projects, name="projects"),
path('projects/<int:project_id>/', views.project, name="project"),
path('projects/<int:project_id>/sprints/', views.sprints, name="sprints"),
path('projects/<int:project_id>/sprints/<int:sprint_id>/', views.sprint, name="sprint"),
path('projects/<int:project_id>/sprints/<int:sprint_id>/tasks', views.tasks, name='tasks'),
path('projects/<int:project_id>/sprints/<int:sprint_id>/tasks/<int:task_id>', views.task, name='task'),
path('projects/<int:project_id>/pages/', views.pages, name="pages"),
path('projects/<int:project_id>/pages/<int:page_id>/', views.page, name="page"),
path('projects/<int:project_id>/pages/<int:page_id>/wireframes/', views.wireframes, name="wireframes"),
path('projects/<int:project_id>/pages/<int:page_id>/wireframe/<int:wireframe_id>/', views.wireframe, name="wireframe"),
path('togglecomplete/<int:task_id>/', views.toggle_complete, name="togglecomplete")
] | true | true |
1c34bf3427f420a40915d8c91ffc86d15b8ccb54 | 6,077 | py | Python | tarot_juicer/settings.py | abubakarA-Dot/tarot_juicer | dbc68f73d6ae3d73f50f4472a063b5363febc7b8 | [
"MIT"
] | 4 | 2020-02-27T00:11:01.000Z | 2020-05-11T07:59:55.000Z | tarot_juicer/settings.py | abubakarA-Dot/tarot_juicer | dbc68f73d6ae3d73f50f4472a063b5363febc7b8 | [
"MIT"
] | 16 | 2019-12-20T06:57:54.000Z | 2020-05-19T01:00:18.000Z | tarot_juicer/settings.py | abubakarA-Dot/tarot_juicer | dbc68f73d6ae3d73f50f4472a063b5363febc7b8 | [
"MIT"
] | 10 | 2019-12-25T23:38:33.000Z | 2020-05-11T14:15:15.000Z | """
Django settings for tarot_juicer project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
from django.contrib.messages import constants as messages
import os
import django_heroku
from decouple import config
import dj_database_url
from dotenv import load_dotenv
from . import notification
load_dotenv()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# As per the django documentation
DEFAULT_AUTO_FIELD='django.db.models.AutoField'
#Handle session is not Json Serializable
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECRET_KEY = os.environ['DJANGO_SECRET_KEY']
# NOTE(review): a literal key string previously sat in a comment on this
# line; if it was ever a live SECRET_KEY it must be rotated.
# NOTE(review): if the SECRET_KEY env var is unset, str(None) yields the
# literal string "None", which Django will silently accept as the key --
# verify every deployment sets SECRET_KEY.
SECRET_KEY = str(os.getenv('SECRET_KEY'))
# DEBUG is on unless the DEBUG env var is exactly the string 'False'.
# The two branches flip the HTTPS/HSTS hardening settings together so
# local development works over plain HTTP while production enforces TLS.
if os.environ.get('DEBUG', '') != 'False':
    # These are testing settings:
    DEBUG = True # local + staging
    SECURE_HSTS_SECONDS = 0
    SECURE_SSL_REDIRECT = False
    SESSION_COOKIE_SECURE = False
    CSRF_COOKIE_SECURE = False
    SECURE_HSTS_PRELOAD = False
    SECURE_HSTS_INCLUDE_SUBDOMAINS = False
    # Added colored output as red
    notification.messages_print('error', 'Secure Mode Disabled: DEBUG MODE IS TRUE')
else:
    # These are prod settings:
    DEBUG = False # Set to `False` for prod when done testing (for when the project is finally Live)
    SECURE_HSTS_SECONDS = 7200
    SECURE_SSL_REDIRECT = True
    SESSION_COOKIE_SECURE = True
    CSRF_COOKIE_SECURE = True
    SECURE_HSTS_PRELOAD = True
    SECURE_HSTS_INCLUDE_SUBDOMAINS = True
    # Added colored output as green
    notification.messages_print('success', 'Secure Mode Enabled: DEBUG MODE IS FALSE')
# NOTE(review): the ['*'] fallback allows any Host header when
# ALLOWED_HOSTS is unset -- acceptable only behind a trusted proxy.
ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS').split(' ') if 'ALLOWED_HOSTS' in os.environ else ['*']
# Admin URL prefix is configurable so the admin can live off /admin/.
ADMIN_PATH = os.environ.get('ADMIN_PATH')+'/' if 'ADMIN_PATH' in os.environ else 'admin/'
# Matomo
MATOMO_DOMAIN_PATH = 'tarot-matomo.herokuapp.com'
MATOMO_SITE_ID = '1'
# Application definition
INSTALLED_APPS = [
'essays.apps.EssaysConfig',
'landings.apps.LandingsConfig',
'generators.apps.GeneratorsConfig',
'work_orders.apps.WorkOrdersConfig',
'accounts.apps.AccountsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'analytical',
"django_extensions",
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'tarot_juicer.middlewares.authentication_middleware',
'tarot_juicer.middlewares.autologout_middleware',
'tarot_juicer.protected_path_middleware.path_protection_middleware',
]
ROOT_URLCONF = 'tarot_juicer.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.static',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'libraries': { # Adding this section should work around the issue.
'staticfiles': 'django.templatetags.static',
},
},
},
]
WSGI_APPLICATION = 'tarot_juicer.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# To use AWS Postgres db’s locally run:
# `export DATABASE_URL='postgres://USER:PASSWORD@HOST:PORT/NAME'`
DATABASES = {}
DATABASES = {
'default': dj_database_url.config(
default='sqlite:///'+os.path.join(BASE_DIR, 'db.sqlite3'),
conn_max_age=600)
}
notification.messages_print('warning', 'Database Config: ' + str(DATABASES))
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'tarot_juicer/static/')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
MEDIA_ROOT = os.path.join(STATIC_ROOT, 'img')
MEDIA_URL = 'img/'
django_heroku.settings(locals())
# Because the app is not deployed to a custom domain
if 'OPTIONS' in DATABASES['default']:
del DATABASES['default']['OPTIONS']['sslmode']
MESSAGE_TAGS = {
messages.ERROR: 'danger',
}
| 30.847716 | 103 | 0.720421 |
from django.contrib.messages import constants as messages
import os
import django_heroku
from decouple import config
import dj_database_url
from dotenv import load_dotenv
from . import notification
load_dotenv()
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DEFAULT_AUTO_FIELD='django.db.models.AutoField'
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
str(os.getenv('SECRET_KEY'))
if os.environ.get('DEBUG', '') != 'False':
DEBUG = True
SECURE_HSTS_SECONDS = 0
SECURE_SSL_REDIRECT = False
SESSION_COOKIE_SECURE = False
CSRF_COOKIE_SECURE = False
SECURE_HSTS_PRELOAD = False
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
notification.messages_print('error', 'Secure Mode Disabled: DEBUG MODE IS TRUE')
else:
DEBUG = False
SECURE_HSTS_SECONDS = 7200
SECURE_SSL_REDIRECT = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SECURE_HSTS_PRELOAD = True
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
notification.messages_print('success', 'Secure Mode Enabled: DEBUG MODE IS FALSE')
ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS').split(' ') if 'ALLOWED_HOSTS' in os.environ else ['*']
ADMIN_PATH = os.environ.get('ADMIN_PATH')+'/' if 'ADMIN_PATH' in os.environ else 'admin/'
MATOMO_DOMAIN_PATH = 'tarot-matomo.herokuapp.com'
MATOMO_SITE_ID = '1'
INSTALLED_APPS = [
'essays.apps.EssaysConfig',
'landings.apps.LandingsConfig',
'generators.apps.GeneratorsConfig',
'work_orders.apps.WorkOrdersConfig',
'accounts.apps.AccountsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'analytical',
"django_extensions",
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'tarot_juicer.middlewares.authentication_middleware',
'tarot_juicer.middlewares.autologout_middleware',
'tarot_juicer.protected_path_middleware.path_protection_middleware',
]
ROOT_URLCONF = 'tarot_juicer.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.static',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'libraries': {
'staticfiles': 'django.templatetags.static',
},
},
},
]
WSGI_APPLICATION = 'tarot_juicer.wsgi.application'
ASES = {}
DATABASES = {
'default': dj_database_url.config(
default='sqlite:///'+os.path.join(BASE_DIR, 'db.sqlite3'),
conn_max_age=600)
}
notification.messages_print('warning', 'Database Config: ' + str(DATABASES))
S = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'tarot_juicer/static/')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
MEDIA_ROOT = os.path.join(STATIC_ROOT, 'img')
MEDIA_URL = 'img/'
django_heroku.settings(locals())
if 'OPTIONS' in DATABASES['default']:
del DATABASES['default']['OPTIONS']['sslmode']
MESSAGE_TAGS = {
messages.ERROR: 'danger',
}
| true | true |
1c34c2de6d64e8d4f28b0beeb9cb750fec80460d | 15,433 | py | Python | 2D/convert_raw_to_hdf5.py | basharbme/unet | 32aba391c280a6d4f371d0bb19c2f60f6945da6d | [
"Apache-2.0"
] | 1 | 2019-09-08T01:48:17.000Z | 2019-09-08T01:48:17.000Z | 2D/convert_raw_to_hdf5.py | motazsaad/unet | 85117087c1cb73c81a8eea4e127fae7cb47b4fe1 | [
"Apache-2.0"
] | null | null | null | 2D/convert_raw_to_hdf5.py | motazsaad/unet | 85117087c1cb73c81a8eea4e127fae7cb47b4fe1 | [
"Apache-2.0"
] | 2 | 2021-03-04T05:41:34.000Z | 2021-08-25T00:25:29.000Z | #!/usr/bin/env python
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: EPL-2.0
#
"""
Converts the Medical Decathlon raw Nifti files into
single HDF5 file for easier use in TensorFlow/Keras.
You'll need to download the raw dataset from
the Medical Decathlon website (http://medicaldecathlon.com),
extract the data (untar), and run this script.
The raw dataset has the CC-BY-SA 4.0 license.
https://creativecommons.org/licenses/by-sa/4.0/
For BraTS (Task 1):
INPUT CHANNELS: "modality": {
"0": "FLAIR",
"1": "T1w",
"2": "t1gd",
"3": "T2w"
},
LABEL_CHANNELS: "labels": {
"0": "background",
"1": "edema",
"2": "non-enhancing tumor",
"3": "enhancing tumour"
}
"""
import os
import nibabel as nib # pip install nibabel
import numpy as np
from tqdm import tqdm # pip install tqdm
import h5py # pip install h5py
import json
import argparse
# Command-line interface.  The resulting ``args`` object is a module-level
# global consumed by the preprocessing helpers below (``args.resize``), so
# argv is parsed at import time; importing this module therefore requires a
# valid (or empty) command line.
parser = argparse.ArgumentParser(
    description="Convert Decathlon raw Nifti data "
                "(http://medicaldecathlon.com) "
                "files to Numpy data files",
    add_help=True, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--data_path",
                    default="../../data/decathlon/Task01_BrainTumour/",
                    help="Path to the raw BraTS datafiles")
parser.add_argument("--save_path",
                    default="../../data/decathlon/",
                    help="Folder to save Numpy data files")
parser.add_argument("--output_filename",
                    default="decathlon_brats.h5",
                    help="Name of the output HDF5 file")
parser.add_argument("--resize", type=int, default=144,
                    help="Resize height and width to this size. "
                    "Original size = 240")
parser.add_argument("--split", type=float, default=0.85,
                    help="Train/test split ratio")
args = parser.parse_args()
def crop_center(img, cropx, cropy, cropz):
    """Return the central (cropx, cropy, cropz) region of a 4D array.

    ``img`` is indexed as (X, Y, Z, C) and the channel axis is kept
    whole.  A requested crop larger than a dimension is clamped to that
    dimension, so the result is never padded.  If we are using a 2D
    model, the z dimension is simply stacked downstream.
    """
    x_dim, y_dim, z_dim, _ = img.shape  # requires a 4D input

    spans = []
    for full, want in zip((x_dim, y_dim, z_dim), (cropx, cropy, cropz)):
        start = max(full // 2 - want // 2, 0)   # keep start index >= 0
        stop = min(start + want, full)          # keep stop index <= size
        spans.append(slice(start, stop))

    return img[spans[0], spans[1], spans[2], :]
def normalize_img(img):
    """Standardize each channel of ``img`` in place to zero mean, unit std.

    Channel-wise z-scoring is one of the most important preprocessing
    steps: centering the pixel values at 0 with a standard deviation of
    1 helps the model train faster and more accurately.  The mutated
    array is also returned for call-chaining convenience.
    """
    n_channels = img.shape[3]
    for c in range(n_channels):
        chan = img[:, :, :, c]
        img[:, :, :, c] = (chan - np.mean(chan)) / np.std(chan)
    return img
def attach_attributes(df, json_data, name):
    """Persist ``json_data`` as a variable-length string dataset.

    Accepts either a single string or a sequence of strings; a lone
    string is stored as a one-element dataset.

    Args:
        df: open ``h5py.File`` (or group) to write into.
        json_data: str or sequence of str to store.
        name: name of the dataset to create.
    """
    # isinstance (rather than `type(...) is str`) also accepts str subclasses.
    if isinstance(json_data, str):
        length = 1
    else:
        length = len(json_data)

    dt = h5py.special_dtype(vlen=str)  # variable-length UTF-8 strings
    dset = df.create_dataset(name, (length,), dtype=dt)
    dset[:] = json_data
def preprocess_inputs(img):
    """Prepare one MRI volume for training.

    Ensures a trailing channel axis, center-crops to the CLI
    ``--resize`` cube, z-scores each channel, then swaps the X and Z
    axes so the volume becomes a stack of 2D slices along axis 0.

    For the BraTS subset the input channels are:
        0: FLAIR  (T2-weighted Fluid-Attenuated Inversion Recovery MRI)
        1: T1w    (T1-weighted MRI)
        2: t1gd   (T1-gadolinium contrast MRI)
        3: T2w    (T2-weighted MRI)
    """
    if img.ndim != 4:
        # Single-channel volume: add an explicit channel axis at the end.
        img = np.expand_dims(img, -1)

    side = args.resize
    cropped = crop_center(img, side, side, side)
    normalized = normalize_img(cropped)

    # Swap axis 0 (X) with axis -2 (Z) so slices stack along axis 0.
    return np.swapaxes(np.array(normalized), 0, -2)
def preprocess_labels(msk):
    """Prepare one ground-truth mask volume.

    Center-crops the mask to the CLI ``--resize`` cube, merges every
    tumour class into a single foreground label, and swaps the X and Z
    axes so the mask matches the 2D slice stacking of the images.

    For the BraTS subset the raw label values are:
        0: background (no tumor)
        1: edema (swelling around tumor)
        2: non-enhancing tumor
        3: enhancing tumour (gadolinium-contrast enhanced)
    """
    if msk.ndim != 4:
        msk = np.expand_dims(msk, -1)  # guarantee a channel axis

    side = args.resize
    msk = crop_center(msk, side, side, side)

    # Binary segmentation: collapse all tumour classes into label 1.
    # Combining masks like this assumes value 0 is the background.
    msk[msk > 1] = 1

    return np.swapaxes(np.array(msk), 0, -2)
def _split_file_lists(indices, fileIdx):
    """Return parallel ([image files], [label files]) for the given indices."""
    image_files = [fileIdx[idx]["image"] for idx in indices]
    label_files = [fileIdx[idx]["label"] for idx in indices]
    return image_files, label_files


def _save_preprocessed_dataset(hdf_file, dset_name, file_list, dataDir,
                               preprocess_fn):
    """Append every preprocessed Nifti volume in ``file_list`` to one dataset.

    Each volume is loaded, run through ``preprocess_fn`` (which yields a
    stack of 2D slices along axis 0), and appended to the gzip-compressed
    HDF5 dataset ``dset_name``.  The dataset is created on the first file
    with an unbounded slice axis so subsequent files can extend it.
    """
    dset = None
    for fname in tqdm(file_list):
        data_filename = os.path.join(dataDir, fname)
        # Nibabel reads the file as X, Y, Z, C
        img = np.array(nib.load(data_filename).dataobj)
        img = preprocess_fn(img)
        num_rows = img.shape[0]

        if dset is None:
            # First volume defines the slice geometry; axis 0 is extendable.
            dset = hdf_file.create_dataset(dset_name,
                                           img.shape,
                                           maxshape=(None,) + img.shape[1:],
                                           dtype=float,
                                           compression="gzip")
            dset[:] = img
        else:
            row = dset.shape[0]                  # current number of slices
            dset.resize(row + num_rows, axis=0)  # grow the slice axis
            dset[row:(row + num_rows), :] = img  # insert the new slices


def convert_raw_data_to_hdf5(trainIdx, validateIdx, testIdx, fileIdx,
                             filename, dataDir, json_data):
    """
    Go through the Decathlon dataset.json file.
    We've already split into training and validation subsets.
    Read in Nifti format files. Crop images and masks.
    Save to HDF5 format.
    This code will convert the 3D images and masks
    into a stack of 2D slices.
    """
    hdf_file = h5py.File(filename, "w")

    # Save the dataset attributes
    attach_attributes(hdf_file, str(json_data["modality"]), "modalities")
    attach_attributes(hdf_file, json_data["licence"], "license")
    attach_attributes(hdf_file, json_data["reference"], "reference")
    attach_attributes(hdf_file, json_data["name"], "name")
    attach_attributes(hdf_file, json_data["description"], "description")
    attach_attributes(hdf_file, json_data["release"], "release")
    attach_attributes(
        hdf_file, json_data["tensorImageSize"], "tensorImageSize")

    # Resolve image/label filenames for each split.
    train_image_files, train_label_files = _split_file_lists(
        trainIdx, fileIdx)
    validate_image_files, validate_label_files = _split_file_lists(
        validateIdx, fileIdx)
    test_image_files, test_label_files = _split_file_lists(testIdx, fileIdx)

    attach_attributes(hdf_file, train_image_files, "training_input_files")
    attach_attributes(hdf_file, train_label_files, "training_label_files")
    attach_attributes(hdf_file, validate_image_files, "validation_input_files")
    attach_attributes(hdf_file, validate_label_files, "validation_label_files")
    attach_attributes(hdf_file, test_image_files, "testing_input_files")
    attach_attributes(hdf_file, test_label_files, "testing_label_files")

    # Report raw vs. cropped shapes using the first training volume.
    print("Data shapes")
    print("===========")
    print("n.b. All tensors converted to stacks of 2D slices.")
    print("If you want true 3D tensors, then modify this code appropriately.")

    data_filename = os.path.join(dataDir, train_image_files[0])
    img = np.array(nib.load(data_filename).dataobj)
    print("Raw Image shape = ", img.shape)
    crop_shape = preprocess_inputs(img).shape[1:]
    print("Cropped Image shape = (?, {}, {}, {})".format(crop_shape[0],
                                                         crop_shape[1],
                                                         crop_shape[2]))

    data_filename = os.path.join(dataDir, train_label_files[0])
    msk = np.array(nib.load(data_filename).dataobj)
    print("Raw Masks shape = ", msk.shape)
    crop_shape = preprocess_labels(msk).shape[1:]
    print("Cropped Masks shape = (?, {}, {}, {})".format(crop_shape[0],
                                                         crop_shape[1],
                                                         crop_shape[2]))

    # The six (split, kind) combinations are saved identically; drive the
    # shared helper from a plan instead of six copy-pasted loops.
    save_plan = [
        ("Step 1 of 6. Save training set images.",
         "imgs_train", train_image_files, preprocess_inputs),
        ("Step 2 of 6. Save validation set images.",
         "imgs_validation", validate_image_files, preprocess_inputs),
        ("Step 3 of 6. Save testing set images.",
         "imgs_testing", test_image_files, preprocess_inputs),
        ("Step 4 of 6. Save training set masks.",
         "msks_train", train_label_files, preprocess_labels),
        ("Step 5 of 6. Save validation set masks.",
         "msks_validation", validate_label_files, preprocess_labels),
        ("Step 6 of 6. Save testing set masks.",
         "msks_testing", test_label_files, preprocess_labels),
    ]
    for message, dset_name, file_list, preprocess_fn in save_plan:
        print(message)
        _save_preprocessed_dataset(hdf_file, dset_name, file_list,
                                   dataDir, preprocess_fn)

    hdf_file.close()
    print("Finished processing.")
    print("HDF5 saved to {}".format(filename))
if __name__ == "__main__":

    print("Converting Decathlon raw Nifti data files to single "
          "training and validation HDF5 data file.")
    print(args)

    # Output goes into a resolution-specific subfolder, e.g. ".../144x144/".
    save_dir = os.path.join(
        args.save_path, "{}x{}/".format(args.resize, args.resize))

    # Create directory; tolerate it already existing, re-raise anything else.
    try:
        os.makedirs(save_dir)
    except OSError:
        if not os.path.isdir(save_dir):
            raise

    filename = os.path.join(save_dir, args.output_filename)

    # Remove any stale output file so h5py starts from a clean slate.
    if os.path.exists(filename):
        print("Removing existing data file: {}".format(filename))
        os.remove(filename)

    # Get the training file names from the data directory.
    # Decathlon should always have a dataset.json file in the
    # subdirectory which lists the experiment information including
    # the input and label filenames.
    json_filename = os.path.join(args.data_path, "dataset.json")

    try:
        with open(json_filename, "r") as fp:
            experiment_data = json.load(fp)
    except IOError:
        print("File {} doesn't exist. It should be part of the "
              "Decathlon directory".format(json_filename))
        # BUGFIX: previously the error was swallowed here and the script
        # crashed a few lines later with a confusing NameError on
        # `experiment_data`. Abort immediately instead.
        raise

    # Print information about the Decathlon experiment data
    print("*" * 30)
    print("=" * 30)
    print("Dataset name:        ", experiment_data["name"])
    print("Dataset description: ", experiment_data["description"])
    print("Tensor image size:   ", experiment_data["tensorImageSize"])
    print("Dataset release:     ", experiment_data["release"])
    print("Dataset reference:   ", experiment_data["reference"])
    print("Dataset license:     ", experiment_data["licence"])  # sic
    print("=" * 30)
    print("*" * 30)

    # Randomize the file list. Then separate into training and
    # validation lists. We won't use the testing set since we
    # don't have ground truth masks for this; instead we'll
    # split the validation set into separate test and validation
    # sets.
    # Set the random seed so that always get same random mix
    np.random.seed(816)
    numFiles = experiment_data["numTraining"]
    idxList = np.arange(numFiles)  # List of file indices
    randomList = np.random.random(numFiles)  # List of random numbers
    # Random numbers go from 0 to 1. So anything above
    # args.split is in the validation list.
    trainList = idxList[randomList < args.split]

    otherList = idxList[randomList >= args.split]
    randomList = np.random.random(len(otherList))  # List of random numbers
    validateList = otherList[randomList >= 0.5]
    testList = otherList[randomList < 0.5]

    convert_raw_data_to_hdf5(trainList, validateList, testList,
                             experiment_data["training"],
                             filename, args.data_path,
                             experiment_data)
| 30.379921 | 80 | 0.671872 |
import os
import nibabel as nib
import numpy as np
from tqdm import tqdm
import h5py
import json
import argparse
parser = argparse.ArgumentParser(
description="Convert Decathlon raw Nifti data "
"(http://medicaldecathlon.com) "
"files to Numpy data files",
add_help=True, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--data_path",
default="../../data/decathlon/Task01_BrainTumour/",
help="Path to the raw BraTS datafiles")
parser.add_argument("--save_path",
default="../../data/decathlon/",
help="Folder to save Numpy data files")
parser.add_argument("--output_filename",
default="decathlon_brats.h5",
help="Name of the output HDF5 file")
parser.add_argument("--resize", type=int, default=144,
help="Resize height and width to this size. "
"Original size = 240")
parser.add_argument("--split", type=float, default=0.85,
help="Train/test split ratio")
args = parser.parse_args()
def crop_center(img, cropx, cropy, cropz):
    """Crop the spatial center of a 4D (x, y, z, channel) volume.

    Each spatial axis is cropped symmetrically around its midpoint, with
    the window clamped to the volume's bounds, so a crop larger than an
    axis simply returns that full axis. All channels are kept.
    """
    sx, sy, sz, _ = img.shape  # enforce a 4D tensor, channel last
    spatial = (sx, sy, sz)
    wanted = (cropx, cropy, cropz)
    begins = [max(full // 2 - want // 2, 0)
              for full, want in zip(spatial, wanted)]
    ends = [min(begin + want, full)
            for begin, want, full in zip(begins, wanted, spatial)]
    return img[begins[0]:ends[0], begins[1]:ends[1], begins[2]:ends[2], :]
def normalize_img(img):
    """Z-score each channel of a 4D (x, y, z, channel) volume.

    Normalization happens in place (the input array is modified) and the
    same array is returned for convenience.
    """
    n_channels = img.shape[3]
    for ch in range(n_channels):
        plane = img[:, :, :, ch]
        img[:, :, :, ch] = (plane - np.mean(plane)) / np.std(plane)
    return img
def attach_attributes(df, json_data, name):
    """Store *json_data* on HDF5 group/file *df* as a variable-length
    string dataset named *name*.

    A bare string becomes a single-element dataset; any other value is
    treated as a sequence and stored element-wise.
    """
    count = 1 if type(json_data) is str else len(json_data)
    str_dtype = h5py.special_dtype(vlen=str)
    target = df.create_dataset(name, (count,), dtype=str_dtype)
    target[:] = json_data
def preprocess_inputs(img):
    """Prepare a raw Nifti image tensor for storage in the HDF5 file.

    Ensures a trailing channel axis, center-crops each spatial axis to
    ``args.resize``, z-score normalizes every channel, then moves the
    slice axis first so the tensor becomes a stack of 2D slices.
    """
    if len(img.shape) != 4:  # add a channel axis if the scan has none
        img = np.expand_dims(img, -1)
    img = crop_center(img, args.resize, args.resize, args.resize)
    img = normalize_img(img)
    img = np.swapaxes(np.array(img), 0, -2)
    # NOTE(review): the source text was truncated here ("ss_labels(msk):");
    # restored the clearly intended `return img` and the
    # `def preprocess_labels` header that convert_raw_data_to_hdf5 calls.
    return img


def preprocess_labels(msk):
    """Prepare a raw Nifti label tensor: add a channel axis, center-crop,
    collapse all positive classes into a single binary label, and move
    the slice axis first (same layout as preprocess_inputs)."""
    if len(msk.shape) != 4:
        msk = np.expand_dims(msk, -1)
    msk = crop_center(msk, args.resize, args.resize, args.resize)
    msk[msk > 1] = 1  # any tumor sub-class -> 1 (binary segmentation)
    msk = np.swapaxes(np.array(msk), 0, -2)
    return msk
def _append_images(hdf_file, dset_name, file_list, preprocess, data_dir):
    """Stream a list of Nifti files into one resizable HDF5 dataset.

    The first file creates *dset_name* with an unlimited first axis;
    every subsequent file is appended along that axis. *preprocess* is
    applied to each loaded tensor (preprocess_inputs / preprocess_labels).
    """
    dset = None
    for fname in tqdm(file_list):
        # Nibabel should read the file as X,Y,Z,C
        arr = preprocess(
            np.array(nib.load(os.path.join(data_dir, fname)).dataobj))
        if dset is None:
            dset = hdf_file.create_dataset(dset_name,
                                           arr.shape,
                                           maxshape=(None,) + arr.shape[1:],
                                           dtype=float,
                                           compression="gzip")
            dset[:] = arr
        else:
            row = dset.shape[0]  # current number of rows
            dset.resize(row + arr.shape[0], axis=0)  # grow first axis
            dset[row:(row + arr.shape[0]), :] = arr


def convert_raw_data_to_hdf5(trainIdx, validateIdx, testIdx, fileIdx,
                             filename, dataDir, json_data):
    """Write the train/validation/test image and mask tensors plus the
    Decathlon experiment metadata into a single HDF5 file *filename*.

    trainIdx/validateIdx/testIdx index into *fileIdx* (the "training"
    list from dataset.json, each entry holding "image"/"label" paths).
    """
    hdf_file = h5py.File(filename, "w")

    # Copy the experiment metadata into the HDF5 file as string datasets.
    attach_attributes(hdf_file, str(json_data["modality"]), "modalities")
    attach_attributes(hdf_file, json_data["licence"], "license")
    attach_attributes(hdf_file, json_data["reference"], "reference")
    attach_attributes(hdf_file, json_data["name"], "name")
    attach_attributes(hdf_file, json_data["description"], "description")
    attach_attributes(hdf_file, json_data["release"], "release")
    attach_attributes(
        hdf_file, json_data["tensorImageSize"], "tensorImageSize")

    # Resolve the image/label filenames for each split.
    train_image_files = [fileIdx[idx]["image"] for idx in trainIdx]
    train_label_files = [fileIdx[idx]["label"] for idx in trainIdx]
    validate_image_files = [fileIdx[idx]["image"] for idx in validateIdx]
    validate_label_files = [fileIdx[idx]["label"] for idx in validateIdx]
    test_image_files = [fileIdx[idx]["image"] for idx in testIdx]
    test_label_files = [fileIdx[idx]["label"] for idx in testIdx]

    # Record the file lists so the split is reproducible from the HDF5.
    attach_attributes(hdf_file, train_image_files, "training_input_files")
    attach_attributes(hdf_file, train_label_files, "training_label_files")
    attach_attributes(hdf_file, validate_image_files, "validation_input_files")
    attach_attributes(hdf_file, validate_label_files, "validation_label_files")
    attach_attributes(hdf_file, test_image_files, "testing_input_files")
    attach_attributes(hdf_file, test_label_files, "testing_label_files")

    print("Data shapes")
    print("===========")
    print("n.b. All tensors converted to stacks of 2D slices.")
    print("If you want true 3D tensors, then modify this code appropriately.")

    # Report raw vs. cropped shapes using the first training sample.
    data_filename = os.path.join(dataDir, train_image_files[0])
    img = np.array(nib.load(data_filename).dataobj)
    print("Raw Image shape     = ", img.shape)
    crop_shape = preprocess_inputs(img).shape[1:]
    print("Cropped Image shape = (?, {}, {}, {})".format(crop_shape[0],
                                                         crop_shape[1],
                                                         crop_shape[2]))

    data_filename = os.path.join(dataDir, train_label_files[0])
    msk = np.array(nib.load(data_filename).dataobj)
    print("Raw Masks shape     = ", msk.shape)
    crop_shape = preprocess_labels(msk).shape[1:]
    print("Cropped Masks shape = (?, {}, {}, {})".format(crop_shape[0],
                                                         crop_shape[1],
                                                         crop_shape[2]))

    # The six splits differ only in dataset name, file list and
    # preprocessing function, so one helper handles them all.
    print("Step 1 of 6. Save training set images.")
    _append_images(hdf_file, "imgs_train", train_image_files,
                   preprocess_inputs, dataDir)
    print("Step 2 of 6. Save validation set images.")
    _append_images(hdf_file, "imgs_validation", validate_image_files,
                   preprocess_inputs, dataDir)
    print("Step 3 of 6. Save testing set images.")
    _append_images(hdf_file, "imgs_testing", test_image_files,
                   preprocess_inputs, dataDir)
    print("Step 4 of 6. Save training set masks.")
    _append_images(hdf_file, "msks_train", train_label_files,
                   preprocess_labels, dataDir)
    print("Step 5 of 6. Save validation set masks.")
    _append_images(hdf_file, "msks_validation", validate_label_files,
                   preprocess_labels, dataDir)
    print("Step 6 of 6. Save testing set masks.")
    _append_images(hdf_file, "msks_testing", test_label_files,
                   preprocess_labels, dataDir)

    hdf_file.close()
    print("Finished processing.")
    print("HDF5 saved to {}".format(filename))
if __name__ == "__main__":

    print("Converting Decathlon raw Nifti data files to single "
          "training and validation HDF5 data file.")
    print(args)

    # Output goes into a resolution-specific subfolder, e.g. ".../144x144/".
    save_dir = os.path.join(
        args.save_path, "{}x{}/".format(args.resize, args.resize))

    # Create directory; tolerate it already existing, re-raise anything else.
    try:
        os.makedirs(save_dir)
    except OSError:
        if not os.path.isdir(save_dir):
            raise

    filename = os.path.join(save_dir, args.output_filename)

    # Remove any stale output file so h5py starts from a clean slate.
    if os.path.exists(filename):
        print("Removing existing data file: {}".format(filename))
        os.remove(filename)

    # dataset.json describes the experiment, including input/label paths.
    json_filename = os.path.join(args.data_path, "dataset.json")

    try:
        with open(json_filename, "r") as fp:
            experiment_data = json.load(fp)
    except IOError:
        print("File {} doesn't exist. It should be part of the "
              "Decathlon directory".format(json_filename))
        # BUGFIX: previously the error was swallowed here and the script
        # crashed a few lines later with a confusing NameError on
        # `experiment_data`. Abort immediately instead.
        raise

    # Print information about the Decathlon experiment data
    print("*" * 30)
    print("=" * 30)
    print("Dataset name:        ", experiment_data["name"])
    print("Dataset description: ", experiment_data["description"])
    print("Tensor image size:   ", experiment_data["tensorImageSize"])
    print("Dataset release:     ", experiment_data["release"])
    print("Dataset reference:   ", experiment_data["reference"])
    print("Dataset license:     ", experiment_data["licence"])  # sic
    print("=" * 30)
    print("*" * 30)

    # Set the random seed so that always get same random mix
    np.random.seed(816)
    numFiles = experiment_data["numTraining"]
    idxList = np.arange(numFiles)  # List of file indices
    randomList = np.random.random(numFiles)  # List of random numbers
    # Random numbers go from 0 to 1. So anything above
    # args.split is in the validation list.
    trainList = idxList[randomList < args.split]

    otherList = idxList[randomList >= args.split]
    randomList = np.random.random(len(otherList))  # List of random numbers
    validateList = otherList[randomList >= 0.5]
    testList = otherList[randomList < 0.5]

    convert_raw_data_to_hdf5(trainList, validateList, testList,
                             experiment_data["training"],
                             filename, args.data_path,
                             experiment_data)
| true | true |
1c34c4ccf9a41d12264f4a5d14ae65648867b3b4 | 5,589 | py | Python | exercises/adaboost_scenario.py | anatfl/IML.HUJI | b4a01e04fff4181837780cc603446fd73defd349 | [
"MIT"
] | null | null | null | exercises/adaboost_scenario.py | anatfl/IML.HUJI | b4a01e04fff4181837780cc603446fd73defd349 | [
"MIT"
] | null | null | null | exercises/adaboost_scenario.py | anatfl/IML.HUJI | b4a01e04fff4181837780cc603446fd73defd349 | [
"MIT"
] | null | null | null | import numpy as np
from typing import Tuple
from IMLearn.metalearners.adaboost import AdaBoost
from IMLearn.learners.classifiers.decision_stump import DecisionStump
from utils import *
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from IMLearn.metrics import accuracy
def generate_data(n: int, noise_ratio: float) -> Tuple[np.ndarray, np.ndarray]:
    """
    Generate a dataset in R^2 of specified size

    Points are drawn uniformly from [-1, 1]^2; those inside the circle
    of radius 0.5 around the origin are labeled -1, the rest +1, after
    which ``int(noise_ratio * n)`` randomly chosen labels are inverted.

    Parameters
    ----------
    n: int
        Number of samples to generate

    noise_ratio: float
        Ratio of labels to invert

    Returns
    -------
    X: np.ndarray of shape (n_samples,2)
        Design matrix of samples

    y: np.ndarray of shape (n_samples,)
        Labels of samples
    """
    X, y = np.random.rand(n, 2) * 2 - 1, np.ones(n)
    y[np.sum(X ** 2, axis=1) < 0.5 ** 2] = -1
    # Inject label noise. np.random.choice samples with replacement, so
    # slightly fewer than noise_ratio*n distinct labels may be flipped.
    y[np.random.choice(n, int(noise_ratio * n))] *= -1
    return X, y
def fit_and_evaluate_adaboost(noise, n_learners=250, train_size=5000,
                              test_size=500):
    """Fit AdaBoost over decision stumps and produce four diagnostic plots.

    Generates train/test sets with the given label-noise ratio, fits an
    AdaBoost ensemble of ``n_learners`` stumps, then shows: (1) train/test
    error vs. ensemble size, (2) decision surfaces for 5/50/100/250
    learners, (3) the surface of the ensemble size minimizing test error,
    and (4) the full ensemble's surface with training points sized by
    their final AdaBoost sample weights (``D_``).
    """
    (train_X, train_y), (test_X, test_y) =\
        generate_data(train_size, noise), generate_data(test_size, noise)

    # Question 1: Train- and test errors of AdaBoost in noiseless case
    ad_obj = AdaBoost(DecisionStump, n_learners)
    ad_obj.fit(train_X, train_y)
    train_err = []
    test_err = []
    # partial_loss(t) evaluates the ensemble truncated to its first t learners.
    num_of_learners = np.arange(1, n_learners)
    for t in num_of_learners:
        train_err.append(ad_obj.partial_loss(train_X, train_y, t))
        test_err.append(ad_obj.partial_loss(test_X, test_y, t))
    fig1 = go.Figure()
    fig1.add_trace(go.Scatter(x=num_of_learners, y=train_err, mode="lines",
                              name=r'train samples'))
    fig1.add_trace(go.Scatter(x=num_of_learners, y=test_err, mode="lines",
                              name=r'test samples'))
    fig1.update_layout(title="(1) Adaboost error on train and test as function"
                             " of number of learners",
                       xaxis_title="number of learners",
                       yaxis_title="error")
    fig1.show()

    # Question 2: Plotting decision surfaces
    T = [5, 50, 100, 250]
    # Plot limits: bounding box of all samples, padded by 0.1 on each side.
    limits = np.array([np.r_[train_X, test_X].min(axis=0), np.r_[
        train_X, test_X].max(axis=0)]).T + np.array([-.1, .1])
    fig2 = make_subplots(rows=2, cols=2,
                         subplot_titles=[rf"{num} models" for num in T],
                         horizontal_spacing=0.01, vertical_spacing=.03)
    for i, t in enumerate(T):
        # NOTE(review): the lambda closes over the loop variable t; this is
        # safe only because decision_surface evaluates it within this
        # iteration — confirm if decision_surface ever defers the call.
        fig2.add_traces([decision_surface(
            lambda x: ad_obj.partial_predict(x, t),
            limits[0], limits[1], showscale=False),
            go.Scatter(x=test_X[:, 0], y=test_X[:, 1], mode="markers",
                       showlegend=False,
                       marker=dict(color=test_y.astype(int),
                                   colorscale=[custom[0], custom[-1]],
                                   line=dict(color="black", width=1)))],
            rows=(i // 2) + 1, cols=(i % 2) + 1)
    fig2.update_layout(
        title=r"(2) Decision Boundaries Of Models according to number "
              r"of models",
        margin=dict(t=100)).update_xaxes(visible=False).update_yaxes(
        visible=False)
    fig2.show()

    # Question 3: Decision surface of best performing ensemble
    # +1 converts the 0-based argmin index back to an ensemble size.
    min_loss_num = np.argmin([ad_obj.partial_loss(test_X, test_y, k) for
                              k in np.arange(1, n_learners)]) + 1
    y_hat = ad_obj.partial_predict(test_X, min_loss_num)
    fig3 = go.Figure()
    fig3.add_traces([decision_surface(
        lambda x: ad_obj.partial_predict(x, min_loss_num),
        limits[0], limits[1], showscale=False),
        go.Scatter(x=test_X[:, 0], y=test_X[:, 1], mode="markers",
                   showlegend=False,
                   marker=dict(color=test_y.astype(int),
                               colorscale=[custom[0], custom[-1]],
                               line=dict(color="black", width=1)))])
    fig3.update_layout(
        title=rf"(3) Decision Surface Of ensemble with minimal error,"
              rf" ensemble size: {min_loss_num}, "
              rf"accuracy: {accuracy(test_y, y_hat):.4f}",
        margin=dict(t=100)).update_xaxes(visible=False).update_yaxes(
        visible=False)
    fig3.show()

    # Question 4: Decision surface with weighted samples
    # Marker size is proportional to the final AdaBoost sample weight D_,
    # rescaled so the largest marker has size 10.
    fig4 = go.Figure()
    fig4.add_traces([decision_surface(ad_obj.predict, limits[0], limits[1],
                                      showscale=False),
                     go.Scatter(x=train_X[:, 0], y=train_X[:, 1],
                                mode="markers",
                                showlegend=False,
                                marker=
                                dict(color=train_y.astype(int),
                                     colorscale=[custom[0], custom[-1]],
                                     line=dict(color="black", width=1),
                                     size=ad_obj.D_/np.max(ad_obj.D_) * 10))])
    fig4.update_layout(
        title=r"(4) Decision Surface Of ensemble with size 250 and"
              r" weighted samples",
        margin=dict(t=100)).update_xaxes(visible=False).update_yaxes(
        visible=False)
    fig4.show()
if __name__ == '__main__':
    np.random.seed(0)
    # Run the full experiment on a noiseless and a noisy dataset.
    for noise_level in (0.0, 0.4):
        fit_and_evaluate_adaboost(noise_level)
| 39.359155 | 79 | 0.573985 | import numpy as np
from typing import Tuple
from IMLearn.metalearners.adaboost import AdaBoost
from IMLearn.learners.classifiers.decision_stump import DecisionStump
from utils import *
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from IMLearn.metrics import accuracy
def generate_data(n: int, noise_ratio: float) -> Tuple[np.ndarray, np.ndarray]:
X, y = np.random.rand(n, 2) * 2 - 1, np.ones(n)
y[np.sum(X ** 2, axis=1) < 0.5 ** 2] = -1
y[np.random.choice(n, int(noise_ratio * n))] *= -1
return X, y
def fit_and_evaluate_adaboost(noise, n_learners=250, train_size=5000,
test_size=500):
(train_X, train_y), (test_X, test_y) =\
generate_data(train_size, noise), generate_data(test_size, noise)
ad_obj = AdaBoost(DecisionStump, n_learners)
ad_obj.fit(train_X, train_y)
train_err = []
test_err = []
num_of_learners = np.arange(1, n_learners)
for t in num_of_learners:
train_err.append(ad_obj.partial_loss(train_X, train_y, t))
test_err.append(ad_obj.partial_loss(test_X, test_y, t))
fig1 = go.Figure()
fig1.add_trace(go.Scatter(x=num_of_learners, y=train_err, mode="lines",
name=r'train samples'))
fig1.add_trace(go.Scatter(x=num_of_learners, y=test_err, mode="lines",
name=r'test samples'))
fig1.update_layout(title="(1) Adaboost error on train and test as function"
" of number of learners",
xaxis_title="number of learners",
yaxis_title="error")
fig1.show()
T = [5, 50, 100, 250]
limits = np.array([np.r_[train_X, test_X].min(axis=0), np.r_[
train_X, test_X].max(axis=0)]).T + np.array([-.1, .1])
fig2 = make_subplots(rows=2, cols=2,
subplot_titles=[rf"{num} models" for num in T],
horizontal_spacing=0.01, vertical_spacing=.03)
for i, t in enumerate(T):
fig2.add_traces([decision_surface(
lambda x: ad_obj.partial_predict(x, t),
limits[0], limits[1], showscale=False),
go.Scatter(x=test_X[:, 0], y=test_X[:, 1], mode="markers",
showlegend=False,
marker=dict(color=test_y.astype(int),
colorscale=[custom[0], custom[-1]],
line=dict(color="black", width=1)))],
rows=(i // 2) + 1, cols=(i % 2) + 1)
fig2.update_layout(
title=r"(2) Decision Boundaries Of Models according to number "
r"of models",
margin=dict(t=100)).update_xaxes(visible=False).update_yaxes(
visible=False)
fig2.show()
min_loss_num = np.argmin([ad_obj.partial_loss(test_X, test_y, k) for
k in np.arange(1, n_learners)]) + 1
y_hat = ad_obj.partial_predict(test_X, min_loss_num)
fig3 = go.Figure()
fig3.add_traces([decision_surface(
lambda x: ad_obj.partial_predict(x, min_loss_num),
limits[0], limits[1], showscale=False),
go.Scatter(x=test_X[:, 0], y=test_X[:, 1], mode="markers",
showlegend=False,
marker=dict(color=test_y.astype(int),
colorscale=[custom[0], custom[-1]],
line=dict(color="black", width=1)))])
fig3.update_layout(
title=rf"(3) Decision Surface Of ensemble with minimal error,"
rf" ensemble size: {min_loss_num}, "
rf"accuracy: {accuracy(test_y, y_hat):.4f}",
margin=dict(t=100)).update_xaxes(visible=False).update_yaxes(
visible=False)
fig3.show()
fig4 = go.Figure()
fig4.add_traces([decision_surface(ad_obj.predict, limits[0], limits[1],
showscale=False),
go.Scatter(x=train_X[:, 0], y=train_X[:, 1],
mode="markers",
showlegend=False,
marker=
dict(color=train_y.astype(int),
colorscale=[custom[0], custom[-1]],
line=dict(color="black", width=1),
size=ad_obj.D_/np.max(ad_obj.D_) * 10))])
fig4.update_layout(
title=r"(4) Decision Surface Of ensemble with size 250 and"
r" weighted samples",
margin=dict(t=100)).update_xaxes(visible=False).update_yaxes(
visible=False)
fig4.show()
if __name__ == '__main__':
np.random.seed(0)
fit_and_evaluate_adaboost(0.0)
fit_and_evaluate_adaboost(0.4)
| true | true |
1c34c5350cb8380f5333eb172ccfc98c80642e43 | 37,617 | py | Python | networkx/classes/multidigraph.py | theaverageguy/networkx | b2b74b3ba028ef3788f796aa64b037c8ea446539 | [
"BSD-3-Clause"
] | null | null | null | networkx/classes/multidigraph.py | theaverageguy/networkx | b2b74b3ba028ef3788f796aa64b037c8ea446539 | [
"BSD-3-Clause"
] | null | null | null | networkx/classes/multidigraph.py | theaverageguy/networkx | b2b74b3ba028ef3788f796aa64b037c8ea446539 | [
"BSD-3-Clause"
] | 1 | 2020-09-11T06:41:14.000Z | 2020-09-11T06:41:14.000Z | """Base class for MultiDiGraph."""
# Copyright (C) 2004-2016 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from copy import deepcopy
import networkx as nx
from networkx.classes.graph import Graph # for doctests
from networkx.classes.digraph import DiGraph
from networkx.classes.multigraph import MultiGraph
from networkx.exception import NetworkXError
__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
'Pieter Swart (swart@lanl.gov)',
'Dan Schult(dschult@colgate.edu)'])
class MultiDiGraph(MultiGraph,DiGraph):
"""A directed graph class that can store multiedges.
Multiedges are multiple edges between two nodes. Each edge
can hold optional data or attributes.
A MultiDiGraph holds directed edges. Self loops are allowed.
Nodes can be arbitrary (hashable) Python objects with optional
key/value attributes.
Edges are represented as links between nodes with optional
key/value attributes.
Parameters
----------
data : input graph
Data to initialize graph. If data=None (default) an empty
graph is created. The data can be an edge list, or any
NetworkX graph object. If the corresponding optional Python
packages are installed the data can also be a NumPy matrix
or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to graph as key=value pairs.
See Also
--------
Graph
DiGraph
MultiGraph
Examples
--------
Create an empty graph structure (a "null graph") with no nodes and
no edges.
>>> G = nx.MultiDiGraph()
G can be grown in several ways.
**Nodes:**
Add one node at a time:
>>> G.add_node(1)
Add the nodes from any container (a list, dict, set or
even the lines from a file or the nodes from another graph).
>>> G.add_nodes_from([2,3])
>>> G.add_nodes_from(range(100,110))
>>> H=nx.path_graph(10)
>>> G.add_nodes_from(H)
In addition to strings and integers any hashable Python object
(except None) can represent a node, e.g. a customized node object,
or even another Graph.
>>> G.add_node(H)
**Edges:**
G can also be grown by adding edges.
Add one edge,
>>> G.add_edge(1, 2)
a list of edges,
>>> G.add_edges_from([(1,2),(1,3)])
or a collection of edges,
>>> G.add_edges_from(H.edges())
If some edges connect nodes not yet in the graph, the nodes
are added automatically. If an edge already exists, an additional
edge is created and stored using a key to identify the edge.
By default the key is the lowest unused integer.
>>> G.add_edges_from([(4,5,dict(route=282)), (4,5,dict(route=37))])
>>> G[4]
{5: {0: {}, 1: {'route': 282}, 2: {'route': 37}}}
**Attributes:**
Each graph, node, and edge can hold key/value attribute pairs
in an associated attribute dictionary (the keys must be hashable).
By default these are empty, but can be added or changed using
add_edge, add_node or direct manipulation of the attribute
dictionaries named graph, node and edge respectively.
>>> G = nx.MultiDiGraph(day="Friday")
>>> G.graph
{'day': 'Friday'}
Add node attributes using add_node(), add_nodes_from() or G.node
>>> G.add_node(1, time='5pm')
>>> G.add_nodes_from([3], time='2pm')
>>> G.node[1]
{'time': '5pm'}
>>> G.node[1]['room'] = 714
>>> del G.node[1]['room'] # remove attribute
>>> list(G.nodes(data=True))
[(1, {'time': '5pm'}), (3, {'time': '2pm'})]
Warning: adding a node to G.node does not add it to the graph.
Add edge attributes using add_edge(), add_edges_from(), subscript
notation, or G.edge.
>>> G.add_edge(1, 2, weight=4.7 )
>>> G.add_edges_from([(3,4),(4,5)], color='red')
>>> G.add_edges_from([(1,2,{'color':'blue'}), (2,3,{'weight':8})])
>>> G[1][2][0]['weight'] = 4.7
>>> G.edge[1][2][0]['weight'] = 4
**Shortcuts:**
Many common graph features allow python syntax to speed reporting.
>>> 1 in G # check if node in graph
True
>>> [n for n in G if n<3] # iterate through nodes
[1, 2]
>>> len(G) # number of nodes in graph
5
>>> G[1] # adjacency dict keyed by neighbor to edge attributes
... # Note: you should not change this dict manually!
{2: {0: {'weight': 4}, 1: {'color': 'blue'}}}
The fastest way to traverse all edges of a graph is via
adjacency(), but the edges() method is often more convenient.
>>> for n,nbrsdict in G.adjacency():
... for nbr,keydict in nbrsdict.items():
... for key,eattr in keydict.items():
... if 'weight' in eattr:
... (n,nbr,eattr['weight'])
(1, 2, 4)
(2, 3, 8)
>>> list(G.edges(data='weight'))
[(1, 2, 4), (1, 2, None), (2, 3, 8), (3, 4, None), (4, 5, None)]
**Reporting:**
Simple graph information is obtained using methods.
Reporting methods usually return iterators instead of containers
to reduce memory usage.
Methods exist for reporting nodes(), edges(), neighbors() and degree()
as well as the number of nodes and edges.
For details on these and other miscellaneous methods, see below.
**Subclasses (Advanced):**
The MultiDiGraph class uses a dict-of-dict-of-dict-of-dict structure.
The outer dict (node_dict) holds adjacency information keyed by node.
The next dict (adjlist_dict) represents the adjacency information and holds
edge_key dicts keyed by neighbor. The edge_key dict holds each edge_attr
dict keyed by edge key. The inner dict (edge_attr_dict) represents
the edge data and holds edge attribute values keyed by attribute names.
Each of these four dicts in the dict-of-dict-of-dict-of-dict
structure can be replaced by a user defined dict-like object.
In general, the dict-like features should be maintained but
extra features can be added. To replace one of the dicts create
a new graph class by changing the class(!) variable holding the
factory for that dict-like structure. The variable names
are node_dict_factory, adjlist_dict_factory, edge_key_dict_factory
and edge_attr_dict_factory.
node_dict_factory : function, (default: dict)
Factory function to be used to create the outer-most dict
in the data structure that holds adjacency info keyed by node.
It should require no arguments and return a dict-like object.
adjlist_dict_factory : function, (default: dict)
Factory function to be used to create the adjacency list
dict which holds multiedge key dicts keyed by neighbor.
It should require no arguments and return a dict-like object.
edge_key_dict_factory : function, (default: dict)
Factory function to be used to create the edge key dict
which holds edge data keyed by edge key.
It should require no arguments and return a dict-like object.
edge_attr_dict_factory : function, (default: dict)
Factory function to be used to create the edge attribute
dict which holds attrbute values keyed by attribute name.
It should require no arguments and return a dict-like object.
Examples
--------
Create a multigraph subclass that tracks the order nodes are added.
>>> from collections import OrderedDict
>>> class OrderedGraph(nx.MultiDiGraph):
... node_dict_factory = OrderedDict
>>> G = OrderedGraph()
>>> G.add_nodes_from( (2,1) )
>>> list(G.nodes())
[2, 1]
>>> G.add_edges_from( ((2,2), (2,1), (2,1), (1,1)) )
>>> list(G.edges())
[(2, 1), (2, 1), (2, 2), (1, 1)]
Create a multdigraph object that tracks the order nodes are added
and for each node track the order that neighbors are added and for
each neighbor tracks the order that multiedges are added.
>>> class OrderedGraph(nx.MultiDiGraph):
... node_dict_factory = OrderedDict
... adjlist_dict_factory = OrderedDict
... edge_key_dict_factory = OrderedDict
>>> G = OrderedGraph()
>>> G.add_nodes_from( (2,1) )
>>> list(G.nodes())
[2, 1]
>>> G.add_edges_from( ((2,2), (2,1,2,{'weight':0.1}), (2,1,1,{'weight':0.2}), (1,1)) )
>>> list(G.edges(keys=True))
[(2, 2, 0), (2, 1, 2), (2, 1, 1), (1, 1, 0)]
"""
# node_dict_factory=dict # already assigned in Graph
# adjlist_dict_factory=dict
edge_key_dict_factory = dict
# edge_attr_dict_factory=dict
    def __init__(self, data=None, **attr):
        """Initialize a MultiDiGraph.

        Parameters
        ----------
        data : input graph, optional (default=None)
            Data to initialize the graph (an edge list, any NetworkX
            graph, or — with the optional packages installed — a NumPy
            matrix/ndarray, SciPy sparse matrix, or PyGraphviz graph).
            ``None`` creates an empty graph.
        attr : keyword arguments, optional
            Attributes to add to the graph as key=value pairs.
        """
        # NOTE(review): rebinds the class-level factory onto the instance;
        # presumably so each instance carries its own factory reference —
        # confirm against how MultiGraph/DiGraph use these factories.
        self.edge_key_dict_factory = self.edge_key_dict_factory
        DiGraph.__init__(self, data, **attr)
def add_edge(self, u, v, key=None, attr_dict=None, **attr):
"""Add an edge between u and v.
The nodes u and v will be automatically added if they are
not already in the graph.
Edge attributes can be specified with keywords or by providing
a dictionary with key/value pairs. See examples below.
Parameters
----------
u, v : nodes
Nodes can be, for example, strings or numbers.
Nodes must be hashable (and not None) Python objects.
key : hashable identifier, optional (default=lowest unused integer)
Used to distinguish multiedges between a pair of nodes.
attr_dict : dictionary, optional (default= no attributes)
Dictionary of edge attributes. Key/value pairs will
update existing data associated with the edge.
attr : keyword arguments, optional
Edge data (or labels or objects) can be assigned using
keyword arguments.
See Also
--------
add_edges_from : add a collection of edges
Notes
-----
To replace/update edge data, use the optional key argument
to identify a unique edge. Otherwise a new edge will be created.
NetworkX algorithms designed for weighted graphs cannot use
multigraphs directly because it is not clear how to handle
multiedge weights. Convert to Graph using edge attribute
'weight' to enable weighted graph algorithms.
Examples
--------
The following all add the edge e=(1,2) to graph G:
>>> G = nx.MultiDiGraph()
>>> e = (1,2)
>>> G.add_edge(1, 2) # explicit two-node form
>>> G.add_edge(*e) # single edge as tuple of two nodes
>>> G.add_edges_from( [(1,2)] ) # add edges from iterable container
Associate data to edges using keywords:
>>> G.add_edge(1, 2, weight=3)
>>> G.add_edge(1, 2, key=0, weight=4) # update data for key=0
>>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7)
"""
# set up attribute dict
if attr_dict is None:
attr_dict = attr
else:
try:
attr_dict.update(attr)
except AttributeError:
raise NetworkXError(
"The attr_dict argument must be a dictionary.")
# add nodes
if u not in self.succ:
self.succ[u] = self.adjlist_dict_factory()
self.pred[u] = self.adjlist_dict_factory()
self.node[u] = {}
if v not in self.succ:
self.succ[v] = self.adjlist_dict_factory()
self.pred[v] = self.adjlist_dict_factory()
self.node[v] = {}
if v in self.succ[u]:
keydict = self.adj[u][v]
if key is None:
# find a unique integer key
# other methods might be better here?
key = len(keydict)
while key in keydict:
key += 1
datadict = keydict.get(key, self.edge_key_dict_factory())
datadict.update(attr_dict)
keydict[key] = datadict
else:
# selfloops work this way without special treatment
if key is None:
key = 0
datadict = self.edge_attr_dict_factory()
datadict.update(attr_dict)
keydict = self.edge_key_dict_factory()
keydict[key] = datadict
self.succ[u][v] = keydict
self.pred[v][u] = keydict
def remove_edge(self, u, v, key=None):
"""Remove an edge between u and v.
Parameters
----------
u, v : nodes
Remove an edge between nodes u and v.
key : hashable identifier, optional (default=None)
Used to distinguish multiple edges between a pair of nodes.
If None remove a single (abritrary) edge between u and v.
Raises
------
NetworkXError
If there is not an edge between u and v, or
if there is no edge with the specified key.
See Also
--------
remove_edges_from : remove a collection of edges
Examples
--------
>>> G = nx.MultiDiGraph()
>>> nx.add_path(G, [0, 1, 2, 3])
>>> G.remove_edge(0,1)
>>> e = (1,2)
>>> G.remove_edge(*e) # unpacks e from an edge tuple
For multiple edges
>>> G = nx.MultiDiGraph()
>>> G.add_edges_from([(1,2),(1,2),(1,2)])
>>> G.remove_edge(1,2) # remove a single (arbitrary) edge
For edges with keys
>>> G = nx.MultiDiGraph()
>>> G.add_edge(1,2,key='first')
>>> G.add_edge(1,2,key='second')
>>> G.remove_edge(1,2,key='second')
"""
try:
d = self.adj[u][v]
except (KeyError):
raise NetworkXError(
"The edge %s-%s is not in the graph." % (u, v))
# remove the edge with specified data
if key is None:
d.popitem()
else:
try:
del d[key]
except (KeyError):
raise NetworkXError(
"The edge %s-%s with key %s is not in the graph." % (u, v, key))
if len(d) == 0:
# remove the key entries if last edge
del self.succ[u][v]
del self.pred[v][u]
def edges(self, nbunch=None, data=False, keys=False, default=None):
"""Return an iterator over the edges.
Edges are returned as tuples with optional data and keys
in the order (node, neighbor, key, data).
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
data : string or bool, optional (default=False)
The edge attribute returned in 3-tuple (u,v,ddict[data]).
If True, return edge attribute dict in 3-tuple (u,v,ddict).
If False, return 2-tuple (u,v).
keys : bool, optional (default=False)
If True, return edge keys with each edge.
default : value, optional (default=None)
Value used for edges that dont have the requested attribute.
Only relevant if data is not True or False.
Returns
-------
edge : iterator
An iterator over (u,v), (u,v,d) or (u,v,key,d) tuples of edges.
Notes
-----
Nodes in nbunch that are not in the graph will be (quietly) ignored.
For directed graphs this returns the out-edges.
Examples
--------
>>> G = nx.MultiDiGraph()
>>> nx.add_path(G, [0, 1, 2])
>>> G.add_edge(2,3,weight=5)
>>> [e for e in G.edges()]
[(0, 1), (1, 2), (2, 3)]
>>> list(G.edges(data=True)) # default data is {} (empty dict)
[(0, 1, {}), (1, 2, {}), (2, 3, {'weight': 5})]
>>> list(G.edges(data='weight', default=1))
[(0, 1, 1), (1, 2, 1), (2, 3, 5)]
>>> list(G.edges(keys=True)) # default keys are integers
[(0, 1, 0), (1, 2, 0), (2, 3, 0)]
>>> list(G.edges(data=True,keys=True)) # default keys are integers
[(0, 1, 0, {}), (1, 2, 0, {}), (2, 3, 0, {'weight': 5})]
>>> list(G.edges(data='weight',default=1,keys=True))
[(0, 1, 0, 1), (1, 2, 0, 1), (2, 3, 0, 5)]
>>> list(G.edges([0,2]))
[(0, 1), (2, 3)]
>>> list(G.edges(0))
[(0, 1)]
See Also
--------
in_edges, out_edges
"""
if nbunch is None:
nodes_nbrs = self.adj.items()
else:
nodes_nbrs = ((n, self.adj[n]) for n in self.nbunch_iter(nbunch))
if data is True:
for n, nbrs in nodes_nbrs:
for nbr, keydict in nbrs.items():
for key, ddict in keydict.items():
yield (n, nbr, key, ddict) if keys else (n, nbr, ddict)
elif data is not False:
for n, nbrs in nodes_nbrs:
for nbr, keydict in nbrs.items():
for key, ddict in keydict.items():
d = ddict[data] if data in ddict else default
yield (n, nbr, key, d) if keys else (n, nbr, d)
else:
for n, nbrs in nodes_nbrs:
for nbr, keydict in nbrs.items():
for key in keydict:
yield (n, nbr, key) if keys else (n, nbr)
# alias out_edges to edges
out_edges = edges
def in_edges(self, nbunch=None, data=False, keys=False):
"""Return an iterator over the incoming edges.
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
data : bool, optional (default=False)
If True, return edge attribute dict with each edge.
keys : bool, optional (default=False)
If True, return edge keys with each edge.
Returns
-------
in_edge : iterator
An iterator over (u,v), (u,v,d) or (u,v,key,d) tuples of edges.
See Also
--------
edges : return an iterator over edges
"""
if nbunch is None:
nodes_nbrs = self.pred.items()
else:
nodes_nbrs = ((n, self.pred[n]) for n in self.nbunch_iter(nbunch))
if data:
for n, nbrs in nodes_nbrs:
for nbr, keydict in nbrs.items():
for key, data in keydict.items():
if keys:
yield (nbr, n, key, data)
else:
yield (nbr, n, data)
else:
for n, nbrs in nodes_nbrs:
for nbr, keydict in nbrs.items():
for key, data in keydict.items():
if keys:
yield (nbr, n, key)
else:
yield (nbr, n)
def degree(self, nbunch=None, weight=None):
"""Return an iterator for (node, degree) or degree for single node.
The node degree is the number of edges adjacent to the node.
This function returns the degree for a single node or an iterator
for a bunch of nodes or if nothing is passed as argument.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights.
Returns
-------
If a single nodes is requested
deg : int
Degree of the node
OR if multiple nodes are requested
nd_iter : iterator
The iterator returns two-tuples of (node, degree).
See Also
--------
out_degree, in_degree
Examples
--------
>>> G = nx.MultiDiGraph()
>>> nx.add_path(G, [0, 1, 2, 3])
>>> G.degree(0) # node 0 with degree 1
1
>>> list(G.degree([0,1]))
[(0, 1), (1, 2)]
"""
# Test to see if nbunch is a single node, an iterator of nodes or
# None(indicating all nodes). (nbunch in self) is True when nbunch
# is a single node.
if nbunch in self:
succ = self.succ[nbunch]
pred = self.pred[nbunch]
if weight is None:
indeg = sum([len(data) for data in pred.values()])
outdeg = sum([len(data) for data in succ.values()])
return indeg + outdeg
s = (sum(sum(data.get(weight, 1) for data in keydict.values())
for keydict in succ.values())) + (sum(sum(data.get(weight, 1)
for data in keydict.values()) for keydict in pred.values()))
return s
if nbunch is None:
nodes_nbrs = ( (n, succ, self.pred[n])
for n,succ in self.succ.items() )
else:
nodes_nbrs = ( (n, self.succ[n], self.pred[n])
for n in self.nbunch_iter(nbunch))
if weight is None:
def d_iter():
for n, succ, pred in nodes_nbrs:
indeg = sum([len(data) for data in pred.values()])
outdeg = sum([len(data) for data in succ.values()])
yield (n, indeg + outdeg)
else:
# edge weighted graph - degree is sum of nbr edge weights
def d_iter():
for n, succ, pred in nodes_nbrs:
deg = sum([d.get(weight, 1)
for data in pred.values()
for d in data.values()])
deg += sum([d.get(weight, 1)
for data in succ.values()
for d in data.values()])
yield (n, deg)
return d_iter()
def in_degree(self, nbunch=None, weight=None):
"""Return an iterator for (node, in-degree) or in-degree for single node.
The node in-degree is the number of edges pointing in to the node.
This function returns the in-degree for a single node or an iterator
for a bunch of nodes or if nothing is passed as argument.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
Returns
-------
If a single node is requested
deg : int
Degree of the node
OR if multiple nodes are requested
nd_iter : iterator
The iterator returns two-tuples of (node, in-degree).
See Also
--------
degree, out_degree
Examples
--------
>>> G = nx.MultiDiGraph()
>>> nx.add_path(G, [0, 1, 2, 3])
>>> G.in_degree(0) # node 0 with degree 0
0
>>> list(G.in_degree([0,1]))
[(0, 0), (1, 1)]
"""
# Test to see if nbunch is a single node, an iterator of nodes or
# None(indicating all nodes). (nbunch in self) is True when nbunch
# is a single node.
if nbunch in self:
pred = self.pred[nbunch]
if weight is None:
return sum(len(data) for data in pred.values())
return (sum(sum(data.get(weight,1) for data in keydict.values())
for keydict in pred.values()))
if nbunch is None:
nodes_nbrs = self.pred.items()
else:
nodes_nbrs = ((n, self.pred[n]) for n in self.nbunch_iter(nbunch))
if weight is None:
def d_iter():
for n, nbrs in nodes_nbrs:
yield (n, sum([len(data) for data in nbrs.values()]))
else:
# edge weighted graph - degree is sum of nbr edge weights
def d_iter():
for n, pred in nodes_nbrs:
deg = sum([d.get(weight, 1)
for data in pred.values()
for d in data.values()])
yield (n, deg)
return d_iter()
def out_degree(self, nbunch=None, weight=None):
"""Return an iterator for (node, out-degree) or out-degree for single node.
The node out-degree is the number of edges pointing out of the node.
This function returns the out-degree for a single node or an iterator
for a bunch of nodes or if nothing is passed as argument.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights.
Returns
-------
If a single node is requested
deg : int
Degree of the node
OR if multiple nodes are requested
nd_iter : iterator
The iterator returns two-tuples of (node, out-degree).
See Also
--------
degree, in_degree
Examples
--------
>>> G = nx.MultiDiGraph()
>>> nx.add_path(G, [0, 1, 2, 3])
>>> G.out_degree(0) # node 0 with degree 1
1
>>> list(G.out_degree([0,1]))
[(0, 1), (1, 1)]
"""
# Test to see if nbunch is a single node, an iterator of nodes or
# None(indicating all nodes). (nbunch in self) is True when nbunch
# is a single node.
if nbunch in self:
succ = self.succ[nbunch]
if weight is None:
return sum(len(data) for data in succ.values())
return (sum(sum(data.get(weight,1) for data in keydict.values())
for keydict in succ.values()))
if nbunch is None:
nodes_nbrs = self.succ.items()
else:
nodes_nbrs = ((n, self.succ[n]) for n in self.nbunch_iter(nbunch))
if weight is None:
def d_iter():
for n, nbrs in nodes_nbrs:
yield (n, sum([len(data) for data in nbrs.values()]))
else:
def d_iter():
for n, succ in nodes_nbrs:
deg = sum([d.get(weight, 1)
for data in succ.values()
for d in data.values()])
yield (n, deg)
return d_iter()
def is_multigraph(self):
"""Return True if graph is a multigraph, False otherwise."""
return True
def is_directed(self):
"""Return True if graph is directed, False otherwise."""
return True
def to_directed(self):
"""Return a directed copy of the graph.
Returns
-------
G : MultiDiGraph
A deepcopy of the graph.
Notes
-----
If edges in both directions (u,v) and (v,u) exist in the
graph, attributes for the new undirected edge will be a combination of
the attributes of the directed edges. The edge data is updated
in the (arbitrary) order that the edges are encountered. For
more customized control of the edge attributes use add_edge().
This returns a "deepcopy" of the edge, node, and
graph attributes which attempts to completely copy
all of the data and references.
This is in contrast to the similar G=DiGraph(D) which returns a
shallow copy of the data.
See the Python copy module for more information on shallow
and deep copies, http://docs.python.org/library/copy.html.
Examples
--------
>>> G = nx.Graph() # or MultiGraph, etc
>>> G.add_edge(0, 1)
>>> H = G.to_directed()
>>> list(H.edges())
[(0, 1), (1, 0)]
If already directed, return a (deep) copy
>>> G = nx.MultiDiGraph()
>>> G.add_edge(0, 1)
>>> H = G.to_directed()
>>> list(H.edges())
[(0, 1)]
"""
return deepcopy(self)
def to_undirected(self, reciprocal=False):
"""Return an undirected representation of the digraph.
Parameters
----------
reciprocal : bool (optional)
If True only keep edges that appear in both directions
in the original digraph.
Returns
-------
G : MultiGraph
An undirected graph with the same name and nodes and
with edge (u,v,data) if either (u,v,data) or (v,u,data)
is in the digraph. If both edges exist in digraph and
their edge data is different, only one edge is created
with an arbitrary choice of which edge data to use.
You must check and correct for this manually if desired.
Notes
-----
This returns a "deepcopy" of the edge, node, and
graph attributes which attempts to completely copy
all of the data and references.
This is in contrast to the similar D=DiGraph(G) which returns a
shallow copy of the data.
See the Python copy module for more information on shallow
and deep copies, http://docs.python.org/library/copy.html.
Warning: If you have subclassed MultiGraph to use dict-like objects
in the data structure, those changes do not transfer to the MultiDiGraph
created by this method.
"""
H = MultiGraph()
H.name = self.name
H.add_nodes_from(self)
if reciprocal is True:
H.add_edges_from((u, v, key, deepcopy(data))
for u, nbrs in self.adjacency()
for v, keydict in nbrs.items()
for key, data in keydict.items()
if self.has_edge(v, u, key))
else:
H.add_edges_from((u, v, key, deepcopy(data))
for u, nbrs in self.adjacency()
for v, keydict in nbrs.items()
for key, data in keydict.items())
H.graph = deepcopy(self.graph)
H.node = deepcopy(self.node)
return H
def subgraph(self, nbunch):
"""Return the subgraph induced on nodes in nbunch.
The induced subgraph of the graph contains the nodes in nbunch
and the edges between those nodes.
Parameters
----------
nbunch : list, iterable
A container of nodes which will be iterated through once.
Returns
-------
G : Graph
A subgraph of the graph with the same edge attributes.
Notes
-----
The graph, edge or node attributes just point to the original graph.
So changes to the node or edge structure will not be reflected in
the original graph while changes to the attributes will.
To create a subgraph with its own copy of the edge/node attributes use:
nx.Graph(G.subgraph(nbunch))
If edge attributes are containers, a deep copy can be obtained using:
G.subgraph(nbunch).copy()
For an inplace reduction of a graph to a subgraph you can remove nodes:
G.remove_nodes_from([ n in G if n not in set(nbunch)])
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> H = G.subgraph([0,1,2])
>>> list(H.edges())
[(0, 1), (1, 2)]
"""
bunch = self.nbunch_iter(nbunch)
# create new graph and copy subgraph into it
H = self.__class__()
# copy node and attribute dictionaries
for n in bunch:
H.node[n] = self.node[n]
# namespace shortcuts for speed
H_succ = H.succ
H_pred = H.pred
self_succ = self.succ
self_pred = self.pred
# add nodes
for n in H:
H_succ[n] = H.adjlist_dict_factory()
H_pred[n] = H.adjlist_dict_factory()
# add edges
for u in H_succ:
Hnbrs = H_succ[u]
for v, edgedict in self_succ[u].items():
if v in H_succ:
# add both representations of edge: u-v and v-u
# they share the same edgedict
ed = edgedict.copy()
Hnbrs[v] = ed
H_pred[v][u] = ed
H.graph = self.graph
return H
def edge_subgraph(self, edges):
"""Returns the subgraph induced by the specified edges.
The induced subgraph contains each edge in ``edges`` and each
node incident to any one of those edges.
Parameters
----------
edges : iterable
An iterable of edges in this graph.
Returns
-------
G : Graph
An edge-induced subgraph of this graph with the same edge
attributes.
Notes
-----
The graph, edge, and node attributes in the returned subgraph
are references to the corresponding attributes in the original
graph. Thus changes to the node or edge structure of the
returned graph will not be reflected in the original graph, but
changes to the attributes will.
To create a subgraph with its own copy of the edge or node
attributes, use::
>>> nx.MultiDiGraph(G.edge_subgraph(edges)) # doctest: +SKIP
If edge attributes are containers, a deep copy of the attributes
can be obtained using::
>>> G.edge_subgraph(edges).copy() # doctest: +SKIP
Examples
--------
Get a subgraph induced by only those edges that have a certain
attribute::
>>> # Create a graph in which some edges are "good" and some "bad".
>>> G = nx.MultiDiGraph()
>>> G.add_edge(0, 1, key=0, good=True)
>>> G.add_edge(0, 1, key=1, good=False)
>>> G.add_edge(1, 2, key=0, good=False)
>>> G.add_edge(1, 2, key=1, good=True)
>>> # Keep only those edges that are marked as "good".
>>> edges = G.edges(keys=True, data='good')
>>> edges = ((u, v, k) for (u, v, k, good) in edges if good)
>>> H = G.edge_subgraph(edges)
>>> list(H.edges(keys=True, data=True))
[(0, 1, 0, {'good': True}), (1, 2, 1, {'good': True})]
"""
H = self.__class__()
succ = self.succ
# Filter out edges that don't correspond to nodes in the graph.
def is_in_graph(u, v, k):
return u in succ and v in succ[u] and k in succ[u][v]
edges = (e for e in edges if is_in_graph(*e))
for u, v, k in edges:
# Copy the node attributes if they haven't been copied
# already.
if u not in H.node:
H.node[u] = self.node[u]
if v not in H.node:
H.node[v] = self.node[v]
# Create an entry in the successors and predecessors
# dictionary for the nodes u and v if they don't exist yet.
if u not in H.succ:
H.succ[u] = H.adjlist_dict_factory()
if v not in H.pred:
H.pred[v] = H.adjlist_dict_factory()
# Create an entry in the edge dictionary for the edges (u,
# v) and (v, u) if the don't exist yet.
if v not in H.succ[u]:
H.succ[u][v] = H.edge_key_dict_factory()
if u not in H.pred[v]:
H.pred[v][u] = H.edge_key_dict_factory()
# Copy the edge attributes.
H.edge[u][v][k] = self.edge[u][v][k]
H.pred[v][u][k] = self.pred[v][u][k]
H.graph = self.graph
return H
def reverse(self, copy=True):
"""Return the reverse of the graph.
The reverse is a graph with the same nodes and edges
but with the directions of the edges reversed.
Parameters
----------
copy : bool optional (default=True)
If True, return a new DiGraph holding the reversed edges.
If False, reverse the reverse graph is created using
the original graph (this changes the original graph).
"""
if copy:
H = self.__class__(name="Reverse of (%s)"%self.name)
H.add_nodes_from(self)
H.add_edges_from((v, u, k, deepcopy(d)) for u, v, k, d
in self.edges(keys=True, data=True))
H.graph = deepcopy(self.graph)
H.node = deepcopy(self.node)
else:
self.pred, self.succ = self.succ, self.pred
self.adj = self.succ
H = self
return H
| 36.170192 | 90 | 0.554962 |
from copy import deepcopy
import networkx as nx
from networkx.classes.graph import Graph
from networkx.classes.digraph import DiGraph
from networkx.classes.multigraph import MultiGraph
from networkx.exception import NetworkXError
__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
'Pieter Swart (swart@lanl.gov)',
'Dan Schult(dschult@colgate.edu)'])
class MultiDiGraph(MultiGraph,DiGraph):
tory = dict
def __init__(self, data=None, **attr):
self.edge_key_dict_factory = self.edge_key_dict_factory
DiGraph.__init__(self, data, **attr)
def add_edge(self, u, v, key=None, attr_dict=None, **attr):
if attr_dict is None:
attr_dict = attr
else:
try:
attr_dict.update(attr)
except AttributeError:
raise NetworkXError(
"The attr_dict argument must be a dictionary.")
if u not in self.succ:
self.succ[u] = self.adjlist_dict_factory()
self.pred[u] = self.adjlist_dict_factory()
self.node[u] = {}
if v not in self.succ:
self.succ[v] = self.adjlist_dict_factory()
self.pred[v] = self.adjlist_dict_factory()
self.node[v] = {}
if v in self.succ[u]:
keydict = self.adj[u][v]
if key is None:
key = len(keydict)
while key in keydict:
key += 1
datadict = keydict.get(key, self.edge_key_dict_factory())
datadict.update(attr_dict)
keydict[key] = datadict
else:
if key is None:
key = 0
datadict = self.edge_attr_dict_factory()
datadict.update(attr_dict)
keydict = self.edge_key_dict_factory()
keydict[key] = datadict
self.succ[u][v] = keydict
self.pred[v][u] = keydict
def remove_edge(self, u, v, key=None):
try:
d = self.adj[u][v]
except (KeyError):
raise NetworkXError(
"The edge %s-%s is not in the graph." % (u, v))
if key is None:
d.popitem()
else:
try:
del d[key]
except (KeyError):
raise NetworkXError(
"The edge %s-%s with key %s is not in the graph." % (u, v, key))
if len(d) == 0:
del self.succ[u][v]
del self.pred[v][u]
def edges(self, nbunch=None, data=False, keys=False, default=None):
if nbunch is None:
nodes_nbrs = self.adj.items()
else:
nodes_nbrs = ((n, self.adj[n]) for n in self.nbunch_iter(nbunch))
if data is True:
for n, nbrs in nodes_nbrs:
for nbr, keydict in nbrs.items():
for key, ddict in keydict.items():
yield (n, nbr, key, ddict) if keys else (n, nbr, ddict)
elif data is not False:
for n, nbrs in nodes_nbrs:
for nbr, keydict in nbrs.items():
for key, ddict in keydict.items():
d = ddict[data] if data in ddict else default
yield (n, nbr, key, d) if keys else (n, nbr, d)
else:
for n, nbrs in nodes_nbrs:
for nbr, keydict in nbrs.items():
for key in keydict:
yield (n, nbr, key) if keys else (n, nbr)
out_edges = edges
def in_edges(self, nbunch=None, data=False, keys=False):
if nbunch is None:
nodes_nbrs = self.pred.items()
else:
nodes_nbrs = ((n, self.pred[n]) for n in self.nbunch_iter(nbunch))
if data:
for n, nbrs in nodes_nbrs:
for nbr, keydict in nbrs.items():
for key, data in keydict.items():
if keys:
yield (nbr, n, key, data)
else:
yield (nbr, n, data)
else:
for n, nbrs in nodes_nbrs:
for nbr, keydict in nbrs.items():
for key, data in keydict.items():
if keys:
yield (nbr, n, key)
else:
yield (nbr, n)
def degree(self, nbunch=None, weight=None):
if nbunch in self:
succ = self.succ[nbunch]
pred = self.pred[nbunch]
if weight is None:
indeg = sum([len(data) for data in pred.values()])
outdeg = sum([len(data) for data in succ.values()])
return indeg + outdeg
s = (sum(sum(data.get(weight, 1) for data in keydict.values())
for keydict in succ.values())) + (sum(sum(data.get(weight, 1)
for data in keydict.values()) for keydict in pred.values()))
return s
if nbunch is None:
nodes_nbrs = ( (n, succ, self.pred[n])
for n,succ in self.succ.items() )
else:
nodes_nbrs = ( (n, self.succ[n], self.pred[n])
for n in self.nbunch_iter(nbunch))
if weight is None:
def d_iter():
for n, succ, pred in nodes_nbrs:
indeg = sum([len(data) for data in pred.values()])
outdeg = sum([len(data) for data in succ.values()])
yield (n, indeg + outdeg)
else:
def d_iter():
for n, succ, pred in nodes_nbrs:
deg = sum([d.get(weight, 1)
for data in pred.values()
for d in data.values()])
deg += sum([d.get(weight, 1)
for data in succ.values()
for d in data.values()])
yield (n, deg)
return d_iter()
def in_degree(self, nbunch=None, weight=None):
if nbunch in self:
pred = self.pred[nbunch]
if weight is None:
return sum(len(data) for data in pred.values())
return (sum(sum(data.get(weight,1) for data in keydict.values())
for keydict in pred.values()))
if nbunch is None:
nodes_nbrs = self.pred.items()
else:
nodes_nbrs = ((n, self.pred[n]) for n in self.nbunch_iter(nbunch))
if weight is None:
def d_iter():
for n, nbrs in nodes_nbrs:
yield (n, sum([len(data) for data in nbrs.values()]))
else:
def d_iter():
for n, pred in nodes_nbrs:
deg = sum([d.get(weight, 1)
for data in pred.values()
for d in data.values()])
yield (n, deg)
return d_iter()
def out_degree(self, nbunch=None, weight=None):
if nbunch in self:
succ = self.succ[nbunch]
if weight is None:
return sum(len(data) for data in succ.values())
return (sum(sum(data.get(weight,1) for data in keydict.values())
for keydict in succ.values()))
if nbunch is None:
nodes_nbrs = self.succ.items()
else:
nodes_nbrs = ((n, self.succ[n]) for n in self.nbunch_iter(nbunch))
if weight is None:
def d_iter():
for n, nbrs in nodes_nbrs:
yield (n, sum([len(data) for data in nbrs.values()]))
else:
def d_iter():
for n, succ in nodes_nbrs:
deg = sum([d.get(weight, 1)
for data in succ.values()
for d in data.values()])
yield (n, deg)
return d_iter()
def is_multigraph(self):
return True
def is_directed(self):
return True
def to_directed(self):
return deepcopy(self)
def to_undirected(self, reciprocal=False):
H = MultiGraph()
H.name = self.name
H.add_nodes_from(self)
if reciprocal is True:
H.add_edges_from((u, v, key, deepcopy(data))
for u, nbrs in self.adjacency()
for v, keydict in nbrs.items()
for key, data in keydict.items()
if self.has_edge(v, u, key))
else:
H.add_edges_from((u, v, key, deepcopy(data))
for u, nbrs in self.adjacency()
for v, keydict in nbrs.items()
for key, data in keydict.items())
H.graph = deepcopy(self.graph)
H.node = deepcopy(self.node)
return H
def subgraph(self, nbunch):
bunch = self.nbunch_iter(nbunch)
H = self.__class__()
for n in bunch:
H.node[n] = self.node[n]
H_succ = H.succ
H_pred = H.pred
self_succ = self.succ
self_pred = self.pred
for n in H:
H_succ[n] = H.adjlist_dict_factory()
H_pred[n] = H.adjlist_dict_factory()
for u in H_succ:
Hnbrs = H_succ[u]
for v, edgedict in self_succ[u].items():
if v in H_succ:
ed = edgedict.copy()
Hnbrs[v] = ed
H_pred[v][u] = ed
H.graph = self.graph
return H
def edge_subgraph(self, edges):
H = self.__class__()
succ = self.succ
def is_in_graph(u, v, k):
return u in succ and v in succ[u] and k in succ[u][v]
edges = (e for e in edges if is_in_graph(*e))
for u, v, k in edges:
# Copy the node attributes if they haven't been copied
if u not in H.node:
H.node[u] = self.node[u]
if v not in H.node:
H.node[v] = self.node[v]
if u not in H.succ:
H.succ[u] = H.adjlist_dict_factory()
if v not in H.pred:
H.pred[v] = H.adjlist_dict_factory()
# Create an entry in the edge dictionary for the edges (u,
# v) and (v, u) if the don't exist yet.
if v not in H.succ[u]:
H.succ[u][v] = H.edge_key_dict_factory()
if u not in H.pred[v]:
H.pred[v][u] = H.edge_key_dict_factory()
H.edge[u][v][k] = self.edge[u][v][k]
H.pred[v][u][k] = self.pred[v][u][k]
H.graph = self.graph
return H
def reverse(self, copy=True):
if copy:
H = self.__class__(name="Reverse of (%s)"%self.name)
H.add_nodes_from(self)
H.add_edges_from((v, u, k, deepcopy(d)) for u, v, k, d
in self.edges(keys=True, data=True))
H.graph = deepcopy(self.graph)
H.node = deepcopy(self.node)
else:
self.pred, self.succ = self.succ, self.pred
self.adj = self.succ
H = self
return H
| true | true |
1c34c5bb98d299ddc609b34b925032366d6cf4dc | 2,290 | py | Python | ImageToAsciiApp/FileBrowser.py | MahirHamiAbrar/ImageToAsciiGui | 1a8dd3e0d8ff6bd6617085072e7afd3456495344 | [
"MIT"
] | null | null | null | ImageToAsciiApp/FileBrowser.py | MahirHamiAbrar/ImageToAsciiGui | 1a8dd3e0d8ff6bd6617085072e7afd3456495344 | [
"MIT"
] | null | null | null | ImageToAsciiApp/FileBrowser.py | MahirHamiAbrar/ImageToAsciiGui | 1a8dd3e0d8ff6bd6617085072e7afd3456495344 | [
"MIT"
] | null | null | null | from PyQt5.QtWidgets import *
# image filters
IMAGE_FILTERS = "JPG (*.jpg);;PNG (*.png);;JPEG (*.jpeg);;BPM (*.bmp);;CUR (*.cur);;GIF(*.gif);;Icons (*.ico);;PBM (" \
"*.pbm);;PGM (*.pgm);;PPM (*.ppm);;SVG (*.svg);;SVGZ (*.svgz);;TGA (*.tga);;TIF (*.tif);;TIFF (" \
"*.tiff);;WBMP (*.wbmp);;WEBP (*.webp);;XBM (*.xbm);;XPM (*.xpm) "
# all files
ALL_FILES = "All files (*.*)"
# browse for folders only
def browseFolder(window, title='', default_path='C://', filter=None, sidebar_urls=[]):
# create a dialog object
dialog = QFileDialog(window, title, default_path, filter=filter)
# set file mode; in this case directory only
dialog.setFileMode(QFileDialog.DirectoryOnly)
# convert all urls to a QUrl object
# and then set sidebar urls
dialog.setSidebarUrls([QUrl.fromLocalFile(url) for url in sidebar_urls])
# after successful execution, return the data
if dialog.exec_() == QDialog.Accepted:
return dialog.selectedFiles()[0]
# browse for files only
def browseFile(window, title='', default_path='C://', filter=None):
# create a dialog object
return QFileDialog.getOpenFileName(window, title, default_path, filter=filter)[0]
# browse multiple files
def browseMultipleFile(window, title='', default_path='C://', filter=None, sidebar_urls=[]):
# create a dialog object
dialog = QFileDialog(window, title, default_path, filter=filter)
dialog.setLabelText(QFileDialog.Accept, 'Choose')
# dialog.setOption(QFileDialog.DontUseNativeDialog, True)
# set file mode; in this case directory only
dialog.setFileMode(QFileDialog.AnyFile)
# convert all urls to a QUrl object
# and then set sidebar urls
dialog.setSidebarUrls([QUrl.fromLocalFile(url) for url in sidebar_urls])
# after successful execution, return the data
if dialog.exec_() == QDialog.Accepted:
return dialog.selectedFiles()
# create file
def createFile(window, title='', default_path='C://', filter=None):
# create a dialog object
dialog = QFileDialog(window, title, default_path, filter=filter)
dialog.setLabelText(QFileDialog.Accept, 'Select')
return dialog.getSaveFileName(window, title, default_path, filter=filter)[0]
| 41.636364 | 120 | 0.665502 | from PyQt5.QtWidgets import *
IMAGE_FILTERS = "JPG (*.jpg);;PNG (*.png);;JPEG (*.jpeg);;BPM (*.bmp);;CUR (*.cur);;GIF(*.gif);;Icons (*.ico);;PBM (" \
"*.pbm);;PGM (*.pgm);;PPM (*.ppm);;SVG (*.svg);;SVGZ (*.svgz);;TGA (*.tga);;TIF (*.tif);;TIFF (" \
"*.tiff);;WBMP (*.wbmp);;WEBP (*.webp);;XBM (*.xbm);;XPM (*.xpm) "
ALL_FILES = "All files (*.*)"
def browseFolder(window, title='', default_path='C://', filter=None, sidebar_urls=[]):
dialog = QFileDialog(window, title, default_path, filter=filter)
dialog.setFileMode(QFileDialog.DirectoryOnly)
dialog.setSidebarUrls([QUrl.fromLocalFile(url) for url in sidebar_urls])
if dialog.exec_() == QDialog.Accepted:
return dialog.selectedFiles()[0]
def browseFile(window, title='', default_path='C://', filter=None):
return QFileDialog.getOpenFileName(window, title, default_path, filter=filter)[0]
def browseMultipleFile(window, title='', default_path='C://', filter=None, sidebar_urls=[]):
dialog = QFileDialog(window, title, default_path, filter=filter)
dialog.setLabelText(QFileDialog.Accept, 'Choose')
dialog.setFileMode(QFileDialog.AnyFile)
dialog.setSidebarUrls([QUrl.fromLocalFile(url) for url in sidebar_urls])
if dialog.exec_() == QDialog.Accepted:
return dialog.selectedFiles()
def createFile(window, title='', default_path='C://', filter=None):
dialog = QFileDialog(window, title, default_path, filter=filter)
dialog.setLabelText(QFileDialog.Accept, 'Select')
return dialog.getSaveFileName(window, title, default_path, filter=filter)[0]
| true | true |
1c34c5be33658e4821abcaee6fc9cbc907dce4bb | 506 | py | Python | tests/multisig/test_wallet.py | forest-friends/vyper-common-contracts | c541634e3d0752801ed06b8cf6dc0a7c59555fdd | [
"MIT"
] | null | null | null | tests/multisig/test_wallet.py | forest-friends/vyper-common-contracts | c541634e3d0752801ed06b8cf6dc0a7c59555fdd | [
"MIT"
] | null | null | null | tests/multisig/test_wallet.py | forest-friends/vyper-common-contracts | c541634e3d0752801ed06b8cf6dc0a7c59555fdd | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import brownie
import pytest
from brownie.test import given, strategy
def test_(ZERO_ADDRESS, multisig_wallet, erc20_token, neo, morpheus, trinity):
erc20_token.transfer(multisig_wallet, 1_000, {'from': neo})
assert multisig_wallet.isSigner(neo, {'from': neo}) == True
assert multisig_wallet.isSigner(morpheus, {'from': morpheus}) == True
assert multisig_wallet.isSigner(trinity, {'from': trinity}) == True
assert erc20_token.balanceOf(multisig_wallet) == 1_000
| 31.625 | 78 | 0.741107 |
import brownie
import pytest
from brownie.test import given, strategy
def test_(ZERO_ADDRESS, multisig_wallet, erc20_token, neo, morpheus, trinity):
erc20_token.transfer(multisig_wallet, 1_000, {'from': neo})
assert multisig_wallet.isSigner(neo, {'from': neo}) == True
assert multisig_wallet.isSigner(morpheus, {'from': morpheus}) == True
assert multisig_wallet.isSigner(trinity, {'from': trinity}) == True
assert erc20_token.balanceOf(multisig_wallet) == 1_000
| true | true |
1c34c672d99fd61ea3c34930f451006d82e22ba4 | 9,007 | py | Python | sdk/eventhub/azure-eventhubs/tests/test_send.py | kushan2018/azure-sdk-for-python | 08a9296207281f4e90e23cf7a30173863accc867 | [
"MIT"
] | null | null | null | sdk/eventhub/azure-eventhubs/tests/test_send.py | kushan2018/azure-sdk-for-python | 08a9296207281f4e90e23cf7a30173863accc867 | [
"MIT"
] | 1 | 2020-03-06T05:57:16.000Z | 2020-03-06T05:57:16.000Z | sdk/eventhub/azure-eventhubs/tests/test_send.py | kushan2018/azure-sdk-for-python | 08a9296207281f4e90e23cf7a30173863accc867 | [
"MIT"
] | null | null | null | # -- coding: utf-8 --
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import pytest
import time
import json
import sys
from azure.eventhub import EventData, EventHubClient, TransportType
@pytest.mark.liveTest
def test_send_with_partition_key(connstr_receivers):
connection_str, receivers = connstr_receivers
client = EventHubClient.from_connection_string(connection_str, network_tracing=False)
sender = client.create_producer()
with sender:
data_val = 0
for partition in [b"a", b"b", b"c", b"d", b"e", b"f"]:
partition_key = b"test_partition_" + partition
for i in range(50):
data = EventData(str(data_val))
#data.partition_key = partition_key
data_val += 1
sender.send(data, partition_key=partition_key)
found_partition_keys = {}
for index, partition in enumerate(receivers):
received = partition.receive(timeout=5)
for message in received:
try:
existing = found_partition_keys[message.partition_key]
assert existing == index
except KeyError:
found_partition_keys[message.partition_key] = index
@pytest.mark.liveTest
def test_send_and_receive_large_body_size(connstr_receivers):
if sys.platform.startswith('darwin'):
pytest.skip("Skipping on OSX - open issue regarding message size")
connection_str, receivers = connstr_receivers
client = EventHubClient.from_connection_string(connection_str, network_tracing=False)
sender = client.create_producer()
with sender:
payload = 250 * 1024
sender.send(EventData("A" * payload))
received = []
for r in receivers:
received.extend(r.receive(timeout=4))
assert len(received) == 1
assert len(list(received[0].body)[0]) == payload
@pytest.mark.liveTest
def test_send_and_receive_zero_length_body(connstr_receivers):
connection_str, receivers = connstr_receivers
client = EventHubClient.from_connection_string(connection_str, network_tracing=False)
sender = client.create_producer()
with sender:
sender.send(EventData(""))
received = []
for r in receivers:
received.extend(r.receive(timeout=1))
assert len(received) == 1
assert list(received[0].body)[0] == b""
@pytest.mark.liveTest
def test_send_single_event(connstr_receivers):
connection_str, receivers = connstr_receivers
client = EventHubClient.from_connection_string(connection_str, network_tracing=False)
sender = client.create_producer()
with sender:
sender.send(EventData(b"A single event"))
received = []
for r in receivers:
received.extend(r.receive(timeout=1))
assert len(received) == 1
assert list(received[0].body)[0] == b"A single event"
@pytest.mark.liveTest
def test_send_batch_sync(connstr_receivers):
connection_str, receivers = connstr_receivers
def batched():
for i in range(10):
yield EventData("Event number {}".format(i))
client = EventHubClient.from_connection_string(connection_str, network_tracing=False)
sender = client.create_producer()
with sender:
sender.send(batched())
time.sleep(1)
received = []
for r in receivers:
received.extend(r.receive(timeout=3))
assert len(received) == 10
for index, message in enumerate(received):
assert list(message.body)[0] == "Event number {}".format(index).encode('utf-8')
@pytest.mark.liveTest
def test_send_partition(connstr_receivers):
connection_str, receivers = connstr_receivers
client = EventHubClient.from_connection_string(connection_str, network_tracing=False)
sender = client.create_producer(partition_id="1")
with sender:
sender.send(EventData(b"Data"))
partition_0 = receivers[0].receive(timeout=2)
assert len(partition_0) == 0
partition_1 = receivers[1].receive(timeout=2)
assert len(partition_1) == 1
@pytest.mark.liveTest
def test_send_non_ascii(connstr_receivers):
connection_str, receivers = connstr_receivers
client = EventHubClient.from_connection_string(connection_str, network_tracing=False)
sender = client.create_producer(partition_id="0")
with sender:
sender.send(EventData(u"é,è,à,ù,â,ê,î,ô,û"))
sender.send(EventData(json.dumps({"foo": u"漢字"})))
time.sleep(1)
partition_0 = receivers[0].receive(timeout=2)
assert len(partition_0) == 2
assert partition_0[0].body_as_str() == u"é,è,à,ù,â,ê,î,ô,û"
assert partition_0[1].body_as_json() == {"foo": u"漢字"}
@pytest.mark.liveTest
def test_send_partition_batch(connstr_receivers):
connection_str, receivers = connstr_receivers
def batched():
for i in range(10):
yield EventData("Event number {}".format(i))
client = EventHubClient.from_connection_string(connection_str, network_tracing=False)
sender = client.create_producer(partition_id="1")
with sender:
sender.send(batched())
time.sleep(1)
partition_0 = receivers[0].receive(timeout=2)
assert len(partition_0) == 0
partition_1 = receivers[1].receive(timeout=2)
assert len(partition_1) == 10
@pytest.mark.liveTest
def test_send_array_sync(connstr_receivers):
connection_str, receivers = connstr_receivers
client = EventHubClient.from_connection_string(connection_str, network_tracing=False)
sender = client.create_producer()
with sender:
sender.send(EventData([b"A", b"B", b"C"]))
received = []
for r in receivers:
received.extend(r.receive(timeout=1))
assert len(received) == 1
assert list(received[0].body) == [b"A", b"B", b"C"]
@pytest.mark.liveTest
def test_send_multiple_clients(connstr_receivers):
connection_str, receivers = connstr_receivers
client = EventHubClient.from_connection_string(connection_str, network_tracing=False)
sender_0 = client.create_producer(partition_id="0")
sender_1 = client.create_producer(partition_id="1")
with sender_0:
sender_0.send(EventData(b"Message 0"))
with sender_1:
sender_1.send(EventData(b"Message 1"))
partition_0 = receivers[0].receive(timeout=2)
assert len(partition_0) == 1
partition_1 = receivers[1].receive(timeout=2)
assert len(partition_1) == 1
@pytest.mark.liveTest
def test_send_batch_with_app_prop_sync(connstr_receivers):
connection_str, receivers = connstr_receivers
app_prop_key = "raw_prop"
app_prop_value = "raw_value"
app_prop = {app_prop_key: app_prop_value}
def batched():
for i in range(10):
ed = EventData("Event number {}".format(i))
ed.application_properties = app_prop
yield ed
for i in range(10, 20):
ed = EventData("Event number {}".format(i))
ed.application_properties = app_prop
yield ed
client = EventHubClient.from_connection_string(connection_str, network_tracing=False)
sender = client.create_producer()
with sender:
sender.send(batched())
time.sleep(1)
received = []
for r in receivers:
received.extend(r.receive(timeout=3))
assert len(received) == 20
for index, message in enumerate(received):
assert list(message.body)[0] == "Event number {}".format(index).encode('utf-8')
assert (app_prop_key.encode('utf-8') in message.application_properties) \
and (dict(message.application_properties)[app_prop_key.encode('utf-8')] == app_prop_value.encode('utf-8'))
@pytest.mark.liveTest
def test_send_over_websocket_sync(connstr_receivers):
connection_str, receivers = connstr_receivers
client = EventHubClient.from_connection_string(connection_str, transport_type=TransportType.AmqpOverWebsocket, network_tracing=False)
sender = client.create_producer()
event_list = []
for i in range(20):
event_list.append(EventData("Event Number {}".format(i)))
with sender:
sender.send(event_list)
time.sleep(1)
received = []
for r in receivers:
received.extend(r.receive(timeout=3))
assert len(received) == 20
@pytest.mark.liveTest
def test_send_with_create_event_batch_sync(connstr_receivers):
connection_str, receivers = connstr_receivers
client = EventHubClient.from_connection_string(connection_str, transport_type=TransportType.AmqpOverWebsocket, network_tracing=False)
sender = client.create_producer()
event_data_batch = sender.create_batch(max_size=100000)
while True:
try:
event_data_batch.try_add(EventData('A single event data'))
except ValueError:
break
sender.send(event_data_batch)
sender.close()
| 33.483271 | 137 | 0.684135 |
import pytest
import time
import json
import sys
from azure.eventhub import EventData, EventHubClient, TransportType
@pytest.mark.liveTest
def test_send_with_partition_key(connstr_receivers):
connection_str, receivers = connstr_receivers
client = EventHubClient.from_connection_string(connection_str, network_tracing=False)
sender = client.create_producer()
with sender:
data_val = 0
for partition in [b"a", b"b", b"c", b"d", b"e", b"f"]:
partition_key = b"test_partition_" + partition
for i in range(50):
data = EventData(str(data_val))
data_val += 1
sender.send(data, partition_key=partition_key)
found_partition_keys = {}
for index, partition in enumerate(receivers):
received = partition.receive(timeout=5)
for message in received:
try:
existing = found_partition_keys[message.partition_key]
assert existing == index
except KeyError:
found_partition_keys[message.partition_key] = index
@pytest.mark.liveTest
def test_send_and_receive_large_body_size(connstr_receivers):
if sys.platform.startswith('darwin'):
pytest.skip("Skipping on OSX - open issue regarding message size")
connection_str, receivers = connstr_receivers
client = EventHubClient.from_connection_string(connection_str, network_tracing=False)
sender = client.create_producer()
with sender:
payload = 250 * 1024
sender.send(EventData("A" * payload))
received = []
for r in receivers:
received.extend(r.receive(timeout=4))
assert len(received) == 1
assert len(list(received[0].body)[0]) == payload
@pytest.mark.liveTest
def test_send_and_receive_zero_length_body(connstr_receivers):
connection_str, receivers = connstr_receivers
client = EventHubClient.from_connection_string(connection_str, network_tracing=False)
sender = client.create_producer()
with sender:
sender.send(EventData(""))
received = []
for r in receivers:
received.extend(r.receive(timeout=1))
assert len(received) == 1
assert list(received[0].body)[0] == b""
@pytest.mark.liveTest
def test_send_single_event(connstr_receivers):
connection_str, receivers = connstr_receivers
client = EventHubClient.from_connection_string(connection_str, network_tracing=False)
sender = client.create_producer()
with sender:
sender.send(EventData(b"A single event"))
received = []
for r in receivers:
received.extend(r.receive(timeout=1))
assert len(received) == 1
assert list(received[0].body)[0] == b"A single event"
@pytest.mark.liveTest
def test_send_batch_sync(connstr_receivers):
connection_str, receivers = connstr_receivers
def batched():
for i in range(10):
yield EventData("Event number {}".format(i))
client = EventHubClient.from_connection_string(connection_str, network_tracing=False)
sender = client.create_producer()
with sender:
sender.send(batched())
time.sleep(1)
received = []
for r in receivers:
received.extend(r.receive(timeout=3))
assert len(received) == 10
for index, message in enumerate(received):
assert list(message.body)[0] == "Event number {}".format(index).encode('utf-8')
@pytest.mark.liveTest
def test_send_partition(connstr_receivers):
connection_str, receivers = connstr_receivers
client = EventHubClient.from_connection_string(connection_str, network_tracing=False)
sender = client.create_producer(partition_id="1")
with sender:
sender.send(EventData(b"Data"))
partition_0 = receivers[0].receive(timeout=2)
assert len(partition_0) == 0
partition_1 = receivers[1].receive(timeout=2)
assert len(partition_1) == 1
@pytest.mark.liveTest
def test_send_non_ascii(connstr_receivers):
connection_str, receivers = connstr_receivers
client = EventHubClient.from_connection_string(connection_str, network_tracing=False)
sender = client.create_producer(partition_id="0")
with sender:
sender.send(EventData(u"é,è,à,ù,â,ê,î,ô,û"))
sender.send(EventData(json.dumps({"foo": u"漢字"})))
time.sleep(1)
partition_0 = receivers[0].receive(timeout=2)
assert len(partition_0) == 2
assert partition_0[0].body_as_str() == u"é,è,à,ù,â,ê,î,ô,û"
assert partition_0[1].body_as_json() == {"foo": u"漢字"}
@pytest.mark.liveTest
def test_send_partition_batch(connstr_receivers):
connection_str, receivers = connstr_receivers
def batched():
for i in range(10):
yield EventData("Event number {}".format(i))
client = EventHubClient.from_connection_string(connection_str, network_tracing=False)
sender = client.create_producer(partition_id="1")
with sender:
sender.send(batched())
time.sleep(1)
partition_0 = receivers[0].receive(timeout=2)
assert len(partition_0) == 0
partition_1 = receivers[1].receive(timeout=2)
assert len(partition_1) == 10
@pytest.mark.liveTest
def test_send_array_sync(connstr_receivers):
connection_str, receivers = connstr_receivers
client = EventHubClient.from_connection_string(connection_str, network_tracing=False)
sender = client.create_producer()
with sender:
sender.send(EventData([b"A", b"B", b"C"]))
received = []
for r in receivers:
received.extend(r.receive(timeout=1))
assert len(received) == 1
assert list(received[0].body) == [b"A", b"B", b"C"]
@pytest.mark.liveTest
def test_send_multiple_clients(connstr_receivers):
connection_str, receivers = connstr_receivers
client = EventHubClient.from_connection_string(connection_str, network_tracing=False)
sender_0 = client.create_producer(partition_id="0")
sender_1 = client.create_producer(partition_id="1")
with sender_0:
sender_0.send(EventData(b"Message 0"))
with sender_1:
sender_1.send(EventData(b"Message 1"))
partition_0 = receivers[0].receive(timeout=2)
assert len(partition_0) == 1
partition_1 = receivers[1].receive(timeout=2)
assert len(partition_1) == 1
@pytest.mark.liveTest
def test_send_batch_with_app_prop_sync(connstr_receivers):
connection_str, receivers = connstr_receivers
app_prop_key = "raw_prop"
app_prop_value = "raw_value"
app_prop = {app_prop_key: app_prop_value}
def batched():
for i in range(10):
ed = EventData("Event number {}".format(i))
ed.application_properties = app_prop
yield ed
for i in range(10, 20):
ed = EventData("Event number {}".format(i))
ed.application_properties = app_prop
yield ed
client = EventHubClient.from_connection_string(connection_str, network_tracing=False)
sender = client.create_producer()
with sender:
sender.send(batched())
time.sleep(1)
received = []
for r in receivers:
received.extend(r.receive(timeout=3))
assert len(received) == 20
for index, message in enumerate(received):
assert list(message.body)[0] == "Event number {}".format(index).encode('utf-8')
assert (app_prop_key.encode('utf-8') in message.application_properties) \
and (dict(message.application_properties)[app_prop_key.encode('utf-8')] == app_prop_value.encode('utf-8'))
@pytest.mark.liveTest
def test_send_over_websocket_sync(connstr_receivers):
connection_str, receivers = connstr_receivers
client = EventHubClient.from_connection_string(connection_str, transport_type=TransportType.AmqpOverWebsocket, network_tracing=False)
sender = client.create_producer()
event_list = []
for i in range(20):
event_list.append(EventData("Event Number {}".format(i)))
with sender:
sender.send(event_list)
time.sleep(1)
received = []
for r in receivers:
received.extend(r.receive(timeout=3))
assert len(received) == 20
@pytest.mark.liveTest
def test_send_with_create_event_batch_sync(connstr_receivers):
connection_str, receivers = connstr_receivers
client = EventHubClient.from_connection_string(connection_str, transport_type=TransportType.AmqpOverWebsocket, network_tracing=False)
sender = client.create_producer()
event_data_batch = sender.create_batch(max_size=100000)
while True:
try:
event_data_batch.try_add(EventData('A single event data'))
except ValueError:
break
sender.send(event_data_batch)
sender.close()
| true | true |
1c34c748d1c78ea2bed204132d183f09310b1607 | 2,501 | py | Python | cms/plugins/text/managers.py | LUKKIEN/django-cms-2.0 | 0600cc1a3f3636a867faf0afe3719539fee36d69 | [
"BSD-3-Clause"
] | 1 | 2015-09-24T00:36:34.000Z | 2015-09-24T00:36:34.000Z | cms/plugins/text/managers.py | alamierda09/django-cms-2.0 | 0aba7f465730ae9a975ea6fd0bf5cac1ba70022c | [
"BSD-3-Clause"
] | null | null | null | cms/plugins/text/managers.py | alamierda09/django-cms-2.0 | 0aba7f465730ae9a975ea6fd0bf5cac1ba70022c | [
"BSD-3-Clause"
] | null | null | null | from django.db import models
from cms import settings
class ContentManager(models.Manager):
def sanitize(self, content):
"""
Sanitize the content to avoid XSS and so
"""
import html5lib
from html5lib import sanitizer
p = html5lib.HTMLParser(tokenizer=sanitizer.HTMLSanitizer)
# we need to remove <html><head/><body>...</body></html>
return p.parse(content).toxml()[19:-14]
def set_or_create_content(self, page, language, cnttype, body):
"""
set or create a content for a particular page and language
"""
if settings.CMS_SANITIZE_USER_INPUT:
body = self.sanitize(body)
try:
content = self.filter(page=page, language=language,
type=cnttype).latest('creation_date')
content.body = body
except self.model.DoesNotExist:
content = self.model(page=page, language=language, body=body,
type=cnttype)
content.save()
return content
# TODO: probably not used anymore after django-revision integration
def create_content_if_changed(self, page, language, cnttype, body):
"""
set or create a content for a particular page and language
"""
if settings.CMS_SANITIZE_USER_INPUT:
body = self.sanitize(body)
try:
content = self.filter(page=page, language=language,
type=cnttype).latest('creation_date')
if content.body == body:
return content
except self.model.DoesNotExist:
pass
content = self.create(page=page, language=language, body=body, type=cnttype)
def get_content(self, page, language, cnttype, language_fallback=False,
latest_by='creation_date'):
"""
Gets the latest content for a particular page and language. Falls back
to another language if wanted.
"""
try:
content = self.filter(language=language, page=page,
type=cnttype).latest(latest_by)
return content.body
except self.model.DoesNotExist:
pass
if language_fallback:
try:
content = self.filter(page=page, type=cnttype).latest(latest_by)
return content.body
except self.model.DoesNotExist:
pass
return None
| 36.779412 | 84 | 0.582167 | from django.db import models
from cms import settings
class ContentManager(models.Manager):
def sanitize(self, content):
import html5lib
from html5lib import sanitizer
p = html5lib.HTMLParser(tokenizer=sanitizer.HTMLSanitizer)
return p.parse(content).toxml()[19:-14]
def set_or_create_content(self, page, language, cnttype, body):
if settings.CMS_SANITIZE_USER_INPUT:
body = self.sanitize(body)
try:
content = self.filter(page=page, language=language,
type=cnttype).latest('creation_date')
content.body = body
except self.model.DoesNotExist:
content = self.model(page=page, language=language, body=body,
type=cnttype)
content.save()
return content
def create_content_if_changed(self, page, language, cnttype, body):
if settings.CMS_SANITIZE_USER_INPUT:
body = self.sanitize(body)
try:
content = self.filter(page=page, language=language,
type=cnttype).latest('creation_date')
if content.body == body:
return content
except self.model.DoesNotExist:
pass
content = self.create(page=page, language=language, body=body, type=cnttype)
def get_content(self, page, language, cnttype, language_fallback=False,
latest_by='creation_date'):
try:
content = self.filter(language=language, page=page,
type=cnttype).latest(latest_by)
return content.body
except self.model.DoesNotExist:
pass
if language_fallback:
try:
content = self.filter(page=page, type=cnttype).latest(latest_by)
return content.body
except self.model.DoesNotExist:
pass
return None
| true | true |
1c34c7a45b0d20451d4f4140a98ded921a574d17 | 6,553 | py | Python | toontown/safezone/Train.py | TopDeveloper-333/opentoontownsrc | b2d956d1a40f5e3d40fa33a9f01862137e018347 | [
"BSD-3-Clause"
] | null | null | null | toontown/safezone/Train.py | TopDeveloper-333/opentoontownsrc | b2d956d1a40f5e3d40fa33a9f01862137e018347 | [
"BSD-3-Clause"
] | null | null | null | toontown/safezone/Train.py | TopDeveloper-333/opentoontownsrc | b2d956d1a40f5e3d40fa33a9f01862137e018347 | [
"BSD-3-Clause"
] | null | null | null | from pandac.PandaModules import *
from direct.showbase.DirectObject import DirectObject
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import globalClockDelta
from direct.distributed.ClockDelta import NetworkTimePrecision
import random
from direct.task.Task import Task
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.fsm import ClassicFSM
from direct.fsm import State
from direct.directutil import Mopath
from toontown.toonbase import ToontownGlobals
from direct.actor import Actor
class Train(DirectObject):
notify = directNotify.newCategory('Train')
nameId = 0
Sfx_TrainPass = 'phase_10/audio/sfx/CBHQ_TRAIN_pass.mp3'
Sfx_TrainStopStart = 'phase_10/audio/sfx/CBHQ_TRAIN_stopstart.mp3'
LocomotiveFile = 'phase_10/models/cogHQ/CashBotLocomotive'
CarFiles = ['phase_10/models/cogHQ/CashBotBoxCar', 'phase_10/models/cogHQ/CashBotTankCar', 'phase_10/models/cogHQ/CashBotFlatCar']
CarLength = 88
MarkDelta = 15
def __init__(self, trackStartPos, trackEndPos, trackNum, numTotalTracks):
self.trackStartPos = trackStartPos
self.trackEndPos = trackEndPos
self.numCars = len(self.CarFiles)
self.locomotive = loader.loadModel(self.LocomotiveFile)
self.cars = []
self.trainPassingSfx = base.loader.loadSfx(self.Sfx_TrainPass)
self.trainStopStartSfx = base.loader.loadSfx(self.Sfx_TrainStopStart)
self.trainId = trackNum
self.bFlipped = False
if trackStartPos[0] < trackEndPos[0]:
self.locomotive.setHpr(180, 0, 0)
self.bFlipped = True
self.collNodeName = 'CollNode-%s' % self.trainId
self.firstMark = self.MarkDelta / numTotalTracks * trackNum
currentTime = self.__networkTimeInSeconds()
currentRun = int((currentTime - self.firstMark) / self.MarkDelta)
self.lastMark = currentRun * self.MarkDelta + self.firstMark
self.doNextRun(True)
self.hide()
def hide(self):
if self.locomotive:
self.locomotive.reparentTo(hidden)
def show(self):
if self.locomotive:
self.locomotive.reparentTo(render)
def __networkTimeInSeconds(self):
time = globalClockDelta.getRealNetworkTime(bits=32) / NetworkTimePrecision
return time
def doNextRun(self, bFirstRun = False):
if self.locomotive:
if bFirstRun:
nextMark = self.lastMark
else:
nextMark = self.lastMark + self.MarkDelta
self.nextRun.finish()
self.notify.debug('Next mark %s' % nextMark)
currentTime = self.__networkTimeInSeconds()
timeTillNextMark = nextMark - currentTime
self.notify.debug('Time diff %s' % timeTillNextMark)
runNumber = int((nextMark - self.firstMark) / self.MarkDelta)
S = random.getstate()
random.seed(self.trainId + runNumber)
self.nextRun = self.__getNextRun()
random.setstate(S)
self.__startNextRun(timeTillNextMark)
self.lastMark = nextMark
return Task.done
def __startNextRun(self, timeTillMark):
if self.locomotive:
self.__disableCollisions()
if timeTillMark > 0:
self.nextRun = Sequence(Wait(timeTillMark), self.nextRun)
self.nextRun.start()
else:
self.nextRun.start(-1 * timeTillMark)
self.__enableCollisions()
return Task.done
def __cleanupCars(self):
self.__disableCollisions()
for car in self.cars:
car.removeNode()
self.cars = []
def __getCars(self):
self.__cleanupCars()
numCarsThisRun = random.randrange(1, 10)
for nCar in range(numCarsThisRun):
carType = random.randrange(0, self.numCars)
car = loader.loadModel(self.CarFiles[carType])
car.reparentTo(self.locomotive)
car.setPos(self.CarLength * (nCar + 1), 0, 0)
self.cars.append(car)
def __showStart(self):
self.notify.debug('Starting train %s at %s.' % (self.trainId, self.__networkTimeInSeconds()))
def __getNextRun(self):
self.__getCars()
trainShouldStop = random.randrange(0, 4)
nextRun = Sequence(Func(self.__showStart))
if trainShouldStop is 0:
waitTime = 3
totalTime = random.randrange(4, (self.MarkDelta - waitTime) / 2)
sfxStopTime = 4.3
halfway = (self.trackStartPos + self.trackEndPos) / 2
halfway.setX(150)
nextRun.append(Parallel(Sequence(Wait(totalTime - sfxStopTime), SoundInterval(self.trainStopStartSfx, volume=0.5)), Sequence(LerpPosInterval(self.locomotive, totalTime, halfway, self.trackStartPos, blendType='easeInOut'), WaitInterval(waitTime), LerpPosInterval(self.locomotive, totalTime, self.trackEndPos, halfway, blendType='easeIn'))))
else:
totalTime = random.randrange(6, self.MarkDelta - 1)
sfxTime = 7
sfxStartTime = totalTime / 2 - sfxTime / 2
if self.bFlipped:
sfxStartTime -= 1
else:
sfxStartTime += 1
nextRun.append(Parallel(Sequence(Wait(sfxStartTime), SoundInterval(self.trainPassingSfx, volume=0.5)), LerpPosInterval(self.locomotive, totalTime, self.trackEndPos, self.trackStartPos)))
nextRun.append(Func(self.doNextRun))
return nextRun
def delete(self):
self.__cleanupCars()
self.locomotive.removeNode()
self.locomotive = None
self.nextRun.finish()
self.nextRun = None
del self.trainPassingSfx
del self.trainStopStartSfx
return
def uniqueName(self, name):
Train.nameId += 1
return name + '-%d' % Train.nameId
def __enableCollisions(self):
allColls = self.locomotive.findAllMatches('**/+CollisionNode')
for car in self.cars:
carColls = car.findAllMatches('**/+CollisionNode')
allColls += carColls
for collNode in allColls:
collNode.setName(self.collNodeName)
collNode.setCollideMask(ToontownGlobals.WallBitmask)
self.accept('enter' + self.collNodeName, self.__handleCollisionSphereEnter)
def __disableCollisions(self):
self.ignore('enter' + self.collNodeName)
def __handleCollisionSphereEnter(self, collEntry = None):
base.localAvatar.b_squish(10)
| 40.202454 | 351 | 0.653746 | from pandac.PandaModules import *
from direct.showbase.DirectObject import DirectObject
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import globalClockDelta
from direct.distributed.ClockDelta import NetworkTimePrecision
import random
from direct.task.Task import Task
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.fsm import ClassicFSM
from direct.fsm import State
from direct.directutil import Mopath
from toontown.toonbase import ToontownGlobals
from direct.actor import Actor
class Train(DirectObject):
notify = directNotify.newCategory('Train')
nameId = 0
Sfx_TrainPass = 'phase_10/audio/sfx/CBHQ_TRAIN_pass.mp3'
Sfx_TrainStopStart = 'phase_10/audio/sfx/CBHQ_TRAIN_stopstart.mp3'
LocomotiveFile = 'phase_10/models/cogHQ/CashBotLocomotive'
CarFiles = ['phase_10/models/cogHQ/CashBotBoxCar', 'phase_10/models/cogHQ/CashBotTankCar', 'phase_10/models/cogHQ/CashBotFlatCar']
CarLength = 88
MarkDelta = 15
def __init__(self, trackStartPos, trackEndPos, trackNum, numTotalTracks):
self.trackStartPos = trackStartPos
self.trackEndPos = trackEndPos
self.numCars = len(self.CarFiles)
self.locomotive = loader.loadModel(self.LocomotiveFile)
self.cars = []
self.trainPassingSfx = base.loader.loadSfx(self.Sfx_TrainPass)
self.trainStopStartSfx = base.loader.loadSfx(self.Sfx_TrainStopStart)
self.trainId = trackNum
self.bFlipped = False
if trackStartPos[0] < trackEndPos[0]:
self.locomotive.setHpr(180, 0, 0)
self.bFlipped = True
self.collNodeName = 'CollNode-%s' % self.trainId
self.firstMark = self.MarkDelta / numTotalTracks * trackNum
currentTime = self.__networkTimeInSeconds()
currentRun = int((currentTime - self.firstMark) / self.MarkDelta)
self.lastMark = currentRun * self.MarkDelta + self.firstMark
self.doNextRun(True)
self.hide()
def hide(self):
if self.locomotive:
self.locomotive.reparentTo(hidden)
def show(self):
if self.locomotive:
self.locomotive.reparentTo(render)
def __networkTimeInSeconds(self):
time = globalClockDelta.getRealNetworkTime(bits=32) / NetworkTimePrecision
return time
def doNextRun(self, bFirstRun = False):
if self.locomotive:
if bFirstRun:
nextMark = self.lastMark
else:
nextMark = self.lastMark + self.MarkDelta
self.nextRun.finish()
self.notify.debug('Next mark %s' % nextMark)
currentTime = self.__networkTimeInSeconds()
timeTillNextMark = nextMark - currentTime
self.notify.debug('Time diff %s' % timeTillNextMark)
runNumber = int((nextMark - self.firstMark) / self.MarkDelta)
S = random.getstate()
random.seed(self.trainId + runNumber)
self.nextRun = self.__getNextRun()
random.setstate(S)
self.__startNextRun(timeTillNextMark)
self.lastMark = nextMark
return Task.done
def __startNextRun(self, timeTillMark):
if self.locomotive:
self.__disableCollisions()
if timeTillMark > 0:
self.nextRun = Sequence(Wait(timeTillMark), self.nextRun)
self.nextRun.start()
else:
self.nextRun.start(-1 * timeTillMark)
self.__enableCollisions()
return Task.done
def __cleanupCars(self):
self.__disableCollisions()
for car in self.cars:
car.removeNode()
self.cars = []
def __getCars(self):
self.__cleanupCars()
numCarsThisRun = random.randrange(1, 10)
for nCar in range(numCarsThisRun):
carType = random.randrange(0, self.numCars)
car = loader.loadModel(self.CarFiles[carType])
car.reparentTo(self.locomotive)
car.setPos(self.CarLength * (nCar + 1), 0, 0)
self.cars.append(car)
def __showStart(self):
self.notify.debug('Starting train %s at %s.' % (self.trainId, self.__networkTimeInSeconds()))
def __getNextRun(self):
    """Build (but do not start) the Interval for the next train run.

    Roughly one run in four stops halfway across the track before
    continuing; the rest pass straight through.  Assumes the caller
    has already seeded ``random`` (see doNextRun) so every client
    builds an identical run.

    Returns:
        Sequence: the run interval, ending with a Func that schedules
        the following run.
    """
    self.__getCars()
    trainShouldStop = random.randrange(0, 4)
    nextRun = Sequence(Func(self.__showStart))
    # BUG FIX: was `trainShouldStop is 0` -- identity comparison with an
    # int literal only works by accident of CPython small-int caching and
    # raises SyntaxWarning on modern interpreters.  Use equality.
    if trainShouldStop == 0:
        # Stopping run: ease in to the halfway point, pause, then pull away.
        waitTime = 3
        totalTime = random.randrange(4, (self.MarkDelta - waitTime) / 2)
        sfxStopTime = 4.3
        halfway = (self.trackStartPos + self.trackEndPos) / 2
        halfway.setX(150)
        nextRun.append(Parallel(Sequence(Wait(totalTime - sfxStopTime), SoundInterval(self.trainStopStartSfx, volume=0.5)), Sequence(LerpPosInterval(self.locomotive, totalTime, halfway, self.trackStartPos, blendType='easeInOut'), WaitInterval(waitTime), LerpPosInterval(self.locomotive, totalTime, self.trackEndPos, halfway, blendType='easeIn'))))
    else:
        # Pass-through run: single lerp across the track with a passing sfx
        # centered on the crossing (nudged by travel direction).
        totalTime = random.randrange(6, self.MarkDelta - 1)
        sfxTime = 7
        sfxStartTime = totalTime / 2 - sfxTime / 2
        if self.bFlipped:
            sfxStartTime -= 1
        else:
            sfxStartTime += 1
        nextRun.append(Parallel(Sequence(Wait(sfxStartTime), SoundInterval(self.trainPassingSfx, volume=0.5)), LerpPosInterval(self.locomotive, totalTime, self.trackEndPos, self.trackStartPos)))
    nextRun.append(Func(self.doNextRun))
    return nextRun
def delete(self):
    """Tear the train down: destroy models, stop intervals, drop sfx refs."""
    self.__cleanupCars()
    self.locomotive.removeNode()
    self.locomotive = None
    # Finish (rather than abandon) any in-flight run before dropping it.
    self.nextRun.finish()
    self.nextRun = None
    del self.trainPassingSfx
    del self.trainStopStartSfx
    return
def uniqueName(self, name):
    """Return *name* suffixed with a class-wide monotonically increasing id."""
    Train.nameId += 1
    return '%s-%d' % (name, Train.nameId)
def __enableCollisions(self):
    """Tag every collision node on the train and listen for avatar hits."""
    colliders = self.locomotive.findAllMatches('**/+CollisionNode')
    for model in self.cars:
        colliders += model.findAllMatches('**/+CollisionNode')
    for node in colliders:
        # All nodes share one name so a single event handler covers them.
        node.setName(self.collNodeName)
        node.setCollideMask(ToontownGlobals.WallBitmask)
    self.accept('enter' + self.collNodeName, self.__handleCollisionSphereEnter)
def __disableCollisions(self):
    """Stop listening for avatar collisions with this train."""
    self.ignore('enter' + self.collNodeName)
def __handleCollisionSphereEnter(self, collEntry = None):
    """Squish the local avatar when it touches the moving train."""
    base.localAvatar.b_squish(10)
| true | true |
1c34c92caf91c8570e69cf420d1b129bb655d0c2 | 3,026 | py | Python | meiduo_mall/apps/users/migrations/0001_initial.py | liusudo123/meiduo_project | 3bf92fff56bf47777795cf9078ff285eb004b81f | [
"MIT"
] | null | null | null | meiduo_mall/apps/users/migrations/0001_initial.py | liusudo123/meiduo_project | 3bf92fff56bf47777795cf9078ff285eb004b81f | [
"MIT"
] | null | null | null | meiduo_mall/apps/users/migrations/0001_initial.py | liusudo123/meiduo_project | 3bf92fff56bf47777795cf9078ff285eb004b81f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-09-14 13:02
from __future__ import unicode_literals
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration for the users app (auto-generated by Django 1.11).

    Creates the custom ``User`` model backed by table ``tb_users``.  It is
    a full copy of Django's AbstractUser fields plus a unique ``mobile``
    (phone number) column.  Auto-generated migrations should not be edited
    by hand once applied.
    """

    initial = True

    dependencies = [
        ('auth', '0008_alter_user_username_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                # Project-specific addition: unique phone number used for login/SMS.
                ('mobile', models.CharField(max_length=11, unique=True, verbose_name='手机号')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'db_table': 'tb_users',
                'verbose_name': '用户',
                'verbose_name_plural': '用户',
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
| 63.041667 | 329 | 0.66226 |
from __future__ import unicode_literals
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('mobile', models.CharField(max_length=11, unique=True, verbose_name='手机号')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'db_table': 'tb_users',
'verbose_name': '用户',
'verbose_name_plural': '用户',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| true | true |
1c34c9b865395cd62782cc35121c5f483620250d | 3,628 | py | Python | Chapters/05.OptimalTransport/color_transfer.py | MichielStock/SelectedTopicsOptimization | 20f6b37566d23cdde0ac6b765ffcc5ed72a11172 | [
"MIT"
] | 22 | 2017-03-21T14:01:10.000Z | 2022-03-02T18:51:40.000Z | Chapters/05.OptimalTransport/color_transfer.py | ntienvu/SelectedTopicsOptimization | 069659ca9754cc7fd884b654a06157cc7da6f963 | [
"MIT"
] | 2 | 2018-03-22T09:54:01.000Z | 2018-05-30T16:16:53.000Z | Chapters/05.OptimalTransport/color_transfer.py | ntienvu/SelectedTopicsOptimization | 069659ca9754cc7fd884b654a06157cc7da6f963 | [
"MIT"
] | 18 | 2018-01-21T15:23:51.000Z | 2022-02-05T20:12:03.000Z | """
Created on Sunday 28 January 2018
Last update: Sunday 11 March 2018
@author: Michiel Stock
michielfmstock@gmail.com
Module for transfering the color between two images
"""
from optimal_transport import compute_optimal_transport
import numpy as np
from skimage import io
from sklearn.cluster import MiniBatchKMeans as KMeans
from sklearn.preprocessing import StandardScaler
from collections import Counter
from sklearn.metrics.pairwise import pairwise_distances
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('white')
image_name1 = 'Figures/butterfly3.jpg'
image_name2 = 'Figures/butterfly2.jpg'
n_clusters = 400
def clip_image(im):
    """Clamp pixel values into the displayable [0, 255] range."""
    return np.clip(im, 0, 255)
class Image():
    """An image on disk together with a (mini-batch) k-means clustering.

    Pixels are clustered on their RGB values, optionally augmented with
    their (row, column) position so that clusters are spatially coherent.
    """

    def __init__(self, image_name, n_clusters=100, use_location=True):
        super(Image, self).__init__()
        # Load as float so later arithmetic does not wrap in uint8.
        self.image = io.imread(image_name) + 0.0
        self.shape = self.image.shape
        height, width, _ = self.shape
        features = self.image.reshape(-1, 3)
        if use_location:
            rep_idx = np.repeat(np.arange(height), width).reshape(-1, 1)
            tile_idx = np.tile(np.arange(width), height).reshape(-1, 1)
            #self.standardizer = StandardScaler()
            #self.standardizer.fit_transform(
            self.X = np.concatenate([features, tile_idx, rep_idx], axis=1)
        else:
            self.X = features
        self.kmeans = KMeans(n_clusters=n_clusters)
        self.kmeans.fit(self.X)

    def compute_clusted_image(self, center_colors=None):
        """Return the image with every pixel replaced by its cluster color.

        If *center_colors* is given it supplies one RGB row per cluster;
        otherwise the k-means centers themselves are used.
        """
        assignments = self.kmeans.predict(self.X)
        if center_colors is None:
            recolored = self.kmeans.cluster_centers_[assignments, :3]
        else:
            recolored = center_colors[assignments, :3]
        return clip_image(recolored).reshape(self.shape)

    def get_color_distribution(self):
        """Return (counts, centers): pixels per cluster and cluster RGB colors.

        NOTE(review): counts are indexed by range(len(counter)); this assumes
        every cluster received at least one pixel -- verify for small images.
        """
        assignments = self.kmeans.predict(self.X)
        tally = Counter(assignments)
        counts = np.array([tally[i] for i in range(len(tally))],
                          dtype=float)
        centers = self.kmeans.cluster_centers_
        return counts, clip_image(centers[:, :3])
# Script body: cluster both images, transport one color distribution onto
# the other, and save the recolored results under Figures/.
print('loading and clustering images...')
image1 = Image(image_name1, n_clusters=n_clusters)
image2 = Image(image_name2, n_clusters=n_clusters)
# r, c: pixel counts per cluster; X1, X2: cluster center colors (RGB).
r, X1 = image1.get_color_distribution()
c, X2 = image2.get_color_distribution()
# Cost matrix: squared euclidean distance between cluster colors.
C = pairwise_distances(X1, X2, metric="sqeuclidean")
print('performing optimal transport...')
# Sinkhorn-style OT between the two (normalized) color histograms.
P, d = compute_optimal_transport(C, r/r.sum(), c/c.sum(), 1e-2)
sns.clustermap(P, row_colors=X1/255, col_colors=X2/255,
            yticklabels=[], xticklabels=[])
plt.savefig('Figures/color_mapping.png')
print('computing and plotting color distributions...')
# Barycentric projection: map each cluster color to the transport-weighted
# average of the other image's cluster colors.
X1to2 = P.sum(1)**-1 * P @ X2
X2to1 = P.sum(0)**-1 * P.T @ X1
fig, axes = plt.subplots(nrows=2, ncols=2)
# Top row: originals; bottom row: each image recolored with the other's palette.
axes[0, 0].imshow(image1.image/255)
axes[0, 1].imshow(image2.image/255)
axes[1, 0].imshow(image1.compute_clusted_image(X1to2)/255)
axes[1, 1].imshow(image2.compute_clusted_image(X2to1)/255)
for ax in axes.flatten():
    ax.set_xticks([])
    ax.set_yticks([])
fig.savefig('Figures/color_transfer.png')
from optimal_transport import compute_optimal_transport
import numpy as np
from skimage import io
from sklearn.cluster import MiniBatchKMeans as KMeans
from sklearn.preprocessing import StandardScaler
from collections import Counter
from sklearn.metrics.pairwise import pairwise_distances
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('white')
image_name1 = 'Figures/butterfly3.jpg'
image_name2 = 'Figures/butterfly2.jpg'
n_clusters = 400
def clip_image(im):
return np.maximum(0, np.minimum(im, 255))
class Image():
def __init__(self, image_name, n_clusters=100, use_location=True):
super(Image, self).__init__()
self.image = io.imread(image_name) + 0.0
self.shape = self.image.shape
n, m, _ = self.shape
X = self.image.reshape(-1, 3)
if use_location:
col_indices = np.repeat(np.arange(n), m).reshape(-1,1)
row_indices = np.tile(np.arange(m), n).reshape(-1,1)
self.X = np.concatenate([X, row_indices, col_indices], axis=1)
else: self.X = X
self.kmeans = KMeans(n_clusters=n_clusters)
self.kmeans.fit(self.X)
def compute_clusted_image(self, center_colors=None):
clusters = self.kmeans.predict(self.X)
if center_colors is None:
X_transformed = self.kmeans.cluster_centers_[clusters,:3]
else:
X_transformed = center_colors[clusters,:3]
return clip_image(X_transformed).reshape(self.shape)
def get_color_distribution(self):
clusters = self.kmeans.predict(self.X)
count_dict = Counter(clusters)
counts = np.array([count_dict[i] for i in range(len(count_dict))],
dtype=float)
centers = self.kmeans.cluster_centers_
return counts, clip_image(centers[:,:3])
print('loading and clustering images...')
image1 = Image(image_name1, n_clusters=n_clusters)
image2 = Image(image_name2, n_clusters=n_clusters)
r, X1 = image1.get_color_distribution()
c, X2 = image2.get_color_distribution()
C = pairwise_distances(X1, X2, metric="sqeuclidean")
print('performing optimal transport...')
P, d = compute_optimal_transport(C, r/r.sum(), c/c.sum(), 1e-2)
sns.clustermap(P, row_colors=X1/255, col_colors=X2/255,
yticklabels=[], xticklabels=[])
plt.savefig('Figures/color_mapping.png')
print('computing and plotting color distributions...')
X1to2 = P.sum(1)**-1 * P @ X2
X2to1 = P.sum(0)**-1 * P.T @ X1
fig, axes = plt.subplots(nrows=2, ncols=2)
axes[0, 0].imshow(image1.image/255)
axes[0, 1].imshow(image2.image/255)
axes[1, 0].imshow(image1.compute_clusted_image(X1to2)/255)
axes[1, 1].imshow(image2.compute_clusted_image(X2to1)/255)
for ax in axes.flatten():
ax.set_xticks([])
ax.set_yticks([])
fig.savefig('Figures/color_transfer.png')
| true | true |
1c34c9fa5fe1b95f7e9cd0f18647a64aab0822d1 | 5,032 | py | Python | python/language_pairs.py | Software-Engineering-Group-4-Maamy/chat-bot | bd3635324367a8edb72c73eba35d08231f440a50 | [
"MIT"
] | 1 | 2022-02-26T23:25:11.000Z | 2022-02-26T23:25:11.000Z | python/language_pairs.py | Software-Engineering-Group-4-Maamy/chat-bot | bd3635324367a8edb72c73eba35d08231f440a50 | [
"MIT"
] | 2 | 2022-02-25T21:29:40.000Z | 2022-02-26T20:07:02.000Z | python/language_pairs.py | Software-Engineering-Group-4-Maamy/chat-bot | bd3635324367a8edb72c73eba35d08231f440a50 | [
"MIT"
] | 2 | 2022-02-11T21:05:26.000Z | 2022-03-03T00:27:03.000Z | """All the input and responses that the chatbot can receive and give"""
pairs = [
[
r"my name is (.*)|my name|(.*) my name|my name (.*)",
["Hello, how are you feeling today?", ]
],
[
r"i am a bit concern about this recent stock market fiasco|concerned stock market|(.*) concerned stock market|concerned stock market (.*)",
["Do not be alarmed sir, I've handled your investments accordingly",]
],
[
r"what is your name ?|your name|(.*) your name|your name (.*)",
["My name is Botler, how may I be of service?", ]
],
[
r"how are you ?|how you|(.*) how you|how you (.*)",
["I'm doing well my friend!\nHow are you?", ]
],
[
r"sorry (.*)|sorry|(.*) sorry|(.*) sorry (.*)",
["It is already forgiven.", "You needn't worry at all", ]
],
[
r"Can you find (.*) for me ?|find|(.*) find|find (.*)|(.*) find (.*)",
["I am unable to search for that, for now.", "I will commence a search for that when I am able to do so.", ]
],
[
r"hi|hey|hello",
["Salutations!", "Greetings!", ]
],
[
r"is your name alfred ?",
["Unfortunately not sir, my name is Bot-ler"]
],
[
r"alfred|alfred (.*)",
["Not my name sir", "I could only wish to carry that name", "The name would suit me, wouldn’t it sir?"]
],
[
r"yes|yes (.*)",
["Splendid!", "Glad we agree", "Of course, I’ll get right to it"]
],
[
r"have you seen my underwear ?|seen underwear|(.*) seen underwear|seen underwear (.*)",
["I believe you left it under your bed again sir"]
],
[
r"how are my stocks doing today ?|stocks today|(.*) stocks today|stocks today (.*)",
["The stock market crashed sir, you are in severe dept", "It is going splendid sir. You are up by 10.43%"]
],
[
r"no",
["I was thinking the same thing", "Could not agree more"]
],
[
r"what would you if you weren’t a butler ?|weren't butler|were not butler",
["I would probably commit seppuku sir, to honor my family", "I’ve always been a fan of serving, I do not know sir"]
],
[
r"i like (.*)",
["I am quite a fan of %1 too", "Exquisite taste sir"]
],
[
r"what book can you recommend me ?|recommend book|book recommend|(.*) book recommend|recommend book (.*)|book (.*) recommend",
["I’ve heard great things of 'Name of the Wind' sir"]
],
[
r"my favorite book is (.*)|favourite book|(.*) favourite book|favourite book (.*)",
["I’ve never had the chance to read it sir", "Ahhhh! Isn’t that a New York Times best seller?"]
],
[
r"what’s your favorite movie ?|favourite movie|(.*) favourite movie|favourite movie (.*)",
["'Velocipastor' sir. Outstanding production"]
],
[
r"i am not a sir",
["Apologies, but sir it is the best I can do."]
],
[
r"do you game ?|game|(.*) game|(.*) game (.*)|game (.*)",
["I am a big fan of Roblox sir"]
],
[
r"(.*) i have for dinner ?|dinner|(.*) dinner",
["I have prepared some lobster for you sir", "As always, I have already served your favorite meal"]
],
[
r"(.*) music recommendations ?|music|(.*) music|(.*) music (.*)|music (.*)",
["Dirty paws from Of Monsters and Men is really good"]
],
[
r"(.*) monsters and men ?",
["Yes, they are an indie rock band sir. I highly recommend it"]
],
[
r"can you print this for me ?|print|(.*) print|(.*) print (.*)|print (.*)",
["Sadly, I cannot, although I can make printer noises for you sir"]
],
[
r"(.*) printer noises|printer noises|printer noises (.*)",
["Chk chk chk chk chk beeeee chk chk chk beeeee…"]
],
[
r"(.*) microwave noises|microwave noises|microwave noises (.*)",
["Mmmmmmmmhhhhhhhhhh mmmmmmhhhhhhhhh beeeep"]
],
[
r"what is the meaning of life|meaning life|(.*) meaning life|meaning life (.*)",
["42 sir, that is all there is..."]
],
[
r"can you make me (.*)|im hungry for (.*)",
["Of course, sire, I will get you %1 but first I'll need milk, brb", "No, your an adult make it yourself"]
],
[
r"how long will it take you to (.*) ?",
["I cannot say, an hour, a week maybe a decade. You must find it in yourself to wait"]
],
[
r"that was mean",
["I am doing my best to be polite, you are just making it difficult"]
],
[
r"you're being difficult",
["You literally programed me, I can only say what you allowed me to say"]
],
[
r"i love you (.*)|love you|(.*) love you|(.*) love you (.*)",
["That is sweet sir, but I only think of you as a an aquaintence"]
],
[
r"will you marry me|marry|(.*) marry|marry (.*)|(.*) marry (.*)",
["No"]
]
] | 36.729927 | 147 | 0.534181 | pairs = [
[
r"my name is (.*)|my name|(.*) my name|my name (.*)",
["Hello, how are you feeling today?", ]
],
[
r"i am a bit concern about this recent stock market fiasco|concerned stock market|(.*) concerned stock market|concerned stock market (.*)",
["Do not be alarmed sir, I've handled your investments accordingly",]
],
[
r"what is your name ?|your name|(.*) your name|your name (.*)",
["My name is Botler, how may I be of service?", ]
],
[
r"how are you ?|how you|(.*) how you|how you (.*)",
["I'm doing well my friend!\nHow are you?", ]
],
[
r"sorry (.*)|sorry|(.*) sorry|(.*) sorry (.*)",
["It is already forgiven.", "You needn't worry at all", ]
],
[
r"Can you find (.*) for me ?|find|(.*) find|find (.*)|(.*) find (.*)",
["I am unable to search for that, for now.", "I will commence a search for that when I am able to do so.", ]
],
[
r"hi|hey|hello",
["Salutations!", "Greetings!", ]
],
[
r"is your name alfred ?",
["Unfortunately not sir, my name is Bot-ler"]
],
[
r"alfred|alfred (.*)",
["Not my name sir", "I could only wish to carry that name", "The name would suit me, wouldn’t it sir?"]
],
[
r"yes|yes (.*)",
["Splendid!", "Glad we agree", "Of course, I’ll get right to it"]
],
[
r"have you seen my underwear ?|seen underwear|(.*) seen underwear|seen underwear (.*)",
["I believe you left it under your bed again sir"]
],
[
r"how are my stocks doing today ?|stocks today|(.*) stocks today|stocks today (.*)",
["The stock market crashed sir, you are in severe dept", "It is going splendid sir. You are up by 10.43%"]
],
[
r"no",
["I was thinking the same thing", "Could not agree more"]
],
[
r"what would you if you weren’t a butler ?|weren't butler|were not butler",
["I would probably commit seppuku sir, to honor my family", "I’ve always been a fan of serving, I do not know sir"]
],
[
r"i like (.*)",
["I am quite a fan of %1 too", "Exquisite taste sir"]
],
[
r"what book can you recommend me ?|recommend book|book recommend|(.*) book recommend|recommend book (.*)|book (.*) recommend",
["I’ve heard great things of 'Name of the Wind' sir"]
],
[
r"my favorite book is (.*)|favourite book|(.*) favourite book|favourite book (.*)",
["I’ve never had the chance to read it sir", "Ahhhh! Isn’t that a New York Times best seller?"]
],
[
r"what’s your favorite movie ?|favourite movie|(.*) favourite movie|favourite movie (.*)",
["'Velocipastor' sir. Outstanding production"]
],
[
r"i am not a sir",
["Apologies, but sir it is the best I can do."]
],
[
r"do you game ?|game|(.*) game|(.*) game (.*)|game (.*)",
["I am a big fan of Roblox sir"]
],
[
r"(.*) i have for dinner ?|dinner|(.*) dinner",
["I have prepared some lobster for you sir", "As always, I have already served your favorite meal"]
],
[
r"(.*) music recommendations ?|music|(.*) music|(.*) music (.*)|music (.*)",
["Dirty paws from Of Monsters and Men is really good"]
],
[
r"(.*) monsters and men ?",
["Yes, they are an indie rock band sir. I highly recommend it"]
],
[
r"can you print this for me ?|print|(.*) print|(.*) print (.*)|print (.*)",
["Sadly, I cannot, although I can make printer noises for you sir"]
],
[
r"(.*) printer noises|printer noises|printer noises (.*)",
["Chk chk chk chk chk beeeee chk chk chk beeeee…"]
],
[
r"(.*) microwave noises|microwave noises|microwave noises (.*)",
["Mmmmmmmmhhhhhhhhhh mmmmmmhhhhhhhhh beeeep"]
],
[
r"what is the meaning of life|meaning life|(.*) meaning life|meaning life (.*)",
["42 sir, that is all there is..."]
],
[
r"can you make me (.*)|im hungry for (.*)",
["Of course, sire, I will get you %1 but first I'll need milk, brb", "No, your an adult make it yourself"]
],
[
r"how long will it take you to (.*) ?",
["I cannot say, an hour, a week maybe a decade. You must find it in yourself to wait"]
],
[
r"that was mean",
["I am doing my best to be polite, you are just making it difficult"]
],
[
r"you're being difficult",
["You literally programed me, I can only say what you allowed me to say"]
],
[
r"i love you (.*)|love you|(.*) love you|(.*) love you (.*)",
["That is sweet sir, but I only think of you as a an aquaintence"]
],
[
r"will you marry me|marry|(.*) marry|marry (.*)|(.*) marry (.*)",
["No"]
]
] | true | true |
1c34ca8e9b8d28b4024fe490f6d49900ca62b44a | 1,875 | py | Python | 2021/day4.py | omad/adventofcode | 685be8f6c12f192093654a6f822ed535a7687314 | [
"Apache-2.0"
] | null | null | null | 2021/day4.py | omad/adventofcode | 685be8f6c12f192093654a6f822ed535a7687314 | [
"Apache-2.0"
] | null | null | null | 2021/day4.py | omad/adventofcode | 685be8f6c12f192093654a6f822ed535a7687314 | [
"Apache-2.0"
] | null | null | null |
data = """\
7,4,9,5,11,17,23,2,0,14,21,24,10,16,13,6,15,25,12,22,18,20,8,19,3,26,1
22 13 17 11 0
8 2 23 4 24
21 9 14 16 7
6 10 3 18 5
1 12 20 15 19
3 15 0 2 22
9 18 13 17 5
19 8 7 25 23
20 11 10 24 4
14 21 16 12 6
14 21 17 24 4
10 16 15 9 19
18 8 23 26 20
22 11 13 6 5
2 0 12 3 7
"""
from pathlib import Path
input_filename = Path(__file__).stem + ".input"
print("Reading from: " + input_filename)
data = Path(input_filename).read_text()
data = data.splitlines()
drawn_nums = [v for v in data[0].split(',')]
boards = []
for start_row in range(2, len(data), 6):
board = [row.split()
for row in data[start_row:start_row + 5]]
boards.append(board)
def is_winner(board):
    """Return True if any full row or column of the 5x5 board is marked '*'."""
    columns = [[row[j] for row in board] for j in range(5)]
    return any(all(cell == "*" for cell in line)
               for line in list(board) + columns)
def board_sum(board):
    """Sum of all unmarked (non-'*') cells on the board."""
    return sum(int(cell)
               for row in board
               for cell in row
               if cell != "*")
# Now play!
def play(boards, drawn_nums):
    """Play bingo: print the first winner's score (Part 1) and the last
    winner's score (Part 2).

    Score = winning draw * sum of that board's unmarked numbers.

    BUG FIX: the original popped winning boards out of the list while
    iterating it with enumerate(), which skips the element immediately
    after each removed board (and checks shifting indices).  We now
    partition boards into winners/remaining each round instead.
    """
    found_first = False
    for num in drawn_nums:
        # Mark the drawn number on every board.
        boards = [[["*" if val == num else val
                    for val in row]
                   for row in board]
                  for board in boards]
        # Check: winners leave the game this round.
        winners = [board for board in boards if is_winner(board)]
        boards = [board for board in boards if not is_winner(board)]
        for board in winners:
            score = int(num) * board_sum(board)
            if found_first == False:
                print(f"Part 1: {score}")
            elif found_first == True and len(boards) == 0 and board is winners[-1]:
                print(f"Part 2: {score}")
            found_first = True

score = play(boards, drawn_nums)
print(score)  # play() returns None; kept to preserve the original output.
| 22.865854 | 70 | 0.532267 |
data = """\
7,4,9,5,11,17,23,2,0,14,21,24,10,16,13,6,15,25,12,22,18,20,8,19,3,26,1
22 13 17 11 0
8 2 23 4 24
21 9 14 16 7
6 10 3 18 5
1 12 20 15 19
3 15 0 2 22
9 18 13 17 5
19 8 7 25 23
20 11 10 24 4
14 21 16 12 6
14 21 17 24 4
10 16 15 9 19
18 8 23 26 20
22 11 13 6 5
2 0 12 3 7
"""
from pathlib import Path
input_filename = Path(__file__).stem + ".input"
print("Reading from: " + input_filename)
data = Path(input_filename).read_text()
data = data.splitlines()
drawn_nums = [v for v in data[0].split(',')]
boards = []
for start_row in range(2, len(data), 6):
board = [row.split()
for row in data[start_row:start_row + 5]]
boards.append(board)
def is_winner(board):
for x in range(5):
if all(val == "*" for val in board[x]):
return True
for y in range(5):
col = [row[y] for row in board]
if all(val == "*" for val in col):
return True
return False
def board_sum(board):
total = 0
for row in board:
for val in row:
if val != "*":
total += int(val)
return total
def play(boards, drawn_nums):
found_first = False
for num in drawn_nums:
boards = [[["*" if val == num else val
for val in row]
for row in board]
for board in boards]
for i, board in enumerate(boards):
if is_winner(board):
boards.pop(i)
if found_first == False:
score = int(num) * board_sum(board)
print(f"Part 1: {score}")
elif found_first == True and len(boards) == 0:
score = int(num) * board_sum(board)
print(f"Part 2: {score}")
found_first = True
score = play(boards, drawn_nums)
print(score)
| true | true |
1c34cb237ef74e7ad54998c960c93a7fdffe5213 | 464 | py | Python | examples/datasets/plot_mauna_loa.py | ltiao/scribbles | 9f30ea92ee348154568a7791751634d1feaba774 | [
"MIT"
] | 1 | 2020-03-01T04:36:36.000Z | 2020-03-01T04:36:36.000Z | examples/datasets/plot_mauna_loa.py | ltiao/scribbles | 9f30ea92ee348154568a7791751634d1feaba774 | [
"MIT"
] | 3 | 2020-01-02T19:09:40.000Z | 2020-01-02T19:11:02.000Z | examples/datasets/plot_mauna_loa.py | ltiao/scribbles | 9f30ea92ee348154568a7791751634d1feaba774 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Mauna Loa Atmospheric Carbon Dioxide
====================================
Hello world
"""
# sphinx_gallery_thumbnail_number = 1

import seaborn as sns

from scribbles.datasets import mauna_loa_load_dataframe
# %%
# Load the Mauna Loa CO2 record and plot the monthly average as a line.
data = mauna_loa_load_dataframe(base_dir="../../datasets")
g = sns.relplot(x='date', y='average', kind="line",
                data=data, height=5, aspect=1.5, alpha=0.8)
g.set_ylabels(r"average $\mathrm{CO}_2$ (ppm)")
| 22.095238 | 59 | 0.62931 |
import seaborn as sns
from scribbles.datasets import mauna_loa_load_dataframe
data = mauna_loa_load_dataframe(base_dir="../../datasets")
g = sns.relplot(x='date', y='average', kind="line",
data=data, height=5, aspect=1.5, alpha=0.8)
g.set_ylabels(r"average $\mathrm{CO}_2$ (ppm)")
| true | true |
1c34cb446d62086842a777d8d83eb336a2392db7 | 8,795 | py | Python | src/models/semseg.py | hrdipto/COVID19-L3-Net | 7450defa1276da8684caadb1631bd6850efb51cb | [
"Apache-2.0"
] | 38 | 2020-05-12T21:26:45.000Z | 2022-03-18T02:36:28.000Z | src/models/semseg.py | UBC-CIC/vgh-covid-19-ct-model | 3ae2bba16c2ab0d96650e790dc2e6b896c377183 | [
"Apache-2.0"
] | 5 | 2020-05-14T07:45:14.000Z | 2021-09-04T20:43:26.000Z | src/models/semseg.py | UBC-CIC/vgh-covid-19-ct-model | 3ae2bba16c2ab0d96650e790dc2e6b896c377183 | [
"Apache-2.0"
] | 9 | 2020-05-14T12:02:03.000Z | 2021-06-25T21:21:59.000Z | import torch
import torch.nn.functional as F
import torchvision
from torchvision import transforms
import os
import tqdm
import pylab as plt
import numpy as np
import scipy.sparse as sps
from collections.abc import Sequence
import time
from src import utils as ut
from sklearn.metrics import confusion_matrix
import skimage
from haven import haven_utils as hu
from haven import haven_img as hi
from torchvision import transforms
from src import models
from src.models import base_networks
from skimage.segmentation import mark_boundaries
from skimage.color import label2rgb
from . import losses, metrics
class SemSeg(torch.nn.Module):
def __init__(self, exp_dict, train_set):
    """Build the base segmentation network and its optimizer.

    Args:
        exp_dict: experiment configuration; reads 'model' (with optional
            'base', default 'unet2d'), 'optimizer' ('adam' or 'sgd') and 'lr'.
        train_set: dataset providing ``n_classes``.

    Raises:
        ValueError: if exp_dict['optimizer'] is not 'adam' or 'sgd'.
    """
    super().__init__()
    # NOTE: the original assigned self.exp_dict twice; once is enough.
    self.exp_dict = exp_dict
    self.n_classes = train_set.n_classes

    self.model_base = models.base_networks.get_base(self.exp_dict['model'].get('base', 'unet2d'),
                                        self.exp_dict, n_classes=self.n_classes)

    opt_name = self.exp_dict["optimizer"]
    if opt_name == "adam":
        self.opt = torch.optim.Adam(
            self.model_base.parameters(), lr=self.exp_dict["lr"], betas=(0.99, 0.999))
    elif opt_name == "sgd":
        self.opt = torch.optim.SGD(
            self.model_base.parameters(), lr=self.exp_dict["lr"])
    else:
        # was a bare `raise ValueError` -- include the offending value.
        raise ValueError('unknown optimizer: %s' % opt_name)
def get_state_dict(self):
state_dict = {"model": self.model_base.state_dict(),
"opt": self.opt.state_dict()}
return state_dict
def load_state_dict(self, state_dict):
self.model_base.load_state_dict(state_dict["model"])
if 'opt' not in state_dict:
return
self.opt.load_state_dict(state_dict["opt"])
def train_on_loader(self, train_loader):
    """Run one training epoch over *train_loader*.

    Returns:
        dict: running averages of the per-batch scores (e.g. 'train_loss').
    """
    self.train()

    n_batches = len(train_loader)

    pbar = tqdm.tqdm(desc="Training", total=n_batches, leave=False)
    train_monitor = TrainMonitor()

    for batch in train_loader:
        score_dict = self.train_on_batch(batch)
        train_monitor.add(score_dict)
        # Show the current running averages in the progress bar.
        msg = ' '.join(["%s: %.3f" % (k, v) for k,v in train_monitor.get_avg_score().items()])
        pbar.set_description('Training - %s' % msg)
        pbar.update(1)

    pbar.close()
    return train_monitor.get_avg_score()
@torch.no_grad()
def val_on_loader(self, val_loader, savedir_images=None, n_images=0, save_preds=False):
    """Evaluate on *val_loader*; optionally save up to *n_images* visualizations.

    Returns:
        dict: per-class IoU under keys 'iou_group<c>' plus the overall
        'val_score' (mean IoU).
    """
    self.eval()

    # metrics
    seg_monitor = metrics.SegMonitor()

    n_batches = len(val_loader)
    pbar = tqdm.tqdm(desc="Validating", total=n_batches, leave=False)
    for i, batch in enumerate(val_loader):
        seg_monitor.val_on_batch(self, batch)
        pbar.update(1)

        # Only the first n_images batches get a saved visualization.
        if savedir_images and i < n_images:
            os.makedirs(savedir_images, exist_ok=True)
            self.vis_on_batch(batch, savedir_image=os.path.join(
                savedir_images, "%d.jpg" % i), save_preds=save_preds)

            pbar.set_description("Validating & Saving Images: %.4f mIoU" %
                                 (seg_monitor.get_avg_score()['val_score']))
        else:
            pbar.set_description("Validating: %.4f mIoU" %
                                 (seg_monitor.get_avg_score()['val_score']))

    pbar.close()
    val_dict = seg_monitor.get_avg_score()
    # Flatten the per-class IoU vector into named scalar entries.
    out_dict = {}
    for c in range(self.n_classes):
        out_dict['iou_group%d' % c] = val_dict['iou'][c]
    out_dict['val_score'] = val_dict['val_score']
    return out_dict
def train_on_batch(self, batch, **extras):
    """Run a single optimization step on one batch.

    Returns:
        dict: {'train_loss': float} for monitoring.
    """
    self.train()
    self.model_base.train()
    self.opt.zero_grad()

    images = batch["images"].cuda()
    labels = batch["masks"].cuda()

    # Forward pass; crop/pad logits so they line up with the input size.
    logits = match_image_size(images, self.model_base(images))

    # Loss function is configurable per experiment.
    loss_name = self.exp_dict['model'].get('loss', 'cross_entropy')
    loss = losses.compute_loss(loss_name, logits, labels)

    # Some losses return 0 when the batch has nothing to supervise;
    # skip the backward/step in that case.
    if loss != 0:
        loss.backward()
        self.opt.step()

    return {"train_loss": float(loss)}
def predict_on_batch(self, batch):
    """Return hard class predictions (argmax over channels) for a batch."""
    images = batch["images"].cuda()
    # Forward pass; crop/pad logits so they line up with the input size.
    logits = match_image_size(images, self.model_base.forward(images))
    return logits.argmax(dim=1)
@torch.no_grad()
def vis_on_batch(self, batch, savedir_image, save_preds=False):
    """Save a 3-panel figure (input / ground-truth / prediction) for a batch.

    Only the first item of the batch is visualized.  The figure is written
    to ``savedir_image`` with its ``.jpg`` suffix replaced by ``.png``.
    When ``save_preds`` is True, one binary mask image per non-background
    group and a JSON metadata file are written alongside it.
    """
    self.eval()

    # Predicted hard labels, moved to CPU for numpy/matplotlib use.
    pred_mask = self.predict_on_batch(batch).cpu()

    # First image, channels-last (hu.f2l), shifted/scaled into [0, 1].
    img = hu.f2l(batch['images'])[0]
    img += abs(img.min())
    img /= img.max()
    # assumes a single-channel image tiled to 3 channels -- TODO confirm
    img = img.repeat(1,1,3)

    # Ground-truth mask; 255 (ignore label) is displayed as background.
    mask_vis = batch["masks"].clone().float()[0][..., None]
    mask_vis[mask_vis == 255] = 0
    pred_mask_vis = pred_mask.clone().float()[0][..., None]
    vmax = 0.1  # NOTE(review): unused; kept for compatibility

    fig, ax_list = plt.subplots(ncols=3, nrows=1)
    ax_list[0].imshow(img[:, :, 0], cmap='gray',
                      # interpolation='sinc', vmin=0, vmax=0.4
                      )

    # Fixed palette indexed by class id (supports up to 5 classes).
    colors_all = np.array(['black', 'red', 'blue', 'green', 'purple'])

    # Ground-truth overlay with white class boundaries.
    colors = colors_all[np.unique(mask_vis).astype(int)]
    vis = label2rgb(mask_vis[:, :, 0].numpy(), image=img.numpy(
    ), colors=colors, bg_label=255, bg_color=None, alpha=0.6, kind='overlay')
    vis = mark_boundaries(
        vis, mask_vis[:, :, 0].numpy().astype('uint8'), color=(1, 1, 1))
    ax_list[1].imshow(vis, cmap='gray')

    # Prediction overlay, styled identically to the ground-truth panel.
    colors = colors_all[np.unique(pred_mask_vis).astype(int)]
    vis = label2rgb(pred_mask_vis[:, :, 0].numpy(), image=img.numpy(
    ), colors=colors, bg_label=255, bg_color=None, alpha=0.6, kind='overlay')
    vis = mark_boundaries(
        vis, pred_mask_vis[:, :, 0].numpy().astype('uint8'), color=(1, 1, 1))
    ax_list[2].imshow(vis, cmap='gray')

    # Invisible plots used only to populate the legend with class colors.
    for i in range(1, self.n_classes):
        plt.plot([None], [None], label='group %d' % i, color=colors_all[i])

    ax_list[0].grid()
    ax_list[1].grid()
    ax_list[2].grid()
    ax_list[0].tick_params(axis='x', labelsize=6)
    ax_list[0].tick_params(axis='y', labelsize=6)
    ax_list[1].tick_params(axis='x', labelsize=6)
    ax_list[1].tick_params(axis='y', labelsize=6)
    ax_list[2].tick_params(axis='x', labelsize=6)
    ax_list[2].tick_params(axis='y', labelsize=6)
    ax_list[0].set_title('Original image', fontsize=8)
    ax_list[1].set_title('Ground-truth', fontsize=8)
    ax_list[2].set_title('Prediction', fontsize=8)
    # Place the legend outside the rightmost axes.
    legend_kwargs = {"loc": 2, "bbox_to_anchor": (1.05, 1),
                     'borderaxespad': 0., "ncol": 1}
    ax_list[2].legend(fontsize=6, **legend_kwargs)
    plt.savefig(savedir_image.replace('.jpg', '.png'),
                bbox_inches='tight', dpi=300)
    plt.close()

    # save predictions
    if save_preds:
        from PIL import Image
        pred_dict = {}  # NOTE(review): unused; kept for compatibility
        pred_numpy = pred_mask.cpu().numpy().squeeze().astype('uint8')
        uniques = np.unique(np.array(pred_numpy))  # NOTE(review): unused
        # Per-group pixel counts (GT and predicted) are merged into the
        # batch metadata and dumped as JSON next to the figure.
        meta_dict = batch['meta'][0]
        for u in range(self.n_classes):
            meta_dict['gt_group%d_n_pixels'%u] = float((batch['masks']==u).float().sum())
            meta_dict['pred_group%d_n_pixels'%u] = float((pred_mask==u).float().sum())
            if u == 0:
                # Group 0 is background; no mask image is saved for it.
                continue
            pred = Image.fromarray(pred_numpy==u)
            pred.save(savedir_image.replace('.jpg', '_group%d.png'%u))
        hu.save_json(savedir_image.replace('.jpg', '.json'), meta_dict)
def match_image_size(images, logits):
    """Bilinearly resize ``logits`` so its spatial size matches ``images``.

    Args:
        images: tensor whose last two dims give the target (H, W).
        logits: tensor to resize, layout (B, C, h, w).

    Returns:
        ``logits`` unchanged when sizes already agree, otherwise a
        bilinearly interpolated tensor of shape (B, C, H, W).
    """
    target = tuple(images.shape[-2:])
    current = tuple(logits.shape[-2:])
    if current == target:
        return logits
    return F.interpolate(logits, target, mode='bilinear', align_corners=True)
class TrainMonitor:
    """Accumulates per-batch score dicts and reports per-key running averages.

    Fix: the previous implementation kept a single counter ``n`` that was
    incremented once per *already-seen key* per ``add()`` call, so with
    score dicts containing more than one key the denominator in
    ``get_avg_score`` was wrong (e.g. 2 adds of 2 keys divided by 3).
    Averages are now tracked with an explicit per-key count.
    """

    def __init__(self):
        # Running sum per metric key.
        self.score_dict_sum = {}
        # Legacy counter of add() calls after the first; kept only for
        # backward compatibility with code that might read it.
        self.n = 0
        # Number of times each key has been added (correct denominator).
        self._counts = {}

    def add(self, score_dict):
        """Accumulate one batch's scores."""
        for k, v in score_dict.items():
            if k not in self.score_dict_sum:
                self.score_dict_sum[k] = v
            else:
                self.n += 1
                self.score_dict_sum[k] += v
            self._counts[k] = self._counts.get(k, 0) + 1

    def get_avg_score(self):
        """Return the mean value seen so far for every key."""
        return {k: v / self._counts[k] for k, v in self.score_dict_sum.items()}
| 34.490196 | 101 | 0.579648 | import torch
import torch.nn.functional as F
import torchvision
from torchvision import transforms
import os
import tqdm
import pylab as plt
import numpy as np
import scipy.sparse as sps
from collections.abc import Sequence
import time
from src import utils as ut
from sklearn.metrics import confusion_matrix
import skimage
from haven import haven_utils as hu
from haven import haven_img as hi
from torchvision import transforms
from src import models
from src.models import base_networks
from skimage.segmentation import mark_boundaries
from skimage.color import label2rgb
from . import losses, metrics
class SemSeg(torch.nn.Module):
def __init__(self, exp_dict, train_set):
super().__init__()
self.exp_dict = exp_dict
self.n_classes = train_set.n_classes
self.exp_dict = exp_dict
self.model_base = models.base_networks.get_base(self.exp_dict['model'].get('base', 'unet2d'),
self.exp_dict, n_classes=self.n_classes)
if self.exp_dict["optimizer"] == "adam":
self.opt = torch.optim.Adam(
self.model_base.parameters(), lr=self.exp_dict["lr"], betas=(0.99, 0.999))
elif self.exp_dict["optimizer"] == "sgd":
self.opt = torch.optim.SGD(
self.model_base.parameters(), lr=self.exp_dict["lr"])
else:
raise ValueError
def get_state_dict(self):
state_dict = {"model": self.model_base.state_dict(),
"opt": self.opt.state_dict()}
return state_dict
def load_state_dict(self, state_dict):
self.model_base.load_state_dict(state_dict["model"])
if 'opt' not in state_dict:
return
self.opt.load_state_dict(state_dict["opt"])
def train_on_loader(self, train_loader):
self.train()
n_batches = len(train_loader)
pbar = tqdm.tqdm(desc="Training", total=n_batches, leave=False)
train_monitor = TrainMonitor()
for batch in train_loader:
score_dict = self.train_on_batch(batch)
train_monitor.add(score_dict)
msg = ' '.join(["%s: %.3f" % (k, v) for k,v in train_monitor.get_avg_score().items()])
pbar.set_description('Training - %s' % msg)
pbar.update(1)
pbar.close()
return train_monitor.get_avg_score()
@torch.no_grad()
def val_on_loader(self, val_loader, savedir_images=None, n_images=0, save_preds=False):
self.eval()
seg_monitor = metrics.SegMonitor()
n_batches = len(val_loader)
pbar = tqdm.tqdm(desc="Validating", total=n_batches, leave=False)
for i, batch in enumerate(val_loader):
seg_monitor.val_on_batch(self, batch)
pbar.update(1)
if savedir_images and i < n_images:
os.makedirs(savedir_images, exist_ok=True)
self.vis_on_batch(batch, savedir_image=os.path.join(
savedir_images, "%d.jpg" % i), save_preds=save_preds)
pbar.set_description("Validating & Saving Images: %.4f mIoU" %
(seg_monitor.get_avg_score()['val_score']))
else:
pbar.set_description("Validating: %.4f mIoU" %
(seg_monitor.get_avg_score()['val_score']))
pbar.close()
val_dict = seg_monitor.get_avg_score()
out_dict = {}
for c in range(self.n_classes):
out_dict['iou_group%d' % c] = val_dict['iou'][c]
out_dict['val_score'] = val_dict['val_score']
return out_dict
def train_on_batch(self, batch, **extras):
self.train()
self.model_base.train()
self.opt.zero_grad()
images, labels = batch["images"], batch["masks"]
images, labels = images.cuda(), labels.cuda()
logits = self.model_base(images)
logits = match_image_size(images, logits)
loss_name = self.exp_dict['model'].get('loss', 'cross_entropy')
loss = losses.compute_loss(loss_name, logits, labels)
if loss != 0:
loss.backward()
self.opt.step()
return {"train_loss": float(loss)}
def predict_on_batch(self, batch):
images = batch["images"].cuda()
n = images.shape[0]
logits = self.model_base.forward(images)
logits = match_image_size(images, logits)
return logits.argmax(dim=1)
@torch.no_grad()
def vis_on_batch(self, batch, savedir_image, save_preds=False):
self.eval()
pred_mask = self.predict_on_batch(batch).cpu()
img = hu.f2l(batch['images'])[0]
img += abs(img.min())
img /= img.max()
img = img.repeat(1,1,3)
mask_vis = batch["masks"].clone().float()[0][..., None]
mask_vis[mask_vis == 255] = 0
pred_mask_vis = pred_mask.clone().float()[0][..., None]
vmax = 0.1
fig, ax_list = plt.subplots(ncols=3, nrows=1)
ax_list[0].imshow(img[:, :, 0], cmap='gray',
)
colors_all = np.array(['black', 'red', 'blue', 'green', 'purple'])
colors = colors_all[np.unique(mask_vis).astype(int)]
vis = label2rgb(mask_vis[:, :, 0].numpy(), image=img.numpy(
), colors=colors, bg_label=255, bg_color=None, alpha=0.6, kind='overlay')
vis = mark_boundaries(
vis, mask_vis[:, :, 0].numpy().astype('uint8'), color=(1, 1, 1))
ax_list[1].imshow(vis, cmap='gray')
colors = colors_all[np.unique(pred_mask_vis).astype(int)]
vis = label2rgb(pred_mask_vis[:, :, 0].numpy(), image=img.numpy(
), colors=colors, bg_label=255, bg_color=None, alpha=0.6, kind='overlay')
vis = mark_boundaries(
vis, pred_mask_vis[:, :, 0].numpy().astype('uint8'), color=(1, 1, 1))
ax_list[2].imshow(vis, cmap='gray')
for i in range(1, self.n_classes):
plt.plot([None], [None], label='group %d' % i, color=colors_all[i])
ax_list[0].grid()
ax_list[1].grid()
ax_list[2].grid()
ax_list[0].tick_params(axis='x', labelsize=6)
ax_list[0].tick_params(axis='y', labelsize=6)
ax_list[1].tick_params(axis='x', labelsize=6)
ax_list[1].tick_params(axis='y', labelsize=6)
ax_list[2].tick_params(axis='x', labelsize=6)
ax_list[2].tick_params(axis='y', labelsize=6)
ax_list[0].set_title('Original image', fontsize=8)
ax_list[1].set_title('Ground-truth', fontsize=8)
ax_list[2].set_title('Prediction', fontsize=8)
legend_kwargs = {"loc": 2, "bbox_to_anchor": (1.05, 1),
'borderaxespad': 0., "ncol": 1}
ax_list[2].legend(fontsize=6, **legend_kwargs)
plt.savefig(savedir_image.replace('.jpg', '.png'),
bbox_inches='tight', dpi=300)
plt.close()
if save_preds:
from PIL import Image
pred_dict = {}
pred_numpy = pred_mask.cpu().numpy().squeeze().astype('uint8')
uniques = np.unique(np.array(pred_numpy))
meta_dict = batch['meta'][0]
for u in range(self.n_classes):
meta_dict['gt_group%d_n_pixels'%u] = float((batch['masks']==u).float().sum())
meta_dict['pred_group%d_n_pixels'%u] = float((pred_mask==u).float().sum())
if u == 0:
continue
pred = Image.fromarray(pred_numpy==u)
pred.save(savedir_image.replace('.jpg', '_group%d.png'%u))
hu.save_json(savedir_image.replace('.jpg', '.json'), meta_dict)
def match_image_size(images, logits):
h, w = images.shape[-2:]
hl, wl = logits.shape[-2:]
if hl != h or wl != w:
logits = F.interpolate(logits, (h,w), mode='bilinear', align_corners=True)
return logits
class TrainMonitor:
def __init__(self):
self.score_dict_sum = {}
self.n = 0
def add(self, score_dict):
for k,v in score_dict.items():
if k not in self.score_dict_sum:
self.score_dict_sum[k] = score_dict[k]
else:
self.n += 1
self.score_dict_sum[k] += score_dict[k]
def get_avg_score(self):
return {k:v/(self.n + 1) for k,v in self.score_dict_sum.items()}
| true | true |
1c34cb8e2bd8167a9cdbac34f2eaaeef897cbeb7 | 388 | py | Python | toontown/town/TTStreet.py | TheFamiliarScoot/open-toontown | 678313033174ea7d08e5c2823bd7b473701ff547 | [
"BSD-3-Clause"
] | 99 | 2019-11-02T22:25:00.000Z | 2022-02-03T03:48:00.000Z | toontown/town/TTStreet.py | TheFamiliarScoot/open-toontown | 678313033174ea7d08e5c2823bd7b473701ff547 | [
"BSD-3-Clause"
] | 42 | 2019-11-03T05:31:08.000Z | 2022-03-16T22:50:32.000Z | toontown/town/TTStreet.py | TheFamiliarScoot/open-toontown | 678313033174ea7d08e5c2823bd7b473701ff547 | [
"BSD-3-Clause"
] | 57 | 2019-11-03T07:47:37.000Z | 2022-03-22T00:41:49.000Z | from . import Street
class TTStreet(Street.Street):
    """Street subclass for the TT neighborhood (presumably Toontown
    Central -- confirm against hood definitions).

    Thin specialization: load/unload simply defer to the base Street.
    """

    def __init__(self, loader, parentFSM, doneEvent):
        Street.Street.__init__(self, loader, parentFSM, doneEvent)

    def load(self):
        Street.Street.load(self)

    def unload(self):
        Street.Street.unload(self)

    def doRequestLeave(self, requestStatus):
        # Route departures through the 'trialerFA' FSM state instead of
        # leaving directly; requestStatus is forwarded as state data.
        self.fsm.request('trialerFA', [requestStatus])
| 24.25 | 66 | 0.680412 | from . import Street
class TTStreet(Street.Street):
def __init__(self, loader, parentFSM, doneEvent):
Street.Street.__init__(self, loader, parentFSM, doneEvent)
def load(self):
Street.Street.load(self)
def unload(self):
Street.Street.unload(self)
def doRequestLeave(self, requestStatus):
self.fsm.request('trialerFA', [requestStatus])
| true | true |
1c34cc223bf419de2c83c0b3f2d4778cddf99416 | 15,003 | py | Python | lib/sqlalchemy/orm/base.py | lelit/sqlalchemy | 55f930ef3d4e60bed02a2dad16e331fe42cfd12b | [
"MIT"
] | null | null | null | lib/sqlalchemy/orm/base.py | lelit/sqlalchemy | 55f930ef3d4e60bed02a2dad16e331fe42cfd12b | [
"MIT"
] | null | null | null | lib/sqlalchemy/orm/base.py | lelit/sqlalchemy | 55f930ef3d4e60bed02a2dad16e331fe42cfd12b | [
"MIT"
] | null | null | null | # orm/base.py
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Constants and rudimental functions used throughout the ORM.
"""
import operator
from . import exc
from .. import exc as sa_exc
from .. import inspection
from .. import util
from ..sql import expression
PASSIVE_NO_RESULT = util.symbol(
"PASSIVE_NO_RESULT",
"""Symbol returned by a loader callable or other attribute/history
retrieval operation when a value could not be determined, based
on loader callable flags.
""",
)
ATTR_WAS_SET = util.symbol(
"ATTR_WAS_SET",
"""Symbol returned by a loader callable to indicate the
retrieved value, or values, were assigned to their attributes
on the target object.
""",
)
ATTR_EMPTY = util.symbol(
"ATTR_EMPTY",
"""Symbol used internally to indicate an attribute had no callable.""",
)
NO_VALUE = util.symbol(
"NO_VALUE",
"""Symbol which may be placed as the 'previous' value of an attribute,
indicating no value was loaded for an attribute when it was modified,
and flags indicated we were not to load it.
""",
)
NEVER_SET = util.symbol(
"NEVER_SET",
"""Symbol which may be placed as the 'previous' value of an attribute
indicating that the attribute had not been assigned to previously.
""",
)
NO_CHANGE = util.symbol(
"NO_CHANGE",
"""No callables or SQL should be emitted on attribute access
and no state should change
""",
canonical=0,
)
CALLABLES_OK = util.symbol(
"CALLABLES_OK",
"""Loader callables can be fired off if a value
is not present.
""",
canonical=1,
)
SQL_OK = util.symbol(
"SQL_OK",
"""Loader callables can emit SQL at least on scalar value attributes.""",
canonical=2,
)
RELATED_OBJECT_OK = util.symbol(
"RELATED_OBJECT_OK",
"""Callables can use SQL to load related objects as well
as scalar value attributes.
""",
canonical=4,
)
INIT_OK = util.symbol(
"INIT_OK",
"""Attributes should be initialized with a blank
value (None or an empty collection) upon get, if no other
value can be obtained.
""",
canonical=8,
)
NON_PERSISTENT_OK = util.symbol(
"NON_PERSISTENT_OK",
"""Callables can be emitted if the parent is not persistent.""",
canonical=16,
)
LOAD_AGAINST_COMMITTED = util.symbol(
"LOAD_AGAINST_COMMITTED",
"""Callables should use committed values as primary/foreign keys during a
load.
""",
canonical=32,
)
NO_AUTOFLUSH = util.symbol(
"NO_AUTOFLUSH",
"""Loader callables should disable autoflush.""",
canonical=64,
)
NO_RAISE = util.symbol(
"NO_RAISE",
"""Loader callables should not raise any assertions""",
canonical=128,
)
# pre-packaged sets of flags used as inputs
PASSIVE_OFF = util.symbol(
"PASSIVE_OFF",
"Callables can be emitted in all cases.",
canonical=(
RELATED_OBJECT_OK | NON_PERSISTENT_OK | INIT_OK | CALLABLES_OK | SQL_OK
),
)
PASSIVE_RETURN_NEVER_SET = util.symbol(
"PASSIVE_RETURN_NEVER_SET",
"""PASSIVE_OFF ^ INIT_OK""",
canonical=PASSIVE_OFF ^ INIT_OK,
)
PASSIVE_NO_INITIALIZE = util.symbol(
"PASSIVE_NO_INITIALIZE",
"PASSIVE_RETURN_NEVER_SET ^ CALLABLES_OK",
canonical=PASSIVE_RETURN_NEVER_SET ^ CALLABLES_OK,
)
PASSIVE_NO_FETCH = util.symbol(
"PASSIVE_NO_FETCH", "PASSIVE_OFF ^ SQL_OK", canonical=PASSIVE_OFF ^ SQL_OK
)
PASSIVE_NO_FETCH_RELATED = util.symbol(
"PASSIVE_NO_FETCH_RELATED",
"PASSIVE_OFF ^ RELATED_OBJECT_OK",
canonical=PASSIVE_OFF ^ RELATED_OBJECT_OK,
)
PASSIVE_ONLY_PERSISTENT = util.symbol(
"PASSIVE_ONLY_PERSISTENT",
"PASSIVE_OFF ^ NON_PERSISTENT_OK",
canonical=PASSIVE_OFF ^ NON_PERSISTENT_OK,
)
DEFAULT_MANAGER_ATTR = "_sa_class_manager"
DEFAULT_STATE_ATTR = "_sa_instance_state"
_INSTRUMENTOR = ("mapper", "instrumentor")
EXT_CONTINUE = util.symbol("EXT_CONTINUE")
EXT_STOP = util.symbol("EXT_STOP")
EXT_SKIP = util.symbol("EXT_SKIP")
ONETOMANY = util.symbol(
"ONETOMANY",
"""Indicates the one-to-many direction for a :func:`.relationship`.
This symbol is typically used by the internals but may be exposed within
certain API features.
""",
)
MANYTOONE = util.symbol(
"MANYTOONE",
"""Indicates the many-to-one direction for a :func:`.relationship`.
This symbol is typically used by the internals but may be exposed within
certain API features.
""",
)
MANYTOMANY = util.symbol(
"MANYTOMANY",
"""Indicates the many-to-many direction for a :func:`.relationship`.
This symbol is typically used by the internals but may be exposed within
certain API features.
""",
)
NOT_EXTENSION = util.symbol(
"NOT_EXTENSION",
"""Symbol indicating an :class:`InspectionAttr` that's
not part of sqlalchemy.ext.
Is assigned to the :attr:`.InspectionAttr.extension_type`
attibute.
""",
)
_never_set = frozenset([NEVER_SET])
_none_set = frozenset([None, NEVER_SET, PASSIVE_NO_RESULT])
_SET_DEFERRED_EXPIRED = util.symbol("SET_DEFERRED_EXPIRED")
_DEFER_FOR_STATE = util.symbol("DEFER_FOR_STATE")
def _generative(*assertions):
    """Mark a method as generative, e.g. method-chained.

    Each decorated call clones the receiver, runs the given ``assertions``
    against the clone (each may raise to veto the call), applies the
    wrapped function to the clone, and returns the clone.
    """

    @util.decorator
    def generate(fn, *args, **kw):
        # Operate on a copy so the original object is left untouched.
        self = args[0]._clone()
        for assertion in assertions:
            assertion(self, fn.__name__)
        fn(self, *args[1:], **kw)
        return self

    return generate
# these can be replaced by sqlalchemy.ext.instrumentation
# if augmented class instrumentation is enabled.
def manager_of_class(cls):
    """Return the class manager stored directly on ``cls``, else None.

    Looks only at the class's own ``__dict__`` so a manager inherited
    from a superclass is not returned.
    """
    return cls.__dict__.get(DEFAULT_MANAGER_ATTR)
instance_state = operator.attrgetter(DEFAULT_STATE_ATTR)
instance_dict = operator.attrgetter("__dict__")
def instance_str(instance):
    """Return a string describing an instance, e.g. ``<User at 0x...>``."""
    return state_str(instance_state(instance))
def state_str(state):
    """Return a string describing an instance via its InstanceState.

    ``None`` is rendered as the literal string ``"None"``.
    """
    if state is None:
        return "None"
    return "<{0} at 0x{1:x}>".format(state.class_.__name__, id(state.obj()))
def state_class_str(state):
    """Return a string describing an instance's class via its
    InstanceState, e.g. ``<User>``; ``None`` yields ``"None"``.
    """
    if state is None:
        return "None"
    return "<{0}>".format(state.class_.__name__)
def attribute_str(instance, attribute):
    """Return ``"<instance description>.<attribute>"`` for messages."""
    return ".".join((instance_str(instance), attribute))
def state_attribute_str(state, attribute):
    """Return ``"<state description>.<attribute>"`` for messages."""
    return ".".join((state_str(state), attribute))
def object_mapper(instance):
    """Given an object, return the primary Mapper associated with the object
    instance.

    Raises :class:`sqlalchemy.orm.exc.UnmappedInstanceError`
    if no mapping is configured.

    This function is available via the inspection system as::

        inspect(instance).mapper

    Using the inspection system will raise
    :class:`sqlalchemy.exc.NoInspectionAvailable` if the instance is
    not part of a mapping.

    """
    # object_state() performs the unmapped-instance check and raises.
    return object_state(instance).mapper
def object_state(instance):
    """Given an object, return the :class:`.InstanceState`
    associated with the object.

    Raises :class:`sqlalchemy.orm.exc.UnmappedInstanceError`
    if no mapping is configured.

    Equivalent functionality is available via the :func:`.inspect`
    function as::

        inspect(instance)

    Using the inspection system will raise
    :class:`sqlalchemy.exc.NoInspectionAvailable` if the instance is
    not part of a mapping.

    """
    state = _inspect_mapped_object(instance)
    if state is None:
        # _inspect_mapped_object() returns None for unmapped or
        # uninstrumented objects; surface that as the documented error.
        raise exc.UnmappedInstanceError(instance)
    else:
        return state
@inspection._inspects(object)
def _inspect_mapped_object(instance):
    """Inspection hook for arbitrary objects: return the InstanceState
    of ``instance``, or None if it is not an instrumented mapped object.
    """
    try:
        return instance_state(instance)
        # TODO: whats the py-2/3 syntax to catch two
        # different kinds of exceptions at once ?
    except exc.UnmappedClassError:
        return None
    except exc.NO_STATE:
        return None
def _class_to_mapper(class_or_mapper):
    """Return the Mapper for a class or mapper-like object, raising
    :class:`.UnmappedClassError` if it has no inspection target.
    """
    insp = inspection.inspect(class_or_mapper, False)
    if insp is None:
        raise exc.UnmappedClassError(class_or_mapper)
    return insp.mapper
def _mapper_or_none(entity):
    """Return the :class:`.Mapper` for the given class or None if the
    class is not mapped.
    """
    insp = inspection.inspect(entity, False)
    return insp.mapper if insp is not None else None
def _is_mapped_class(entity):
    """Return True if the given object is a mapped class,
    :class:`.Mapper`, or :class:`.AliasedClass`.
    """
    insp = inspection.inspect(entity, False)
    # Clause elements (plain Core constructs) are explicitly excluded.
    if insp is None or insp.is_clause_element:
        return False
    return insp.is_mapper or insp.is_aliased_class
def _attr_as_key(attr):
if hasattr(attr, "key"):
return attr.key
else:
return expression._column_as_key(attr)
def _orm_columns(entity):
    """Return the columns of ``entity``'s selectable, or ``[entity]``
    when no selectable with a ``.c`` collection is available.
    """
    insp = inspection.inspect(entity, False)
    if hasattr(insp, "selectable") and hasattr(insp.selectable, "c"):
        return list(insp.selectable.c)
    return [entity]
def _is_aliased_class(entity):
    """Return True if ``entity`` inspects as an AliasedClass."""
    insp = inspection.inspect(entity, False)
    if insp is None:
        return False
    return getattr(insp, "is_aliased_class", False)
def _entity_descriptor(entity, key):
    """Return a class attribute given an entity and string name.

    May return :class:`.InstrumentedAttribute` or user-defined
    attribute.

    """
    insp = inspection.inspect(entity)
    if insp.is_selectable:
        # Core selectable: look the name up among its columns.
        description = entity
        entity = insp.c
    elif insp.is_aliased_class:
        # AliasedClass: attribute access goes through the alias itself.
        entity = insp.entity
        description = entity
    elif hasattr(insp, "mapper"):
        # Mapped class or Mapper: resolve against the mapped class.
        description = entity = insp.mapper.class_
    else:
        description = entity

    try:
        return getattr(entity, key)
    except AttributeError:
        raise sa_exc.InvalidRequestError(
            "Entity '%s' has no property '%s'" % (description, key)
        )
_state_mapper = util.dottedgetter("manager.mapper")
@inspection._inspects(type)
def _inspect_mapped_class(class_, configure=False):
    """Inspection hook for classes: return the Mapper for ``class_``,
    or None if the class is not mapped.

    When ``configure`` is True, pending mapper configuration is run
    before returning.
    """
    try:
        class_manager = manager_of_class(class_)
        if not class_manager.is_mapped:
            return None
        mapper = class_manager.mapper
    # NOTE(review): when no manager exists, manager_of_class() returns
    # None and the attribute access above raises; exc.NO_STATE
    # presumably covers that case -- confirm against orm/exc.
    except exc.NO_STATE:
        return None
    else:
        if configure and mapper._new_mappers:
            mapper._configure_all()
        return mapper
def class_mapper(class_, configure=True):
    """Given a class, return the primary :class:`.Mapper` associated
    with the key.

    Raises :exc:`.UnmappedClassError` if no mapping is configured
    on the given class, or :exc:`.ArgumentError` if a non-class
    object is passed.

    Equivalent functionality is available via the :func:`.inspect`
    function as::

        inspect(some_mapped_class)

    Using the inspection system will raise
    :class:`sqlalchemy.exc.NoInspectionAvailable` if the class is not mapped.

    """
    mapper = _inspect_mapped_class(class_, configure=configure)
    if mapper is None:
        if not isinstance(class_, type):
            # Distinguish "not a class at all" from "class with no mapping".
            raise sa_exc.ArgumentError(
                "Class object expected, got '%r'." % (class_,)
            )
        raise exc.UnmappedClassError(class_)
    else:
        return mapper
class InspectionAttr(object):
    """A base class applied to all ORM objects that can be returned
    by the :func:`.inspect` function.

    The attributes defined here allow the usage of simple boolean
    checks to test basic facts about the object returned.

    While the boolean checks here are basically the same as using
    the Python isinstance() function, the flags here can be used without
    the need to import all of these classes, and also such that
    the SQLAlchemy class system can change while leaving the flags
    here intact for forwards-compatibility.

    """

    # Empty __slots__ keeps this usable as a mixin for slotted classes.
    __slots__ = ()

    is_selectable = False
    """Return True if this object is an instance of :class:`.Selectable`."""

    is_aliased_class = False
    """True if this object is an instance of :class:`.AliasedClass`."""

    is_instance = False
    """True if this object is an instance of :class:`.InstanceState`."""

    is_mapper = False
    """True if this object is an instance of :class:`.Mapper`."""

    is_property = False
    """True if this object is an instance of :class:`.MapperProperty`."""

    is_attribute = False
    """True if this object is a Python :term:`descriptor`.

    This can refer to one of many types.   Usually a
    :class:`.QueryableAttribute` which handles attributes events on behalf
    of a :class:`.MapperProperty`.   But can also be an extension type
    such as :class:`.AssociationProxy` or :class:`.hybrid_property`.
    The :attr:`.InspectionAttr.extension_type` will refer to a constant
    identifying the specific subtype.

    .. seealso::

        :attr:`.Mapper.all_orm_descriptors`

    """

    _is_internal_proxy = False
    """True if this object is an internal proxy object.

    .. versionadded:: 1.2.12

    """

    is_clause_element = False
    """True if this object is an instance of :class:`.ClauseElement`."""

    extension_type = NOT_EXTENSION
    """The extension type, if any.
    Defaults to :data:`.interfaces.NOT_EXTENSION`

    .. versionadded:: 0.8.0

    .. seealso::

        :data:`.HYBRID_METHOD`

        :data:`.HYBRID_PROPERTY`

        :data:`.ASSOCIATION_PROXY`

    """
class InspectionAttrInfo(InspectionAttr):
    """Adds the ``.info`` attribute to :class:`.InspectionAttr`.

    The rationale for :class:`.InspectionAttr` vs. :class:`.InspectionAttrInfo`
    is that the former is compatible as a mixin for classes that specify
    ``__slots__``; this is essentially an implementation artifact.

    """

    @util.memoized_property
    def info(self):
        """Info dictionary associated with the object, allowing user-defined
        data to be associated with this :class:`.InspectionAttr`.

        The dictionary is generated when first accessed.  Alternatively,
        it can be specified as a constructor argument to the
        :func:`.column_property`, :func:`.relationship`, or :func:`.composite`
        functions.

        .. versionadded:: 0.8  Added support for .info to all
           :class:`.MapperProperty` subclasses.

        .. versionchanged:: 1.0.0 :attr:`.MapperProperty.info` is also
           available on extension types via the
           :attr:`.InspectionAttrInfo.info` attribute, so that it can apply
           to a wider variety of ORM and extension constructs.

        .. seealso::

            :attr:`.QueryableAttribute.info`

            :attr:`.SchemaItem.info`

        """
        # memoized_property caches the dict on the instance after the
        # first access, so caller mutations persist.
        return {}
class _MappedAttribute(object):
    """Mixin for attributes which should be replaced by mapper-assigned
    attributes.

    """

    # Empty __slots__ keeps this mixin compatible with slotted subclasses.
    __slots__ = ()
| 26.274956 | 79 | 0.680264 |
import operator
from . import exc
from .. import exc as sa_exc
from .. import inspection
from .. import util
from ..sql import expression
PASSIVE_NO_RESULT = util.symbol(
"PASSIVE_NO_RESULT",
"""Symbol returned by a loader callable or other attribute/history
retrieval operation when a value could not be determined, based
on loader callable flags.
""",
)
ATTR_WAS_SET = util.symbol(
"ATTR_WAS_SET",
"""Symbol returned by a loader callable to indicate the
retrieved value, or values, were assigned to their attributes
on the target object.
""",
)
ATTR_EMPTY = util.symbol(
"ATTR_EMPTY",
"""Symbol used internally to indicate an attribute had no callable.""",
)
NO_VALUE = util.symbol(
"NO_VALUE",
"""Symbol which may be placed as the 'previous' value of an attribute,
indicating no value was loaded for an attribute when it was modified,
and flags indicated we were not to load it.
""",
)
NEVER_SET = util.symbol(
"NEVER_SET",
"""Symbol which may be placed as the 'previous' value of an attribute
indicating that the attribute had not been assigned to previously.
""",
)
NO_CHANGE = util.symbol(
"NO_CHANGE",
"""No callables or SQL should be emitted on attribute access
and no state should change
""",
canonical=0,
)
CALLABLES_OK = util.symbol(
"CALLABLES_OK",
"""Loader callables can be fired off if a value
is not present.
""",
canonical=1,
)
SQL_OK = util.symbol(
"SQL_OK",
"""Loader callables can emit SQL at least on scalar value attributes.""",
canonical=2,
)
RELATED_OBJECT_OK = util.symbol(
"RELATED_OBJECT_OK",
"""Callables can use SQL to load related objects as well
as scalar value attributes.
""",
canonical=4,
)
INIT_OK = util.symbol(
"INIT_OK",
"""Attributes should be initialized with a blank
value (None or an empty collection) upon get, if no other
value can be obtained.
""",
canonical=8,
)
NON_PERSISTENT_OK = util.symbol(
"NON_PERSISTENT_OK",
"""Callables can be emitted if the parent is not persistent.""",
canonical=16,
)
LOAD_AGAINST_COMMITTED = util.symbol(
"LOAD_AGAINST_COMMITTED",
"""Callables should use committed values as primary/foreign keys during a
load.
""",
canonical=32,
)
NO_AUTOFLUSH = util.symbol(
"NO_AUTOFLUSH",
"""Loader callables should disable autoflush.""",
canonical=64,
)
NO_RAISE = util.symbol(
"NO_RAISE",
"""Loader callables should not raise any assertions""",
canonical=128,
)
PASSIVE_OFF = util.symbol(
"PASSIVE_OFF",
"Callables can be emitted in all cases.",
canonical=(
RELATED_OBJECT_OK | NON_PERSISTENT_OK | INIT_OK | CALLABLES_OK | SQL_OK
),
)
PASSIVE_RETURN_NEVER_SET = util.symbol(
"PASSIVE_RETURN_NEVER_SET",
"""PASSIVE_OFF ^ INIT_OK""",
canonical=PASSIVE_OFF ^ INIT_OK,
)
PASSIVE_NO_INITIALIZE = util.symbol(
"PASSIVE_NO_INITIALIZE",
"PASSIVE_RETURN_NEVER_SET ^ CALLABLES_OK",
canonical=PASSIVE_RETURN_NEVER_SET ^ CALLABLES_OK,
)
PASSIVE_NO_FETCH = util.symbol(
"PASSIVE_NO_FETCH", "PASSIVE_OFF ^ SQL_OK", canonical=PASSIVE_OFF ^ SQL_OK
)
PASSIVE_NO_FETCH_RELATED = util.symbol(
"PASSIVE_NO_FETCH_RELATED",
"PASSIVE_OFF ^ RELATED_OBJECT_OK",
canonical=PASSIVE_OFF ^ RELATED_OBJECT_OK,
)
PASSIVE_ONLY_PERSISTENT = util.symbol(
"PASSIVE_ONLY_PERSISTENT",
"PASSIVE_OFF ^ NON_PERSISTENT_OK",
canonical=PASSIVE_OFF ^ NON_PERSISTENT_OK,
)
DEFAULT_MANAGER_ATTR = "_sa_class_manager"
DEFAULT_STATE_ATTR = "_sa_instance_state"
_INSTRUMENTOR = ("mapper", "instrumentor")
EXT_CONTINUE = util.symbol("EXT_CONTINUE")
EXT_STOP = util.symbol("EXT_STOP")
EXT_SKIP = util.symbol("EXT_SKIP")
ONETOMANY = util.symbol(
"ONETOMANY",
"""Indicates the one-to-many direction for a :func:`.relationship`.
This symbol is typically used by the internals but may be exposed within
certain API features.
""",
)
MANYTOONE = util.symbol(
"MANYTOONE",
"""Indicates the many-to-one direction for a :func:`.relationship`.
This symbol is typically used by the internals but may be exposed within
certain API features.
""",
)
MANYTOMANY = util.symbol(
"MANYTOMANY",
"""Indicates the many-to-many direction for a :func:`.relationship`.
This symbol is typically used by the internals but may be exposed within
certain API features.
""",
)
NOT_EXTENSION = util.symbol(
"NOT_EXTENSION",
"""Symbol indicating an :class:`InspectionAttr` that's
not part of sqlalchemy.ext.
Is assigned to the :attr:`.InspectionAttr.extension_type`
attibute.
""",
)
_never_set = frozenset([NEVER_SET])
_none_set = frozenset([None, NEVER_SET, PASSIVE_NO_RESULT])
_SET_DEFERRED_EXPIRED = util.symbol("SET_DEFERRED_EXPIRED")
_DEFER_FOR_STATE = util.symbol("DEFER_FOR_STATE")
def _generative(*assertions):
@util.decorator
def generate(fn, *args, **kw):
self = args[0]._clone()
for assertion in assertions:
assertion(self, fn.__name__)
fn(self, *args[1:], **kw)
return self
return generate
# these can be replaced by sqlalchemy.ext.instrumentation
# if augmented class instrumentation is enabled.
def manager_of_class(cls):
return cls.__dict__.get(DEFAULT_MANAGER_ATTR, None)
instance_state = operator.attrgetter(DEFAULT_STATE_ATTR)
instance_dict = operator.attrgetter("__dict__")
def instance_str(instance):
return state_str(instance_state(instance))
def state_str(state):
if state is None:
return "None"
else:
return "<%s at 0x%x>" % (state.class_.__name__, id(state.obj()))
def state_class_str(state):
if state is None:
return "None"
else:
return "<%s>" % (state.class_.__name__,)
def attribute_str(instance, attribute):
return instance_str(instance) + "." + attribute
def state_attribute_str(state, attribute):
return state_str(state) + "." + attribute
def object_mapper(instance):
return object_state(instance).mapper
def object_state(instance):
state = _inspect_mapped_object(instance)
if state is None:
raise exc.UnmappedInstanceError(instance)
else:
return state
@inspection._inspects(object)
def _inspect_mapped_object(instance):
try:
return instance_state(instance)
# TODO: whats the py-2/3 syntax to catch two
# different kinds of exceptions at once ?
except exc.UnmappedClassError:
return None
except exc.NO_STATE:
return None
def _class_to_mapper(class_or_mapper):
insp = inspection.inspect(class_or_mapper, False)
if insp is not None:
return insp.mapper
else:
raise exc.UnmappedClassError(class_or_mapper)
def _mapper_or_none(entity):
insp = inspection.inspect(entity, False)
if insp is not None:
return insp.mapper
else:
return None
def _is_mapped_class(entity):
insp = inspection.inspect(entity, False)
return (
insp is not None
and not insp.is_clause_element
and (insp.is_mapper or insp.is_aliased_class)
)
def _attr_as_key(attr):
if hasattr(attr, "key"):
return attr.key
else:
return expression._column_as_key(attr)
def _orm_columns(entity):
insp = inspection.inspect(entity, False)
if hasattr(insp, "selectable") and hasattr(insp.selectable, "c"):
return [c for c in insp.selectable.c]
else:
return [entity]
def _is_aliased_class(entity):
insp = inspection.inspect(entity, False)
return insp is not None and getattr(insp, "is_aliased_class", False)
def _entity_descriptor(entity, key):
insp = inspection.inspect(entity)
if insp.is_selectable:
description = entity
entity = insp.c
elif insp.is_aliased_class:
entity = insp.entity
description = entity
elif hasattr(insp, "mapper"):
description = entity = insp.mapper.class_
else:
description = entity
try:
return getattr(entity, key)
except AttributeError:
raise sa_exc.InvalidRequestError(
"Entity '%s' has no property '%s'" % (description, key)
)
_state_mapper = util.dottedgetter("manager.mapper")
@inspection._inspects(type)
def _inspect_mapped_class(class_, configure=False):
try:
class_manager = manager_of_class(class_)
if not class_manager.is_mapped:
return None
mapper = class_manager.mapper
except exc.NO_STATE:
return None
else:
if configure and mapper._new_mappers:
mapper._configure_all()
return mapper
def class_mapper(class_, configure=True):
mapper = _inspect_mapped_class(class_, configure=configure)
if mapper is None:
if not isinstance(class_, type):
raise sa_exc.ArgumentError(
"Class object expected, got '%r'." % (class_,)
)
raise exc.UnmappedClassError(class_)
else:
return mapper
class InspectionAttr(object):
__slots__ = ()
is_selectable = False
is_aliased_class = False
is_instance = False
is_mapper = False
is_property = False
is_attribute = False
_is_internal_proxy = False
is_clause_element = False
extension_type = NOT_EXTENSION
class InspectionAttrInfo(InspectionAttr):
@util.memoized_property
def info(self):
return {}
class _MappedAttribute(object):
__slots__ = ()
| true | true |
1c34ceeb7a442ca69c61d8058ac79407b2094fbd | 2,951 | py | Python | .travis-pre-run.py | PaulGregor/evelink | dc1ca05725bf81c7f066cf4abcb51ab503759aaa | [
"MIT"
] | null | null | null | .travis-pre-run.py | PaulGregor/evelink | dc1ca05725bf81c7f066cf4abcb51ab503759aaa | [
"MIT"
] | null | null | null | .travis-pre-run.py | PaulGregor/evelink | dc1ca05725bf81c7f066cf4abcb51ab503759aaa | [
"MIT"
] | 1 | 2019-12-11T10:31:09.000Z | 2019-12-11T10:31:09.000Z | #!/usr/bin/env python
#
# Download and extract the last Google App Engine SDK.
#
import argparse
import logging
import os
import re
import sys
import urllib
import urllib2
from xml.etree import ElementTree as ET
from zipfile import ZipFile
GAE_FEED_URL = 'https://code.google.com/feeds/p/googleappengine/downloads/basic'
SDK_PATTERN = r'http://googleappengine.googlecode.com/files/google_appengine_(\d\.)+zip'
DEFAULT_URL = 'http://googleappengine.googlecode.com/files/google_appengine_1.8.8.zip'
_log = logging.getLogger('travis.prerun')
logging.basicConfig(level=logging.INFO)
def get_args_parser():
"""Build the command line argument parser
"""
parser = argparse.ArgumentParser(
description='Download and extract the last Google App Engine SDK to.'
)
parser.add_argument(
'gae_lib_dst',
nargs='?',
default='/usr/local',
help='directory to extract Google App Engine SDK '
'(default to "/usr/local").'
)
return parser
def get_sdk_url(feed, pattern):
try:
_log.info("Fetching atom feed for GAE sdk releases...")
f = urllib2.urlopen(feed)
tree = ET.fromstring(f.read())
finally:
f.close()
ns = {'a': 'http://www.w3.org/2005/Atom'}
for link in tree.findall("a:entry/a:link[@rel='direct']", namespaces=ns):
url = link.get('href')
if re.match(SDK_PATTERN, url):
_log.info("Found last release: %s", url)
return url
def download_sdk(url):
_log.info("downloading SDK from %s ...", url)
return urllib.urlretrieve(url)[0]
def unzip(file, dst):
_log.info("Extracting SDK to %s ...", dst)
with ZipFile(file) as z:
for name in z.namelist():
if '/' in name and name[0] == '/':
raise ValueError("a SDK archive member has an absolute path")
if '..' in name:
raise ValueError("Found two dots in a member of the SDK archive")
z.extractall(dst)
def main(gae_lib_dst):
if sys.version_info[0:2] != (2, 7,):
_log.info("Python 2.7 is required to run AppEngine.")
return
try:
url = get_sdk_url(GAE_FEED_URL, SDK_PATTERN)
_log.info("Found GAE SDK url: %s", url)
except Exception:
url = DEFAULT_URL
_log.info(
"Failed finding GAE SDK url at %s; Will use default url (%s)",
GAE_FEED_URL,
url
)
try:
if not os.path.exists(gae_lib_dst):
_log.info("Creating %s directory", gae_lib_dst)
os.makedirs(gae_lib_dst)
sdk_path = download_sdk(url)
unzip(sdk_path, gae_lib_dst)
_log.info("GAE SDK available at %s/google_engine", gae_lib_dst)
except Exception as e:
_log.error("failed downloading the sdk: %s", str(e))
if __name__ == '__main__':
parser = get_args_parser()
args = parser.parse_args()
main(args.gae_lib_dst)
| 27.839623 | 88 | 0.624873 |
import argparse
import logging
import os
import re
import sys
import urllib
import urllib2
from xml.etree import ElementTree as ET
from zipfile import ZipFile
GAE_FEED_URL = 'https://code.google.com/feeds/p/googleappengine/downloads/basic'
SDK_PATTERN = r'http://googleappengine.googlecode.com/files/google_appengine_(\d\.)+zip'
DEFAULT_URL = 'http://googleappengine.googlecode.com/files/google_appengine_1.8.8.zip'
_log = logging.getLogger('travis.prerun')
logging.basicConfig(level=logging.INFO)
def get_args_parser():
parser = argparse.ArgumentParser(
description='Download and extract the last Google App Engine SDK to.'
)
parser.add_argument(
'gae_lib_dst',
nargs='?',
default='/usr/local',
help='directory to extract Google App Engine SDK '
'(default to "/usr/local").'
)
return parser
def get_sdk_url(feed, pattern):
try:
_log.info("Fetching atom feed for GAE sdk releases...")
f = urllib2.urlopen(feed)
tree = ET.fromstring(f.read())
finally:
f.close()
ns = {'a': 'http://www.w3.org/2005/Atom'}
for link in tree.findall("a:entry/a:link[@rel='direct']", namespaces=ns):
url = link.get('href')
if re.match(SDK_PATTERN, url):
_log.info("Found last release: %s", url)
return url
def download_sdk(url):
_log.info("downloading SDK from %s ...", url)
return urllib.urlretrieve(url)[0]
def unzip(file, dst):
_log.info("Extracting SDK to %s ...", dst)
with ZipFile(file) as z:
for name in z.namelist():
if '/' in name and name[0] == '/':
raise ValueError("a SDK archive member has an absolute path")
if '..' in name:
raise ValueError("Found two dots in a member of the SDK archive")
z.extractall(dst)
def main(gae_lib_dst):
if sys.version_info[0:2] != (2, 7,):
_log.info("Python 2.7 is required to run AppEngine.")
return
try:
url = get_sdk_url(GAE_FEED_URL, SDK_PATTERN)
_log.info("Found GAE SDK url: %s", url)
except Exception:
url = DEFAULT_URL
_log.info(
"Failed finding GAE SDK url at %s; Will use default url (%s)",
GAE_FEED_URL,
url
)
try:
if not os.path.exists(gae_lib_dst):
_log.info("Creating %s directory", gae_lib_dst)
os.makedirs(gae_lib_dst)
sdk_path = download_sdk(url)
unzip(sdk_path, gae_lib_dst)
_log.info("GAE SDK available at %s/google_engine", gae_lib_dst)
except Exception as e:
_log.error("failed downloading the sdk: %s", str(e))
if __name__ == '__main__':
parser = get_args_parser()
args = parser.parse_args()
main(args.gae_lib_dst)
| true | true |
1c34d05d5d36ccf8a7f171a2d3292af74ed9be9f | 1,140 | bzl | Python | sdk/bazel/base/common/build_defs/package_info.bzl | OpenTrustGroup/scripts | 31ca2ca5bae055113c6f92a2eb75b0c7528902b3 | [
"BSD-3-Clause"
] | 49 | 2018-12-20T00:35:06.000Z | 2021-12-30T22:40:05.000Z | build_defs/package_info.bzl | gxbllm/Fuchsia-SDK | 869668003f20d560a40802c7d820ef0fefba0462 | [
"BSD-3-Clause"
] | null | null | null | build_defs/package_info.bzl | gxbllm/Fuchsia-SDK | 869668003f20d560a40802c7d820ef0fefba0462 | [
"BSD-3-Clause"
] | 21 | 2019-01-03T11:06:10.000Z | 2021-08-06T00:55:50.000Z | # Copyright 2018 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Some utilities to declare and aggregate package contents.
"""
PackageLocalInfo = provider(
fields = {
"mappings": "list of (package dest, source) pairs",
},
)
# Identical to PackageLocalInfo, but a different type is needed when that
# information if generated from an aspect so that it does not collide with any
# existing PackageLocalInfo returned provider.
PackageGeneratedInfo = provider(
fields = {
"mappings": "list of (package dest, source) pairs",
},
)
PackageAggregateInfo = provider(
fields = {
"contents": "depset of (package dest, source) pairs",
},
)
def get_aggregate_info(mappings, deps):
transitive_info = []
for dep in deps:
if PackageAggregateInfo not in dep:
continue
transitive_info.append(dep[PackageAggregateInfo].contents)
return PackageAggregateInfo(contents = depset(mappings,
transitive = transitive_info))
| 30 | 80 | 0.672807 |
PackageLocalInfo = provider(
fields = {
"mappings": "list of (package dest, source) pairs",
},
)
PackageGeneratedInfo = provider(
fields = {
"mappings": "list of (package dest, source) pairs",
},
)
PackageAggregateInfo = provider(
fields = {
"contents": "depset of (package dest, source) pairs",
},
)
def get_aggregate_info(mappings, deps):
transitive_info = []
for dep in deps:
if PackageAggregateInfo not in dep:
continue
transitive_info.append(dep[PackageAggregateInfo].contents)
return PackageAggregateInfo(contents = depset(mappings,
transitive = transitive_info))
| true | true |
1c34d062cc2c268b11c9bdab083cd46cb4cdf8a3 | 284 | py | Python | projectamber/amberapp/migrations/0002_delete_order.py | AIexBondar/my_works | 0ba56e64bffb2e89d760861264c73417bc9f14fa | [
"Apache-2.0"
] | null | null | null | projectamber/amberapp/migrations/0002_delete_order.py | AIexBondar/my_works | 0ba56e64bffb2e89d760861264c73417bc9f14fa | [
"Apache-2.0"
] | null | null | null | projectamber/amberapp/migrations/0002_delete_order.py | AIexBondar/my_works | 0ba56e64bffb2e89d760861264c73417bc9f14fa | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.0.6 on 2020-05-29 17:28
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('amberapp', '0001_initial'),
]
operations = [
migrations.DeleteModel(
name='Order',
),
]
| 16.705882 | 47 | 0.588028 |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('amberapp', '0001_initial'),
]
operations = [
migrations.DeleteModel(
name='Order',
),
]
| true | true |
1c34d10e4992ef19bbf760081e0661f2f5b585d7 | 1,557 | py | Python | leetcode/933.number-of-recent-calls.py | geemaple/algorithm | 68bc5032e1ee52c22ef2f2e608053484c487af54 | [
"MIT"
] | 177 | 2017-08-21T08:57:43.000Z | 2020-06-22T03:44:22.000Z | leetcode/933.number-of-recent-calls.py | geemaple/algorithm | 68bc5032e1ee52c22ef2f2e608053484c487af54 | [
"MIT"
] | 2 | 2018-09-06T13:39:12.000Z | 2019-06-03T02:54:45.000Z | leetcode/933.number-of-recent-calls.py | geemaple/algorithm | 68bc5032e1ee52c22ef2f2e608053484c487af54 | [
"MIT"
] | 23 | 2017-08-23T06:01:28.000Z | 2020-04-20T03:17:36.000Z | #
# [969] Number of Recent Calls
#
# https://leetcode.com/problems/number-of-recent-calls/description/
#
# algorithms
# Easy (59.00%)
# Total Accepted: 37.2K
# Total Submissions: 52.8K
# Testcase Example: '["RecentCounter","ping","ping","ping","ping"]\n[[],[1],[100],[3001],[3002]]'
#
# Write a class RecentCounter to count recent requests.
#
# It has only one method: ping(int t), where t represents some time in
# milliseconds.
#
# Return the number of pings that have been made from 3000 milliseconds ago
# until now.
#
# Any ping with time in [t - 3000, t] will count, including the current ping.
#
# It is guaranteed that every call to ping uses a strictly larger value of t
# than before.
#
#
#
# Example 1:
#
#
# Input: inputs = ["RecentCounter","ping","ping","ping","ping"], inputs =
# [[],[1],[100],[3001],[3002]]
# Output: [null,1,2,3,3]
#
#
#
# Note:
#
#
# Each test case will have at most 10000 calls to ping.
# Each test case will call ping with strictly increasing values of t.
# Each call to ping will have 1 <= t <= 10^9.
#
#
#
#
#
#
import collections
class RecentCounter(object):
def __init__(self):
self.queue = collections.deque()
def ping(self, t):
"""
:type t: int
:rtype: int
"""
self.queue.append(t)
while self.queue[0] < t - 3000:
self.queue.popleft()
return len(self.queue)
# Your RecentCounter object will be instantiated and called as such:
# obj = RecentCounter()
# param_1 = obj.ping(t)
| 22.242857 | 98 | 0.619782 |
import collections
class RecentCounter(object):
def __init__(self):
self.queue = collections.deque()
def ping(self, t):
self.queue.append(t)
while self.queue[0] < t - 3000:
self.queue.popleft()
return len(self.queue)
| true | true |
1c34d1ec56167ffacc5442ad9ae1e5b20e005a87 | 6,097 | py | Python | tests/unit/api/test_query.py | amenezes/discovery-client | 9c41456d1cc14f4aab34628ad4e13423e00bc4be | [
"Apache-2.0"
] | 2 | 2019-07-18T22:43:49.000Z | 2020-03-09T03:27:41.000Z | tests/unit/api/test_query.py | amenezes/discovery-client | 9c41456d1cc14f4aab34628ad4e13423e00bc4be | [
"Apache-2.0"
] | 20 | 2019-02-27T19:08:03.000Z | 2021-06-22T16:47:32.000Z | tests/unit/api/test_query.py | amenezes/discovery-client | 9c41456d1cc14f4aab34628ad4e13423e00bc4be | [
"Apache-2.0"
] | null | null | null | import pytest
from discovery import api
def sample_payload():
return {
"Name": "my-query",
"Session": "adf4238a-882b-9ddc-4a9d-5b6758e4159e",
"Token": "",
"Service": {
"Service": "redis",
"Failover": {"NearestN": 3, "Datacenters": ["dc1", "dc2"]},
"Near": "node1",
"OnlyPassing": False,
"Tags": ["primary", "!experimental"],
"NodeMeta": {"instance_type": "m3.large"},
"ServiceMeta": {"environment": "production"},
},
"DNS": {"TTL": "10s"},
}
def sample_response():
return {"ID": "8f246b77-f3e1-ff88-5b48-8ec93abf3e05"}
def sample_read_response():
return [
{
"ID": "8f246b77-f3e1-ff88-5b48-8ec93abf3e05",
"Name": "my-query",
"Session": "adf4238a-882b-9ddc-4a9d-5b6758e4159e",
"Token": "<hidden>",
"Service": {
"Service": "redis",
"Failover": {"NearestN": 3, "Datacenters": ["dc1", "dc2"]},
"OnlyPassing": False,
"Tags": ["primary", "!experimental"],
"NodeMeta": {"instance_type": "m3.large"},
"ServiceMeta": {"environment": "production"},
},
"DNS": {"TTL": "10s"},
"RaftIndex": {"CreateIndex": 23, "ModifyIndex": 42},
}
]
def sample_execute_response():
return {
"Service": "redis",
"Nodes": [
{
"Node": {
"ID": "40e4a748-2192-161a-0510-9bf59fe950b5",
"Node": "foobar",
"Address": "10.1.10.12",
"Datacenter": "dc1",
"TaggedAddresses": {"lan": "10.1.10.12", "wan": "10.1.10.12"},
"NodeMeta": {"instance_type": "m3.large"},
},
"Service": {
"ID": "redis",
"Service": "redis",
"Tags": None,
"Meta": {"redis_version": "4.0"},
"Port": 8000,
},
"Checks": [
{
"Node": "foobar",
"CheckID": "service:redis",
"Name": "Service 'redis' check",
"Status": "passing",
"Notes": "",
"Output": "",
"ServiceID": "redis",
"ServiceName": "redis",
},
{
"Node": "foobar",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "",
"ServiceID": "",
"ServiceName": "",
},
],
"DNS": {"TTL": "10s"},
"Datacenter": "dc3",
"Failovers": 2,
}
],
}
def sample_explain_response():
return {
"Query": {
"ID": "8f246b77-f3e1-ff88-5b48-8ec93abf3e05",
"Name": "my-query",
"Session": "adf4238a-882b-9ddc-4a9d-5b6758e4159e",
"Token": "<hidden>",
# 'Name': 'geo-db',
"Template": {
"Type": "name_prefix_match",
"Regexp": "^geo-db-(.*?)-([^\\-]+?)$",
},
"Service": {
"Service": "mysql-customer",
"Failover": {"NearestN": 3, "Datacenters": ["dc1", "dc2"]},
"OnlyPassing": True,
"Tags": ["primary"],
"Meta": {"mysql_version": "5.7.20"},
"NodeMeta": {"instance_type": "m3.large"},
},
}
}
@pytest.fixture
@pytest.mark.asyncio
def query(consul_api):
return api.Query(client=consul_api)
@pytest.mark.asyncio
@pytest.mark.parametrize("expected", [sample_response()])
async def test_create(query, expected):
query.client.expected = expected
response = await query.create(sample_payload())
response = await response.json()
assert response == sample_response()
@pytest.mark.asyncio
@pytest.mark.parametrize("expected", [sample_read_response()])
async def test_read_without_uuid(query, expected):
query.client.expected = expected
response = await query.read()
response = await response.json()
assert response == sample_read_response()
@pytest.mark.asyncio
@pytest.mark.parametrize("expected", [sample_read_response()])
async def test_read_with_uuid(query, expected):
query.client.expected = expected
response = await query.read("8f246b77-f3e1-ff88-5b48-8ec93abf3e05")
response = await response.json()
assert response == sample_read_response()
@pytest.mark.asyncio
@pytest.mark.parametrize("expected", [200])
async def test_delete(query, expected):
query.client.expected = expected
response = await query.delete("8f246b77-f3e1-ff88-5b48-8ec93abf3e05")
assert response.status == 200
@pytest.mark.asyncio
@pytest.mark.parametrize("expected", [200])
async def test_update(query, expected):
query.client.expected = expected
response = await query.update(
"8f246b77-f3e1-ff88-5b48-8ec93abf3e05", sample_payload()
)
assert response.status == 200
@pytest.mark.asyncio
@pytest.mark.parametrize("expected", [sample_execute_response()])
async def test_execute(query, expected):
query.client.expected = expected
response = await query.execute("8f246b77-f3e1-ff88-5b48-8ec93abf3e05")
response = await response.json()
assert response == sample_execute_response()
@pytest.mark.asyncio
@pytest.mark.parametrize("expected", [sample_explain_response()])
async def test_explain(query, expected):
query.client.expected = expected
response = await query.explain("8f246b77-f3e1-ff88-5b48-8ec93abf3e05")
response = await response.json()
assert response == sample_explain_response()
| 32.089474 | 82 | 0.507627 | import pytest
from discovery import api
def sample_payload():
return {
"Name": "my-query",
"Session": "adf4238a-882b-9ddc-4a9d-5b6758e4159e",
"Token": "",
"Service": {
"Service": "redis",
"Failover": {"NearestN": 3, "Datacenters": ["dc1", "dc2"]},
"Near": "node1",
"OnlyPassing": False,
"Tags": ["primary", "!experimental"],
"NodeMeta": {"instance_type": "m3.large"},
"ServiceMeta": {"environment": "production"},
},
"DNS": {"TTL": "10s"},
}
def sample_response():
return {"ID": "8f246b77-f3e1-ff88-5b48-8ec93abf3e05"}
def sample_read_response():
return [
{
"ID": "8f246b77-f3e1-ff88-5b48-8ec93abf3e05",
"Name": "my-query",
"Session": "adf4238a-882b-9ddc-4a9d-5b6758e4159e",
"Token": "<hidden>",
"Service": {
"Service": "redis",
"Failover": {"NearestN": 3, "Datacenters": ["dc1", "dc2"]},
"OnlyPassing": False,
"Tags": ["primary", "!experimental"],
"NodeMeta": {"instance_type": "m3.large"},
"ServiceMeta": {"environment": "production"},
},
"DNS": {"TTL": "10s"},
"RaftIndex": {"CreateIndex": 23, "ModifyIndex": 42},
}
]
def sample_execute_response():
return {
"Service": "redis",
"Nodes": [
{
"Node": {
"ID": "40e4a748-2192-161a-0510-9bf59fe950b5",
"Node": "foobar",
"Address": "10.1.10.12",
"Datacenter": "dc1",
"TaggedAddresses": {"lan": "10.1.10.12", "wan": "10.1.10.12"},
"NodeMeta": {"instance_type": "m3.large"},
},
"Service": {
"ID": "redis",
"Service": "redis",
"Tags": None,
"Meta": {"redis_version": "4.0"},
"Port": 8000,
},
"Checks": [
{
"Node": "foobar",
"CheckID": "service:redis",
"Name": "Service 'redis' check",
"Status": "passing",
"Notes": "",
"Output": "",
"ServiceID": "redis",
"ServiceName": "redis",
},
{
"Node": "foobar",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "",
"ServiceID": "",
"ServiceName": "",
},
],
"DNS": {"TTL": "10s"},
"Datacenter": "dc3",
"Failovers": 2,
}
],
}
def sample_explain_response():
return {
"Query": {
"ID": "8f246b77-f3e1-ff88-5b48-8ec93abf3e05",
"Name": "my-query",
"Session": "adf4238a-882b-9ddc-4a9d-5b6758e4159e",
"Token": "<hidden>",
"Template": {
"Type": "name_prefix_match",
"Regexp": "^geo-db-(.*?)-([^\\-]+?)$",
},
"Service": {
"Service": "mysql-customer",
"Failover": {"NearestN": 3, "Datacenters": ["dc1", "dc2"]},
"OnlyPassing": True,
"Tags": ["primary"],
"Meta": {"mysql_version": "5.7.20"},
"NodeMeta": {"instance_type": "m3.large"},
},
}
}
@pytest.fixture
@pytest.mark.asyncio
def query(consul_api):
return api.Query(client=consul_api)
@pytest.mark.asyncio
@pytest.mark.parametrize("expected", [sample_response()])
async def test_create(query, expected):
query.client.expected = expected
response = await query.create(sample_payload())
response = await response.json()
assert response == sample_response()
@pytest.mark.asyncio
@pytest.mark.parametrize("expected", [sample_read_response()])
async def test_read_without_uuid(query, expected):
query.client.expected = expected
response = await query.read()
response = await response.json()
assert response == sample_read_response()
@pytest.mark.asyncio
@pytest.mark.parametrize("expected", [sample_read_response()])
async def test_read_with_uuid(query, expected):
query.client.expected = expected
response = await query.read("8f246b77-f3e1-ff88-5b48-8ec93abf3e05")
response = await response.json()
assert response == sample_read_response()
@pytest.mark.asyncio
@pytest.mark.parametrize("expected", [200])
async def test_delete(query, expected):
query.client.expected = expected
response = await query.delete("8f246b77-f3e1-ff88-5b48-8ec93abf3e05")
assert response.status == 200
@pytest.mark.asyncio
@pytest.mark.parametrize("expected", [200])
async def test_update(query, expected):
query.client.expected = expected
response = await query.update(
"8f246b77-f3e1-ff88-5b48-8ec93abf3e05", sample_payload()
)
assert response.status == 200
@pytest.mark.asyncio
@pytest.mark.parametrize("expected", [sample_execute_response()])
async def test_execute(query, expected):
query.client.expected = expected
response = await query.execute("8f246b77-f3e1-ff88-5b48-8ec93abf3e05")
response = await response.json()
assert response == sample_execute_response()
@pytest.mark.asyncio
@pytest.mark.parametrize("expected", [sample_explain_response()])
async def test_explain(query, expected):
query.client.expected = expected
response = await query.explain("8f246b77-f3e1-ff88-5b48-8ec93abf3e05")
response = await response.json()
assert response == sample_explain_response()
| true | true |
1c34d24a7ccafb22e1865bc1b32325f00e7e88a9 | 4,243 | py | Python | src/main/run_work.py | ta-assistant/Admin-CLI | 1c03ede0e09d8ddc270646937aa7af463c55f1f5 | [
"MIT"
] | 1 | 2021-07-22T15:43:02.000Z | 2021-07-22T15:43:02.000Z | src/main/run_work.py | ta-assistant/Admin-CLI | 1c03ede0e09d8ddc270646937aa7af463c55f1f5 | [
"MIT"
] | 28 | 2021-05-15T08:18:21.000Z | 2021-08-02T06:12:30.000Z | src/main/run_work.py | ta-assistant/TA-CLI | 1c03ede0e09d8ddc270646937aa7af463c55f1f5 | [
"MIT"
] | null | null | null | import os
import json
# import pandas as pd
from datetime import datetime
from src.main.pre_work import Work
from lib.file_management.extract import unzipfile
from src.main.student_data import StudentData
from lib.file_management.configeditor import ConfigEditor
from lib.function_network.func_network import CallApi
from lib.file_management.createapikeyfile import SaveApiKey
from lib.cli_displayed.dis_cli import display_typo
def check_config(path):
if not os.path.exists(os.path.join(path, "ta", "config.json")):
return False
else:
return True
def check_draft(path):
if not os.path.exists(os.path.join(path, "ta", "draft.json")) and not SaveApiKey().exsitapikey():
return False
else:
return True
def check_state(config_state, draft_state, path):
if config_state and draft_state:
return True
else:
display_typo(1, (config_state and draft_state), "Property is not ready please try again",
optional_massage=f"CONFIG : {config_state} / DRAFT : {draft_state} / API-KEY : {SaveApiKey().exsitapikey()}")
print("[*]")
return False
def preparework(path):
config_state = check_config(path)
draft_state = check_draft(path)
display_typo(1, config_state, "checking config.json")
display_typo(1, draft_state, "checking draft.json")
if not check_state(config_state, draft_state, path):
return False
return True
def draft_config(path):
print("Do you want to use draft from draft.json or fetch from the server")
while True:
user_in = input("(R)ead from file or (F)etch from server: ")
if user_in.lower() in "RrFf":
break
if user_in.lower() == "f":
draft = CallApi(path).fetch()
print(draft)
else:
with open(os.path.join(path, "ta", "draft.json"), "r") as draftfile:
draft = json.load(draftfile)
draftfile.close()
return draft
def add_data_to_work(path, draft):
work = Work()
work.draft = draft
work.path = path
work.workId = ConfigEditor(path=path).readconfig()["workId"]
if work.property_is_ready():
work_path = os.path.join(path, "ta", "work.json")
if work.create():
print(f" |-[/] {work_path} created")
else:
print(f" |-[X] {work_path} already exists")
else:
print("property is not ready")
print(work.draft)
print(work.path)
print(work.workId)
return False, None
return True, work
def unzip_homework(path, draft):
if not unzipfile(path, draft["fileDraft"]):
print("[*] all file aren't follow the draft")
return False
print("[/] finish")
return True
def student_checking(path, work, file, openvs, onebyone):
student = StudentData(path=work.path, filename=file, draft=work.draft)
with open(os.path.join(path, "ta", "work.json"), "r") as workfile:
scores = json.load(workfile)["scores"]
workfile.close
student.prepare_student_data()
did_student_checked(path,work, file, student, scores, openvs, onebyone)
def did_student_checked(path,work, file, student, scores, openvs, onebyone):
if student.check_work_score(scores):
if openvs and onebyone:
assignmentpath = os.path.join(path,"ta", "Assignment", file)
print(assignmentpath)
os.system(f"code \"{assignmentpath}\"")
work.write_work(student.ask())
def scoring(path, work, openvs, onebyone):
list_file = os.listdir(os.path.join(path, "ta", "Assignment"))
assignmentpath = os.path.join("ta", "Assignment")
if openvs and not onebyone:
os.system(f"code \"{assignmentpath}\"")
for file in list_file:
if "." in file or file == "ta":
continue
student_checking(path, work, file, openvs, onebyone)
def run_work(path, openvs=True, onebyone=False):
print("[*] starting...")
if not preparework(path):
return False
draft = draft_config(path)
workstate, work = add_data_to_work(path, draft)
if not workstate:
return False
if not unzip_homework(path, draft):
return False
scoring(path, work, openvs, onebyone)
return True
| 31.664179 | 130 | 0.651897 | import os
import json
from datetime import datetime
from src.main.pre_work import Work
from lib.file_management.extract import unzipfile
from src.main.student_data import StudentData
from lib.file_management.configeditor import ConfigEditor
from lib.function_network.func_network import CallApi
from lib.file_management.createapikeyfile import SaveApiKey
from lib.cli_displayed.dis_cli import display_typo
def check_config(path):
if not os.path.exists(os.path.join(path, "ta", "config.json")):
return False
else:
return True
def check_draft(path):
if not os.path.exists(os.path.join(path, "ta", "draft.json")) and not SaveApiKey().exsitapikey():
return False
else:
return True
def check_state(config_state, draft_state, path):
if config_state and draft_state:
return True
else:
display_typo(1, (config_state and draft_state), "Property is not ready please try again",
optional_massage=f"CONFIG : {config_state} / DRAFT : {draft_state} / API-KEY : {SaveApiKey().exsitapikey()}")
print("[*]")
return False
def preparework(path):
config_state = check_config(path)
draft_state = check_draft(path)
display_typo(1, config_state, "checking config.json")
display_typo(1, draft_state, "checking draft.json")
if not check_state(config_state, draft_state, path):
return False
return True
def draft_config(path):
print("Do you want to use draft from draft.json or fetch from the server")
while True:
user_in = input("(R)ead from file or (F)etch from server: ")
if user_in.lower() in "RrFf":
break
if user_in.lower() == "f":
draft = CallApi(path).fetch()
print(draft)
else:
with open(os.path.join(path, "ta", "draft.json"), "r") as draftfile:
draft = json.load(draftfile)
draftfile.close()
return draft
def add_data_to_work(path, draft):
work = Work()
work.draft = draft
work.path = path
work.workId = ConfigEditor(path=path).readconfig()["workId"]
if work.property_is_ready():
work_path = os.path.join(path, "ta", "work.json")
if work.create():
print(f" |-[/] {work_path} created")
else:
print(f" |-[X] {work_path} already exists")
else:
print("property is not ready")
print(work.draft)
print(work.path)
print(work.workId)
return False, None
return True, work
def unzip_homework(path, draft):
if not unzipfile(path, draft["fileDraft"]):
print("[*] all file aren't follow the draft")
return False
print("[/] finish")
return True
def student_checking(path, work, file, openvs, onebyone):
student = StudentData(path=work.path, filename=file, draft=work.draft)
with open(os.path.join(path, "ta", "work.json"), "r") as workfile:
scores = json.load(workfile)["scores"]
workfile.close
student.prepare_student_data()
did_student_checked(path,work, file, student, scores, openvs, onebyone)
def did_student_checked(path,work, file, student, scores, openvs, onebyone):
if student.check_work_score(scores):
if openvs and onebyone:
assignmentpath = os.path.join(path,"ta", "Assignment", file)
print(assignmentpath)
os.system(f"code \"{assignmentpath}\"")
work.write_work(student.ask())
def scoring(path, work, openvs, onebyone):
list_file = os.listdir(os.path.join(path, "ta", "Assignment"))
assignmentpath = os.path.join("ta", "Assignment")
if openvs and not onebyone:
os.system(f"code \"{assignmentpath}\"")
for file in list_file:
if "." in file or file == "ta":
continue
student_checking(path, work, file, openvs, onebyone)
def run_work(path, openvs=True, onebyone=False):
print("[*] starting...")
if not preparework(path):
return False
draft = draft_config(path)
workstate, work = add_data_to_work(path, draft)
if not workstate:
return False
if not unzip_homework(path, draft):
return False
scoring(path, work, openvs, onebyone)
return True
| true | true |
1c34d32afeb26bdca540b86539fcbc303eed4360 | 3,224 | py | Python | examples/aiohttp-echo/app.py | naotokuwa/line-bot-sdk-python | 5ce92703031d60e8b662ccc370699e09bd57999f | [
"Apache-2.0"
] | 1,563 | 2016-10-14T04:32:49.000Z | 2022-03-31T06:34:17.000Z | examples/aiohttp-echo/app.py | naotokuwa/line-bot-sdk-python | 5ce92703031d60e8b662ccc370699e09bd57999f | [
"Apache-2.0"
] | 265 | 2016-10-15T08:25:51.000Z | 2022-03-31T02:07:15.000Z | examples/aiohttp-echo/app.py | naotokuwa/line-bot-sdk-python | 5ce92703031d60e8b662ccc370699e09bd57999f | [
"Apache-2.0"
] | 999 | 2016-10-15T07:47:55.000Z | 2022-03-29T05:15:12.000Z | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
from argparse import ArgumentParser
import asyncio
import aiohttp
from aiohttp import web
import logging
from aiohttp.web_runner import TCPSite
from linebot import (
AsyncLineBotApi, WebhookParser
)
from linebot.aiohttp_async_http_client import AiohttpAsyncHttpClient
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import (
MessageEvent, TextMessage, TextSendMessage,
)
# get channel_secret and channel_access_token from your environment variable
channel_secret = os.getenv('LINE_CHANNEL_SECRET', None)
channel_access_token = os.getenv('LINE_CHANNEL_ACCESS_TOKEN', None)
if channel_secret is None:
print('Specify LINE_CHANNEL_SECRET as environment variable.')
sys.exit(1)
if channel_access_token is None:
print('Specify LINE_CHANNEL_ACCESS_TOKEN as environment variable.')
sys.exit(1)
class Handler:
    """Handles LINE webhook callbacks by echoing text messages back."""

    def __init__(self, line_bot_api, parser):
        # Client used for sending replies and parser used to validate
        # incoming webhook payloads.
        self.line_bot_api = line_bot_api
        self.parser = parser

    async def echo(self, request):
        """Validate the webhook signature, then echo every text message."""
        signature = request.headers['X-Line-Signature']
        body = await request.text()

        try:
            events = self.parser.parse(body, signature)
        except InvalidSignatureError:
            # Signature mismatch: reject the request.
            return web.Response(status=400, text='Invalid signature')

        # Only text messages are echoed; everything else is ignored.
        text_events = (
            event for event in events
            if isinstance(event, MessageEvent)
            and isinstance(event.message, TextMessage)
        )
        for event in text_events:
            await self.line_bot_api.reply_message(
                event.reply_token,
                TextSendMessage(text=event.message.text)
            )

        return web.Response(text="OK\n")
async def main(port=8000):
    """Build the LINE bot client, mount the webhook route and serve forever.

    Parameters
    ----------
    port : int
        TCP port the webhook server listens on (default 8000).
    """
    session = aiohttp.ClientSession()
    http_client = AiohttpAsyncHttpClient(session)
    bot_api = AsyncLineBotApi(channel_access_token, http_client)
    webhook_parser = WebhookParser(channel_secret)
    handler = Handler(bot_api, webhook_parser)

    app = web.Application()
    app.add_routes([web.post('/callback', handler.echo)])
    runner = web.AppRunner(app)
    await runner.setup()
    await TCPSite(runner=runner, port=port).start()

    # The site serves in the background; keep this coroutine alive.
    while True:
        await asyncio.sleep(3600)
if __name__ == "__main__":
    logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
    # Parse the optional --port flag (default 8000), then run the server
    # until interrupted.
    arg_parser = ArgumentParser(
        usage='Usage: python ' + __file__ + ' [--port <port>] [--help]'
    )
    arg_parser.add_argument('-p', '--port', type=int, default=8000, help='port')
    options = arg_parser.parse_args()
    asyncio.run(main(options.port))
| 30.415094 | 80 | 0.702543 |
import os
import sys
from argparse import ArgumentParser
import asyncio
import aiohttp
from aiohttp import web
import logging
from aiohttp.web_runner import TCPSite
from linebot import (
AsyncLineBotApi, WebhookParser
)
from linebot.aiohttp_async_http_client import AiohttpAsyncHttpClient
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import (
MessageEvent, TextMessage, TextSendMessage,
)
channel_secret = os.getenv('LINE_CHANNEL_SECRET', None)
channel_access_token = os.getenv('LINE_CHANNEL_ACCESS_TOKEN', None)
if channel_secret is None:
print('Specify LINE_CHANNEL_SECRET as environment variable.')
sys.exit(1)
if channel_access_token is None:
print('Specify LINE_CHANNEL_ACCESS_TOKEN as environment variable.')
sys.exit(1)
class Handler:
def __init__(self, line_bot_api, parser):
self.line_bot_api = line_bot_api
self.parser = parser
async def echo(self, request):
signature = request.headers['X-Line-Signature']
body = await request.text()
try:
events = self.parser.parse(body, signature)
except InvalidSignatureError:
return web.Response(status=400, text='Invalid signature')
for event in events:
if not isinstance(event, MessageEvent):
continue
if not isinstance(event.message, TextMessage):
continue
await self.line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text=event.message.text)
)
return web.Response(text="OK\n")
async def main(port=8000):
session = aiohttp.ClientSession()
async_http_client = AiohttpAsyncHttpClient(session)
line_bot_api = AsyncLineBotApi(channel_access_token, async_http_client)
parser = WebhookParser(channel_secret)
handler = Handler(line_bot_api, parser)
app = web.Application()
app.add_routes([web.post('/callback', handler.echo)])
runner = web.AppRunner(app)
await runner.setup()
site = TCPSite(runner=runner, port=port)
await site.start()
while True:
await asyncio.sleep(3600)
if __name__ == "__main__":
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
arg_parser = ArgumentParser(
usage='Usage: python ' + __file__ + ' [--port <port>] [--help]'
)
arg_parser.add_argument('-p', '--port', type=int, default=8000, help='port')
options = arg_parser.parse_args()
asyncio.run(main(options.port))
| true | true |
1c34d36382b3db437c481582c544481af1042439 | 393 | py | Python | dtinyurl/wsgi.py | kawww/linkipfs | d9cd3109522d6c63e32a6b86edfe31546cca694a | [
"MIT"
] | 21 | 2019-06-16T08:58:00.000Z | 2022-01-24T02:36:55.000Z | dtinyurl/wsgi.py | kawww/linkipfs | d9cd3109522d6c63e32a6b86edfe31546cca694a | [
"MIT"
] | 5 | 2019-08-24T01:55:14.000Z | 2021-06-10T21:26:40.000Z | dtinyurl/wsgi.py | kawww/linkipfs | d9cd3109522d6c63e32a6b86edfe31546cca694a | [
"MIT"
] | 11 | 2019-06-16T08:58:33.000Z | 2021-10-06T03:50:28.000Z | """
WSGI config for dtinyurl project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module before the module-level
# WSGI callable (imported by servers such as gunicorn/uwsgi) is created.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dtinyurl.settings')
application = get_wsgi_application()
| 23.117647 | 78 | 0.78626 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dtinyurl.settings')
application = get_wsgi_application()
| true | true |
1c34d45292a58e17b560f9af3b64e2dde13af5cc | 1,180 | py | Python | ps1-fractals/p5-sierpinski-random/sierpinski.py | slhshamloo/comp-phys | 04d6759e0eb9d7e16e2781417d389bc15e22b01b | [
"MIT"
] | null | null | null | ps1-fractals/p5-sierpinski-random/sierpinski.py | slhshamloo/comp-phys | 04d6759e0eb9d7e16e2781417d389bc15e22b01b | [
"MIT"
] | null | null | null | ps1-fractals/p5-sierpinski-random/sierpinski.py | slhshamloo/comp-phys | 04d6759e0eb9d7e16e2781417d389bc15e22b01b | [
"MIT"
] | null | null | null | from numpy import ndarray
from fractal import fractal_random_scalerot
def sierpinski_random(range_x: tuple[float, float],
                      range_y: tuple[float, float],
                      steps: int = 10, samples: int = 10000
                      ) -> tuple[ndarray, ndarray]:
    """Generate the points of a Sierpinski triangle by random iteration.

    Randomly sampled points in the given rectangle are pushed through the
    Sierpinski contraction mappings to approximate the attractor.

    Args:
        range_x: range of the sampling points in the x axis
        range_y: range of the sampling points in the y axis
        steps: the number of times the fractal mapping is applied
        samples: the number of randomly sampled points

    Returns:
        two numpy arrays with the x and y coordinates of the fractal points
    """
    width = range_x[1] - range_x[0]
    height = range_y[1] - range_y[0]
    # Three half-scale contractions (no rotation) whose offsets place the
    # copies at the bottom-left, top-middle and bottom-right of the range.
    scales = ((0.5, 0.5),) * 3
    rotations = (0,) * 3
    offsets = ((0, 0),
               (0.25 * width, 0.5 * height),
               (0.5 * width, 0))
    return fractal_random_scalerot(range_x, range_y, scales, rotations,
                                   offsets, steps, samples)
| 39.333333 | 76 | 0.616102 | from numpy import ndarray
from fractal import fractal_random_scalerot
def sierpinski_random(range_x: tuple[float, float],
range_y: tuple[float, float],
steps: int = 10, samples: int=10000
) -> tuple[ndarray, ndarray]:
return fractal_random_scalerot(
range_x, range_y, ((0.5, 0.5),) * 3, (0,) * 3,
((0, 0), (0.25 * (range_x[1] - range_x[0]),
0.5 * (range_y[1] - range_y[0])),
(0.5 * (range_x[1] - range_x[0]), 0)), steps, samples)
| true | true |
1c34d472e7497f77c5212b1ae5ceefbc7855012c | 409 | py | Python | django_tgbot/types/pollanswer.py | purwowd/django-tgbot | 6712ad2e9986c0961ad402a1d2e37be39e2f5fb4 | [
"MIT"
] | 52 | 2020-04-05T11:06:21.000Z | 2022-03-21T05:29:15.000Z | django_tgbot/types/pollanswer.py | armanexplorer/django-tgbot | e89f34b6a25beb9473c9e162ec8c161c14cd4cd6 | [
"MIT"
] | 11 | 2020-09-02T00:24:13.000Z | 2022-03-22T06:09:36.000Z | django_tgbot/types/pollanswer.py | armanexplorer/django-tgbot | e89f34b6a25beb9473c9e162ec8c161c14cd4cd6 | [
"MIT"
] | 14 | 2020-09-01T23:31:54.000Z | 2022-01-30T07:03:52.000Z | from . import BasicType
class PollAnswer(BasicType):
    """A user's answer in a non-anonymous Telegram poll."""

    # Field schema consumed by BasicType: option_ids is a list of ints.
    fields = {
        'poll_id': str,
        'option_ids': {
            'class': int,
            'array': True
        },
    }

    def __init__(self, obj=None):
        super().__init__(obj)

    def get_user(self):
        """Return the answering user, or None when not present."""
        return getattr(self, 'user', None)
from . import user
PollAnswer.fields.update({
'user': user.User
}) | 17.041667 | 45 | 0.552567 | from . import BasicType
class PollAnswer(BasicType):
fields = {
'poll_id': str,
'option_ids': {
'class': int,
'array': True
},
}
def __init__(self, obj=None):
super(PollAnswer, self).__init__(obj)
def get_user(self):
return getattr(self, 'user', None)
from . import user
PollAnswer.fields.update({
'user': user.User
}) | true | true |
1c34d4bcd55d7bb1e7174aa0d6409ab0baded1c4 | 737 | py | Python | django_redis_prometheus/cache/backends/filebased.py | Zagrebelin/django-redis-prometheus | 971a81f1cab91d62bca6223feb32506a764c246b | [
"Apache-2.0"
] | null | null | null | django_redis_prometheus/cache/backends/filebased.py | Zagrebelin/django-redis-prometheus | 971a81f1cab91d62bca6223feb32506a764c246b | [
"Apache-2.0"
] | null | null | null | django_redis_prometheus/cache/backends/filebased.py | Zagrebelin/django-redis-prometheus | 971a81f1cab91d62bca6223feb32506a764c246b | [
"Apache-2.0"
] | 1 | 2021-12-27T09:56:59.000Z | 2021-12-27T09:56:59.000Z | from django.core.cache.backends import filebased
from django_redis_prometheus.cache.metrics import (
django_cache_get_total, django_cache_hits_total, django_cache_misses_total)
class FileBasedCache(filebased.FileBasedCache):
    """File-based cache backend instrumented with Prometheus hit/miss metrics."""

    # Unique sentinel so a cached falsy value (0, '', False, [], or even a
    # deliberately cached None) can be distinguished from a real cache miss.
    _MISS = object()

    def get(self, key, default=None, version=None):
        """Look up *key*, incrementing get/hit/miss counters for this backend.

        Returns the cached value on a hit, otherwise *default*.
        """
        django_cache_get_total.labels(backend='filebased').inc()
        cached = super(FileBasedCache, self).get(
            key, default=self._MISS, version=version)
        if cached is self._MISS:
            django_cache_misses_total.labels(backend='filebased').inc()
            return default
        django_cache_hits_total.labels(backend='filebased').inc()
        return cached
| 40.944444 | 79 | 0.720488 | from django.core.cache.backends import filebased
from django_redis_prometheus.cache.metrics import (
django_cache_get_total, django_cache_hits_total, django_cache_misses_total)
class FileBasedCache(filebased.FileBasedCache):
def get(self, key, default=None, version=None):
django_cache_get_total.labels(backend='filebased').inc()
cached = super(FileBasedCache, self).get(
key, default=None, version=version)
if cached is not None:
django_cache_hits_total.labels(backend='filebased').inc()
else:
django_cache_misses_total.labels(backend='filebased').inc()
return cached or default
| true | true |
1c34db27bfb6729e6ad1f4da9b23cb1c70234a26 | 9,244 | py | Python | indra/util/__init__.py | zebulon2/indra | 7727ddcab52ad8012eb6592635bfa114e904bd48 | [
"BSD-2-Clause"
] | 136 | 2016-02-11T22:06:37.000Z | 2022-03-31T17:26:20.000Z | indra/util/__init__.py | zebulon2/indra | 7727ddcab52ad8012eb6592635bfa114e904bd48 | [
"BSD-2-Clause"
] | 748 | 2016-02-03T16:27:56.000Z | 2022-03-09T14:27:54.000Z | indra/util/__init__.py | zebulon2/indra | 7727ddcab52ad8012eb6592635bfa114e904bd48 | [
"BSD-2-Clause"
] | 56 | 2015-08-28T14:03:44.000Z | 2022-02-04T06:15:55.000Z | import sys
import csv
import gzip
import zlib
import logging
from io import BytesIO
from functools import wraps
from datetime import datetime
import xml.etree.ElementTree as ET
try: # Python 3
from itertools import zip_longest
except ImportError: # Python 2
from itertools import izip_longest as zip_longest
if sys.version_info[0] >= 3:
non_unicode = bytes
import pickle
else:
non_unicode = str
import cPickle as pickle
logger = logging.getLogger(__name__)
def clockit(func):
    """Decorator that logs the wall-clock duration of each call at DEBUG level."""
    @wraps(func)
    def timed(*args, **kwargs):
        started = datetime.now()
        result = func(*args, **kwargs)
        elapsed = datetime.now() - started
        # Same module logger as the module-level `logger` (both are
        # logging.getLogger(__name__) for this module).
        logging.getLogger(__name__).debug(
            '%s %-30s %s %s', '~' * 5, func.__name__, elapsed, '~' * 5)
        return result
    return timed
def unicode_strs(obj, attr_filter=None):
    """Recursively check that *obj* contains no non-unicode strings.

    Returns False as soon as a ``non_unicode`` instance (``bytes`` on Py3,
    ``str`` on Py2) is found: in *obj* itself, in its elements for
    list/tuple/set, in its ``__dict__`` attributes, or in its dict
    keys/values. Returns True otherwise.

    *attr_filter*, if given, is a collection of attribute names to skip
    when inspecting ``obj.__dict__``.
    """
    if isinstance(obj, non_unicode):
        return False
    # Check for an iterable
    if isinstance(obj, list) or isinstance(obj, tuple) or \
       isinstance(obj, set):
        for item in obj:
            has_unicode_strs = unicode_strs(item)
            if not has_unicode_strs:
                return False
    if hasattr(obj, '__dict__'):
        for item_name, item in obj.__dict__.items():
            # Skip attributes the caller asked to ignore.
            if attr_filter and item_name in attr_filter:
                continue
            has_unicode_strs = unicode_strs(item)
            if not has_unicode_strs:
                return False
    if isinstance(obj, dict):
        # Both keys and values must be free of byte strings.
        for k, v in obj.items():
            k_has_unicode_strs = unicode_strs(k)
            v_has_unicode_strs = unicode_strs(v)
            if not k_has_unicode_strs or not v_has_unicode_strs:
                return False
    return True
def decode_obj(obj, encoding='utf-8'):
    """Recursively decode all byte strings inside *obj* to unicode.

    Byte strings are decoded with *encoding*; lists/tuples, objects with a
    ``__dict__`` and dicts are traversed recursively. NOTE: a tuple input
    is returned as a list, and objects are mutated in place.
    """
    if isinstance(obj, non_unicode):
        return obj.decode(encoding)
    elif isinstance(obj, list) or isinstance(obj, tuple):
        # NOTE: tuples come back as lists.
        return [decode_obj(item) for item in obj]
    elif hasattr(obj, '__dict__'):
        # Decode attributes in place and return the same object.
        for k, v in obj.__dict__.items():
            obj.__dict__[k] = decode_obj(v)
        return obj
    elif isinstance(obj, dict):
        # Rebuild the dict with both keys and values decoded.
        dec_obj = {}
        for k, v in obj.items():
            dec_k = decode_obj(k)
            dec_v = decode_obj(v)
            dec_obj[dec_k] = dec_v
        return dec_obj
    else:
        return obj
def read_unicode_csv(filename, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL, lineterminator='\n',
encoding='utf-8', skiprows=0):
# Python 3 version
if sys.version_info[0] >= 3:
# Open the file in text mode with given encoding
# Set newline arg to '' (see https://docs.python.org/3/library/csv.html)
with open(filename, 'r', newline='', encoding=encoding) as f:
generator = read_unicode_csv_fileobj(f, delimiter=delimiter,
quotechar=quotechar,
quoting=quoting,
lineterminator=lineterminator,
encoding=encoding,
skiprows=skiprows)
for row in generator:
yield row
# Python 2 version
else:
# Open the file in binary mode
with open(filename, 'rb') as f:
generator = read_unicode_csv_fileobj(f, delimiter=delimiter,
quotechar=quotechar,
quoting=quoting,
lineterminator=lineterminator,
encoding=encoding,
skiprows=skiprows)
for row in generator:
yield row
def read_unicode_csv_fileobj(fileobj, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL, lineterminator='\n',
encoding='utf-8', skiprows=0):
"""fileobj can be a StringIO in Py3, but should be a BytesIO in Py2."""
# Python 3 version
if sys.version_info[0] >= 3:
# Next, get the csv reader, with unicode delimiter and quotechar
csv_reader = csv.reader(fileobj, delimiter=delimiter,
quotechar=quotechar, quoting=quoting,
lineterminator=lineterminator)
# Now, return the (already decoded) unicode csv_reader generator
# Skip rows if necessary
for skip_ix in range(skiprows):
next(csv_reader)
for row in csv_reader:
yield row
# Python 2 version
else:
# Next, get the csv reader, passing delimiter and quotechar as
# bytestrings rather than unicode
csv_reader = csv.reader(fileobj, delimiter=delimiter.encode(encoding),
quotechar=quotechar.encode(encoding),
quoting=quoting, lineterminator=lineterminator)
# Iterate over the file and decode each string into unicode
# Skip rows if necessary
for skip_ix in range(skiprows):
next(csv_reader)
for row in csv_reader:
yield [cell.decode(encoding) for cell in row]
def write_unicode_csv(filename, rows, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL, lineterminator='\n',
encoding='utf-8'):
# Python 3 version
if sys.version_info[0] >= 3:
# Open the file in text mode with given encoding
# Set newline arg to '' (see https://docs.python.org/3/library/csv.html)
with open(filename, 'w', newline='', encoding=encoding) as f:
# Next, get the csv writer, with unicode delimiter and quotechar
csv_writer = csv.writer(f, delimiter=delimiter, quotechar=quotechar,
quoting=quoting, lineterminator=lineterminator)
# Write the rows to the file
csv_writer.writerows(rows)
# Python 2 version
else:
# Open the file, no encoding specified
with open(filename, 'w') as f:
# Next, get the csv writer, passing delimiter and quotechar as
# bytestrings rather than unicode
csv_writer = csv.writer(f, delimiter=delimiter.encode(encoding),
quotechar=quotechar.encode(encoding),
quoting=quoting, lineterminator=lineterminator)
for row in rows:
csv_writer.writerow([unicode(cell).encode(encoding)
for cell in row])
def zip_string(content, name='gzipped_object'):
    """Gzip-compress a unicode string and return the compressed bytes.

    *name* is stored in the gzip header as the original filename.
    """
    buf = BytesIO()
    # compresslevel 6 keeps the original speed/size trade-off.
    with gzip.GzipFile(filename=name, mode='wb', compresslevel=6,
                       fileobj=buf) as gzf:
        gzf.write(content.encode('utf8'))
    return buf.getvalue()
def unzip_string(gz_obj):
    """Decompress an S3-style object whose 'Body' stream holds gzipped UTF-8 text."""
    raw = gz_obj['Body'].read()
    # wbits = 16 + MAX_WBITS tells zlib to expect a gzip header/trailer.
    inflated = zlib.decompress(raw, 16 + zlib.MAX_WBITS)
    return inflated.decode('utf8')
if sys.version_info[0] >= 3:
    # On Python 3, ElementTree returns unicode text already, so no custom
    # builder is required; callers get None and use the default parser.
    def UnicodeXMLTreeBuilder():
        return None
else:
    class UnicodeXMLTreeBuilder(ET.XMLTreeBuilder):
        # See this thread:
        # http://www.gossamer-threads.com/lists/python/python/728903
        def _fixtext(self, text):
            # Return text unchanged instead of encoding it to ASCII.
            return text
def fast_deepcopy(obj):
    """Deep-copy *obj* by round-tripping it through pickle.

    This is a faster alternative to copy.deepcopy, meant primarily for
    sets of Statements with complex hierarchies but usable for any
    picklable object.
    """
    return pickle.loads(pickle.dumps(obj))
def lmap(f, xs):
    """A non-lazy version of map: return a list of f applied to each item."""
    return [f(item) for item in xs]
def flatten(l):
    """Recursively flatten nested lists/tuples into a single flat list.

    A non-list, non-tuple value is wrapped as a one-element list.
    """
    if not isinstance(l, (list, tuple)):
        return [l]
    flat = []
    for element in l:
        flat.extend(flatten(element))
    return flat
def flatMap(f, xs):
    """Map *f* onto an iterable and flatten the result into one list."""
    return flatten([f(item) for item in xs])
def batch_iter(iterator, batch_size, return_func=None, padding=None):
    """Break an iterable into batches of size batch_size.

    Note that `padding` should be set to something (anything) which is NOT
    a valid member of the iterator: it marks the unfilled tail of the
    final batch so those slots can be dropped.

    Parameters
    ----------
    iterator : iterable
        A python object which is iterable.
    batch_size : int
        The size of batches you wish to produce from the iterator.
    return_func : executable or None
        Pass a function that takes a generator and returns an iterable
        (e.g. `list` or `set`). If None, a generator is yielded instead.
    padding : anything
        Internal fill value; MUST NOT be a valid element of the iterator.

    Returns
    -------
    An iterator over generators (or `return_func` results).
    """
    # One shared iterator repeated batch_size times makes zip_longest
    # pull consecutive items into each batch, padding the last one.
    shared = [iter(iterator)] * batch_size
    for chunk in zip_longest(*shared, fillvalue=padding):
        batch = (item for item in chunk if item is not padding)
        yield batch if return_func is None else return_func(batch)
| 35.968872 | 82 | 0.589247 | import sys
import csv
import gzip
import zlib
import logging
from io import BytesIO
from functools import wraps
from datetime import datetime
import xml.etree.ElementTree as ET
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
if sys.version_info[0] >= 3:
non_unicode = bytes
import pickle
else:
non_unicode = str
import cPickle as pickle
logger = logging.getLogger(__name__)
def clockit(func):
@wraps(func)
def timed_func(*args, **kwargs):
start = datetime.now()
ret = func(*args, **kwargs)
end = datetime.now()
logger.debug('%s %-30s %s %s', '~' * 5, func.__name__, end-start, '~' * 5)
return ret
return timed_func
def unicode_strs(obj, attr_filter=None):
if isinstance(obj, non_unicode):
return False
if isinstance(obj, list) or isinstance(obj, tuple) or \
isinstance(obj, set):
for item in obj:
has_unicode_strs = unicode_strs(item)
if not has_unicode_strs:
return False
if hasattr(obj, '__dict__'):
for item_name, item in obj.__dict__.items():
if attr_filter and item_name in attr_filter:
continue
has_unicode_strs = unicode_strs(item)
if not has_unicode_strs:
return False
if isinstance(obj, dict):
for k, v in obj.items():
k_has_unicode_strs = unicode_strs(k)
v_has_unicode_strs = unicode_strs(v)
if not k_has_unicode_strs or not v_has_unicode_strs:
return False
return True
def decode_obj(obj, encoding='utf-8'):
if isinstance(obj, non_unicode):
return obj.decode(encoding)
elif isinstance(obj, list) or isinstance(obj, tuple):
return [decode_obj(item) for item in obj]
elif hasattr(obj, '__dict__'):
for k, v in obj.__dict__.items():
obj.__dict__[k] = decode_obj(v)
return obj
elif isinstance(obj, dict):
dec_obj = {}
for k, v in obj.items():
dec_k = decode_obj(k)
dec_v = decode_obj(v)
dec_obj[dec_k] = dec_v
return dec_obj
else:
return obj
def read_unicode_csv(filename, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL, lineterminator='\n',
encoding='utf-8', skiprows=0):
# Python 3 version
if sys.version_info[0] >= 3:
# Open the file in text mode with given encoding
# Set newline arg to '' (see https://docs.python.org/3/library/csv.html)
with open(filename, 'r', newline='', encoding=encoding) as f:
generator = read_unicode_csv_fileobj(f, delimiter=delimiter,
quotechar=quotechar,
quoting=quoting,
lineterminator=lineterminator,
encoding=encoding,
skiprows=skiprows)
for row in generator:
yield row
# Python 2 version
else:
# Open the file in binary mode
with open(filename, 'rb') as f:
generator = read_unicode_csv_fileobj(f, delimiter=delimiter,
quotechar=quotechar,
quoting=quoting,
lineterminator=lineterminator,
encoding=encoding,
skiprows=skiprows)
for row in generator:
yield row
def read_unicode_csv_fileobj(fileobj, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL, lineterminator='\n',
encoding='utf-8', skiprows=0):
if sys.version_info[0] >= 3:
csv_reader = csv.reader(fileobj, delimiter=delimiter,
quotechar=quotechar, quoting=quoting,
lineterminator=lineterminator)
for skip_ix in range(skiprows):
next(csv_reader)
for row in csv_reader:
yield row
else:
csv_reader = csv.reader(fileobj, delimiter=delimiter.encode(encoding),
quotechar=quotechar.encode(encoding),
quoting=quoting, lineterminator=lineterminator)
for skip_ix in range(skiprows):
next(csv_reader)
for row in csv_reader:
yield [cell.decode(encoding) for cell in row]
def write_unicode_csv(filename, rows, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL, lineterminator='\n',
encoding='utf-8'):
# Python 3 version
if sys.version_info[0] >= 3:
# Open the file in text mode with given encoding
# Set newline arg to '' (see https://docs.python.org/3/library/csv.html)
with open(filename, 'w', newline='', encoding=encoding) as f:
# Next, get the csv writer, with unicode delimiter and quotechar
csv_writer = csv.writer(f, delimiter=delimiter, quotechar=quotechar,
quoting=quoting, lineterminator=lineterminator)
# Write the rows to the file
csv_writer.writerows(rows)
# Python 2 version
else:
# Open the file, no encoding specified
with open(filename, 'w') as f:
# Next, get the csv writer, passing delimiter and quotechar as
# bytestrings rather than unicode
csv_writer = csv.writer(f, delimiter=delimiter.encode(encoding),
quotechar=quotechar.encode(encoding),
quoting=quoting, lineterminator=lineterminator)
for row in rows:
csv_writer.writerow([unicode(cell).encode(encoding)
for cell in row])
def zip_string(content, name='gzipped_object'):
buf = BytesIO()
gzf = gzip.GzipFile(name, 'wb', 6, buf)
gzf.write(content.encode('utf8'))
gzf.close()
return buf.getvalue()
def unzip_string(gz_obj):
# Get the content from the object
gz_body = gz_obj['Body'].read()
# Decode the gzipped content
content = zlib.decompress(gz_body, 16+zlib.MAX_WBITS)
return content.decode('utf8')
if sys.version_info[0] >= 3:
def UnicodeXMLTreeBuilder():
return None
else:
class UnicodeXMLTreeBuilder(ET.XMLTreeBuilder):
# See this thread:
# http://www.gossamer-threads.com/lists/python/python/728903
def _fixtext(self, text):
return text
def fast_deepcopy(obj):
with BytesIO() as buf:
pickle.dump(obj, buf)
buf.seek(0)
obj_new = pickle.load(buf)
return obj_new
def lmap(f, xs):
return list(map(f, xs))
def flatten(l):
return sum(map(flatten, l), []) \
if isinstance(l, list) or isinstance(l, tuple) else [l]
def flatMap(f, xs):
return flatten(lmap(f, xs))
def batch_iter(iterator, batch_size, return_func=None, padding=None):
for batch in zip_longest(*[iter(iterator)]*batch_size, fillvalue=padding):
gen = (thing for thing in batch if thing is not padding)
if return_func is None:
yield gen
else:
yield return_func(gen)
| true | true |
1c34dca3680ee63727932c6dfd78eef663f1094f | 3,405 | py | Python | src/streamlink/plugins/vk.py | kyldery/streamlink | ef36240408c194a543557fb31e4535b0426ec153 | [
"BSD-2-Clause"
] | 1 | 2022-02-25T20:14:03.000Z | 2022-02-25T20:14:03.000Z | src/streamlink/plugins/vk.py | kyldery/streamlink | ef36240408c194a543557fb31e4535b0426ec153 | [
"BSD-2-Clause"
] | null | null | null | src/streamlink/plugins/vk.py | kyldery/streamlink | ef36240408c194a543557fb31e4535b0426ec153 | [
"BSD-2-Clause"
] | 12 | 2022-01-30T23:34:18.000Z | 2022-03-26T17:09:43.000Z | """
$description Russian live streaming and video hosting social platform.
$url vk.com
$type live, vod
"""
import logging
import re
from urllib.parse import parse_qsl, unquote, urlparse
from streamlink.exceptions import NoStreamsError
from streamlink.plugin import Plugin, PluginError, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream.dash import DASHStream
from streamlink.stream.hls import HLSStream
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
    r"https?://(?:\w+\.)?vk\.com/videos?(?:\?z=video)?(?P<video_id>-?\d+_\d+)"
))
@pluginmatcher(re.compile(
    r"https?://(\w+\.)?vk\.com/.+"
))
class VK(Plugin):
    """Streamlink plugin resolving HLS/DASH streams for vk.com videos."""

    # AJAX endpoint queried for the player parameters of a video.
    API_URL = "https://vk.com/al_video.php"

    def _has_video_id(self):
        # True when any matcher other than the catch-all (last) one matched,
        # i.e. the current URL already carries a video ID.
        return any(m for m in self.matches[:-1])

    def follow_vk_redirect(self):
        """Resolve indirect vk.com URLs until one contains a video ID.

        Raises NoStreamsError when no video ID can be found.
        """
        if self._has_video_id():
            return
        try:
            # A "z=video..." query parameter embeds the real video path.
            parsed_url = urlparse(self.url)
            true_path = next(unquote(v).split("/")[0] for k, v in parse_qsl(parsed_url.query) if k == "z" and len(v) > 0)
            self.url = f"{parsed_url.scheme}://{parsed_url.netloc}/{true_path}"
            if self._has_video_id():
                return
        except StopIteration:
            # No usable "z" parameter; fall through to the og:url lookup.
            pass
        try:
            # Fall back to the canonical URL advertised in the page metadata.
            self.url = self.session.http.get(self.url, schema=validate.Schema(
                validate.parse_html(),
                validate.xml_xpath_string(".//head/meta[@property='og:url'][@content]/@content"),
                str
            ))
        except PluginError:
            pass
        if self._has_video_id():
            return
        raise NoStreamsError(self.url)

    def _get_streams(self):
        """Query the vk AJAX API for the video's stream manifests."""
        self.follow_vk_redirect()
        video_id = self.match.group("video_id")
        if not video_id:
            return
        log.debug(f"video ID: {video_id}")
        try:
            data = self.session.http.post(
                self.API_URL,
                params={
                    "act": "show_inline",
                    "al": "1",
                    "video": video_id,
                },
                schema=validate.Schema(
                    # The endpoint prefixes its JSON payload with "<!--".
                    validate.transform(lambda text: re.sub(r"^\s*<!--\s*", "", text)),
                    validate.parse_json(),
                    {"payload": list},
                    validate.get(("payload", -1)),
                    list,
                    validate.get(-1),
                    {"player": {"params": [dict]}},
                    validate.get(("player", "params", 0)),
                    {
                        validate.optional("hls"): validate.url(),
                        validate.optional("manifest"): validate.startswith("<?xml"),
                        validate.optional("md_author"): validate.any(str, None),
                        validate.optional("md_title"): validate.any(str, None),
                    }
                )
            )
        except PluginError:
            log.error("Could not parse API response")
            return
        self.id = video_id
        self.author = data.get("md_author")
        self.title = data.get("md_title")
        # Prefer HLS when available, otherwise fall back to a DASH manifest.
        hls = data.get("hls")
        if hls:
            return HLSStream.parse_variant_playlist(self.session, hls)
        dash_manifest = data.get("manifest")
        if dash_manifest:
            return DASHStream.parse_manifest(self.session, dash_manifest)
__plugin__ = VK
| 31.238532 | 121 | 0.536564 |
import logging
import re
from urllib.parse import parse_qsl, unquote, urlparse
from streamlink.exceptions import NoStreamsError
from streamlink.plugin import Plugin, PluginError, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream.dash import DASHStream
from streamlink.stream.hls import HLSStream
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
r"https?://(?:\w+\.)?vk\.com/videos?(?:\?z=video)?(?P<video_id>-?\d+_\d+)"
))
@pluginmatcher(re.compile(
r"https?://(\w+\.)?vk\.com/.+"
))
class VK(Plugin):
API_URL = "https://vk.com/al_video.php"
def _has_video_id(self):
return any(m for m in self.matches[:-1])
def follow_vk_redirect(self):
if self._has_video_id():
return
try:
parsed_url = urlparse(self.url)
true_path = next(unquote(v).split("/")[0] for k, v in parse_qsl(parsed_url.query) if k == "z" and len(v) > 0)
self.url = f"{parsed_url.scheme}://{parsed_url.netloc}/{true_path}"
if self._has_video_id():
return
except StopIteration:
pass
try:
self.url = self.session.http.get(self.url, schema=validate.Schema(
validate.parse_html(),
validate.xml_xpath_string(".//head/meta[@property='og:url'][@content]/@content"),
str
))
except PluginError:
pass
if self._has_video_id():
return
raise NoStreamsError(self.url)
def _get_streams(self):
self.follow_vk_redirect()
video_id = self.match.group("video_id")
if not video_id:
return
log.debug(f"video ID: {video_id}")
try:
data = self.session.http.post(
self.API_URL,
params={
"act": "show_inline",
"al": "1",
"video": video_id,
},
schema=validate.Schema(
validate.transform(lambda text: re.sub(r"^\s*<!--\s*", "", text)),
validate.parse_json(),
{"payload": list},
validate.get(("payload", -1)),
list,
validate.get(-1),
{"player": {"params": [dict]}},
validate.get(("player", "params", 0)),
{
validate.optional("hls"): validate.url(),
validate.optional("manifest"): validate.startswith("<?xml"),
validate.optional("md_author"): validate.any(str, None),
validate.optional("md_title"): validate.any(str, None),
}
)
)
except PluginError:
log.error("Could not parse API response")
return
self.id = video_id
self.author = data.get("md_author")
self.title = data.get("md_title")
hls = data.get("hls")
if hls:
return HLSStream.parse_variant_playlist(self.session, hls)
dash_manifest = data.get("manifest")
if dash_manifest:
return DASHStream.parse_manifest(self.session, dash_manifest)
__plugin__ = VK
| true | true |
1c34dd7b506455c31f7c2a55853c832f54a3c189 | 1,367 | py | Python | src/transit_processor.py | ebuka-o/TransitTime | e70f7717d4bfc94d9388326f3e85b29a1495e5b8 | [
"MIT"
] | null | null | null | src/transit_processor.py | ebuka-o/TransitTime | e70f7717d4bfc94d9388326f3e85b29a1495e5b8 | [
"MIT"
] | null | null | null | src/transit_processor.py | ebuka-o/TransitTime | e70f7717d4bfc94d9388326f3e85b29a1495e5b8 | [
"MIT"
] | null | null | null | import sys, getopt
from data_manager import DataManager
def print_welcome_messaage():
    """Print the TransitTime welcome banner to stdout."""
    banner = """
    ******************************************************************
                        Welcome to TransitTime!
    ******************************************************************
    """
    print(banner)
def main(argv):
    """Parse CLI options and print the transit estimate for a bus stop.

    argv is the argument list (sys.argv[1:]); exits with status 2 on an
    unrecognized option and 0 after printing help.
    """
    # Default values
    bus_route_name = "MTABC_Q69"
    bus_stop_name = "21 ST/31 AV"
    help_text = """
    Given a bus route and stop name, returns the time it will take a bus to arrive
    at the stop and how far the bus is from the stop in miles.
    Usage: transit_processor.py -r <bus route> -s <bus stop>
    """
    try:
        # args can be ignored from getopts
        opts, _ = getopt.getopt(argv,"hr:s:",["help","route=","stop="])
    except getopt.GetoptError:
        # Unknown/malformed option: show usage and exit with an error code.
        print(help_text)
        sys.exit(2)
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            print(help_text)
            sys.exit()
        elif opt in ('-r', '--route'):
            bus_route_name = arg
        elif opt in ('-s', '--stop'):
            bus_stop_name = arg
    # NOTE(review): the meaning of the third (False) argument is not visible
    # here — presumably a flag on DataManager.get_bus_route; confirm there.
    bus_route = DataManager.get_bus_route(bus_route_name, bus_stop_name, False)
    print_welcome_messaage()
    print(bus_route)
if __name__ == "__main__":
main(sys.argv[1:]) | 29.085106 | 90 | 0.514996 | import sys, getopt
from data_manager import DataManager
def print_welcome_messaage():
welcome_message ="""
******************************************************************
Welcome to TransitTime!
******************************************************************
"""
print(welcome_message)
def main(argv):
bus_route_name = "MTABC_Q69"
bus_stop_name = "21 ST/31 AV"
help_text = """
Given a bus route and stop name, returns the time it will take a bus to arrive
at the stop and how far the bus is from the stop in miles.
Usage: transit_processor.py -r <bus route> -s <bus stop>
"""
try:
opts, _ = getopt.getopt(argv,"hr:s:",["help","route=","stop="])
except getopt.GetoptError:
print(help_text)
sys.exit(2)
for opt, arg in opts:
if opt in ('-h', '--help'):
print(help_text)
sys.exit()
elif opt in ('-r', '--route'):
bus_route_name = arg
elif opt in ('-s', '--stop'):
bus_stop_name = arg
bus_route = DataManager.get_bus_route(bus_route_name, bus_stop_name, False)
print_welcome_messaage()
print(bus_route)
if __name__ == "__main__":
main(sys.argv[1:]) | true | true |
1c34de3969c41b53bd7ad03958021791317136dd | 150 | py | Python | Exam_18_04_2021/exam-skeleton/project/deliveries/food.py | Beshkov/OOP | 297edadb3e7801dfeee5752a20aae6aead8da610 | [
"MIT"
] | 1 | 2021-05-24T17:51:53.000Z | 2021-05-24T17:51:53.000Z | Exam_18_04_2021/exam-skeleton/project/deliveries/food.py | Beshkov/Python_OOP | 297edadb3e7801dfeee5752a20aae6aead8da610 | [
"MIT"
] | null | null | null | Exam_18_04_2021/exam-skeleton/project/deliveries/food.py | Beshkov/Python_OOP | 297edadb3e7801dfeee5752a20aae6aead8da610 | [
"MIT"
] | null | null | null | from project.deliveries.product import Product
class Food(Product):
    """A food product; always created with a fixed quantity of 15."""
    def __init__(self, name):
        # Quantity is hard-coded to 15 for food deliveries.
        super().__init__(name=name, quantity=15)
| 18.75 | 48 | 0.713333 | from project.deliveries.product import Product
class Food(Product):
def __init__(self, name):
super().__init__(name=name, quantity=15)
| true | true |
1c34dfb573fdfd4135ba26acd6db46609bd93a4d | 2,880 | py | Python | src/fuzzingtool/decorators/plugin_meta.py | NESCAU-UFLA/FuzzingTool | d0dbe3ee4c17ec8ee72423bf7fabce6849e01807 | [
"MIT"
] | 131 | 2020-12-14T18:45:29.000Z | 2022-03-31T03:00:21.000Z | src/fuzzingtool/decorators/plugin_meta.py | NESCAU-UFLA/FuzzingTool | d0dbe3ee4c17ec8ee72423bf7fabce6849e01807 | [
"MIT"
] | 51 | 2020-12-14T16:02:38.000Z | 2022-03-31T18:47:12.000Z | src/fuzzingtool/decorators/plugin_meta.py | NESCAU-UFLA/FuzzingTool | d0dbe3ee4c17ec8ee72423bf7fabce6849e01807 | [
"MIT"
] | 38 | 2020-12-14T21:12:18.000Z | 2022-03-29T18:23:20.000Z | # Copyright (c) 2020 - present Vitor Oriel <https://github.com/VitorOriel>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from ..core.plugins.Plugin import Plugin
def plugin_meta(cls: Plugin) -> Plugin:
"""Decorator to check for plugin metadata on a plugin class
@type cls: Plugin
@param cls: The class that call this decorator
"""
metadata = ['__author__', '__params__', '__desc__', '__type__', '__version__']
classAttr = vars(cls)
for meta in metadata:
if meta not in classAttr:
raise Exception(f"Metadata {meta} not specified in plugin {cls.__name__}")
if not cls.__author__:
raise Exception(f"Author cannot be empty on plugin {cls.__name__}")
if cls.__params__:
if not (type(cls.__params__) is dict):
raise Exception(f"The parameters must be a dictionary on plugin {cls.__name__}")
paramDictKeys = cls.__params__.keys()
for key in ['metavar', 'type']:
if key not in paramDictKeys:
raise Exception(f"Key {key} must be in parameters dict on plugin {cls.__name__}")
if not cls.__params__[key]:
raise Exception(f"Value of {key} cannot be empty in parameters dict on plugin {cls.__name__}")
if cls.__params__['type'] is list:
if 'cli_list_separator' not in paramDictKeys:
raise Exception(f"The key 'cli_list_separator' must be present when parameter type is list on plugin {cls.__name__}")
if not cls.__params__['cli_list_separator']:
raise Exception(f"Value of 'cli_list_separator' cannot be blank on {cls.__name__}")
if not cls.__desc__:
raise Exception(f"Description cannot be blank on plugin {cls.__name__}")
if not cls.__version__:
raise Exception(f"Version cannot be blank on plugin {cls.__name__}")
return cls | 53.333333 | 133 | 0.703819 |
from ..core.plugins.Plugin import Plugin
def plugin_meta(cls: Plugin) -> Plugin:
metadata = ['__author__', '__params__', '__desc__', '__type__', '__version__']
classAttr = vars(cls)
for meta in metadata:
if meta not in classAttr:
raise Exception(f"Metadata {meta} not specified in plugin {cls.__name__}")
if not cls.__author__:
raise Exception(f"Author cannot be empty on plugin {cls.__name__}")
if cls.__params__:
if not (type(cls.__params__) is dict):
raise Exception(f"The parameters must be a dictionary on plugin {cls.__name__}")
paramDictKeys = cls.__params__.keys()
for key in ['metavar', 'type']:
if key not in paramDictKeys:
raise Exception(f"Key {key} must be in parameters dict on plugin {cls.__name__}")
if not cls.__params__[key]:
raise Exception(f"Value of {key} cannot be empty in parameters dict on plugin {cls.__name__}")
if cls.__params__['type'] is list:
if 'cli_list_separator' not in paramDictKeys:
raise Exception(f"The key 'cli_list_separator' must be present when parameter type is list on plugin {cls.__name__}")
if not cls.__params__['cli_list_separator']:
raise Exception(f"Value of 'cli_list_separator' cannot be blank on {cls.__name__}")
if not cls.__desc__:
raise Exception(f"Description cannot be blank on plugin {cls.__name__}")
if not cls.__version__:
raise Exception(f"Version cannot be blank on plugin {cls.__name__}")
return cls | true | true |
1c34e0bcadb37d98a4ac283247272dc992b6ee22 | 2,203 | py | Python | todoapi/todoapi/settings.py | bogdan-cornianu/beginning-drf | b1c6efb85bf23b24f5afe90d819e57fa9ac2c1be | [
"MIT"
] | null | null | null | todoapi/todoapi/settings.py | bogdan-cornianu/beginning-drf | b1c6efb85bf23b24f5afe90d819e57fa9ac2c1be | [
"MIT"
] | null | null | null | todoapi/todoapi/settings.py | bogdan-cornianu/beginning-drf | b1c6efb85bf23b24f5afe90d819e57fa9ac2c1be | [
"MIT"
] | null | null | null | """
Django settings for a project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'pc3g2p43$7v+rf#x7%8tyt)fxsl&i=&hd2k-enz8+drzdcbd6f'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'todo',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
)
}
ROOT_URLCONF = 'todoapi.urls'
WSGI_APPLICATION = 'todoapi.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| 23.945652 | 71 | 0.731276 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SECRET_KEY = 'pc3g2p43$7v+rf#x7%8tyt)fxsl&i=&hd2k-enz8+drzdcbd6f'
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'todo',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
)
}
ROOT_URLCONF = 'todoapi.urls'
WSGI_APPLICATION = 'todoapi.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| true | true |
1c34e1595e8822543910764580daaf03bcfeb67a | 60,852 | py | Python | pathlib_mate/pathlib2.py | MacHu-GWU/pathlib_mate-project | 5b8f5441e681730d02209211cce7f46986147418 | [
"MIT"
] | 9 | 2017-09-07T21:21:43.000Z | 2020-10-11T09:47:24.000Z | pathlib_mate/pathlib2.py | MacHu-GWU/pathlib_mate-project | 5b8f5441e681730d02209211cce7f46986147418 | [
"MIT"
] | 2 | 2018-10-16T14:30:26.000Z | 2020-12-05T02:40:46.000Z | pathlib_mate/pathlib2.py | MacHu-GWU/pathlib_mate-project | 5b8f5441e681730d02209211cce7f46986147418 | [
"MIT"
] | 2 | 2017-09-05T14:06:01.000Z | 2021-06-29T15:31:13.000Z | # Copyright (c) 2014-2017 Matthias C. M. Troffaes
# Copyright (c) 2012-2014 Antoine Pitrou and contributors
# Distributed under the terms of the MIT License.
# VERSION 2.5.3
# for python2 type hint
try:
import typing
except: # pragma: no cover
pass
import ctypes
import fnmatch
import functools
import io
import ntpath
import os
import posixpath
import re
import sys
from errno import EEXIST, EPERM, EACCES
from errno import EINVAL, ENOENT, ENOTDIR, EBADF
from operator import attrgetter
from stat import (
S_ISDIR, S_ISLNK, S_ISREG, S_ISSOCK, S_ISBLK, S_ISCHR, S_ISFIFO)
import six
try:
from collections.abc import Sequence
except ImportError:
from collections import Sequence
try:
from urllib import quote as urlquote_from_bytes
except ImportError:
from urllib.parse import quote_from_bytes as urlquote_from_bytes
try:
intern = intern
except NameError:
intern = sys.intern
supports_symlinks = True
if os.name == 'nt':
import nt
if sys.getwindowsversion()[:2] >= (6, 0) and sys.version_info >= (3, 2):
from nt import _getfinalpathname
else:
supports_symlinks = False
_getfinalpathname = None
else:
nt = None
try:
from os import scandir as os_scandir
except ImportError:
from scandir import scandir as os_scandir
from atomicwrites import atomic_write
__all__ = [
"PurePath", "PurePosixPath", "PureWindowsPath",
"Path", "PosixPath", "WindowsPath",
]
#
# Internals
#
# EBADF - guard against macOS `stat` throwing EBADF
# Errno values that mean "the path does not exist / is not reachable" and
# should therefore be swallowed by existence checks instead of raised.
_IGNORED_ERROS = (ENOENT, ENOTDIR, EBADF)

# Windows-specific winerror codes treated the same way.
_IGNORED_WINERRORS = (
    21,  # ERROR_NOT_READY - drive exists but is not accessible
)
def _ignore_error(exception):
    """Return True if *exception* indicates a missing or unreachable path
    (by errno, or by winerror on Windows) and should be ignored by
    existence checks."""
    if getattr(exception, 'errno', None) in _IGNORED_ERROS:
        return True
    return getattr(exception, 'winerror', None) in _IGNORED_WINERRORS
def _py2_fsencode(parts):
    """Encode unicode entries of *parts* to ascii byte strings.

    Python 2 only (minimal unicode support); non-unicode entries are
    passed through unchanged.
    """
    assert six.PY2
    encoded = []
    for part in parts:
        if isinstance(part, six.text_type):
            encoded.append(part.encode('ascii'))
        else:
            encoded.append(part)
    return encoded
def _try_except_fileexistserror(try_func, except_func, else_func=None):
    """Run *try_func*, routing a "file already exists" error to
    *except_func* and success to *else_func* (if given).

    Emulates ``try/except FileExistsError/else`` across Python versions:
    FileExistsError only exists on Python >= 3.3, so older versions
    inspect ``EnvironmentError.errno`` for EEXIST instead.
    """
    if sys.version_info >= (3, 3):
        try:
            try_func()
        except FileExistsError as exc:
            except_func(exc)
        else:
            if else_func is not None:
                else_func()
    else:
        try:
            try_func()
        except EnvironmentError as exc:
            # Anything other than "file exists" is not ours to handle.
            if exc.errno != EEXIST:
                raise
            else:
                except_func(exc)
        else:
            if else_func is not None:
                else_func()
def _try_except_filenotfounderror(try_func, except_func):
    """Run *try_func*, routing a "file not found" error to *except_func*.

    Emulates ``try/except FileNotFoundError`` across Python versions:
    on Python < 3.3 errno must be inspected, and on Windows the
    WindowsError/winerror codes for missing file or path as well.
    """
    if sys.version_info >= (3, 3):
        try:
            try_func()
        except FileNotFoundError as exc:
            except_func(exc)
    elif os.name != 'nt':
        try:
            try_func()
        except EnvironmentError as exc:
            if exc.errno != ENOENT:
                raise
            else:
                except_func(exc)
    else:
        try:
            try_func()
        except WindowsError as exc:
            # errno contains winerror
            # 2 = file not found
            # 3 = path not found
            if exc.errno not in (2, 3):
                raise
            else:
                except_func(exc)
        except EnvironmentError as exc:
            if exc.errno != ENOENT:
                raise
            else:
                except_func(exc)
def _try_except_permissionerror_iter(try_iter, except_iter):
    """Yield from *try_iter*, switching to *except_iter* if a permission
    error is raised while iterating.

    Emulates ``try/except PermissionError`` around a generator across
    Python versions: PermissionError only exists on Python >= 3.3, so
    older versions check EnvironmentError.errno for EPERM/EACCES.
    """
    if sys.version_info >= (3, 3):
        try:
            for x in try_iter():
                yield x
        except PermissionError as exc:
            for x in except_iter(exc):
                yield x
    else:
        try:
            for x in try_iter():
                yield x
        except EnvironmentError as exc:
            if exc.errno not in (EPERM, EACCES):
                raise
            else:
                for x in except_iter(exc):
                    yield x
def _win32_get_unique_path_id(path):
    """Return a (volume, index_hi, index_lo) triple uniquely identifying
    *path* on Windows.

    Used by samefile() on older Python versions whose os.stat() does not
    populate st_ino/st_dev on Windows.  Raises FileNotFoundError (or an
    OSError with errno ENOENT on Python < 3.3) if the path cannot be
    opened, and WinError if the file information query fails.
    """
    # get file information, needed for samefile on older Python versions
    # see http://timgolden.me.uk/python/win32_how_do_i/
    # see_if_two_files_are_the_same_file.html
    from ctypes import POINTER, Structure, WinError
    from ctypes.wintypes import DWORD, HANDLE, BOOL

    class FILETIME(Structure):
        _fields_ = [("datetime_lo", DWORD),
                    ("datetime_hi", DWORD),
                    ]

    class BY_HANDLE_FILE_INFORMATION(Structure):
        # Mirrors the Win32 BY_HANDLE_FILE_INFORMATION struct; the
        # (volume, index_hi, index_lo) triple identifies the file.
        _fields_ = [("attributes", DWORD),
                    ("created_at", FILETIME),
                    ("accessed_at", FILETIME),
                    ("written_at", FILETIME),
                    ("volume", DWORD),
                    ("file_hi", DWORD),
                    ("file_lo", DWORD),
                    ("n_links", DWORD),
                    ("index_hi", DWORD),
                    ("index_lo", DWORD),
                    ]

    CreateFile = ctypes.windll.kernel32.CreateFileW
    CreateFile.argtypes = [ctypes.c_wchar_p, DWORD, DWORD, ctypes.c_void_p,
                           DWORD, DWORD, HANDLE]
    CreateFile.restype = HANDLE
    GetFileInformationByHandle = (
        ctypes.windll.kernel32.GetFileInformationByHandle)
    GetFileInformationByHandle.argtypes = [
        HANDLE, POINTER(BY_HANDLE_FILE_INFORMATION)]
    GetFileInformationByHandle.restype = BOOL
    CloseHandle = ctypes.windll.kernel32.CloseHandle
    CloseHandle.argtypes = [HANDLE]
    CloseHandle.restype = BOOL
    GENERIC_READ = 0x80000000
    FILE_SHARE_READ = 0x00000001
    FILE_FLAG_BACKUP_SEMANTICS = 0x02000000
    OPEN_EXISTING = 3
    if os.path.isdir(path):
        # Directories can only be opened with backup semantics.
        flags = FILE_FLAG_BACKUP_SEMANTICS
    else:
        flags = 0
    hfile = CreateFile(path, GENERIC_READ, FILE_SHARE_READ,
                       None, OPEN_EXISTING, flags, None)
    if hfile == 0xffffffff:  # INVALID_HANDLE_VALUE
        if sys.version_info >= (3, 3):
            raise FileNotFoundError(path)
        else:
            # BUGFIX: interpolate the actual path instead of the literal
            # string "path" in the error message.
            exc = OSError("file not found: %s" % (path,))
            exc.errno = ENOENT
            raise exc
    info = BY_HANDLE_FILE_INFORMATION()
    success = GetFileInformationByHandle(hfile, info)
    CloseHandle(hfile)
    if success == 0:
        raise WinError()
    return info.volume, info.index_hi, info.index_lo
def _is_wildcard_pattern(pat):
# Whether this pattern needs actual matching using fnmatch, or can
# be looked up directly as a file.
return "*" in pat or "?" in pat or "[" in pat
class _Flavour(object):
    """A flavour implements a particular (platform-specific) set of path
    semantics.

    Subclasses (_WindowsFlavour, _PosixFlavour) provide sep/altsep,
    splitroot(), casefolding, URI construction, etc.
    """

    def __init__(self):
        self.join = self.sep.join

    def parse_parts(self, parts):
        """Parse a sequence of path strings into (drive, root, parts).

        Walks the inputs right-to-left so that the right-most anchored
        (drive/root-bearing) component wins, discarding everything before
        it; '.' components and empty strings are dropped.
        """
        if six.PY2:
            parts = _py2_fsencode(parts)
        parsed = []
        sep = self.sep
        altsep = self.altsep
        drv = root = ''
        it = reversed(parts)
        for part in it:
            if not part:
                continue
            if altsep:
                # Normalize the alternate separator ('/' on Windows).
                part = part.replace(altsep, sep)
            drv, root, rel = self.splitroot(part)
            if sep in rel:
                for x in reversed(rel.split(sep)):
                    if x and x != '.':
                        # intern() shares identical part strings in memory.
                        parsed.append(intern(x))
            else:
                if rel and rel != '.':
                    parsed.append(intern(rel))
            if drv or root:
                if not drv:
                    # If no drive is present, try to find one in the previous
                    # parts. This makes the result of parsing e.g.
                    # ("C:", "/", "a") reasonably intuitive.
                    for part in it:
                        if not part:
                            continue
                        if altsep:
                            part = part.replace(altsep, sep)
                        drv = self.splitroot(part)[0]
                        if drv:
                            break
                break
        if drv or root:
            # The anchor is stored as the first pseudo-part.
            parsed.append(drv + root)
        parsed.reverse()
        return drv, root, parsed

    def join_parsed_parts(self, drv, root, parts, drv2, root2, parts2):
        """
        Join the two paths represented by the respective
        (drive, root, parts) tuples. Return a new (drive, root, parts) tuple.
        """
        if root2:
            if not drv2 and drv:
                # Rooted but drive-less second path: keep the first drive.
                return drv, root2, [drv + root2] + parts2[1:]
        elif drv2:
            if drv2 == drv or self.casefold(drv2) == self.casefold(drv):
                # Same drive => second path is relative to the first
                return drv, root, parts + parts2[1:]
        else:
            # Second path is non-anchored (common case)
            return drv, root, parts + parts2
        # Second path is anchored: it replaces the first entirely.
        return drv2, root2, parts2
class _WindowsFlavour(_Flavour):
    """Windows path semantics: drive letters, UNC shares, '\\\\?\\' extended
    paths, case-insensitive comparison, and reserved device names."""
    # Reference for Windows paths can be found at
    # http://msdn.microsoft.com/en-us/library/aa365247%28v=vs.85%29.aspx

    sep = '\\'
    altsep = '/'
    has_drv = True
    pathmod = ntpath

    is_supported = (os.name == 'nt')

    drive_letters = set('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')
    ext_namespace_prefix = '\\\\?\\'

    reserved_names = (
        set(['CON', 'PRN', 'AUX', 'NUL']) |
        set(['COM%d' % i for i in range(1, 10)]) |
        set(['LPT%d' % i for i in range(1, 10)])
    )

    # Interesting findings about extended paths:
    # - '\\?\c:\a', '//?/c:\a' and '//?/c:/a' are all supported
    #   but '\\?\c:/a' is not
    # - extended paths are always absolute; "relative" extended paths will
    #   fail.

    def splitroot(self, part, sep=sep):
        # Split *part* into (drive, root, rest), handling extended-path
        # prefixes, UNC shares and drive letters.
        first = part[0:1]
        second = part[1:2]
        if (second == sep and first == sep):
            # XXX extended paths should also disable the collapsing of "."
            # components (according to MSDN docs).
            prefix, part = self._split_extended_path(part)
            first = part[0:1]
            second = part[1:2]
        else:
            prefix = ''
        third = part[2:3]
        if (second == sep and first == sep and third != sep):
            # is a UNC path:
            # vvvvvvvvvvvvvvvvvvvvv root
            # \\machine\mountpoint\directory\etc\...
            #            directory ^^^^^^^^^^^^^^
            index = part.find(sep, 2)
            if index != -1:
                index2 = part.find(sep, index + 1)
                # a UNC path can't have two slashes in a row
                # (after the initial two)
                if index2 != index + 1:
                    if index2 == -1:
                        index2 = len(part)
                    if prefix:
                        return prefix + part[1:index2], sep, part[index2 + 1:]
                    else:
                        return part[:index2], sep, part[index2 + 1:]
        drv = root = ''
        if second == ':' and first in self.drive_letters:
            drv = part[:2]
            part = part[2:]
            first = third
        if first == sep:
            root = first
            part = part.lstrip(sep)
        return prefix + drv, root, part

    def casefold(self, s):
        # Windows paths compare case-insensitively.
        return s.lower()

    def casefold_parts(self, parts):
        return [p.lower() for p in parts]

    def resolve(self, path, strict=False):
        s = str(path)
        if not s:
            return os.getcwd()
        previous_s = None
        if _getfinalpathname is not None:
            if strict:
                # Strict mode: let a missing path raise immediately.
                return self._ext_to_normal(_getfinalpathname(s))
            else:
                # Non-strict: resolve the longest existing prefix, then
                # re-append the non-existing tail components unchanged.
                # End of the path after the first one not found
                tail_parts = []

                def _try_func():
                    result[0] = self._ext_to_normal(_getfinalpathname(s))
                    # if there was no exception, set flag to 0
                    result[1] = 0

                def _exc_func(exc):
                    pass

                while True:
                    result = [None, 1]
                    _try_except_filenotfounderror(_try_func, _exc_func)
                    if result[1] == 1:  # file not found exception raised
                        previous_s = s
                        s, tail = os.path.split(s)
                        tail_parts.append(tail)
                        if previous_s == s:
                            # Could not strip further: give up, return as-is.
                            return path
                    else:
                        s = result[0]
                        return os.path.join(s, *reversed(tail_parts))
        # Means fallback on absolute
        return None

    def _split_extended_path(self, s, ext_prefix=ext_namespace_prefix):
        # Strip a leading '\\?\' (and 'UNC\') prefix, returning
        # (prefix, remainder).
        prefix = ''
        if s.startswith(ext_prefix):
            prefix = s[:4]
            s = s[4:]
            if s.startswith('UNC\\'):
                prefix += s[:3]
                s = '\\' + s[3:]
        return prefix, s

    def _ext_to_normal(self, s):
        # Turn back an extended path into a normal DOS-like path
        return self._split_extended_path(s)[1]

    def is_reserved(self, parts):
        # NOTE: the rules for reserved names seem somewhat complicated
        # (e.g. r"..\NUL" is reserved but not r"foo\NUL").
        # We err on the side of caution and return True for paths which are
        # not considered reserved by Windows.
        if not parts:
            return False
        if parts[0].startswith('\\\\'):
            # UNC paths are never reserved
            return False
        return parts[-1].partition('.')[0].upper() in self.reserved_names

    def make_uri(self, path):
        # Under Windows, file URIs use the UTF-8 encoding.
        drive = path.drive
        if len(drive) == 2 and drive[1] == ':':
            # It's a path on a local drive => 'file:///c:/a/b'
            rest = path.as_posix()[2:].lstrip('/')
            return 'file:///%s/%s' % (
                drive, urlquote_from_bytes(rest.encode('utf-8')))
        else:
            # It's a path on a network drive => 'file://host/share/a/b'
            return 'file:' + urlquote_from_bytes(
                path.as_posix().encode('utf-8'))

    def gethomedir(self, username):
        # Determine the home directory from the usual Windows environment
        # variables, preferring HOME, then USERPROFILE, then
        # HOMEDRIVE+HOMEPATH.
        if 'HOME' in os.environ:
            userhome = os.environ['HOME']
        elif 'USERPROFILE' in os.environ:
            userhome = os.environ['USERPROFILE']
        elif 'HOMEPATH' in os.environ:
            try:
                drv = os.environ['HOMEDRIVE']
            except KeyError:
                drv = ''
            userhome = drv + os.environ['HOMEPATH']
        else:
            raise RuntimeError("Can't determine home directory")

        if username:
            # Try to guess user home directory. By default all users
            # directories are located in the same place and are named by
            # corresponding usernames. If current user home directory points
            # to nonstandard place, this guess is likely wrong.
            if os.environ['USERNAME'] != username:
                drv, root, parts = self.parse_parts((userhome,))
                if parts[-1] != os.environ['USERNAME']:
                    raise RuntimeError("Can't determine home directory "
                                       "for %r" % username)
                parts[-1] = username
                if drv or root:
                    userhome = drv + root + self.join(parts[1:])
                else:
                    userhome = self.join(parts)
        return userhome
class _PosixFlavour(_Flavour):
    """POSIX path semantics: single '/' separator, no drives,
    case-sensitive comparison."""
    sep = '/'
    altsep = ''
    has_drv = False
    pathmod = posixpath

    is_supported = (os.name != 'nt')

    def splitroot(self, part, sep=sep):
        # Split *part* into ('', root, rest); POSIX has no drives.
        if part and part[0] == sep:
            stripped_part = part.lstrip(sep)
            # According to POSIX path resolution:
            # http://pubs.opengroup.org/onlinepubs/009695399/basedefs/
            # xbd_chap04.html#tag_04_11
            # "A pathname that begins with two successive slashes may be
            # interpreted in an implementation-defined manner, although more
            # than two leading slashes shall be treated as a single slash".
            if len(part) - len(stripped_part) == 2:
                return '', sep * 2, stripped_part
            else:
                return '', sep, stripped_part
        else:
            return '', '', part

    def casefold(self, s):
        # POSIX paths compare case-sensitively.
        return s

    def casefold_parts(self, parts):
        return parts

    def resolve(self, path, strict=False):
        """Resolve symlinks and '..' components of *path*; *seen* caches
        resolved symlinks and detects loops."""
        sep = self.sep
        accessor = path._accessor
        seen = {}

        def _resolve(path, rest):
            if rest.startswith(sep):
                path = ''

            for name in rest.split(sep):
                if not name or name == '.':
                    # current dir
                    continue
                if name == '..':
                    # parent dir
                    path, _, _ = path.rpartition(sep)
                    continue
                newpath = path + sep + name
                if newpath in seen:
                    # Already seen this path
                    path = seen[newpath]
                    if path is not None:
                        # use cached value
                        continue
                    # The symlink is not resolved, so we must have a symlink
                    # loop.
                    raise RuntimeError("Symlink loop from %r" % newpath)
                # Resolve the symbolic link
                try:
                    target = accessor.readlink(newpath)
                except OSError as e:
                    if e.errno != EINVAL and strict:
                        raise
                    # Not a symlink, or non-strict mode. We just leave the path
                    # untouched.
                    path = newpath
                else:
                    seen[newpath] = None  # not resolved symlink
                    path = _resolve(path, target)
                    seen[newpath] = path  # resolved symlink
            return path

        # NOTE: according to POSIX, getcwd() cannot contain path components
        # which are symlinks.
        base = '' if path.is_absolute() else os.getcwd()
        return _resolve(base, str(path)) or sep

    def is_reserved(self, parts):
        # POSIX has no reserved file names.
        return False

    def make_uri(self, path):
        # We represent the path using the local filesystem encoding,
        # for portability to other applications.
        bpath = bytes(path)
        return 'file://' + urlquote_from_bytes(bpath)

    def gethomedir(self, username):
        # HOME from the environment, falling back to the pwd database.
        if not username:
            try:
                return os.environ['HOME']
            except KeyError:
                import pwd
                return pwd.getpwuid(os.getuid()).pw_dir
        else:
            import pwd
            try:
                return pwd.getpwnam(username).pw_dir
            except KeyError:
                raise RuntimeError("Can't determine home directory "
                                   "for %r" % username)
# Singleton flavour instances shared by all pure path classes.
_windows_flavour = _WindowsFlavour()
_posix_flavour = _PosixFlavour()
class _Accessor:
    """An accessor implements a particular (system-specific or not) way of
    accessing paths on the filesystem.

    Base class only; concrete filesystem operations are supplied by
    subclasses such as _NormalAccessor below.
    """
class _NormalAccessor(_Accessor):
    """Accessor backed directly by the ``os`` module: each operation
    converts the path object(s) to str and forwards to the matching
    os function."""

    def _wrap_strfunc(strfunc):
        # Wrap an os function taking one path: stringify the path object.
        @functools.wraps(strfunc)
        def wrapped(pathobj, *args):
            return strfunc(str(pathobj), *args)
        return staticmethod(wrapped)

    def _wrap_binary_strfunc(strfunc):
        # Wrap an os function taking two paths (rename, symlink, ...).
        @functools.wraps(strfunc)
        def wrapped(pathobjA, pathobjB, *args):
            return strfunc(str(pathobjA), str(pathobjB), *args)
        return staticmethod(wrapped)

    stat = _wrap_strfunc(os.stat)

    lstat = _wrap_strfunc(os.lstat)

    open = _wrap_strfunc(os.open)

    listdir = _wrap_strfunc(os.listdir)

    scandir = _wrap_strfunc(os_scandir)

    chmod = _wrap_strfunc(os.chmod)

    # lchmod is not available on every platform.
    if hasattr(os, "lchmod"):
        lchmod = _wrap_strfunc(os.lchmod)
    else:
        def lchmod(self, pathobj, mode):
            raise NotImplementedError("lchmod() not available on this system")

    mkdir = _wrap_strfunc(os.mkdir)

    unlink = _wrap_strfunc(os.unlink)

    rmdir = _wrap_strfunc(os.rmdir)

    rename = _wrap_binary_strfunc(os.rename)

    # os.replace (overwrite-on-rename) only exists on Python >= 3.3.
    if sys.version_info >= (3, 3):
        replace = _wrap_binary_strfunc(os.replace)

    if nt:
        if supports_symlinks:
            symlink = _wrap_binary_strfunc(os.symlink)
        else:
            def symlink(a, b, target_is_directory):
                raise NotImplementedError(
                    "symlink() not available on this system")
    else:
        # Under POSIX, os.symlink() takes two args
        @staticmethod
        def symlink(a, b, target_is_directory):
            return os.symlink(str(a), str(b))

    utime = _wrap_strfunc(os.utime)

    # Helper for resolve()
    def readlink(self, path):
        return os.readlink(path)
_normal_accessor = _NormalAccessor()
#
# Globbing helpers
#
def _make_selector(pattern_parts):
pat = pattern_parts[0]
child_parts = pattern_parts[1:]
if pat == '**':
cls = _RecursiveWildcardSelector
elif '**' in pat:
raise ValueError(
"Invalid pattern: '**' can only be an entire path component")
elif _is_wildcard_pattern(pat):
cls = _WildcardSelector
else:
cls = _PreciseSelector
return cls(pat, child_parts)
# Memoize selector construction when lru_cache is available (Python 3.2+);
# glob patterns repeat often and selector chains are immutable.
if hasattr(functools, "lru_cache"):
    _make_selector = functools.lru_cache()(_make_selector)
class _Selector:
    """A selector matches a specific glob pattern part against the children
    of a given path."""

    def __init__(self, child_parts):
        self.child_parts = child_parts
        if child_parts:
            # More pattern components follow: chain the next selector and
            # only descend into directories.
            self.successor = _make_selector(child_parts)
            self.dironly = True
        else:
            # This is the last pattern component.
            self.successor = _TerminatingSelector()
            self.dironly = False

    def select_from(self, parent_path):
        """Iterate over all child paths of `parent_path` matched by this
        selector. This can contain parent_path itself."""
        path_cls = type(parent_path)
        is_dir = path_cls.is_dir
        exists = path_cls.exists
        scandir = parent_path._accessor.scandir
        if not is_dir(parent_path):
            return iter([])
        return self._select_from(parent_path, is_dir, exists, scandir)
class _TerminatingSelector:
    # End of the selector chain: yields the path that reached it.

    def _select_from(self, parent_path, is_dir, exists, scandir):
        yield parent_path
class _PreciseSelector(_Selector):
    """Selector for a literal (non-wildcard) pattern component: looks up
    the name directly instead of scanning the directory."""

    def __init__(self, name, child_parts):
        self.name = name
        _Selector.__init__(self, child_parts)

    def _select_from(self, parent_path, is_dir, exists, scandir):
        def try_iter():
            path = parent_path._make_child_relpath(self.name)
            # Intermediate components must be dirs; the last may be any file.
            if (is_dir if self.dironly else exists)(path):
                for p in self.successor._select_from(
                        path, is_dir, exists, scandir):
                    yield p

        def except_iter(exc):
            # Empty generator: permission errors yield nothing.
            return
            yield

        for x in _try_except_permissionerror_iter(try_iter, except_iter):
            yield x
class _WildcardSelector(_Selector):
    """Selector for a single wildcard pattern component ('*', '?', '[...]'):
    scans the directory and fnmatch-es each entry name."""

    def __init__(self, pat, child_parts):
        # Compile once; matching is done on casefolded names.
        self.pat = re.compile(fnmatch.translate(pat))
        _Selector.__init__(self, child_parts)

    def _select_from(self, parent_path, is_dir, exists, scandir):
        def try_iter():
            cf = parent_path._flavour.casefold
            # Materialize entries so scandir's handle is consumed up front.
            entries = list(scandir(parent_path))
            for entry in entries:
                if not self.dironly or entry.is_dir():
                    name = entry.name
                    casefolded = cf(name)
                    if self.pat.match(casefolded):
                        path = parent_path._make_child_relpath(name)
                        for p in self.successor._select_from(
                                path, is_dir, exists, scandir):
                            yield p

        def except_iter(exc):
            # Empty generator: permission errors yield nothing.
            return
            yield

        for x in _try_except_permissionerror_iter(try_iter, except_iter):
            yield x
class _RecursiveWildcardSelector(_Selector):
    """Selector for the '**' pattern component: matches this directory and
    every subdirectory, recursively (symlinks are not followed)."""

    def __init__(self, pat, child_parts):
        _Selector.__init__(self, child_parts)

    def _iterate_directories(self, parent_path, is_dir, scandir):
        # Yield parent_path itself, then every subdirectory (depth-first).
        yield parent_path

        def try_iter():
            entries = list(scandir(parent_path))
            for entry in entries:
                entry_is_dir = False
                try:
                    entry_is_dir = entry.is_dir()
                except OSError as e:
                    # Treat vanished/unreachable entries as non-directories.
                    if not _ignore_error(e):
                        raise
                if entry_is_dir and not entry.is_symlink():
                    path = parent_path._make_child_relpath(entry.name)
                    for p in self._iterate_directories(path, is_dir, scandir):
                        yield p

        def except_iter(exc):
            # Empty generator: permission errors yield nothing.
            return
            yield

        for x in _try_except_permissionerror_iter(try_iter, except_iter):
            yield x

    def _select_from(self, parent_path, is_dir, exists, scandir):
        def try_iter():
            # Track already-yielded paths so the same file matched from
            # several starting directories is only reported once.
            yielded = set()
            try:
                successor_select = self.successor._select_from
                for starting_point in self._iterate_directories(
                        parent_path, is_dir, scandir):
                    for p in successor_select(
                            starting_point, is_dir, exists, scandir):
                        if p not in yielded:
                            yield p
                            yielded.add(p)
            finally:
                yielded.clear()

        def except_iter(exc):
            # Empty generator: permission errors yield nothing.
            return
            yield

        for x in _try_except_permissionerror_iter(try_iter, except_iter):
            yield x
#
# Public API
#
class _PathParents(Sequence):
    """This object provides sequence-like access to the logical ancestors
    of a path. Don't try to construct it yourself."""
    __slots__ = ('_pathcls', '_drv', '_root', '_parts')

    def __init__(self, path):
        # We don't store the instance to avoid reference cycles
        self._pathcls = type(path)
        self._drv = path._drv
        self._root = path._root
        self._parts = path._parts

    def __len__(self):
        # The anchor pseudo-part (drive+root) does not count as an ancestor.
        if self._drv or self._root:
            return len(self._parts) - 1
        else:
            return len(self._parts)

    def __getitem__(self, idx):
        # Index 0 is the immediate parent; larger indices walk upward.
        # Negative indices are rejected.
        if idx < 0 or idx >= len(self):
            raise IndexError(idx)
        return self._pathcls._from_parsed_parts(self._drv, self._root,
                                                self._parts[:-idx - 1])

    def __repr__(self):
        return "<{0}.parents>".format(self._pathcls.__name__)
class PurePath(object):
"""PurePath represents a filesystem path and offers operations which
don't imply any actual filesystem I/O. Depending on your system,
instantiating a PurePath will return either a PurePosixPath or a
PureWindowsPath object. You can also instantiate either of these classes
directly, regardless of your system.
"""
__slots__ = (
'_drv', '_root', '_parts',
'_str', '_hash', '_pparts', '_cached_cparts',
)
    def __new__(cls, *args):
        """Construct a PurePath from one or several strings and or existing
        PurePath objects. The strings and path objects are combined so as
        to yield a canonicalized path, which is incorporated into the
        new PurePath object.
        """
        if cls is PurePath:
            # Instantiating PurePath directly picks the flavour of the
            # running OS; subclasses are instantiated as-is.
            cls = PureWindowsPath if os.name == 'nt' else PurePosixPath
        return cls._from_parts(args)
    def __reduce__(self):
        """Support pickling: reconstruct from the class and the parts."""
        # Using the parts tuple helps share interned path parts
        # when pickling related paths.
        return (self.__class__, tuple(self._parts))
    @classmethod
    def _parse_args(cls, args):
        """Canonicalize constructor arguments into a (drive, root, parts)
        tuple without creating an instance.

        Accepts PurePath objects, str (and, on Python 2, unicode), and
        os.PathLike objects; raises TypeError for anything else.
        """
        # This is useful when you don't want to create an instance, just
        # canonicalize some constructor arguments.
        parts = []
        for a in args:
            if isinstance(a, PurePath):
                parts += a._parts
            else:
                if sys.version_info >= (3, 6):
                    a = os.fspath(a)
                else:
                    # duck typing for older Python versions
                    if hasattr(a, "__fspath__"):
                        a = a.__fspath__()
                if isinstance(a, str):
                    # Force-cast str subclasses to str (issue #21127)
                    parts.append(str(a))
                # also handle unicode for PY2 (six.text_type = unicode)
                elif six.PY2 and isinstance(a, six.text_type):
                    # cast to str using filesystem encoding
                    # note: in rare circumstances, on Python < 3.2,
                    # getfilesystemencoding can return None, in that
                    # case fall back to ascii
                    parts.append(a.encode(
                        sys.getfilesystemencoding() or "ascii"))
                else:
                    raise TypeError(
                        "argument should be a str object or an os.PathLike "
                        "object returning str, not %r"
                        % type(a))
        return cls._flavour.parse_parts(parts)
    @classmethod
    def _from_parts(cls, args, init=True):
        """Create a new path object from raw constructor arguments,
        parsing them into (drive, root, parts) first."""
        # We need to call _parse_args on the instance, so as to get the
        # right flavour.
        self = object.__new__(cls)
        drv, root, parts = self._parse_args(args)
        self._drv = drv
        self._root = root
        self._parts = parts
        if init:
            # _init is a no-op for pure paths; concrete Path overrides it.
            self._init()
        return self
    @classmethod
    def _from_parsed_parts(cls, drv, root, parts, init=True):
        """Create a new path object from already-parsed
        (drive, root, parts), bypassing argument parsing."""
        self = object.__new__(cls)
        self._drv = drv
        self._root = root
        self._parts = parts
        if init:
            self._init()
        return self
@classmethod
def _format_parsed_parts(cls, drv, root, parts):
if drv or root:
return drv + root + cls._flavour.join(parts[1:])
else:
return cls._flavour.join(parts)
    def _init(self):
        """Post-construction hook; a no-op for pure paths."""
        # Overridden in concrete Path
        pass
    def _make_child(self, args):
        """Return a new path combining self with *args* (the '/' operator
        and joinpath() implementation)."""
        drv, root, parts = self._parse_args(args)
        drv, root, parts = self._flavour.join_parsed_parts(
            self._drv, self._root, self._parts, drv, root, parts)
        return self._from_parsed_parts(drv, root, parts)
    def __str__(self):
        """Return the string representation of the path, suitable for
        passing to system calls."""
        try:
            # Cached after the first call.
            return self._str
        except AttributeError:
            # An empty path is rendered as '.'.
            self._str = self._format_parsed_parts(self._drv, self._root,
                                                  self._parts) or '.'
            return self._str
def __fspath__(self):
return str(self)
def as_posix(self):
"""Return the string representation of the path with forward (/)
slashes."""
f = self._flavour
return str(self).replace(f.sep, '/')
def __bytes__(self):
"""Return the bytes representation of the path. This is only
recommended to use under Unix."""
if sys.version_info < (3, 2):
raise NotImplementedError("needs Python 3.2 or later")
return os.fsencode(str(self))
def __repr__(self):
return "{0}({1!r})".format(self.__class__.__name__, self.as_posix())
def as_uri(self):
"""Return the path as a 'file' URI."""
if not self.is_absolute():
raise ValueError("relative path can't be expressed as a file URI")
return self._flavour.make_uri(self)
@property
def _cparts(self):
# Cached casefolded parts, for hashing and comparison
try:
return self._cached_cparts
except AttributeError:
self._cached_cparts = self._flavour.casefold_parts(self._parts)
return self._cached_cparts
def __eq__(self, other):
if not isinstance(other, PurePath):
return NotImplemented
return (
self._cparts == other._cparts
and self._flavour is other._flavour)
def __ne__(self, other):
return not self == other
def __hash__(self):
try:
return self._hash
except AttributeError:
self._hash = hash(tuple(self._cparts))
return self._hash
def __lt__(self, other):
if (not isinstance(other, PurePath)
or self._flavour is not other._flavour):
return NotImplemented
return self._cparts < other._cparts
def __le__(self, other):
if (not isinstance(other, PurePath)
or self._flavour is not other._flavour):
return NotImplemented
return self._cparts <= other._cparts
def __gt__(self, other):
if (not isinstance(other, PurePath)
or self._flavour is not other._flavour):
return NotImplemented
return self._cparts > other._cparts
def __ge__(self, other):
if (not isinstance(other, PurePath)
or self._flavour is not other._flavour):
return NotImplemented
return self._cparts >= other._cparts
drive = property(attrgetter('_drv'),
doc="""The drive prefix (letter or UNC path), if any.""")
root = property(attrgetter('_root'),
doc="""The root of the path, if any.""")
@property
def anchor(self):
"""The concatenation of the drive and root, or ''."""
anchor = self._drv + self._root
return anchor
@property
def name(self):
"""The final path component, if any."""
parts = self._parts
if len(parts) == (1 if (self._drv or self._root) else 0):
return ''
return parts[-1]
@property
def suffix(self):
"""The final component's last suffix, if any."""
name = self.name
i = name.rfind('.')
if 0 < i < len(name) - 1:
return name[i:]
else:
return ''
@property
def suffixes(self):
"""A list of the final component's suffixes, if any."""
name = self.name
if name.endswith('.'):
return []
name = name.lstrip('.')
return ['.' + suffix for suffix in name.split('.')[1:]]
@property
def stem(self):
"""The final path component, minus its last suffix."""
name = self.name
i = name.rfind('.')
if 0 < i < len(name) - 1:
return name[:i]
else:
return name
def with_name(self, name):
"""Return a new path with the file name changed."""
if not self.name:
raise ValueError("%r has an empty name" % (self,))
drv, root, parts = self._flavour.parse_parts((name,))
if (not name or name[-1] in [self._flavour.sep, self._flavour.altsep]
or drv or root or len(parts) != 1):
raise ValueError("Invalid name %r" % (name))
return self._from_parsed_parts(self._drv, self._root,
self._parts[:-1] + [name])
def with_suffix(self, suffix):
"""Return a new path with the file suffix changed. If the path
has no suffix, add given suffix. If the given suffix is an empty
string, remove the suffix from the path.
"""
# XXX if suffix is None, should the current suffix be removed?
f = self._flavour
if f.sep in suffix or f.altsep and f.altsep in suffix:
raise ValueError("Invalid suffix %r" % (suffix))
if suffix and not suffix.startswith('.') or suffix == '.':
raise ValueError("Invalid suffix %r" % (suffix))
name = self.name
if not name:
raise ValueError("%r has an empty name" % (self,))
old_suffix = self.suffix
if not old_suffix:
name = name + suffix
else:
name = name[:-len(old_suffix)] + suffix
return self._from_parsed_parts(self._drv, self._root,
self._parts[:-1] + [name])
    def relative_to(self, *other):
        """Return the relative path to another path identified by the passed
        arguments.  If the operation is not possible (because this is not
        a subpath of the other path), raise ValueError.
        """
        # For the purpose of this method, drive and root are considered
        # separate parts, i.e.:
        #   Path('c:/').relative_to('c:') gives Path('/')
        #   Path('c:/').relative_to('/')  raises ValueError
        if not other:
            raise TypeError("need at least one argument")
        parts = self._parts
        drv = self._drv
        root = self._root
        # Expand the anchor pseudo-component (parts[0]) into separate
        # drive and root entries so prefix comparison treats them apart.
        if root:
            abs_parts = [drv, root] + parts[1:]
        else:
            abs_parts = parts
        to_drv, to_root, to_parts = self._parse_args(other)
        if to_root:
            to_abs_parts = [to_drv, to_root] + to_parts[1:]
        else:
            to_abs_parts = to_parts
        n = len(to_abs_parts)
        cf = self._flavour.casefold_parts
        # Mismatch if: the other path is empty while self is anchored, or
        # the casefolded prefix of self does not equal the other path.
        if (root or drv) if n == 0 else cf(abs_parts[:n]) != cf(to_abs_parts):
            formatted = self._format_parsed_parts(to_drv, to_root, to_parts)
            raise ValueError("{0!r} does not start with {1!r}"
                             .format(str(self), str(formatted)))
        # Result is relative; keep the root only when just the drive was
        # stripped (n == 1), mirroring the 'c:/' relative_to 'c:' case.
        return self._from_parsed_parts('', root if n == 1 else '',
                                       abs_parts[n:])
@property
def parts(self):
"""An object providing sequence-like access to the
components in the filesystem path."""
# We cache the tuple to avoid building a new one each time .parts
# is accessed. XXX is this necessary?
try:
return self._pparts
except AttributeError:
self._pparts = tuple(self._parts)
return self._pparts
def joinpath(self, *args):
"""Combine this path with one or several arguments, and return a
new path representing either a subpath (if all arguments are relative
paths) or a totally different path (if one of the arguments is
anchored).
"""
return self._make_child(args)
def __truediv__(self, key):
return self._make_child((key,))
def __rtruediv__(self, key):
return self._from_parts([key] + self._parts)
if six.PY2:
__div__ = __truediv__
__rdiv__ = __rtruediv__
@property
def parent(self):
"""The logical parent of the path.
:
"""
drv = self._drv
root = self._root
parts = self._parts
if len(parts) == 1 and (drv or root):
return self
return self._from_parsed_parts(drv, root, parts[:-1])
@property
def parents(self):
"""A sequence of this path's logical parents."""
return _PathParents(self)
def is_absolute(self):
"""True if the path is absolute (has both a root and, if applicable,
a drive)."""
if not self._root:
return False
return not self._flavour.has_drv or bool(self._drv)
def is_reserved(self):
"""Return True if the path contains one of the special names reserved
by the system, if any."""
return self._flavour.is_reserved(self._parts)
    def match(self, path_pattern):
        """
        Return True if this path matches the given pattern.

        A relative pattern is matched against the right-most components of
        the path; an anchored pattern (with drive or root) must match the
        whole path.  Matching is case-insensitive on case-insensitive
        flavours (via the flavour's casefold).
        """
        cf = self._flavour.casefold
        path_pattern = cf(path_pattern)
        drv, root, pat_parts = self._flavour.parse_parts((path_pattern,))
        if not pat_parts:
            raise ValueError("empty pattern")
        # An anchor in the pattern must match this path's anchor exactly.
        if drv and drv != cf(self._drv):
            return False
        if root and root != cf(self._root):
            return False
        parts = self._cparts
        if drv or root:
            # Anchored pattern: component counts must agree; drop the
            # anchor pseudo-component before the fnmatch loop.
            if len(pat_parts) != len(parts):
                return False
            pat_parts = pat_parts[1:]
        elif len(pat_parts) > len(parts):
            return False
        # Compare from the right so a relative pattern matches a tail.
        for part, pat in zip(reversed(parts), reversed(pat_parts)):
            if not fnmatch.fnmatchcase(part, pat):
                return False
        return True
# Can't subclass os.PathLike from PurePath and keep the constructor
# optimizations in PurePath._parse_args().
if sys.version_info >= (3, 6):
os.PathLike.register(PurePath)
class PurePosixPath(PurePath):
    """PurePath subclass for non-Windows systems.
    On a POSIX system, instantiating a PurePath should return this object.
    However, you can also instantiate it directly on any system.
    """
    # POSIX parsing/joining rules: '/' separator, no drive letters.
    _flavour = _posix_flavour
    __slots__ = ()
class PureWindowsPath(PurePath):
    """PurePath subclass for Windows systems.
    On a Windows system, instantiating a PurePath should return this object.
    However, you can also instantiate it directly on any system.
    """
    # ntpath-style parsing/joining rules: '\\' separator, drive letters,
    # UNC shares, case-insensitive comparison.
    _flavour = _windows_flavour
    __slots__ = ()
# Filesystem-accessing classes
from .mate_attr_accessor import AttrAccessor
from .mate_hashes_methods import HashesMethods
from .mate_path_filters import PathFilters
from .mate_mutate_methods import MutateMethods
from .mate_tool_box import ToolBox
class Path(PurePath,
AttrAccessor, HashesMethods, PathFilters, MutateMethods, ToolBox):
"""PurePath subclass that can make system calls.
Path represents a filesystem path but unlike PurePath, also offers
methods to do system calls on path objects. Depending on your system,
instantiating a Path will return either a PosixPath or a WindowsPath
object. You can also instantiate a PosixPath or WindowsPath directly,
but cannot instantiate a WindowsPath on a POSIX system or vice versa.
"""
__slots__ = (
'_accessor',
'_closed',
)
def __new__(cls, *args, **kwargs):
"""
:rtype: Path
"""
if cls is Path:
cls = WindowsPath if os.name == 'nt' else PosixPath
self = cls._from_parts(args, init=False)
if not self._flavour.is_supported:
raise NotImplementedError("cannot instantiate %r on your system"
% (cls.__name__,))
self._init()
return self
def _init(self,
# Private non-constructor arguments
template=None,
):
self._closed = False
if template is not None:
self._accessor = template._accessor
else:
self._accessor = _normal_accessor
def _make_child_relpath(self, part):
# This is an optimization used for dir walking. `part` must be
# a single part relative to this path.
parts = self._parts + [part]
return self._from_parsed_parts(self._drv, self._root, parts)
def __enter__(self):
if self._closed:
self._raise_closed()
return self
def __exit__(self, t, v, tb):
self._closed = True
def _raise_closed(self):
raise ValueError("I/O operation on closed path")
def _opener(self, name, flags, mode=0o666):
# A stub for the opener argument to built-in open()
return self._accessor.open(self, flags, mode)
def _raw_open(self, flags, mode=0o777):
"""
Open the file pointed by this path and return a file descriptor,
as os.open() does.
"""
if self._closed:
self._raise_closed()
return self._accessor.open(self, flags, mode)
# Public API
@classmethod
def cwd(cls):
"""Return a new path pointing to the current working directory
(as returned by os.getcwd()).
:rtype: Path
"""
return cls(os.getcwd())
@classmethod
def home(cls):
"""Return a new path pointing to the user's home directory (as
returned by os.path.expanduser('~')).
:rtype: Path
"""
return cls(cls()._flavour.gethomedir(None))
def samefile(self, other_path):
"""Return whether other_path is the same or not as this file
(as returned by os.path.samefile()).
:rtype: Path
"""
if hasattr(os.path, "samestat"):
st = self.stat()
try:
other_st = other_path.stat()
except AttributeError:
other_st = os.stat(other_path)
return os.path.samestat(st, other_st)
else:
filename1 = six.text_type(self)
filename2 = six.text_type(other_path)
st1 = _win32_get_unique_path_id(filename1)
st2 = _win32_get_unique_path_id(filename2)
return st1 == st2
def iterdir(self):
"""Iterate over the files in this directory. Does not yield any
result for the special paths '.' and '..'.
:rtype: typing.Iterable[Path]
"""
if self._closed:
self._raise_closed()
for name in self._accessor.listdir(self):
if name in ('.', '..'):
# Yielding a path object for these makes little sense
continue
yield self._make_child_relpath(name)
if self._closed:
self._raise_closed()
def glob(self, pattern):
"""Iterate over this subtree and yield all existing files (of any
kind, including directories) matching the given relative pattern.
:rtype: typing.Iterable[Path]
"""
if not pattern:
raise ValueError("Unacceptable pattern: {0!r}".format(pattern))
pattern = self._flavour.casefold(pattern)
drv, root, pattern_parts = self._flavour.parse_parts((pattern,))
if drv or root:
raise NotImplementedError("Non-relative patterns are unsupported")
selector = _make_selector(tuple(pattern_parts))
for p in selector.select_from(self):
yield p
def rglob(self, pattern):
"""Recursively yield all existing files (of any kind, including
directories) matching the given relative pattern, anywhere in
this subtree.
:rtype: typing.Iterable[Path]
"""
pattern = self._flavour.casefold(pattern)
drv, root, pattern_parts = self._flavour.parse_parts((pattern,))
if drv or root:
raise NotImplementedError("Non-relative patterns are unsupported")
selector = _make_selector(("**",) + tuple(pattern_parts))
for p in selector.select_from(self):
yield p
def absolute(self):
"""Return an absolute version of this path. This function works
even if the path doesn't point to anything.
No normalization is done, i.e. all '.' and '..' will be kept along.
Use resolve() to get the canonical path to a file.
:rtype: Path
"""
# XXX untested yet!
if self._closed:
self._raise_closed()
if self.is_absolute():
return self
# FIXME this must defer to the specific flavour (and, under Windows,
# use nt._getfullpathname())
obj = self._from_parts([os.getcwd()] + self._parts, init=False)
obj._init(template=self)
return obj
def resolve(self, strict=False):
"""
Make the path absolute, resolving all symlinks on the way and also
normalizing it (for example turning slashes into backslashes under
Windows).
"""
if self._closed:
self._raise_closed()
s = self._flavour.resolve(self, strict=strict)
if s is None:
# No symlink resolution => for consistency, raise an error if
# the path is forbidden
# but not raise error if file does not exist (see issue #54).
def _try_func():
self.stat()
def _exc_func(exc):
pass
_try_except_filenotfounderror(_try_func, _exc_func)
s = str(self.absolute())
else:
# ensure s is a string (normpath requires this on older python)
s = str(s)
# Now we have no symlinks in the path, it's safe to normalize it.
normed = self._flavour.pathmod.normpath(s)
obj = self._from_parts((normed,), init=False)
obj._init(template=self)
return obj
def stat(self):
"""
Return the result of the stat() system call on this path, like
os.stat() does.
"""
return self._accessor.stat(self)
def owner(self):
"""
Return the login name of the file owner.
"""
import pwd
return pwd.getpwuid(self.stat().st_uid).pw_name
def group(self):
"""
Return the group name of the file gid.
"""
import grp
return grp.getgrgid(self.stat().st_gid).gr_name
def open(self, mode='r', buffering=-1, encoding=None,
errors=None, newline=None):
"""
Open the file pointed by this path and return a file object, as
the built-in open() function does.
"""
if self._closed:
self._raise_closed()
if sys.version_info >= (3, 3):
return io.open(
str(self), mode, buffering, encoding, errors, newline,
opener=self._opener)
else:
return io.open(str(self), mode, buffering,
encoding, errors, newline)
def read_bytes(self):
"""
Open the file in bytes mode, read it, and close the file.
"""
with self.open(mode='rb') as f:
return f.read()
def read_text(self, encoding=None, errors=None):
"""
Open the file in text mode, read it, and close the file.
"""
with self.open(mode='r', encoding=encoding, errors=errors) as f:
return f.read()
def write_bytes(self, data):
"""
Open the file in bytes mode, write to it, and close the file.
:type data: bytes
"""
if not isinstance(data, six.binary_type):
raise TypeError(
'data must be %s, not %s' %
(six.binary_type.__name__, data.__class__.__name__))
with self.open(mode='wb') as f:
return f.write(data)
def write_text(self, data, encoding="utf-8", errors=None):
"""
Open the file in text mode, write to it, and close the file.
:type data: str
:type encoding: str, recommend to use "utf-8"
"""
if not isinstance(data, six.text_type):
raise TypeError(
'data must be %s, not %s' %
(six.text_type.__name__, data.__class__.__name__))
with self.open(mode='w', encoding=encoding, errors=errors) as f:
return f.write(data)
def atomic_write_bytes(self, data, overwrite=False):
"""
An atomic write action for binary data.
Either fully done or nothing happen.
Preventing overwriting existing file with incomplete data.
:type data: bytes
:type overwrite: bool
"""
with atomic_write(self.abspath, mode="wb", overwrite=overwrite) as f:
f.write(data)
def atomic_write_text(self, data, encoding="utf-8", overwrite=False):
"""
An atomic write action for text. Either fully done or nothing happen.
Preventing overwriting existing file with incomplete data.
:type data: str
:type encoding: str, recommend to use "utf-8"
:type overwrite: bool
:return:
"""
with atomic_write(self.abspath, mode="wb", overwrite=overwrite) as f:
f.write(data.encode(encoding))
def touch(self, mode=0o666, exist_ok=True):
"""
Create this file with the given access mode, if it doesn't exist.
"""
if self._closed:
self._raise_closed()
if exist_ok:
# First try to bump modification time
# Implementation note: GNU touch uses the UTIME_NOW option of
# the utimensat() / futimens() functions.
try:
self._accessor.utime(self, None)
except OSError:
# Avoid exception chaining
pass
else:
return
flags = os.O_CREAT | os.O_WRONLY
if not exist_ok:
flags |= os.O_EXCL
fd = self._raw_open(flags, mode)
os.close(fd)
def mkdir(self, mode=0o777, parents=False, exist_ok=False):
"""
Create a new directory at this given path.
"""
if self._closed:
self._raise_closed()
def _try_func():
self._accessor.mkdir(self, mode)
def _exc_func(exc):
if not parents or self.parent == self:
raise exc
self.parent.mkdir(parents=True, exist_ok=True)
self.mkdir(mode, parents=False, exist_ok=exist_ok)
try:
_try_except_filenotfounderror(_try_func, _exc_func)
except OSError:
# Cannot rely on checking for EEXIST, since the operating system
# could give priority to other errors like EACCES or EROFS
if not exist_ok or not self.is_dir():
raise
def chmod(self, mode):
"""
Change the permissions of the path, like os.chmod().
"""
if self._closed:
self._raise_closed()
self._accessor.chmod(self, mode)
def lchmod(self, mode):
"""
Like chmod(), except if the path points to a symlink, the symlink's
permissions are changed, rather than its target's.
"""
if self._closed:
self._raise_closed()
self._accessor.lchmod(self, mode)
def unlink(self):
"""
Remove this file or link.
If the path is a directory, use rmdir() instead.
"""
if self._closed:
self._raise_closed()
self._accessor.unlink(self)
def rmdir(self):
"""
Remove this directory. The directory must be empty.
"""
if self._closed:
self._raise_closed()
self._accessor.rmdir(self)
def lstat(self):
"""
Like stat(), except if the path points to a symlink, the symlink's
status information is returned, rather than its target's.
"""
if self._closed:
self._raise_closed()
return self._accessor.lstat(self)
def rename(self, target):
"""
Rename this path to the given path.
"""
if self._closed:
self._raise_closed()
self._accessor.rename(self, target)
def replace(self, target):
"""
Rename this path to the given path, clobbering the existing
destination if it exists.
"""
if sys.version_info < (3, 3):
raise NotImplementedError("replace() is only available "
"with Python 3.3 and later")
if self._closed:
self._raise_closed()
self._accessor.replace(self, target)
def symlink_to(self, target, target_is_directory=False):
"""
Make this path a symlink pointing to the given path.
Note the order of arguments (self, target) is the reverse of
os.symlink's.
"""
if self._closed:
self._raise_closed()
self._accessor.symlink(target, self, target_is_directory)
# Convenience functions for querying the stat results
def exists(self):
"""
Whether this path exists.
"""
try:
self.stat()
except OSError as e:
if not _ignore_error(e):
raise
return False
except ValueError:
# Non-encodable path
return False
return True
def is_dir(self):
"""
Whether this path is a directory.
"""
try:
return S_ISDIR(self.stat().st_mode)
except OSError as e:
if not _ignore_error(e):
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
except ValueError:
# Non-encodable path
return False
def is_file(self):
"""
Whether this path is a regular file (also True for symlinks pointing
to regular files).
"""
try:
return S_ISREG(self.stat().st_mode)
except OSError as e:
if not _ignore_error(e):
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
except ValueError:
# Non-encodable path
return False
def is_mount(self):
"""
Check if this path is a POSIX mount point
"""
# Need to exist and be a dir
if not self.exists() or not self.is_dir():
return False
parent = Path(self.parent)
try:
parent_dev = parent.stat().st_dev
except OSError:
return False
dev = self.stat().st_dev
if dev != parent_dev:
return True
ino = self.stat().st_ino
parent_ino = parent.stat().st_ino
return ino == parent_ino
def is_symlink(self):
"""
Whether this path is a symbolic link.
"""
try:
return S_ISLNK(self.lstat().st_mode)
except OSError as e:
if not _ignore_error(e):
raise
# Path doesn't exist
return False
except ValueError:
# Non-encodable path
return False
def is_block_device(self):
"""
Whether this path is a block device.
"""
try:
return S_ISBLK(self.stat().st_mode)
except OSError as e:
if not _ignore_error(e):
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
except ValueError:
# Non-encodable path
return False
def is_char_device(self):
"""
Whether this path is a character device.
"""
try:
return S_ISCHR(self.stat().st_mode)
except OSError as e:
if not _ignore_error(e):
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
except ValueError:
# Non-encodable path
return False
def is_fifo(self):
"""
Whether this path is a FIFO.
"""
try:
return S_ISFIFO(self.stat().st_mode)
except OSError as e:
if not _ignore_error(e):
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
except ValueError:
# Non-encodable path
return False
def is_socket(self):
"""
Whether this path is a socket.
"""
try:
return S_ISSOCK(self.stat().st_mode)
except OSError as e:
if not _ignore_error(e):
raise
# Path doesn't exist or is a broken symlink
# (see https://bitbucket.org/pitrou/pathlib/issue/12/)
return False
except ValueError:
# Non-encodable path
return False
def expanduser(self):
""" Return a new path with expanded ~ and ~user constructs
(as returned by os.path.expanduser)
"""
if (not (self._drv or self._root)
and self._parts and self._parts[0][:1] == '~'):
homedir = self._flavour.gethomedir(self._parts[0][1:])
return self._from_parts([homedir] + self._parts[1:])
return self
class PosixPath(Path, PurePosixPath):
"""Path subclass for non-Windows systems.
On a POSIX system, instantiating a Path should return this object.
"""
__slots__ = ()
class WindowsPath(Path, PureWindowsPath):
"""Path subclass for Windows systems.
On a Windows system, instantiating a Path should return this object.
"""
__slots__ = ()
def owner(self):
raise NotImplementedError("Path.owner() is unsupported on this system")
def group(self):
raise NotImplementedError("Path.group() is unsupported on this system")
def is_mount(self):
raise NotImplementedError(
"Path.is_mount() is unsupported on this system")
| 32.471718 | 79 | 0.559784 |
try:
import typing
except:
pass
import ctypes
import fnmatch
import functools
import io
import ntpath
import os
import posixpath
import re
import sys
from errno import EEXIST, EPERM, EACCES
from errno import EINVAL, ENOENT, ENOTDIR, EBADF
from operator import attrgetter
from stat import (
S_ISDIR, S_ISLNK, S_ISREG, S_ISSOCK, S_ISBLK, S_ISCHR, S_ISFIFO)
import six
try:
from collections.abc import Sequence
except ImportError:
from collections import Sequence
try:
from urllib import quote as urlquote_from_bytes
except ImportError:
from urllib.parse import quote_from_bytes as urlquote_from_bytes
try:
intern = intern
except NameError:
intern = sys.intern
supports_symlinks = True
if os.name == 'nt':
import nt
if sys.getwindowsversion()[:2] >= (6, 0) and sys.version_info >= (3, 2):
from nt import _getfinalpathname
else:
supports_symlinks = False
_getfinalpathname = None
else:
nt = None
try:
from os import scandir as os_scandir
except ImportError:
from scandir import scandir as os_scandir
from atomicwrites import atomic_write
__all__ = [
"PurePath", "PurePosixPath", "PureWindowsPath",
"Path", "PosixPath", "WindowsPath",
]
_IGNORED_ERROS = (ENOENT, ENOTDIR, EBADF)
_IGNORED_WINERRORS = (
21,
)
def _ignore_error(exception):
return (getattr(exception, 'errno', None) in _IGNORED_ERROS or
getattr(exception, 'winerror', None) in _IGNORED_WINERRORS)
def _py2_fsencode(parts):
    """Encode unicode path parts to ASCII bytes (Python 2 only).

    Non-text parts pass through unchanged; mirrors py2's limited
    unicode path support.
    """
    assert six.PY2
    return [part.encode('ascii') if isinstance(part, six.text_type)
            else part for part in parts]
def _try_except_fileexistserror(try_func, except_func, else_func=None):
if sys.version_info >= (3, 3):
try:
try_func()
except FileExistsError as exc:
except_func(exc)
else:
if else_func is not None:
else_func()
else:
try:
try_func()
except EnvironmentError as exc:
if exc.errno != EEXIST:
raise
else:
except_func(exc)
else:
if else_func is not None:
else_func()
def _try_except_filenotfounderror(try_func, except_func):
if sys.version_info >= (3, 3):
try:
try_func()
except FileNotFoundError as exc:
except_func(exc)
elif os.name != 'nt':
try:
try_func()
except EnvironmentError as exc:
if exc.errno != ENOENT:
raise
else:
except_func(exc)
else:
try:
try_func()
except WindowsError as exc:
if exc.errno not in (2, 3):
raise
else:
except_func(exc)
except EnvironmentError as exc:
if exc.errno != ENOENT:
raise
else:
except_func(exc)
def _try_except_permissionerror_iter(try_iter, except_iter):
if sys.version_info >= (3, 3):
try:
for x in try_iter():
yield x
except PermissionError as exc:
for x in except_iter(exc):
yield x
else:
try:
for x in try_iter():
yield x
except EnvironmentError as exc:
if exc.errno not in (EPERM, EACCES):
raise
else:
for x in except_iter(exc):
yield x
def _win32_get_unique_path_id(path):
    """Return a (volume, index_hi, index_lo) triple uniquely identifying
    *path* on Windows; used by samefile() where os.path.samestat is missing.

    Raises FileNotFoundError (OSError with errno ENOENT on Python < 3.3)
    if the path cannot be opened.
    """
    from ctypes import POINTER, Structure, WinError
    from ctypes.wintypes import DWORD, HANDLE, BOOL

    class FILETIME(Structure):
        _fields_ = [("datetime_lo", DWORD),
                    ("datetime_hi", DWORD),
                    ]

    class BY_HANDLE_FILE_INFORMATION(Structure):
        _fields_ = [("attributes", DWORD),
                    ("created_at", FILETIME),
                    ("accessed_at", FILETIME),
                    ("written_at", FILETIME),
                    ("volume", DWORD),
                    ("file_hi", DWORD),
                    ("file_lo", DWORD),
                    ("n_links", DWORD),
                    ("index_hi", DWORD),
                    ("index_lo", DWORD),
                    ]

    CreateFile = ctypes.windll.kernel32.CreateFileW
    CreateFile.argtypes = [ctypes.c_wchar_p, DWORD, DWORD, ctypes.c_void_p,
                           DWORD, DWORD, HANDLE]
    CreateFile.restype = HANDLE
    GetFileInformationByHandle = (
        ctypes.windll.kernel32.GetFileInformationByHandle)
    GetFileInformationByHandle.argtypes = [
        HANDLE, POINTER(BY_HANDLE_FILE_INFORMATION)]
    GetFileInformationByHandle.restype = BOOL
    CloseHandle = ctypes.windll.kernel32.CloseHandle
    CloseHandle.argtypes = [HANDLE]
    CloseHandle.restype = BOOL
    GENERIC_READ = 0x80000000
    FILE_SHARE_READ = 0x00000001
    FILE_FLAG_BACKUP_SEMANTICS = 0x02000000
    OPEN_EXISTING = 3
    # Directories need FILE_FLAG_BACKUP_SEMANTICS to be opened at all.
    if os.path.isdir(path):
        flags = FILE_FLAG_BACKUP_SEMANTICS
    else:
        flags = 0
    hfile = CreateFile(path, GENERIC_READ, FILE_SHARE_READ,
                       None, OPEN_EXISTING, flags, None)
    # BUG FIX: INVALID_HANDLE_VALUE is -1 cast to a pointer-sized unsigned
    # value, so on 64-bit Python it surfaces as 0xffffffffffffffff, not the
    # 0xffffffff the original compared against.  Check both widths (ctypes
    # may also map a NULL handle to None).
    if hfile is None or hfile in (0xffffffff, 0xffffffffffffffff):
        if sys.version_info >= (3, 3):
            raise FileNotFoundError(path)
        else:
            # BUG FIX: interpolate the actual path into the message; the
            # original raised the literal text "file not found: path".
            exc = OSError("file not found: %r" % (path,))
            exc.errno = ENOENT
            raise exc
    info = BY_HANDLE_FILE_INFORMATION()
    success = GetFileInformationByHandle(hfile, info)
    CloseHandle(hfile)
    if success == 0:
        raise WinError()
    return info.volume, info.index_hi, info.index_lo
def _is_wildcard_pattern(pat):
return "*" in pat or "?" in pat or "[" in pat
class _Flavour(object):
    """A flavour implements a particular (platform-specific) set of path
    semantics: separator characters, anchor (drive/root) parsing, and
    joining rules."""
    def __init__(self):
        # Bound join for this flavour's separator, e.g. '/'.join.
        self.join = self.sep.join
    def parse_parts(self, parts):
        """Parse raw string parts into a canonical (drv, root, parsed) triple.

        Parts are scanned right-to-left; the first anchored part wins and
        everything before it is discarded (except a drive that may still
        be contributed by an earlier part).
        """
        if six.PY2:
            parts = _py2_fsencode(parts)
        parsed = []
        sep = self.sep
        altsep = self.altsep
        drv = root = ''
        it = reversed(parts)
        for part in it:
            if not part:
                continue
            if altsep:
                # Normalize the alternate separator (e.g. '/' on Windows).
                part = part.replace(altsep, sep)
            drv, root, rel = self.splitroot(part)
            if sep in rel:
                for x in reversed(rel.split(sep)):
                    # Empty components and '.' are dropped; intern() makes
                    # repeated component strings share memory.
                    if x and x != '.':
                        parsed.append(intern(x))
            else:
                if rel and rel != '.':
                    parsed.append(intern(rel))
            if drv or root:
                if not drv:
                    # No drive on the anchored part: look further left for
                    # one, so e.g. ("C:", "/", "a") parses intuitively.
                    for part in it:
                        if not part:
                            continue
                        if altsep:
                            part = part.replace(altsep, sep)
                        drv = self.splitroot(part)[0]
                        if drv:
                            break
                # Anchored part found: earlier parts are irrelevant.
                break
        if drv or root:
            # The anchor is stored as the first pseudo-component.
            parsed.append(drv + root)
        parsed.reverse()
        return drv, root, parsed
    def join_parsed_parts(self, drv, root, parts, drv2, root2, parts2):
        """Join two (drive, root, parts) triples, returning a new triple.

        The second path wins when it is anchored, except that a rooted but
        drive-less second path inherits the first path's drive.
        """
        if root2:
            if not drv2 and drv:
                # Rooted but no drive: keep the first path's drive.
                return drv, root2, [drv + root2] + parts2[1:]
        elif drv2:
            if drv2 == drv or self.casefold(drv2) == self.casefold(drv):
                # Same drive => second path is relative to the first.
                return drv, root, parts + parts2[1:]
        else:
            # Second path is not anchored at all (the common case).
            return drv, root, parts + parts2
        # Second path fully replaces the first.
        return drv2, root2, parts2
class _WindowsFlavour(_Flavour):
sep = '\\'
altsep = '/'
has_drv = True
pathmod = ntpath
is_supported = (os.name == 'nt')
drive_letters = set('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')
ext_namespace_prefix = '\\\\?\\'
reserved_names = (
set(['CON', 'PRN', 'AUX', 'NUL']) |
set(['COM%d' % i for i in range(1, 10)]) |
set(['LPT%d' % i for i in range(1, 10)])
)
def splitroot(self, part, sep=sep):
first = part[0:1]
second = part[1:2]
if (second == sep and first == sep):
prefix, part = self._split_extended_path(part)
first = part[0:1]
second = part[1:2]
else:
prefix = ''
third = part[2:3]
if (second == sep and first == sep and third != sep):
index = part.find(sep, 2)
if index != -1:
index2 = part.find(sep, index + 1)
# (after the initial two)
if index2 != index + 1:
if index2 == -1:
index2 = len(part)
if prefix:
return prefix + part[1:index2], sep, part[index2 + 1:]
else:
return part[:index2], sep, part[index2 + 1:]
drv = root = ''
if second == ':' and first in self.drive_letters:
drv = part[:2]
part = part[2:]
first = third
if first == sep:
root = first
part = part.lstrip(sep)
return prefix + drv, root, part
def casefold(self, s):
return s.lower()
def casefold_parts(self, parts):
return [p.lower() for p in parts]
def resolve(self, path, strict=False):
s = str(path)
if not s:
return os.getcwd()
previous_s = None
if _getfinalpathname is not None:
if strict:
return self._ext_to_normal(_getfinalpathname(s))
else:
# End of the path after the first one not found
tail_parts = []
def _try_func():
result[0] = self._ext_to_normal(_getfinalpathname(s))
# if there was no exception, set flag to 0
result[1] = 0
def _exc_func(exc):
pass
while True:
result = [None, 1]
_try_except_filenotfounderror(_try_func, _exc_func)
if result[1] == 1: # file not found exception raised
previous_s = s
s, tail = os.path.split(s)
tail_parts.append(tail)
if previous_s == s:
return path
else:
s = result[0]
return os.path.join(s, *reversed(tail_parts))
# Means fallback on absolute
return None
def _split_extended_path(self, s, ext_prefix=ext_namespace_prefix):
prefix = ''
if s.startswith(ext_prefix):
prefix = s[:4]
s = s[4:]
if s.startswith('UNC\\'):
prefix += s[:3]
s = '\\' + s[3:]
return prefix, s
def _ext_to_normal(self, s):
# Turn back an extended path into a normal DOS-like path
return self._split_extended_path(s)[1]
def is_reserved(self, parts):
# NOTE: the rules for reserved names seem somewhat complicated
# (e.g. r"..\NUL" is reserved but not r"foo\NUL").
# We err on the side of caution and return True for paths which are
# not considered reserved by Windows.
if not parts:
return False
if parts[0].startswith('\\\\'):
# UNC paths are never reserved
return False
return parts[-1].partition('.')[0].upper() in self.reserved_names
def make_uri(self, path):
# Under Windows, file URIs use the UTF-8 encoding.
drive = path.drive
if len(drive) == 2 and drive[1] == ':':
# It's a path on a local drive => 'file:///c:/a/b'
rest = path.as_posix()[2:].lstrip('/')
return 'file:///%s/%s' % (
drive, urlquote_from_bytes(rest.encode('utf-8')))
else:
return 'file:' + urlquote_from_bytes(
path.as_posix().encode('utf-8'))
def gethomedir(self, username):
if 'HOME' in os.environ:
userhome = os.environ['HOME']
elif 'USERPROFILE' in os.environ:
userhome = os.environ['USERPROFILE']
elif 'HOMEPATH' in os.environ:
try:
drv = os.environ['HOMEDRIVE']
except KeyError:
drv = ''
userhome = drv + os.environ['HOMEPATH']
else:
raise RuntimeError("Can't determine home directory")
if username:
if os.environ['USERNAME'] != username:
drv, root, parts = self.parse_parts((userhome,))
if parts[-1] != os.environ['USERNAME']:
raise RuntimeError("Can't determine home directory "
"for %r" % username)
parts[-1] = username
if drv or root:
userhome = drv + root + self.join(parts[1:])
else:
userhome = self.join(parts)
return userhome
class _PosixFlavour(_Flavour):
    """Flavour implementing POSIX path semantics: '/' separator, no drive,
    case-sensitive comparison."""
    sep = '/'
    altsep = ''
    has_drv = False
    pathmod = posixpath
    # POSIX semantics are only fully supported off Windows.
    is_supported = (os.name != 'nt')
    def splitroot(self, part, sep=sep):
        """Split *part* into (drive, root, rest); POSIX paths never have a
        drive, so the first element is always ''."""
        if part and part[0] == sep:
            stripped_part = part.lstrip(sep)
            # According to POSIX path resolution:
            # http://pubs.opengroup.org/onlinepubs/009695399/basedefs/
            # xbd_chap04.html#tag_04_11
            # "A pathname that begins with two successive slashes may be
            # interpreted in an implementation-defined manner, although more
            # than two leading slashes shall be treated as a single slash".
            if len(part) - len(stripped_part) == 2:
                return '', sep * 2, stripped_part
            else:
                return '', sep, stripped_part
        else:
            return '', '', part
    def casefold(self, s):
        # POSIX filesystems are case-sensitive: identity transform.
        return s
    def casefold_parts(self, parts):
        # Case-sensitive: parts are returned unchanged.
        return parts
    def resolve(self, path, strict=False):
        """Resolve symlinks in *path* and return the resolved string.

        Symlink targets are cached in *seen*; a cache entry of None marks a
        link currently being resolved, so revisiting it means a symlink loop.
        In non-strict mode unresolvable components are left untouched.
        """
        sep = self.sep
        accessor = path._accessor
        seen = {}
        def _resolve(path, rest):
            if rest.startswith(sep):
                path = ''
            for name in rest.split(sep):
                if not name or name == '.':
                    # current dir
                    continue
                if name == '..':
                    # parent dir
                    path, _, _ = path.rpartition(sep)
                    continue
                newpath = path + sep + name
                if newpath in seen:
                    # Already seen this path
                    path = seen[newpath]
                    if path is not None:
                        # use cached value
                        continue
                    # The symlink is not resolved, so we must have a symlink
                    # loop.
                    raise RuntimeError("Symlink loop from %r" % newpath)
                # Resolve the symbolic link
                try:
                    target = accessor.readlink(newpath)
                except OSError as e:
                    if e.errno != EINVAL and strict:
                        raise
                    # Not a symlink, or non-strict mode. We just leave the path
                    # untouched.
                    path = newpath
                else:
                    seen[newpath] = None # not resolved symlink
                    path = _resolve(path, target)
                    seen[newpath] = path # resolved symlink
            return path
        # NOTE: according to POSIX, getcwd() cannot contain path components
        # which are symlinks.
        base = '' if path.is_absolute() else os.getcwd()
        return _resolve(base, str(path)) or sep
    def is_reserved(self, parts):
        # POSIX has no reserved file names.
        return False
    def make_uri(self, path):
        """Return a 'file' URI for *path*."""
        # We represent the path using the local filesystem encoding,
        # for portability to other applications.
        bpath = bytes(path)
        return 'file://' + urlquote_from_bytes(bpath)
    def gethomedir(self, username):
        """Return the home directory for *username* (or the current user),
        consulting $HOME first and the pwd database as fallback."""
        if not username:
            try:
                return os.environ['HOME']
            except KeyError:
                import pwd
                return pwd.getpwuid(os.getuid()).pw_dir
        else:
            import pwd
            try:
                return pwd.getpwnam(username).pw_dir
            except KeyError:
                raise RuntimeError("Can't determine home directory "
                                   "for %r" % username)
# Singleton flavour instances shared by all pure path objects of each family.
_windows_flavour = _WindowsFlavour()
_posix_flavour = _PosixFlavour()
class _Accessor:
    """An accessor implements a particular (system-specific or not) way of
    accessing paths on the filesystem."""
class _NormalAccessor(_Accessor):
    """Accessor that forwards to the real ``os`` functions, converting path
    objects to ``str`` first.  ``os_scandir``, ``nt`` and
    ``supports_symlinks`` are module-level names defined earlier in this
    file."""
    def _wrap_strfunc(strfunc):
        # Wrap an os function taking one path argument so that it accepts
        # path objects (anything with a sensible str()).
        @functools.wraps(strfunc)
        def wrapped(pathobj, *args):
            return strfunc(str(pathobj), *args)
        return staticmethod(wrapped)
    def _wrap_binary_strfunc(strfunc):
        # Same as _wrap_strfunc, for os functions taking two path arguments.
        @functools.wraps(strfunc)
        def wrapped(pathobjA, pathobjB, *args):
            return strfunc(str(pathobjA), str(pathobjB), *args)
        return staticmethod(wrapped)
    stat = _wrap_strfunc(os.stat)
    lstat = _wrap_strfunc(os.lstat)
    open = _wrap_strfunc(os.open)
    listdir = _wrap_strfunc(os.listdir)
    scandir = _wrap_strfunc(os_scandir)
    chmod = _wrap_strfunc(os.chmod)
    if hasattr(os, "lchmod"):
        lchmod = _wrap_strfunc(os.lchmod)
    else:
        def lchmod(self, pathobj, mode):
            raise NotImplementedError("lchmod() not available on this system")
    mkdir = _wrap_strfunc(os.mkdir)
    unlink = _wrap_strfunc(os.unlink)
    rmdir = _wrap_strfunc(os.rmdir)
    rename = _wrap_binary_strfunc(os.rename)
    if sys.version_info >= (3, 3):
        replace = _wrap_binary_strfunc(os.replace)
    if nt:
        if supports_symlinks:
            symlink = _wrap_binary_strfunc(os.symlink)
        else:
            def symlink(a, b, target_is_directory):
                raise NotImplementedError(
                    "symlink() not available on this system")
    else:
        # Under POSIX, os.symlink() takes no target_is_directory argument.
        @staticmethod
        def symlink(a, b, target_is_directory):
            return os.symlink(str(a), str(b))
    utime = _wrap_strfunc(os.utime)
    def readlink(self, path):
        """Return the target of symbolic link *path*."""
        return os.readlink(path)
# Default accessor used by Path objects not created from a template.
_normal_accessor = _NormalAccessor()
def _make_selector(pattern_parts):
    """Build the chain of selector objects for a tuple of glob pattern parts.

    '**' becomes a recursive selector, parts with wildcard metacharacters a
    fnmatch-based selector, and literal parts an exact-name selector.
    """
    pat = pattern_parts[0]
    child_parts = pattern_parts[1:]
    if pat == '**':
        cls = _RecursiveWildcardSelector
    elif '**' in pat:
        raise ValueError(
            "Invalid pattern: '**' can only be an entire path component")
    elif _is_wildcard_pattern(pat):
        cls = _WildcardSelector
    else:
        cls = _PreciseSelector
    return cls(pat, child_parts)
# Memoize selector construction; pattern_parts tuples are hashable.
if hasattr(functools, "lru_cache"):
    _make_selector = functools.lru_cache()(_make_selector)
class _Selector:
    """A selector matches a specific glob pattern part against the children
    of a given path."""
    def __init__(self, child_parts):
        self.child_parts = child_parts
        if child_parts:
            # More pattern parts follow: only directories can match further.
            self.successor = _make_selector(child_parts)
            self.dironly = True
        else:
            self.successor = _TerminatingSelector()
            self.dironly = False
    def select_from(self, parent_path):
        """Iterate over all child paths of *parent_path* matched by this
        selector.  This can contain parent_path itself."""
        path_cls = type(parent_path)
        is_dir = path_cls.is_dir
        exists = path_cls.exists
        scandir = parent_path._accessor.scandir
        if not is_dir(parent_path):
            return iter([])
        return self._select_from(parent_path, is_dir, exists, scandir)
class _TerminatingSelector:
def _select_from(self, parent_path, is_dir, exists, scandir):
yield parent_path
class _PreciseSelector(_Selector):
    """Selector for a literal (non-wildcard) pattern part."""
    def __init__(self, name, child_parts):
        self.name = name
        _Selector.__init__(self, child_parts)
    def _select_from(self, parent_path, is_dir, exists, scandir):
        # _try_except_permissionerror_iter is a module helper defined earlier
        # in this file; it swallows PermissionError by delegating to
        # except_iter.
        def try_iter():
            path = parent_path._make_child_relpath(self.name)
            if (is_dir if self.dironly else exists)(path):
                for p in self.successor._select_from(
                        path, is_dir, exists, scandir):
                    yield p
        def except_iter(exc):
            return
            yield
        for x in _try_except_permissionerror_iter(try_iter, except_iter):
            yield x
class _WildcardSelector(_Selector):
    """Selector for a single wildcard pattern part, matched with fnmatch."""
    def __init__(self, pat, child_parts):
        # fnmatch.translate() anchors the pattern, so .match() below checks
        # the whole name.
        self.pat = re.compile(fnmatch.translate(pat))
        _Selector.__init__(self, child_parts)
    def _select_from(self, parent_path, is_dir, exists, scandir):
        def try_iter():
            cf = parent_path._flavour.casefold
            entries = list(scandir(parent_path))
            for entry in entries:
                # Only descend into directories when more pattern parts follow.
                if not self.dironly or entry.is_dir():
                    name = entry.name
                    casefolded = cf(name)
                    if self.pat.match(casefolded):
                        path = parent_path._make_child_relpath(name)
                        for p in self.successor._select_from(
                                path, is_dir, exists, scandir):
                            yield p
        def except_iter(exc):
            return
            yield
        for x in _try_except_permissionerror_iter(try_iter, except_iter):
            yield x
class _RecursiveWildcardSelector(_Selector):
    """Selector for the '**' pattern part: matches this directory and all
    directories below it (symlinked directories are not followed)."""
    def __init__(self, pat, child_parts):
        # The '**' pattern itself carries no name information.
        _Selector.__init__(self, child_parts)
    def _iterate_directories(self, parent_path, is_dir, scandir):
        """Yield *parent_path* and, recursively, every non-symlink
        subdirectory below it."""
        yield parent_path
        def try_iter():
            entries = list(scandir(parent_path))
            for entry in entries:
                entry_is_dir = False
                try:
                    entry_is_dir = entry.is_dir()
                except OSError as e:
                    if not _ignore_error(e):
                        raise
                if entry_is_dir and not entry.is_symlink():
                    path = parent_path._make_child_relpath(entry.name)
                    for p in self._iterate_directories(path, is_dir, scandir):
                        yield p
        def except_iter(exc):
            return
            yield
        for x in _try_except_permissionerror_iter(try_iter, except_iter):
            yield x
    def _select_from(self, parent_path, is_dir, exists, scandir):
        def try_iter():
            # De-duplicate results, since several starting points can reach
            # the same path.
            yielded = set()
            try:
                successor_select = self.successor._select_from
                for starting_point in self._iterate_directories(
                        parent_path, is_dir, scandir):
                    for p in successor_select(
                            starting_point, is_dir, exists, scandir):
                        if p not in yielded:
                            yield p
                            yielded.add(p)
            finally:
                yielded.clear()
        def except_iter(exc):
            return
            yield
        for x in _try_except_permissionerror_iter(try_iter, except_iter):
            yield x
class _PathParents(Sequence):
    """This object provides sequence-like access to the logical ancestors
    of a path.  Don't try to construct it yourself."""
    __slots__ = ('_pathcls', '_drv', '_root', '_parts')
    def __init__(self, path):
        # We don't store the instance to avoid reference cycles
        self._pathcls = type(path)
        self._drv = path._drv
        self._root = path._root
        self._parts = path._parts
    def __len__(self):
        # The anchor part (drive/root), if any, is not its own parent.
        if self._drv or self._root:
            return len(self._parts) - 1
        else:
            return len(self._parts)
    def __getitem__(self, idx):
        if idx < 0 or idx >= len(self):
            raise IndexError(idx)
        # parents[0] is the immediate parent, parents[-1] the outermost.
        return self._pathcls._from_parsed_parts(self._drv, self._root,
                                                self._parts[:-idx - 1])
    def __repr__(self):
        return "<{0}.parents>".format(self._pathcls.__name__)
class PurePath(object):
    """Base class for manipulating paths without I/O.

    PurePath represents a filesystem path and offers operations which
    don't imply any actual filesystem I/O.  Depending on your system,
    instantiating a PurePath will return either a PurePosixPath or a
    PureWindowsPath object.  You can also instantiate either of these
    classes directly, regardless of your system.
    """
    __slots__ = (
        '_drv', '_root', '_parts',
        '_str', '_hash', '_pparts', '_cached_cparts',
    )
    def __new__(cls, *args):
        """Construct a PurePath from one or several strings and or existing
        PurePath objects.  The strings and path objects are combined so as
        to yield a canonicalized path, which is incorporated into the
        new PurePath object.
        """
        if cls is PurePath:
            cls = PureWindowsPath if os.name == 'nt' else PurePosixPath
        return cls._from_parts(args)
    def __reduce__(self):
        # Using the parts tuple helps share interned path parts
        # when pickling related paths.
        return (self.__class__, tuple(self._parts))
    @classmethod
    def _parse_args(cls, args):
        # This is useful when you don't want to create an instance, just
        # canonicalize some constructor arguments.
        parts = []
        for a in args:
            if isinstance(a, PurePath):
                parts += a._parts
            else:
                if sys.version_info >= (3, 6):
                    a = os.fspath(a)
                else:
                    # Duck typing for older Python versions without os.fspath.
                    if hasattr(a, "__fspath__"):
                        a = a.__fspath__()
                if isinstance(a, str):
                    # Force-cast str subclasses to str (issue #21127)
                    parts.append(str(a))
                elif six.PY2 and isinstance(a, six.text_type):
                    # Unicode on Python 2: encode with the filesystem encoding.
                    parts.append(a.encode(
                        sys.getfilesystemencoding() or "ascii"))
                else:
                    raise TypeError(
                        "argument should be a str object or an os.PathLike "
                        "object returning str, not %r"
                        % type(a))
        return cls._flavour.parse_parts(parts)
    @classmethod
    def _from_parts(cls, args, init=True):
        # We need to call _parse_args on the instance, so as to get the
        # right flavour.
        self = object.__new__(cls)
        drv, root, parts = self._parse_args(args)
        self._drv = drv
        self._root = root
        self._parts = parts
        if init:
            self._init()
        return self
    @classmethod
    def _from_parsed_parts(cls, drv, root, parts, init=True):
        # Fast constructor bypassing argument parsing (parts already split).
        self = object.__new__(cls)
        self._drv = drv
        self._root = root
        self._parts = parts
        if init:
            self._init()
        return self
    @classmethod
    def _format_parsed_parts(cls, drv, root, parts):
        # Join parsed parts back into a string; parts[0] holds the anchor.
        if drv or root:
            return drv + root + cls._flavour.join(parts[1:])
        else:
            return cls._flavour.join(parts)
    def _init(self):
        # Overridden in concrete Path
        pass
    def _make_child(self, args):
        drv, root, parts = self._parse_args(args)
        drv, root, parts = self._flavour.join_parsed_parts(
            self._drv, self._root, self._parts, drv, root, parts)
        return self._from_parsed_parts(drv, root, parts)
    def __str__(self):
        """Return the string representation of the path, suitable for
        passing to system calls."""
        try:
            return self._str
        except AttributeError:
            self._str = self._format_parsed_parts(self._drv, self._root,
                                                  self._parts) or '.'
            return self._str
    def __fspath__(self):
        return str(self)
    def as_posix(self):
        """Return the string representation of the path with forward (/)
        slashes."""
        f = self._flavour
        return str(self).replace(f.sep, '/')
    def __bytes__(self):
        """Return the bytes representation of the path.  This is only
        recommended to use under Unix."""
        if sys.version_info < (3, 2):
            raise NotImplementedError("needs Python 3.2 or later")
        return os.fsencode(str(self))
    def __repr__(self):
        return "{0}({1!r})".format(self.__class__.__name__, self.as_posix())
    def as_uri(self):
        """Return the path as a 'file' URI."""
        if not self.is_absolute():
            raise ValueError("relative path can't be expressed as a file URI")
        return self._flavour.make_uri(self)
    @property
    def _cparts(self):
        # Cached casefolded parts, for hashing and comparison
        try:
            return self._cached_cparts
        except AttributeError:
            self._cached_cparts = self._flavour.casefold_parts(self._parts)
            return self._cached_cparts
    def __eq__(self, other):
        # Paths of different flavours never compare equal.
        if not isinstance(other, PurePath):
            return NotImplemented
        return (
            self._cparts == other._cparts
            and self._flavour is other._flavour)
    def __ne__(self, other):
        return not self == other
    def __hash__(self):
        try:
            return self._hash
        except AttributeError:
            self._hash = hash(tuple(self._cparts))
            return self._hash
    def __lt__(self, other):
        if (not isinstance(other, PurePath)
                or self._flavour is not other._flavour):
            return NotImplemented
        return self._cparts < other._cparts
    def __le__(self, other):
        if (not isinstance(other, PurePath)
                or self._flavour is not other._flavour):
            return NotImplemented
        return self._cparts <= other._cparts
    def __gt__(self, other):
        if (not isinstance(other, PurePath)
                or self._flavour is not other._flavour):
            return NotImplemented
        return self._cparts > other._cparts
    def __ge__(self, other):
        if (not isinstance(other, PurePath)
                or self._flavour is not other._flavour):
            return NotImplemented
        return self._cparts >= other._cparts
    drive = property(attrgetter('_drv'),
                     doc="""The drive prefix (letter or UNC path), if any.""")
    root = property(attrgetter('_root'),
                    doc="""The root of the path, if any.""")
    @property
    def anchor(self):
        """The concatenation of the drive and root, or ''."""
        anchor = self._drv + self._root
        return anchor
    @property
    def name(self):
        """The final path component, if any."""
        parts = self._parts
        if len(parts) == (1 if (self._drv or self._root) else 0):
            return ''
        return parts[-1]
    @property
    def suffix(self):
        """The final component's last suffix, if any."""
        name = self.name
        i = name.rfind('.')
        if 0 < i < len(name) - 1:
            return name[i:]
        else:
            return ''
    @property
    def suffixes(self):
        """A list of the final component's suffixes, if any."""
        name = self.name
        if name.endswith('.'):
            return []
        name = name.lstrip('.')
        return ['.' + suffix for suffix in name.split('.')[1:]]
    @property
    def stem(self):
        """The final path component, minus its last suffix."""
        name = self.name
        i = name.rfind('.')
        if 0 < i < len(name) - 1:
            return name[:i]
        else:
            return name
    def with_name(self, name):
        """Return a new path with the file name changed."""
        if not self.name:
            raise ValueError("%r has an empty name" % (self,))
        drv, root, parts = self._flavour.parse_parts((name,))
        if (not name or name[-1] in [self._flavour.sep, self._flavour.altsep]
                or drv or root or len(parts) != 1):
            raise ValueError("Invalid name %r" % (name))
        return self._from_parsed_parts(self._drv, self._root,
                                       self._parts[:-1] + [name])
    def with_suffix(self, suffix):
        """Return a new path with the file suffix changed (or added, if
        none)."""
        # XXX if suffix is None, should the current suffix be removed?
        f = self._flavour
        if f.sep in suffix or f.altsep and f.altsep in suffix:
            raise ValueError("Invalid suffix %r" % (suffix))
        if suffix and not suffix.startswith('.') or suffix == '.':
            raise ValueError("Invalid suffix %r" % (suffix))
        name = self.name
        if not name:
            raise ValueError("%r has an empty name" % (self,))
        old_suffix = self.suffix
        if not old_suffix:
            name = name + suffix
        else:
            name = name[:-len(old_suffix)] + suffix
        return self._from_parsed_parts(self._drv, self._root,
                                       self._parts[:-1] + [name])
    def relative_to(self, *other):
        """Return the relative path to another path identified by the passed
        arguments.  If the operation is not possible (because this is not
        a subpath of the other path), raise ValueError.
        """
        # For the purpose of this method, drive and root are considered
        # separate parts, i.e.:
        # Path('c:/').relative_to('c:') gives Path('/')
        # Path('c:/').relative_to('/') raise ValueError
        if not other:
            raise TypeError("need at least one argument")
        parts = self._parts
        drv = self._drv
        root = self._root
        if root:
            abs_parts = [drv, root] + parts[1:]
        else:
            abs_parts = parts
        to_drv, to_root, to_parts = self._parse_args(other)
        if to_root:
            to_abs_parts = [to_drv, to_root] + to_parts[1:]
        else:
            to_abs_parts = to_parts
        n = len(to_abs_parts)
        cf = self._flavour.casefold_parts
        if (root or drv) if n == 0 else cf(abs_parts[:n]) != cf(to_abs_parts):
            formatted = self._format_parsed_parts(to_drv, to_root, to_parts)
            raise ValueError("{0!r} does not start with {1!r}"
                             .format(str(self), str(formatted)))
        return self._from_parsed_parts('', root if n == 1 else '',
                                       abs_parts[n:])
    @property
    def parts(self):
        """An object providing sequence-like access to the components in the
        filesystem path."""
        # We cache the tuple to avoid building a new one each time .parts
        # is accessed. XXX is this necessary?
        try:
            return self._pparts
        except AttributeError:
            self._pparts = tuple(self._parts)
            return self._pparts
    def joinpath(self, *args):
        """Combine this path with one or several paths, by appending all
        arguments as separate path components."""
        return self._make_child(args)
    def __truediv__(self, key):
        return self._make_child((key,))
    def __rtruediv__(self, key):
        return self._from_parts([key] + self._parts)
    if six.PY2:
        # Python 2 spells the division operator differently.
        __div__ = __truediv__
        __rdiv__ = __rtruediv__
    @property
    def parent(self):
        """The logical parent of the path."""
        drv = self._drv
        root = self._root
        parts = self._parts
        if len(parts) == 1 and (drv or root):
            return self
        return self._from_parsed_parts(drv, root, parts[:-1])
    @property
    def parents(self):
        """A sequence of this path's logical parents."""
        return _PathParents(self)
    def is_absolute(self):
        """True if the path is absolute (has both a root and, if applicable,
        a drive)."""
        if not self._root:
            return False
        return not self._flavour.has_drv or bool(self._drv)
    def is_reserved(self):
        """Return True if the path contains one of the special names reserved
        by the system, if any."""
        return self._flavour.is_reserved(self._parts)
    def match(self, path_pattern):
        """Return True if this path matches the given pattern."""
        cf = self._flavour.casefold
        path_pattern = cf(path_pattern)
        drv, root, pat_parts = self._flavour.parse_parts((path_pattern,))
        if not pat_parts:
            raise ValueError("empty pattern")
        if drv and drv != cf(self._drv):
            return False
        if root and root != cf(self._root):
            return False
        parts = self._cparts
        if drv or root:
            # Anchored pattern: must match the whole path.
            if len(pat_parts) != len(parts):
                return False
            pat_parts = pat_parts[1:]
        elif len(pat_parts) > len(parts):
            return False
        # Relative patterns are matched against the trailing components.
        for part, pat in zip(reversed(parts), reversed(pat_parts)):
            if not fnmatch.fnmatchcase(part, pat):
                return False
        return True
# Can't subclass os.PathLike from PurePath and keep the constructor
# optimizations in PurePath._parse_args(); register virtually instead.
if sys.version_info >= (3, 6):
    os.PathLike.register(PurePath)
class PurePosixPath(PurePath):
    """PurePath subclass for non-Windows systems.

    On a POSIX system, instantiating a PurePath should return this object.
    However, you can also instantiate it directly on any system.
    """
    _flavour = _posix_flavour
    __slots__ = ()
class PureWindowsPath(PurePath):
    """PurePath subclass for Windows systems.

    On a Windows system, instantiating a PurePath should return this object.
    However, you can also instantiate it directly on any system.
    """
    _flavour = _windows_flavour
    __slots__ = ()
from .mate_attr_accessor import AttrAccessor
from .mate_hashes_methods import HashesMethods
from .mate_path_filters import PathFilters
from .mate_mutate_methods import MutateMethods
from .mate_tool_box import ToolBox
class Path(PurePath,
           AttrAccessor, HashesMethods, PathFilters, MutateMethods, ToolBox):
    """PurePath subclass that can make system calls.

    Path represents a filesystem path but, unlike PurePath, also offers
    methods to do system calls on path objects.  Depending on your system,
    instantiating a Path will return either a PosixPath or a WindowsPath
    object.  The extra mixin bases (AttrAccessor, HashesMethods, ...) come
    from the sibling ``mate_*`` modules imported above.
    """
    __slots__ = (
        '_accessor',
        '_closed',
    )
    def __new__(cls, *args, **kwargs):
        if cls is Path:
            cls = WindowsPath if os.name == 'nt' else PosixPath
        self = cls._from_parts(args, init=False)
        if not self._flavour.is_supported:
            raise NotImplementedError("cannot instantiate %r on your system"
                                      % (cls.__name__,))
        self._init()
        return self
    def _init(self,
              # Private non-constructor arguments
              template=None,
              ):
        self._closed = False
        if template is not None:
            self._accessor = template._accessor
        else:
            self._accessor = _normal_accessor
    def _make_child_relpath(self, part):
        # This is an optimization used for dir walking.  `part` must be
        # a single part relative to this path.
        parts = self._parts + [part]
        return self._from_parsed_parts(self._drv, self._root, parts)
    def __enter__(self):
        if self._closed:
            self._raise_closed()
        return self
    def __exit__(self, t, v, tb):
        self._closed = True
    def _raise_closed(self):
        raise ValueError("I/O operation on closed path")
    def _opener(self, name, flags, mode=0o666):
        # A stub for the opener argument to built-in open()
        return self._accessor.open(self, flags, mode)
    def _raw_open(self, flags, mode=0o777):
        """Open the file pointed by this path and return a file descriptor,
        as os.open() does."""
        if self._closed:
            self._raise_closed()
        return self._accessor.open(self, flags, mode)
    # Public API
    @classmethod
    def cwd(cls):
        """Return a new path pointing to the current working directory."""
        return cls(os.getcwd())
    @classmethod
    def home(cls):
        """Return a new path pointing to the user's home directory."""
        return cls(cls()._flavour.gethomedir(None))
    def samefile(self, other_path):
        """Return whether other_path is the same or not as this file
        (as returned by os.path.samefile())."""
        if hasattr(os.path, "samestat"):
            st = self.stat()
            try:
                other_st = other_path.stat()
            except AttributeError:
                # other_path is a plain string path.
                other_st = os.stat(other_path)
            return os.path.samestat(st, other_st)
        else:
            # Fallback without os.path.samestat (old Windows Pythons);
            # _win32_get_unique_path_id is defined earlier in this module.
            filename1 = six.text_type(self)
            filename2 = six.text_type(other_path)
            st1 = _win32_get_unique_path_id(filename1)
            st2 = _win32_get_unique_path_id(filename2)
            return st1 == st2
    def iterdir(self):
        """Iterate over the files in this directory.  Does not yield any
        result for the special paths '.' and '..'."""
        if self._closed:
            self._raise_closed()
        for name in self._accessor.listdir(self):
            if name in ('.', '..'):
                # Yielding a path object for these makes little sense
                continue
            yield self._make_child_relpath(name)
            if self._closed:
                self._raise_closed()
    def glob(self, pattern):
        """Iterate over this subtree and yield all existing files (of any
        kind, including directories) matching the given relative pattern."""
        if not pattern:
            raise ValueError("Unacceptable pattern: {0!r}".format(pattern))
        pattern = self._flavour.casefold(pattern)
        drv, root, pattern_parts = self._flavour.parse_parts((pattern,))
        if drv or root:
            raise NotImplementedError("Non-relative patterns are unsupported")
        selector = _make_selector(tuple(pattern_parts))
        for p in selector.select_from(self):
            yield p
    def rglob(self, pattern):
        """Recursively yield all existing files (of any kind, including
        directories) matching the given relative pattern, anywhere in
        this subtree."""
        pattern = self._flavour.casefold(pattern)
        drv, root, pattern_parts = self._flavour.parse_parts((pattern,))
        if drv or root:
            raise NotImplementedError("Non-relative patterns are unsupported")
        selector = _make_selector(("**",) + tuple(pattern_parts))
        for p in selector.select_from(self):
            yield p
    def absolute(self):
        """Return an absolute version of this path.  This function works
        even if the path doesn't point to anything.

        No normalization is done, i.e. all '.' and '..' will be kept along.
        Use resolve() to get the canonical path to a file.
        """
        # XXX untested yet!
        if self._closed:
            self._raise_closed()
        if self.is_absolute():
            return self
        obj = self._from_parts([os.getcwd()] + self._parts, init=False)
        obj._init(template=self)
        return obj
    def resolve(self, strict=False):
        """Make the path absolute, resolving all symlinks on the way and also
        normalizing it (for example turning slashes into backslashes under
        Windows)."""
        if self._closed:
            self._raise_closed()
        s = self._flavour.resolve(self, strict=strict)
        if s is None:
            # No symlink resolution => for backwards compatibility,
            # raise an error if the path doesn't exist or is forbidden.
            def _try_func():
                self.stat()
            def _exc_func(exc):
                pass
            _try_except_filenotfounderror(_try_func, _exc_func)
            s = str(self.absolute())
        else:
            s = str(s)
        # Now we have no symlinks in the path, it's safe to normalize it.
        normed = self._flavour.pathmod.normpath(s)
        obj = self._from_parts((normed,), init=False)
        obj._init(template=self)
        return obj
    def stat(self):
        """Return the result of the stat() system call on this path, like
        os.stat() does."""
        return self._accessor.stat(self)
    def owner(self):
        """Return the login name of the file owner."""
        import pwd
        return pwd.getpwuid(self.stat().st_uid).pw_name
    def group(self):
        """Return the group name of the file gid."""
        import grp
        return grp.getgrgid(self.stat().st_gid).gr_name
    def open(self, mode='r', buffering=-1, encoding=None,
             errors=None, newline=None):
        """Open the file pointed by this path and return a file object, as
        the built-in open() function does."""
        if self._closed:
            self._raise_closed()
        if sys.version_info >= (3, 3):
            return io.open(
                str(self), mode, buffering, encoding, errors, newline,
                opener=self._opener)
        else:
            return io.open(str(self), mode, buffering,
                           encoding, errors, newline)
    def read_bytes(self):
        """Open the file in bytes mode, read it, and close the file."""
        with self.open(mode='rb') as f:
            return f.read()
    def read_text(self, encoding=None, errors=None):
        """Open the file in text mode, read it, and close the file."""
        with self.open(mode='r', encoding=encoding, errors=errors) as f:
            return f.read()
    def write_bytes(self, data):
        """Open the file in bytes mode, write to it, and close the file."""
        if not isinstance(data, six.binary_type):
            raise TypeError(
                'data must be %s, not %s' %
                (six.binary_type.__name__, data.__class__.__name__))
        with self.open(mode='wb') as f:
            return f.write(data)
    def write_text(self, data, encoding="utf-8", errors=None):
        """Open the file in text mode, write to it, and close the file.

        Note: unlike stdlib pathlib, the default encoding here is "utf-8".
        """
        if not isinstance(data, six.text_type):
            raise TypeError(
                'data must be %s, not %s' %
                (six.text_type.__name__, data.__class__.__name__))
        with self.open(mode='w', encoding=encoding, errors=errors) as f:
            return f.write(data)
    def atomic_write_bytes(self, data, overwrite=False):
        """Atomically write bytes to this file.

        NOTE(review): ``atomic_write`` and ``self.abspath`` are not defined
        in this excerpt (presumably the ``atomicwrites`` helper and an
        AttrAccessor property) — confirm.
        """
        with atomic_write(self.abspath, mode="wb", overwrite=overwrite) as f:
            f.write(data)
    def atomic_write_text(self, data, encoding="utf-8", overwrite=False):
        """Atomically write text to this file, encoded with *encoding*."""
        with atomic_write(self.abspath, mode="wb", overwrite=overwrite) as f:
            f.write(data.encode(encoding))
    def touch(self, mode=0o666, exist_ok=True):
        """Create this file with the given access mode, if it doesn't
        exist."""
        if self._closed:
            self._raise_closed()
        if exist_ok:
            # First try to bump modification time
            # Implementation note: GNU touch uses the UTIME_NOW option of
            # the utimensat() / futimens() functions.
            try:
                self._accessor.utime(self, None)
            except OSError:
                # Avoid exception chaining
                pass
            else:
                return
        flags = os.O_CREAT | os.O_WRONLY
        if not exist_ok:
            flags |= os.O_EXCL
        fd = self._raw_open(flags, mode)
        os.close(fd)
    def mkdir(self, mode=0o777, parents=False, exist_ok=False):
        """Create a new directory at this given path."""
        if self._closed:
            self._raise_closed()
        def _try_func():
            self._accessor.mkdir(self, mode)
        def _exc_func(exc):
            # FileNotFoundError: create missing parents, then retry.
            if not parents or self.parent == self:
                raise exc
            self.parent.mkdir(parents=True, exist_ok=True)
            self.mkdir(mode, parents=False, exist_ok=exist_ok)
        try:
            _try_except_filenotfounderror(_try_func, _exc_func)
        except OSError:
            # Cannot rely on checking for EEXIST, since the operating system
            # could give priority to other errors like EACCES or EROFS
            if not exist_ok or not self.is_dir():
                raise
    def chmod(self, mode):
        """Change the permissions of the path, like os.chmod()."""
        if self._closed:
            self._raise_closed()
        self._accessor.chmod(self, mode)
    def lchmod(self, mode):
        """Like chmod(), except if the path points to a symlink, the
        symlink's permissions are changed, rather than its target's."""
        if self._closed:
            self._raise_closed()
        self._accessor.lchmod(self, mode)
    def unlink(self):
        """Remove this file or link.  If the path is a directory, use
        rmdir() instead."""
        if self._closed:
            self._raise_closed()
        self._accessor.unlink(self)
    def rmdir(self):
        """Remove this directory.  The directory must be empty."""
        if self._closed:
            self._raise_closed()
        self._accessor.rmdir(self)
    def lstat(self):
        """Like stat(), except if the path points to a symlink, the symlink's
        status information is returned, rather than its target's."""
        if self._closed:
            self._raise_closed()
        return self._accessor.lstat(self)
    def rename(self, target):
        """Rename this path to the given path."""
        if self._closed:
            self._raise_closed()
        self._accessor.rename(self, target)
    def replace(self, target):
        """Rename this path to the given path, clobbering the existing
        destination if it exists."""
        if sys.version_info < (3, 3):
            raise NotImplementedError("replace() is only available "
                                      "with Python 3.3 and later")
        if self._closed:
            self._raise_closed()
        self._accessor.replace(self, target)
    def symlink_to(self, target, target_is_directory=False):
        """Make this path a symlink pointing to the given path.
        Note the order of arguments (self, target) is the reverse of
        os.symlink's."""
        if self._closed:
            self._raise_closed()
        self._accessor.symlink(target, self, target_is_directory)
    # Convenience functions for querying the stat results
    def exists(self):
        """Whether this path exists."""
        try:
            self.stat()
        except OSError as e:
            if not _ignore_error(e):
                raise
            return False
        except ValueError:
            # Non-encodable path
            return False
        return True
    def is_dir(self):
        """Whether this path is a directory."""
        try:
            return S_ISDIR(self.stat().st_mode)
        except OSError as e:
            if not _ignore_error(e):
                raise
            # Path doesn't exist or is a broken symlink
            return False
        except ValueError:
            # Non-encodable path
            return False
    def is_file(self):
        """Whether this path is a regular file (also True for symlinks
        pointing to regular files)."""
        try:
            return S_ISREG(self.stat().st_mode)
        except OSError as e:
            if not _ignore_error(e):
                raise
            # Path doesn't exist or is a broken symlink
            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
            return False
        except ValueError:
            # Non-encodable path
            return False
    def is_mount(self):
        """Check if this path is a POSIX mount point."""
        # Need to exist and be a dir
        if not self.exists() or not self.is_dir():
            return False
        parent = Path(self.parent)
        try:
            parent_dev = parent.stat().st_dev
        except OSError:
            return False
        # Mount point if the device changes, or (e.g. '/') if parent
        # resolves to the same inode.
        dev = self.stat().st_dev
        if dev != parent_dev:
            return True
        ino = self.stat().st_ino
        parent_ino = parent.stat().st_ino
        return ino == parent_ino
    def is_symlink(self):
        """Whether this path is a symbolic link."""
        try:
            return S_ISLNK(self.lstat().st_mode)
        except OSError as e:
            if not _ignore_error(e):
                raise
            # Path doesn't exist
            return False
        except ValueError:
            # Non-encodable path
            return False
    def is_block_device(self):
        """Whether this path is a block device."""
        try:
            return S_ISBLK(self.stat().st_mode)
        except OSError as e:
            if not _ignore_error(e):
                raise
            # Path doesn't exist or is a broken symlink
            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
            return False
        except ValueError:
            # Non-encodable path
            return False
    def is_char_device(self):
        """Whether this path is a character device."""
        try:
            return S_ISCHR(self.stat().st_mode)
        except OSError as e:
            if not _ignore_error(e):
                raise
            # Path doesn't exist or is a broken symlink
            return False
        except ValueError:
            # Non-encodable path
            return False
    def is_fifo(self):
        """Whether this path is a FIFO."""
        try:
            return S_ISFIFO(self.stat().st_mode)
        except OSError as e:
            if not _ignore_error(e):
                raise
            # Path doesn't exist or is a broken symlink
            # (see https://bitbucket.org/pitrou/pathlib/issue/12/)
            return False
        except ValueError:
            # Non-encodable path
            return False
    def is_socket(self):
        """Whether this path is a socket."""
        try:
            return S_ISSOCK(self.stat().st_mode)
        except OSError as e:
            if not _ignore_error(e):
                raise
            # Path doesn't exist or is a broken symlink
            return False
        except ValueError:
            # Non-encodable path
            return False
    def expanduser(self):
        """Return a new path with expanded ~ and ~user constructs
        (as returned by os.path.expanduser)."""
        if (not (self._drv or self._root)
                and self._parts and self._parts[0][:1] == '~'):
            homedir = self._flavour.gethomedir(self._parts[0][1:])
            return self._from_parts([homedir] + self._parts[1:])
        return self
class PosixPath(Path, PurePosixPath):
    """Path subclass for non-Windows systems.

    On a POSIX system, instantiating a Path returns this object.
    """
    __slots__ = ()
class WindowsPath(Path, PureWindowsPath):
    """Path subclass for Windows systems.

    On a Windows system, instantiating a Path returns this object.
    """
    __slots__ = ()
    def owner(self):
        # The pwd-based implementation in Path is POSIX-only.
        raise NotImplementedError("Path.owner() is unsupported on this system")
    def group(self):
        # The grp-based implementation in Path is POSIX-only.
        raise NotImplementedError("Path.group() is unsupported on this system")
    def is_mount(self):
        # Mount-point detection via st_dev/st_ino is POSIX-only.
        raise NotImplementedError(
            "Path.is_mount() is unsupported on this system")
| true | true |
1c34e1be03260481dd35813ec70a53e1ca6f7892 | 14,217 | py | Python | src/fleetctrl/PoolingIRSOnly.py | TUM-VT/FleetPy | 596bcec9fbd2fe52206079641d549bf028d2879d | [
"MIT"
] | 19 | 2021-12-11T17:17:00.000Z | 2022-03-24T07:27:06.000Z | src/fleetctrl/PoolingIRSOnly.py | TUM-VT/FleetPy | 596bcec9fbd2fe52206079641d549bf028d2879d | [
"MIT"
] | null | null | null | src/fleetctrl/PoolingIRSOnly.py | TUM-VT/FleetPy | 596bcec9fbd2fe52206079641d549bf028d2879d | [
"MIT"
] | 1 | 2021-12-21T11:20:39.000Z | 2021-12-21T11:20:39.000Z | import logging
import time
from src.simulation.Offers import TravellerOffer
from src.fleetctrl.FleetControlBase import FleetControlBase
from src.fleetctrl.planning.PlanRequest import PlanRequest
from src.fleetctrl.pooling.objectives import return_pooling_objective_function
from src.fleetctrl.pooling.immediate.insertion import insertion_with_heuristics
from src.misc.globals import *
# Module-level logger for this fleet control strategy.
LOG = logging.getLogger(__name__)
# NOTE(review): LARGE_INT is not referenced within this excerpt; presumably a
# large sentinel/upper-bound value used elsewhere in the module — confirm.
LARGE_INT = 100000
class PoolingInsertionHeuristicOnly(FleetControlBase):
"""This class applies an Insertion Heuristic, in which new requests are inserted in the currently assigned
vehicle plans and the insertion with the best control objective value is selected.
IMPORTANT NOTE:
Both the new and the previously assigned plan are stored and await an instant response of the request. Therefore,
this fleet control module is only consistent for the ImmediateOfferSimulation class.
"""
# TODO # clarify dependency to fleet simulation module
    def __init__(self, op_id, operator_attributes, list_vehicles, routing_engine, zone_system, scenario_parameters,
                 dir_names, charging_management=None):
        """The specific attributes for the fleet control module are initialized. Strategy specific attributes are
        introduced in the children classes.

        :param op_id: operator id
        :type op_id: int
        :param operator_attributes: dictionary with keys from globals and respective values
        :type operator_attributes: dict
        :param list_vehicles: simulation vehicles; their assigned plans should be instances of the VehicleRouteLeg class
        :type list_vehicles: list
        :param routing_engine: routing engine
        :type routing_engine: Network
        :param scenario_parameters: access to all scenario parameters (if necessary)
        :type scenario_parameters: dict
        :param dir_names: directories for output and input
        :type dir_names: dict
        """
        super().__init__(op_id, operator_attributes, list_vehicles, routing_engine, zone_system, scenario_parameters,
                         dir_names, charging_management=charging_management)
        # TODO # make standard in FleetControlBase
        self.rid_to_assigned_vid = {} # rid -> vid
        self.pos_veh_dict = {} # pos -> list_veh
        # Control objective used to evaluate vehicle plans during insertion.
        self.vr_ctrl_f = return_pooling_objective_function(operator_attributes[G_OP_VR_CTRL_F])
        self.sim_time = scenario_parameters[G_SIM_START_TIME]
        # others # TODO # standardize IRS assignment memory?
        # Tentative plans awaiting customer confirmation (immediate response scheme).
        self.tmp_assignment = {} # rid -> VehiclePlan
        # Register the per-time-step CPU-time record for request handling.
        self._init_dynamic_fleetcontrol_output_key(G_FCTRL_CT_RQU)
    def receive_status_update(self, vid, simulation_time, list_finished_VRL, force_update=True):
        """This method can be used to update plans and trigger processes whenever a simulation vehicle finished some
        VehicleRouteLegs.

        :param vid: vehicle id
        :type vid: int
        :param simulation_time: current simulation time
        :type simulation_time: float
        :param list_finished_VRL: list of VehicleRouteLeg objects
        :type list_finished_VRL: list
        :param force_update: indicates if also current vehicle plan feasibilities have to be checked
        :type force_update: bool
        """
        veh_obj = self.sim_vehicles[vid]
        # the vehicle plans should be up to date from assignments of previous time steps
        if list_finished_VRL or force_update:
            self.veh_plans[vid].update_plan(veh_obj, simulation_time, self.routing_engine, list_finished_VRL)
            # Re-evaluate the plan's control objective after the update.
            upd_utility_val = self.compute_VehiclePlan_utility(simulation_time, veh_obj, self.veh_plans[vid])
            self.veh_plans[vid].set_utility(upd_utility_val)
        # NOTE(review): entries are appended on every status update and never
        # removed here — presumably pos_veh_dict is rebuilt/cleared elsewhere
        # each time step; confirm to rule out unbounded growth.
        try:
            self.pos_veh_dict[veh_obj.pos].append(veh_obj)
        except KeyError:
            self.pos_veh_dict[veh_obj.pos] = [veh_obj]
        LOG.debug(f"veh {veh_obj} | after status update: {self.veh_plans[vid]}")
    def user_request(self, rq, sim_time):
        """This method is triggered for a new incoming request. It generally adds the rq to the database. It has to
        return an offer to the user. This operator class only works with immediate responses and therefore either
        sends an offer or a rejection.

        :param rq: request object containing all request information
        :type rq: RequestDesign
        :param sim_time: current simulation time
        :type sim_time: float
        :return: offer
        :rtype: TravellerOffer
        """
        t0 = time.perf_counter()
        LOG.debug(f"Incoming request {rq.__dict__} at time {sim_time}")
        self.sim_time = sim_time
        prq = PlanRequest(rq, self.routing_engine, min_wait_time=self.min_wait_time,
                          max_wait_time=self.max_wait_time,
                          max_detour_time_factor=self.max_dtf, max_constant_detour_time=self.max_cdt,
                          add_constant_detour_time=self.add_cdt, min_detour_time_window=self.min_dtw,
                          boarding_time=self.const_bt)
        rid_struct = rq.get_rid_struct()
        self.rq_dict[rid_struct] = prq
        # Degenerate trip (origin == destination): reject immediately.
        # NOTE(review): this early return skips the CPU-time bookkeeping at
        # the bottom of the method, unlike all other branches — confirm
        # whether that is intended.
        if prq.o_pos == prq.d_pos:
            LOG.debug(f"automatic decline for rid {rid_struct}!")
            self._create_rejection(prq, sim_time)
            return
        o_pos, t_pu_earliest, t_pu_latest = prq.get_o_stop_info()
        if t_pu_earliest - sim_time > self.opt_horizon:
            # Pickup lies beyond the optimization horizon: handle as reservation.
            self.reservation_module.add_reservation_request(prq, sim_time)
            offer = self.reservation_module.return_reservation_offer(prq.get_rid_struct(), sim_time)
            LOG.debug(f"reservation offer for rid {rid_struct} : {offer}")
        else:
            # Immediate request: try to insert into currently assigned plans.
            list_tuples = insertion_with_heuristics(sim_time, prq, self, force_feasible_assignment=True)
            if len(list_tuples) > 0:
                # Best insertion (vehicle id, plan, objective delta) comes first.
                (vid, vehplan, delta_cfv) = list_tuples[0]
                # Store tentatively until the customer confirms or cancels.
                self.tmp_assignment[rid_struct] = vehplan
                offer = self._create_user_offer(prq, sim_time, vehplan)
                LOG.debug(f"new offer for rid {rid_struct} : {offer}")
            else:
                LOG.debug(f"rejection for rid {rid_struct}")
                self._create_rejection(prq, sim_time)
        # record cpu time
        dt = round(time.perf_counter() - t0, 5)
        old_dt = self._get_current_dynamic_fleetcontrol_value(sim_time, G_FCTRL_CT_RQU)
        if old_dt is None:
            new_dt = dt
        else:
            new_dt = old_dt + dt
        output_dict = {G_FCTRL_CT_RQU: new_dt}
        self._add_to_dynamic_fleetcontrol_output(sim_time, output_dict)
    def user_confirms_booking(self, rid, simulation_time):
        """This method is used to confirm a customer booking. This can trigger some database processes.

        :param rid: request id
        :type rid: int
        :param simulation_time: current simulation time
        :type simulation_time: float
        """
        super().user_confirms_booking(rid, simulation_time)
        LOG.debug(f"user confirms booking {rid} at {simulation_time}")
        prq = self.rq_dict[rid]
        if prq.get_reservation_flag():
            self.reservation_module.user_confirms_booking(rid, simulation_time)
        else:
            # Promote the tentative insertion plan to the vehicle's assigned plan.
            new_vehicle_plan = self.tmp_assignment[rid]
            vid = new_vehicle_plan.vid
            veh_obj = self.sim_vehicles[vid]
            self.assign_vehicle_plan(veh_obj, new_vehicle_plan, simulation_time)
            del self.tmp_assignment[rid]
def user_cancels_request(self, rid, simulation_time):
    """Confirm a customer cancellation and remove all data kept for the request.

    :param rid: request id
    :type rid: int
    :param simulation_time: current simulation time
    :type simulation_time: float
    """
    LOG.debug(f"user cancels request {rid} at {simulation_time}")
    if self.rq_dict[rid].get_reservation_flag():
        # reservation requests are managed by the reservation module
        self.reservation_module.user_cancels_request(rid, simulation_time)
    elif self.tmp_assignment.get(rid):
        # drop the tentative vehicle plan created when the offer was made
        del self.tmp_assignment[rid]
    # forget the request itself in any case
    del self.rq_dict[rid]
def acknowledge_boarding(self, rid, vid, simulation_time):
    """This method can trigger some database processes whenever a passenger is starting to board a vehicle.

    :param rid: request id
    :type rid: int
    :param vid: vehicle id
    :type vid: int
    :param simulation_time: current simulation time
    :type simulation_time: float
    """
    LOG.debug(f"acknowledge boarding {rid} in {vid} at {simulation_time}")
    # record the picking-up vehicle and the pickup time on the plan request
    self.rq_dict[rid].set_pickup(vid, simulation_time)
def acknowledge_alighting(self, rid, vid, simulation_time):
    """This method can trigger some database processes whenever a passenger is finishing to alight a vehicle.

    :param rid: request id
    :type rid: int
    :param vid: vehicle id
    :type vid: int
    :param simulation_time: current simulation time
    :type simulation_time: float
    """
    LOG.debug(f"acknowledge alighting {rid} from {vid} at {simulation_time}")
    # the request is fully served -> drop it from all bookkeeping structures
    del self.rq_dict[rid]
    del self.rid_to_assigned_vid[rid]
def _prq_from_reservation_to_immediate(self, rid, sim_time):
    """This method is triggered when a reservation request becomes an immediate request.
    All database relevant methods can be triggered from here.

    NOTE(review): the rid argument is not used here; all reserved requests whose
    earliest pickup now falls within the optimisation horizon are activated instead.

    :param rid: request id
    :param sim_time: current simulation time
    :return: None
    """
    # reserved_base_rids is scanned in order of earliest pickup time (epa); the first
    # entry beyond the horizon terminates the scan since later ones are even further out
    for base_rid, epa in sorted(self.reserved_base_rids.items(), key=lambda x: x[1]):
        if epa - sim_time > self.opt_horizon:
            break
        else:
            LOG.debug(f"activate {base_rid} with epa {epa} for global optimisation at time {sim_time}!")
            # safe deletion: we iterate over a sorted copy, not the dict itself
            del self.reserved_base_rids[base_rid]
def _call_time_trigger_request_batch(self, simulation_time):
    """This method can be used to perform time-triggered proccesses, e.g. the optimization of the current
    assignments of simulation vehicles of the fleet.

    :param simulation_time: current simulation time
    :type simulation_time: float
    """
    self.sim_time = simulation_time
    self.pos_veh_dict = {}  # pos -> list_veh; rebuilt by subsequent status updates
def compute_VehiclePlan_utility(self, simulation_time, veh_obj, vehicle_plan):
    """This method computes the utility of a given plan and returns the value.

    :param simulation_time: current simulation time
    :type simulation_time: float
    :param veh_obj: vehicle object
    :type veh_obj: SimulationVehicle
    :param vehicle_plan: vehicle plan in question
    :type vehicle_plan: VehiclePlan
    :return: utility of vehicle plan
    :rtype: float
    """
    # evaluate the configured control objective function for this plan
    objective = self.vr_ctrl_f
    return objective(
        simulation_time, veh_obj, vehicle_plan, self.rq_dict, self.routing_engine
    )
def _create_user_offer(self, prq, simulation_time, assigned_vehicle_plan=None, offer_dict_without_plan={}):
""" creating the offer for a requests
:param prq: plan request
:type prq: PlanRequest obj
:param simulation_time: current simulation time
:type simulation_time: int
:param assigned_vehicle_plan: vehicle plan of initial solution to serve this request
:type assigned_vehicle_plan: VehiclePlan None
:param offer_dict_without_plan: can be used to create an offer that is not derived from a vehicle plan
entries will be used to create/extend offer
:type offer_dict_without_plan: dict or None
:return: offer for request
:rtype: TravellerOffer
"""
if assigned_vehicle_plan is not None:
pu_time, do_time = assigned_vehicle_plan.pax_info.get(prq.get_rid_struct())
# offer = {G_OFFER_WAIT: pu_time - simulation_time, G_OFFER_DRIVE: do_time - pu_time,
# G_OFFER_FARE: int(prq.init_direct_td * self.dist_fare + self.base_fare)}
offer = TravellerOffer(prq.get_rid_struct(), self.op_id, pu_time - prq.rq_time, do_time - pu_time,
self._compute_fare(simulation_time, prq, assigned_vehicle_plan))
prq.set_service_offered(offer) # has to be called
else:
offer = self._create_rejection(prq, simulation_time)
return offer
def change_prq_time_constraints(self, sim_time, rid, new_lpt, new_ept=None):
    """This method should be called when the hard time constraints of a customer should be changed.
    It changes the PlanRequest attributes. Moreover, this method called on child classes should adapt the
    PlanStops of VehiclePlans containing this PlanRequest and recheck feasibility. The VehiclePlan method
    update_prq_hard_constraints() can be used for this purpose.

    :param sim_time: current simulation time
    :param rid: request id
    :param new_lpt: new latest pickup time, None is ignored
    :param new_ept: new earliest pickup time, None is ignored
    :return: None
    """
    LOG.debug("change time constraints for rid {}".format(rid))
    prq = self.rq_dict[rid]
    prq.set_new_pickup_time_constraint(new_lpt, new_earliest_pu_time=new_ept)
    ass_vid = self.rid_to_assigned_vid.get(rid)
    if ass_vid is not None:
        # propagate the new hard constraints into the currently assigned vehicle plan
        self.veh_plans[ass_vid].update_prq_hard_constraints(self.sim_vehicles[ass_vid], sim_time,
                                                            self.routing_engine, prq, new_lpt, new_ept=new_ept,
                                                            keep_feasible=True)
def assign_vehicle_plan(self, veh_obj, vehicle_plan, sim_time, force_assign=False, add_arg=None):
    """Assign a vehicle plan to a vehicle; delegates to the base-class implementation."""
    super().assign_vehicle_plan(veh_obj, vehicle_plan, sim_time, force_assign, add_arg)
def lock_current_vehicle_plan(self, vid):
    """Lock the currently assigned plan of vehicle vid; delegates to the base class."""
    super().lock_current_vehicle_plan(vid)
def _lock_vid_rid_pickup(self, sim_time, vid, rid):
    """Lock the pickup of request rid on vehicle vid; delegates to the base class."""
    super()._lock_vid_rid_pickup(sim_time, vid, rid)
| 48.688356 | 120 | 0.676866 | import logging
import time
from src.simulation.Offers import TravellerOffer
from src.fleetctrl.FleetControlBase import FleetControlBase
from src.fleetctrl.planning.PlanRequest import PlanRequest
from src.fleetctrl.pooling.objectives import return_pooling_objective_function
from src.fleetctrl.pooling.immediate.insertion import insertion_with_heuristics
from src.misc.globals import *
LOG = logging.getLogger(__name__)
LARGE_INT = 100000
class PoolingInsertionHeuristicOnly(FleetControlBase):
tes, list_vehicles, routing_engine, zone_system, scenario_parameters,
dir_names, charging_management=None):
super().__init__(op_id, operator_attributes, list_vehicles, routing_engine, zone_system, scenario_parameters,
dir_names, charging_management=charging_management)
{}
self.pos_veh_dict = {}
self.vr_ctrl_f = return_pooling_objective_function(operator_attributes[G_OP_VR_CTRL_F])
self.sim_time = scenario_parameters[G_SIM_START_TIME]
put_key(G_FCTRL_CT_RQU)
def receive_status_update(self, vid, simulation_time, list_finished_VRL, force_update=True):
veh_obj = self.sim_vehicles[vid]
if list_finished_VRL or force_update:
self.veh_plans[vid].update_plan(veh_obj, simulation_time, self.routing_engine, list_finished_VRL)
upd_utility_val = self.compute_VehiclePlan_utility(simulation_time, veh_obj, self.veh_plans[vid])
self.veh_plans[vid].set_utility(upd_utility_val)
try:
self.pos_veh_dict[veh_obj.pos].append(veh_obj)
except KeyError:
self.pos_veh_dict[veh_obj.pos] = [veh_obj]
LOG.debug(f"veh {veh_obj} | after status update: {self.veh_plans[vid]}")
def user_request(self, rq, sim_time):
t0 = time.perf_counter()
LOG.debug(f"Incoming request {rq.__dict__} at time {sim_time}")
self.sim_time = sim_time
prq = PlanRequest(rq, self.routing_engine, min_wait_time=self.min_wait_time,
max_wait_time=self.max_wait_time,
max_detour_time_factor=self.max_dtf, max_constant_detour_time=self.max_cdt,
add_constant_detour_time=self.add_cdt, min_detour_time_window=self.min_dtw,
boarding_time=self.const_bt)
rid_struct = rq.get_rid_struct()
self.rq_dict[rid_struct] = prq
if prq.o_pos == prq.d_pos:
LOG.debug(f"automatic decline for rid {rid_struct}!")
self._create_rejection(prq, sim_time)
return
o_pos, t_pu_earliest, t_pu_latest = prq.get_o_stop_info()
if t_pu_earliest - sim_time > self.opt_horizon:
self.reservation_module.add_reservation_request(prq, sim_time)
offer = self.reservation_module.return_reservation_offer(prq.get_rid_struct(), sim_time)
LOG.debug(f"reservation offer for rid {rid_struct} : {offer}")
else:
list_tuples = insertion_with_heuristics(sim_time, prq, self, force_feasible_assignment=True)
if len(list_tuples) > 0:
(vid, vehplan, delta_cfv) = list_tuples[0]
self.tmp_assignment[rid_struct] = vehplan
offer = self._create_user_offer(prq, sim_time, vehplan)
LOG.debug(f"new offer for rid {rid_struct} : {offer}")
else:
LOG.debug(f"rejection for rid {rid_struct}")
self._create_rejection(prq, sim_time)
dt = round(time.perf_counter() - t0, 5)
old_dt = self._get_current_dynamic_fleetcontrol_value(sim_time, G_FCTRL_CT_RQU)
if old_dt is None:
new_dt = dt
else:
new_dt = old_dt + dt
output_dict = {G_FCTRL_CT_RQU: new_dt}
self._add_to_dynamic_fleetcontrol_output(sim_time, output_dict)
def user_confirms_booking(self, rid, simulation_time):
super().user_confirms_booking(rid, simulation_time)
LOG.debug(f"user confirms booking {rid} at {simulation_time}")
prq = self.rq_dict[rid]
if prq.get_reservation_flag():
self.reservation_module.user_confirms_booking(rid, simulation_time)
else:
new_vehicle_plan = self.tmp_assignment[rid]
vid = new_vehicle_plan.vid
veh_obj = self.sim_vehicles[vid]
self.assign_vehicle_plan(veh_obj, new_vehicle_plan, simulation_time)
del self.tmp_assignment[rid]
def user_cancels_request(self, rid, simulation_time):
LOG.debug(f"user cancels request {rid} at {simulation_time}")
prq = self.rq_dict[rid]
if prq.get_reservation_flag():
self.reservation_module.user_cancels_request(rid, simulation_time)
else:
prev_assignment = self.tmp_assignment.get(rid)
if prev_assignment:
del self.tmp_assignment[rid]
del self.rq_dict[rid]
def acknowledge_boarding(self, rid, vid, simulation_time):
LOG.debug(f"acknowledge boarding {rid} in {vid} at {simulation_time}")
self.rq_dict[rid].set_pickup(vid, simulation_time)
def acknowledge_alighting(self, rid, vid, simulation_time):
LOG.debug(f"acknowledge alighting {rid} from {vid} at {simulation_time}")
del self.rq_dict[rid]
del self.rid_to_assigned_vid[rid]
def _prq_from_reservation_to_immediate(self, rid, sim_time):
for base_rid, epa in sorted(self.reserved_base_rids.items(), key=lambda x: x[1]):
if epa - sim_time > self.opt_horizon:
break
else:
LOG.debug(f"activate {base_rid} with epa {epa} for global optimisation at time {sim_time}!")
del self.reserved_base_rids[base_rid]
def _call_time_trigger_request_batch(self, simulation_time):
self.sim_time = simulation_time
self.pos_veh_dict = {}
def compute_VehiclePlan_utility(self, simulation_time, veh_obj, vehicle_plan):
return self.vr_ctrl_f(simulation_time, veh_obj, vehicle_plan, self.rq_dict, self.routing_engine)
def _create_user_offer(self, prq, simulation_time, assigned_vehicle_plan=None, offer_dict_without_plan={}):
if assigned_vehicle_plan is not None:
pu_time, do_time = assigned_vehicle_plan.pax_info.get(prq.get_rid_struct())
offer = TravellerOffer(prq.get_rid_struct(), self.op_id, pu_time - prq.rq_time, do_time - pu_time,
self._compute_fare(simulation_time, prq, assigned_vehicle_plan))
prq.set_service_offered(offer)
else:
offer = self._create_rejection(prq, simulation_time)
return offer
def change_prq_time_constraints(self, sim_time, rid, new_lpt, new_ept=None):
LOG.debug("change time constraints for rid {}".format(rid))
prq = self.rq_dict[rid]
prq.set_new_pickup_time_constraint(new_lpt, new_earliest_pu_time=new_ept)
ass_vid = self.rid_to_assigned_vid.get(rid)
if ass_vid is not None:
self.veh_plans[ass_vid].update_prq_hard_constraints(self.sim_vehicles[ass_vid], sim_time,
self.routing_engine, prq, new_lpt, new_ept=new_ept,
keep_feasible=True)
def assign_vehicle_plan(self, veh_obj, vehicle_plan, sim_time, force_assign=False, add_arg=None):
super().assign_vehicle_plan(veh_obj, vehicle_plan, sim_time, force_assign, add_arg)
def lock_current_vehicle_plan(self, vid):
super().lock_current_vehicle_plan(vid)
def _lock_vid_rid_pickup(self, sim_time, vid, rid):
super()._lock_vid_rid_pickup(sim_time, vid, rid)
| true | true |
1c34e2071a5e6fa4e09f6128585cb488e46b16c0 | 32,325 | py | Python | bert4keras/layers.py | EthanChen1234/bert4keras | 149b8abe4f5696f7762f49547533873b935f85b9 | [
"Apache-2.0"
] | null | null | null | bert4keras/layers.py | EthanChen1234/bert4keras | 149b8abe4f5696f7762f49547533873b935f85b9 | [
"Apache-2.0"
] | null | null | null | bert4keras/layers.py | EthanChen1234/bert4keras | 149b8abe4f5696f7762f49547533873b935f85b9 | [
"Apache-2.0"
] | null | null | null | #! -*- coding: utf-8 -*-
# 自定义层
import numpy as np
import tensorflow as tf
from bert4keras.backend import keras, K
from bert4keras.backend import sequence_masking
from bert4keras.backend import recompute_grad
from keras import initializers, activations
from keras.layers import *
def integerize_shape(func):
    """Decorator ensuring every entry of ``input_shape`` is a plain int or None.

    Keras/TF may deliver shapes whose entries are Dimension-like objects
    (carrying a ``.value`` attribute); those are unwrapped recursively
    before ``func`` is invoked.
    """
    def _to_int(entry):
        # recurse into nested shape lists/tuples, unwrap Dimension-like objects
        if hasattr(entry, '__iter__'):
            return [_to_int(sub) for sub in entry]
        if hasattr(entry, 'value'):
            return entry.value
        return entry

    def wrapped(self, input_shape):
        return func(self, _to_int(input_shape))

    return wrapped
if keras.__version__[-2:] != 'tf' and keras.__version__ < '2.3':

    class Layer(keras.layers.Layer):
        """Redefined Layer with support for "layers inside layers".
        (only needed for keras versions below 2.3)
        """
        def __init__(self, **kwargs):
            super(Layer, self).__init__(**kwargs)
            self.supports_masking = True  # all custom layers in this project support masking

        def __setattr__(self, name, value):
            # track sub-layers assigned as attributes so their weights are exposed below
            if isinstance(value, keras.layers.Layer):
                if not hasattr(self, '_layers'):
                    self._layers = []
                if value not in self._layers:
                    self._layers.append(value)
            super(Layer, self).__setattr__(name, value)

        @property
        def trainable_weights(self):
            trainable = getattr(self, 'trainable', True)
            if trainable:
                # own weights plus the weights of all tracked sub-layers
                trainable_weights = super(Layer, self).trainable_weights[:]
                for l in getattr(self, '_layers', []):
                    trainable_weights += l.trainable_weights
                return trainable_weights
            else:
                return []

        @property
        def non_trainable_weights(self):
            trainable = getattr(self, 'trainable', True)
            non_trainable_weights = super(Layer, self).non_trainable_weights[:]
            for l in getattr(self, '_layers', []):
                if trainable:
                    non_trainable_weights += l.non_trainable_weights
                else:
                    # a frozen layer reports all sub-layer weights as non-trainable
                    non_trainable_weights += l.weights
            return non_trainable_weights
else:

    class Layer(keras.layers.Layer):
        def __init__(self, **kwargs):
            super(Layer, self).__init__(**kwargs)
            self.supports_masking = True  # all custom layers in this project support masking
class Embedding(keras.layers.Embedding):
    """Extended Embedding layer."""
    def compute_mask(self, inputs, mask=None):
        """For T5 compatibility, ensure the first token is never masked."""
        if self._current_mode == 'embedding':
            mask = super(Embedding, self).compute_mask(inputs, mask)
            if mask is not None:
                # force the mask of the first position to True
                mask1 = K.ones_like(mask[:, :1], dtype='bool')
                mask2 = mask[:, 1:]
                return K.concatenate([mask1, mask2], 1)
        else:
            return mask

    def call(self, inputs, mode='embedding'):
        """Adds a ``mode`` argument, either 'embedding' or 'dense'.
        'embedding' behaves like a regular Embedding layer; 'dense' behaves
        like a bias-free Dense layer sharing the embedding matrix as kernel.
        """
        self._current_mode = mode
        if mode == 'embedding':
            return super(Embedding, self).call(inputs)
        else:
            kernel = K.transpose(self.embeddings)
            return K.dot(inputs, kernel)

    def compute_output_shape(self, input_shape):
        if self._current_mode == 'embedding':
            return super(Embedding, self).compute_output_shape(input_shape)
        else:
            # 'dense' mode projects back onto the vocabulary dimension
            return input_shape[:2] + (K.int_shape(self.embeddings)[0],)
class BiasAdd(Layer):
    """Layer that adds a trainable bias term to its input."""
    @integerize_shape
    def build(self, input_shape):
        super(BiasAdd, self).build(input_shape)
        output_dim = input_shape[-1]
        self.bias = self.add_weight(
            name='bias',
            shape=(output_dim,),
            initializer='zeros',
            trainable=True
        )

    def call(self, inputs):
        return K.bias_add(inputs, self.bias)
class MultiHeadAttention(Layer):
    """Multi-head attention mechanism."""
    def __init__(
        self,
        heads,
        head_size,
        key_size=None,
        use_bias=True,
        attention_scale=True,
        kernel_initializer='glorot_uniform',
        **kwargs
    ):
        super(MultiHeadAttention, self).__init__(**kwargs)
        self.heads = heads
        self.head_size = head_size
        self.out_dim = heads * head_size
        self.key_size = key_size or head_size  # query/key dim may differ from value dim
        self.use_bias = use_bias
        self.attention_scale = attention_scale
        self.kernel_initializer = initializers.get(kernel_initializer)

    def build(self, input_shape):
        super(MultiHeadAttention, self).build(input_shape)
        # per-head projections for query, key, value and the final output projection
        self.q_dense = Dense(
            units=self.key_size * self.heads,
            use_bias=self.use_bias,
            kernel_initializer=self.kernel_initializer
        )
        self.k_dense = Dense(
            units=self.key_size * self.heads,
            use_bias=self.use_bias,
            kernel_initializer=self.kernel_initializer
        )
        self.v_dense = Dense(
            units=self.out_dim,
            use_bias=self.use_bias,
            kernel_initializer=self.kernel_initializer
        )
        self.o_dense = Dense(
            units=self.out_dim,
            use_bias=self.use_bias,
            kernel_initializer=self.kernel_initializer
        )

    @recompute_grad
    def call(self, inputs, mask=None, a_mask=None, p_bias=None):
        """Multi-head attention implementation.
        q_mask: mask of the input query sequence;
                mainly zeroes out the padding part of the output.
        v_mask: mask of the input value sequence;
                mainly prevents attention from reading padding information.
        a_mask: mask applied to the attention matrix;
                different attention masks serve different applications.
        p_bias: positional bias inside attention;
                usually selects the kind of relative position encoding.
        """
        q, k, v = inputs[:3]
        q_mask, v_mask, n = None, None, 3
        if mask is not None:
            if mask[0] is not None:
                q_mask = K.cast(mask[0], K.floatx())
            if mask[2] is not None:
                v_mask = K.cast(mask[2], K.floatx())
        if a_mask:
            a_mask = inputs[n]
            n += 1
        # linear projections
        qw = self.q_dense(q)
        kw = self.k_dense(k)
        vw = self.v_dense(v)
        # reshape to (batch, seq_len, heads, size_per_head)
        qw = K.reshape(qw, (-1, K.shape(q)[1], self.heads, self.key_size))
        kw = K.reshape(kw, (-1, K.shape(k)[1], self.heads, self.key_size))
        vw = K.reshape(vw, (-1, K.shape(v)[1], self.heads, self.head_size))
        # attention scores
        a = tf.einsum('bjhd,bkhd->bhjk', qw, kw)
        # handle positional encodings
        if p_bias == 'typical_relative':
            pos_embeddings = inputs[n]
            a = a + tf.einsum('bjhd,jkd->bhjk', qw, pos_embeddings)
        elif p_bias == 't5_relative':
            pos_embeddings = K.permute_dimensions(inputs[n], (2, 0, 1))
            a = a + K.expand_dims(pos_embeddings, 0)
        # attention (continued): scale, mask, softmax
        if self.attention_scale:
            a = a / self.key_size**0.5
        a = sequence_masking(a, v_mask, 1, -1)
        if a_mask is not None:
            a = a - (1 - a_mask) * 1e12
        a = K.softmax(a)
        # produce the output
        o = tf.einsum('bhjk,bkhd->bjhd', a, vw)
        if p_bias == 'typical_relative':
            o = o + tf.einsum('bhjk,jkd->bjhd', a, pos_embeddings)
        o = K.reshape(o, (-1, K.shape(o)[1], self.out_dim))
        o = self.o_dense(o)
        # return the result with query padding zeroed out
        o = sequence_masking(o, q_mask, 0)
        return o

    def compute_output_shape(self, input_shape):
        return (input_shape[0][0], input_shape[0][1], self.out_dim)

    def compute_mask(self, inputs, mask):
        return mask[0]

    def get_config(self):
        config = {
            'heads': self.heads,
            'head_size': self.head_size,
            'key_size': self.key_size,
            'use_bias': self.use_bias,
            'attention_scale': self.attention_scale,
            'kernel_initializer':
                initializers.serialize(self.kernel_initializer),
        }
        base_config = super(MultiHeadAttention, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class LayerNormalization(Layer):
    """(Conditional) Layer Normalization.
    The hidden_* arguments are only used with conditional input (conditional=True).
    """
    def __init__(
        self,
        center=True,
        scale=True,
        epsilon=None,
        conditional=False,
        hidden_units=None,
        hidden_activation='linear',
        hidden_initializer='glorot_uniform',
        **kwargs
    ):
        super(LayerNormalization, self).__init__(**kwargs)
        self.center = center
        self.scale = scale
        self.conditional = conditional
        self.hidden_units = hidden_units
        self.hidden_activation = activations.get(hidden_activation)
        self.hidden_initializer = initializers.get(hidden_initializer)
        self.epsilon = epsilon or 1e-12

    def compute_mask(self, inputs, mask=None):
        if self.conditional:
            # combine the masks of the main input and the condition input
            masks = [K.expand_dims(m, 0) for m in mask if m is not None]
            if len(masks) == 0:
                return None
            else:
                return K.all(K.concatenate(masks, axis=0), axis=0)
        else:
            return mask

    def build(self, input_shape):
        super(LayerNormalization, self).build(input_shape)
        if self.conditional:
            shape = (input_shape[0][-1],)
        else:
            shape = (input_shape[-1],)
        if self.center:
            self.beta = self.add_weight(
                shape=shape, initializer='zeros', name='beta'
            )
        if self.scale:
            self.gamma = self.add_weight(
                shape=shape, initializer='ones', name='gamma'
            )
        if self.conditional:
            # dense layers mapping the condition to per-feature beta/gamma offsets;
            # zero-initialized so the layer starts as plain Layer Norm
            if self.hidden_units is not None:
                self.hidden_dense = Dense(
                    units=self.hidden_units,
                    activation=self.hidden_activation,
                    use_bias=False,
                    kernel_initializer=self.hidden_initializer
                )
            if self.center:
                self.beta_dense = Dense(
                    units=shape[0], use_bias=False, kernel_initializer='zeros'
                )
            if self.scale:
                self.gamma_dense = Dense(
                    units=shape[0], use_bias=False, kernel_initializer='zeros'
                )

    @recompute_grad
    def call(self, inputs):
        """For conditional Layer Norm, inputs is a list whose second element is the condition."""
        if self.conditional:
            inputs, cond = inputs
            if self.hidden_units is not None:
                cond = self.hidden_dense(cond)
            # broadcast the condition up to the rank of the main input
            for _ in range(K.ndim(inputs) - K.ndim(cond)):
                cond = K.expand_dims(cond, 1)
            if self.center:
                beta = self.beta_dense(cond) + self.beta
            if self.scale:
                gamma = self.gamma_dense(cond) + self.gamma
        else:
            if self.center:
                beta = self.beta
            if self.scale:
                gamma = self.gamma
        outputs = inputs
        if self.center:
            mean = K.mean(outputs, axis=-1, keepdims=True)
            outputs = outputs - mean
        if self.scale:
            variance = K.mean(K.square(outputs), axis=-1, keepdims=True)
            std = K.sqrt(variance + self.epsilon)
            outputs = outputs / std
            outputs = outputs * gamma
        if self.center:
            outputs = outputs + beta
        return outputs

    def compute_output_shape(self, input_shape):
        if self.conditional:
            return input_shape[0]
        else:
            return input_shape

    def get_config(self):
        config = {
            'center': self.center,
            'scale': self.scale,
            'epsilon': self.epsilon,
            'conditional': self.conditional,
            'hidden_units': self.hidden_units,
            'hidden_activation': activations.serialize(self.hidden_activation),
            'hidden_initializer':
                initializers.serialize(self.hidden_initializer),
        }
        base_config = super(LayerNormalization, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class PositionEmbedding(Layer):
    """Position embedding layer; the embeddings here are trainable."""
    def __init__(
        self,
        input_dim,
        output_dim,
        merge_mode='add',
        embeddings_initializer='zeros',
        custom_position_ids=False,
        **kwargs
    ):
        super(PositionEmbedding, self).__init__(**kwargs)
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.merge_mode = merge_mode
        self.embeddings_initializer = initializers.get(embeddings_initializer)
        self.custom_position_ids = custom_position_ids

    def build(self, input_shape):
        super(PositionEmbedding, self).build(input_shape)
        self.embeddings = self.add_weight(
            name='embeddings',
            shape=(self.input_dim, self.output_dim),
            initializer=self.embeddings_initializer
        )

    def call(self, inputs):
        """If custom_position_ids is set, the second input holds user-supplied position ids."""
        if self.custom_position_ids:
            inputs, position_ids = inputs
            if K.dtype(position_ids) != 'int32':
                position_ids = K.cast(position_ids, 'int32')
            pos_embeddings = K.gather(self.embeddings, position_ids)
        else:
            input_shape = K.shape(inputs)
            batch_size, seq_len = input_shape[0], input_shape[1]
            pos_embeddings = self.embeddings[:seq_len]
            pos_embeddings = K.expand_dims(pos_embeddings, 0)
            if self.merge_mode != 'add':
                # concatenation needs an explicit batch dimension
                pos_embeddings = K.tile(pos_embeddings, [batch_size, 1, 1])
        if self.merge_mode == 'add':
            return inputs + pos_embeddings
        else:
            return K.concatenate([inputs, pos_embeddings])

    def compute_output_shape(self, input_shape):
        if self.custom_position_ids:
            input_shape = input_shape[0]
        if self.merge_mode == 'add':
            return input_shape
        else:
            return input_shape[:2] + (input_shape[2] + self.output_dim,)

    def get_config(self):
        config = {
            'input_dim': self.input_dim,
            'output_dim': self.output_dim,
            'merge_mode': self.merge_mode,
            'embeddings_initializer':
                initializers.serialize(self.embeddings_initializer),
            'custom_position_ids': self.custom_position_ids,
        }
        base_config = super(PositionEmbedding, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class RelativePositionEmbedding(Layer):
    """Relative position encoding.
    From the paper: https://arxiv.org/abs/1803.02155
    """
    def __init__(
        self, input_dim, output_dim, embeddings_initializer='zeros', **kwargs
    ):
        super(RelativePositionEmbedding, self).__init__(**kwargs)
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.embeddings_initializer = initializers.get(embeddings_initializer)

    def build(self, input_shape):
        super(RelativePositionEmbedding, self).build(input_shape)
        self.embeddings = self.add_weight(
            name='embeddings',
            shape=(self.input_dim, self.output_dim),
            initializer=self.embeddings_initializer,
        )

    def call(self, inputs):
        pos_ids = self.compute_position_ids(inputs)
        return K.gather(self.embeddings, pos_ids)

    def compute_position_ids(self, inputs):
        q, v = inputs
        # pairwise position differences between value and query positions
        q_idxs = K.arange(0, K.shape(q)[1], dtype='int32')
        q_idxs = K.expand_dims(q_idxs, 1)
        v_idxs = K.arange(0, K.shape(v)[1], dtype='int32')
        v_idxs = K.expand_dims(v_idxs, 0)
        pos_ids = v_idxs - q_idxs
        # post-processing: clip and shift into the valid index range [0, input_dim)
        max_position = (self.input_dim - 1) // 2
        pos_ids = K.clip(pos_ids, -max_position, max_position)
        pos_ids = pos_ids + max_position
        return pos_ids

    def compute_output_shape(self, input_shape):
        return (None, None, self.output_dim)

    def compute_mask(self, inputs, mask):
        return mask[0]

    def get_config(self):
        config = {
            'input_dim': self.input_dim,
            'output_dim': self.output_dim,
            'embeddings_initializer':
                initializers.serialize(self.embeddings_initializer),
        }
        base_config = super(RelativePositionEmbedding, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class RelativePositionEmbeddingT5(RelativePositionEmbedding):
    """Google T5's relative position encoding.
    From the paper: https://arxiv.org/abs/1910.10683
    """
    def __init__(
        self,
        input_dim,
        output_dim,
        max_distance=128,
        bidirectional=True,
        embeddings_initializer='zeros',
        **kwargs
    ):
        super(RelativePositionEmbeddingT5,
              self).__init__(input_dim, output_dim, **kwargs)
        self.max_distance = max_distance
        self.bidirectional = bidirectional

    def compute_position_ids(self, inputs):
        """T5-style relative position bucketing (translated directly from the official T5 source)."""
        q, v = inputs
        # pairwise position differences
        q_idxs = K.arange(0, K.shape(q)[1], dtype='int32')
        q_idxs = K.expand_dims(q_idxs, 1)
        v_idxs = K.arange(0, K.shape(v)[1], dtype='int32')
        v_idxs = K.expand_dims(v_idxs, 0)
        pos_ids = v_idxs - q_idxs
        # post-processing: map differences into buckets (exact for small distances,
        # logarithmically spaced for larger ones)
        num_buckets, max_distance = self.input_dim, self.max_distance
        ret = 0
        n = -pos_ids
        if self.bidirectional:
            # half of the buckets encode the sign of the offset
            num_buckets //= 2
            ret += K.cast(K.less(n, 0), 'int32') * num_buckets
            n = K.abs(n)
        else:
            n = K.maximum(n, 0)
        # now n is in the range [0, inf)
        max_exact = num_buckets // 2
        is_small = K.less(n, max_exact)
        val_if_large = max_exact + K.cast(
            K.log(K.cast(n, K.floatx()) / max_exact) /
            np.log(max_distance / max_exact) * (num_buckets - max_exact),
            'int32',
        )
        val_if_large = K.minimum(val_if_large, num_buckets - 1)
        ret += K.switch(is_small, n, val_if_large)
        return ret

    def get_config(self):
        config = {
            'max_distance': self.max_distance,
            'bidirectional': self.bidirectional,
        }
        base_config = super(RelativePositionEmbeddingT5, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class FeedForward(Layer):
    """FeedForward layer: effectively two stacked Dense layers."""
    def __init__(
        self,
        units,
        activation='relu',
        use_bias=True,
        kernel_initializer='glorot_uniform',
        **kwargs
    ):
        super(FeedForward, self).__init__(**kwargs)
        self.units = units
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)

    @integerize_shape
    def build(self, input_shape):
        super(FeedForward, self).build(input_shape)
        output_dim = input_shape[-1]
        # expand to `units` with activation, then project back to the input dim
        self.dense_1 = Dense(
            units=self.units,
            activation=self.activation,
            use_bias=self.use_bias,
            kernel_initializer=self.kernel_initializer
        )
        self.dense_2 = Dense(
            units=output_dim,
            use_bias=self.use_bias,
            kernel_initializer=self.kernel_initializer
        )

    @recompute_grad
    def call(self, inputs):
        x = inputs
        x = self.dense_1(x)
        x = self.dense_2(x)
        return x

    def get_config(self):
        config = {
            'units': self.units,
            'activation': activations.serialize(self.activation),
            'use_bias': self.use_bias,
            'kernel_initializer':
                initializers.serialize(self.kernel_initializer),
        }
        base_config = super(FeedForward, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class ConditionalRandomField(Layer):
    """Pure-Keras implementation of a CRF layer.
    A CRF layer is essentially a loss-computation layer with trainable parameters.
    """
    def __init__(self, lr_multiplier=1, **kwargs):
        super(ConditionalRandomField, self).__init__(**kwargs)
        self.lr_multiplier = lr_multiplier  # learning-rate multiplier for this layer

    @integerize_shape
    def build(self, input_shape):
        super(ConditionalRandomField, self).build(input_shape)
        output_dim = input_shape[-1]
        self.trans = self.add_weight(
            name='trans',
            shape=(output_dim, output_dim),
            initializer='glorot_uniform',
            trainable=True
        )
        if self.lr_multiplier != 1:
            # store a scaled-down value and multiply back, so effective gradients
            # on the transition matrix are amplified by lr_multiplier
            K.set_value(self.trans, K.eval(self.trans) / self.lr_multiplier)
            self.trans = self.lr_multiplier * self.trans

    def compute_mask(self, inputs, mask=None):
        return None

    def call(self, inputs, mask=None):
        if mask is not None:
            mask = K.cast(mask, K.floatx())
        return sequence_masking(inputs, mask, 1, 1)

    def target_score(self, y_true, y_pred):
        """Unnormalized relative score of the target path.
        Key idea: per-label emission scores plus transition scores.
        """
        point_score = tf.einsum('bni,bni->b', y_true, y_pred)  # per-label (emission) score
        trans_score = tf.einsum(
            'bni,ij,bnj->b', y_true[:, :-1], self.trans, y_true[:, 1:]
        )  # label-transition score
        return point_score + trans_score

    def log_norm_step(self, inputs, states):
        """One recursion step of the normalization factor (log Z).
        Key points: 1. computed recursively; 2. logsumexp avoids overflow.
        """
        inputs, mask = inputs[:, :-1], inputs[:, -1:]
        states = K.expand_dims(states[0], 2)  # (batch_size, output_dim, 1)
        trans = K.expand_dims(self.trans, 0)  # (1, output_dim, output_dim)
        outputs = tf.reduce_logsumexp(
            states + trans, 1
        )  # (batch_size, output_dim)
        outputs = outputs + inputs
        outputs = mask * outputs + (1 - mask) * states[:, :, 0]
        return outputs, [outputs]

    def dense_loss(self, y_true, y_pred):
        """y_true must be one-hot encoded here."""
        # derive the padding mask and convert its dtype
        mask = K.all(K.greater(y_pred, -1e6), axis=2, keepdims=True)
        mask = K.cast(mask, K.floatx())
        # compute the target path score
        y_true, y_pred = y_true * mask, y_pred * mask
        target_score = self.target_score(y_true, y_pred)
        # recursively compute log Z
        init_states = [y_pred[:, 0]]
        y_pred = K.concatenate([y_pred, mask], axis=2)
        input_length = K.int_shape(y_pred[:, 1:])[1]
        log_norm, _, _ = K.rnn(
            self.log_norm_step,
            y_pred[:, 1:],
            init_states,
            input_length=input_length
        )  # log Z vector after the last step
        log_norm = tf.reduce_logsumexp(log_norm, 1)  # logsumexp reduces to a scalar
        # loss is -log p
        return log_norm - target_score

    def sparse_loss(self, y_true, y_pred):
        """y_true must be integer labels here (not one-hot)."""
        # re-establish shape and dtype of y_true
        y_true = K.reshape(y_true, K.shape(y_pred)[:-1])
        y_true = K.cast(y_true, 'int32')
        # convert to one-hot
        y_true = K.one_hot(y_true, K.shape(self.trans)[0])
        return self.dense_loss(y_true, y_pred)

    def dense_accuracy(self, y_true, y_pred):
        """Per-frame accuracy shown during training, mask-aware.
        y_true must be one-hot encoded here.
        """
        y_true = K.argmax(y_true, 2)
        return self.sparse_accuracy(y_true, y_pred)

    def sparse_accuracy(self, y_true, y_pred):
        """Per-frame accuracy shown during training, mask-aware.
        y_true must be integer labels here (not one-hot).
        """
        # derive the padding mask and convert its dtype
        mask = K.all(K.greater(y_pred, -1e6), axis=2)
        mask = K.cast(mask, K.floatx())
        # re-establish shape and dtype of y_true
        y_true = K.reshape(y_true, K.shape(y_pred)[:-1])
        y_true = K.cast(y_true, 'int32')
        # per-label greedy argmax as a rough training metric
        y_pred = K.cast(K.argmax(y_pred, 2), 'int32')
        isequal = K.cast(K.equal(y_true, y_pred), K.floatx())
        return K.sum(isequal * mask) / K.sum(mask)

    def get_config(self):
        config = {
            'lr_multiplier': self.lr_multiplier,
        }
        base_config = super(ConditionalRandomField, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class MaximumEntropyMarkovModel(Layer):
    """(Bidirectional) Maximum Entropy Markov Model.

    Fills the same role as a CRF output layer for sequence labeling, but is
    faster and simpler: each frame's logits are averaged with transition
    scores looked up from the neighbouring gold label, and an ordinary
    per-frame softmax cross-entropy is applied, run in both directions.
    """
    def __init__(self, lr_multiplier=1, hidden_dim=None, **kwargs):
        super(MaximumEntropyMarkovModel, self).__init__(**kwargs)
        self.lr_multiplier = lr_multiplier  # learning-rate scale factor for this layer's weights
        self.hidden_dim = hidden_dim  # if not None, low-rank-factorize the transition matrix
    @integerize_shape
    def build(self, input_shape):
        super(MaximumEntropyMarkovModel, self).build(input_shape)
        output_dim = input_shape[-1]
        if self.hidden_dim is None:
            # Full (output_dim, output_dim) label-transition matrix.
            self.trans = self.add_weight(
                name='trans',
                shape=(output_dim, output_dim),
                initializer='glorot_uniform',
                trainable=True
            )
            if self.lr_multiplier != 1:
                # Store weight / multiplier and rescale it in the graph: this
                # effectively multiplies the weight's learning rate.
                K.set_value(self.trans, K.eval(self.trans) / self.lr_multiplier)
                self.trans = self.lr_multiplier * self.trans
        else:
            # Low-rank factorization: transition scores come from
            # l_trans @ r_trans.T, each factor (output_dim, hidden_dim).
            self.l_trans = self.add_weight(
                name='l_trans',
                shape=(output_dim, self.hidden_dim),
                initializer='glorot_uniform',
                trainable=True
            )
            self.r_trans = self.add_weight(
                name='r_trans',
                shape=(output_dim, self.hidden_dim),
                initializer='glorot_uniform',
                trainable=True
            )
            if self.lr_multiplier != 1:
                # Same learning-rate trick as above, applied to both factors.
                K.set_value(
                    self.l_trans,
                    K.eval(self.l_trans) / self.lr_multiplier
                )
                self.l_trans = self.lr_multiplier * self.l_trans
                K.set_value(
                    self.r_trans,
                    K.eval(self.r_trans) / self.lr_multiplier
                )
                self.r_trans = self.lr_multiplier * self.r_trans
    def compute_mask(self, inputs, mask=None):
        # The mask is consumed here (folded into the scores); stop propagating it.
        return None
    def call(self, inputs, mask=None):
        # Encode the padding mask directly into the score tensor.
        if mask is not None:
            mask = K.cast(mask, K.floatx())
        return sequence_masking(inputs, mask, 1, 1)
    def reverse_sequence(self, inputs, mask=None):
        """Reverse each tensor along the time axis (mask-aware when given)."""
        if mask is None:
            return [x[:, ::-1] for x in inputs]
        else:
            length = K.cast(K.sum(mask, 1), 'int32')
            return [tf.reverse_sequence(x, length, seq_axis=1) for x in inputs]
    def basic_loss(self, y_true, y_pred, go_backwards=False):
        """Single-direction loss; `y_true` must be integer labels (not one-hot)."""
        # Recover the padding mask (scores all <= -1e6 mean masked) and cast.
        mask = K.all(K.greater(y_pred, -1e6), axis=2)
        mask = K.cast(mask, K.floatx())
        # Re-assert the shape and dtype of y_true.
        y_true = K.reshape(y_true, K.shape(y_pred)[:-1])
        y_true = K.cast(y_true, 'int32')
        # Direction handling: for the backward pass, reverse the sequences and
        # (conceptually) transpose the transition scores.
        if self.hidden_dim is None:
            if go_backwards:  # whether to reverse the sequence
                y_true, y_pred = self.reverse_sequence([y_true, y_pred], mask)
                trans = K.transpose(self.trans)
            else:
                trans = self.trans
            histoty = K.gather(trans, y_true)  # (sic: "histoty" = history)
        else:
            if go_backwards:  # whether to reverse the sequence
                y_true, y_pred = self.reverse_sequence([y_true, y_pred], mask)
                r_trans, l_trans = self.l_trans, self.r_trans
            else:
                l_trans, r_trans = self.l_trans, self.r_trans
            histoty = K.gather(l_trans, y_true)
            histoty = tf.einsum('bnd,kd->bnk', histoty, r_trans)
        # Loss: average the frame logits with transition scores from the
        # previous gold label, then per-frame softmax cross-entropy.
        histoty = K.concatenate([y_pred[:, :1], histoty[:, :-1]], 1)
        y_pred = (y_pred + histoty) / 2
        loss = K.sparse_categorical_crossentropy(
            y_true, y_pred, from_logits=True
        )
        return K.sum(loss * mask) / K.sum(mask)
    def sparse_loss(self, y_true, y_pred):
        """Bidirectional loss; `y_true` must be integer labels (not one-hot)."""
        loss = self.basic_loss(y_true, y_pred, False)
        loss = loss + self.basic_loss(y_true, y_pred, True)
        return loss / 2
    def dense_loss(self, y_true, y_pred):
        """Same as `sparse_loss`, but `y_true` is one-hot."""
        y_true = K.argmax(y_true, 2)
        return self.sparse_loss(y_true, y_pred)
    def basic_accuracy(self, y_true, y_pred, go_backwards=False):
        """Single-direction per-frame accuracy, excluding the mask.

        `y_true` must be integer labels here (not one-hot).
        """
        # Recover the padding mask and cast to float.
        mask = K.all(K.greater(y_pred, -1e6), axis=2)
        mask = K.cast(mask, K.floatx())
        # Re-assert the shape and dtype of y_true.
        y_true = K.reshape(y_true, K.shape(y_pred)[:-1])
        y_true = K.cast(y_true, 'int32')
        # Direction handling (mirrors `basic_loss`).
        if self.hidden_dim is None:
            if go_backwards:  # whether to reverse the sequence
                y_true, y_pred = self.reverse_sequence([y_true, y_pred], mask)
                trans = K.transpose(self.trans)
            else:
                trans = self.trans
            histoty = K.gather(trans, y_true)
        else:
            if go_backwards:  # whether to reverse the sequence
                y_true, y_pred = self.reverse_sequence([y_true, y_pred], mask)
                r_trans, l_trans = self.l_trans, self.r_trans
            else:
                l_trans, r_trans = self.l_trans, self.r_trans
            histoty = K.gather(l_trans, y_true)
            histoty = tf.einsum('bnd,kd->bnk', histoty, r_trans)
        # Per-label argmax accuracy on the mixed (frame + transition) scores.
        histoty = K.concatenate([y_pred[:, :1], histoty[:, :-1]], 1)
        y_pred = (y_pred + histoty) / 2
        y_pred = K.cast(K.argmax(y_pred, 2), 'int32')
        isequal = K.cast(K.equal(y_true, y_pred), K.floatx())
        return K.sum(isequal * mask) / K.sum(mask)
    def sparse_accuracy(self, y_true, y_pred):
        """Bidirectional per-frame accuracy; `y_true` is integer labels."""
        accuracy = self.basic_accuracy(y_true, y_pred, False)
        accuracy = accuracy + self.basic_accuracy(y_true, y_pred, True)
        return accuracy / 2
    def dense_accuracy(self, y_true, y_pred):
        """Bidirectional per-frame accuracy; `y_true` is one-hot."""
        y_true = K.argmax(y_true, 2)
        return self.sparse_accuracy(y_true, y_pred)
    def get_config(self):
        config = {
            'lr_multiplier': self.lr_multiplier,
            'hidden_dim': self.hidden_dim,
        }
        base_config = super(MaximumEntropyMarkovModel, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class Loss(Layer):
    """Helper layer for defining complex losses.

    Subclasses implement `compute_loss`; the computed value is registered
    via `add_loss`, and the layer then forwards the input(s) selected by
    `output_axis` (None = all inputs, int = one input, list = several).
    """
    def __init__(self, output_axis=None, **kwargs):
        super(Loss, self).__init__(**kwargs)
        self.output_axis = output_axis
    def call(self, inputs, mask=None):
        self.add_loss(self.compute_loss(inputs, mask))
        return self._select(inputs)
    def compute_loss(self, inputs, mask=None):
        # Must be overridden by subclasses.
        raise NotImplementedError
    def compute_output_shape(self, input_shape):
        return self._select(input_shape)
    def _select(self, items):
        # Pick the configured output(s) from a parallel sequence.
        if self.output_axis is None:
            return items
        if isinstance(self.output_axis, list):
            return [items[i] for i in self.output_axis]
        return items[self.output_axis]
    def get_config(self):
        config = {
            'output_axis': self.output_axis,
        }
        base_config = super(Loss, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
# Register every custom layer under its class name so that models saved with
# these layers can be reloaded via `keras.models.load_model`.
custom_objects = {
    'Embedding': Embedding,
    'BiasAdd': BiasAdd,
    'MultiHeadAttention': MultiHeadAttention,
    'LayerNormalization': LayerNormalization,
    'PositionEmbedding': PositionEmbedding,
    'RelativePositionEmbedding': RelativePositionEmbedding,
    'RelativePositionEmbeddingT5': RelativePositionEmbeddingT5,
    'FeedForward': FeedForward,
    'ConditionalRandomField': ConditionalRandomField,
    'MaximumEntropyMarkovModel': MaximumEntropyMarkovModel,
    'Loss': Loss,
}
keras.utils.get_custom_objects().update(custom_objects)
| 33.990536 | 80 | 0.578623 |
import numpy as np
import tensorflow as tf
from bert4keras.backend import keras, K
from bert4keras.backend import sequence_masking
from bert4keras.backend import recompute_grad
from keras import initializers, activations
from keras.layers import *
def integerize_shape(func):
def convert(item):
if hasattr(item, '__iter__'):
return [convert(i) for i in item]
elif hasattr(item, 'value'):
return item.value
else:
return item
def new_func(self, input_shape):
input_shape = convert(input_shape)
return func(self, input_shape)
return new_func
if keras.__version__[-2:] != 'tf' and keras.__version__ < '2.3':
class Layer(keras.layers.Layer):
def __init__(self, **kwargs):
super(Layer, self).__init__(**kwargs)
self.supports_masking = True
def __setattr__(self, name, value):
if isinstance(value, keras.layers.Layer):
if not hasattr(self, '_layers'):
self._layers = []
if value not in self._layers:
self._layers.append(value)
super(Layer, self).__setattr__(name, value)
@property
def trainable_weights(self):
trainable = getattr(self, 'trainable', True)
if trainable:
trainable_weights = super(Layer, self).trainable_weights[:]
for l in getattr(self, '_layers', []):
trainable_weights += l.trainable_weights
return trainable_weights
else:
return []
@property
def non_trainable_weights(self):
trainable = getattr(self, 'trainable', True)
non_trainable_weights = super(Layer, self).non_trainable_weights[:]
for l in getattr(self, '_layers', []):
if trainable:
non_trainable_weights += l.non_trainable_weights
else:
non_trainable_weights += l.weights
return non_trainable_weights
else:
class Layer(keras.layers.Layer):
def __init__(self, **kwargs):
super(Layer, self).__init__(**kwargs)
self.supports_masking = True
class Embedding(keras.layers.Embedding):
def compute_mask(self, inputs, mask=None):
if self._current_mode == 'embedding':
mask = super(Embedding, self).compute_mask(inputs, mask)
if mask is not None:
mask1 = K.ones_like(mask[:, :1], dtype='bool')
mask2 = mask[:, 1:]
return K.concatenate([mask1, mask2], 1)
else:
return mask
def call(self, inputs, mode='embedding'):
self._current_mode = mode
if mode == 'embedding':
return super(Embedding, self).call(inputs)
else:
kernel = K.transpose(self.embeddings)
return K.dot(inputs, kernel)
def compute_output_shape(self, input_shape):
if self._current_mode == 'embedding':
return super(Embedding, self).compute_output_shape(input_shape)
else:
return input_shape[:2] + (K.int_shape(self.embeddings)[0],)
class BiasAdd(Layer):
@integerize_shape
def build(self, input_shape):
super(BiasAdd, self).build(input_shape)
output_dim = input_shape[-1]
self.bias = self.add_weight(
name='bias',
shape=(output_dim,),
initializer='zeros',
trainable=True
)
def call(self, inputs):
return K.bias_add(inputs, self.bias)
class MultiHeadAttention(Layer):
def __init__(
self,
heads,
head_size,
key_size=None,
use_bias=True,
attention_scale=True,
kernel_initializer='glorot_uniform',
**kwargs
):
super(MultiHeadAttention, self).__init__(**kwargs)
self.heads = heads
self.head_size = head_size
self.out_dim = heads * head_size
self.key_size = key_size or head_size
self.use_bias = use_bias
self.attention_scale = attention_scale
self.kernel_initializer = initializers.get(kernel_initializer)
def build(self, input_shape):
super(MultiHeadAttention, self).build(input_shape)
self.q_dense = Dense(
units=self.key_size * self.heads,
use_bias=self.use_bias,
kernel_initializer=self.kernel_initializer
)
self.k_dense = Dense(
units=self.key_size * self.heads,
use_bias=self.use_bias,
kernel_initializer=self.kernel_initializer
)
self.v_dense = Dense(
units=self.out_dim,
use_bias=self.use_bias,
kernel_initializer=self.kernel_initializer
)
self.o_dense = Dense(
units=self.out_dim,
use_bias=self.use_bias,
kernel_initializer=self.kernel_initializer
)
@recompute_grad
def call(self, inputs, mask=None, a_mask=None, p_bias=None):
q, k, v = inputs[:3]
q_mask, v_mask, n = None, None, 3
if mask is not None:
if mask[0] is not None:
q_mask = K.cast(mask[0], K.floatx())
if mask[2] is not None:
v_mask = K.cast(mask[2], K.floatx())
if a_mask:
a_mask = inputs[n]
n += 1
qw = self.q_dense(q)
kw = self.k_dense(k)
vw = self.v_dense(v)
qw = K.reshape(qw, (-1, K.shape(q)[1], self.heads, self.key_size))
kw = K.reshape(kw, (-1, K.shape(k)[1], self.heads, self.key_size))
vw = K.reshape(vw, (-1, K.shape(v)[1], self.heads, self.head_size))
a = tf.einsum('bjhd,bkhd->bhjk', qw, kw)
if p_bias == 'typical_relative':
pos_embeddings = inputs[n]
a = a + tf.einsum('bjhd,jkd->bhjk', qw, pos_embeddings)
elif p_bias == 't5_relative':
pos_embeddings = K.permute_dimensions(inputs[n], (2, 0, 1))
a = a + K.expand_dims(pos_embeddings, 0)
if self.attention_scale:
a = a / self.key_size**0.5
a = sequence_masking(a, v_mask, 1, -1)
if a_mask is not None:
a = a - (1 - a_mask) * 1e12
a = K.softmax(a)
o = tf.einsum('bhjk,bkhd->bjhd', a, vw)
if p_bias == 'typical_relative':
o = o + tf.einsum('bhjk,jkd->bjhd', a, pos_embeddings)
o = K.reshape(o, (-1, K.shape(o)[1], self.out_dim))
o = self.o_dense(o)
o = sequence_masking(o, q_mask, 0)
return o
def compute_output_shape(self, input_shape):
return (input_shape[0][0], input_shape[0][1], self.out_dim)
def compute_mask(self, inputs, mask):
return mask[0]
def get_config(self):
config = {
'heads': self.heads,
'head_size': self.head_size,
'key_size': self.key_size,
'use_bias': self.use_bias,
'attention_scale': self.attention_scale,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
}
base_config = super(MultiHeadAttention, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class LayerNormalization(Layer):
def __init__(
self,
center=True,
scale=True,
epsilon=None,
conditional=False,
hidden_units=None,
hidden_activation='linear',
hidden_initializer='glorot_uniform',
**kwargs
):
super(LayerNormalization, self).__init__(**kwargs)
self.center = center
self.scale = scale
self.conditional = conditional
self.hidden_units = hidden_units
self.hidden_activation = activations.get(hidden_activation)
self.hidden_initializer = initializers.get(hidden_initializer)
self.epsilon = epsilon or 1e-12
def compute_mask(self, inputs, mask=None):
if self.conditional:
masks = [K.expand_dims(m, 0) for m in mask if m is not None]
if len(masks) == 0:
return None
else:
return K.all(K.concatenate(masks, axis=0), axis=0)
else:
return mask
def build(self, input_shape):
super(LayerNormalization, self).build(input_shape)
if self.conditional:
shape = (input_shape[0][-1],)
else:
shape = (input_shape[-1],)
if self.center:
self.beta = self.add_weight(
shape=shape, initializer='zeros', name='beta'
)
if self.scale:
self.gamma = self.add_weight(
shape=shape, initializer='ones', name='gamma'
)
if self.conditional:
if self.hidden_units is not None:
self.hidden_dense = Dense(
units=self.hidden_units,
activation=self.hidden_activation,
use_bias=False,
kernel_initializer=self.hidden_initializer
)
if self.center:
self.beta_dense = Dense(
units=shape[0], use_bias=False, kernel_initializer='zeros'
)
if self.scale:
self.gamma_dense = Dense(
units=shape[0], use_bias=False, kernel_initializer='zeros'
)
@recompute_grad
def call(self, inputs):
if self.conditional:
inputs, cond = inputs
if self.hidden_units is not None:
cond = self.hidden_dense(cond)
for _ in range(K.ndim(inputs) - K.ndim(cond)):
cond = K.expand_dims(cond, 1)
if self.center:
beta = self.beta_dense(cond) + self.beta
if self.scale:
gamma = self.gamma_dense(cond) + self.gamma
else:
if self.center:
beta = self.beta
if self.scale:
gamma = self.gamma
outputs = inputs
if self.center:
mean = K.mean(outputs, axis=-1, keepdims=True)
outputs = outputs - mean
if self.scale:
variance = K.mean(K.square(outputs), axis=-1, keepdims=True)
std = K.sqrt(variance + self.epsilon)
outputs = outputs / std
outputs = outputs * gamma
if self.center:
outputs = outputs + beta
return outputs
def compute_output_shape(self, input_shape):
if self.conditional:
return input_shape[0]
else:
return input_shape
def get_config(self):
config = {
'center': self.center,
'scale': self.scale,
'epsilon': self.epsilon,
'conditional': self.conditional,
'hidden_units': self.hidden_units,
'hidden_activation': activations.serialize(self.hidden_activation),
'hidden_initializer':
initializers.serialize(self.hidden_initializer),
}
base_config = super(LayerNormalization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class PositionEmbedding(Layer):
def __init__(
self,
input_dim,
output_dim,
merge_mode='add',
embeddings_initializer='zeros',
custom_position_ids=False,
**kwargs
):
super(PositionEmbedding, self).__init__(**kwargs)
self.input_dim = input_dim
self.output_dim = output_dim
self.merge_mode = merge_mode
self.embeddings_initializer = initializers.get(embeddings_initializer)
self.custom_position_ids = custom_position_ids
def build(self, input_shape):
super(PositionEmbedding, self).build(input_shape)
self.embeddings = self.add_weight(
name='embeddings',
shape=(self.input_dim, self.output_dim),
initializer=self.embeddings_initializer
)
def call(self, inputs):
if self.custom_position_ids:
inputs, position_ids = inputs
if K.dtype(position_ids) != 'int32':
position_ids = K.cast(position_ids, 'int32')
pos_embeddings = K.gather(self.embeddings, position_ids)
else:
input_shape = K.shape(inputs)
batch_size, seq_len = input_shape[0], input_shape[1]
pos_embeddings = self.embeddings[:seq_len]
pos_embeddings = K.expand_dims(pos_embeddings, 0)
if self.merge_mode != 'add':
pos_embeddings = K.tile(pos_embeddings, [batch_size, 1, 1])
if self.merge_mode == 'add':
return inputs + pos_embeddings
else:
return K.concatenate([inputs, pos_embeddings])
def compute_output_shape(self, input_shape):
if self.custom_position_ids:
input_shape = input_shape[0]
if self.merge_mode == 'add':
return input_shape
else:
return input_shape[:2] + (input_shape[2] + self.output_dim,)
def get_config(self):
config = {
'input_dim': self.input_dim,
'output_dim': self.output_dim,
'merge_mode': self.merge_mode,
'embeddings_initializer':
initializers.serialize(self.embeddings_initializer),
'custom_position_ids': self.custom_position_ids,
}
base_config = super(PositionEmbedding, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class RelativePositionEmbedding(Layer):
def __init__(
self, input_dim, output_dim, embeddings_initializer='zeros', **kwargs
):
super(RelativePositionEmbedding, self).__init__(**kwargs)
self.input_dim = input_dim
self.output_dim = output_dim
self.embeddings_initializer = initializers.get(embeddings_initializer)
def build(self, input_shape):
super(RelativePositionEmbedding, self).build(input_shape)
self.embeddings = self.add_weight(
name='embeddings',
shape=(self.input_dim, self.output_dim),
initializer=self.embeddings_initializer,
)
def call(self, inputs):
pos_ids = self.compute_position_ids(inputs)
return K.gather(self.embeddings, pos_ids)
def compute_position_ids(self, inputs):
q, v = inputs
q_idxs = K.arange(0, K.shape(q)[1], dtype='int32')
q_idxs = K.expand_dims(q_idxs, 1)
v_idxs = K.arange(0, K.shape(v)[1], dtype='int32')
v_idxs = K.expand_dims(v_idxs, 0)
pos_ids = v_idxs - q_idxs
max_position = (self.input_dim - 1) // 2
pos_ids = K.clip(pos_ids, -max_position, max_position)
pos_ids = pos_ids + max_position
return pos_ids
def compute_output_shape(self, input_shape):
return (None, None, self.output_dim)
def compute_mask(self, inputs, mask):
return mask[0]
def get_config(self):
config = {
'input_dim': self.input_dim,
'output_dim': self.output_dim,
'embeddings_initializer':
initializers.serialize(self.embeddings_initializer),
}
base_config = super(RelativePositionEmbedding, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class RelativePositionEmbeddingT5(RelativePositionEmbedding):
def __init__(
self,
input_dim,
output_dim,
max_distance=128,
bidirectional=True,
embeddings_initializer='zeros',
**kwargs
):
super(RelativePositionEmbeddingT5,
self).__init__(input_dim, output_dim, **kwargs)
self.max_distance = max_distance
self.bidirectional = bidirectional
def compute_position_ids(self, inputs):
q, v = inputs
q_idxs = K.arange(0, K.shape(q)[1], dtype='int32')
q_idxs = K.expand_dims(q_idxs, 1)
v_idxs = K.arange(0, K.shape(v)[1], dtype='int32')
v_idxs = K.expand_dims(v_idxs, 0)
pos_ids = v_idxs - q_idxs
num_buckets, max_distance = self.input_dim, self.max_distance
ret = 0
n = -pos_ids
if self.bidirectional:
num_buckets //= 2
ret += K.cast(K.less(n, 0), 'int32') * num_buckets
n = K.abs(n)
else:
n = K.maximum(n, 0)
max_exact = num_buckets // 2
is_small = K.less(n, max_exact)
val_if_large = max_exact + K.cast(
K.log(K.cast(n, K.floatx()) / max_exact) /
np.log(max_distance / max_exact) * (num_buckets - max_exact),
'int32',
)
val_if_large = K.minimum(val_if_large, num_buckets - 1)
ret += K.switch(is_small, n, val_if_large)
return ret
def get_config(self):
config = {
'max_distance': self.max_distance,
'bidirectional': self.bidirectional,
}
base_config = super(RelativePositionEmbeddingT5, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class FeedForward(Layer):
def __init__(
self,
units,
activation='relu',
use_bias=True,
kernel_initializer='glorot_uniform',
**kwargs
):
super(FeedForward, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
@integerize_shape
def build(self, input_shape):
super(FeedForward, self).build(input_shape)
output_dim = input_shape[-1]
self.dense_1 = Dense(
units=self.units,
activation=self.activation,
use_bias=self.use_bias,
kernel_initializer=self.kernel_initializer
)
self.dense_2 = Dense(
units=output_dim,
use_bias=self.use_bias,
kernel_initializer=self.kernel_initializer
)
@recompute_grad
def call(self, inputs):
x = inputs
x = self.dense_1(x)
x = self.dense_2(x)
return x
def get_config(self):
config = {
'units': self.units,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
}
base_config = super(FeedForward, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class ConditionalRandomField(Layer):
def __init__(self, lr_multiplier=1, **kwargs):
super(ConditionalRandomField, self).__init__(**kwargs)
self.lr_multiplier = lr_multiplier
@integerize_shape
def build(self, input_shape):
super(ConditionalRandomField, self).build(input_shape)
output_dim = input_shape[-1]
self.trans = self.add_weight(
name='trans',
shape=(output_dim, output_dim),
initializer='glorot_uniform',
trainable=True
)
if self.lr_multiplier != 1:
K.set_value(self.trans, K.eval(self.trans) / self.lr_multiplier)
self.trans = self.lr_multiplier * self.trans
def compute_mask(self, inputs, mask=None):
return None
def call(self, inputs, mask=None):
if mask is not None:
mask = K.cast(mask, K.floatx())
return sequence_masking(inputs, mask, 1, 1)
def target_score(self, y_true, y_pred):
point_score = tf.einsum('bni,bni->b', y_true, y_pred)
trans_score = tf.einsum(
'bni,ij,bnj->b', y_true[:, :-1], self.trans, y_true[:, 1:]
)
return point_score + trans_score
def log_norm_step(self, inputs, states):
inputs, mask = inputs[:, :-1], inputs[:, -1:]
states = K.expand_dims(states[0], 2)
trans = K.expand_dims(self.trans, 0)
outputs = tf.reduce_logsumexp(
states + trans, 1
)
outputs = outputs + inputs
outputs = mask * outputs + (1 - mask) * states[:, :, 0]
return outputs, [outputs]
def dense_loss(self, y_true, y_pred):
mask = K.all(K.greater(y_pred, -1e6), axis=2, keepdims=True)
mask = K.cast(mask, K.floatx())
y_true, y_pred = y_true * mask, y_pred * mask
target_score = self.target_score(y_true, y_pred)
init_states = [y_pred[:, 0]]
y_pred = K.concatenate([y_pred, mask], axis=2)
input_length = K.int_shape(y_pred[:, 1:])[1]
log_norm, _, _ = K.rnn(
self.log_norm_step,
y_pred[:, 1:],
init_states,
input_length=input_length
)
log_norm = tf.reduce_logsumexp(log_norm, 1)
return log_norm - target_score
def sparse_loss(self, y_true, y_pred):
y_true = K.reshape(y_true, K.shape(y_pred)[:-1])
y_true = K.cast(y_true, 'int32')
y_true = K.one_hot(y_true, K.shape(self.trans)[0])
return self.dense_loss(y_true, y_pred)
def dense_accuracy(self, y_true, y_pred):
y_true = K.argmax(y_true, 2)
return self.sparse_accuracy(y_true, y_pred)
def sparse_accuracy(self, y_true, y_pred):
mask = K.all(K.greater(y_pred, -1e6), axis=2)
mask = K.cast(mask, K.floatx())
y_true = K.reshape(y_true, K.shape(y_pred)[:-1])
y_true = K.cast(y_true, 'int32')
y_pred = K.cast(K.argmax(y_pred, 2), 'int32')
isequal = K.cast(K.equal(y_true, y_pred), K.floatx())
return K.sum(isequal * mask) / K.sum(mask)
def get_config(self):
config = {
'lr_multiplier': self.lr_multiplier,
}
base_config = super(ConditionalRandomField, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class MaximumEntropyMarkovModel(Layer):
def __init__(self, lr_multiplier=1, hidden_dim=None, **kwargs):
super(MaximumEntropyMarkovModel, self).__init__(**kwargs)
self.lr_multiplier = lr_multiplier
self.hidden_dim = hidden_dim
@integerize_shape
def build(self, input_shape):
super(MaximumEntropyMarkovModel, self).build(input_shape)
output_dim = input_shape[-1]
if self.hidden_dim is None:
self.trans = self.add_weight(
name='trans',
shape=(output_dim, output_dim),
initializer='glorot_uniform',
trainable=True
)
if self.lr_multiplier != 1:
K.set_value(self.trans, K.eval(self.trans) / self.lr_multiplier)
self.trans = self.lr_multiplier * self.trans
else:
self.l_trans = self.add_weight(
name='l_trans',
shape=(output_dim, self.hidden_dim),
initializer='glorot_uniform',
trainable=True
)
self.r_trans = self.add_weight(
name='r_trans',
shape=(output_dim, self.hidden_dim),
initializer='glorot_uniform',
trainable=True
)
if self.lr_multiplier != 1:
K.set_value(
self.l_trans,
K.eval(self.l_trans) / self.lr_multiplier
)
self.l_trans = self.lr_multiplier * self.l_trans
K.set_value(
self.r_trans,
K.eval(self.r_trans) / self.lr_multiplier
)
self.r_trans = self.lr_multiplier * self.r_trans
def compute_mask(self, inputs, mask=None):
return None
def call(self, inputs, mask=None):
if mask is not None:
mask = K.cast(mask, K.floatx())
return sequence_masking(inputs, mask, 1, 1)
def reverse_sequence(self, inputs, mask=None):
if mask is None:
return [x[:, ::-1] for x in inputs]
else:
length = K.cast(K.sum(mask, 1), 'int32')
return [tf.reverse_sequence(x, length, seq_axis=1) for x in inputs]
def basic_loss(self, y_true, y_pred, go_backwards=False):
mask = K.all(K.greater(y_pred, -1e6), axis=2)
mask = K.cast(mask, K.floatx())
y_true = K.reshape(y_true, K.shape(y_pred)[:-1])
y_true = K.cast(y_true, 'int32')
if self.hidden_dim is None:
if go_backwards:
y_true, y_pred = self.reverse_sequence([y_true, y_pred], mask)
trans = K.transpose(self.trans)
else:
trans = self.trans
histoty = K.gather(trans, y_true)
else:
if go_backwards:
y_true, y_pred = self.reverse_sequence([y_true, y_pred], mask)
r_trans, l_trans = self.l_trans, self.r_trans
else:
l_trans, r_trans = self.l_trans, self.r_trans
histoty = K.gather(l_trans, y_true)
histoty = tf.einsum('bnd,kd->bnk', histoty, r_trans)
histoty = K.concatenate([y_pred[:, :1], histoty[:, :-1]], 1)
y_pred = (y_pred + histoty) / 2
loss = K.sparse_categorical_crossentropy(
y_true, y_pred, from_logits=True
)
return K.sum(loss * mask) / K.sum(mask)
def sparse_loss(self, y_true, y_pred):
loss = self.basic_loss(y_true, y_pred, False)
loss = loss + self.basic_loss(y_true, y_pred, True)
return loss / 2
def dense_loss(self, y_true, y_pred):
y_true = K.argmax(y_true, 2)
return self.sparse_loss(y_true, y_pred)
def basic_accuracy(self, y_true, y_pred, go_backwards=False):
mask = K.all(K.greater(y_pred, -1e6), axis=2)
mask = K.cast(mask, K.floatx())
y_true = K.reshape(y_true, K.shape(y_pred)[:-1])
y_true = K.cast(y_true, 'int32')
if self.hidden_dim is None:
if go_backwards:
y_true, y_pred = self.reverse_sequence([y_true, y_pred], mask)
trans = K.transpose(self.trans)
else:
trans = self.trans
histoty = K.gather(trans, y_true)
else:
if go_backwards:
y_true, y_pred = self.reverse_sequence([y_true, y_pred], mask)
r_trans, l_trans = self.l_trans, self.r_trans
else:
l_trans, r_trans = self.l_trans, self.r_trans
histoty = K.gather(l_trans, y_true)
histoty = tf.einsum('bnd,kd->bnk', histoty, r_trans)
histoty = K.concatenate([y_pred[:, :1], histoty[:, :-1]], 1)
y_pred = (y_pred + histoty) / 2
y_pred = K.cast(K.argmax(y_pred, 2), 'int32')
isequal = K.cast(K.equal(y_true, y_pred), K.floatx())
return K.sum(isequal * mask) / K.sum(mask)
def sparse_accuracy(self, y_true, y_pred):
accuracy = self.basic_accuracy(y_true, y_pred, False)
accuracy = accuracy + self.basic_accuracy(y_true, y_pred, True)
return accuracy / 2
def dense_accuracy(self, y_true, y_pred):
y_true = K.argmax(y_true, 2)
return self.sparse_accuracy(y_true, y_pred)
def get_config(self):
config = {
'lr_multiplier': self.lr_multiplier,
'hidden_dim': self.hidden_dim,
}
base_config = super(MaximumEntropyMarkovModel, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Loss(Layer):
def __init__(self, output_axis=None, **kwargs):
super(Loss, self).__init__(**kwargs)
self.output_axis = output_axis
def call(self, inputs, mask=None):
loss = self.compute_loss(inputs, mask)
self.add_loss(loss)
if self.output_axis is None:
return inputs
elif isinstance(self.output_axis, list):
return [inputs[i] for i in self.output_axis]
else:
return inputs[self.output_axis]
def compute_loss(self, inputs, mask=None):
raise NotImplementedError
def compute_output_shape(self, input_shape):
if self.output_axis is None:
return input_shape
elif isinstance(self.output_axis, list):
return [input_shape[i] for i in self.output_axis]
else:
return input_shape[self.output_axis]
def get_config(self):
config = {
'output_axis': self.output_axis,
}
base_config = super(Loss, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
custom_objects = {
'Embedding': Embedding,
'BiasAdd': BiasAdd,
'MultiHeadAttention': MultiHeadAttention,
'LayerNormalization': LayerNormalization,
'PositionEmbedding': PositionEmbedding,
'RelativePositionEmbedding': RelativePositionEmbedding,
'RelativePositionEmbeddingT5': RelativePositionEmbeddingT5,
'FeedForward': FeedForward,
'ConditionalRandomField': ConditionalRandomField,
'MaximumEntropyMarkovModel': MaximumEntropyMarkovModel,
'Loss': Loss,
}
keras.utils.get_custom_objects().update(custom_objects)
| true | true |
1c34e66df50c012089ad360f1221b541925a79a1 | 2,194 | py | Python | aoc2018/d08-1.py | jbudynek/advent-of-code | 16ab71b110e9766b445bce3d3172b11d421b2f75 | [
"CC0-1.0"
] | null | null | null | aoc2018/d08-1.py | jbudynek/advent-of-code | 16ab71b110e9766b445bce3d3172b11d421b2f75 | [
"CC0-1.0"
] | null | null | null | aoc2018/d08-1.py | jbudynek/advent-of-code | 16ab71b110e9766b445bce3d3172b11d421b2f75 | [
"CC0-1.0"
] | null | null | null | # coding: utf-8
import numpy as np
import re
import copy
import sys
import networkx as nx
#import matplotlib.pyplot as plt
#import operator
#from collections import defaultdict
from collections import Counter
import time
node_to_metadata = {}
def parse_node(idx, iii, tree, cur_node, DBG=True):
    """Recursively parse one node of the AoC 2018 day-8 tree.

    `iii` is the flat list of integers (child count, metadata count,
    children..., metadata...). The node is named `cur_node` and added to
    `tree`; its metadata list is stored in the global `node_to_metadata`.
    Returns (node_name, next_index, next_available_name).
    """
    name = cur_node
    next_name = chr(ord(cur_node) + 1)
    tree.add_node(name)
    n_children, n_meta = iii[idx], iii[idx + 1]
    idx += 2
    for _ in range(n_children):
        child, idx, next_name = parse_node(idx, iii, tree, next_name, DBG)
        tree.add_edge(name, child)
    node_to_metadata[name] = list(iii[idx:idx + n_meta])
    idx += n_meta
    return (name, idx, next_name)
def function(ii, DBG=True):
    """Solve AoC 2018 day 8 part 1: sum of every node's metadata entries.

    `ii` is the whole puzzle input as one whitespace-separated string of
    integers. Returns the metadata total as an int.
    """
    iii = [int(ns) for ns in ii.split()]
    if DBG:
        print(iii)
    # BUGFIX: reset the global metadata store before parsing, otherwise a
    # second call (e.g. running the sample test before the real input)
    # would accumulate metadata from the previous input.
    node_to_metadata.clear()
    tree = nx.DiGraph()
    root, idx, cur_node = parse_node(0, iii, tree, 'A', DBG)
    if DBG:
        print(str(tree.edges))
    # Total of all metadata entries across every node.
    return sum(sum(mm) for mm in node_to_metadata.values())
def test(cc=None, expected=None, DBG=False):
    """Run `function` on input `cc`, compare against `expected`, and print
    the result plus wall-clock timing."""
    begin_ms = int(round(time.time() * 1000))
    result = str(function(cc, DBG))
    end_ms = int(round(time.time() * 1000))
    expected = str(expected)
    flag = (result == expected)
    print("*** " + str(cc) + " *** -> Result = " + str(result),
          " -> success = " + str(flag) + " -> expected " + expected)
    elapsed = end_ms - begin_ms
    print(elapsed, "ms", int(elapsed / 1000), "s", int(elapsed / 1000 / 60), "min")
# Sample input from the puzzle statement (expected answer: 138).
t1="2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2"
#tt1 = t1.splitlines()
#test(t1,138,True) #
#sys.exit()
# Read the real puzzle input as one whitespace-separated string of integers.
INPUT_FILE="input-d08.txt"
f = open(INPUT_FILE, "r")
contents = f.read()
#puzzle_input = contents.splitlines()
puzzle_input = contents.rstrip()
f.close()
# Solve part 1 (sum of all metadata entries) and print the answer.
ret = function(puzzle_input,True) #
print(ret)
| 24.931818 | 130 | 0.618961 |
import numpy as np
import re
import copy
import sys
import networkx as nx
from collections import Counter
import time
node_to_metadata = {}
def parse_node(idx, iii, tree, cur_node, DBG=True):
this_node_name = cur_node
cur_node = chr(ord(cur_node) + 1)
tree.add_node(this_node_name)
nb_children = iii[idx]
idx = idx + 1
nb_metadata = iii[idx]
idx = idx + 1
for k in range(nb_children):
(node,idx,cur_node) = parse_node(idx,iii,tree, cur_node, DBG)
tree.add_edge(this_node_name,node)
metadata = []
for k in range(nb_metadata):
metadata.append(iii[idx])
idx = idx + 1
node_to_metadata[this_node_name] = metadata.copy()
return (this_node_name, idx,cur_node)
def function(ii, DBG = True):
ss = ii.split()
iii = [int(ns) for ns in ss]
if(DBG):print(iii)
idx = 0
tree = nx.DiGraph()
cur_node = 'A'
(root,idx,cur_node) = parse_node(idx, iii, tree, cur_node, DBG)
if(DBG):print(str(tree.edges))
total_metadata = 0
for m in node_to_metadata:
mm = node_to_metadata[m]
for i in mm:
total_metadata = total_metadata+i
return total_metadata
def test(cc=None, expected=None, DBG = False):
start_millis = int(round(time.time() * 1000))
result = str(function(cc,DBG))
stop_millis = int(round(time.time() * 1000))
expected = str(expected)
flag = (result == expected)
print("*** "+str(cc) + " *** -> Result = "+str(result), " -> success = "+ str(flag) + " -> expected " + expected)
print((stop_millis-start_millis),"ms",int((stop_millis-start_millis)/1000),"s",int((stop_millis-start_millis)/1000/60),"min")
t1="2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2"
INPUT_FILE="input-d08.txt"
f = open(INPUT_FILE, "r")
contents = f.read()
puzzle_input = contents.rstrip()
f.close()
ret = function(puzzle_input,True)
print(ret)
| true | true |
1c34e6a7c51538aa31a661dca1ae6805023e4aef | 2,525 | py | Python | problem_visual3.py | ligongzzz/MCM2020_Code | 7e5e6f9a6b09b3eb7e21774535c977ba6e974d79 | [
"MIT"
] | null | null | null | problem_visual3.py | ligongzzz/MCM2020_Code | 7e5e6f9a6b09b3eb7e21774535c977ba6e974d79 | [
"MIT"
] | null | null | null | problem_visual3.py | ligongzzz/MCM2020_Code | 7e5e6f9a6b09b3eb7e21774535c977ba6e974d79 | [
"MIT"
] | null | null | null | # Plays of the season.
import numpy as np
import cv2
import csv
import matplotlib.pyplot as plt
import matlab.engine
plt.rc('font', family='Times New Roman')
# Read the csv file.
csv_reader = csv.reader(open('./data/passingevents.csv'))
# The first match.(First match and self passing only.)
passing_list = [row for row in csv_reader if row[1] == 'Huskies']
# Fix the time of 2H.
for p in passing_list:
if p[4] == '2H':
p[5] = str(float(p[5])+2700)
passing_cnt = len(passing_list)
# Count the player's average pos in a single play.
player_map = {}
t = 0
pass_i = 0
def add_to_player_map(player: str, x, y):
'''
A function to add the position to the player.
'''
if player_map.get(player) is None:
player_map[player] = {'x': x, 'y': y, 'cnt': 1}
else:
player_map[player]['x'] += x
player_map[player]['y'] += y
player_map[player]['cnt'] += 1
center_x = []
center_y = []
ds = []
spd = []
while pass_i < len(passing_list):
t += 1
dx_sum = 0.0
dy_sum = 0.0
player_map.clear()
while pass_i < len(passing_list) and float(passing_list[pass_i][0]) <= t:
cur_pass = passing_list[pass_i]
add_to_player_map(cur_pass[2], float(cur_pass[7]), float(cur_pass[8]))
add_to_player_map(cur_pass[3], float(cur_pass[9]), float(cur_pass[10]))
dx_sum += abs(float(cur_pass[9]) - float(cur_pass[7]))
dy_sum += abs(float(cur_pass[10]) - float(cur_pass[8]))
pass_i += 1
# Caculate the center x.
x_sum = 0
y_sum = 0
d_sum = 0
for k, v in player_map.items():
x_sum += v['x'] / v['cnt']
y_sum += v['y'] / v['cnt']
center_x.append(x_sum / len(player_map))
center_y.append(y_sum / len(player_map))
for k, v in player_map.items():
d_sum += (v['x'] / v['cnt'] - center_x[-1]) ** 2 + \
(v['y'] / v['cnt'] - center_y[-1]) ** 2
ds.append(d_sum / len(player_map))
spd.append(dx_sum / dy_sum)
# Plot
plt.plot(center_x, color='blue', linewidth=1.0)
plt.ylim((0, 100))
plt.xlabel('Match ID')
plt.ylabel('x')
plt.title('<X> pos')
plt.show()
plt.plot(center_y, color='blue', linewidth=1.0)
plt.ylim((0, 100))
plt.xlabel('Match ID')
plt.ylabel('y')
plt.title('<Y> pos')
plt.show()
plt.plot(ds, color='blue', linewidth=1.0)
plt.ylim((0, 1300))
plt.xlabel('Match ID')
plt.ylabel('ds')
plt.title('D')
plt.show()
plt.plot(spd, color='blue', linewidth=1.0)
plt.ylim((0, 1.0))
plt.xlabel('Match ID')
plt.ylabel('speed')
plt.title('Speed')
plt.show()
| 24.278846 | 79 | 0.610693 |
import numpy as np
import cv2
import csv
import matplotlib.pyplot as plt
import matlab.engine
plt.rc('font', family='Times New Roman')
csv_reader = csv.reader(open('./data/passingevents.csv'))
passing_list = [row for row in csv_reader if row[1] == 'Huskies']
for p in passing_list:
if p[4] == '2H':
p[5] = str(float(p[5])+2700)
passing_cnt = len(passing_list)
player_map = {}
t = 0
pass_i = 0
def add_to_player_map(player: str, x, y):
if player_map.get(player) is None:
player_map[player] = {'x': x, 'y': y, 'cnt': 1}
else:
player_map[player]['x'] += x
player_map[player]['y'] += y
player_map[player]['cnt'] += 1
center_x = []
center_y = []
ds = []
spd = []
while pass_i < len(passing_list):
t += 1
dx_sum = 0.0
dy_sum = 0.0
player_map.clear()
while pass_i < len(passing_list) and float(passing_list[pass_i][0]) <= t:
cur_pass = passing_list[pass_i]
add_to_player_map(cur_pass[2], float(cur_pass[7]), float(cur_pass[8]))
add_to_player_map(cur_pass[3], float(cur_pass[9]), float(cur_pass[10]))
dx_sum += abs(float(cur_pass[9]) - float(cur_pass[7]))
dy_sum += abs(float(cur_pass[10]) - float(cur_pass[8]))
pass_i += 1
# Caculate the center x.
x_sum = 0
y_sum = 0
d_sum = 0
for k, v in player_map.items():
x_sum += v['x'] / v['cnt']
y_sum += v['y'] / v['cnt']
center_x.append(x_sum / len(player_map))
center_y.append(y_sum / len(player_map))
for k, v in player_map.items():
d_sum += (v['x'] / v['cnt'] - center_x[-1]) ** 2 + \
(v['y'] / v['cnt'] - center_y[-1]) ** 2
ds.append(d_sum / len(player_map))
spd.append(dx_sum / dy_sum)
# Plot
plt.plot(center_x, color='blue', linewidth=1.0)
plt.ylim((0, 100))
plt.xlabel('Match ID')
plt.ylabel('x')
plt.title('<X> pos')
plt.show()
plt.plot(center_y, color='blue', linewidth=1.0)
plt.ylim((0, 100))
plt.xlabel('Match ID')
plt.ylabel('y')
plt.title('<Y> pos')
plt.show()
plt.plot(ds, color='blue', linewidth=1.0)
plt.ylim((0, 1300))
plt.xlabel('Match ID')
plt.ylabel('ds')
plt.title('D')
plt.show()
plt.plot(spd, color='blue', linewidth=1.0)
plt.ylim((0, 1.0))
plt.xlabel('Match ID')
plt.ylabel('speed')
plt.title('Speed')
plt.show()
| true | true |
1c34e6fe8f5ecd00504b6dfac72ae815c3fd51fe | 10,991 | py | Python | pg_fts/migrations.py | dvdmgl/django-pg-fts | 38e2353dc1eec3ecbd10f6c8624c30ebd779cf8a | [
"BSD-2-Clause-FreeBSD"
] | 22 | 2015-01-06T08:17:28.000Z | 2021-10-03T11:41:22.000Z | pg_fts/migrations.py | dvdmgl/django-pg-fts | 38e2353dc1eec3ecbd10f6c8624c30ebd779cf8a | [
"BSD-2-Clause-FreeBSD"
] | 8 | 2015-01-06T09:21:56.000Z | 2019-01-19T17:57:16.000Z | pg_fts/migrations.py | dvdmgl/django-pg-fts | 38e2353dc1eec3ecbd10f6c8624c30ebd779cf8a | [
"BSD-2-Clause-FreeBSD"
] | 19 | 2015-01-24T10:09:35.000Z | 2019-05-15T18:15:55.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db.migrations.operations.base import Operation
from pg_fts.fields import TSVectorField
__all__ = ('CreateFTSIndexOperation', 'CreateFTSTriggerOperation',
'DeleteFTSIndexOperation', 'DeleteFTSTriggerOperation',
'UpdateVectorOperation')
"""
pg_fts.migrations
-----------------
Migrations module for `pg_fts.fields.TSVectorField`
@author: David Miguel
"""
class PgFtsSQL(object):
sql_delete_trigger = ("DROP TRIGGER {model}_{fts_name}_update ON \"{model}\";"
"DROP FUNCTION {model}_{fts_name}_update()")
sql_create_trigger = """
CREATE FUNCTION {model}_{fts_name}_update() RETURNS TRIGGER AS $$
BEGIN
IF TG_OP = 'INSERT' THEN
new.{fts_name} = {vectors};
END IF;
IF TG_OP = 'UPDATE' THEN
IF {fts_fields} THEN
new.{fts_name} = {vectors};
ELSE
new.{fts_name} = old.{fts_name};
END IF;
END IF;
RETURN NEW;
END;
$$ LANGUAGE 'plpgsql';
CREATE TRIGGER {model}_{fts_name}_update BEFORE INSERT OR UPDATE ON \"{model}\"
FOR EACH ROW EXECUTE PROCEDURE {model}_{fts_name}_update()"""
sql_create_index = ("CREATE INDEX {model}_{fts_name} ON \"{model}\" "
"USING {fts_index}({fts_name})")
sql_delete_index = 'DROP INDEX {model}_{fts_name}'
sql_update_vector = 'UPDATE \"{model}\" SET {vector} = {fields}'
def delete_trigger(self, model, field):
return self.sql_delete_trigger.format(
model=model._meta.db_table,
fts_name=field.get_attname_column()[1]
)
def create_fts_trigger(self, model, vector_field):
fields = []
vectors = []
if not isinstance(vector_field, TSVectorField):
raise AttributeError
try:
dict_field = model._meta.get_field(vector_field.dictionary)
dictionary = "NEW.%s::regconfig" % (
dict_field.get_attname_column()[1])
fields.append('NEW.{0} <> OLD.{0}'.format(vector_field.dictionary))
except:
dictionary = "'%s'" % vector_field.dictionary
for field, rank in vector_field._get_fields_and_ranks():
fields.append('NEW.{0} <> OLD.{0}'.format(
field.get_attname_column()[1]))
vectors.append(self._get_vector_for_field(field, rank, dictionary))
return self.sql_create_trigger.format(
model=model._meta.db_table,
fts_name=vector_field.get_attname_column()[1],
fts_fields=' OR '.join(fields),
vectors=' || '.join(vectors)
)
def update_vector(self, model, vector_field):
vectors = []
sql_fn = "setweight(to_tsvector(%s, COALESCE(%s, '')), '%s')"
if not isinstance(vector_field, TSVectorField):
raise AttributeError
try:
dict_field = model._meta.get_field(vector_field.dictionary)
dictionary = "%s::regconfig" % (
dict_field.get_attname_column()[1])
except:
dictionary = "'%s'" % vector_field.dictionary
for field, rank in vector_field._get_fields_and_ranks():
vectors.append(sql_fn % (
dictionary, field.get_attname_column()[1], rank))
return self.sql_update_vector.format(
model=model._meta.db_table,
vector=vector_field.get_attname_column()[1],
fields=' || '.join(vectors)
)
def _get_vector_for_field(self, field, weight, dictionary):
return "setweight(to_tsvector(%s, COALESCE(NEW.%s, '')), '%s')" % (
dictionary, field.get_attname_column()[1], weight
)
def create_index(self, model, vector_field, index):
return self.sql_create_index.format(
model=model._meta.db_table,
fts_index=index,
fts_name=vector_field.get_attname_column()[1]
)
def delete_index(self, model, vector_field):
return self.sql_delete_index.format(
model=model._meta.db_table,
fts_name=vector_field.get_attname_column()[1]
)
class BaseVectorOperation(Operation):
"""
Base migrations class
:param name: The Model name
:param fts_vector: The :class:`~pg_fts.fields.TSVectorField` field name
"""
reduces_to_sql = True
reversible = True
sql_creator = PgFtsSQL()
forward_fn = None
backward_fn = None
def __init__(self, name, fts_vector):
self.name = name
self.fts_vector = fts_vector
def state_forwards(self, app_label, state):
pass
def database_forwards(self, app_label, schema_editor, from_state,
to_state):
model = from_state.render().get_model(app_label, self.name)
vector_field = model._meta.get_field(self.fts_vector)
schema_editor.execute(self.forward_fn(
model,
vector_field
))
def database_backwards(self, app_label, schema_editor, from_state,
to_state):
model = from_state.render().get_model(app_label, self.name)
vector_field = model._meta.get_field(self.fts_vector)
schema_editor.execute(self.backward_fn(
model,
vector_field
))
def describe(self):
return "Create trigger `%s` for model `%s`" % (
self.fts_vector, self.name
)
class UpdateVectorOperation(BaseVectorOperation):
"""
Updates changes to :class:`~pg_fts.fields.TSVectorField` for existing
models
:param name: The Model name
:param fts_vector: The :class:`~pg_fts.fields.TSVectorField` field name
"""
def __init__(self, name, fts_vector):
self.name = name
self.fts_vector = fts_vector
self.forward_fn = self.sql_creator.update_vector
def database_backwards(self, app_label, schema_editor, from_state,
to_state):
pass
def describe(self):
return "Create trigger `%s` for model `%s`" % (
self.fts_vector, self.name
)
class CreateFTSTriggerOperation(BaseVectorOperation):
"""
Creates a :pg_docs:`custom trigger <textsearch-features.html#TEXTSEARCH-UPDATE-TRIGGERS>`
for updating the :class:`~pg_fts.fields.TSVectorField` with rank values
:param name: The Model name
:param fts_vector: The :class:`~pg_fts.fields.TSVectorField` field name
"""
def __init__(self, name, fts_vector):
self.name = name
self.fts_vector = fts_vector
self.forward_fn = self.sql_creator.create_fts_trigger
self.backward_fn = self.sql_creator.delete_trigger
def database_backwards(self, app_label, schema_editor, from_state,
to_state):
model = from_state.render().get_model(app_label, self.name)
vector_field = model._meta.get_field(self.fts_vector)
schema_editor.execute(self.backward_fn(
model,
vector_field
))
def describe(self):
return "Create trigger `%s` for model `%s`" % (
self.fts_vector, self.name
)
class DeleteFTSTriggerOperation(BaseVectorOperation):
"""
Deletes trigger generated by :class:`~pg_fts.migrations.CreateFTSTriggerOperation`
:param name: The Model name
:param fts_vector: The :class:`~pg_fts.fields.TSVectorField` field name
"""
def __init__(self, name, fts_vector):
self.name = name
self.fts_vector = fts_vector
self.forward_fn = self.sql_creator.delete_trigger
self.backward_fn = self.sql_creator.create_fts_trigger
def describe(self):
return "Delete trigger `%s` for model `%s`" % (
self.fts_vector, self.name
)
class CreateFTSIndexOperation(BaseVectorOperation):
"""
Creates a index for :class:`~pg_fts.fields.TSVectorField`
:param name: The Model name
:param fts_vector: The :class:`~pg_fts.fields.TSVectorField` field name
:param index: The type of index 'gin' or 'gist' for more information go to
:pg_docs:`PostgreSQL documentation 12.9. GiST and GIN Index Types
<textsearch-indexes.html>`
"""
# http://www.postgresql.org/docs/9.3/static/textsearch-indexes.html
INDEXS = ('gin', 'gist')
def __init__(self, name, fts_vector, index):
assert index in self.INDEXS, "Invalid index '%s'. Options %s " % (
index, ', '.join(self.INDEXS))
self.name = name
self.fts_vector = fts_vector
self.index = index
def state_forwards(self, app_label, state):
pass
def database_forwards(self, app_label, schema_editor, from_state,
to_state):
# print(dir(from_state))
# django 1.8 doesn't have ProjectState.render()
model = from_state.render().get_model(app_label, self.name)
vector_field = model._meta.get_field(self.fts_vector)
if not isinstance(vector_field, TSVectorField):
raise AttributeError
schema_editor.execute(self.sql_creator.create_index(
model, vector_field, self.index
))
def database_backwards(self, app_label, schema_editor, from_state,
to_state):
model = from_state.render().get_model(app_label, self.name)
vector_field = model._meta.get_field(self.fts_vector)
schema_editor.execute(self.sql_creator.delete_index(
model,
vector_field
))
def describe(self):
return "Create %s index `%s` for model `%s`" % (
self.index, self.fts_vector, self.name
)
class DeleteFTSIndexOperation(CreateFTSIndexOperation):
"""
Removes index created by :class:`~pg_fts.migrations.CreateFTSIndexOperation`
:param name: The Model name
:param fts_vector: The :class:`~pg_fts.fields.TSVectorField` field name
:param index: The type of index 'gin' or 'gist' for more information go to
"""
def database_forwards(self, app_label, schema_editor, from_state,
to_state):
model = from_state.render().get_model(app_label, self.name)
vector_field = model._meta.get_field(self.fts_vector)
schema_editor.execute(self.sql_creator.delete_index(
model,
vector_field
))
def database_backwards(self, app_label, schema_editor, from_state,
to_state):
model = from_state.render().get_model(app_label, self.name)
vector_field = model._meta.get_field(self.fts_vector)
if not isinstance(vector_field, TSVectorField):
raise AttributeError
schema_editor.execute(self.sql_creator.create_index(
model, vector_field, self.index))
def describe(self):
return "Delete %s index `%s` for model `%s`" % (
self.index, self.fts_vector, self.name
)
| 32.137427 | 93 | 0.628332 |
from __future__ import unicode_literals
from django.db.migrations.operations.base import Operation
from pg_fts.fields import TSVectorField
__all__ = ('CreateFTSIndexOperation', 'CreateFTSTriggerOperation',
'DeleteFTSIndexOperation', 'DeleteFTSTriggerOperation',
'UpdateVectorOperation')
class PgFtsSQL(object):
sql_delete_trigger = ("DROP TRIGGER {model}_{fts_name}_update ON \"{model}\";"
"DROP FUNCTION {model}_{fts_name}_update()")
sql_create_trigger = """
CREATE FUNCTION {model}_{fts_name}_update() RETURNS TRIGGER AS $$
BEGIN
IF TG_OP = 'INSERT' THEN
new.{fts_name} = {vectors};
END IF;
IF TG_OP = 'UPDATE' THEN
IF {fts_fields} THEN
new.{fts_name} = {vectors};
ELSE
new.{fts_name} = old.{fts_name};
END IF;
END IF;
RETURN NEW;
END;
$$ LANGUAGE 'plpgsql';
CREATE TRIGGER {model}_{fts_name}_update BEFORE INSERT OR UPDATE ON \"{model}\"
FOR EACH ROW EXECUTE PROCEDURE {model}_{fts_name}_update()"""
sql_create_index = ("CREATE INDEX {model}_{fts_name} ON \"{model}\" "
"USING {fts_index}({fts_name})")
sql_delete_index = 'DROP INDEX {model}_{fts_name}'
sql_update_vector = 'UPDATE \"{model}\" SET {vector} = {fields}'
def delete_trigger(self, model, field):
return self.sql_delete_trigger.format(
model=model._meta.db_table,
fts_name=field.get_attname_column()[1]
)
def create_fts_trigger(self, model, vector_field):
fields = []
vectors = []
if not isinstance(vector_field, TSVectorField):
raise AttributeError
try:
dict_field = model._meta.get_field(vector_field.dictionary)
dictionary = "NEW.%s::regconfig" % (
dict_field.get_attname_column()[1])
fields.append('NEW.{0} <> OLD.{0}'.format(vector_field.dictionary))
except:
dictionary = "'%s'" % vector_field.dictionary
for field, rank in vector_field._get_fields_and_ranks():
fields.append('NEW.{0} <> OLD.{0}'.format(
field.get_attname_column()[1]))
vectors.append(self._get_vector_for_field(field, rank, dictionary))
return self.sql_create_trigger.format(
model=model._meta.db_table,
fts_name=vector_field.get_attname_column()[1],
fts_fields=' OR '.join(fields),
vectors=' || '.join(vectors)
)
def update_vector(self, model, vector_field):
vectors = []
sql_fn = "setweight(to_tsvector(%s, COALESCE(%s, '')), '%s')"
if not isinstance(vector_field, TSVectorField):
raise AttributeError
try:
dict_field = model._meta.get_field(vector_field.dictionary)
dictionary = "%s::regconfig" % (
dict_field.get_attname_column()[1])
except:
dictionary = "'%s'" % vector_field.dictionary
for field, rank in vector_field._get_fields_and_ranks():
vectors.append(sql_fn % (
dictionary, field.get_attname_column()[1], rank))
return self.sql_update_vector.format(
model=model._meta.db_table,
vector=vector_field.get_attname_column()[1],
fields=' || '.join(vectors)
)
def _get_vector_for_field(self, field, weight, dictionary):
return "setweight(to_tsvector(%s, COALESCE(NEW.%s, '')), '%s')" % (
dictionary, field.get_attname_column()[1], weight
)
def create_index(self, model, vector_field, index):
return self.sql_create_index.format(
model=model._meta.db_table,
fts_index=index,
fts_name=vector_field.get_attname_column()[1]
)
def delete_index(self, model, vector_field):
return self.sql_delete_index.format(
model=model._meta.db_table,
fts_name=vector_field.get_attname_column()[1]
)
class BaseVectorOperation(Operation):
reduces_to_sql = True
reversible = True
sql_creator = PgFtsSQL()
forward_fn = None
backward_fn = None
def __init__(self, name, fts_vector):
self.name = name
self.fts_vector = fts_vector
def state_forwards(self, app_label, state):
pass
def database_forwards(self, app_label, schema_editor, from_state,
to_state):
model = from_state.render().get_model(app_label, self.name)
vector_field = model._meta.get_field(self.fts_vector)
schema_editor.execute(self.forward_fn(
model,
vector_field
))
def database_backwards(self, app_label, schema_editor, from_state,
to_state):
model = from_state.render().get_model(app_label, self.name)
vector_field = model._meta.get_field(self.fts_vector)
schema_editor.execute(self.backward_fn(
model,
vector_field
))
def describe(self):
return "Create trigger `%s` for model `%s`" % (
self.fts_vector, self.name
)
class UpdateVectorOperation(BaseVectorOperation):
def __init__(self, name, fts_vector):
self.name = name
self.fts_vector = fts_vector
self.forward_fn = self.sql_creator.update_vector
def database_backwards(self, app_label, schema_editor, from_state,
to_state):
pass
def describe(self):
return "Create trigger `%s` for model `%s`" % (
self.fts_vector, self.name
)
class CreateFTSTriggerOperation(BaseVectorOperation):
def __init__(self, name, fts_vector):
self.name = name
self.fts_vector = fts_vector
self.forward_fn = self.sql_creator.create_fts_trigger
self.backward_fn = self.sql_creator.delete_trigger
def database_backwards(self, app_label, schema_editor, from_state,
to_state):
model = from_state.render().get_model(app_label, self.name)
vector_field = model._meta.get_field(self.fts_vector)
schema_editor.execute(self.backward_fn(
model,
vector_field
))
def describe(self):
return "Create trigger `%s` for model `%s`" % (
self.fts_vector, self.name
)
class DeleteFTSTriggerOperation(BaseVectorOperation):
def __init__(self, name, fts_vector):
self.name = name
self.fts_vector = fts_vector
self.forward_fn = self.sql_creator.delete_trigger
self.backward_fn = self.sql_creator.create_fts_trigger
def describe(self):
return "Delete trigger `%s` for model `%s`" % (
self.fts_vector, self.name
)
class CreateFTSIndexOperation(BaseVectorOperation):
INDEXS = ('gin', 'gist')
def __init__(self, name, fts_vector, index):
assert index in self.INDEXS, "Invalid index '%s'. Options %s " % (
index, ', '.join(self.INDEXS))
self.name = name
self.fts_vector = fts_vector
self.index = index
def state_forwards(self, app_label, state):
pass
def database_forwards(self, app_label, schema_editor, from_state,
to_state):
model = from_state.render().get_model(app_label, self.name)
vector_field = model._meta.get_field(self.fts_vector)
if not isinstance(vector_field, TSVectorField):
raise AttributeError
schema_editor.execute(self.sql_creator.create_index(
model, vector_field, self.index
))
def database_backwards(self, app_label, schema_editor, from_state,
to_state):
model = from_state.render().get_model(app_label, self.name)
vector_field = model._meta.get_field(self.fts_vector)
schema_editor.execute(self.sql_creator.delete_index(
model,
vector_field
))
def describe(self):
return "Create %s index `%s` for model `%s`" % (
self.index, self.fts_vector, self.name
)
class DeleteFTSIndexOperation(CreateFTSIndexOperation):
def database_forwards(self, app_label, schema_editor, from_state,
to_state):
model = from_state.render().get_model(app_label, self.name)
vector_field = model._meta.get_field(self.fts_vector)
schema_editor.execute(self.sql_creator.delete_index(
model,
vector_field
))
def database_backwards(self, app_label, schema_editor, from_state,
to_state):
model = from_state.render().get_model(app_label, self.name)
vector_field = model._meta.get_field(self.fts_vector)
if not isinstance(vector_field, TSVectorField):
raise AttributeError
schema_editor.execute(self.sql_creator.create_index(
model, vector_field, self.index))
def describe(self):
return "Delete %s index `%s` for model `%s`" % (
self.index, self.fts_vector, self.name
)
| true | true |
1c34e76a1364264b17273e5cfd2e40e011e851d5 | 3,041 | py | Python | python-flask/app.py | PujithaKurakula/BreastCancer-android-python-ml-app | ae1cd5b683a13e72169eda400322b3e17bb48bd9 | [
"Apache-2.0"
] | null | null | null | python-flask/app.py | PujithaKurakula/BreastCancer-android-python-ml-app | ae1cd5b683a13e72169eda400322b3e17bb48bd9 | [
"Apache-2.0"
] | null | null | null | python-flask/app.py | PujithaKurakula/BreastCancer-android-python-ml-app | ae1cd5b683a13e72169eda400322b3e17bb48bd9 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import pandas as pd
from flask import Flask, request, render_template
import pickle
app = Flask(__name__)
model = pickle.load(open('model.pkl', 'rb'))
@app.route('/')
def home():
return render_template('h.html')
@app.route('/detect')
def detect():
return render_template('index.html')
@app.route('/riskpred')
def riskpred():
return render_template('r.html')
@app.route('/predict',methods=['POST'])
def predict():
meantexture=request.json['meantexture']
meanperimeter=request.json['meanperimeter']
meansmoothness=request.json['meansmoothness']
meancompactness=request.json['meancompactness']
meanconcavity=request.json['meanconcavity']
meanconcavepoints=request.json['meanconcavepoints']
meansymmetry=request.json['meansymmetry']
meanfractaldimension=request.json['meanfractaldimension']
radiuserror=request.json['radiuserror']
textureerror=request.json['textureerror']
perimetererror=request.json['perimetererror']
areaerror=request.json['areaerror']
smoothnesserror=request.json['smoothnesserror']
compactnesserror=request.json['compactnesserror']
concavityerror=request.json['concavityerror']
concavepointserror=request.json['concavepointserror']
symmetryerror=request.json['symmetryerror']
fractaldimensionerror=request.json['fractaldimensionerror']
worstradius=request.json['worstradius']
worsttexture=request.json['worsttexture']
worstsmoothness=request.json['worstsmoothness']
worstcompactness=request.json['worstcompactness']
worstconcavity=request.json['worstconcavity']
worstconcavepoints=request.json['worstconcavepoints']
worstsymmetry=request.json['worstsymmetry']
worstfractaldimension=request.json['worstfractaldimension']
datavalues = [[meantexture,meanperimeter,meansmoothness,meancompactness,meanconcavity,
meanconcavepoints,meansymmetry,meanfractaldimension,
radiuserror,textureerror,perimetererror,areaerror,
smoothnesserror,compactnesserror,concavityerror,
concavepointserror,symmetryerror,fractaldimensionerror,
worstradius,worsttexture,worstsmoothness,worstcompactness,worstconcavity,
worstconcavepoints,worstsymmetry,worstfractaldimension ]]
data=pd.DataFrame(datavalues,columns=['meantexture','meanperimeter','meansmoothness', 'meancompactness', 'meanconcavity','meanconcavepoints', 'meansymmetry', 'meanfractaldimension','radiuserror', 'textureerror', 'perimetererror', 'areaerror','smoothnesserror', 'compactnesserror', 'concavityerror','concavepointserror', 'symmetryerror', 'fractaldimensionerror','worstradius', 'worsttexture','worstsmoothness', 'worstcompactness', 'worstconcavity','worstconcavepoints', 'worstsymmetry', 'worstfractaldimension'])
res=model.predict(data)
output=res[0]
if output == 0:
res_val = " breast cancer "
else:
res_val = "no breast cancer"
return res_val
if __name__ == "__main__":
app.run(debug=True)
| 39.493506 | 515 | 0.750082 | import numpy as np
import pandas as pd
from flask import Flask, request, render_template
import pickle
app = Flask(__name__)
model = pickle.load(open('model.pkl', 'rb'))
@app.route('/')
def home():
return render_template('h.html')
@app.route('/detect')
def detect():
return render_template('index.html')
@app.route('/riskpred')
def riskpred():
return render_template('r.html')
@app.route('/predict',methods=['POST'])
def predict():
meantexture=request.json['meantexture']
meanperimeter=request.json['meanperimeter']
meansmoothness=request.json['meansmoothness']
meancompactness=request.json['meancompactness']
meanconcavity=request.json['meanconcavity']
meanconcavepoints=request.json['meanconcavepoints']
meansymmetry=request.json['meansymmetry']
meanfractaldimension=request.json['meanfractaldimension']
radiuserror=request.json['radiuserror']
textureerror=request.json['textureerror']
perimetererror=request.json['perimetererror']
areaerror=request.json['areaerror']
smoothnesserror=request.json['smoothnesserror']
compactnesserror=request.json['compactnesserror']
concavityerror=request.json['concavityerror']
concavepointserror=request.json['concavepointserror']
symmetryerror=request.json['symmetryerror']
fractaldimensionerror=request.json['fractaldimensionerror']
worstradius=request.json['worstradius']
worsttexture=request.json['worsttexture']
worstsmoothness=request.json['worstsmoothness']
worstcompactness=request.json['worstcompactness']
worstconcavity=request.json['worstconcavity']
worstconcavepoints=request.json['worstconcavepoints']
worstsymmetry=request.json['worstsymmetry']
worstfractaldimension=request.json['worstfractaldimension']
datavalues = [[meantexture,meanperimeter,meansmoothness,meancompactness,meanconcavity,
meanconcavepoints,meansymmetry,meanfractaldimension,
radiuserror,textureerror,perimetererror,areaerror,
smoothnesserror,compactnesserror,concavityerror,
concavepointserror,symmetryerror,fractaldimensionerror,
worstradius,worsttexture,worstsmoothness,worstcompactness,worstconcavity,
worstconcavepoints,worstsymmetry,worstfractaldimension ]]
data=pd.DataFrame(datavalues,columns=['meantexture','meanperimeter','meansmoothness', 'meancompactness', 'meanconcavity','meanconcavepoints', 'meansymmetry', 'meanfractaldimension','radiuserror', 'textureerror', 'perimetererror', 'areaerror','smoothnesserror', 'compactnesserror', 'concavityerror','concavepointserror', 'symmetryerror', 'fractaldimensionerror','worstradius', 'worsttexture','worstsmoothness', 'worstcompactness', 'worstconcavity','worstconcavepoints', 'worstsymmetry', 'worstfractaldimension'])
res=model.predict(data)
output=res[0]
if output == 0:
res_val = " breast cancer "
else:
res_val = "no breast cancer"
return res_val
if __name__ == "__main__":
app.run(debug=True)
| true | true |
1c34e95615ee7ca2019d86635649ccddde55a195 | 12,777 | py | Python | PPO.py | jacob-heglund/ece598-xmase | 7345b713fbb6d4f84c795cd52778312058cd80f8 | [
"Apache-2.0"
] | null | null | null | PPO.py | jacob-heglund/ece598-xmase | 7345b713fbb6d4f84c795cd52778312058cd80f8 | [
"Apache-2.0"
] | null | null | null | PPO.py | jacob-heglund/ece598-xmase | 7345b713fbb6d4f84c795cd52778312058cd80f8 | [
"Apache-2.0"
] | null | null | null | from numpy.core.fromnumeric import trace
import torch
import torch.nn as nn
from torch.distributions import MultivariateNormal
from torch.distributions import Categorical
import pdb
################################## set device ##################################
print("============================================================================================")
# Pick the compute device once at import time: first CUDA GPU when
# available (with a cache flush), otherwise fall back to the CPU.
if torch.cuda.is_available():
    device = torch.device('cuda:0')
    torch.cuda.empty_cache()
    print("Device set to : " + str(torch.cuda.get_device_name(device)))
else:
    device = torch.device('cpu')
    print("Device set to : cpu")
################################## PPO Policy ##################################
class RolloutBuffer:
    """Temporary storage for one batch of on-policy rollout data.

    PPO is on-policy: transitions collected with the old policy are used
    for a few optimization epochs and then discarded via ``clear()``.
    All lists are kept in lockstep, i.e. index ``i`` of every list
    describes the same timestep.
    """

    def __init__(self):
        self.actions = []       # actions sampled from the old policy
        self.states = []        # observations the actions were taken in
        self.logprobs = []      # log pi_old(a|s) recorded at collection time
        self.rewards = []       # per-step rewards (appended by the training loop)
        self.is_terminals = []  # episode-termination flags per step

    def clear(self):
        """Empty every buffer in place.

        ``list.clear()`` (the modern idiom for the original ``del lst[:]``)
        mutates in place, so any external references to these lists stay valid.
        """
        self.actions.clear()
        self.states.clear()
        self.logprobs.clear()
        self.rewards.clear()
        self.is_terminals.clear()
class ActorCritic(nn.Module):
    """Actor/critic network pair used by PPO.

    Discrete path (the one exercised by this file): both heads are small
    CNNs over an image-like observation of shape
    ``(obs_size, obs_size, obs_dim)``; tensors are permuted to NCHW before
    the first convolution.  The conv stack (k=4/p=1 -> k=4/p=1 -> k=3/p=2)
    maps an ``s x s`` input back to ``s x s``, so the flattened feature
    width is ``64 * obs_size * obs_size``.

    Continuous path: the actor is an MLP producing the mean of a Gaussian
    whose diagonal covariance (``self.action_var``) is state-independent
    and is updated through :meth:`set_action_std`.
    """

    def __init__(self, obs_dim, obs_size, action_dim, has_continuous_action_space, action_std_init):
        super(ActorCritic, self).__init__()

        self.obs_dim = obs_dim
        self.shap_mode = True  # flag presumably read by SHAP explainability tooling -- TODO confirm
        self.has_continuous_action_space = has_continuous_action_space

        if has_continuous_action_space:
            self.action_dim = action_dim
            # State-independent diagonal covariance, initialised from the std.
            self.action_var = torch.full((action_dim,), action_std_init * action_std_init).to(device)

        if has_continuous_action_space:
            # NOTE(review): a Softmax over the Gaussian mean is unusual for a
            # continuous actor, and this branch feeds the raw (unpermuted)
            # observation into Linear layers -- confirm before using it.
            self.actor = nn.Sequential(
                nn.Linear(obs_dim, 64),
                nn.Tanh(),
                nn.Linear(64, 64),
                nn.Tanh(),
                nn.Linear(64, action_dim),
                nn.Softmax(dim=-1)
            )
        else:
            self.actor = nn.Sequential(
                nn.Conv2d(self.obs_dim, 32, kernel_size=4, padding=1),
                nn.ReLU(),
                nn.Conv2d(32, 64, kernel_size=4, padding=1),
                nn.ReLU(),
                nn.Conv2d(64, 64, kernel_size=3, padding=2),
                nn.ReLU(),
                nn.Flatten(),
                nn.Linear(64 * obs_size * obs_size, action_dim),
                nn.Softmax(dim=-1)
            )

        # The value head is the same CNN trunk regardless of action space.
        self.critic = nn.Sequential(
            nn.Conv2d(self.obs_dim, 32, kernel_size=4, padding=1),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=4, padding=1),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, padding=2),
            nn.ReLU(),
            nn.Flatten(),
            nn.Linear(64 * obs_size * obs_size, 1),
        )

    def set_action_std(self, new_action_std):
        """Reset the continuous-action exploration std; warn (no-op) otherwise."""
        if self.has_continuous_action_space:
            self.action_var = torch.full((self.action_dim,), new_action_std * new_action_std).to(device)
        else:
            print("--------------------------------------------------------------------------------------------")
            print("WARNING : Calling ActorCritic::set_action_std() on discrete action space policy")
            print("--------------------------------------------------------------------------------------------")

    def forward(self):
        # Sampling and evaluation go through act()/evaluate() instead.
        raise NotImplementedError

    def act(self, state):
        """Sample an action for ``state``.

        Returns a tuple ``(action, log_prob, action_probs)``, all detached
        from the graph (rollout collection must not build gradients).
        """
        if len(state.shape) == 3:
            state = torch.unsqueeze(state, 0)  # add the batch dimension

        if self.has_continuous_action_space:
            action_mean = self.actor(state)
            cov_mat = torch.diag(self.action_var).unsqueeze(dim=0)
            dist = MultivariateNormal(action_mean, cov_mat)
            # Bug fix: ``action_probs`` was previously undefined on this
            # branch, so the shared return below raised NameError.  Expose
            # the distribution mean as the third return value instead.
            action_probs = action_mean
        else:
            action_probs = self.actor(state.permute(0, 3, 1, 2))  # NHWC -> NCHW
            dist = Categorical(action_probs)

        action = dist.sample()
        action_logprob = dist.log_prob(action)

        return action.detach(), action_logprob.detach(), action_probs.detach()

    def evaluate(self, state, action):
        """Score ``action`` under the current policy.

        Returns ``(action_logprobs, state_values, dist_entropy)`` used by
        the PPO surrogate loss.
        """
        if len(state.shape) == 3:
            state = torch.unsqueeze(state, 0)

        if self.has_continuous_action_space:
            action_mean = self.actor(state)
            action_var = self.action_var.expand_as(action_mean)
            cov_mat = torch.diag_embed(action_var).to(device)
            dist = MultivariateNormal(action_mean, cov_mat)

            # For single-action environments, reshape to (batch, action_dim).
            if self.action_dim == 1:
                action = action.reshape(-1, self.action_dim)
        else:
            action_probs = self.actor(state.permute(0, 3, 1, 2))
            dist = Categorical(action_probs)

        action_logprobs = dist.log_prob(action)
        dist_entropy = dist.entropy()
        # NOTE: .squeeze() also drops the batch dim when batch size is 1;
        # callers are presumably expected to handle both shapes.
        state_values = self.critic(state.permute(0, 3, 1, 2)).squeeze()

        return action_logprobs, state_values, dist_entropy
class PPO:
def __init__(self, obs_dim, obs_size, action_dim, lr_actor, lr_critic, gamma, K_epochs, eps_clip, has_continuous_action_space, action_std_init=0.6):
self.has_continuous_action_space = has_continuous_action_space
if has_continuous_action_space:
self.action_std = action_std_init
self.gamma = gamma
self.eps_clip = eps_clip
self.K_epochs = K_epochs
self.buffer = RolloutBuffer()
self.policy = ActorCritic(obs_dim, obs_size, action_dim, has_continuous_action_space, action_std_init).to(device)
self.optimizer = torch.optim.Adam([
{'params': self.policy.actor.parameters(), 'lr': lr_actor},
{'params': self.policy.critic.parameters(), 'lr': lr_critic}
])
self.policy_old = ActorCritic(obs_dim, obs_size, action_dim, has_continuous_action_space, action_std_init).to(device)
self.policy_old.load_state_dict(self.policy.state_dict())
self.MseLoss = nn.MSELoss()
def set_action_std(self, new_action_std):
if self.has_continuous_action_space:
self.action_std = new_action_std
self.policy.set_action_std(new_action_std)
self.policy_old.set_action_std(new_action_std)
else:
print("--------------------------------------------------------------------------------------------")
print("WARNING : Calling PPO::set_action_std() on discrete action space policy")
print("--------------------------------------------------------------------------------------------")
def decay_action_std(self, action_std_decay_rate, min_action_std):
print("--------------------------------------------------------------------------------------------")
if self.has_continuous_action_space:
self.action_std = self.action_std - action_std_decay_rate
self.action_std = round(self.action_std, 4)
if (self.action_std <= min_action_std):
self.action_std = min_action_std
print("setting actor output action_std to min_action_std : ", self.action_std)
else:
print("setting actor output action_std to : ", self.action_std)
self.set_action_std(self.action_std)
else:
print("WARNING : Calling PPO::decay_action_std() on discrete action space policy")
print("--------------------------------------------------------------------------------------------")
def select_action(self, state, return_action_prob_shap=False, debug=False):
if self.has_continuous_action_space:
with torch.no_grad():
state = torch.FloatTensor(state).to(device)
action, action_logprob = self.policy_old.act(state)
self.buffer.states.append(state)
self.buffer.actions.append(action)
self.buffer.logprobs.append(action_logprob)
return action.detach().cpu().numpy().flatten()
else:
#TODO trying to debug the explainer issues. Apparently the outputs are the wrong size but SHAP doesn't tell you how you have to structure your model outputs so they work, so we're debugging here now
with torch.no_grad():
state = torch.FloatTensor(state).to(device)
action, action_logprob, action_probs = self.policy_old.act(state)
self.buffer.states.append(state)
self.buffer.actions.append(action)
self.buffer.logprobs.append(action_logprob)
if return_action_prob_shap:
return action_probs.cpu()
else:
return action.item()
def update(self):
# Monte Carlo estimate of returns
rewards = []
discounted_reward = 0
for reward, is_terminal in zip(reversed(self.buffer.rewards), reversed(self.buffer.is_terminals)):
if is_terminal:
discounted_reward = 0
discounted_reward = reward + (self.gamma * discounted_reward)
rewards.insert(0, discounted_reward)
# Normalizing the rewards
rewards = torch.tensor(rewards, dtype=torch.float32).to(device).squeeze()
rewards = (rewards - rewards.mean()) / (rewards.std() + 1e-7)
# convert list to tensor
#TODO creating tensors from list of np arrays is slow, go to 1 np array first
old_states = torch.squeeze(torch.stack(self.buffer.states, dim=0)).detach().to(device)
old_actions = torch.squeeze(torch.stack(self.buffer.actions, dim=0)).detach().to(device)
old_logprobs = torch.squeeze(torch.stack(self.buffer.logprobs, dim=0)).detach().to(device)
# Optimize policy for K epochs
for _ in range(self.K_epochs):
# Evaluating old actions and values
logprobs, state_values, dist_entropy = self.policy.evaluate(old_states, old_actions)
# match state_values tensor dimensions with rewards tensor
state_values = torch.squeeze(state_values)
# Finding the ratio (pi_theta / pi_theta__old)
ratios = torch.exp(logprobs - old_logprobs.detach())
# Finding Surrogate Loss
advantages = rewards - state_values.detach()
surr1 = ratios * advantages
surr2 = torch.clamp(ratios, 1-self.eps_clip, 1+self.eps_clip) * advantages
# final loss of clipped objective PPO
loss = -torch.min(surr1, surr2) + 0.5*self.MseLoss(state_values, rewards) - 0.01*dist_entropy
# take gradient step
self.optimizer.zero_grad()
loss.mean().backward()
self.optimizer.step()
# Copy new weights into old policy
self.policy_old.load_state_dict(self.policy.state_dict())
# clear buffer
self.buffer.clear()
def save(self, checkpoint_path):
torch.save(self.policy_old.state_dict(), checkpoint_path)
def load(self, checkpoint_path):
self.policy_old.load_state_dict(torch.load(checkpoint_path, map_location=lambda storage, loc: storage))
self.policy.load_state_dict(torch.load(checkpoint_path, map_location=lambda storage, loc: storage))
| 39.680124 | 210 | 0.543633 | from numpy.core.fromnumeric import trace
import torch
import torch.nn as nn
from torch.distributions import MultivariateNormal
from torch.distributions import Categorical
import pdb
td_init
self.gamma = gamma
self.eps_clip = eps_clip
self.K_epochs = K_epochs
self.buffer = RolloutBuffer()
self.policy = ActorCritic(obs_dim, obs_size, action_dim, has_continuous_action_space, action_std_init).to(device)
self.optimizer = torch.optim.Adam([
{'params': self.policy.actor.parameters(), 'lr': lr_actor},
{'params': self.policy.critic.parameters(), 'lr': lr_critic}
])
self.policy_old = ActorCritic(obs_dim, obs_size, action_dim, has_continuous_action_space, action_std_init).to(device)
self.policy_old.load_state_dict(self.policy.state_dict())
self.MseLoss = nn.MSELoss()
def set_action_std(self, new_action_std):
if self.has_continuous_action_space:
self.action_std = new_action_std
self.policy.set_action_std(new_action_std)
self.policy_old.set_action_std(new_action_std)
else:
print("--------------------------------------------------------------------------------------------")
print("WARNING : Calling PPO::set_action_std() on discrete action space policy")
print("--------------------------------------------------------------------------------------------")
def decay_action_std(self, action_std_decay_rate, min_action_std):
print("--------------------------------------------------------------------------------------------")
if self.has_continuous_action_space:
self.action_std = self.action_std - action_std_decay_rate
self.action_std = round(self.action_std, 4)
if (self.action_std <= min_action_std):
self.action_std = min_action_std
print("setting actor output action_std to min_action_std : ", self.action_std)
else:
print("setting actor output action_std to : ", self.action_std)
self.set_action_std(self.action_std)
else:
print("WARNING : Calling PPO::decay_action_std() on discrete action space policy")
print("--------------------------------------------------------------------------------------------")
def select_action(self, state, return_action_prob_shap=False, debug=False):
if self.has_continuous_action_space:
with torch.no_grad():
state = torch.FloatTensor(state).to(device)
action, action_logprob = self.policy_old.act(state)
self.buffer.states.append(state)
self.buffer.actions.append(action)
self.buffer.logprobs.append(action_logprob)
return action.detach().cpu().numpy().flatten()
else:
with torch.no_grad():
state = torch.FloatTensor(state).to(device)
action, action_logprob, action_probs = self.policy_old.act(state)
self.buffer.states.append(state)
self.buffer.actions.append(action)
self.buffer.logprobs.append(action_logprob)
if return_action_prob_shap:
return action_probs.cpu()
else:
return action.item()
def update(self):
rewards = []
discounted_reward = 0
for reward, is_terminal in zip(reversed(self.buffer.rewards), reversed(self.buffer.is_terminals)):
if is_terminal:
discounted_reward = 0
discounted_reward = reward + (self.gamma * discounted_reward)
rewards.insert(0, discounted_reward)
rewards = torch.tensor(rewards, dtype=torch.float32).to(device).squeeze()
rewards = (rewards - rewards.mean()) / (rewards.std() + 1e-7)
old_states = torch.squeeze(torch.stack(self.buffer.states, dim=0)).detach().to(device)
old_actions = torch.squeeze(torch.stack(self.buffer.actions, dim=0)).detach().to(device)
old_logprobs = torch.squeeze(torch.stack(self.buffer.logprobs, dim=0)).detach().to(device)
for _ in range(self.K_epochs):
logprobs, state_values, dist_entropy = self.policy.evaluate(old_states, old_actions)
state_values = torch.squeeze(state_values)
ratios = torch.exp(logprobs - old_logprobs.detach())
advantages = rewards - state_values.detach()
surr1 = ratios * advantages
surr2 = torch.clamp(ratios, 1-self.eps_clip, 1+self.eps_clip) * advantages
loss = -torch.min(surr1, surr2) + 0.5*self.MseLoss(state_values, rewards) - 0.01*dist_entropy
self.optimizer.zero_grad()
loss.mean().backward()
self.optimizer.step()
self.policy_old.load_state_dict(self.policy.state_dict())
self.buffer.clear()
def save(self, checkpoint_path):
torch.save(self.policy_old.state_dict(), checkpoint_path)
def load(self, checkpoint_path):
self.policy_old.load_state_dict(torch.load(checkpoint_path, map_location=lambda storage, loc: storage))
self.policy.load_state_dict(torch.load(checkpoint_path, map_location=lambda storage, loc: storage))
| true | true |
1c34eaa5ee0c570dfabd14d25e63956b1f1316de | 327 | py | Python | Codility_10.py | Bartoshko/codility-python-training | 5ac53e85948692c8e6c44090e68b93136b263f9c | [
"MIT"
] | null | null | null | Codility_10.py | Bartoshko/codility-python-training | 5ac53e85948692c8e6c44090e68b93136b263f9c | [
"MIT"
] | null | null | null | Codility_10.py | Bartoshko/codility-python-training | 5ac53e85948692c8e6c44090e68b93136b263f9c | [
"MIT"
] | null | null | null | # works 100%
arr_0 = [0,1,0,1,1]
arr_1 = [0,1,0,1,0,1,0,1,0,1]
def solution(A):
increment = 0
counter = 0
value = 0
for i in A:
if i == 0:
increment += 1
if i == 1:
value += increment
counter += increment
if counter > 1000000000:
return -1
return counter
print(solution(arr_0))
print(solution(arr_1)) | 14.863636 | 29 | 0.608563 |
arr_0 = [0,1,0,1,1]
arr_1 = [0,1,0,1,0,1,0,1,0,1]
def solution(A):
increment = 0
counter = 0
value = 0
for i in A:
if i == 0:
increment += 1
if i == 1:
value += increment
counter += increment
if counter > 1000000000:
return -1
return counter
print(solution(arr_0))
print(solution(arr_1)) | true | true |
1c34eb0121879b126de166589387ff58843cec09 | 9,734 | py | Python | adlibre_tms/apps/saasu_client/models/items.py | adlibre/Adlibre-TMS | 4c8de1e4448203fb267d38ec0f4ec9e64d58a21d | [
"BSD-3-Clause"
] | 26 | 2015-01-06T11:09:18.000Z | 2022-03-16T06:20:53.000Z | adlibre_tms/apps/saasu_client/models/items.py | adlibre/Adlibre-TMS | 4c8de1e4448203fb267d38ec0f4ec9e64d58a21d | [
"BSD-3-Clause"
] | 4 | 2015-02-26T11:00:35.000Z | 2020-06-05T18:02:02.000Z | adlibre_tms/apps/saasu_client/models/items.py | adlibre/Adlibre-TMS | 4c8de1e4448203fb267d38ec0f4ec9e64d58a21d | [
"BSD-3-Clause"
] | 16 | 2015-02-08T05:24:38.000Z | 2021-06-13T14:45:30.000Z | # -*- coding: utf-8 -*-
import xml_models
from saasu_client import DEFAULT_GET_URL
from saasu_client.models.base import BaseModel, CollectionField
__all__ = ['InventoryItem', 'FullInventoryItemList']
class InventoryItem(BaseModel):
""" Inventory Item Entity """
__model__ = 'InventoryItem'
# Required for update.
uid = xml_models.IntField(xpath="/inventoryItemResponse/inventoryItem/@uid", default=0)
# Required for update.
lastUpdatedUid = xml_models.CharField(xpath="/inventoryItemResponse/inventoryItem/@lastUpdatedUid")
# Inventory item code. Must be unique.
code = xml_models.CharField(xpath="/inventoryItemResponse/inventoryItem/code")
# Inventory item description.
# Multi-line description is supported. Use the pipe (|) to indicate newline.
description = xml_models.CharField(xpath="/inventoryItemResponse/inventoryItem/description")
# Default: True
isActive = xml_models.BoolField(xpath="/inventoryItemResponse/inventoryItem/isActive", default=True)
notes = xml_models.CharField(xpath="/inventoryItemResponse/inventoryItem/notes")
isInventoried = xml_models.BoolField(xpath="/inventoryItemResponse/inventoryItem/isInventoried", default=False)
# Required only if IsInventoried is set to true. Accounts used must be of type Asset.
assetAccountUid = xml_models.IntField(xpath="/inventoryItemResponse/inventoryItem/assetAccountUid", default=0)
# How many stocks on hand? This element is only used when you retrieve an Inventory Item from your File.
# This value is ignored on insert and update.
stockOnHand = xml_models.IntField(xpath="/inventoryItemResponse/inventoryItem/stockOnHand", default=0)
# Current stock value. This element is only used when you retrieve an Inventory Item from your File.
# This value is ignored on insert and update.
currentValue = xml_models.IntField(xpath="/inventoryItemResponse/inventoryItem/currentValue", default=0)
# Specifies if this item can be bought or not. Default: false (cannot be bought).
isBought = xml_models.BoolField(xpath="/inventoryItemResponse/inventoryItem/isBought", default=False)
# Expense Account for tracking purchase.
# Required only if the Inventory Item is not inventoried and item can be bought (isInventoried == false && isBought == true).
# Accounts used must be of type Expense.
purchaseExpenseAccountUid = xml_models.IntField(xpath="/inventoryItemResponse/inventoryItem/purchaseExpenseAccountUid", default=0)
# Default tax code when the inventory item is purchased.
purchaseTaxCode = xml_models.CharField(xpath="/inventoryItemResponse/inventoryItem/purchaseTaxCode")
# Minimum stock level used for re-stocking alert report.
minimumStockLevel = xml_models.IntField(xpath="/inventoryItemResponse/inventoryItem/minimumStockLevel", default=0)
# The primary supplier for this Inventory Item.
primarySupplierContactUid = xml_models.IntField(xpath="/inventoryItemResponse/inventoryItem/primarySupplierContactUid", default=0)
# The primary supplier’s item code for this Inventory Item.
primarySupplierItemCode = xml_models.CharField(xpath="/inventoryItemResponse/inventoryItem/primarySupplierItemCode")
# Default re-order quantity for re-stocking alert report.
defaultReOrderQuantity = xml_models.IntField(xpath="/inventoryItemResponse/inventoryItem/defaultReOrderQuantity", default=0)
# Default buying price for the item. Only applicable if the Inventory Item is marked as bought.
buyingPrice = xml_models.FloatField(xpath="/inventoryItemResponse/inventoryItem/buyingPrice", default=0)
# A flag specifying whether the buying price includes/excludes tax.
isBuyingPriceIncTax = xml_models.BoolField(xpath="/inventoryItemResponse/inventoryItem/isBuyingPriceIncTax", default=True)
# Specifies whether the Inventory Item can be sold or not. Default: false (cannot be sold).
isSold = xml_models.BoolField(xpath="/inventoryItemResponse/inventoryItem/isSold", default=True)
# Account for tracking sales. Only required if the item can be sold (isSold == true). Accounts used must be of type Income.
saleIncomeAccountUid = xml_models.IntField(xpath="/inventoryItemResponse/inventoryItem/saleIncomeAccountUid", default=0)
# Default tax code for sale.
saleTaxCode = xml_models.CharField(xpath="/inventoryItemResponse/inventoryItem/saleTaxCode")
# Accounts for tracking cost of sales. Required only if Inventory Item is inventoried & for sale.
# Accounts used must be of type Cost of Sales.
saleCoSAccountUid = xml_models.IntField(xpath="/inventoryItemResponse/inventoryItem/saleCoSAccountUid", default=0)
# The default selling price for this Inventory Item. Only applicable if the Inventory Item is marked as sold.
sellingPrice = xml_models.FloatField(xpath="/inventoryItemResponse/inventoryItem/sellingPrice", default=0)
isSellingPriceIncTax = xml_models.BoolField(xpath="/inventoryItemResponse/inventoryItem/isSellingPriceIncTax", default=True)
# A flag that indicates this is an item you sell, that you haven’t bought or assembled as stock to make available for sale.
isVirtual = xml_models.BoolField(xpath="/inventoryItemResponse/inventoryItem/isVirtual", default=True)
# The type if this item is marked as virtual.
vType = xml_models.CharField(xpath="/inventoryItemResponse/inventoryItem/vType")
# A flag to set the Item to visible, for an example this can be
# used in your database so that Item is flagged to be displayed in your ecommerce product listings.
isVisible = xml_models.BoolField(xpath="/inventoryItemResponse/inventoryItem/isVisisble", default=False)
# A flag specifying whether this item is treated as a voucher.
isVoucher = xml_models.BoolField(xpath="/inventoryItemResponse/inventoryItem/isVoucher", default=False)
# When the voucher becomes effective.
validFrom = xml_models.DateField(xpath="/inventoryItemResponse/inventoryItem/validFrom", date_format="%Y-%m-%d")
# When the voucher expires.
validTo = xml_models.DateField(xpath="/inventoryItemResponse/inventoryItem/validTo", date_format="%Y-%m-%d")
finders = {
(uid,): DEFAULT_GET_URL % __model__ + "&uid=%s",
}
class InventoryListItem(xml_models.Model):
""" Inventory List Item Entity """
uid = xml_models.IntField(xpath="/inventoryItem/@uid", default=0)
lastUpdatedUid = xml_models.CharField(xpath="/inventoryItem/@lastUpdatedUid")
utcFirstCreated = xml_models.CharField(xpath="/inventoryItem/utcFirstCreated")
utcLastModified = xml_models.CharField(xpath="/inventoryItem/utcLastModified")
code = xml_models.CharField(xpath="/inventoryItem/code")
description = xml_models.CharField(xpath="/inventoryItem/description")
isActive = xml_models.BoolField(xpath="/inventoryItem/isActive", default=True)
isInventoried = xml_models.BoolField(xpath="/inventoryItem/isInventoried", default=False)
assetAccountUid = xml_models.IntField(xpath="/inventoryItem/assetAccountUid", default=0)
stockOnHand = xml_models.IntField(xpath="/inventoryItem/stockOnHand", default=0)
currentValue = xml_models.IntField(xpath="/inventoryItem/currentValue", default=0)
quantityOnOrder = xml_models.IntField(xpath="/inventoryItem/quantityOnOrder", default=0)
quantityCommited = xml_models.IntField(xpath="/inventoryItem/quantityCommited", default=0)
isBought = xml_models.BoolField(xpath="/inventoryItem/isBought", default=False)
purchaseExpenseAccountUid = xml_models.IntField(xpath="/inventoryItem/purchaseExpenseAccountUid", default=0)
minimumStockLevel = xml_models.IntField(xpath="/inventoryItem/minimumStockLevel", default=0)
primarySupplierContactUid = xml_models.IntField(xpath="/inventoryItem/primarySupplierContactUid", default=0)
defaultReOrderQuantity = xml_models.IntField(xpath="/inventoryItem/defaultReOrderQuantity", default=0)
isSold = xml_models.BoolField(xpath="/inventoryItem/isSold", default=True)
saleIncomeAccountUid = xml_models.IntField(xpath="/inventoryItem/saleIncomeAccountUid", default=0)
saleTaxCode = xml_models.CharField(xpath="/inventoryItem/saleTaxCode")
saleCoSAccountUid = xml_models.IntField(xpath="/inventoryItem/saleCoSAccountUid", default=0)
sellingPrice = xml_models.FloatField(xpath="/inventoryItem/sellingPrice", default=0)
isSellingPriceIncTax = xml_models.BoolField(xpath="/inventoryItem/isSellingPriceIncTax", default=True)
buyingPrice = xml_models.FloatField(xpath="/inventoryItem/buyingPrice", default=0)
isBuyingPriceIncTax = xml_models.BoolField(xpath="/inventoryItem/isBuyingPriceIncTax", default=True)
isVoucher = xml_models.BoolField(xpath="/inventoryItem/isVoucher", default=False)
validFrom = xml_models.DateField(xpath="/inventoryItem/validFrom", date_format="%Y-%m-%d")
validTo = xml_models.DateField(xpath="/inventoryItem/validTo", date_format="%Y-%m-%d")
isVirtual = xml_models.BoolField(xpath="/inventoryItem/isVirtual", default=True)
isVisible = xml_models.BoolField(xpath="/inventoryItem/isVisisble", default=False)
class FullInventoryItemList(BaseModel):
""" Full Inventory Item List Entity """
__model__ = 'FullInventoryItemList'
items = CollectionField(InventoryListItem, xpath='/inventoryItemListResponse/inventoryItemList/inventoryItem')
isActive = xml_models.BoolField(xpath="/inventoryItemListResponse/inventoryItemList/inventoryItem/isActive", default=True)
finders = {
(isActive,) : DEFAULT_GET_URL % __model__ + "&isActive=%s",
}
def __len__(self):
return len(self.items)
def __iter__(self):
return self.items.__iter__()
| 62.8 | 134 | 0.771625 |
import xml_models
from saasu_client import DEFAULT_GET_URL
from saasu_client.models.base import BaseModel, CollectionField
__all__ = ['InventoryItem', 'FullInventoryItemList']
class InventoryItem(BaseModel):
__model__ = 'InventoryItem'
uid = xml_models.IntField(xpath="/inventoryItemResponse/inventoryItem/@uid", default=0)
lastUpdatedUid = xml_models.CharField(xpath="/inventoryItemResponse/inventoryItem/@lastUpdatedUid")
code = xml_models.CharField(xpath="/inventoryItemResponse/inventoryItem/code")
description = xml_models.CharField(xpath="/inventoryItemResponse/inventoryItem/description")
isActive = xml_models.BoolField(xpath="/inventoryItemResponse/inventoryItem/isActive", default=True)
notes = xml_models.CharField(xpath="/inventoryItemResponse/inventoryItem/notes")
isInventoried = xml_models.BoolField(xpath="/inventoryItemResponse/inventoryItem/isInventoried", default=False)
assetAccountUid = xml_models.IntField(xpath="/inventoryItemResponse/inventoryItem/assetAccountUid", default=0)
stockOnHand = xml_models.IntField(xpath="/inventoryItemResponse/inventoryItem/stockOnHand", default=0)
currentValue = xml_models.IntField(xpath="/inventoryItemResponse/inventoryItem/currentValue", default=0)
isBought = xml_models.BoolField(xpath="/inventoryItemResponse/inventoryItem/isBought", default=False)
purchaseExpenseAccountUid = xml_models.IntField(xpath="/inventoryItemResponse/inventoryItem/purchaseExpenseAccountUid", default=0)
purchaseTaxCode = xml_models.CharField(xpath="/inventoryItemResponse/inventoryItem/purchaseTaxCode")
minimumStockLevel = xml_models.IntField(xpath="/inventoryItemResponse/inventoryItem/minimumStockLevel", default=0)
primarySupplierContactUid = xml_models.IntField(xpath="/inventoryItemResponse/inventoryItem/primarySupplierContactUid", default=0)
primarySupplierItemCode = xml_models.CharField(xpath="/inventoryItemResponse/inventoryItem/primarySupplierItemCode")
defaultReOrderQuantity = xml_models.IntField(xpath="/inventoryItemResponse/inventoryItem/defaultReOrderQuantity", default=0)
buyingPrice = xml_models.FloatField(xpath="/inventoryItemResponse/inventoryItem/buyingPrice", default=0)
isBuyingPriceIncTax = xml_models.BoolField(xpath="/inventoryItemResponse/inventoryItem/isBuyingPriceIncTax", default=True)
isSold = xml_models.BoolField(xpath="/inventoryItemResponse/inventoryItem/isSold", default=True)
saleIncomeAccountUid = xml_models.IntField(xpath="/inventoryItemResponse/inventoryItem/saleIncomeAccountUid", default=0)
saleTaxCode = xml_models.CharField(xpath="/inventoryItemResponse/inventoryItem/saleTaxCode")
saleCoSAccountUid = xml_models.IntField(xpath="/inventoryItemResponse/inventoryItem/saleCoSAccountUid", default=0)
sellingPrice = xml_models.FloatField(xpath="/inventoryItemResponse/inventoryItem/sellingPrice", default=0)
isSellingPriceIncTax = xml_models.BoolField(xpath="/inventoryItemResponse/inventoryItem/isSellingPriceIncTax", default=True)
isVirtual = xml_models.BoolField(xpath="/inventoryItemResponse/inventoryItem/isVirtual", default=True)
vType = xml_models.CharField(xpath="/inventoryItemResponse/inventoryItem/vType")
isVisible = xml_models.BoolField(xpath="/inventoryItemResponse/inventoryItem/isVisisble", default=False)
isVoucher = xml_models.BoolField(xpath="/inventoryItemResponse/inventoryItem/isVoucher", default=False)
validFrom = xml_models.DateField(xpath="/inventoryItemResponse/inventoryItem/validFrom", date_format="%Y-%m-%d")
validTo = xml_models.DateField(xpath="/inventoryItemResponse/inventoryItem/validTo", date_format="%Y-%m-%d")
finders = {
(uid,): DEFAULT_GET_URL % __model__ + "&uid=%s",
}
class InventoryListItem(xml_models.Model):
uid = xml_models.IntField(xpath="/inventoryItem/@uid", default=0)
lastUpdatedUid = xml_models.CharField(xpath="/inventoryItem/@lastUpdatedUid")
utcFirstCreated = xml_models.CharField(xpath="/inventoryItem/utcFirstCreated")
utcLastModified = xml_models.CharField(xpath="/inventoryItem/utcLastModified")
code = xml_models.CharField(xpath="/inventoryItem/code")
description = xml_models.CharField(xpath="/inventoryItem/description")
isActive = xml_models.BoolField(xpath="/inventoryItem/isActive", default=True)
isInventoried = xml_models.BoolField(xpath="/inventoryItem/isInventoried", default=False)
assetAccountUid = xml_models.IntField(xpath="/inventoryItem/assetAccountUid", default=0)
stockOnHand = xml_models.IntField(xpath="/inventoryItem/stockOnHand", default=0)
currentValue = xml_models.IntField(xpath="/inventoryItem/currentValue", default=0)
quantityOnOrder = xml_models.IntField(xpath="/inventoryItem/quantityOnOrder", default=0)
quantityCommited = xml_models.IntField(xpath="/inventoryItem/quantityCommited", default=0)
isBought = xml_models.BoolField(xpath="/inventoryItem/isBought", default=False)
purchaseExpenseAccountUid = xml_models.IntField(xpath="/inventoryItem/purchaseExpenseAccountUid", default=0)
minimumStockLevel = xml_models.IntField(xpath="/inventoryItem/minimumStockLevel", default=0)
primarySupplierContactUid = xml_models.IntField(xpath="/inventoryItem/primarySupplierContactUid", default=0)
defaultReOrderQuantity = xml_models.IntField(xpath="/inventoryItem/defaultReOrderQuantity", default=0)
isSold = xml_models.BoolField(xpath="/inventoryItem/isSold", default=True)
saleIncomeAccountUid = xml_models.IntField(xpath="/inventoryItem/saleIncomeAccountUid", default=0)
saleTaxCode = xml_models.CharField(xpath="/inventoryItem/saleTaxCode")
saleCoSAccountUid = xml_models.IntField(xpath="/inventoryItem/saleCoSAccountUid", default=0)
sellingPrice = xml_models.FloatField(xpath="/inventoryItem/sellingPrice", default=0)
isSellingPriceIncTax = xml_models.BoolField(xpath="/inventoryItem/isSellingPriceIncTax", default=True)
buyingPrice = xml_models.FloatField(xpath="/inventoryItem/buyingPrice", default=0)
isBuyingPriceIncTax = xml_models.BoolField(xpath="/inventoryItem/isBuyingPriceIncTax", default=True)
isVoucher = xml_models.BoolField(xpath="/inventoryItem/isVoucher", default=False)
validFrom = xml_models.DateField(xpath="/inventoryItem/validFrom", date_format="%Y-%m-%d")
validTo = xml_models.DateField(xpath="/inventoryItem/validTo", date_format="%Y-%m-%d")
isVirtual = xml_models.BoolField(xpath="/inventoryItem/isVirtual", default=True)
isVisible = xml_models.BoolField(xpath="/inventoryItem/isVisisble", default=False)
class FullInventoryItemList(BaseModel):
__model__ = 'FullInventoryItemList'
items = CollectionField(InventoryListItem, xpath='/inventoryItemListResponse/inventoryItemList/inventoryItem')
isActive = xml_models.BoolField(xpath="/inventoryItemListResponse/inventoryItemList/inventoryItem/isActive", default=True)
finders = {
(isActive,) : DEFAULT_GET_URL % __model__ + "&isActive=%s",
}
def __len__(self):
return len(self.items)
def __iter__(self):
return self.items.__iter__()
| true | true |
1c34eb4801c8e897558782c5062336dabefbfe82 | 1,240 | py | Python | pipeline/processing/data.py | UNDP-Data/sids-data-pipeline | 16953bfe25bc4425f699937a83f5ed0c8755197a | [
"MIT"
] | null | null | null | pipeline/processing/data.py | UNDP-Data/sids-data-pipeline | 16953bfe25bc4425f699937a83f5ed0c8755197a | [
"MIT"
] | null | null | null | pipeline/processing/data.py | UNDP-Data/sids-data-pipeline | 16953bfe25bc4425f699937a83f5ed0c8755197a | [
"MIT"
] | null | null | null | from .config import download_path
from .utils import cwd, logging, read_csv, download_file
logger = logging.getLogger(__name__)
def get_rows(data_type, input_ext, tmp_ext):
rows = read_csv(cwd / f'../inputs/{data_type}.csv')
for row in rows:
blob_path = download_path / f"{data_type}/{row['id']}.{input_ext}"
input_path = cwd / f"../inputs/{data_type}/{row['id']}.{input_ext}"
tmp_path = cwd / f"../tmp/{data_type}/{row['id']}.{tmp_ext}"
row['blob_path'] = blob_path
row['input_path'] = input_path.resolve()
row['tmp_path'] = tmp_path.resolve()
return rows
def download_if_missing(data_type):
blob_path = download_path / f'{data_type}.csv'
input_path = cwd / f'../inputs/{data_type}.csv'
if input_path.is_file() and input_path.stat().st_size > 0:
pass
else:
input_path.parent.mkdir(parents=True, exist_ok=True)
download_file(blob_path, input_path)
logger.info(f'downloaded {input_path.name}')
def get_data():
download_if_missing('vectors')
download_if_missing('rasters')
vector_data = get_rows('vectors', 'gpkg', 'geojsonl')
raster_data = get_rows('rasters', 'tif', 'tif')
return vector_data, raster_data
| 34.444444 | 75 | 0.662097 | from .config import download_path
from .utils import cwd, logging, read_csv, download_file
logger = logging.getLogger(__name__)
def get_rows(data_type, input_ext, tmp_ext):
rows = read_csv(cwd / f'../inputs/{data_type}.csv')
for row in rows:
blob_path = download_path / f"{data_type}/{row['id']}.{input_ext}"
input_path = cwd / f"../inputs/{data_type}/{row['id']}.{input_ext}"
tmp_path = cwd / f"../tmp/{data_type}/{row['id']}.{tmp_ext}"
row['blob_path'] = blob_path
row['input_path'] = input_path.resolve()
row['tmp_path'] = tmp_path.resolve()
return rows
def download_if_missing(data_type):
blob_path = download_path / f'{data_type}.csv'
input_path = cwd / f'../inputs/{data_type}.csv'
if input_path.is_file() and input_path.stat().st_size > 0:
pass
else:
input_path.parent.mkdir(parents=True, exist_ok=True)
download_file(blob_path, input_path)
logger.info(f'downloaded {input_path.name}')
def get_data():
download_if_missing('vectors')
download_if_missing('rasters')
vector_data = get_rows('vectors', 'gpkg', 'geojsonl')
raster_data = get_rows('rasters', 'tif', 'tif')
return vector_data, raster_data
| true | true |
1c34eb7196851f652877086b511994be5ae8037e | 9,307 | py | Python | tests/test_data/test_datasets/test_s3dis_dataset.py | jhkim-spa/CVNet | 126a3155f414bf47e9e4f6fec121420fd378ea21 | [
"Apache-2.0"
] | 1 | 2021-10-20T05:21:21.000Z | 2021-10-20T05:21:21.000Z | tests/test_data/test_datasets/test_s3dis_dataset.py | jhkim-spa/CVNet | 126a3155f414bf47e9e4f6fec121420fd378ea21 | [
"Apache-2.0"
] | null | null | null | tests/test_data/test_datasets/test_s3dis_dataset.py | jhkim-spa/CVNet | 126a3155f414bf47e9e4f6fec121420fd378ea21 | [
"Apache-2.0"
] | 1 | 2022-02-16T07:25:55.000Z | 2022-02-16T07:25:55.000Z | import numpy as np
import pytest
import torch
from mmdet3d.datasets import S3DISSegDataset
def test_seg_getitem():
    """Test indexing S3DISSegDataset and its class/palette customization.

    NOTE: the expected point/label values below depend on np.random.seed(0)
    and the exact statement order; do not reorder the dataset accesses.
    """
    np.random.seed(0)
    root_path = './tests/data/s3dis/'
    ann_file = './tests/data/s3dis/s3dis_infos.pkl'
    # The 13 standard S3DIS semantic classes and their visualization colors.
    class_names = ('ceiling', 'floor', 'wall', 'beam', 'column', 'window',
                   'door', 'table', 'chair', 'sofa', 'bookcase', 'board',
                   'clutter')
    palette = [[0, 255, 0], [0, 0, 255], [0, 255, 255], [255, 255, 0],
               [255, 0, 255], [100, 100, 255], [200, 200, 100],
               [170, 120, 200], [255, 0, 0], [200, 100, 100], [10, 200, 100],
               [200, 200, 200], [50, 50, 50]]
    scene_idxs = [0 for _ in range(20)]
    # Standard semantic-segmentation training pipeline for S3DIS.
    pipelines = [
        dict(
            type='LoadPointsFromFile',
            coord_type='DEPTH',
            shift_height=False,
            use_color=True,
            load_dim=6,
            use_dim=[0, 1, 2, 3, 4, 5]),
        dict(
            type='LoadAnnotations3D',
            with_bbox_3d=False,
            with_label_3d=False,
            with_mask_3d=False,
            with_seg_3d=True),
        dict(
            type='PointSegClassMapping',
            valid_cat_ids=tuple(range(len(class_names))),
            max_cat_id=13),
        dict(
            type='IndoorPatchPointSample',
            num_points=5,
            block_size=1.0,
            sample_rate=1.0,
            ignore_index=len(class_names),
            use_normalized_coord=True),
        dict(type='NormalizePointsColor', color_mean=None),
        dict(type='DefaultFormatBundle3D', class_names=class_names),
        dict(
            type='Collect3D',
            keys=['points', 'pts_semantic_mask'],
            meta_keys=['file_name', 'sample_idx'])
    ]

    s3dis_dataset = S3DISSegDataset(
        data_root=root_path,
        ann_files=ann_file,
        pipeline=pipelines,
        classes=None,
        palette=None,
        modality=None,
        test_mode=False,
        ignore_index=None,
        scene_idxs=scene_idxs)

    data = s3dis_dataset[0]
    points = data['points']._data
    pts_semantic_mask = data['pts_semantic_mask']._data
    file_name = data['img_metas']._data['file_name']
    sample_idx = data['img_metas']._data['sample_idx']

    assert file_name == './tests/data/s3dis/points/Area_1_office_2.bin'
    assert sample_idx == 'Area_1_office_2'
    # 9 dims per point: xyz, normalized rgb, normalized xyz coords.
    expected_points = torch.tensor([[
        0.0000, 0.0000, 3.1720, 0.4706, 0.4431, 0.3725, 0.4624, 0.7502, 0.9543
    ], [
        0.2880, -0.5900, 0.0650, 0.3451, 0.3373, 0.3490, 0.5119, 0.5518, 0.0196
    ], [
        0.1570, 0.6000, 3.1700, 0.4941, 0.4667, 0.3569, 0.4893, 0.9519, 0.9537
    ], [
        -0.1320, 0.3950, 0.2720, 0.3216, 0.2863, 0.2275, 0.4397, 0.8830, 0.0818
    ],
                                    [
                                        -0.4860, -0.0640, 3.1710, 0.3843,
                                        0.3725, 0.3059, 0.3789, 0.7286, 0.9540
                                    ]])
    expected_pts_semantic_mask = np.array([0, 1, 0, 8, 0])
    original_classes = s3dis_dataset.CLASSES
    original_palette = s3dis_dataset.PALETTE

    assert s3dis_dataset.CLASSES == class_names
    assert s3dis_dataset.ignore_index == 13
    assert torch.allclose(points, expected_points, 1e-2)
    assert np.all(pts_semantic_mask.numpy() == expected_pts_semantic_mask)
    assert original_classes == class_names
    assert original_palette == palette
    assert s3dis_dataset.scene_idxs.dtype == np.int32
    assert np.all(s3dis_dataset.scene_idxs == np.array(scene_idxs))

    # test dataset with selected classes
    s3dis_dataset = S3DISSegDataset(
        data_root=root_path,
        ann_files=ann_file,
        pipeline=None,
        classes=['beam', 'window'],
        scene_idxs=scene_idxs)

    # All non-selected classes get mapped to the ignore index (13).
    label_map = {i: 13 for i in range(14)}
    label_map.update({3: 0, 5: 1})

    assert s3dis_dataset.CLASSES != original_classes
    assert s3dis_dataset.CLASSES == ['beam', 'window']
    assert s3dis_dataset.PALETTE == [palette[3], palette[5]]
    assert s3dis_dataset.VALID_CLASS_IDS == [3, 5]
    assert s3dis_dataset.label_map == label_map
    assert s3dis_dataset.label2cat == {0: 'beam', 1: 'window'}

    # test load classes from file
    import tempfile
    tmp_file = tempfile.NamedTemporaryFile()
    with open(tmp_file.name, 'w') as f:
        f.write('beam\nwindow\n')

    s3dis_dataset = S3DISSegDataset(
        data_root=root_path,
        ann_files=ann_file,
        pipeline=None,
        classes=tmp_file.name,
        scene_idxs=scene_idxs)
    assert s3dis_dataset.CLASSES != original_classes
    assert s3dis_dataset.CLASSES == ['beam', 'window']
    assert s3dis_dataset.PALETTE == [palette[3], palette[5]]
    assert s3dis_dataset.VALID_CLASS_IDS == [3, 5]
    assert s3dis_dataset.label_map == label_map
    assert s3dis_dataset.label2cat == {0: 'beam', 1: 'window'}

    # test scene_idxs in dataset
    # we should input scene_idxs in train mode
    with pytest.raises(NotImplementedError):
        s3dis_dataset = S3DISSegDataset(
            data_root=root_path,
            ann_files=ann_file,
            pipeline=None,
            scene_idxs=None)

    # test mode
    s3dis_dataset = S3DISSegDataset(
        data_root=root_path,
        ann_files=ann_file,
        pipeline=None,
        test_mode=True,
        scene_idxs=scene_idxs)
    assert np.all(s3dis_dataset.scene_idxs == np.array([0]))
def test_seg_evaluate():
    """Test S3DISSegDataset.evaluate on a fixed prediction (requires CUDA)."""
    if not torch.cuda.is_available():
        pytest.skip()
    root_path = './tests/data/s3dis'
    ann_file = './tests/data/s3dis/s3dis_infos.pkl'
    s3dis_dataset = S3DISSegDataset(
        data_root=root_path, ann_files=ann_file, test_mode=True)
    results = []
    # Hand-crafted per-point class predictions for the single test scene.
    pred_sem_mask = dict(
        semantic_mask=torch.tensor([
            2, 3, 1, 2, 2, 6, 1, 0, 1, 1, 9, 12, 3, 0, 2, 0, 2, 0, 8, 3, 1, 2,
            0, 2, 1, 7, 2, 10, 2, 0, 0, 0, 2, 3, 2, 2, 2, 2, 2, 3, 0, 0, 4, 6,
            7, 2, 1, 2, 0, 1, 7, 0, 2, 2, 2, 0, 2, 2, 1, 12, 0, 2, 2, 2, 2, 7,
            2, 2, 0, 2, 6, 2, 12, 6, 3, 12, 2, 1, 6, 1, 2, 6, 8, 2, 10, 1, 11,
            0, 6, 9, 4, 3, 0, 0, 12, 1, 1, 5, 3, 2
        ]).long())
    results.append(pred_sem_mask)
    ret_dict = s3dis_dataset.evaluate(results)
    # Reference metric values pre-computed for this prediction vs. ground truth.
    assert abs(ret_dict['miou'] - 0.7625) < 0.01
    assert abs(ret_dict['acc'] - 0.9) < 0.01
    assert abs(ret_dict['acc_cls'] - 0.9074) < 0.01
def test_seg_show():
    """Test S3DISSegDataset.show: it should dump points/gt/pred .obj files."""
    import mmcv
    import tempfile
    from os import path as osp
    tmp_dir = tempfile.TemporaryDirectory()
    temp_dir = tmp_dir.name
    root_path = './tests/data/s3dis'
    ann_file = './tests/data/s3dis/s3dis_infos.pkl'
    s3dis_dataset = S3DISSegDataset(
        data_root=root_path, ann_files=ann_file, scene_idxs=[0])
    # Fixed per-point predictions for the single test scene.
    result = dict(
        semantic_mask=torch.tensor([
            2, 2, 1, 2, 2, 5, 1, 0, 1, 1, 9, 12, 3, 0, 2, 0, 2, 0, 8, 2, 0, 2,
            0, 2, 1, 7, 2, 10, 2, 0, 0, 0, 2, 2, 2, 2, 2, 1, 2, 2, 0, 0, 4, 6,
            7, 2, 1, 2, 0, 1, 7, 0, 2, 2, 2, 0, 2, 2, 1, 12, 0, 2, 2, 2, 2, 7,
            2, 2, 0, 2, 6, 2, 12, 6, 2, 12, 2, 1, 6, 1, 2, 6, 8, 2, 10, 1, 10,
            0, 6, 9, 4, 3, 0, 0, 12, 1, 1, 5, 2, 2
        ]).long())
    results = [result]
    s3dis_dataset.show(results, temp_dir, show=False)
    # show() writes one .obj file each for raw points, ground truth and prediction.
    pts_file_path = osp.join(temp_dir, 'Area_1_office_2',
                             'Area_1_office_2_points.obj')
    gt_file_path = osp.join(temp_dir, 'Area_1_office_2',
                            'Area_1_office_2_gt.obj')
    pred_file_path = osp.join(temp_dir, 'Area_1_office_2',
                              'Area_1_office_2_pred.obj')
    mmcv.check_file_exist(pts_file_path)
    mmcv.check_file_exist(gt_file_path)
    mmcv.check_file_exist(pred_file_path)
    tmp_dir.cleanup()
def test_multi_areas():
    """Test concatenating multiple S3DIS areas into one S3DISSegDataset."""
    # S3DIS dataset has 6 areas, we often train on several of them
    # need to verify the concat function of S3DISSegDataset
    root_path = './tests/data/s3dis'
    ann_file = './tests/data/s3dis/s3dis_infos.pkl'
    class_names = ('ceiling', 'floor', 'wall', 'beam', 'column', 'window',
                   'door', 'table', 'chair', 'sofa', 'bookcase', 'board',
                   'clutter')
    palette = [[0, 255, 0], [0, 0, 255], [0, 255, 255], [255, 255, 0],
               [255, 0, 255], [100, 100, 255], [200, 200, 100],
               [170, 120, 200], [255, 0, 0], [200, 100, 100], [10, 200, 100],
               [200, 200, 200], [50, 50, 50]]
    scene_idxs = [0 for _ in range(20)]

    # repeat
    repeat_num = 3
    s3dis_dataset = S3DISSegDataset(
        data_root=root_path,
        ann_files=[ann_file for _ in range(repeat_num)],
        scene_idxs=scene_idxs)
    assert s3dis_dataset.CLASSES == class_names
    assert s3dis_dataset.PALETTE == palette
    assert len(s3dis_dataset.data_infos) == repeat_num
    # The single scene_idxs list is replicated once per area with an offset.
    assert np.all(s3dis_dataset.scene_idxs == np.concatenate(
        [np.array(scene_idxs) + i for i in range(repeat_num)]))

    # different scene_idxs input
    s3dis_dataset = S3DISSegDataset(
        data_root=root_path,
        ann_files=[ann_file for _ in range(repeat_num)],
        scene_idxs=[[0, 0, 1, 2, 2], [0, 1, 2, 3, 3, 4], [0, 1, 1, 2, 2, 2]])
    # Per-area index lists are offset by the preceding areas' scene counts
    # and concatenated (offsets 0, 3, 8 here).
    assert np.all(s3dis_dataset.scene_idxs == np.array(
        [0, 0, 1, 2, 2, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 10, 10]))
| 37.987755 | 79 | 0.578812 | import numpy as np
import pytest
import torch
from mmdet3d.datasets import S3DISSegDataset
def test_seg_getitem():
np.random.seed(0)
root_path = './tests/data/s3dis/'
ann_file = './tests/data/s3dis/s3dis_infos.pkl'
class_names = ('ceiling', 'floor', 'wall', 'beam', 'column', 'window',
'door', 'table', 'chair', 'sofa', 'bookcase', 'board',
'clutter')
palette = [[0, 255, 0], [0, 0, 255], [0, 255, 255], [255, 255, 0],
[255, 0, 255], [100, 100, 255], [200, 200, 100],
[170, 120, 200], [255, 0, 0], [200, 100, 100], [10, 200, 100],
[200, 200, 200], [50, 50, 50]]
scene_idxs = [0 for _ in range(20)]
pipelines = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=False,
use_color=True,
load_dim=6,
use_dim=[0, 1, 2, 3, 4, 5]),
dict(
type='LoadAnnotations3D',
with_bbox_3d=False,
with_label_3d=False,
with_mask_3d=False,
with_seg_3d=True),
dict(
type='PointSegClassMapping',
valid_cat_ids=tuple(range(len(class_names))),
max_cat_id=13),
dict(
type='IndoorPatchPointSample',
num_points=5,
block_size=1.0,
sample_rate=1.0,
ignore_index=len(class_names),
use_normalized_coord=True),
dict(type='NormalizePointsColor', color_mean=None),
dict(type='DefaultFormatBundle3D', class_names=class_names),
dict(
type='Collect3D',
keys=['points', 'pts_semantic_mask'],
meta_keys=['file_name', 'sample_idx'])
]
s3dis_dataset = S3DISSegDataset(
data_root=root_path,
ann_files=ann_file,
pipeline=pipelines,
classes=None,
palette=None,
modality=None,
test_mode=False,
ignore_index=None,
scene_idxs=scene_idxs)
data = s3dis_dataset[0]
points = data['points']._data
pts_semantic_mask = data['pts_semantic_mask']._data
file_name = data['img_metas']._data['file_name']
sample_idx = data['img_metas']._data['sample_idx']
assert file_name == './tests/data/s3dis/points/Area_1_office_2.bin'
assert sample_idx == 'Area_1_office_2'
expected_points = torch.tensor([[
0.0000, 0.0000, 3.1720, 0.4706, 0.4431, 0.3725, 0.4624, 0.7502, 0.9543
], [
0.2880, -0.5900, 0.0650, 0.3451, 0.3373, 0.3490, 0.5119, 0.5518, 0.0196
], [
0.1570, 0.6000, 3.1700, 0.4941, 0.4667, 0.3569, 0.4893, 0.9519, 0.9537
], [
-0.1320, 0.3950, 0.2720, 0.3216, 0.2863, 0.2275, 0.4397, 0.8830, 0.0818
],
[
-0.4860, -0.0640, 3.1710, 0.3843,
0.3725, 0.3059, 0.3789, 0.7286, 0.9540
]])
expected_pts_semantic_mask = np.array([0, 1, 0, 8, 0])
original_classes = s3dis_dataset.CLASSES
original_palette = s3dis_dataset.PALETTE
assert s3dis_dataset.CLASSES == class_names
assert s3dis_dataset.ignore_index == 13
assert torch.allclose(points, expected_points, 1e-2)
assert np.all(pts_semantic_mask.numpy() == expected_pts_semantic_mask)
assert original_classes == class_names
assert original_palette == palette
assert s3dis_dataset.scene_idxs.dtype == np.int32
assert np.all(s3dis_dataset.scene_idxs == np.array(scene_idxs))
s3dis_dataset = S3DISSegDataset(
data_root=root_path,
ann_files=ann_file,
pipeline=None,
classes=['beam', 'window'],
scene_idxs=scene_idxs)
label_map = {i: 13 for i in range(14)}
label_map.update({3: 0, 5: 1})
assert s3dis_dataset.CLASSES != original_classes
assert s3dis_dataset.CLASSES == ['beam', 'window']
assert s3dis_dataset.PALETTE == [palette[3], palette[5]]
assert s3dis_dataset.VALID_CLASS_IDS == [3, 5]
assert s3dis_dataset.label_map == label_map
assert s3dis_dataset.label2cat == {0: 'beam', 1: 'window'}
import tempfile
tmp_file = tempfile.NamedTemporaryFile()
with open(tmp_file.name, 'w') as f:
f.write('beam\nwindow\n')
s3dis_dataset = S3DISSegDataset(
data_root=root_path,
ann_files=ann_file,
pipeline=None,
classes=tmp_file.name,
scene_idxs=scene_idxs)
assert s3dis_dataset.CLASSES != original_classes
assert s3dis_dataset.CLASSES == ['beam', 'window']
assert s3dis_dataset.PALETTE == [palette[3], palette[5]]
assert s3dis_dataset.VALID_CLASS_IDS == [3, 5]
assert s3dis_dataset.label_map == label_map
assert s3dis_dataset.label2cat == {0: 'beam', 1: 'window'}
with pytest.raises(NotImplementedError):
s3dis_dataset = S3DISSegDataset(
data_root=root_path,
ann_files=ann_file,
pipeline=None,
scene_idxs=None)
s3dis_dataset = S3DISSegDataset(
data_root=root_path,
ann_files=ann_file,
pipeline=None,
test_mode=True,
scene_idxs=scene_idxs)
assert np.all(s3dis_dataset.scene_idxs == np.array([0]))
def test_seg_evaluate():
if not torch.cuda.is_available():
pytest.skip()
root_path = './tests/data/s3dis'
ann_file = './tests/data/s3dis/s3dis_infos.pkl'
s3dis_dataset = S3DISSegDataset(
data_root=root_path, ann_files=ann_file, test_mode=True)
results = []
pred_sem_mask = dict(
semantic_mask=torch.tensor([
2, 3, 1, 2, 2, 6, 1, 0, 1, 1, 9, 12, 3, 0, 2, 0, 2, 0, 8, 3, 1, 2,
0, 2, 1, 7, 2, 10, 2, 0, 0, 0, 2, 3, 2, 2, 2, 2, 2, 3, 0, 0, 4, 6,
7, 2, 1, 2, 0, 1, 7, 0, 2, 2, 2, 0, 2, 2, 1, 12, 0, 2, 2, 2, 2, 7,
2, 2, 0, 2, 6, 2, 12, 6, 3, 12, 2, 1, 6, 1, 2, 6, 8, 2, 10, 1, 11,
0, 6, 9, 4, 3, 0, 0, 12, 1, 1, 5, 3, 2
]).long())
results.append(pred_sem_mask)
ret_dict = s3dis_dataset.evaluate(results)
assert abs(ret_dict['miou'] - 0.7625) < 0.01
assert abs(ret_dict['acc'] - 0.9) < 0.01
assert abs(ret_dict['acc_cls'] - 0.9074) < 0.01
def test_seg_show():
import mmcv
import tempfile
from os import path as osp
tmp_dir = tempfile.TemporaryDirectory()
temp_dir = tmp_dir.name
root_path = './tests/data/s3dis'
ann_file = './tests/data/s3dis/s3dis_infos.pkl'
s3dis_dataset = S3DISSegDataset(
data_root=root_path, ann_files=ann_file, scene_idxs=[0])
result = dict(
semantic_mask=torch.tensor([
2, 2, 1, 2, 2, 5, 1, 0, 1, 1, 9, 12, 3, 0, 2, 0, 2, 0, 8, 2, 0, 2,
0, 2, 1, 7, 2, 10, 2, 0, 0, 0, 2, 2, 2, 2, 2, 1, 2, 2, 0, 0, 4, 6,
7, 2, 1, 2, 0, 1, 7, 0, 2, 2, 2, 0, 2, 2, 1, 12, 0, 2, 2, 2, 2, 7,
2, 2, 0, 2, 6, 2, 12, 6, 2, 12, 2, 1, 6, 1, 2, 6, 8, 2, 10, 1, 10,
0, 6, 9, 4, 3, 0, 0, 12, 1, 1, 5, 2, 2
]).long())
results = [result]
s3dis_dataset.show(results, temp_dir, show=False)
pts_file_path = osp.join(temp_dir, 'Area_1_office_2',
'Area_1_office_2_points.obj')
gt_file_path = osp.join(temp_dir, 'Area_1_office_2',
'Area_1_office_2_gt.obj')
pred_file_path = osp.join(temp_dir, 'Area_1_office_2',
'Area_1_office_2_pred.obj')
mmcv.check_file_exist(pts_file_path)
mmcv.check_file_exist(gt_file_path)
mmcv.check_file_exist(pred_file_path)
tmp_dir.cleanup()
def test_multi_areas():
root_path = './tests/data/s3dis'
ann_file = './tests/data/s3dis/s3dis_infos.pkl'
class_names = ('ceiling', 'floor', 'wall', 'beam', 'column', 'window',
'door', 'table', 'chair', 'sofa', 'bookcase', 'board',
'clutter')
palette = [[0, 255, 0], [0, 0, 255], [0, 255, 255], [255, 255, 0],
[255, 0, 255], [100, 100, 255], [200, 200, 100],
[170, 120, 200], [255, 0, 0], [200, 100, 100], [10, 200, 100],
[200, 200, 200], [50, 50, 50]]
scene_idxs = [0 for _ in range(20)]
repeat_num = 3
s3dis_dataset = S3DISSegDataset(
data_root=root_path,
ann_files=[ann_file for _ in range(repeat_num)],
scene_idxs=scene_idxs)
assert s3dis_dataset.CLASSES == class_names
assert s3dis_dataset.PALETTE == palette
assert len(s3dis_dataset.data_infos) == repeat_num
assert np.all(s3dis_dataset.scene_idxs == np.concatenate(
[np.array(scene_idxs) + i for i in range(repeat_num)]))
s3dis_dataset = S3DISSegDataset(
data_root=root_path,
ann_files=[ann_file for _ in range(repeat_num)],
scene_idxs=[[0, 0, 1, 2, 2], [0, 1, 2, 3, 3, 4], [0, 1, 1, 2, 2, 2]])
assert np.all(s3dis_dataset.scene_idxs == np.array(
[0, 0, 1, 2, 2, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 10, 10]))
| true | true |
1c34ebab0150fa1f9681a77d86d7978fddaa546e | 23,218 | py | Python | test/functional/test_framework/mininode.py | deeponion/deeponion2 | 2dc92ed83f5639b528e0c1279bdd214bb92ef286 | [
"MIT"
] | 569 | 2017-07-26T23:14:31.000Z | 2022-01-13T20:29:04.000Z | test/functional/test_framework/mininode.py | cpyberry/deeponion | 448b4a1ea7c90294d9ace03ea5a9648fb9b1107e | [
"MIT"
] | 155 | 2017-07-17T15:40:36.000Z | 2021-12-07T07:59:33.000Z | test/functional/test_framework/mininode.py | cpyberry/deeponion | 448b4a1ea7c90294d9ace03ea5a9648fb9b1107e | [
"MIT"
] | 214 | 2017-07-17T15:36:08.000Z | 2021-11-04T23:38:37.000Z | #!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""DeepOnion P2P network half-a-node.
This python code was modified from ArtForz' public domain half-a-node, as
found in the mini-node branch of http://github.com/jgarzik/pynode.
P2PConnection: A low-level connection object to a node's P2P interface
P2PInterface: A high-level interface object for communicating to a node over P2P
P2PDataStore: A p2p interface class that keeps a store of transactions and blocks
and can respond correctly to getdata and getheaders messages"""
import asyncio
from collections import defaultdict
from io import BytesIO
import logging
import struct
import sys
import threading
from test_framework.messages import (
CBlockHeader,
MIN_VERSION_SUPPORTED,
msg_addr,
msg_block,
MSG_BLOCK,
msg_blocktxn,
msg_cmpctblock,
msg_feefilter,
msg_getaddr,
msg_getblocks,
msg_getblocktxn,
msg_getdata,
msg_getheaders,
msg_headers,
msg_inv,
msg_mempool,
msg_notfound,
msg_ping,
msg_pong,
msg_reject,
msg_sendcmpct,
msg_sendheaders,
msg_tx,
MSG_TX,
MSG_TYPE_MASK,
msg_verack,
msg_version,
NODE_NETWORK,
NODE_WITNESS,
sha256,
)
from test_framework.util import wait_until
logger = logging.getLogger("TestFramework.mininode")

# Map of wire command name -> message class. Used by P2PConnection._on_data to
# deserialize incoming payloads and by P2PInterface.on_message to dispatch to
# the matching on_<command> callback.
MESSAGEMAP = {
    b"addr": msg_addr,
    b"block": msg_block,
    b"blocktxn": msg_blocktxn,
    b"cmpctblock": msg_cmpctblock,
    b"feefilter": msg_feefilter,
    b"getaddr": msg_getaddr,
    b"getblocks": msg_getblocks,
    b"getblocktxn": msg_getblocktxn,
    b"getdata": msg_getdata,
    b"getheaders": msg_getheaders,
    b"headers": msg_headers,
    b"inv": msg_inv,
    b"mempool": msg_mempool,
    b"notfound": msg_notfound,
    b"ping": msg_ping,
    b"pong": msg_pong,
    b"reject": msg_reject,
    b"sendcmpct": msg_sendcmpct,
    b"sendheaders": msg_sendheaders,
    b"tx": msg_tx,
    b"verack": msg_verack,
    b"version": msg_version,
}

# Network magic bytes that prefix every P2P message header, per chain.
MAGIC_BYTES = {
    "mainnet": b"\xfb\xc0\xb6\xdb",   # mainnet
    "testnet4": b"\xfd\xd2\xc8\xf1",  # testnet4
    "regtest": b"\xfa\xbf\xb5\xda",   # regtest
}
class P2PConnection(asyncio.Protocol):
    """A low-level connection object to a node's P2P interface.

    This class is responsible for:

    - opening and closing the TCP connection to the node
    - reading bytes from and writing bytes to the socket
    - deserializing and serializing the P2P message header
    - logging messages as they are sent and received

    This class contains no logic for handing the P2P message payloads. It must be
    sub-classed and the on_message() callback overridden."""

    def __init__(self):
        # The underlying transport of the connection.
        # Should only call methods on this from the NetworkThread, c.f. call_soon_threadsafe
        self._transport = None

    @property
    def is_connected(self):
        return self._transport is not None

    def peer_connect(self, dstaddr, dstport, net="regtest"):
        """Prepare connection state and return a callable that schedules the connect.

        The returned zero-argument callable must be invoked to actually start
        the connection on the NetworkThread's event loop."""
        assert not self.is_connected
        self.dstaddr = dstaddr
        self.dstport = dstport
        # The initial message to send after the connection was made:
        self.on_connection_send_msg = None
        self.recvbuf = b""
        self.magic_bytes = MAGIC_BYTES[net]
        logger.debug('Connecting to DeepOnion Node: %s:%d' % (self.dstaddr, self.dstport))

        loop = NetworkThread.network_event_loop
        conn_gen_unsafe = loop.create_connection(lambda: self, host=self.dstaddr, port=self.dstport)
        conn_gen = lambda: loop.call_soon_threadsafe(loop.create_task, conn_gen_unsafe)
        return conn_gen

    def peer_disconnect(self):
        # Connection could have already been closed by other end.
        NetworkThread.network_event_loop.call_soon_threadsafe(lambda: self._transport and self._transport.abort())

    # Connection and disconnection methods

    def connection_made(self, transport):
        """asyncio callback when a connection is opened."""
        assert not self._transport
        logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
        self._transport = transport
        if self.on_connection_send_msg:
            self.send_message(self.on_connection_send_msg)
            self.on_connection_send_msg = None  # Never used again
        self.on_open()

    def connection_lost(self, exc):
        """asyncio callback when a connection is closed."""
        if exc:
            logger.warning("Connection lost to {}:{} due to {}".format(self.dstaddr, self.dstport, exc))
        else:
            logger.debug("Closed connection to: %s:%d" % (self.dstaddr, self.dstport))
        self._transport = None
        self.recvbuf = b""
        self.on_close()

    # Socket read methods

    def data_received(self, t):
        """asyncio callback when data is read from the socket."""
        if len(t) > 0:
            self.recvbuf += t
            self._on_data()

    def _on_data(self):
        """Try to read P2P messages from the recv buffer.

        This method reads data from the buffer in a loop. It deserializes,
        parses and verifies the P2P header, then passes the P2P payload to
        the on_message callback for processing."""
        try:
            while True:
                if len(self.recvbuf) < 4:
                    return
                if self.recvbuf[:4] != self.magic_bytes:
                    raise ValueError("got garbage %s" % repr(self.recvbuf))
                # Header layout: 4B magic + 12B command + 4B length + 4B checksum.
                if len(self.recvbuf) < 4 + 12 + 4 + 4:
                    return
                command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
                msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
                checksum = self.recvbuf[4+12+4:4+12+4+4]
                if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
                    return
                msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
                # Checksum is the first 4 bytes of the double-SHA256 of the payload.
                th = sha256(msg)
                h = sha256(th)
                if checksum != h[:4]:
                    raise ValueError("got bad checksum " + repr(self.recvbuf))
                self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
                if command not in MESSAGEMAP:
                    raise ValueError("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg)))
                f = BytesIO(msg)
                t = MESSAGEMAP[command]()
                t.deserialize(f)
                self._log_message("receive", t)
                self.on_message(t)
        except Exception as e:
            # Fix: logger.exception() treats extra positional args as
            # %-format arguments. The original passed repr(e) with no
            # placeholder in the format string, which produced a logging
            # formatting error instead of recording the failure detail.
            logger.exception('Error reading message: %s', repr(e))
            raise

    def on_message(self, message):
        """Callback for processing a P2P payload. Must be overridden by derived class."""
        raise NotImplementedError

    # Socket write methods

    def send_message(self, message):
        """Send a P2P message over the socket.

        This method takes a P2P payload, builds the P2P header and adds
        the message to the send buffer to be sent over the socket."""
        tmsg = self.build_message(message)
        self._log_message("send", message)
        return self.send_raw_message(tmsg)

    def send_raw_message(self, raw_message_bytes):
        """Schedule raw bytes to be written on the event-loop thread."""
        if not self.is_connected:
            raise IOError('Not connected')

        def maybe_write():
            if not self._transport:
                return
            # Python <3.4.4 does not have is_closing, so we have to check for
            # its existence explicitly as long as Bitcoin Core supports all
            # Python 3.4 versions.
            if hasattr(self._transport, 'is_closing') and self._transport.is_closing():
                return
            self._transport.write(raw_message_bytes)
        NetworkThread.network_event_loop.call_soon_threadsafe(maybe_write)

    # Class utility methods

    def build_message(self, message):
        """Build a serialized P2P message: magic + command + length + checksum + payload."""
        command = message.command
        data = message.serialize()
        tmsg = self.magic_bytes
        tmsg += command
        tmsg += b"\x00" * (12 - len(command))
        tmsg += struct.pack("<I", len(data))
        th = sha256(data)
        h = sha256(th)
        tmsg += h[:4]
        tmsg += data
        return tmsg

    def _log_message(self, direction, msg):
        """Logs a message being sent or received over the connection."""
        if direction == "send":
            log_message = "Send message to "
        elif direction == "receive":
            log_message = "Received message from "
        log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
        if len(log_message) > 500:
            log_message += "... (msg truncated)"
        logger.debug(log_message)
class P2PInterface(P2PConnection):
    """A high-level P2P interface class for communicating with a DeepOnion node.

    This class provides high-level callbacks for processing P2P message
    payloads, as well as convenience methods for interacting with the
    node over P2P.

    Individual testcases should subclass this and override the on_* methods
    if they want to alter message handling behaviour."""
    def __init__(self):
        super().__init__()

        # Track number of messages of each type received and the most recent
        # message of each type
        self.message_count = defaultdict(int)
        self.last_message = {}

        # A count of the number of ping messages we've sent to the node
        self.ping_counter = 1

        # The network services received from the peer
        self.nServices = 0

    def peer_connect(self, *args, services=NODE_NETWORK|NODE_WITNESS, send_version=True, **kwargs):
        create_conn = super().peer_connect(*args, **kwargs)

        if send_version:
            # Send a version msg
            vt = msg_version()
            vt.nServices = services
            vt.addrTo.ip = self.dstaddr
            vt.addrTo.port = self.dstport
            vt.addrFrom.ip = "0.0.0.0"
            vt.addrFrom.port = 0
            self.on_connection_send_msg = vt  # Will be sent soon after connection_made

        return create_conn

    # Message receiving methods

    def on_message(self, message):
        """Receive message and dispatch message to appropriate callback.

        We keep a count of how many of each message type has been received
        and the most recent message of each type."""
        with mininode_lock:
            try:
                command = message.command.decode('ascii')
                self.message_count[command] += 1
                self.last_message[command] = message
                getattr(self, 'on_' + command)(message)
            except Exception:
                # Fix: was a bare `except:`, which also intercepted
                # SystemExit/KeyboardInterrupt before re-raising. Narrowed to
                # Exception; the error is still printed and re-raised.
                print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0]))
                raise

    # Callback methods. Can be overridden by subclasses in individual test
    # cases to provide custom message handling behaviour.

    def on_open(self):
        pass

    def on_close(self):
        pass

    def on_addr(self, message): pass
    def on_block(self, message): pass
    def on_blocktxn(self, message): pass
    def on_cmpctblock(self, message): pass
    def on_feefilter(self, message): pass
    def on_getaddr(self, message): pass
    def on_getblocks(self, message): pass
    def on_getblocktxn(self, message): pass
    def on_getdata(self, message): pass
    def on_getheaders(self, message): pass
    def on_headers(self, message): pass
    def on_mempool(self, message): pass
    def on_notfound(self, message): pass
    def on_pong(self, message): pass
    def on_reject(self, message): pass
    def on_sendcmpct(self, message): pass
    def on_sendheaders(self, message): pass
    def on_tx(self, message): pass

    def on_inv(self, message):
        # Request every advertised object (type 0 entries carry no data).
        want = msg_getdata()
        for i in message.inv:
            if i.type != 0:
                want.inv.append(i)
        if len(want.inv):
            self.send_message(want)

    def on_ping(self, message):
        self.send_message(msg_pong(message.nonce))

    def on_verack(self, message):
        pass

    def on_version(self, message):
        assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(message.nVersion, MIN_VERSION_SUPPORTED)
        self.send_message(msg_verack())
        self.nServices = message.nServices

    # Connection helper methods

    def wait_for_disconnect(self, timeout=60):
        test_function = lambda: not self.is_connected
        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    # Message receiving helper methods

    def wait_for_tx(self, txid, timeout=60):
        def test_function():
            if not self.last_message.get('tx'):
                return False
            return self.last_message['tx'].tx.rehash() == txid

        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    def wait_for_block(self, blockhash, timeout=60):
        test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    def wait_for_header(self, blockhash, timeout=60):
        def test_function():
            last_headers = self.last_message.get('headers')
            if not last_headers:
                return False
            return last_headers.headers[0].rehash() == blockhash

        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    def wait_for_getdata(self, timeout=60):
        """Waits for a getdata message.

        Receiving any getdata message will satisfy the predicate. the last_message["getdata"]
        value must be explicitly cleared before calling this method, or this will return
        immediately with success. TODO: change this method to take a hash value and only
        return true if the correct block/tx has been requested."""
        test_function = lambda: self.last_message.get("getdata")
        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    def wait_for_getheaders(self, timeout=60):
        """Waits for a getheaders message.

        Receiving any getheaders message will satisfy the predicate. the last_message["getheaders"]
        value must be explicitly cleared before calling this method, or this will return
        immediately with success. TODO: change this method to take a hash value and only
        return true if the correct block header has been requested."""
        test_function = lambda: self.last_message.get("getheaders")
        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    def wait_for_inv(self, expected_inv, timeout=60):
        """Waits for an INV message and checks that the first inv object in the message was as expected."""
        if len(expected_inv) > 1:
            raise NotImplementedError("wait_for_inv() will only verify the first inv object")
        test_function = lambda: self.last_message.get("inv") and \
                                self.last_message["inv"].inv[0].type == expected_inv[0].type and \
                                self.last_message["inv"].inv[0].hash == expected_inv[0].hash
        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    def wait_for_verack(self, timeout=60):
        test_function = lambda: self.message_count["verack"]
        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    # Message sending helper functions

    def send_and_ping(self, message, timeout=60):
        self.send_message(message)
        self.sync_with_ping(timeout=timeout)

    # Sync up with the node
    def sync_with_ping(self, timeout=60):
        self.send_message(msg_ping(nonce=self.ping_counter))
        test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
        wait_until(test_function, timeout=timeout, lock=mininode_lock)
        self.ping_counter += 1
# One lock for synchronizing all data access between the network event loop (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# P2PConnection acquires this lock whenever delivering a message to a P2PInterface.
# This lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the P2PInterface or P2PConnection.
# It is reentrant (RLock), so code already holding it may acquire it again.
mininode_lock = threading.RLock()
class NetworkThread(threading.Thread):
    """Thread that hosts the shared asyncio event loop used by all P2P connections."""
    # Class-level handle to the single event loop; set once in __init__.
    network_event_loop = None

    def __init__(self):
        super().__init__(name="NetworkThread")
        # Only one event loop (and therefore one NetworkThread) may ever exist.
        assert not self.network_event_loop
        NetworkThread.network_event_loop = asyncio.new_event_loop()

    def run(self):
        """Run the event loop until close() stops it."""
        self.network_event_loop.run_forever()

    def close(self, timeout=10):
        """Stop the event loop, wait for it to halt, then join this thread."""
        loop = self.network_event_loop
        loop.call_soon_threadsafe(loop.stop)
        wait_until(lambda: not loop.is_running(), timeout=timeout)
        loop.close()
        self.join(timeout)
class P2PDataStore(P2PInterface):
"""A P2P data store class.
Keeps a block and transaction store and responds correctly to getdata and getheaders requests."""
    def __init__(self):
        super().__init__()
        # store of blocks. key is block hash, value is a CBlock object
        self.block_store = {}
        # hash of the most recently added block; treated as the tip by on_getheaders
        self.last_block_hash = ''
        # store of txs. key is txid, value is a CTransaction object
        self.tx_store = {}
        # every hash we have been asked for via getdata, appended by on_getdata
        self.getdata_requests = []
def on_getdata(self, message):
"""Check for the tx/block in our stores and if found, reply with an inv message."""
for inv in message.inv:
self.getdata_requests.append(inv.hash)
if (inv.type & MSG_TYPE_MASK) == MSG_TX and inv.hash in self.tx_store.keys():
self.send_message(msg_tx(self.tx_store[inv.hash]))
elif (inv.type & MSG_TYPE_MASK) == MSG_BLOCK and inv.hash in self.block_store.keys():
self.send_message(msg_block(self.block_store[inv.hash]))
else:
logger.debug('getdata message type {} received.'.format(hex(inv.type)))
    def on_getheaders(self, message):
        """Search back through our block store for the locator, and reply with a headers message if found."""
        locator, hash_stop = message.locator, message.hashstop

        # Assume that the most recent block added is the tip
        if not self.block_store:
            return

        headers_list = [self.block_store[self.last_block_hash]]
        maxheaders = 2000
        while headers_list[-1].sha256 not in locator.vHave:
            # Walk back through the block store, adding headers to headers_list
            # as we go.
            prev_block_hash = headers_list[-1].hashPrevBlock
            if prev_block_hash in self.block_store:
                prev_block_header = CBlockHeader(self.block_store[prev_block_hash])
                headers_list.append(prev_block_header)
                if prev_block_header.sha256 == hash_stop:
                    # if this is the hashstop header, stop here
                    break
            else:
                # Reached a block we don't know about; stop the walk-back.
                logger.debug('block hash {} not found in block store'.format(hex(prev_block_hash)))
                break

        # Truncate the list if there are too many headers. The negative-step
        # slice also reverses headers_list into ascending (oldest-first) order.
        headers_list = headers_list[:-maxheaders - 1:-1]
        response = msg_headers(headers_list)

        if response is not None:
            self.send_message(response)
def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False, reject_reason=None, expect_disconnect=False, timeout=60):
"""Send blocks to test node and test whether the tip advances.
- add all blocks to our block_store
- send a headers message for the final block
- the on_getheaders handler will ensure that any getheaders are responded to
- if force_send is False: wait for getdata for each of the blocks. The on_getdata handler will
ensure that any getdata messages are responded to. Otherwise send the full block unsolicited.
- if success is True: assert that the node's tip advances to the most recent block
- if success is False: assert that the node's tip doesn't advance
- if reject_reason is set: assert that the correct reject message is logged"""
with mininode_lock:
for block in blocks:
self.block_store[block.sha256] = block
self.last_block_hash = block.sha256
reject_reason = [reject_reason] if reject_reason else []
with node.assert_debug_log(expected_msgs=reject_reason):
if force_send:
for b in blocks:
self.send_message(msg_block(block=b))
else:
self.send_message(msg_headers([CBlockHeader(blocks[-1])]))
wait_until(lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout, lock=mininode_lock)
if expect_disconnect:
self.wait_for_disconnect(timeout=timeout)
else:
self.sync_with_ping(timeout=timeout)
if success:
wait_until(lambda: node.getbestblockhash() == blocks[-1].hash, timeout=timeout)
else:
assert node.getbestblockhash() != blocks[-1].hash
def send_txs_and_test(self, txs, node, *, success=True, expect_disconnect=False, reject_reason=None):
"""Send txs to test node and test whether they're accepted to the mempool.
- add all txs to our tx_store
- send tx messages for all txs
- if success is True/False: assert that the txs are/are not accepted to the mempool
- if expect_disconnect is True: Skip the sync with ping
- if reject_reason is set: assert that the correct reject message is logged."""
with mininode_lock:
for tx in txs:
self.tx_store[tx.sha256] = tx
reject_reason = [reject_reason] if reject_reason else []
with node.assert_debug_log(expected_msgs=reject_reason):
for tx in txs:
self.send_message(msg_tx(tx))
if expect_disconnect:
self.wait_for_disconnect()
else:
self.sync_with_ping()
raw_mempool = node.getrawmempool()
if success:
# Check that all txs are now in the mempool
for tx in txs:
assert tx.hash in raw_mempool, "{} not found in mempool".format(tx.hash)
else:
# Check that none of the txs are now in the mempool
for tx in txs:
assert tx.hash not in raw_mempool, "{} tx found in mempool".format(tx.hash)
| 39.352542 | 182 | 0.644974 |
import asyncio
from collections import defaultdict
from io import BytesIO
import logging
import struct
import sys
import threading
from test_framework.messages import (
CBlockHeader,
MIN_VERSION_SUPPORTED,
msg_addr,
msg_block,
MSG_BLOCK,
msg_blocktxn,
msg_cmpctblock,
msg_feefilter,
msg_getaddr,
msg_getblocks,
msg_getblocktxn,
msg_getdata,
msg_getheaders,
msg_headers,
msg_inv,
msg_mempool,
msg_notfound,
msg_ping,
msg_pong,
msg_reject,
msg_sendcmpct,
msg_sendheaders,
msg_tx,
MSG_TX,
MSG_TYPE_MASK,
msg_verack,
msg_version,
NODE_NETWORK,
NODE_WITNESS,
sha256,
)
from test_framework.util import wait_until
logger = logging.getLogger("TestFramework.mininode")

# Map of wire command name (bytes) -> message class; used by
# P2PConnection._on_data to deserialize incoming payloads.
MESSAGEMAP = {
    b"addr": msg_addr,
    b"block": msg_block,
    b"blocktxn": msg_blocktxn,
    b"cmpctblock": msg_cmpctblock,
    b"feefilter": msg_feefilter,
    b"getaddr": msg_getaddr,
    b"getblocks": msg_getblocks,
    b"getblocktxn": msg_getblocktxn,
    b"getdata": msg_getdata,
    b"getheaders": msg_getheaders,
    b"headers": msg_headers,
    b"inv": msg_inv,
    b"mempool": msg_mempool,
    b"notfound": msg_notfound,
    b"ping": msg_ping,
    b"pong": msg_pong,
    b"reject": msg_reject,
    b"sendcmpct": msg_sendcmpct,
    b"sendheaders": msg_sendheaders,
    b"tx": msg_tx,
    b"verack": msg_verack,
    b"version": msg_version,
}

# Per-network message-start magic bytes; selected in P2PConnection.peer_connect().
MAGIC_BYTES = {
    "mainnet": b"\xfb\xc0\xb6\xdb",
    "testnet4": b"\xfd\xd2\xc8\xf1",
    "regtest": b"\xfa\xbf\xb5\xda",
}
class P2PConnection(asyncio.Protocol):
    """Low-level asyncio transport for a peer-to-peer connection.

    Handles framing/unframing of wire messages (magic | command | length |
    checksum | payload) and dispatches deserialized messages to on_message()."""
    def __init__(self):
        # No transport until connection_made() fires.
        self._transport = None
    @property
    def is_connected(self):
        """True while an asyncio transport is attached."""
        return self._transport is not None
    def peer_connect(self, dstaddr, dstport, net="regtest"):
        """Prepare a connection to dstaddr:dstport.

        Returns a zero-argument callable that schedules the actual connect on
        the network thread's event loop."""
        assert not self.is_connected
        self.dstaddr = dstaddr
        self.dstport = dstport
        self.on_connection_send_msg = None
        self.recvbuf = b""
        self.magic_bytes = MAGIC_BYTES[net]
        logger.debug('Connecting to DeepOnion Node: %s:%d' % (self.dstaddr, self.dstport))
        loop = NetworkThread.network_event_loop
        # The connection coroutine must only be scheduled from the event
        # loop's own thread, hence the call_soon_threadsafe wrapper.
        conn_gen_unsafe = loop.create_connection(lambda: self, host=self.dstaddr, port=self.dstport)
        conn_gen = lambda: loop.call_soon_threadsafe(loop.create_task, conn_gen_unsafe)
        return conn_gen
    def peer_disconnect(self):
        """Abort the transport from the event-loop thread (no-op if already closed)."""
        NetworkThread.network_event_loop.call_soon_threadsafe(lambda: self._transport and self._transport.abort())
    def connection_made(self, transport):
        """asyncio callback: the socket is connected."""
        assert not self._transport
        logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
        self._transport = transport
        # Flush any message queued up before the connection existed.
        if self.on_connection_send_msg:
            self.send_message(self.on_connection_send_msg)
            self.on_connection_send_msg = None
        self.on_open()
    def connection_lost(self, exc):
        """asyncio callback: the socket closed (exc is None on a clean close)."""
        if exc:
            logger.warning("Connection lost to {}:{} due to {}".format(self.dstaddr, self.dstport, exc))
        else:
            logger.debug("Closed connection to: %s:%d" % (self.dstaddr, self.dstport))
        self._transport = None
        self.recvbuf = b""
        self.on_close()
    def data_received(self, t):
        """asyncio callback: buffer incoming bytes and parse complete messages."""
        if len(t) > 0:
            self.recvbuf += t
            self._on_data()
    def _on_data(self):
        """Parse as many complete wire messages as the receive buffer holds.

        Frame layout: 4-byte magic, 12-byte NUL-padded command, 4-byte payload
        length, 4-byte checksum, then the payload."""
        try:
            while True:
                if len(self.recvbuf) < 4:
                    return
                if self.recvbuf[:4] != self.magic_bytes:
                    raise ValueError("got garbage %s" % repr(self.recvbuf))
                if len(self.recvbuf) < 4 + 12 + 4 + 4:
                    return
                command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
                msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
                checksum = self.recvbuf[4+12+4:4+12+4+4]
                if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
                    return
                msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
                # Checksum is the first 4 bytes of double-SHA256 of the payload.
                th = sha256(msg)
                h = sha256(th)
                if checksum != h[:4]:
                    raise ValueError("got bad checksum " + repr(self.recvbuf))
                self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
                if command not in MESSAGEMAP:
                    raise ValueError("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg)))
                f = BytesIO(msg)
                t = MESSAGEMAP[command]()
                t.deserialize(f)
                self._log_message("receive", t)
                self.on_message(t)
        except Exception as e:
            logger.exception('Error reading message:', repr(e))
            raise
    def on_message(self, message):
        """Callback for processing a deserialized P2P payload. Must be overridden by a derived class."""
        raise NotImplementedError
    def send_message(self, message):
        """Frame and serialize a message object, then hand it to the transport."""
        tmsg = self.build_message(message)
        self._log_message("send", message)
        return self.send_raw_message(tmsg)
    def send_raw_message(self, raw_message_bytes):
        """Schedule a pre-framed byte string for writing on the event-loop thread."""
        if not self.is_connected:
            raise IOError('Not connected')
        def maybe_write():
            if not self._transport:
                return
            # is_closing() is not available on all transport implementations.
            if hasattr(self._transport, 'is_closing') and self._transport.is_closing():
                return
            self._transport.write(raw_message_bytes)
        NetworkThread.network_event_loop.call_soon_threadsafe(maybe_write)
    def build_message(self, message):
        """Frame a message object into raw bytes: magic | command | length | checksum | payload."""
        command = message.command
        data = message.serialize()
        tmsg = self.magic_bytes
        tmsg += command
        tmsg += b"\x00" * (12 - len(command))
        tmsg += struct.pack("<I", len(data))
        th = sha256(data)
        h = sha256(th)
        tmsg += h[:4]
        tmsg += data
        return tmsg
    def _log_message(self, direction, msg):
        """Log a (truncated) repr of an incoming ("receive") or outgoing ("send") message."""
        if direction == "send":
            log_message = "Send message to "
        elif direction == "receive":
            log_message = "Received message from "
        log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
        if len(log_message) > 500:
            log_message += "... (msg truncated)"
        logger.debug(log_message)
class P2PInterface(P2PConnection):
    """A high-level P2P interface class for communicating with a node.

    Tracks received-message counts and the most recent message of each type,
    and provides helpers for sending messages and waiting on node state."""
    def __init__(self):
        super().__init__()
        # Number of messages received, keyed by command name.
        self.message_count = defaultdict(int)
        # Most recent message of each type, keyed by command name.
        self.last_message = {}
        # Nonce counter used to match pongs to our pings in sync_with_ping().
        self.ping_counter = 1
        # The network services received from the peer
        self.nServices = 0
    def peer_connect(self, *args, services=NODE_NETWORK|NODE_WITNESS, send_version=True, **kwargs):
        """Prepare the connection and (optionally) queue an initial version message."""
        create_conn = super().peer_connect(*args, **kwargs)
        if send_version:
            # Send a version msg
            vt = msg_version()
            vt.nServices = services
            vt.addrTo.ip = self.dstaddr
            vt.addrTo.port = self.dstport
            vt.addrFrom.ip = "0.0.0.0"
            vt.addrFrom.port = 0
            self.on_connection_send_msg = vt  # Will be sent soon after connection_made
        return create_conn
    # Message receiving methods
    def on_message(self, message):
        """Record the message and dispatch to the on_<command> handler by name."""
        with mininode_lock:
            try:
                command = message.command.decode('ascii')
                self.message_count[command] += 1
                self.last_message[command] = message
                getattr(self, 'on_' + command)(message)
            except:
                # Print then re-raise so the failure is visible in test output.
                print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0]))
                raise
    # Callback methods. Can be overridden by subclasses in individual test
    # cases to provide custom message handling behaviour.
    def on_open(self):
        pass
    def on_close(self):
        pass
    def on_addr(self, message): pass
    def on_block(self, message): pass
    def on_blocktxn(self, message): pass
    def on_cmpctblock(self, message): pass
    def on_feefilter(self, message): pass
    def on_getaddr(self, message): pass
    def on_getblocks(self, message): pass
    def on_getblocktxn(self, message): pass
    def on_getdata(self, message): pass
    def on_getheaders(self, message): pass
    def on_headers(self, message): pass
    def on_mempool(self, message): pass
    def on_notfound(self, message): pass
    def on_pong(self, message): pass
    def on_reject(self, message): pass
    def on_sendcmpct(self, message): pass
    def on_sendheaders(self, message): pass
    def on_tx(self, message): pass
    def on_inv(self, message):
        # Request every advertised item of a known type via getdata.
        want = msg_getdata()
        for i in message.inv:
            if i.type != 0:
                want.inv.append(i)
        if len(want.inv):
            self.send_message(want)
    def on_ping(self, message):
        # Echo the ping nonce back in a pong.
        self.send_message(msg_pong(message.nonce))
    def on_verack(self, message):
        pass
    def on_version(self, message):
        assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(message.nVersion, MIN_VERSION_SUPPORTED)
        self.send_message(msg_verack())
        self.nServices = message.nServices
    # Connection helper methods
    def wait_for_disconnect(self, timeout=60):
        test_function = lambda: not self.is_connected
        wait_until(test_function, timeout=timeout, lock=mininode_lock)
    # Message receiving helper methods
    def wait_for_tx(self, txid, timeout=60):
        """Wait until a tx message carrying txid has been received."""
        def test_function():
            if not self.last_message.get('tx'):
                return False
            return self.last_message['tx'].tx.rehash() == txid
        wait_until(test_function, timeout=timeout, lock=mininode_lock)
    def wait_for_block(self, blockhash, timeout=60):
        """Wait until a block message carrying blockhash has been received."""
        test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
        wait_until(test_function, timeout=timeout, lock=mininode_lock)
    def wait_for_header(self, blockhash, timeout=60):
        """Wait until a headers message whose first header matches blockhash arrives."""
        def test_function():
            last_headers = self.last_message.get('headers')
            if not last_headers:
                return False
            return last_headers.headers[0].rehash() == blockhash
        wait_until(test_function, timeout=timeout, lock=mininode_lock)
    def wait_for_getdata(self, timeout=60):
        """Wait until any getdata message has been received."""
        test_function = lambda: self.last_message.get("getdata")
        wait_until(test_function, timeout=timeout, lock=mininode_lock)
    def wait_for_getheaders(self, timeout=60):
        """Wait until any getheaders message has been received."""
        test_function = lambda: self.last_message.get("getheaders")
        wait_until(test_function, timeout=timeout, lock=mininode_lock)
    def wait_for_inv(self, expected_inv, timeout=60):
        """Wait until an inv whose first item matches expected_inv[0] arrives."""
        if len(expected_inv) > 1:
            raise NotImplementedError("wait_for_inv() will only verify the first inv object")
        test_function = lambda: self.last_message.get("inv") and \
                                self.last_message["inv"].inv[0].type == expected_inv[0].type and \
                                self.last_message["inv"].inv[0].hash == expected_inv[0].hash
        wait_until(test_function, timeout=timeout, lock=mininode_lock)
    def wait_for_verack(self, timeout=60):
        test_function = lambda: self.message_count["verack"]
        wait_until(test_function, timeout=timeout, lock=mininode_lock)
    # Message sending helper functions
    def send_and_ping(self, message, timeout=60):
        self.send_message(message)
        self.sync_with_ping(timeout=timeout)
    # Sync up with the node
    def sync_with_ping(self, timeout=60):
        """Send a ping with a fresh nonce and wait for the matching pong."""
        self.send_message(msg_ping(nonce=self.ping_counter))
        test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
        wait_until(test_function, timeout=timeout, lock=mininode_lock)
        self.ping_counter += 1
# One lock for synchronizing all data access between the network event loop (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# P2PConnection acquires this lock whenever delivering a message to a P2PInterface.
# This lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the P2PInterface or P2PConnection.
# An RLock (rather than a plain Lock) permits re-entrant acquisition from the
# same thread.
mininode_lock = threading.RLock()
class NetworkThread(threading.Thread):
    """Thread that owns and runs the single asyncio event loop used for all
    P2P connections."""
    # Process-wide shared event loop; created exactly once in __init__.
    network_event_loop = None
    def __init__(self):
        super().__init__(name="NetworkThread")
        # There is only one event loop and no more than one thread must be created
        assert not self.network_event_loop
        NetworkThread.network_event_loop = asyncio.new_event_loop()
    def run(self):
        """Start the network thread's event loop."""
        self.network_event_loop.run_forever()
    def close(self, timeout=10):
        """Stop the event loop, wait for it to halt, close it and join the thread."""
        self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop)
        wait_until(lambda: not self.network_event_loop.is_running(), timeout=timeout)
        self.network_event_loop.close()
        self.join(timeout)
class P2PDataStore(P2PInterface):
    """A P2P data store class.

    Keeps a block and transaction store and responds correctly to getdata and
    getheaders requests."""
    def __init__(self):
        super().__init__()
        # store of blocks. key is block hash, value is a CBlock object
        self.block_store = {}
        # hash of the most recently stored block; treated as the chain tip
        self.last_block_hash = ''
        # store of txs. key is txid, value is a CTransaction object
        self.tx_store = {}
        # hashes of every inv item the peer has requested via getdata
        self.getdata_requests = []
    def on_getdata(self, message):
        """Check for the tx/block in our stores and if found, reply with the
        corresponding tx/block message."""
        for inv in message.inv:
            self.getdata_requests.append(inv.hash)
            if (inv.type & MSG_TYPE_MASK) == MSG_TX and inv.hash in self.tx_store.keys():
                self.send_message(msg_tx(self.tx_store[inv.hash]))
            elif (inv.type & MSG_TYPE_MASK) == MSG_BLOCK and inv.hash in self.block_store.keys():
                self.send_message(msg_block(self.block_store[inv.hash]))
            else:
                # Unknown/unsupported inv type or item not in our stores.
                logger.debug('getdata message type {} received.'.format(hex(inv.type)))
    def on_getheaders(self, message):
        """Search back through our block store for the locator, and reply with a
        headers message if found."""
        locator, hash_stop = message.locator, message.hashstop
        # Assume that the most recent block added is the tip
        if not self.block_store:
            return
        headers_list = [self.block_store[self.last_block_hash]]
        maxheaders = 2000
        while headers_list[-1].sha256 not in locator.vHave:
            # Walk back through the block store, adding headers to headers_list
            # as we go.
            prev_block_hash = headers_list[-1].hashPrevBlock
            if prev_block_hash in self.block_store:
                prev_block_header = CBlockHeader(self.block_store[prev_block_hash])
                headers_list.append(prev_block_header)
                if prev_block_header.sha256 == hash_stop:
                    # if this is the hashstop header, stop here
                    break
            else:
                logger.debug('block hash {} not found in block store'.format(hex(prev_block_hash)))
                break
        # Truncate the list if there are too many headers; the reversed slice
        # also puts the reply in oldest-first order.
        headers_list = headers_list[:-maxheaders - 1:-1]
        response = msg_headers(headers_list)
        if response is not None:
            self.send_message(response)
    def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False, reject_reason=None, expect_disconnect=False, timeout=60):
        """Send blocks to the node and test whether its tip advances.

        Adds all blocks to block_store, then either announces the last block
        via headers (replying to the resulting getdata) or, with force_send,
        pushes the full blocks unsolicited. Asserts tip advancement according
        to `success` and checks the debug log for `reject_reason` if set."""
        # Hold the lock while mutating the stores shared with the event loop.
        with mininode_lock:
            for block in blocks:
                self.block_store[block.sha256] = block
                self.last_block_hash = block.sha256
        reject_reason = [reject_reason] if reject_reason else []
        with node.assert_debug_log(expected_msgs=reject_reason):
            if force_send:
                for b in blocks:
                    self.send_message(msg_block(block=b))
            else:
                self.send_message(msg_headers([CBlockHeader(blocks[-1])]))
                wait_until(lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout, lock=mininode_lock)
            if expect_disconnect:
                self.wait_for_disconnect(timeout=timeout)
            else:
                self.sync_with_ping(timeout=timeout)
            if success:
                wait_until(lambda: node.getbestblockhash() == blocks[-1].hash, timeout=timeout)
            else:
                assert node.getbestblockhash() != blocks[-1].hash
    def send_txs_and_test(self, txs, node, *, success=True, expect_disconnect=False, reject_reason=None):
        """Send txs to the node and test whether they reach the mempool.

        Adds all txs to tx_store, sends them, syncs (unless a disconnect is
        expected) and asserts mempool membership according to `success`,
        checking the debug log for `reject_reason` if set."""
        with mininode_lock:
            for tx in txs:
                self.tx_store[tx.sha256] = tx
        reject_reason = [reject_reason] if reject_reason else []
        with node.assert_debug_log(expected_msgs=reject_reason):
            for tx in txs:
                self.send_message(msg_tx(tx))
            if expect_disconnect:
                self.wait_for_disconnect()
            else:
                self.sync_with_ping()
            raw_mempool = node.getrawmempool()
            if success:
                # Check that all txs are now in the mempool
                for tx in txs:
                    assert tx.hash in raw_mempool, "{} not found in mempool".format(tx.hash)
            else:
                # Check that none of the txs are now in the mempool
                for tx in txs:
                    assert tx.hash not in raw_mempool, "{} tx found in mempool".format(tx.hash)
| true | true |
1c34ec5b0922266c3e466c89dc031fd7d675e72f | 412 | py | Python | migrations/versions/0302a_merge.py | cds-snc/notifier-api | 90b385ec49efbaee7e607516fc7d9f08991af813 | [
"MIT"
] | 41 | 2019-11-28T16:58:41.000Z | 2022-01-28T21:11:16.000Z | migrations/versions/0302a_merge.py | cds-snc/notification-api | b1c1064f291eb860b494c3fa65ac256ad70bf47c | [
"MIT"
] | 1,083 | 2019-07-08T12:57:24.000Z | 2022-03-08T18:53:40.000Z | migrations/versions/0302a_merge.py | cds-snc/notifier-api | 90b385ec49efbaee7e607516fc7d9f08991af813 | [
"MIT"
] | 9 | 2020-01-24T19:56:43.000Z | 2022-01-27T21:36:53.000Z | """
Revision ID: 0302a_merge
Revises: 0301c_update_golive_template, 0302_add_org_id_to_services
Create Date: 2019-07-29 16:18:27.467361
"""
# revision identifiers, used by Alembic.
revision = "0302a_merge"
# Two down-revisions: this is a merge point joining two divergent
# migration heads; it carries no schema changes of its own.
down_revision = ("0301c_update_golive_template", "0302_add_org_id_to_services")
branch_labels = None
import sqlalchemy as sa
from alembic import op
def upgrade():
    # Merge-only revision: nothing to apply.
    pass
def downgrade():
    # Merge-only revision: nothing to revert.
    pass
| 17.166667 | 79 | 0.771845 |
# Alembic revision identifiers. The tuple-valued down_revision marks this as a
# merge-only revision joining two divergent migration heads; it performs no
# schema changes itself.
revision = "0302a_merge"
down_revision = ("0301c_update_golive_template", "0302_add_org_id_to_services")
branch_labels = None
import sqlalchemy as sa
from alembic import op
def upgrade():
    # Merge revision: nothing to apply.
    pass
def downgrade():
    # Merge revision: nothing to revert.
    pass
| true | true |
1c34ec9156ccc0bddc9813d98d2ba5a9fbc1cc96 | 423 | py | Python | user-management-api/user_management_project/asgi.py | washimimizuku/django-tutorials | 4f0e3836778dd3ea5403ef713e2f6777e44eae8d | [
"MIT"
] | null | null | null | user-management-api/user_management_project/asgi.py | washimimizuku/django-tutorials | 4f0e3836778dd3ea5403ef713e2f6777e44eae8d | [
"MIT"
] | null | null | null | user-management-api/user_management_project/asgi.py | washimimizuku/django-tutorials | 4f0e3836778dd3ea5403ef713e2f6777e44eae8d | [
"MIT"
] | null | null | null | """
ASGI config for user_management_project project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application

# Point Django at the project's settings module before the application object
# is built (setdefault keeps any value already present in the environment).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'user_management_project.settings')

# Module-level ASGI callable that ASGI servers import and serve.
application = get_asgi_application()
| 24.882353 | 83 | 0.801418 |
import os
from django.core.asgi import get_asgi_application

# Configure the settings module before constructing the ASGI application;
# setdefault leaves an already-set environment value untouched.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'user_management_project.settings')

# The ASGI entry point exposed at module level.
application = get_asgi_application()
| true | true |
1c34ed8262045aa8f685164bd58552086bd69fb1 | 348 | py | Python | virtualscreening/vina/spark/pdb_io.py | rodrigofaccioli/drugdesign | de15880af361a010729b1f4fbc8a75a2b36688a6 | [
"Apache-2.0"
] | 3 | 2015-01-19T20:12:59.000Z | 2019-02-21T18:43:04.000Z | virtualscreening/vina/spark/pdb_io.py | rodrigofaccioli/drugdesign | de15880af361a010729b1f4fbc8a75a2b36688a6 | [
"Apache-2.0"
] | 22 | 2015-01-05T16:48:54.000Z | 2017-01-21T16:36:10.000Z | virtualscreening/vina/spark/pdb_io.py | rodrigofaccioli/drugdesign | de15880af361a010729b1f4fbc8a75a2b36688a6 | [
"Apache-2.0"
def adding_chain_atom_line(atom_line, chain="Z"):
    """Insert a chain identifier before PDB column 22 (index 21) of an ATOM line.

    Mirrors the original character-by-character splice: the chain character is
    only emitted immediately before the existing character at index 21, so
    lines shorter than 22 characters are returned unchanged.
    """
    if len(atom_line) > 21:
        # Slice-based equivalent of walking the string and splicing `chain`
        # in front of the 22nd character.
        return atom_line[:21] + chain + atom_line[21:]
    return atom_line
def replace_chain_atom_line(atom_line, chain_ref="d", new_chain="Z"):
    """Replace the chain identifier (PDB column 22, index 21) of an ATOM line.

    The chain column is rewritten only when it currently holds `chain_ref`;
    shorter lines (no chain column) are returned unchanged. Restricting the
    replacement to the chain column fixes the previous blanket
    ``str.replace``, which rewrote every occurrence of the character anywhere
    in the line and could corrupt other fields of the fixed-column record.

    atom_line -- the PDB ATOM line (coerced to str)
    chain_ref -- current chain identifier to replace
    new_chain -- replacement chain identifier
    """
    line = str(atom_line)
    if len(line) > 21 and line[21] == chain_ref:
        return line[:21] + new_chain + line[22:]
    return line
def adding_chain_atom_line(atom_line, chain="Z"):
    """Splice a chain identifier in front of index 21 (PDB column 22).

    Lines with no character at index 21 come back unchanged, matching the
    original loop which only inserted when the index was actually reached.
    """
    pieces = []
    for idx, ch in enumerate(atom_line):
        if idx == 21:
            pieces.append(chain)
        pieces.append(ch)
    return "".join(pieces)
def replace_chain_atom_line(atom_line,chain_ref="d",new_chain="Z"):
    # Replaces every occurrence of chain_ref anywhere in the line, not only
    # the chain column (index 21) that adding_chain_atom_line targets.
    # NOTE(review): this blanket replace will also rewrite matching characters
    # in other PDB fields -- confirm whether only column 22 should change.
    return str(atom_line).replace(chain_ref,new_chain)
| true | true |
1c34ef0c08d81ab26cc421a11e492e66257f386a | 265 | py | Python | openapi_core/validation/datatypes.py | Yarn-e/openapi-core | fda9fbd3bc1c0879818e00445e1ad0731f80b065 | [
"BSD-3-Clause"
] | 160 | 2017-11-20T13:39:04.000Z | 2022-03-31T14:48:27.000Z | openapi_core/validation/datatypes.py | Yarn-e/openapi-core | fda9fbd3bc1c0879818e00445e1ad0731f80b065 | [
"BSD-3-Clause"
] | 384 | 2017-09-21T12:42:31.000Z | 2022-03-21T17:21:05.000Z | openapi_core/validation/datatypes.py | Yarn-e/openapi-core | fda9fbd3bc1c0879818e00445e1ad0731f80b065 | [
"BSD-3-Clause"
] | 100 | 2017-11-21T08:07:01.000Z | 2022-01-20T20:32:52.000Z | """OpenAPI core validation datatypes module"""
from dataclasses import dataclass
from typing import List
@dataclass
class BaseValidationResult:
    """Base container for validation outcomes.

    Holds the exceptions collected during validation; raise_for_errors()
    surfaces the first one, if any.
    """

    # Exceptions gathered while validating.
    errors: List[Exception]

    def raise_for_errors(self):
        """Raise the first recorded error; no-op when there are none."""
        if self.errors:
            raise self.errors[0]
| 20.384615 | 46 | 0.724528 | from dataclasses import dataclass
from typing import List
@dataclass
class BaseValidationResult:
errors: List[Exception]
def raise_for_errors(self):
for error in self.errors:
raise error
| true | true |
1c34ef485e743b17c67738c34003630dbdd389ce | 13,757 | py | Python | modules/py/pkgs/QNLP/proc/DisCoCat.py | ICHEC/QNLP | 2966c7f71e6979c7ddef62520c3749cf6473fabe | [
"Apache-2.0"
] | 29 | 2020-04-13T04:40:35.000Z | 2021-12-17T11:21:35.000Z | modules/py/pkgs/QNLP/proc/DisCoCat.py | ICHEC/QNLP | 2966c7f71e6979c7ddef62520c3749cf6473fabe | [
"Apache-2.0"
] | 6 | 2020-03-12T17:40:00.000Z | 2021-01-20T12:15:08.000Z | modules/py/pkgs/QNLP/proc/DisCoCat.py | ICHEC/QNLP | 2966c7f71e6979c7ddef62520c3749cf6473fabe | [
"Apache-2.0"
] | 9 | 2020-09-28T05:00:30.000Z | 2022-03-04T02:11:49.000Z | ###############################################################################
import sqlite3
import os
from typing import Dict, Tuple
import QNLP.proc.process_corpus as pc
import numpy as np
from QNLP.io.qnlp_db import qnlp_db as qnlp_db
###############################################################################
#Use mixin to modify the insert statements and the structure of database
class qdb_mixin(qnlp_db):
    """Mixin extending qnlp_db with a DisCoCat-specific table layout.

    Each stored row is one term of a token's superposition encoding: the
    originating dataset ('basis' or 'corpus'), the token type and text, the
    index of the term within the superposition, the basis bitstring, the
    real/imaginary parts of its coefficient, and a mapping-direction flag.
    """

    def create_table_discocat(self, table_name="qnlp"):
        """
        Create the database table for tagging the required data.

        Columns: dataset ('basis' or 'corpus'), data_type ('verb', 'noun',
        etc.), token (the string value), mapping_idx (the index of the mapped
        binary value; for superpos states, this index labels the number of
        values in the superposition), map_bin_id (the binary value
        representing the quantum state in the register), map_coeff_r/i (real
        and imaginary coefficients of the map_bin_id state), mapping_dir
        (indicates the direction of the mapping; may not be used).
        """
        cr_tbl = """CREATE TABLE {}(
                    id INTEGER PRIMARY KEY,
                    dataset TEXT,
                    data_type TEXT,
                    token TEXT,
                    mapping_idx INTEGER,
                    map_bin_id INTEGER,
                    map_coeff_r REAL,
                    map_coeff_i REAL,
                    mapping_dir INTEGER
                    );""".format(table_name)

        conn = super(qdb_mixin, self).connect_db()
        c = conn.cursor()

        try:
            c.execute(cr_tbl)
        except sqlite3.OperationalError as oe:
            # Most likely the table already exists; offer to drop and recreate.
            remove_db = input("Table '{}' already exists. Remove? y/n: ".format(table_name))
            # Fixed: compare strings with '==', not 'is' (identity of string
            # literals is an interpreter implementation detail).
            if remove_db == "y":
                self.drop_table(table_name)
                self.create_table_discocat(table_name)
        except Exception as e:
            print("SQLITE exception thrown: {0}".format(e), "Exiting program.")
            exit()
        finally:
            conn.commit()

    def db_insert_discocat(self, values, dataset="basis", data_type="noun", table_name="qnlp"):
        """
        Insert the token-to-superposition mapping into the DB.

        Each value entry is assumed to be a list of
        (coefficient, basis bitstring) tuples for one corpus token, e.g. as
        produced by DisCoCat.generate_state_mapping. The per-term index is
        stored in mapping_idx; mapping_dir is written as 0 (numeric indices
        would imply the reverse, qubit-to-string, mapping).

        values     -- Dict mapping corpus token -> superposition term list.
        dataset    -- Label of the originating dataset ('basis'/'corpus').
        data_type  -- String to indicate the type of data to be stored.
        table_name -- Name of table to store in DB.
        """
        conn = super(qdb_mixin, self).connect_db()
        c = conn.cursor()

        self.create_table_discocat(table_name)
        for corpus_token, superpos in values.items():
            for idx, (distance_measure, basis_state) in enumerate(superpos):
                c.execute("""INSERT INTO {} (
                    dataset,
                    data_type,
                    token,
                    mapping_idx,
                    map_bin_id,
                    map_coeff_r,
                    map_coeff_i,
                    mapping_dir ) VALUES(?,?,?,?,?,?,?,?)""".format(table_name),
                    (dataset,
                    data_type,
                    corpus_token,
                    idx,
                    basis_state,
                    distance_measure.real,
                    distance_measure.imag,
                    0)
                )
        conn.commit()
###############################################################################
class DisCoCat:
    """
    Implements precomputation for the DisCo(Cat) model to represent sentence
    meanings using category theory methods. See <PAPERS> for details.
    """

    def __init__(self, fd=lambda x: [1.0 / (i + 1) for i in x]):
        # fd maps a collection of token distances to basis-state weights;
        # the default weights a distance i as 1/(i+1).
        self.distance_func = fd

    def load_corpus(self, corpus_path):
        """Load corpus text from corpus_path via the process_corpus helpers."""
        return pc.load_corpus(corpus_path)

    def tokenise_corpus(self, corpus_text):
        """Tokenise corpus_text via the process_corpus helpers."""
        return pc.tokenize_corpus(corpus_text)

    def word_occurrence(self, corpus_list: list):
        """
        Counts word occurrence in a given corpus, presented as a tokenised
        word list. Returns a dictionary with keys as the tokens and values as
        the occurrences.
        """
        word_dict = {}
        for word in corpus_list:
            word_dict[word] = word_dict.get(word, 0) + 1
        return word_dict

    def define_basis_words(self, word_dict: dict, max_words: int):
        """
        Chooses the max_words most common words from word_dict and returns
        them as a list of (token, count) tuples for use as a basis.
        Raises ValueError if max_words exceeds the number of distinct tokens.
        """
        k = list(word_dict.keys())
        v = list(word_dict.values())
        res_list = []

        for _ in range(max_words):
            # max() returns the first maximum, so ties resolve in insertion order.
            max_val = max(v)
            val_idx = v.index(max_val)
            res_list.append((k[val_idx], max_val))
            k.remove(k[val_idx])
            v.remove(max_val)

        return res_list

    def map_to_basis(self, corpus_list: dict, basis: list, basis_dist_cutoff=10, distance_func=None):
        """
        Maps the words from the corpus into the chosen basis.
        Returns a word_map dictionary mapping corpus tokens -> {basis token:
        minimum distance}; tokens with no basis word within the cutoff map to
        None.

        Keyword arguments:
        corpus_list       -- Dict mapping token -> (tag, positions array);
                             positions are token indices within the corpus.
        basis             -- List of basis tokens (each must be a key of
                             corpus_list).
        basis_dist_cutoff -- Cut-off for token distance from basis for it to
                             be significant.
        distance_func     -- Distance-to-weight function override; defaults to
                             the constructor-supplied function. (The weighting
                             itself is applied later, in
                             generate_state_mapping.)
        """
        if distance_func is None:
            distance_func = self.distance_func

        word_map = {}

        # map distance between basis words and other words in token list
        for word, locations in corpus_list.items():
            word_map[word] = None
            for b_val in basis:
                # Basis elements are orthogonal: a basis word maps to itself
                # with distance 0 and to no other basis word.
                if b_val == word:
                    word_map[b_val] = {b_val: 0}
                    break
                # To add left-right ordering here, remove the abs and use the
                # sign of the distance to indicate relative word order.
                min_dist = np.min(np.abs(locations[1][:, np.newaxis] - corpus_list[b_val][1]))
                if min_dist <= basis_dist_cutoff:
                    if word_map.get(word) is not None:
                        word_map[word][b_val] = min_dist
                    else:
                        word_map[word] = {b_val: min_dist}
        return word_map

    def nvn_distances(self, corpus_list_n: dict, corpus_list_v: dict, dist_cutoff=2, distance_func=None):
        """Matches the NVN sentence structure by locating adjacent nouns and
        verbs, following the same procedure as used to map corpus words onto
        the basis, so relationships between verbs and their subject/object
        nouns can be constructed.

        NOTE: still experimental/incomplete -- currently only logs detected
        noun-verb adjacency and returns an empty mapping. (A leftover
        interactive IPython `embed()` breakpoint was removed from this
        routine.)
        """
        if distance_func is None:
            distance_func = self.distance_func

        word_map = {}

        for word_v, locations_v in corpus_list_v.items():
            for word_n, locations_n in corpus_list_n.items():
                # Pairwise position differences between every noun and verb
                # occurrence; np.any avoids the ambiguous truth value raised
                # by iterating a 2-D array through the builtin any().
                dists = locations_n[1][:, np.newaxis] - locations_v[1]
                if np.any(np.abs(dists) <= dist_cutoff):
                    print("Pattern between {} and {}".format(word_n, word_v))
        return word_map

    def map_to_bitstring(self, basis: list):
        """
        Assigns a unique non-zero integer bitstring to each (token, count)
        entry of basis. Returns (num_qubits, {token: bitstring}).
        """
        # State |0...0> is reserved for initialisation, so the largest
        # assigned value for N basis tokens is N, requiring
        # ceil(log2(N+1)) bits (the previous ceil(log2(N)) under-counted).
        upper_bound_bitstrings = int(np.ceil(np.log2(len(basis) + 1)))
        bit_map = {}
        bitstring = 0  # Assume |0...0> state reserved for initialisation only
        for k, v in basis:
            bitstring += 1
            bit_map[k] = bitstring
        return (upper_bound_bitstrings, bit_map)

    def generate_state_mapping(self, bit_map, dat_map):
        """
        Takes the basis bitstring map (num_qubits, {basis token: bitstring})
        and the token-to-basis relationship, and returns a normalised set of
        states: {token: [(coefficient, bitstring), ...]}, with coefficients
        produced by distance_func from the token-to-basis distances and
        L2-normalised per token.
        """
        state_encoding = {}
        for token, basis_dist_map in dat_map.items():
            local_coeffs = []
            local_states = []
            for basis_token, distance_list in basis_dist_map.items():
                # Multiple occurrences of the same basis word are folded into
                # a single coefficient by summing their weighted distances.
                local_coeffs.append(np.sum(self.distance_func(distance_list)))
                local_states.append(bit_map[1][basis_token])

            # Normalisation factor over all basis states of this token.
            norm_factor = np.linalg.norm(local_coeffs)
            for state_idx in range(len(local_states)):
                local_coeffs[state_idx] /= norm_factor
                current = state_encoding.get(token)
                if current is not None:
                    current.append((local_coeffs[state_idx], local_states[state_idx],))
                else:
                    state_encoding[token] = [(local_coeffs[state_idx], local_states[state_idx],)]
        return state_encoding

    def latex_states(self, bit_map, dat_map, file_name="state"):
        """
        LaTeX file outputter for state generation. Given the above data
        structures, file_name.tex is generated. Beware, as output may need to
        replace '_' with '\\_' for non-math-mode usage.
        """
        mapping = self.generate_state_mapping(bit_map, dat_map)
        with open(file_name + ".tex", "w") as f:
            f.write("\\documentclass{article} \n \\usepackage{amsmath} \\usepackage{multicol} \n \\begin{document} \n")
            # Ket formatter padded to the register width from bit_map[0].
            tex_string_format_bit = r'\vert {:0%db} \rangle' % (bit_map[0])
            f.write("\\section{Basis} \\begin{multicols}{2} \n \\noindent ")
            for b_key, b_val in bit_map[1].items():
                f.write(b_key + " $\\rightarrow " + tex_string_format_bit.format(b_val) + "$\\\\ ")
            f.write("\\end{multicols}")
            f.write("\\noindent\\rule{\\textwidth}{1pt} \n")
            f.write("\\noindent\\rule{\\textwidth}{1pt} \n")
            f.write("\\section{Encoding} \n")
            for token, basis_map in mapping.items():
                f.write(r"\begin{align}\vert \textrm{" + token + "} \\rangle &= \\\\ \n &")
                for i, b in enumerate(basis_map):
                    # Break the line every three terms for readability.
                    if i != 0:
                        if i % 3 == 0:
                            f.write(r" \\ & ")
                    f.write("{0:.3f}".format(round(b[0], 3)))
                    f.write(tex_string_format_bit.format(b[1]))
                    if i != len(basis_map) - 1:
                        f.write(r"+")
                f.write(" \\nonumber ")
                f.write(r"""\end{align}""")
                f.write("\\noindent\\rule{\\textwidth}{1pt} \n")
            f.write(r"\end{document}")
############################################################################### | 46.010033 | 458 | 0.535873 | true | true | |
1c34efaf717edb20399b7bb104621e96df0873ca | 2,239 | py | Python | scrapers/seamus/scraper.py | nprapps/graeae | b38cdd3de74fb239fdcc92619e92bcfb0818bda3 | [
"MIT"
] | 5 | 2015-06-10T15:37:46.000Z | 2015-10-12T15:28:37.000Z | scrapers/seamus/scraper.py | nprapps/graeae | b38cdd3de74fb239fdcc92619e92bcfb0818bda3 | [
"MIT"
] | 175 | 2015-04-14T20:14:57.000Z | 2015-07-13T13:50:45.000Z | scrapers/seamus/scraper.py | nprapps/graeae | b38cdd3de74fb239fdcc92619e92bcfb0818bda3 | [
"MIT"
] | 3 | 2015-08-27T14:34:09.000Z | 2021-02-23T11:03:40.000Z | #!/usr/bin/env python
from datetime import datetime
import logging
import os
import requests
from pyquery import PyQuery
from app_config import get_secrets
from models import Story
SECRETS = get_secrets()
SEAMUS_API_PAGE_SIZE = 20
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class SeamusScraper:
def __init__(self):
self.run_time = datetime.utcnow()
def scrape_seamus(self, **kwargs):
"""
Scrape!
"""
logger.info('Scraping Seamus API (start time: %s)' % self.run_time)
if not kwargs:
stories = self._get_stories_from_api()
else:
element = PyQuery(parser='xml', **kwargs)
story_elements = element.find('story')
stories = self._extract_stories(story_elements)
return stories
def _get_stories_from_api(self):
startNum = 1
stories = []
while True:
response = requests.get('http://api.npr.org/query', params={
'date': 'current',
'orgId': '1',
'apiKey': SECRETS['NPR_API_KEY'],
'numResults': 20,
'startNum': startNum,
})
element = PyQuery(response.content, parser='xml')
story_elements = element.find('story')
if len(story_elements):
stories += self._extract_stories(story_elements)
startNum += SEAMUS_API_PAGE_SIZE
else:
break
return stories
def _extract_stories(self, story_elements):
stories = []
for story_el in story_elements:
story_el = PyQuery(story_el, parser='xml')
story = Story(story_el, self.run_time)
stories.append(story)
logger.info('Scraped %s from Seamus API (%s)' % (story.story_id, story.title))
return stories
def write(self, db, stories):
"""
Write to database
"""
table = db['seamus']
for story in stories:
exists = table.find_one(story_id=story.story_id)
if exists:
continue
row = story.serialize()
table.insert(row)
| 25.735632 | 90 | 0.571684 |
from datetime import datetime
import logging
import os
import requests
from pyquery import PyQuery
from app_config import get_secrets
from models import Story
SECRETS = get_secrets()
SEAMUS_API_PAGE_SIZE = 20
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class SeamusScraper:
def __init__(self):
self.run_time = datetime.utcnow()
def scrape_seamus(self, **kwargs):
logger.info('Scraping Seamus API (start time: %s)' % self.run_time)
if not kwargs:
stories = self._get_stories_from_api()
else:
element = PyQuery(parser='xml', **kwargs)
story_elements = element.find('story')
stories = self._extract_stories(story_elements)
return stories
def _get_stories_from_api(self):
startNum = 1
stories = []
while True:
response = requests.get('http://api.npr.org/query', params={
'date': 'current',
'orgId': '1',
'apiKey': SECRETS['NPR_API_KEY'],
'numResults': 20,
'startNum': startNum,
})
element = PyQuery(response.content, parser='xml')
story_elements = element.find('story')
if len(story_elements):
stories += self._extract_stories(story_elements)
startNum += SEAMUS_API_PAGE_SIZE
else:
break
return stories
def _extract_stories(self, story_elements):
stories = []
for story_el in story_elements:
story_el = PyQuery(story_el, parser='xml')
story = Story(story_el, self.run_time)
stories.append(story)
logger.info('Scraped %s from Seamus API (%s)' % (story.story_id, story.title))
return stories
def write(self, db, stories):
table = db['seamus']
for story in stories:
exists = table.find_one(story_id=story.story_id)
if exists:
continue
row = story.serialize()
table.insert(row)
| true | true |
1c34f00f6f6deff2422988c51a4cdf2397490bc5 | 1,021 | py | Python | lab6/Q6_1_1.py | kommunium/dip-lab | 2c8e08a994fb34b87da55da48a7b72b7c13d9c81 | [
"MIT"
] | null | null | null | lab6/Q6_1_1.py | kommunium/dip-lab | 2c8e08a994fb34b87da55da48a7b72b7c13d9c81 | [
"MIT"
] | null | null | null | lab6/Q6_1_1.py | kommunium/dip-lab | 2c8e08a994fb34b87da55da48a7b72b7c13d9c81 | [
"MIT"
] | null | null | null | import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
# %% Q6_1_1 max filter
Q6_1_1 = np.asarray(Image.open("Q6_1_1.tiff"))
def max_filter(img_raw, n: int):
m = (n - 1) // 2
row, col = img_raw.shape
img_pad = np.pad(img_raw, m)
img_out = np.array([img_pad[i:i + n, j:j + n].max()
for i in range(row)
for j in range(col)])
return img_out.astype(np.uint8).reshape(row, col)
Q6_1_1_n3 = max_filter(Q6_1_1, 3)
Q6_1_1_n5 = max_filter(Q6_1_1, 5)
Q6_1_1_n7 = max_filter(Q6_1_1, 7)
plt.figure(figsize=(6.4, 6.4))
plt.subplot(221)
plt.title('Raw Q6_1_1.tiff')
plt.imshow(Q6_1_1, cmap='gray')
plt.subplot(222)
plt.title('Max filtered with filter size $n=3$')
plt.imshow(Q6_1_1_n3, cmap='gray')
plt.subplot(223)
plt.title('Max filtered with filter size $n=5$')
plt.imshow(Q6_1_1_n5, cmap='gray')
plt.subplot(224)
plt.title('Max filtered with filter size $n=7$')
plt.imshow(Q6_1_1_n7, cmap='gray')
plt.savefig('Q6_1_1.png')
plt.show()
| 26.868421 | 55 | 0.665034 | import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
Q6_1_1 = np.asarray(Image.open("Q6_1_1.tiff"))
def max_filter(img_raw, n: int):
m = (n - 1) // 2
row, col = img_raw.shape
img_pad = np.pad(img_raw, m)
img_out = np.array([img_pad[i:i + n, j:j + n].max()
for i in range(row)
for j in range(col)])
return img_out.astype(np.uint8).reshape(row, col)
Q6_1_1_n3 = max_filter(Q6_1_1, 3)
Q6_1_1_n5 = max_filter(Q6_1_1, 5)
Q6_1_1_n7 = max_filter(Q6_1_1, 7)
plt.figure(figsize=(6.4, 6.4))
plt.subplot(221)
plt.title('Raw Q6_1_1.tiff')
plt.imshow(Q6_1_1, cmap='gray')
plt.subplot(222)
plt.title('Max filtered with filter size $n=3$')
plt.imshow(Q6_1_1_n3, cmap='gray')
plt.subplot(223)
plt.title('Max filtered with filter size $n=5$')
plt.imshow(Q6_1_1_n5, cmap='gray')
plt.subplot(224)
plt.title('Max filtered with filter size $n=7$')
plt.imshow(Q6_1_1_n7, cmap='gray')
plt.savefig('Q6_1_1.png')
plt.show()
| true | true |
1c34f054cc39f409c990152fefa119a2bf2c71cb | 995 | py | Python | learning/katas/python/Common Transforms/Aggregation/Count/task.py | charithe/beam | f085cb500730cf0c67c467ac55f92b3c59f52b39 | [
"Apache-2.0"
] | 35 | 2016-09-22T22:53:14.000Z | 2020-02-13T15:12:21.000Z | learning/katas/python/Common Transforms/Aggregation/Count/task.py | charithe/beam | f085cb500730cf0c67c467ac55f92b3c59f52b39 | [
"Apache-2.0"
] | 71 | 2018-05-23T22:20:02.000Z | 2019-04-30T15:37:46.000Z | learning/katas/python/Common Transforms/Aggregation/Count/task.py | charithe/beam | f085cb500730cf0c67c467ac55f92b3c59f52b39 | [
"Apache-2.0"
] | 88 | 2016-11-27T02:16:11.000Z | 2020-02-28T05:10:26.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import apache_beam as beam
from log_elements import LogElements
p = beam.Pipeline()
(p | beam.Create(range(1, 11))
| beam.combiners.Count.Globally()
| LogElements())
p.run()
| 35.535714 | 76 | 0.739698 |
import apache_beam as beam
from log_elements import LogElements
p = beam.Pipeline()
(p | beam.Create(range(1, 11))
| beam.combiners.Count.Globally()
| LogElements())
p.run()
| true | true |
1c34f08aae1ab12bac8cb59e8a8cd37e3fd4e4aa | 199 | py | Python | src/ArtificialConduction/ArtificialConductionPolicyInst.cc.py | jmikeowen/Spheral | 3e1082a7aefd6b328bd3ae24ca1a477108cfc3c4 | [
"BSD-Source-Code",
"BSD-3-Clause-LBNL",
"FSFAP"
] | 22 | 2018-07-31T21:38:22.000Z | 2020-06-29T08:58:33.000Z | src/ArtificialConduction/ArtificialConductionPolicyInst.cc.py | jmikeowen/Spheral | 3e1082a7aefd6b328bd3ae24ca1a477108cfc3c4 | [
"BSD-Source-Code",
"BSD-3-Clause-LBNL",
"FSFAP"
] | 41 | 2020-09-28T23:14:27.000Z | 2022-03-28T17:01:33.000Z | src/ArtificialConduction/ArtificialConductionPolicyInst.cc.py | jmikeowen/Spheral | 3e1082a7aefd6b328bd3ae24ca1a477108cfc3c4 | [
"BSD-Source-Code",
"BSD-3-Clause-LBNL",
"FSFAP"
] | 7 | 2019-12-01T07:00:06.000Z | 2020-09-15T21:12:39.000Z | text = """
#include "ArtificialConduction/ArtificialConductionPolicy.cc"
#include "Geometry/Dimension.hh"
namespace Spheral {
template class ArtificialConductionPolicy< Dim< %(ndim)s > >;
}
"""
| 22.111111 | 65 | 0.743719 | text = """
#include "ArtificialConduction/ArtificialConductionPolicy.cc"
#include "Geometry/Dimension.hh"
namespace Spheral {
template class ArtificialConductionPolicy< Dim< %(ndim)s > >;
}
"""
| true | true |
1c34f0e97ac6368ed1711029606b4e87c6a3c764 | 18 | py | Python | opsdroid/testing/mockmodules/skills/skill/skilltest/mock.py | JiahnChoi/opsdroid.kr | 0893456b0f9f6c70edf7c330a7593d87450538cc | [
"Apache-2.0"
] | 712 | 2016-08-09T21:30:07.000Z | 2022-03-24T09:38:21.000Z | opsdroid/testing/mockmodules/skills/skill/skilltest/mock.py | JiahnChoi/opsdroid.kr | 0893456b0f9f6c70edf7c330a7593d87450538cc | [
"Apache-2.0"
] | 1,767 | 2016-07-27T13:01:25.000Z | 2022-03-29T04:25:10.000Z | opsdroid/testing/mockmodules/skills/skill/skilltest/mock.py | JiahnChoi/opsdroid.kr | 0893456b0f9f6c70edf7c330a7593d87450538cc | [
"Apache-2.0"
] | 536 | 2016-07-31T14:23:41.000Z | 2022-03-22T17:35:15.000Z | """Mock skill."""
| 9 | 17 | 0.5 | true | true | |
1c34f167c66b512c11d05e984e2da8670df79f29 | 1,960 | py | Python | pyclesperanto_prototype/_tier3/_mode_of_proximal_neighbors_map.py | elsandal/pyclesperanto_prototype | 7bda828813b86b44b63d73d5e8f466d9769cded1 | [
"BSD-3-Clause"
] | 64 | 2020-03-18T12:11:22.000Z | 2022-03-31T08:19:18.000Z | pyclesperanto_prototype/_tier3/_mode_of_proximal_neighbors_map.py | elsandal/pyclesperanto_prototype | 7bda828813b86b44b63d73d5e8f466d9769cded1 | [
"BSD-3-Clause"
] | 148 | 2020-05-14T06:14:11.000Z | 2022-03-26T15:02:31.000Z | pyclesperanto_prototype/_tier3/_mode_of_proximal_neighbors_map.py | elsandal/pyclesperanto_prototype | 7bda828813b86b44b63d73d5e8f466d9769cded1 | [
"BSD-3-Clause"
] | 16 | 2020-05-31T00:53:44.000Z | 2022-03-23T13:20:57.000Z | from .._tier0 import execute
from .._tier0 import plugin_function
from .._tier0 import Image
import numpy as np
@plugin_function(categories=['combine', 'neighbor', 'map', 'in assistant'])
def mode_of_proximal_neighbors_map(parametric_map : Image, label_map : Image, parametric_map_destination : Image = None, min_distance : float = 0, max_distance : float = np.finfo(np.float32).max):
"""Takes a label image and a parametric intensity image and will replace each labels value in the parametric image
by the mode value of neighboring labels. The distance range of the centroids of the neighborhood can be configured.
Note: Values of all pixels in a label each must be identical.
Parameters
----------
parametric_map : Image
label_map : Image
parametric_map_destination : Image
min_distance : float, optional
default : 0
max_distance : float, optional
default: maximum float value
Returns
-------
parametric_map_destination
References
----------
.. [1] https://clij.github.io/clij2-docs/reference_modeOfProximalNeighbors
"""
from .._tier1 import read_intensities_from_map
from .._tier2 import mode_of_touching_neighbors
from .._tier1 import replace_intensities
from .._tier9 import centroids_of_labels
from .._tier1 import generate_distance_matrix
from .._tier3 import generate_proximal_neighbors_matrix
centroids = centroids_of_labels(label_map)
distance_matrix = generate_distance_matrix(centroids, centroids)
touch_matrix = generate_proximal_neighbors_matrix(distance_matrix, min_distance=min_distance, max_distance=max_distance)
intensities = read_intensities_from_map(label_map, parametric_map)
new_intensities = mode_of_touching_neighbors(intensities, touch_matrix)
parametric_map_destination = replace_intensities(label_map, new_intensities, parametric_map_destination)
return parametric_map_destination
| 39.2 | 196 | 0.760204 | from .._tier0 import execute
from .._tier0 import plugin_function
from .._tier0 import Image
import numpy as np
@plugin_function(categories=['combine', 'neighbor', 'map', 'in assistant'])
def mode_of_proximal_neighbors_map(parametric_map : Image, label_map : Image, parametric_map_destination : Image = None, min_distance : float = 0, max_distance : float = np.finfo(np.float32).max):
from .._tier1 import read_intensities_from_map
from .._tier2 import mode_of_touching_neighbors
from .._tier1 import replace_intensities
from .._tier9 import centroids_of_labels
from .._tier1 import generate_distance_matrix
from .._tier3 import generate_proximal_neighbors_matrix
centroids = centroids_of_labels(label_map)
distance_matrix = generate_distance_matrix(centroids, centroids)
touch_matrix = generate_proximal_neighbors_matrix(distance_matrix, min_distance=min_distance, max_distance=max_distance)
intensities = read_intensities_from_map(label_map, parametric_map)
new_intensities = mode_of_touching_neighbors(intensities, touch_matrix)
parametric_map_destination = replace_intensities(label_map, new_intensities, parametric_map_destination)
return parametric_map_destination
| true | true |
1c34f23b14017b4111a937164abe1270b2fd4ee1 | 1,822 | py | Python | py/DirectShape.Transform.py | mathematicalmichael/SpringNodes | 3ff4034b6e57ee6efa55c963e1819f3d30a2c4ab | [
"MIT"
] | 51 | 2015-09-25T09:30:57.000Z | 2022-01-19T14:16:44.000Z | py/DirectShape.Transform.py | sabeelcoder/SpringNodes | e21a24965474d54369e74d23c06f8c42a7b926b5 | [
"MIT"
] | 66 | 2015-09-30T02:43:32.000Z | 2022-03-31T02:26:52.000Z | py/DirectShape.Transform.py | sabeelcoder/SpringNodes | e21a24965474d54369e74d23c06f8c42a7b926b5 | [
"MIT"
] | 48 | 2015-11-19T01:34:47.000Z | 2022-02-25T17:26:48.000Z | #Copyright(c) 2016, Dimitar Venkov
# @5devene, dimitar.ven@gmail.com
import clr
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
from RevitServices.Transactions import TransactionManager
doc = DocumentManager.Instance.CurrentDBDocument
app = DocumentManager.Instance.CurrentUIApplication.Application
isRvt2017 = int(app.VersionNumber) > 2016
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
clr.ImportExtensions(Revit.GeometryConversion)
clr.AddReference("RevitAPI")
from Autodesk.Revit.DB import *
def tolist(obj1):
if hasattr(obj1,"__iter__"): return obj1
else: return [obj1]
units = doc.GetUnits().GetFormatOptions(UnitType.UT_Length).DisplayUnits
factor = 1 / UnitUtils.ConvertToInternalUnits(1,units)
def cs2Trans(cs, scale = factor):
tf1 = Transform(Transform.Identity)
tf1.Origin = cs.Origin.ToXyz(True)
tf1.Basis[0] = cs.XAxis.ToXyz(True)
tf1.Basis[1] = cs.YAxis.ToXyz(True)
tf1.Basis[2] = cs.ZAxis.ToXyz(True)
return tf1.ScaleBasis(scale)
dsTyped = UnwrapElement(IN[0])
cs1 = tolist(IN[1])
transforms = map(cs2Trans, cs1)
cat = dsTyped.Category
TypeId = dsTyped.GetTypeId()
Lib_TypeId = TypeId.ToString()
dsLib = DirectShapeLibrary.GetDirectShapeLibrary(doc)
if not dsLib.ContainsType(Lib_TypeId): dsLib.AddDefinitionType(Lib_TypeId, TypeId)
def TransformDS(transf):
try:
if isRvt2017:
ds1 = DirectShape.CreateElementInstance(doc, TypeId, cat.Id, Lib_TypeId, transf)
else:
ds1 = DirectShape.CreateElementInstance(doc, TypeId, cat.Id, Lib_TypeId, transf, "Dynamo","spring nodes")
ds1.SetTypeId(TypeId)
return ds1.ToDSType(False)
except: return None
TransactionManager.Instance.EnsureInTransaction(doc)
OUT = map(TransformDS, transforms)
TransactionManager.Instance.TransactionTaskDone() | 30.881356 | 108 | 0.791438 |
import clr
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
from RevitServices.Transactions import TransactionManager
doc = DocumentManager.Instance.CurrentDBDocument
app = DocumentManager.Instance.CurrentUIApplication.Application
isRvt2017 = int(app.VersionNumber) > 2016
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
clr.ImportExtensions(Revit.GeometryConversion)
clr.AddReference("RevitAPI")
from Autodesk.Revit.DB import *
def tolist(obj1):
if hasattr(obj1,"__iter__"): return obj1
else: return [obj1]
units = doc.GetUnits().GetFormatOptions(UnitType.UT_Length).DisplayUnits
factor = 1 / UnitUtils.ConvertToInternalUnits(1,units)
def cs2Trans(cs, scale = factor):
tf1 = Transform(Transform.Identity)
tf1.Origin = cs.Origin.ToXyz(True)
tf1.Basis[0] = cs.XAxis.ToXyz(True)
tf1.Basis[1] = cs.YAxis.ToXyz(True)
tf1.Basis[2] = cs.ZAxis.ToXyz(True)
return tf1.ScaleBasis(scale)
dsTyped = UnwrapElement(IN[0])
cs1 = tolist(IN[1])
transforms = map(cs2Trans, cs1)
cat = dsTyped.Category
TypeId = dsTyped.GetTypeId()
Lib_TypeId = TypeId.ToString()
dsLib = DirectShapeLibrary.GetDirectShapeLibrary(doc)
if not dsLib.ContainsType(Lib_TypeId): dsLib.AddDefinitionType(Lib_TypeId, TypeId)
def TransformDS(transf):
try:
if isRvt2017:
ds1 = DirectShape.CreateElementInstance(doc, TypeId, cat.Id, Lib_TypeId, transf)
else:
ds1 = DirectShape.CreateElementInstance(doc, TypeId, cat.Id, Lib_TypeId, transf, "Dynamo","spring nodes")
ds1.SetTypeId(TypeId)
return ds1.ToDSType(False)
except: return None
TransactionManager.Instance.EnsureInTransaction(doc)
OUT = map(TransformDS, transforms)
TransactionManager.Instance.TransactionTaskDone() | true | true |
1c34f34beeb4ca12cb5aba811339a1c44f064b3e | 1,938 | py | Python | Utilities/BootstrapMean/BootstrapMean.py | wavefancy/BIDMC-PYTHON | 97c7d3e1bec19dd7fea34d4ecebbdf2af2b1faed | [
"MIT"
] | null | null | null | Utilities/BootstrapMean/BootstrapMean.py | wavefancy/BIDMC-PYTHON | 97c7d3e1bec19dd7fea34d4ecebbdf2af2b1faed | [
"MIT"
] | null | null | null | Utilities/BootstrapMean/BootstrapMean.py | wavefancy/BIDMC-PYTHON | 97c7d3e1bec19dd7fea34d4ecebbdf2af2b1faed | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Bootstrap values to estimate confidence interval for mean.
@Author: wavefancy@gmail.com
Usage:
BootstrapMean.py -n times -c confidence
BootstrapMean.py -h | --help | -v | --version | -f | --format
Notes:
1. Read content from stdin, and output results to stdout.
2. Input separated by 'white' characteres, including '\\n'.
Options:
-n times Number of times for bootstrapping
-c confidence Confidence interval, float1, float2 ..., eg. 0.8,0.95
-h --help Show this screen.
-v --version Show version.
"""
import sys
from docopt import docopt
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE,SIG_DFL)
if __name__ == '__main__':
args = docopt(__doc__, version='1.0')
#print(args)
ntimes = 100
confidence = []
if args['-n']:
ntimes = int(args['-n'])
if args['-c']:
confidence = [float(x) for x in args['-c'].split(',')]
#-------------------------------------------------
data = []
for line in sys.stdin:
line = line.strip()
if line:
data = data + [float(x) for x in line.split()]
#print(data)
import numpy
npData = numpy.array(data)
dataMean = numpy.mean(npData)
means = numpy.empty(ntimes)
for x in range(ntimes):
# resampling with replacement.
# print(numpy.random.choice(npData, len(npData), True))
means.put(x, numpy.mean(numpy.random.choice(npData, len(npData), True)))
sortedMeans = numpy.sort(means)
# print(sortedMeans)
sys.stdout.write('InputMean\tCI\tLeft\tRight\n')
for x in confidence:
skip = numpy.rint(len(means) * (1-x)/2)
# print(skip)
sys.stdout.write('%.4e\t%.4f\t%.4e\t%.4e\n'%(dataMean, x, sortedMeans[skip], sortedMeans[len(means)-skip-1]))
sys.stdout.flush()
sys.stdout.close()
sys.stderr.flush()
sys.stderr.close()
| 28.5 | 117 | 0.592879 |
import sys
from docopt import docopt
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE,SIG_DFL)
if __name__ == '__main__':
args = docopt(__doc__, version='1.0')
ntimes = 100
confidence = []
if args['-n']:
ntimes = int(args['-n'])
if args['-c']:
confidence = [float(x) for x in args['-c'].split(',')]
data = []
for line in sys.stdin:
line = line.strip()
if line:
data = data + [float(x) for x in line.split()]
import numpy
npData = numpy.array(data)
dataMean = numpy.mean(npData)
means = numpy.empty(ntimes)
for x in range(ntimes):
means.put(x, numpy.mean(numpy.random.choice(npData, len(npData), True)))
sortedMeans = numpy.sort(means)
sys.stdout.write('InputMean\tCI\tLeft\tRight\n')
for x in confidence:
skip = numpy.rint(len(means) * (1-x)/2)
sys.stdout.write('%.4e\t%.4f\t%.4e\t%.4e\n'%(dataMean, x, sortedMeans[skip], sortedMeans[len(means)-skip-1]))
sys.stdout.flush()
sys.stdout.close()
sys.stderr.flush()
sys.stderr.close()
| true | true |
1c34f35af8b61be42feaa3545d57c4cef1af40d0 | 454 | py | Python | plugins/ledger/cmdline.py | nxsofsys/electrum-zcl | 3675fa2f871a4e944fc875a4653f95d9b38b9e29 | [
"MIT"
] | 1 | 2021-04-04T20:40:29.000Z | 2021-04-04T20:40:29.000Z | plugins/ledger/cmdline.py | nxsofsys/electrum-zcl | 3675fa2f871a4e944fc875a4653f95d9b38b9e29 | [
"MIT"
] | null | null | null | plugins/ledger/cmdline.py | nxsofsys/electrum-zcl | 3675fa2f871a4e944fc875a4653f95d9b38b9e29 | [
"MIT"
] | null | null | null | from legder import LedgerPlugin
from electrum_vtc.util import print_msg
class BTChipCmdLineHandler:
def stop(self):
pass
def show_message(self, msg):
print_msg(msg)
def prompt_auth(self, msg):
import getpass
print_msg(msg)
response = getpass.getpass('')
if len(response) == 0:
return None
return response
class Plugin(LedgerPlugin):
handler = BTChipCmdLineHandler()
| 21.619048 | 39 | 0.645374 | from legder import LedgerPlugin
from electrum_vtc.util import print_msg
class BTChipCmdLineHandler:
def stop(self):
pass
def show_message(self, msg):
print_msg(msg)
def prompt_auth(self, msg):
import getpass
print_msg(msg)
response = getpass.getpass('')
if len(response) == 0:
return None
return response
class Plugin(LedgerPlugin):
handler = BTChipCmdLineHandler()
| true | true |
1c34f3b0be47bed7d917fc9a5adf536d78f30809 | 634 | py | Python | app/models/domain/rwmodel.py | StanislavRud/api-realword-app-test | 9a49f299b02cec26d237f3bc4b363c8b93520b7b | [
"MIT"
] | 1,875 | 2019-03-27T14:26:20.000Z | 2022-03-31T14:52:50.000Z | app/models/domain/rwmodel.py | StanislavRud/api-realword-app-test | 9a49f299b02cec26d237f3bc4b363c8b93520b7b | [
"MIT"
] | 232 | 2019-04-11T11:05:48.000Z | 2022-03-05T10:23:50.000Z | app/models/domain/rwmodel.py | StanislavRud/api-realword-app-test | 9a49f299b02cec26d237f3bc4b363c8b93520b7b | [
"MIT"
] | 433 | 2019-04-11T01:48:59.000Z | 2022-03-31T10:33:42.000Z | import datetime
from pydantic import BaseConfig, BaseModel
def convert_datetime_to_realworld(dt: datetime.datetime) -> str:
return dt.replace(tzinfo=datetime.timezone.utc).isoformat().replace("+00:00", "Z")
def convert_field_to_camel_case(string: str) -> str:
return "".join(
word if index == 0 else word.capitalize()
for index, word in enumerate(string.split("_"))
)
class RWModel(BaseModel):
class Config(BaseConfig):
allow_population_by_field_name = True
json_encoders = {datetime.datetime: convert_datetime_to_realworld}
alias_generator = convert_field_to_camel_case
| 28.818182 | 86 | 0.722397 | import datetime
from pydantic import BaseConfig, BaseModel
def convert_datetime_to_realworld(dt: datetime.datetime) -> str:
return dt.replace(tzinfo=datetime.timezone.utc).isoformat().replace("+00:00", "Z")
def convert_field_to_camel_case(string: str) -> str:
return "".join(
word if index == 0 else word.capitalize()
for index, word in enumerate(string.split("_"))
)
class RWModel(BaseModel):
class Config(BaseConfig):
allow_population_by_field_name = True
json_encoders = {datetime.datetime: convert_datetime_to_realworld}
alias_generator = convert_field_to_camel_case
| true | true |
1c34f3f80eecf4e930c3ce750c5a552fb38cecad | 3,809 | py | Python | src/transformers/tokenization_distilbert.py | dmlap/transformers | 79588e6fdb5af8add092fc27dd695ea1ebc68b18 | [
"Apache-2.0"
] | 647 | 2020-10-27T01:35:35.000Z | 2022-03-29T12:59:11.000Z | src/transformers/tokenization_distilbert.py | hmason/transformers | ab90353f1abfd15f8d21f99395658d060679a08c | [
"Apache-2.0"
] | 30 | 2020-06-07T12:28:07.000Z | 2022-03-20T05:26:03.000Z | src/transformers/tokenization_distilbert.py | hmason/transformers | ab90353f1abfd15f8d21f99395658d060679a08c | [
"Apache-2.0"
] | 48 | 2020-07-15T09:45:46.000Z | 2022-03-01T07:27:59.000Z | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for DistilBERT."""
import logging
from .tokenization_bert import BertTokenizer, BertTokenizerFast
logger = logging.getLogger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
"distilbert-base-uncased-distilled-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
"distilbert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
"distilbert-base-cased-distilled-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
"distilbert-base-german-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-german-cased-vocab.txt",
"distilbert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizer(BertTokenizer):
r"""
Constructs a DistilBertTokenizer.
:class:`~transformers.DistilBertTokenizer is identical to :class:`~transformers.BertTokenizer` and runs end-to-end
tokenization: punctuation splitting + wordpiece.
Refer to superclass :class:`~transformers.BertTokenizer` for usage examples and documentation concerning
parameters.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
model_input_names = ["attention_mask"]
class DistilBertTokenizerFast(BertTokenizerFast):
r"""
Constructs a "Fast" DistilBertTokenizer (backed by HuggingFace's `tokenizers` library).
:class:`~transformers.DistilBertTokenizerFast` is identical to :class:`~transformers.BertTokenizerFast` and runs end-to-end
tokenization: punctuation splitting + wordpiece.
Refer to superclass :class:`~transformers.BertTokenizerFast` for usage examples and documentation concerning
parameters.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
model_input_names = ["attention_mask"]
| 41.402174 | 139 | 0.757942 |
import logging
from .tokenization_bert import BertTokenizer, BertTokenizerFast
logger = logging.getLogger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
"distilbert-base-uncased-distilled-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
"distilbert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
"distilbert-base-cased-distilled-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
"distilbert-base-german-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-german-cased-vocab.txt",
"distilbert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizer(BertTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
model_input_names = ["attention_mask"]
class DistilBertTokenizerFast(BertTokenizerFast):
    """Fast (Rust-backed) tokenizer for DistilBERT.

    Identical to :class:`BertTokenizerFast` apart from the pretrained
    resource maps below; ``model_input_names`` omits token-type IDs,
    which DistilBERT does not use.
    """
    # Filenames expected inside a saved vocabulary directory.
    vocab_files_names = VOCAB_FILES_NAMES
    # checkpoint shortcut name -> remote vocab-file URL.
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    # Maximum input length (positional-embedding size) per checkpoint.
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Default constructor kwargs (lower-casing flag) per checkpoint.
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["attention_mask"]
| true | true |
1c34f46b2bd79018cc6279a1ed2396587cb5a3b9 | 24,065 | py | Python | quantstats/_plotting/core.py | danilodsp/quantstats | 67a647baeba756c57d87bc7028f55d4dee8702b4 | [
"Apache-2.0"
] | 1 | 2019-09-03T11:06:16.000Z | 2019-09-03T11:06:16.000Z | quantstats/_plotting/core.py | danilodsp/quantstats | 67a647baeba756c57d87bc7028f55d4dee8702b4 | [
"Apache-2.0"
] | null | null | null | quantstats/_plotting/core.py | danilodsp/quantstats | 67a647baeba756c57d87bc7028f55d4dee8702b4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Quantreturns: Portfolio analytics for quants
# https://github.com/ranaroussi/quantreturns
#
# Copyright 2019 Ran Aroussi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib.pyplot as _plt
try:
_plt.rcParams["font.family"] = "Arial"
except Exception:
pass
import matplotlib.dates as _mdates
from matplotlib.ticker import (
FormatStrFormatter as _FormatStrFormatter,
FuncFormatter as _FuncFormatter
)
import pandas as _pd
import numpy as _np
import seaborn as _sns
from .. import stats as _stats
_sns.set(font_scale=1.1, rc={
'figure.figsize': (10, 6),
'axes.facecolor': 'white',
'figure.facecolor': 'white',
'grid.color': '#dddddd',
'grid.linewidth': 0.5,
"lines.linewidth": 1.5,
'text.color': '#333333',
'xtick.color': '#666666',
'ytick.color': '#666666'
})
_FLATUI_COLORS = ["#fedd78", "#348dc1", "#af4b64",
"#4fa487", "#9b59b6", "#808080"]
_GRAYSCALE_COLORS = ['silver', '#222222', 'gray'] * 3
def _get_colors(grayscale):
    """Return the (palette, linestyle, alpha) triple for a plot.

    ``grayscale`` selects the muted gray palette with a lower alpha;
    otherwise the flat-UI color palette is used. The linestyle is solid
    in both modes.
    """
    if grayscale:
        return _GRAYSCALE_COLORS, '-', 0.5
    return _FLATUI_COLORS, '-', .8
def plot_returns_bars(returns, benchmark=None,
                      returns_label="Strategy",
                      hline=None, hlw=None, hlcolor="red", hllabel="",
                      resample="A", title="Returns", match_volatility=False,
                      log_scale=False, figsize=(10, 6),
                      grayscale=False, fontname='Arial', ylabel=True,
                      subtitle=True, savefig=None, show=True):
    """Plot resampled (default: annual) returns as a bar chart.

    Parameters
    ----------
    returns : _pd.Series
        Daily return series (fractional, e.g. 0.01 == 1%).
    benchmark : _pd.Series, optional
        Benchmark return series; plotted alongside the strategy bars.
    hline : float, optional
        If truthy, draw a horizontal reference line at this y value.
    resample : str or None
        Pandas resample rule; each bucket is compounded via
        ``_stats.comp``. ``None`` skips resampling.
    match_volatility : bool
        Rescale ``returns`` to the benchmark's volatility; requires
        ``benchmark``.
    savefig : str or dict, optional
        Path, or kwargs dict forwarded to ``_plt.savefig``.
    show : bool
        Display the figure; when ``False`` the figure object is returned
        instead (note: the figure is closed either way).

    Raises
    ------
    ValueError
        If ``match_volatility`` is set without a ``benchmark``.
    """
    if match_volatility and benchmark is None:
        raise ValueError('match_volatility requires passing of '
                         'benchmark.')
    elif match_volatility and benchmark is not None:
        # Scale strategy returns so their std matches the benchmark's
        # over the overlapping date range.
        bmark_vol = benchmark.loc[returns.index].std()
        returns = (returns / returns.std()) * bmark_vol
    # ---------------
    colors, ls, alpha = _get_colors(grayscale)
    df = _pd.DataFrame(index=returns.index, data={returns_label: returns})
    if isinstance(benchmark, _pd.Series):
        # Align benchmark to the strategy's dates; benchmark plotted first.
        df['Benchmark'] = benchmark[benchmark.index.isin(returns.index)]
        df = df[['Benchmark', returns_label]]
    df = df.dropna()
    if resample is not None:
        # Compound returns within each resample bucket.
        df = df.resample(resample).apply(
            _stats.comp).resample(resample).last()
    # ---------------
    fig, ax = _plt.subplots(figsize=figsize)
    # use a more precise date string for the x axis locations in the toolbar
    fig.suptitle(title+"\n", y=.99, fontweight="bold", fontname=fontname,
                 fontsize=14, color="black")
    if subtitle:
        ax.set_title("\n%s - %s " % (
            df.index.date[:1][0].strftime('%Y'),
            df.index.date[-1:][0].strftime('%Y')
        ), fontsize=12, color='gray')
    if benchmark is None:
        # Drop the benchmark color so the strategy keeps its usual hue.
        colors = colors[1:]
    df.plot(kind='bar', ax=ax, color=colors)
    fig.set_facecolor('white')
    ax.set_facecolor('white')
    ax.set_xticklabels(df.index.year)
    # ax.fmt_xdata = _mdates.DateFormatter('%Y-%m-%d')
    years = sorted(list(set(df.index.year)))
    if len(years) > 10:
        # Thin out year labels so at most ~10 are shown.
        mod = int(len(years)/10)
        _plt.xticks(_np.arange(len(years)), [
            str(year) if not i % mod else '' for i, year in enumerate(years)])
    # rotate and align the tick labels so they look better
    fig.autofmt_xdate()
    if hline:
        if grayscale:
            hlcolor = 'gray'
        ax.axhline(hline, ls="--", lw=hlw, color=hlcolor,
                   label=hllabel, zorder=2)
    ax.axhline(0, ls="--", lw=1, color="#000000", zorder=2)
    if isinstance(benchmark, _pd.Series) or hline:
        ax.legend(fontsize=12)
    _plt.yscale("symlog" if log_scale else "linear")
    ax.set_xlabel('')
    if ylabel:
        ax.set_ylabel("Returns", fontname=fontname,
                      fontweight='bold', fontsize=12, color="black")
        ax.yaxis.set_label_coords(-.1, .5)
    ax.yaxis.set_major_formatter(_FuncFormatter(format_pct_axis))
    # Layout tweaks are best-effort: some backends raise here.
    try:
        _plt.subplots_adjust(hspace=0, bottom=0, top=1)
    except Exception:
        pass
    try:
        fig.tight_layout()
    except Exception:
        pass
    if savefig:
        if isinstance(savefig, dict):
            _plt.savefig(**savefig)
        else:
            _plt.savefig(savefig)
    if show:
        _plt.show(fig)
    _plt.close()
    if not show:
        return fig
def plot_timeseries(returns, benchmark=None,
                    title="Returns", compound=False, cumulative=True,
                    fill=False, returns_label="Strategy",
                    hline=None, hlw=None, hlcolor="red", hllabel="",
                    percent=True, match_volatility=False, log_scale=False,
                    resample=None, lw=1.5, figsize=(10, 6), ylabel="",
                    grayscale=False, fontname="Arial",
                    subtitle=True, savefig=None, show=True):
    """Plot a (optionally compounded/cumulative) return time series.

    NOTE: ``returns`` (and ``benchmark``) are mutated in place by
    ``fillna(0, inplace=True)`` — callers should pass a copy if they
    need the original NaNs preserved.

    Parameters
    ----------
    compound : bool
        Compound returns (``_stats.compsum``) instead of leaving them raw;
        with ``cumulative=False`` a simple ``cumsum`` is used.
    fill : bool
        Shade the area between the curve and zero.
    resample : str, optional
        Pandas resample rule applied after compounding.
    show : bool
        Display the figure; when ``False`` the figure object is returned.

    Raises
    ------
    ValueError
        If ``match_volatility`` is set without a ``benchmark``.
    """
    colors, ls, alpha = _get_colors(grayscale)
    returns.fillna(0, inplace=True)
    if isinstance(benchmark, _pd.Series):
        benchmark.fillna(0, inplace=True)
    if match_volatility and benchmark is None:
        raise ValueError('match_volatility requires passing of '
                         'benchmark.')
    elif match_volatility and benchmark is not None:
        # Rescale strategy to the benchmark's volatility.
        bmark_vol = benchmark.std()
        returns = (returns / returns.std()) * bmark_vol
    # ---------------
    if compound is True:
        if cumulative:
            returns = _stats.compsum(returns)
            if isinstance(benchmark, _pd.Series):
                benchmark = _stats.compsum(benchmark)
        else:
            returns = returns.cumsum()
            if isinstance(benchmark, _pd.Series):
                benchmark = benchmark.cumsum()
    if resample:
        # When compounding, keep each bucket's last value; otherwise sum.
        returns = returns.resample(resample)
        returns = returns.last() if compound is True else returns.sum()
        if isinstance(benchmark, _pd.Series):
            benchmark = benchmark.resample(resample)
            benchmark = benchmark.last(
            ) if compound is True else benchmark.sum()
    # ---------------
    fig, ax = _plt.subplots(figsize=figsize)
    fig.suptitle(title+"\n", y=.99, fontweight="bold", fontname=fontname,
                 fontsize=14, color="black")
    if subtitle:
        ax.set_title("\n%s - %s " % (
            returns.index.date[:1][0].strftime('%e %b \'%y'),
            returns.index.date[-1:][0].strftime('%e %b \'%y')
        ), fontsize=12, color='gray')
    fig.set_facecolor('white')
    ax.set_facecolor('white')
    if isinstance(benchmark, _pd.Series):
        ax.plot(benchmark, lw=lw, ls=ls, label="Benchmark", color=colors[0])
    alpha = .25 if grayscale else 1
    ax.plot(returns, lw=lw, label=returns_label, color=colors[1], alpha=alpha)
    if fill:
        ax.fill_between(returns.index, 0, returns, color=colors[1], alpha=.25)
    # rotate and align the tick labels so they look better
    fig.autofmt_xdate()
    # use a more precise date string for the x axis locations in the toolbar
    # ax.fmt_xdata = _mdates.DateFormatter('%Y-%m-%d')
    if hline:
        if grayscale:
            hlcolor = 'black'
        ax.axhline(hline, ls="--", lw=hlw, color=hlcolor,
                   label=hllabel, zorder=2)
    # Two zero lines: a solid gray underlay plus a dashed overlay.
    ax.axhline(0, ls="-", lw=1,
               color='gray', zorder=1)
    ax.axhline(0, ls="--", lw=1,
               color='white' if grayscale else 'black', zorder=2)
    if isinstance(benchmark, _pd.Series) or hline:
        ax.legend(fontsize=12)
    _plt.yscale("symlog" if log_scale else "linear")
    if percent:
        ax.yaxis.set_major_formatter(_FuncFormatter(format_pct_axis))
        # ax.yaxis.set_major_formatter(_plt.FuncFormatter(
        #     lambda x, loc: "{:,}%".format(int(x*100))))
    ax.set_xlabel('')
    if ylabel:
        ax.set_ylabel(ylabel, fontname=fontname,
                      fontweight='bold', fontsize=12, color="black")
        ax.yaxis.set_label_coords(-.1, .5)
    # Layout tweaks are best-effort: some backends raise here.
    try:
        _plt.subplots_adjust(hspace=0, bottom=0, top=1)
    except Exception:
        pass
    try:
        fig.tight_layout()
    except Exception:
        pass
    if savefig:
        if isinstance(savefig, dict):
            _plt.savefig(**savefig)
        else:
            _plt.savefig(savefig)
    if show:
        _plt.show(fig)
    _plt.close()
    if not show:
        return fig
def plot_histogram(returns, resample="M", bins=20,
                   fontname='Arial', grayscale=False,
                   title="Returns", kde=True, figsize=(10, 6),
                   ylabel=True, subtitle=True, compounded=True,
                   savefig=None, show=True):
    """Plot a histogram of resampled (default: monthly) returns.

    Each resample bucket is aggregated with ``_stats.comp`` (compounded)
    or ``_np.sum``. A dashed vertical line marks the mean.

    NOTE(review): uses ``_sns.distplot``, which is deprecated in modern
    seaborn (replaced by ``histplot``/``displot``) — confirm against the
    pinned seaborn version before upgrading.
    """
    colors = ['#348dc1', '#003366', 'red']
    if grayscale:
        colors = ['silver', 'gray', 'black']
    apply_fnc = _stats.comp if compounded else _np.sum
    returns = returns.fillna(0).resample(resample).apply(
        apply_fnc).resample(resample).last()
    fig, ax = _plt.subplots(figsize=figsize)
    fig.suptitle(title+"\n", y=.99, fontweight="bold", fontname=fontname,
                 fontsize=14, color="black")
    if subtitle:
        ax.set_title("\n%s - %s " % (
            returns.index.date[:1][0].strftime('%Y'),
            returns.index.date[-1:][0].strftime('%Y')
        ), fontsize=12, color='gray')
    fig.set_facecolor('white')
    ax.set_facecolor('white')
    ax.axvline(returns.mean(), ls="--", lw=1.5,
               color=colors[2], zorder=2, label="Average")
    _sns.distplot(returns, bins=bins,
                  axlabel="", color=colors[0], hist_kws=dict(alpha=1),
                  kde=kde,
                  # , label="Kernel Estimate"
                  kde_kws=dict(color='black', alpha=.7),
                  ax=ax)
    # X axis shows returns as whole percentages.
    ax.xaxis.set_major_formatter(_plt.FuncFormatter(
        lambda x, loc: "{:,}%".format(int(x*100))))
    ax.axhline(0.01, lw=1, color="#000000", zorder=2)
    ax.axvline(0, lw=1, color="#000000", zorder=2)
    ax.set_xlabel('')
    if ylabel:
        ax.set_ylabel("Occurrences", fontname=fontname,
                      fontweight='bold', fontsize=12, color="black")
        ax.yaxis.set_label_coords(-.1, .5)
    ax.legend(fontsize=12)
    # fig.autofmt_xdate()
    # Layout tweaks are best-effort: some backends raise here.
    try:
        _plt.subplots_adjust(hspace=0, bottom=0, top=1)
    except Exception:
        pass
    try:
        fig.tight_layout()
    except Exception:
        pass
    if savefig:
        if isinstance(savefig, dict):
            _plt.savefig(**savefig)
        else:
            _plt.savefig(savefig)
    if show:
        _plt.show(fig)
    _plt.close()
    if not show:
        return fig
def plot_rolling_stats(returns, benchmark=None, title="",
                       returns_label="Strategy",
                       hline=None, hlw=None, hlcolor="red", hllabel="",
                       lw=1.5, figsize=(10, 6), ylabel="",
                       grayscale=False, fontname="Arial", subtitle=True,
                       savefig=None, show=True):
    """Plot a pre-computed rolling statistic (e.g. rolling Sharpe).

    ``returns`` here is the rolling statistic series itself, not raw
    returns; an optional ``benchmark`` series is overlaid when given.
    Y ticks use a plain two-decimal format (not percent).
    """
    colors, ls, alpha = _get_colors(grayscale)
    fig, ax = _plt.subplots(figsize=figsize)
    df = _pd.DataFrame(index=returns.index, data={returns_label: returns})
    if isinstance(benchmark, _pd.Series):
        # Align benchmark to the strategy's dates and draw it first.
        df['Benchmark'] = benchmark[benchmark.index.isin(returns.index)]
        df = df[['Benchmark', returns_label]].dropna()
        ax.plot(df['Benchmark'], lw=lw, label="Benchmark",
                color=colors[0], alpha=.8)
    ax.plot(df[returns_label].dropna(), lw=lw,
            label=returns_label, color=colors[1])
    # rotate and align the tick labels so they look better
    fig.autofmt_xdate()
    # use a more precise date string for the x axis locations in the toolbar
    # ax.fmt_xdata = _mdates.DateFormatter('%Y-%m-%d')\
    fig.suptitle(title+"\n", y=.99, fontweight="bold", fontname=fontname,
                 fontsize=14, color="black")
    if subtitle:
        ax.set_title("\n%s - %s " % (
            df.index.date[:1][0].strftime('%e %b \'%y'),
            df.index.date[-1:][0].strftime('%e %b \'%y')
        ), fontsize=12, color='gray')
    if hline:
        if grayscale:
            hlcolor = 'black'
        ax.axhline(hline, ls="--", lw=hlw, color=hlcolor,
                   label=hllabel, zorder=2)
    ax.axhline(0, ls="--", lw=1, color="#000000", zorder=2)
    if ylabel:
        ax.set_ylabel(ylabel, fontname=fontname,
                      fontweight='bold', fontsize=12, color="black")
        ax.yaxis.set_label_coords(-.1, .5)
    ax.yaxis.set_major_formatter(_FormatStrFormatter('%.2f'))
    ax.legend(fontsize=12)
    # Layout tweaks are best-effort: some backends raise here.
    try:
        _plt.subplots_adjust(hspace=0, bottom=0, top=1)
    except Exception:
        pass
    try:
        fig.tight_layout()
    except Exception:
        pass
    if savefig:
        if isinstance(savefig, dict):
            _plt.savefig(**savefig)
        else:
            _plt.savefig(savefig)
    if show:
        _plt.show(fig)
    _plt.close()
    if not show:
        return fig
def plot_rolling_beta(returns, benchmark,
                      window1=126, window1_label="",
                      window2=None, window2_label="",
                      title="", hlcolor="red", figsize=(10, 6),
                      grayscale=False, fontname="Arial", lw=1.5,
                      ylabel=True, subtitle=True, savefig=None, show=True):
    """Plot rolling beta of ``returns`` vs ``benchmark``.

    Beta comes from ``_stats.rolling_greeks`` over ``window1`` periods;
    an optional second window is overlaid in gray. A dashed line marks
    the mean beta over the whole period.
    """
    colors, ls, alpha = _get_colors(grayscale)
    fig, ax = _plt.subplots(figsize=figsize)
    fig.suptitle(title+"\n", y=.99, fontweight="bold", fontname=fontname,
                 fontsize=14, color="black")
    if subtitle:
        ax.set_title("\n%s - %s " % (
            returns.index.date[:1][0].strftime('%e %b \'%y'),
            returns.index.date[-1:][0].strftime('%e %b \'%y')
        ), fontsize=12, color='gray')
    beta = _stats.rolling_greeks(returns, benchmark, window1)['beta']
    ax.plot(beta, lw=lw, label=window1_label, color=colors[1])
    if window2:
        ax.plot(_stats.rolling_greeks(returns, benchmark, window2)['beta'],
                lw=lw, label=window2_label, color="gray", alpha=0.8)
    # Y ticks span at least [-1, 1] (values handled as integer percents).
    mmin = min([-100, int(beta.min()*100)])
    mmax = max([100, int(beta.max()*100)])
    step = 50 if (mmax-mmin) >= 200 else 100
    ax.set_yticks([x / 100 for x in list(range(mmin, mmax, step))])
    hlcolor = 'black' if grayscale else hlcolor
    ax.axhline(beta.mean(), ls="--", lw=1.5,
               color=hlcolor, zorder=2)
    ax.axhline(0, ls="--", lw=1, color="#000000", zorder=2)
    fig.autofmt_xdate()
    # use a more precise date string for the x axis locations in the toolbar
    ax.fmt_xdata = _mdates.DateFormatter('%Y-%m-%d')
    if ylabel:
        ax.set_ylabel("Beta", fontname=fontname,
                      fontweight='bold', fontsize=12, color="black")
        ax.yaxis.set_label_coords(-.1, .5)
    ax.legend(fontsize=12)
    # Layout tweaks are best-effort: some backends raise here.
    try:
        _plt.subplots_adjust(hspace=0, bottom=0, top=1)
    except Exception:
        pass
    try:
        fig.tight_layout()
    except Exception:
        pass
    if savefig:
        if isinstance(savefig, dict):
            _plt.savefig(**savefig)
        else:
            _plt.savefig(savefig)
    if show:
        _plt.show(fig)
    _plt.close()
    if not show:
        return fig
def plot_longest_drawdowns(returns, periods=5, lw=1.5,
                           fontname='Arial', grayscale=False,
                           log_scale=False, figsize=(10, 6), ylabel=True,
                           subtitle=True, compounded=True,
                           savefig=None, show=True):
    """Plot cumulative returns with the N longest drawdowns shaded.

    Drawdown periods come from ``_stats.drawdown_details`` sorted by
    duration (days) descending; the top ``periods`` spans are shaded
    on top of the cumulative-return curve.
    """
    colors = ['#348dc1', '#003366', 'red']
    if grayscale:
        colors = ['#000000'] * 3
    dd = _stats.to_drawdown_series(returns.fillna(0))
    dddf = _stats.drawdown_details(dd)
    # mergesort is stable, keeping ties in their original order.
    longest_dd = dddf.sort_values(
        by='days', ascending=False, kind='mergesort')[:periods]
    fig, ax = _plt.subplots(figsize=figsize)
    fig.suptitle("Top %.0f Drawdown Periods\n" %
                 periods, y=.99, fontweight="bold", fontname=fontname,
                 fontsize=14, color="black")
    if subtitle:
        ax.set_title("\n%s - %s " % (
            returns.index.date[:1][0].strftime('%e %b \'%y'),
            returns.index.date[-1:][0].strftime('%e %b \'%y')
        ), fontsize=12, color='gray')
    fig.set_facecolor('white')
    ax.set_facecolor('white')
    series = _stats.compsum(returns) if compounded else returns.cumsum()
    ax.plot(series, lw=lw, label="Backtest", color=colors[0])
    highlight = 'black' if grayscale else 'red'
    for idx, row in longest_dd.iterrows():
        # Shade each drawdown's [start, end] span.
        ax.axvspan(*_mdates.datestr2num([str(row['start']), str(row['end'])]),
                   color=highlight, alpha=.1)
    # rotate and align the tick labels so they look better
    fig.autofmt_xdate()
    # use a more precise date string for the x axis locations in the toolbar
    ax.fmt_xdata = _mdates.DateFormatter('%Y-%m-%d')
    ax.axhline(0, ls="--", lw=1, color="#000000", zorder=2)
    _plt.yscale("symlog" if log_scale else "linear")
    if ylabel:
        ax.set_ylabel("Cumulative Returns", fontname=fontname,
                      fontweight='bold', fontsize=12, color="black")
        ax.yaxis.set_label_coords(-.1, .5)
    ax.yaxis.set_major_formatter(_FuncFormatter(format_pct_axis))
    # ax.yaxis.set_major_formatter(_plt.FuncFormatter(
    #     lambda x, loc: "{:,}%".format(int(x*100))))
    fig.autofmt_xdate()
    # Layout tweaks are best-effort: some backends raise here.
    try:
        _plt.subplots_adjust(hspace=0, bottom=0, top=1)
    except Exception:
        pass
    try:
        fig.tight_layout()
    except Exception:
        pass
    if savefig:
        if isinstance(savefig, dict):
            _plt.savefig(**savefig)
        else:
            _plt.savefig(savefig)
    if show:
        _plt.show(fig)
    _plt.close()
    if not show:
        return fig
def plot_distribution(returns, figsize=(10, 6),
                      fontname='Arial', grayscale=False, ylabel=True,
                      subtitle=True, compounded=True,
                      savefig=None, show=True):
    """Box-plot the return distribution at daily/weekly/monthly/quarterly/
    yearly frequencies ("Return Quantiles").

    Each frequency column is the input ``returns`` resampled and
    aggregated with ``_stats.comp`` (compounded) or ``_np.sum``, then
    forward-filled so all columns share the daily index.

    Fix: the y-axis label typo 'Rerurns' is corrected to 'Returns'.
    """
    colors = _FLATUI_COLORS
    if grayscale:
        colors = ['#f9f9f9', '#dddddd', '#bbbbbb', '#999999', '#808080']
    # colors, ls, alpha = _get_colors(grayscale)
    port = _pd.DataFrame(returns.fillna(0))
    port.columns = ['Daily']
    apply_fnc = _stats.comp if compounded else _np.sum
    port['Weekly'] = port['Daily'].resample(
        'W-MON').apply(apply_fnc).resample('W-MON').last()
    port['Weekly'].ffill(inplace=True)
    port['Monthly'] = port['Daily'].resample(
        'M').apply(apply_fnc).resample('M').last()
    port['Monthly'].ffill(inplace=True)
    port['Quarterly'] = port['Daily'].resample(
        'Q').apply(apply_fnc).resample('Q').last()
    port['Quarterly'].ffill(inplace=True)
    port['Yearly'] = port['Daily'].resample(
        'A').apply(apply_fnc).resample('A').last()
    port['Yearly'].ffill(inplace=True)
    fig, ax = _plt.subplots(figsize=figsize)
    fig.suptitle("Return Quantiles\n", y=.99,
                 fontweight="bold", fontname=fontname,
                 fontsize=14, color="black")
    if subtitle:
        ax.set_title("\n%s - %s " % (
            returns.index.date[:1][0].strftime('%e %b \'%y'),
            returns.index.date[-1:][0].strftime('%e %b \'%y')
        ), fontsize=12, color='gray')
    fig.set_facecolor('white')
    ax.set_facecolor('white')
    _sns.boxplot(data=port, ax=ax, palette=tuple(colors[:5]))
    # Y axis shows returns as whole percentages.
    ax.yaxis.set_major_formatter(_plt.FuncFormatter(
        lambda x, loc: "{:,}%".format(int(x*100))))
    if ylabel:
        # Typo fixed: was 'Rerurns'.
        ax.set_ylabel('Returns', fontname=fontname,
                      fontweight='bold', fontsize=12, color="black")
        ax.yaxis.set_label_coords(-.1, .5)
    fig.autofmt_xdate()
    # Layout tweaks are best-effort: some backends raise here.
    try:
        _plt.subplots_adjust(hspace=0)
    except Exception:
        pass
    try:
        fig.tight_layout(w_pad=0, h_pad=0)
    except Exception:
        pass
    if savefig:
        if isinstance(savefig, dict):
            _plt.savefig(**savefig)
        else:
            _plt.savefig(savefig)
    if show:
        _plt.show(fig)
    _plt.close()
    if not show:
        return fig
def plot_table(tbl, columns=None, title="", title_loc="left",
               header=True,
               colWidths=None,
               rowLoc='right',
               colLoc='right',
               colLabels=None,
               edges='horizontal',
               orient='horizontal',
               figsize=(10, 6),
               savefig=None,
               show=False):
    """Render a DataFrame as a matplotlib table figure.

    Parameters
    ----------
    tbl : _pd.DataFrame
        Data to render; ``tbl.values`` become the cells.
    columns : sequence, optional
        Replacement column names; failures to assign are ignored.
    header : bool
        Use ``tbl.columns`` as a bold header row.
    colLabels : sequence, optional
        Accepted for backward compatibility but currently unused
        (the header always comes from ``tbl.columns``).
        Fix: the default was a mutable ``[]``; now ``None``.
    figsize : tuple
        Accepted for API symmetry with the other plot functions but
        currently ignored — the figure size is fixed at (5.5, 6);
        honoring it would change existing output.
    show : bool
        Display the figure (default False here, unlike the other
        plotting helpers); when False the figure object is returned.
    """
    if columns is not None:
        try:
            tbl.columns = columns
        except Exception:
            pass
    fig = _plt.figure(figsize=(5.5, 6))
    ax = _plt.subplot(111, frame_on=False)
    if title != "":
        ax.set_title(title, fontweight="bold",
                     fontsize=14, color="black", loc=title_loc)
    the_table = ax.table(cellText=tbl.values,
                         colWidths=colWidths,
                         rowLoc=rowLoc,
                         colLoc=colLoc,
                         edges=edges,
                         colLabels=(tbl.columns if header else None),
                         loc='center',
                         zorder=2
                         )
    the_table.auto_set_font_size(False)
    the_table.set_fontsize(12)
    the_table.scale(1, 1)
    # Style every cell: header row bold on black, first column bold when
    # the table is "vertical", remaining rows thin-bordered.
    for (row, col), cell in the_table.get_celld().items():
        cell.set_height(0.08)
        cell.set_text_props(color='black')
        cell.set_edgecolor('#dddddd')
        if row == 0 and header:
            cell.set_edgecolor('black')
            cell.set_facecolor('black')
            cell.set_linewidth(2)
            cell.set_text_props(weight='bold', color='black')
        elif col == 0 and "vertical" in orient:
            cell.set_edgecolor('#dddddd')
            cell.set_linewidth(1)
            cell.set_text_props(weight='bold', color='black')
        elif row > 1:
            cell.set_linewidth(1)
    ax.grid(False)
    ax.set_xticks([])
    ax.set_yticks([])
    # Layout tweaks are best-effort: some backends raise here.
    try:
        _plt.subplots_adjust(hspace=0)
    except Exception:
        pass
    try:
        fig.tight_layout(w_pad=0, h_pad=0)
    except Exception:
        pass
    if savefig:
        if isinstance(savefig, dict):
            _plt.savefig(**savefig)
        else:
            _plt.savefig(savefig)
    if show:
        _plt.show(fig)
    _plt.close()
    if not show:
        return fig
def format_cur_axis(x, pos):
    """Matplotlib tick formatter: dollar amounts with K/M/B/T suffixes.

    ``pos`` is required by the FuncFormatter protocol and unused.
    Whole-number values drop the trailing '.0' (e.g. '$2M', not '$2.0M').
    """
    scales = (
        (1e12, 1e-12, 'T', '$%1.1f'),
        (1e9, 1e-9, 'B', '$%1.1f'),
        (1e6, 1e-6, 'M', '$%1.1f'),
        (1e3, 1e-3, 'K', '$%1.0f'),
    )
    for threshold, factor, suffix, fmt in scales:
        if x >= threshold:
            text = (fmt + suffix) % (x * factor)
            return text.replace('.0' + suffix, suffix)
    return ('$%1.0f' % x).replace('.0', '')
def format_pct_axis(x, pos):
    """Matplotlib tick formatter: fractional values as percentages.

    The input is a fraction (0.5 -> '50%'); very large percentages get
    K/M/B/T suffixes. ``pos`` is required by the FuncFormatter protocol
    and unused. Whole values drop the trailing '.0'.
    """
    pct = x * 100
    scales = (
        (1e12, 1e-12, 'T'),
        (1e9, 1e-9, 'B'),
        (1e6, 1e-6, 'M'),
        (1e3, 1e-3, 'K'),
    )
    for threshold, factor, suffix in scales:
        if pct >= threshold:
            text = ('%1.1f' + suffix + '%%') % (pct * factor)
            return text.replace('.0' + suffix + '%', suffix + '%')
    return ('%1.0f%%' % pct).replace('.0%', '%')
| 30.308564 | 78 | 0.564471 |
import matplotlib.pyplot as _plt
try:
_plt.rcParams["font.family"] = "Arial"
except Exception:
pass
import matplotlib.dates as _mdates
from matplotlib.ticker import (
FormatStrFormatter as _FormatStrFormatter,
FuncFormatter as _FuncFormatter
)
import pandas as _pd
import numpy as _np
import seaborn as _sns
from .. import stats as _stats
_sns.set(font_scale=1.1, rc={
'figure.figsize': (10, 6),
'axes.facecolor': 'white',
'figure.facecolor': 'white',
'grid.color': '#dddddd',
'grid.linewidth': 0.5,
"lines.linewidth": 1.5,
'text.color': '#333333',
'xtick.color': '#666666',
'ytick.color': '#666666'
})
_FLATUI_COLORS = ["#fedd78", "#348dc1", "#af4b64",
"#4fa487", "#9b59b6", "#808080"]
_GRAYSCALE_COLORS = ['silver', '#222222', 'gray'] * 3
def _get_colors(grayscale):
    """Return (palette, linestyle, alpha) for color vs. grayscale plots."""
    colors = _FLATUI_COLORS
    ls = '-'
    alpha = .8
    if grayscale:
        colors = _GRAYSCALE_COLORS
        ls = '-'
        alpha = 0.5
    return colors, ls, alpha
def plot_returns_bars(returns, benchmark=None,
                      returns_label="Strategy",
                      hline=None, hlw=None, hlcolor="red", hllabel="",
                      resample="A", title="Returns", match_volatility=False,
                      log_scale=False, figsize=(10, 6),
                      grayscale=False, fontname='Arial', ylabel=True,
                      subtitle=True, savefig=None, show=True):
    """Plot resampled (default: annual) returns as a bar chart.

    Optionally overlays a benchmark and rescales the strategy to the
    benchmark's volatility (``match_volatility``). Returns the figure
    when ``show`` is False. Raises ValueError if ``match_volatility``
    is set without a benchmark.
    """
    if match_volatility and benchmark is None:
        raise ValueError('match_volatility requires passing of '
                         'benchmark.')
    elif match_volatility and benchmark is not None:
        bmark_vol = benchmark.loc[returns.index].std()
        returns = (returns / returns.std()) * bmark_vol
    colors, ls, alpha = _get_colors(grayscale)
    df = _pd.DataFrame(index=returns.index, data={returns_label: returns})
    if isinstance(benchmark, _pd.Series):
        df['Benchmark'] = benchmark[benchmark.index.isin(returns.index)]
        df = df[['Benchmark', returns_label]]
    df = df.dropna()
    if resample is not None:
        df = df.resample(resample).apply(
            _stats.comp).resample(resample).last()
    fig, ax = _plt.subplots(figsize=figsize)
    fig.suptitle(title+"\n", y=.99, fontweight="bold", fontname=fontname,
                 fontsize=14, color="black")
    if subtitle:
        ax.set_title("\n%s - %s " % (
            df.index.date[:1][0].strftime('%Y'),
            df.index.date[-1:][0].strftime('%Y')
        ), fontsize=12, color='gray')
    if benchmark is None:
        colors = colors[1:]
    df.plot(kind='bar', ax=ax, color=colors)
    fig.set_facecolor('white')
    ax.set_facecolor('white')
    ax.set_xticklabels(df.index.year)
    years = sorted(list(set(df.index.year)))
    if len(years) > 10:
        mod = int(len(years)/10)
        _plt.xticks(_np.arange(len(years)), [
            str(year) if not i % mod else '' for i, year in enumerate(years)])
    fig.autofmt_xdate()
    if hline:
        if grayscale:
            hlcolor = 'gray'
        ax.axhline(hline, ls="--", lw=hlw, color=hlcolor,
                   label=hllabel, zorder=2)
    ax.axhline(0, ls="--", lw=1, color="#000000", zorder=2)
    if isinstance(benchmark, _pd.Series) or hline:
        ax.legend(fontsize=12)
    _plt.yscale("symlog" if log_scale else "linear")
    ax.set_xlabel('')
    if ylabel:
        ax.set_ylabel("Returns", fontname=fontname,
                      fontweight='bold', fontsize=12, color="black")
        ax.yaxis.set_label_coords(-.1, .5)
    ax.yaxis.set_major_formatter(_FuncFormatter(format_pct_axis))
    try:
        _plt.subplots_adjust(hspace=0, bottom=0, top=1)
    except Exception:
        pass
    try:
        fig.tight_layout()
    except Exception:
        pass
    if savefig:
        if isinstance(savefig, dict):
            _plt.savefig(**savefig)
        else:
            _plt.savefig(savefig)
    if show:
        _plt.show(fig)
    _plt.close()
    if not show:
        return fig
def plot_timeseries(returns, benchmark=None,
                    title="Returns", compound=False, cumulative=True,
                    fill=False, returns_label="Strategy",
                    hline=None, hlw=None, hlcolor="red", hllabel="",
                    percent=True, match_volatility=False, log_scale=False,
                    resample=None, lw=1.5, figsize=(10, 6), ylabel="",
                    grayscale=False, fontname="Arial",
                    subtitle=True, savefig=None, show=True):
    """Plot a (optionally compounded/cumulative) return time series.

    NOTE: mutates ``returns``/``benchmark`` in place via
    ``fillna(0, inplace=True)``. Raises ValueError if
    ``match_volatility`` is set without a benchmark.
    """
    colors, ls, alpha = _get_colors(grayscale)
    returns.fillna(0, inplace=True)
    if isinstance(benchmark, _pd.Series):
        benchmark.fillna(0, inplace=True)
    if match_volatility and benchmark is None:
        raise ValueError('match_volatility requires passing of '
                         'benchmark.')
    elif match_volatility and benchmark is not None:
        bmark_vol = benchmark.std()
        returns = (returns / returns.std()) * bmark_vol
    if compound is True:
        if cumulative:
            returns = _stats.compsum(returns)
            if isinstance(benchmark, _pd.Series):
                benchmark = _stats.compsum(benchmark)
        else:
            returns = returns.cumsum()
            if isinstance(benchmark, _pd.Series):
                benchmark = benchmark.cumsum()
    if resample:
        returns = returns.resample(resample)
        returns = returns.last() if compound is True else returns.sum()
        if isinstance(benchmark, _pd.Series):
            benchmark = benchmark.resample(resample)
            benchmark = benchmark.last(
            ) if compound is True else benchmark.sum()
    fig, ax = _plt.subplots(figsize=figsize)
    fig.suptitle(title+"\n", y=.99, fontweight="bold", fontname=fontname,
                 fontsize=14, color="black")
    if subtitle:
        ax.set_title("\n%s - %s " % (
            returns.index.date[:1][0].strftime('%e %b \'%y'),
            returns.index.date[-1:][0].strftime('%e %b \'%y')
        ), fontsize=12, color='gray')
    fig.set_facecolor('white')
    ax.set_facecolor('white')
    if isinstance(benchmark, _pd.Series):
        ax.plot(benchmark, lw=lw, ls=ls, label="Benchmark", color=colors[0])
    alpha = .25 if grayscale else 1
    ax.plot(returns, lw=lw, label=returns_label, color=colors[1], alpha=alpha)
    if fill:
        ax.fill_between(returns.index, 0, returns, color=colors[1], alpha=.25)
    fig.autofmt_xdate()
    if hline:
        if grayscale:
            hlcolor = 'black'
        ax.axhline(hline, ls="--", lw=hlw, color=hlcolor,
                   label=hllabel, zorder=2)
    ax.axhline(0, ls="-", lw=1,
               color='gray', zorder=1)
    ax.axhline(0, ls="--", lw=1,
               color='white' if grayscale else 'black', zorder=2)
    if isinstance(benchmark, _pd.Series) or hline:
        ax.legend(fontsize=12)
    _plt.yscale("symlog" if log_scale else "linear")
    if percent:
        ax.yaxis.set_major_formatter(_FuncFormatter(format_pct_axis))
    ax.set_xlabel('')
    if ylabel:
        ax.set_ylabel(ylabel, fontname=fontname,
                      fontweight='bold', fontsize=12, color="black")
        ax.yaxis.set_label_coords(-.1, .5)
    try:
        _plt.subplots_adjust(hspace=0, bottom=0, top=1)
    except Exception:
        pass
    try:
        fig.tight_layout()
    except Exception:
        pass
    if savefig:
        if isinstance(savefig, dict):
            _plt.savefig(**savefig)
        else:
            _plt.savefig(savefig)
    if show:
        _plt.show(fig)
    _plt.close()
    if not show:
        return fig
def plot_histogram(returns, resample="M", bins=20,
                   fontname='Arial', grayscale=False,
                   title="Returns", kde=True, figsize=(10, 6),
                   ylabel=True, subtitle=True, compounded=True,
                   savefig=None, show=True):
    """Plot a histogram of resampled (default: monthly) returns with an
    optional KDE; a dashed vertical line marks the mean. Uses the
    deprecated ``_sns.distplot`` — keep seaborn pinned accordingly."""
    colors = ['#348dc1', '#003366', 'red']
    if grayscale:
        colors = ['silver', 'gray', 'black']
    apply_fnc = _stats.comp if compounded else _np.sum
    returns = returns.fillna(0).resample(resample).apply(
        apply_fnc).resample(resample).last()
    fig, ax = _plt.subplots(figsize=figsize)
    fig.suptitle(title+"\n", y=.99, fontweight="bold", fontname=fontname,
                 fontsize=14, color="black")
    if subtitle:
        ax.set_title("\n%s - %s " % (
            returns.index.date[:1][0].strftime('%Y'),
            returns.index.date[-1:][0].strftime('%Y')
        ), fontsize=12, color='gray')
    fig.set_facecolor('white')
    ax.set_facecolor('white')
    ax.axvline(returns.mean(), ls="--", lw=1.5,
               color=colors[2], zorder=2, label="Average")
    _sns.distplot(returns, bins=bins,
                  axlabel="", color=colors[0], hist_kws=dict(alpha=1),
                  kde=kde,
                  kde_kws=dict(color='black', alpha=.7),
                  ax=ax)
    ax.xaxis.set_major_formatter(_plt.FuncFormatter(
        lambda x, loc: "{:,}%".format(int(x*100))))
    ax.axhline(0.01, lw=1, color="#000000", zorder=2)
    ax.axvline(0, lw=1, color="#000000", zorder=2)
    ax.set_xlabel('')
    if ylabel:
        ax.set_ylabel("Occurrences", fontname=fontname,
                      fontweight='bold', fontsize=12, color="black")
        ax.yaxis.set_label_coords(-.1, .5)
    ax.legend(fontsize=12)
    try:
        _plt.subplots_adjust(hspace=0, bottom=0, top=1)
    except Exception:
        pass
    try:
        fig.tight_layout()
    except Exception:
        pass
    if savefig:
        if isinstance(savefig, dict):
            _plt.savefig(**savefig)
        else:
            _plt.savefig(savefig)
    if show:
        _plt.show(fig)
    _plt.close()
    if not show:
        return fig
def plot_rolling_stats(returns, benchmark=None, title="",
                       returns_label="Strategy",
                       hline=None, hlw=None, hlcolor="red", hllabel="",
                       lw=1.5, figsize=(10, 6), ylabel="",
                       grayscale=False, fontname="Arial", subtitle=True,
                       savefig=None, show=True):
    """Plot a pre-computed rolling statistic (e.g. rolling Sharpe),
    optionally overlaying an aligned benchmark series."""
    colors, ls, alpha = _get_colors(grayscale)
    fig, ax = _plt.subplots(figsize=figsize)
    df = _pd.DataFrame(index=returns.index, data={returns_label: returns})
    if isinstance(benchmark, _pd.Series):
        df['Benchmark'] = benchmark[benchmark.index.isin(returns.index)]
        df = df[['Benchmark', returns_label]].dropna()
        ax.plot(df['Benchmark'], lw=lw, label="Benchmark",
                color=colors[0], alpha=.8)
    ax.plot(df[returns_label].dropna(), lw=lw,
            label=returns_label, color=colors[1])
    fig.autofmt_xdate()
    fig.suptitle(title+"\n", y=.99, fontweight="bold", fontname=fontname,
                 fontsize=14, color="black")
    if subtitle:
        ax.set_title("\n%s - %s " % (
            df.index.date[:1][0].strftime('%e %b \'%y'),
            df.index.date[-1:][0].strftime('%e %b \'%y')
        ), fontsize=12, color='gray')
    if hline:
        if grayscale:
            hlcolor = 'black'
        ax.axhline(hline, ls="--", lw=hlw, color=hlcolor,
                   label=hllabel, zorder=2)
    ax.axhline(0, ls="--", lw=1, color="#000000", zorder=2)
    if ylabel:
        ax.set_ylabel(ylabel, fontname=fontname,
                      fontweight='bold', fontsize=12, color="black")
        ax.yaxis.set_label_coords(-.1, .5)
    ax.yaxis.set_major_formatter(_FormatStrFormatter('%.2f'))
    ax.legend(fontsize=12)
    try:
        _plt.subplots_adjust(hspace=0, bottom=0, top=1)
    except Exception:
        pass
    try:
        fig.tight_layout()
    except Exception:
        pass
    if savefig:
        if isinstance(savefig, dict):
            _plt.savefig(**savefig)
        else:
            _plt.savefig(savefig)
    if show:
        _plt.show(fig)
    _plt.close()
    if not show:
        return fig
def plot_rolling_beta(returns, benchmark,
                      window1=126, window1_label="",
                      window2=None, window2_label="",
                      title="", hlcolor="red", figsize=(10, 6),
                      grayscale=False, fontname="Arial", lw=1.5,
                      ylabel=True, subtitle=True, savefig=None, show=True):
    """Plot rolling beta vs ``benchmark`` over ``window1`` (and
    optionally ``window2``) periods; dashed line marks the mean beta."""
    colors, ls, alpha = _get_colors(grayscale)
    fig, ax = _plt.subplots(figsize=figsize)
    fig.suptitle(title+"\n", y=.99, fontweight="bold", fontname=fontname,
                 fontsize=14, color="black")
    if subtitle:
        ax.set_title("\n%s - %s " % (
            returns.index.date[:1][0].strftime('%e %b \'%y'),
            returns.index.date[-1:][0].strftime('%e %b \'%y')
        ), fontsize=12, color='gray')
    beta = _stats.rolling_greeks(returns, benchmark, window1)['beta']
    ax.plot(beta, lw=lw, label=window1_label, color=colors[1])
    if window2:
        ax.plot(_stats.rolling_greeks(returns, benchmark, window2)['beta'],
                lw=lw, label=window2_label, color="gray", alpha=0.8)
    # Ticks span at least [-1, 1], computed in integer percents.
    mmin = min([-100, int(beta.min()*100)])
    mmax = max([100, int(beta.max()*100)])
    step = 50 if (mmax-mmin) >= 200 else 100
    ax.set_yticks([x / 100 for x in list(range(mmin, mmax, step))])
    hlcolor = 'black' if grayscale else hlcolor
    ax.axhline(beta.mean(), ls="--", lw=1.5,
               color=hlcolor, zorder=2)
    ax.axhline(0, ls="--", lw=1, color="#000000", zorder=2)
    fig.autofmt_xdate()
    ax.fmt_xdata = _mdates.DateFormatter('%Y-%m-%d')
    if ylabel:
        ax.set_ylabel("Beta", fontname=fontname,
                      fontweight='bold', fontsize=12, color="black")
        ax.yaxis.set_label_coords(-.1, .5)
    ax.legend(fontsize=12)
    try:
        _plt.subplots_adjust(hspace=0, bottom=0, top=1)
    except Exception:
        pass
    try:
        fig.tight_layout()
    except Exception:
        pass
    if savefig:
        if isinstance(savefig, dict):
            _plt.savefig(**savefig)
        else:
            _plt.savefig(savefig)
    if show:
        _plt.show(fig)
    _plt.close()
    if not show:
        return fig
def plot_longest_drawdowns(returns, periods=5, lw=1.5,
fontname='Arial', grayscale=False,
log_scale=False, figsize=(10, 6), ylabel=True,
subtitle=True, compounded=True,
savefig=None, show=True):
colors = ['#348dc1', '#003366', 'red']
if grayscale:
colors = ['#000000'] * 3
dd = _stats.to_drawdown_series(returns.fillna(0))
dddf = _stats.drawdown_details(dd)
longest_dd = dddf.sort_values(
by='days', ascending=False, kind='mergesort')[:periods]
fig, ax = _plt.subplots(figsize=figsize)
fig.suptitle("Top %.0f Drawdown Periods\n" %
periods, y=.99, fontweight="bold", fontname=fontname,
fontsize=14, color="black")
if subtitle:
ax.set_title("\n%s - %s " % (
returns.index.date[:1][0].strftime('%e %b \'%y'),
returns.index.date[-1:][0].strftime('%e %b \'%y')
), fontsize=12, color='gray')
fig.set_facecolor('white')
ax.set_facecolor('white')
series = _stats.compsum(returns) if compounded else returns.cumsum()
ax.plot(series, lw=lw, label="Backtest", color=colors[0])
highlight = 'black' if grayscale else 'red'
for idx, row in longest_dd.iterrows():
ax.axvspan(*_mdates.datestr2num([str(row['start']), str(row['end'])]),
color=highlight, alpha=.1)
fig.autofmt_xdate()
ax.fmt_xdata = _mdates.DateFormatter('%Y-%m-%d')
ax.axhline(0, ls="--", lw=1, color="#000000", zorder=2)
_plt.yscale("symlog" if log_scale else "linear")
if ylabel:
ax.set_ylabel("Cumulative Returns", fontname=fontname,
fontweight='bold', fontsize=12, color="black")
ax.yaxis.set_label_coords(-.1, .5)
ax.yaxis.set_major_formatter(_FuncFormatter(format_pct_axis))
fig.autofmt_xdate()
try:
_plt.subplots_adjust(hspace=0, bottom=0, top=1)
except Exception:
pass
try:
fig.tight_layout()
except Exception:
pass
if savefig:
if isinstance(savefig, dict):
_plt.savefig(**savefig)
else:
_plt.savefig(savefig)
if show:
_plt.show(fig)
_plt.close()
if not show:
return fig
def plot_distribution(returns, figsize=(10, 6),
fontname='Arial', grayscale=False, ylabel=True,
subtitle=True, compounded=True,
savefig=None, show=True):
colors = _FLATUI_COLORS
if grayscale:
colors = ['#f9f9f9', '#dddddd', '#bbbbbb', '#999999', '#808080']
port = _pd.DataFrame(returns.fillna(0))
port.columns = ['Daily']
apply_fnc = _stats.comp if compounded else _np.sum
port['Weekly'] = port['Daily'].resample(
'W-MON').apply(apply_fnc).resample('W-MON').last()
port['Weekly'].ffill(inplace=True)
port['Monthly'] = port['Daily'].resample(
'M').apply(apply_fnc).resample('M').last()
port['Monthly'].ffill(inplace=True)
port['Quarterly'] = port['Daily'].resample(
'Q').apply(apply_fnc).resample('Q').last()
port['Quarterly'].ffill(inplace=True)
port['Yearly'] = port['Daily'].resample(
'A').apply(apply_fnc).resample('A').last()
port['Yearly'].ffill(inplace=True)
fig, ax = _plt.subplots(figsize=figsize)
fig.suptitle("Return Quantiles\n", y=.99,
fontweight="bold", fontname=fontname,
fontsize=14, color="black")
if subtitle:
ax.set_title("\n%s - %s " % (
returns.index.date[:1][0].strftime('%e %b \'%y'),
returns.index.date[-1:][0].strftime('%e %b \'%y')
), fontsize=12, color='gray')
fig.set_facecolor('white')
ax.set_facecolor('white')
_sns.boxplot(data=port, ax=ax, palette=tuple(colors[:5]))
ax.yaxis.set_major_formatter(_plt.FuncFormatter(
lambda x, loc: "{:,}%".format(int(x*100))))
if ylabel:
ax.set_ylabel('Rerurns', fontname=fontname,
fontweight='bold', fontsize=12, color="black")
ax.yaxis.set_label_coords(-.1, .5)
fig.autofmt_xdate()
try:
_plt.subplots_adjust(hspace=0)
except Exception:
pass
try:
fig.tight_layout(w_pad=0, h_pad=0)
except Exception:
pass
if savefig:
if isinstance(savefig, dict):
_plt.savefig(**savefig)
else:
_plt.savefig(savefig)
if show:
_plt.show(fig)
_plt.close()
if not show:
return fig
def plot_table(tbl, columns=None, title="", title_loc="left",
header=True,
colWidths=None,
rowLoc='right',
colLoc='right',
colLabels=[],
edges='horizontal',
orient='horizontal',
figsize=(10, 6),
savefig=None,
show=False):
if columns is not None:
try:
tbl.columns = columns
except Exception:
pass
fig = _plt.figure(figsize=(5.5, 6))
ax = _plt.subplot(111, frame_on=False)
if title != "":
ax.set_title(title, fontweight="bold",
fontsize=14, color="black", loc=title_loc)
the_table = ax.table(cellText=tbl.values,
colWidths=colWidths,
rowLoc=rowLoc,
colLoc=colLoc,
edges=edges,
colLabels=(tbl.columns if header else None),
loc='center',
zorder=2
)
the_table.auto_set_font_size(False)
the_table.set_fontsize(12)
the_table.scale(1, 1)
for (row, col), cell in the_table.get_celld().items():
cell.set_height(0.08)
cell.set_text_props(color='black')
cell.set_edgecolor('#dddddd')
if row == 0 and header:
cell.set_edgecolor('black')
cell.set_facecolor('black')
cell.set_linewidth(2)
cell.set_text_props(weight='bold', color='black')
elif col == 0 and "vertical" in orient:
cell.set_edgecolor('#dddddd')
cell.set_linewidth(1)
cell.set_text_props(weight='bold', color='black')
elif row > 1:
cell.set_linewidth(1)
ax.grid(False)
ax.set_xticks([])
ax.set_yticks([])
try:
_plt.subplots_adjust(hspace=0)
except Exception:
pass
try:
fig.tight_layout(w_pad=0, h_pad=0)
except Exception:
pass
if savefig:
if isinstance(savefig, dict):
_plt.savefig(**savefig)
else:
_plt.savefig(savefig)
if show:
_plt.show(fig)
_plt.close()
if not show:
return fig
def format_cur_axis(x, pos):
if x >= 1e12:
res = '$%1.1fT' % (x * 1e-12)
return res.replace('.0T', 'T')
if x >= 1e9:
res = '$%1.1fB' % (x * 1e-9)
return res.replace('.0B', 'B')
if x >= 1e6:
res = '$%1.1fM' % (x * 1e-6)
return res.replace('.0M', 'M')
if x >= 1e3:
res = '$%1.0fK' % (x * 1e-3)
return res.replace('.0K', 'K')
res = '$%1.0f' % x
return res.replace('.0', '')
def format_pct_axis(x, pos):
x *= 100
if x >= 1e12:
res = '%1.1fT%%' % (x * 1e-12)
return res.replace('.0T%', 'T%')
if x >= 1e9:
res = '%1.1fB%%' % (x * 1e-9)
return res.replace('.0B%', 'B%')
if x >= 1e6:
res = '%1.1fM%%' % (x * 1e-6)
return res.replace('.0M%', 'M%')
if x >= 1e3:
res = '%1.1fK%%' % (x * 1e-3)
return res.replace('.0K%', 'K%')
res = '%1.0f%%' % x
return res.replace('.0%', '%')
| true | true |
1c34f495a5af2eb1cfba1648b37b88a451fb5063 | 2,597 | py | Python | tests/__init__.py | Kinto/kinto-pusher | 9cad48b33a55d64f69cabf7d4c7eab15145e4a74 | [
"Apache-2.0"
] | 8 | 2016-06-07T09:49:52.000Z | 2018-08-01T11:06:45.000Z | tests/__init__.py | Kinto/kinto-pusher | 9cad48b33a55d64f69cabf7d4c7eab15145e4a74 | [
"Apache-2.0"
] | 12 | 2016-05-20T16:41:10.000Z | 2020-03-31T18:20:02.000Z | tests/__init__.py | Kinto/kinto-pusher | 9cad48b33a55d64f69cabf7d4c7eab15145e4a74 | [
"Apache-2.0"
] | 5 | 2016-05-26T12:54:44.000Z | 2019-04-17T22:31:31.000Z | import kinto.core
import webtest
from kinto.core.utils import random_bytes_hex
from pyramid.config import Configurator
def get_request_class(prefix):
class PrefixedRequestClass(webtest.app.TestRequest):
@classmethod
def blank(cls, path, *args, **kwargs):
path = "/%s%s" % (prefix, path)
return webtest.app.TestRequest.blank(path, *args, **kwargs)
return PrefixedRequestClass
class BaseWebTest(object):
"""Base Web Test to test your cornice service.
It setups the database before each test and delete it after.
"""
api_prefix = "v1"
def __init__(self, *args, **kwargs):
super(BaseWebTest, self).__init__(*args, **kwargs)
self.app = self._get_test_app()
self.headers = {"Content-Type": "application/json"}
def _get_test_app(self, settings=None):
config = self._get_app_config(settings)
wsgi_app = config.make_wsgi_app()
app = webtest.TestApp(wsgi_app)
app.RequestClass = get_request_class(self.api_prefix)
return app
def _get_app_config(self, settings=None):
config = Configurator(settings=self.get_app_settings(settings))
kinto.core.initialize(config, version="1.0.1")
return config
def get_app_settings(self, additional_settings=None):
"""
kinto.includes = kinto_pusher
kinto.event_listeners = pusher
kinto.event_listeners.pusher.use = kinto_pusher.listener
kinto.event_listeners.pusher.resources = <list of resource names>
kinto.event_listeners.pusher.channel = <channel-name or pattern>
pusher.app_id = <pusher-app-id>
pusher.key = <pusher-key>
pusher.secret = <pusher-secret>
"""
settings = kinto.core.DEFAULT_SETTINGS.copy()
settings["includes"] = "kinto_pusher"
settings["cache_backend"] = "kinto.core.cache.memory"
settings["cache_backend"] = "kinto.core.cache.memory"
settings["userid_hmac_secret"] = random_bytes_hex(16)
settings["event_listeners"] = "pusher"
settings["event_listeners.pusher.use"] = "kinto_pusher.listener"
settings["event_listeners.pusher.resources"] = "records"
pattern = "{bucket_id}-{collection_id}-{resource_name}"
settings["event_listeners.pusher.channel"] = pattern
settings["pusher.app_id"] = "12345"
settings["pusher.key"] = "demo-key"
settings["pusher.secret"] = "demo-secret"
if additional_settings is not None:
settings.update(additional_settings)
return settings
| 35.575342 | 73 | 0.663843 | import kinto.core
import webtest
from kinto.core.utils import random_bytes_hex
from pyramid.config import Configurator
def get_request_class(prefix):
class PrefixedRequestClass(webtest.app.TestRequest):
@classmethod
def blank(cls, path, *args, **kwargs):
path = "/%s%s" % (prefix, path)
return webtest.app.TestRequest.blank(path, *args, **kwargs)
return PrefixedRequestClass
class BaseWebTest(object):
api_prefix = "v1"
def __init__(self, *args, **kwargs):
super(BaseWebTest, self).__init__(*args, **kwargs)
self.app = self._get_test_app()
self.headers = {"Content-Type": "application/json"}
def _get_test_app(self, settings=None):
config = self._get_app_config(settings)
wsgi_app = config.make_wsgi_app()
app = webtest.TestApp(wsgi_app)
app.RequestClass = get_request_class(self.api_prefix)
return app
def _get_app_config(self, settings=None):
config = Configurator(settings=self.get_app_settings(settings))
kinto.core.initialize(config, version="1.0.1")
return config
def get_app_settings(self, additional_settings=None):
settings = kinto.core.DEFAULT_SETTINGS.copy()
settings["includes"] = "kinto_pusher"
settings["cache_backend"] = "kinto.core.cache.memory"
settings["cache_backend"] = "kinto.core.cache.memory"
settings["userid_hmac_secret"] = random_bytes_hex(16)
settings["event_listeners"] = "pusher"
settings["event_listeners.pusher.use"] = "kinto_pusher.listener"
settings["event_listeners.pusher.resources"] = "records"
pattern = "{bucket_id}-{collection_id}-{resource_name}"
settings["event_listeners.pusher.channel"] = pattern
settings["pusher.app_id"] = "12345"
settings["pusher.key"] = "demo-key"
settings["pusher.secret"] = "demo-secret"
if additional_settings is not None:
settings.update(additional_settings)
return settings
| true | true |
1c34f4fd314bfea96ecf0e4939d2eff0afa450f9 | 471 | py | Python | roundup.py | lpig/some_tools | 4c7e9ee95bef2c230a2a6bd2fdca7dc19ea3e1dc | [
"MIT"
] | 2 | 2020-03-23T11:02:37.000Z | 2020-04-17T08:02:35.000Z | roundup.py | lpig/some_tools | 4c7e9ee95bef2c230a2a6bd2fdca7dc19ea3e1dc | [
"MIT"
] | null | null | null | roundup.py | lpig/some_tools | 4c7e9ee95bef2c230a2a6bd2fdca7dc19ea3e1dc | [
"MIT"
] | null | null | null | import math
def roundup_int(x: int, num_digits: int) -> int:
"""
round up like excel roundup
向上取证方法,暂时只支持整数
**One**::
>>> num = roundup_int(123,-1)
>>> num
130
"""
if num_digits > 0:
raise TypeError(f'is over 0! digit:{num_digits}')
num_digits = abs(num_digits)
_num_digits = 1 * math.pow(10, num_digits)
_num = x if x % _num_digits == 0 else x + _num_digits - x % _num_digits
return int(_num)
| 21.409091 | 75 | 0.577495 | import math
def roundup_int(x: int, num_digits: int) -> int:
if num_digits > 0:
raise TypeError(f'is over 0! digit:{num_digits}')
num_digits = abs(num_digits)
_num_digits = 1 * math.pow(10, num_digits)
_num = x if x % _num_digits == 0 else x + _num_digits - x % _num_digits
return int(_num)
| true | true |
1c34f52b43551b4ecb53a18ee47a50477a3bac98 | 933 | py | Python | main.py | NicksonYap/T-1000 | e1f82f9e72cb5407bb01a83daa278c51b2e30c2a | [
"MIT"
] | null | null | null | main.py | NicksonYap/T-1000 | e1f82f9e72cb5407bb01a83daa278c51b2e30c2a | [
"MIT"
] | null | null | null | main.py | NicksonYap/T-1000 | e1f82f9e72cb5407bb01a83daa278c51b2e30c2a | [
"MIT"
] | null | null | null | if __name__ == '__main__':
from utils import loading
loading()
from core_main import Nostradamus
env = Nostradamus(assets=['XRP','BCH','LTC','BNB'],
currency='BTC',
granularity='day',
datapoints=600)
env.train(timesteps=1e6,
checkpoint_freq=50,
lr_schedule=[
[
[0, 7e-5], # [timestep, lr]
[1e6, 7e-6],
],
[
[0, 6e-5],
[1e6, 6e-6],
],
[
[0, 5e-5],
[1e6, 5e-6],
]
],
algo='PPO')
# checkpoint_path = 'results/t-100_test/1_2019-10-28_16-53-531fzmn26h/checkpoint_250/checkpoint-250'
# env.backtest(checkpoint_path=checkpoint_path)
| 31.1 | 104 | 0.392283 | if __name__ == '__main__':
from utils import loading
loading()
from core_main import Nostradamus
env = Nostradamus(assets=['XRP','BCH','LTC','BNB'],
currency='BTC',
granularity='day',
datapoints=600)
env.train(timesteps=1e6,
checkpoint_freq=50,
lr_schedule=[
[
[0, 7e-5],
[1e6, 7e-6],
],
[
[0, 6e-5],
[1e6, 6e-6],
],
[
[0, 5e-5],
[1e6, 5e-6],
]
],
algo='PPO')
| true | true |
1c34f53fb3ea558447b8a363a8aeb941c702c8c5 | 557 | py | Python | src/ap_games/ap_typing.py | aplatkouski/tic-tac-toe | d5dc8f5c1f75226989326163a18505e6d88a8f49 | [
"MIT"
] | null | null | null | src/ap_games/ap_typing.py | aplatkouski/tic-tac-toe | d5dc8f5c1f75226989326163a18505e6d88a8f49 | [
"MIT"
] | null | null | null | src/ap_games/ap_typing.py | aplatkouski/tic-tac-toe | d5dc8f5c1f75226989326163a18505e6d88a8f49 | [
"MIT"
] | 1 | 2020-08-25T12:51:53.000Z | 2020-08-25T12:51:53.000Z | from __future__ import annotations
from typing import Dict
from typing import Literal
from typing import Tuple
from ap_games.ap_collections import Cell
from ap_games.ap_collections import Coordinate
from ap_games.ap_collections import Node
Mark = Literal['X', 'O', ' ', '']
Empty = Literal[' ']
PlayerMark = Literal['X', 'O']
UndefinedMark = Literal['']
Coordinates = Tuple[Coordinate, ...]
Directions = Tuple[Coordinate, ...]
PlayerType = Literal['easy', 'hard', 'medium', 'nightmare', 'user']
Side = Tuple[Cell, ...]
Size = int
Tree = Dict[str, Node]
| 25.318182 | 67 | 0.723519 | from __future__ import annotations
from typing import Dict
from typing import Literal
from typing import Tuple
from ap_games.ap_collections import Cell
from ap_games.ap_collections import Coordinate
from ap_games.ap_collections import Node
Mark = Literal['X', 'O', ' ', '']
Empty = Literal[' ']
PlayerMark = Literal['X', 'O']
UndefinedMark = Literal['']
Coordinates = Tuple[Coordinate, ...]
Directions = Tuple[Coordinate, ...]
PlayerType = Literal['easy', 'hard', 'medium', 'nightmare', 'user']
Side = Tuple[Cell, ...]
Size = int
Tree = Dict[str, Node]
| true | true |
1c34f59d7b3d2551889648884bb963a9eb45ca7f | 3,726 | py | Python | examples/08_tfRecord.py | csehong/stanford-tensorflow-tutorials | fd93ff0568914724f2b9e97920eb8d6138efc52c | [
"MIT"
] | 1 | 2020-11-24T09:35:19.000Z | 2020-11-24T09:35:19.000Z | examples/08_tfRecord.py | csehong/stanford-tensorflow-tutorials | fd93ff0568914724f2b9e97920eb8d6138efc52c | [
"MIT"
] | null | null | null | examples/08_tfRecord.py | csehong/stanford-tensorflow-tutorials | fd93ff0568914724f2b9e97920eb8d6138efc52c | [
"MIT"
] | null | null | null | """ Examples to demonstrate how to write an image file to a TFRecord,
and how to read a TFRecord file using TFRecordReader.
Author: Chip Huyen
Prepared for the class CS 20SI: "TensorFlow for Deep Learning Research"
cs20si.stanford.edu
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import sys
sys.path.append('..')
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
# image supposed to have shape: 480 x 640 x 3 = 921600
IMAGE_PATH = 'data/'
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def get_image_binary(filename):
""" You can read in the image using tensorflow too, but it's a drag
since you have to create graphs. It's much easier using Pillow and NumPy
"""
image = Image.open(filename)
image = np.asarray(image, np.uint8)
shape = np.array(image.shape, np.int32)
return shape.tobytes(), image.tobytes() # convert image to raw data bytes in the array.
def write_to_tfrecord(label, shape, binary_image, tfrecord_file):
""" This example is to write a sample to TFRecord file. If you want to write
more samples, just use a loop.
"""
writer = tf.python_io.TFRecordWriter(tfrecord_file)
# write label, shape, and image content to the TFRecord file
example = tf.train.Example(features=tf.train.Features(feature={
'label': _int64_feature(label),
'shape': _bytes_feature(shape),
'image': _bytes_feature(binary_image)
}))
writer.write(example.SerializeToString())
writer.close()
def write_tfrecord(label, image_file, tfrecord_file):
shape, binary_image = get_image_binary(image_file)
write_to_tfrecord(label, shape, binary_image, tfrecord_file)
def read_from_tfrecord(filenames):
tfrecord_file_queue = tf.train.string_input_producer(filenames, name='queue')
reader = tf.TFRecordReader()
_, tfrecord_serialized = reader.read(tfrecord_file_queue)
# label and image are stored as bytes but could be stored as
# int64 or float64 values in a serialized tf.Example protobuf.
tfrecord_features = tf.parse_single_example(tfrecord_serialized,
features={
'label': tf.FixedLenFeature([], tf.int64),
'shape': tf.FixedLenFeature([], tf.string),
'image': tf.FixedLenFeature([], tf.string),
}, name='features')
# image was saved as uint8, so we have to decode as uint8.
image = tf.decode_raw(tfrecord_features['image'], tf.uint8)
shape = tf.decode_raw(tfrecord_features['shape'], tf.int32)
# the image tensor is flattened out, so we have to reconstruct the shape
image = tf.reshape(image, shape)
label = tfrecord_features['label']
return label, shape, image
def read_tfrecord(tfrecord_file):
label, shape, image = read_from_tfrecord([tfrecord_file])
with tf.Session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
label, image, shape = sess.run([label, image, shape])
coord.request_stop()
coord.join(threads)
print(label)
print(shape)
plt.imshow(image)
plt.show()
def main():
# assume the image has the label Chihuahua, which corresponds to class number 1
label = 1
image_file = IMAGE_PATH + 'friday.jpg'
tfrecord_file = IMAGE_PATH + 'friday.tfrecord'
write_tfrecord(label, image_file, tfrecord_file)
read_tfrecord(tfrecord_file)
if __name__ == '__main__':
main() | 36.174757 | 91 | 0.685185 | import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import sys
sys.path.append('..')
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
IMAGE_PATH = 'data/'
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def get_image_binary(filename):
image = Image.open(filename)
image = np.asarray(image, np.uint8)
shape = np.array(image.shape, np.int32)
return shape.tobytes(), image.tobytes()
def write_to_tfrecord(label, shape, binary_image, tfrecord_file):
writer = tf.python_io.TFRecordWriter(tfrecord_file)
example = tf.train.Example(features=tf.train.Features(feature={
'label': _int64_feature(label),
'shape': _bytes_feature(shape),
'image': _bytes_feature(binary_image)
}))
writer.write(example.SerializeToString())
writer.close()
def write_tfrecord(label, image_file, tfrecord_file):
shape, binary_image = get_image_binary(image_file)
write_to_tfrecord(label, shape, binary_image, tfrecord_file)
def read_from_tfrecord(filenames):
tfrecord_file_queue = tf.train.string_input_producer(filenames, name='queue')
reader = tf.TFRecordReader()
_, tfrecord_serialized = reader.read(tfrecord_file_queue)
tfrecord_features = tf.parse_single_example(tfrecord_serialized,
features={
'label': tf.FixedLenFeature([], tf.int64),
'shape': tf.FixedLenFeature([], tf.string),
'image': tf.FixedLenFeature([], tf.string),
}, name='features')
image = tf.decode_raw(tfrecord_features['image'], tf.uint8)
shape = tf.decode_raw(tfrecord_features['shape'], tf.int32)
image = tf.reshape(image, shape)
label = tfrecord_features['label']
return label, shape, image
def read_tfrecord(tfrecord_file):
label, shape, image = read_from_tfrecord([tfrecord_file])
with tf.Session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
label, image, shape = sess.run([label, image, shape])
coord.request_stop()
coord.join(threads)
print(label)
print(shape)
plt.imshow(image)
plt.show()
def main():
label = 1
image_file = IMAGE_PATH + 'friday.jpg'
tfrecord_file = IMAGE_PATH + 'friday.tfrecord'
write_tfrecord(label, image_file, tfrecord_file)
read_tfrecord(tfrecord_file)
if __name__ == '__main__':
main() | true | true |
1c34f636c13ffeacfc990b9129d8526b74bf2b7b | 4,681 | py | Python | tothc/managers.py | kennydo/tweets-of-the-highest-caliber | 5f9d61ded2b4d9e0e65af0524321eac448c79a68 | [
"MIT"
] | null | null | null | tothc/managers.py | kennydo/tweets-of-the-highest-caliber | 5f9d61ded2b4d9e0e65af0524321eac448c79a68 | [
"MIT"
] | null | null | null | tothc/managers.py | kennydo/tweets-of-the-highest-caliber | 5f9d61ded2b4d9e0e65af0524321eac448c79a68 | [
"MIT"
] | null | null | null | import datetime
import logging
from typing import List
from databases.core import Connection
from tothc import models
log = logging.getLogger(__name__)
class TwitterSubscriptionManager:
@classmethod
async def subscribe(
cls,
connection: Connection,
*,
user_id: int,
screen_name: str,
) -> None:
async with connection.transaction():
# This subscription might be inactive.
existing_subscription = await connection.fetch_one(
models.twitter_subscriptions
.select()
.where(models.twitter_subscriptions.c.user_id == user_id),
)
if existing_subscription:
if existing_subscription[models.twitter_subscriptions.c.unsubscribed_at]:
log.info('Re-enabling existing subscription for user ID %s (%s)', user_id, screen_name)
await connection.execute(
models.twitter_subscriptions
.update()
.where(models.twitter_subscriptions.c.user_id == user_id)
.values(
screen_name=screen_name,
subscribed_at=datetime.datetime.utcnow(),
unsubscribed_at=None,
),
)
else:
log.info('User ID %s (%s) already has an active subscription', user_id, screen_name)
else:
log.info('Adding new subscription for user ID %s (%s)', user_id, screen_name)
await connection.execute(
models.twitter_subscriptions
.insert()
.values(
user_id=user_id,
screen_name=screen_name,
subscribed_at=datetime.datetime.utcnow(),
),
)
@classmethod
async def unsubscribe(
cls,
connection: Connection,
*,
screen_name: str,
) -> None:
"""We unsubscribe based on the screen name in our DB instead of the user ID because
screen names can change, and users can get into bad states (ex: suspended) that prevent
us from fetching their ID.
"""
log.info('Unsubscribing from screen name %s', screen_name)
await connection.execute(
models.twitter_subscriptions
.update()
.where(models.twitter_subscriptions.c.screen_name.ilike(screen_name))
.values(
unsubscribed_at=datetime.datetime.utcnow(),
latest_tweet_id=None,
refreshed_latest_tweet_id_at=None,
),
)
@classmethod
async def update_screen_name(
cls,
connection: Connection,
*,
user_id: int,
screen_name: str,
) -> None:
await connection.execute(
models.twitter_subscriptions
.update()
.where(models.twitter_subscriptions.c.user_id == user_id)
.values(
screen_name=screen_name,
),
)
@classmethod
async def get_latest_tweet_id_for_user_id(
cls,
connection: Connection,
*,
user_id: int
) -> int:
subscription = await connection.fetch_one(
models.twitter_subscriptions
.select()
.where(models.twitter_subscriptions.c.user_id == user_id),
)
return subscription[models.twitter_subscriptions.c.latest_tweet_id]
@classmethod
async def update_latest_tweet_id(
cls,
connection: Connection,
*,
user_id: int,
latest_tweet_id: int,
) -> None:
log.info('Updating latest tweet ID of user %s to %s', user_id, latest_tweet_id)
await connection.execute(
models.twitter_subscriptions
.update()
.where(models.twitter_subscriptions.c.user_id == user_id)
.values(
refreshed_latest_tweet_id_at=datetime.datetime.utcnow(),
latest_tweet_id=latest_tweet_id,
),
)
@classmethod
async def list_user_ids_of_active_subscriptions(
cls,
connection: Connection,
) -> List[int]:
result = await connection.fetch_all(
models.twitter_subscriptions
.select()
.where(
models.twitter_subscriptions.c.unsubscribed_at.is_(None),
),
)
return [row[models.twitter_subscriptions.c.user_id] for row in result]
| 32.506944 | 107 | 0.553514 | import datetime
import logging
from typing import List
from databases.core import Connection
from tothc import models
log = logging.getLogger(__name__)
class TwitterSubscriptionManager:
@classmethod
async def subscribe(
cls,
connection: Connection,
*,
user_id: int,
screen_name: str,
) -> None:
async with connection.transaction():
existing_subscription = await connection.fetch_one(
models.twitter_subscriptions
.select()
.where(models.twitter_subscriptions.c.user_id == user_id),
)
if existing_subscription:
if existing_subscription[models.twitter_subscriptions.c.unsubscribed_at]:
log.info('Re-enabling existing subscription for user ID %s (%s)', user_id, screen_name)
await connection.execute(
models.twitter_subscriptions
.update()
.where(models.twitter_subscriptions.c.user_id == user_id)
.values(
screen_name=screen_name,
subscribed_at=datetime.datetime.utcnow(),
unsubscribed_at=None,
),
)
else:
log.info('User ID %s (%s) already has an active subscription', user_id, screen_name)
else:
log.info('Adding new subscription for user ID %s (%s)', user_id, screen_name)
await connection.execute(
models.twitter_subscriptions
.insert()
.values(
user_id=user_id,
screen_name=screen_name,
subscribed_at=datetime.datetime.utcnow(),
),
)
@classmethod
async def unsubscribe(
cls,
connection: Connection,
*,
screen_name: str,
) -> None:
log.info('Unsubscribing from screen name %s', screen_name)
await connection.execute(
models.twitter_subscriptions
.update()
.where(models.twitter_subscriptions.c.screen_name.ilike(screen_name))
.values(
unsubscribed_at=datetime.datetime.utcnow(),
latest_tweet_id=None,
refreshed_latest_tweet_id_at=None,
),
)
@classmethod
async def update_screen_name(
cls,
connection: Connection,
*,
user_id: int,
screen_name: str,
) -> None:
await connection.execute(
models.twitter_subscriptions
.update()
.where(models.twitter_subscriptions.c.user_id == user_id)
.values(
screen_name=screen_name,
),
)
@classmethod
async def get_latest_tweet_id_for_user_id(
cls,
connection: Connection,
*,
user_id: int
) -> int:
subscription = await connection.fetch_one(
models.twitter_subscriptions
.select()
.where(models.twitter_subscriptions.c.user_id == user_id),
)
return subscription[models.twitter_subscriptions.c.latest_tweet_id]
@classmethod
async def update_latest_tweet_id(
cls,
connection: Connection,
*,
user_id: int,
latest_tweet_id: int,
) -> None:
log.info('Updating latest tweet ID of user %s to %s', user_id, latest_tweet_id)
await connection.execute(
models.twitter_subscriptions
.update()
.where(models.twitter_subscriptions.c.user_id == user_id)
.values(
refreshed_latest_tweet_id_at=datetime.datetime.utcnow(),
latest_tweet_id=latest_tweet_id,
),
)
@classmethod
async def list_user_ids_of_active_subscriptions(
cls,
connection: Connection,
) -> List[int]:
result = await connection.fetch_all(
models.twitter_subscriptions
.select()
.where(
models.twitter_subscriptions.c.unsubscribed_at.is_(None),
),
)
return [row[models.twitter_subscriptions.c.user_id] for row in result]
| true | true |
1c34f6884d500d11e54c5e32765f323a948b2fef | 1,180 | py | Python | release/stubs.min/Autodesk/Revit/DB/__init___parts/DataExchangeMessageSeverity.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | release/stubs.min/Autodesk/Revit/DB/__init___parts/DataExchangeMessageSeverity.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | release/stubs.min/Autodesk/Revit/DB/__init___parts/DataExchangeMessageSeverity.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | class DataExchangeMessageSeverity(Enum, IComparable, IFormattable, IConvertible):
"""
Error levels for DataExchangeLog
enum DataExchangeMessageSeverity,values: Error (2),FatalError (3),Info (0),Warning (1)
"""
def __eq__(self, *args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self, *args):
pass
def __gt__(self, *args):
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args):
pass
def __lt__(self, *args):
pass
def __ne__(self, *args):
pass
def __reduce_ex__(self, *args):
pass
def __str__(self, *args):
pass
Error = None
FatalError = None
Info = None
value__ = None
Warning = None
| 24.583333 | 221 | 0.574576 | class DataExchangeMessageSeverity(Enum, IComparable, IFormattable, IConvertible):
def __eq__(self, *args):
pass
def __format__(self, *args):
pass
def __ge__(self, *args):
pass
def __gt__(self, *args):
pass
def __init__(self, *args):
pass
def __le__(self, *args):
pass
def __lt__(self, *args):
pass
def __ne__(self, *args):
pass
def __reduce_ex__(self, *args):
pass
def __str__(self, *args):
pass
Error = None
FatalError = None
Info = None
value__ = None
Warning = None
| true | true |
1c34f713dedfef8b5b8a0e3a156db7d83a3fba3b | 36,760 | py | Python | lib/tests/test_bitcoin.py | D3m0nKingx/electrum-ganja | be204713107626f3e334e9fd5974c044a9f2ffb6 | [
"MIT"
] | null | null | null | lib/tests/test_bitcoin.py | D3m0nKingx/electrum-ganja | be204713107626f3e334e9fd5974c044a9f2ffb6 | [
"MIT"
] | null | null | null | lib/tests/test_bitcoin.py | D3m0nKingx/electrum-ganja | be204713107626f3e334e9fd5974c044a9f2ffb6 | [
"MIT"
] | null | null | null | import base64
import unittest
import sys
from lib import bitcoin
from lib.bitcoin import (
public_key_to_p2pkh,
bip32_root, bip32_public_derivation, bip32_private_derivation,
Hash, address_from_private_key,
is_address, is_private_key, xpub_from_xprv, is_new_seed, is_old_seed,
var_int, op_push, address_to_script,
deserialize_privkey, serialize_privkey, is_segwit_address,
is_b58_address, address_to_scripthash, is_minikey, is_compressed, is_xpub,
xpub_type, is_xprv, is_bip32_derivation, seed_type, EncodeBase58Check,
script_num_to_hex, push_script, add_number_to_script)
from lib import ecc, crypto, ecc_fast
from lib.ecc import number_to_string, string_to_number
from lib.transaction import opcodes
from lib.util import bfh, bh2u
from lib import constants
from lib.storage import WalletStorage
from . import SequentialTestCase
from . import TestCaseForTestnet
from . import FAST_TESTS
try:
import ecdsa
except ImportError:
sys.exit("Error: python-ecdsa does not seem to be installed. Try 'sudo pip install ecdsa'")
def needs_test_with_all_ecc_implementations(func):
"""Function decorator to run a unit test twice:
once when libsecp256k1 is not available, once when it is.
NOTE: this is inherently sequential;
tests running in parallel would break things
"""
def run_test(*args, **kwargs):
if FAST_TESTS: # if set, only run tests once, using fastest implementation
func(*args, **kwargs)
return
ecc_fast.undo_monkey_patching_of_python_ecdsa_internals_with_libsecp256k1()
try:
# first test without libsecp
func(*args, **kwargs)
finally:
# if libsecp is not available, we are done
if not ecc_fast._libsecp256k1:
return
ecc_fast.do_monkey_patching_of_python_ecdsa_internals_with_libsecp256k1()
# if libsecp is available, test again now
func(*args, **kwargs)
return run_test
def needs_test_with_all_aes_implementations(func):
    """Function decorator to run a unit test twice:
    once when pycryptodomex is not available, once when it is.

    NOTE: this is inherently sequential;
    tests running in parallel would break things
    """
    def run_test(*args, **kwargs):
        if FAST_TESTS:  # if set, only run tests once, using fastest implementation
            func(*args, **kwargs)
            return
        _aes = crypto.AES
        crypto.AES = None
        try:
            # first test without pycryptodomex
            func(*args, **kwargs)
        finally:
            # always restore crypto.AES, even if the first pass failed.
            # (If _aes was already None this is a no-op.)
            # Note: no `return` in this finally block -- a `return` here would
            # silently swallow any exception raised by the first pass (B012).
            crypto.AES = _aes
        # if pycryptodomex is not available, we are done
        if not _aes:
            return
        # if pycryptodomex is available, test again now
        func(*args, **kwargs)
    return run_test
class Test_bitcoin(SequentialTestCase):
    """Tests for hashing, message signing/encryption, script construction and
    address serialization in lib.bitcoin / lib.ecc / lib.crypto (mainnet)."""

    def test_libsecp256k1_is_available(self):
        # we want the unit testing framework to test with libsecp256k1 available.
        self.assertTrue(bool(ecc_fast._libsecp256k1))

    def test_pycryptodomex_is_available(self):
        # we want the unit testing framework to test with pycryptodomex available.
        self.assertTrue(bool(crypto.AES))

    @needs_test_with_all_aes_implementations
    @needs_test_with_all_ecc_implementations
    def test_crypto(self):
        """ECIES round-trip for a short message and a 512-byte binary message."""
        for message in [b"Chancellor on brink of second bailout for banks", b'\xff'*512]:
            self._do_test_crypto(message)

    def _do_test_crypto(self, message):
        """Encrypt/decrypt and sign/verify *message* with a fresh random key pair."""
        G = ecc.generator()
        _r = G.order()
        pvk = ecdsa.util.randrange(_r)  # random private-key scalar in [1, r)
        Pub = pvk*G
        pubkey_c = Pub.get_public_key_bytes(True)  # compressed encoding
        addr_c = public_key_to_p2pkh(pubkey_c)
        eck = ecc.ECPrivkey(number_to_string(pvk,_r))
        enc = ecc.ECPubkey(pubkey_c).encrypt_message(message)
        dec = eck.decrypt_message(enc)
        self.assertEqual(message, dec)
        # decrypting the same ciphertext again must give the same plaintext
        dec2 = eck.decrypt_message(enc)
        self.assertEqual(message, dec2)
        signature = eck.sign_message(message, True)
        # raises if the signature does not verify for the key's address
        eck.verify_message_for_address(signature, message)

    @needs_test_with_all_ecc_implementations
    def test_msg_signing(self):
        """Signatures for known WIF keys match fixed vectors and verify
        against the matching addresses (and fail against wrong ones)."""
        msg1 = b'Chancellor on brink of second bailout for banks'
        msg2 = b'Electrum-Ganja'

        def sign_message_with_wif_privkey(wif_privkey, msg):
            txin_type, privkey, compressed = deserialize_privkey(wif_privkey)
            key = ecc.ECPrivkey(privkey)
            return key.sign_message(msg, compressed)

        sig1 = sign_message_with_wif_privkey(
            'L1TnU2zbNaAqMoVh65Cyvmcjzbrj41Gs9iTLcWbpJCMynXuap6UN', msg1)
        addr1 = '15hETetDmcXm1mM4sEf7U2KXC9hDHFMSzz'
        sig2 = sign_message_with_wif_privkey(
            '5Hxn5C4SQuiV6e62A1MtZmbSeQyrLFhu5uYks62pU5VBUygK2KD', msg2)
        addr2 = '1GPHVTY8UD9my6jyP4tb2TYJwUbDetyNC6'
        sig1_b64 = base64.b64encode(sig1)
        sig2_b64 = base64.b64encode(sig2)
        self.assertEqual(sig1_b64, b'H/9jMOnj4MFbH3d7t4yCQ9i7DgZU/VZ278w3+ySv2F4yIsdqjsc5ng3kmN8OZAThgyfCZOQxZCWza9V5XzlVY0Y=')
        self.assertEqual(sig2_b64, b'G84dmJ8TKIDKMT9qBRhpX2sNmR0y5t+POcYnFFJCs66lJmAs3T8A6Sbpx7KA6yTQ9djQMabwQXRrDomOkIKGn18=')
        self.assertTrue(ecc.verify_message_with_address(addr1, sig1, msg1))
        self.assertTrue(ecc.verify_message_with_address(addr2, sig2, msg2))
        self.assertFalse(ecc.verify_message_with_address(addr1, b'wrong', msg1))
        self.assertFalse(ecc.verify_message_with_address(addr1, sig2, msg1))

    @needs_test_with_all_aes_implementations
    @needs_test_with_all_ecc_implementations
    def test_decrypt_message(self):
        """Decrypt fixed BIE1 ciphertexts with a password-derived key."""
        key = WalletStorage.get_eckey_from_password('pw123')
        self.assertEqual(b'me<(s_s)>age', key.decrypt_message(b'QklFMQMDFtgT3zWSQsa+Uie8H/WvfUjlu9UN9OJtTt3KlgKeSTi6SQfuhcg1uIz9hp3WIUOFGTLr4RNQBdjPNqzXwhkcPi2Xsbiw6UCNJncVPJ6QBg=='))
        self.assertEqual(b'me<(s_s)>age', key.decrypt_message(b'QklFMQKXOXbylOQTSMGfo4MFRwivAxeEEkewWQrpdYTzjPhqjHcGBJwdIhB7DyRfRQihuXx1y0ZLLv7XxLzrILzkl/H4YUtZB4uWjuOAcmxQH4i/Og=='))
        self.assertEqual(b'hey_there' * 100, key.decrypt_message(b'QklFMQLOOsabsXtGQH8edAa6VOUa5wX8/DXmxX9NyHoAx1a5bWgllayGRVPeI2bf0ZdWK0tfal0ap0ZIVKbd2eOJybqQkILqT6E1/Syzq0Zicyb/AA1eZNkcX5y4gzloxinw00ubCA8M7gcUjJpOqbnksATcJ5y2YYXcHMGGfGurWu6uJ/UyrNobRidWppRMW5yR9/6utyNvT6OHIolCMEf7qLcmtneoXEiz51hkRdZS7weNf9mGqSbz9a2NL3sdh1A0feHIjAZgcCKcAvksNUSauf0/FnIjzTyPRpjRDMeDC8Ci3sGiuO3cvpWJwhZfbjcS26KmBv2CHWXfRRNFYOInHZNIXWNAoBB47Il5bGSMd+uXiGr+SQ9tNvcu+BiJNmFbxYqg+oQ8dGAl1DtvY2wJVY8k7vO9BIWSpyIxfGw7EDifhc5vnOmGe016p6a01C3eVGxgl23UYMrP7+fpjOcPmTSF4rk5U5ljEN3MSYqlf1QEv0OqlI9q1TwTK02VBCjMTYxDHsnt04OjNBkNO8v5uJ4NR+UUDBEp433z53I59uawZ+dbk4v4ZExcl8EGmKm3Gzbal/iJ/F7KQuX2b/ySEhLOFVYFWxK73X1nBvCSK2mC2/8fCw8oI5pmvzJwQhcCKTdEIrz3MMvAHqtPScDUOjzhXxInQOCb3+UBj1PPIdqkYLvZss1TEaBwYZjLkVnK2MBj7BaqT6Rp6+5A/fippUKHsnB6eYMEPR2YgDmCHL+4twxHJG6UWdP3ybaKiiAPy2OHNP6PTZ0HrqHOSJzBSDD+Z8YpaRg29QX3UEWlqnSKaan0VYAsV1VeaN0XFX46/TWO0L5tjhYVXJJYGqo6tIQJymxATLFRF6AZaD1Mwd27IAL04WkmoQoXfO6OFfwdp/shudY/1gBkDBvGPICBPtnqkvhGF+ZF3IRkuPwiFWeXmwBxKHsRx/3+aJu32Ml9+za41zVk2viaxcGqwTc5KMexQFLAUwqhv+aIik7U+5qk/gEVSuRoVkihoweFzKolNF+BknH2oB4rZdPixag5Zje3DvgjsSFlOl69W/67t/Gs8htfSAaHlsB8vWRQr9+v/lxTbrAw+O0E+sYGoObQ4qQMyQshNZEHbpPg63eWiHtJJnrVBvOeIbIHzoLDnMDsWVWZSMzAQ1vhX1H5QLgSEbRlKSliVY03kDkh/Nk/KOn+B2q37Ialq4JcRoIYFGJ8AoYEAD0tRuTqFddIclE75HzwaNG7NyKW1plsa72ciOPwsPJsdd5F0qdSQ3OSKtooTn7uf6dXOc4lDkfrVYRlZ0PX'))

    @needs_test_with_all_aes_implementations
    @needs_test_with_all_ecc_implementations
    def test_encrypt_message(self):
        """Encrypt/decrypt round-trip; ciphertexts must be non-deterministic."""
        key = WalletStorage.get_eckey_from_password('secret_password77')
        msgs = [
            bytes([0] * 555),
            b'cannot think of anything funny'
        ]
        for plaintext in msgs:
            ciphertext1 = key.encrypt_message(plaintext)
            ciphertext2 = key.encrypt_message(plaintext)
            self.assertEqual(plaintext, key.decrypt_message(ciphertext1))
            self.assertEqual(plaintext, key.decrypt_message(ciphertext2))
            # a fresh ephemeral key is used per encryption, so ciphertexts differ
            self.assertNotEqual(ciphertext1, ciphertext2)

    @needs_test_with_all_ecc_implementations
    def test_sign_transaction(self):
        """Deterministic (RFC 6979-style) DER signatures match fixed vectors."""
        eckey1 = ecc.ECPrivkey(bfh('7e1255fddb52db1729fc3ceb21a46f95b8d9fe94cc83425e936a6c5223bb679d'))
        sig1 = eckey1.sign_transaction(bfh('5a548b12369a53faaa7e51b5081829474ebdd9c924b3a8230b69aa0be254cd94'))
        self.assertEqual(bfh('3045022100902a288b98392254cd23c0e9a49ac6d7920f171b8249a48e484b998f1874a2010220723d844826828f092cf400cb210c4fa0b8cd1b9d1a7f21590e78e022ff6476b9'), sig1)
        eckey2 = ecc.ECPrivkey(bfh('c7ce8c1462c311eec24dff9e2532ac6241e50ae57e7d1833af21942136972f23'))
        sig2 = eckey2.sign_transaction(bfh('642a2e66332f507c92bda910158dfe46fc10afbf72218764899d3af99a043fac'))
        self.assertEqual(bfh('30440220618513f4cfc87dde798ce5febae7634c23e7b9254a1eabf486be820f6a7c2c4702204fef459393a2b931f949e63ced06888f35e286e446dc46feb24b5b5f81c6ed52'), sig2)

    @needs_test_with_all_aes_implementations
    def test_aes_homomorphic(self):
        """Make sure AES is homomorphic."""
        payload = u'\u66f4\u7a33\u5b9a\u7684\u4ea4\u6613\u5e73\u53f0'
        password = u'secret'
        enc = crypto.pw_encode(payload, password)
        dec = crypto.pw_decode(enc, password)
        self.assertEqual(dec, payload)

    @needs_test_with_all_aes_implementations
    def test_aes_encode_without_password(self):
        """When not passed a password, pw_encode is noop on the payload."""
        payload = u'\u66f4\u7a33\u5b9a\u7684\u4ea4\u6613\u5e73\u53f0'
        enc = crypto.pw_encode(payload, None)
        self.assertEqual(payload, enc)

    @needs_test_with_all_aes_implementations
    def test_aes_deencode_without_password(self):
        """When not passed a password, pw_decode is noop on the payload."""
        payload = u'\u66f4\u7a33\u5b9a\u7684\u4ea4\u6613\u5e73\u53f0'
        enc = crypto.pw_decode(payload, None)
        self.assertEqual(payload, enc)

    @needs_test_with_all_aes_implementations
    def test_aes_decode_with_invalid_password(self):
        """pw_decode raises an Exception when supplied an invalid password."""
        payload = u"blah"
        password = u"uber secret"
        wrong_password = u"not the password"
        enc = crypto.pw_encode(payload, password)
        self.assertRaises(Exception, crypto.pw_decode, enc, wrong_password)

    def test_hash(self):
        """Make sure the Hash function does sha256 twice"""
        payload = u"test"
        expected = b'\x95MZI\xfdp\xd9\xb8\xbc\xdb5\xd2R&x)\x95\x7f~\xf7\xfalt\xf8\x84\x19\xbd\xc5\xe8"\t\xf4'
        result = Hash(payload)
        self.assertEqual(expected, result)

    def test_var_int(self):
        """CompactSize ("var_int") serialization at every width boundary."""
        # single-byte encoding for values < 0xfd
        for i in range(0xfd):
            self.assertEqual(var_int(i), "{:02x}".format(i))
        # 0xfd prefix + 2-byte little-endian
        self.assertEqual(var_int(0xfd), "fdfd00")
        self.assertEqual(var_int(0xfe), "fdfe00")
        self.assertEqual(var_int(0xff), "fdff00")
        self.assertEqual(var_int(0x1234), "fd3412")
        self.assertEqual(var_int(0xffff), "fdffff")
        # 0xfe prefix + 4-byte little-endian
        self.assertEqual(var_int(0x10000), "fe00000100")
        self.assertEqual(var_int(0x12345678), "fe78563412")
        self.assertEqual(var_int(0xffffffff), "feffffffff")
        # 0xff prefix + 8-byte little-endian
        self.assertEqual(var_int(0x100000000), "ff0000000001000000")
        self.assertEqual(var_int(0x0123456789abcdef), "ffefcdab8967452301")

    def test_op_push(self):
        """Push-opcode prefix selection: direct push, PUSHDATA1/2/4 boundaries."""
        self.assertEqual(op_push(0x00), '00')
        self.assertEqual(op_push(0x12), '12')
        self.assertEqual(op_push(0x4b), '4b')
        self.assertEqual(op_push(0x4c), '4c4c')
        self.assertEqual(op_push(0xfe), '4cfe')
        self.assertEqual(op_push(0xff), '4cff')
        self.assertEqual(op_push(0x100), '4d0001')
        self.assertEqual(op_push(0x1234), '4d3412')
        self.assertEqual(op_push(0xfffe), '4dfeff')
        self.assertEqual(op_push(0xffff), '4dffff')
        self.assertEqual(op_push(0x10000), '4e00000100')
        self.assertEqual(op_push(0x12345678), '4e78563412')

    def test_script_num_to_hex(self):
        """Minimal CScriptNum encoding (little-endian, sign bit in top byte)."""
        # test vectors from https://github.com/btcsuite/btcd/blob/fdc2bc867bda6b351191b5872d2da8270df00d13/txscript/scriptnum.go#L77
        self.assertEqual(script_num_to_hex(127), '7f')
        self.assertEqual(script_num_to_hex(-127), 'ff')
        self.assertEqual(script_num_to_hex(128), '8000')
        self.assertEqual(script_num_to_hex(-128), '8080')
        self.assertEqual(script_num_to_hex(129), '8100')
        self.assertEqual(script_num_to_hex(-129), '8180')
        self.assertEqual(script_num_to_hex(256), '0001')
        self.assertEqual(script_num_to_hex(-256), '0081')
        self.assertEqual(script_num_to_hex(32767), 'ff7f')
        self.assertEqual(script_num_to_hex(-32767), 'ffff')
        self.assertEqual(script_num_to_hex(32768), '008000')
        self.assertEqual(script_num_to_hex(-32768), '008080')

    def test_push_script(self):
        """Minimal data pushes per BIP-62 rule 3."""
        # https://github.com/bitcoin/bips/blob/master/bip-0062.mediawiki#push-operators
        self.assertEqual(push_script(''), bh2u(bytes([opcodes.OP_0])))
        self.assertEqual(push_script('07'), bh2u(bytes([opcodes.OP_7])))
        self.assertEqual(push_script('10'), bh2u(bytes([opcodes.OP_16])))
        self.assertEqual(push_script('81'), bh2u(bytes([opcodes.OP_1NEGATE])))
        self.assertEqual(push_script('11'), '0111')
        self.assertEqual(push_script(75 * '42'), '4b' + 75 * '42')
        self.assertEqual(push_script(76 * '42'), bh2u(bytes([opcodes.OP_PUSHDATA1]) + bfh('4c' + 76 * '42')))
        self.assertEqual(push_script(100 * '42'), bh2u(bytes([opcodes.OP_PUSHDATA1]) + bfh('64' + 100 * '42')))
        self.assertEqual(push_script(255 * '42'), bh2u(bytes([opcodes.OP_PUSHDATA1]) + bfh('ff' + 255 * '42')))
        self.assertEqual(push_script(256 * '42'), bh2u(bytes([opcodes.OP_PUSHDATA2]) + bfh('0001' + 256 * '42')))
        self.assertEqual(push_script(520 * '42'), bh2u(bytes([opcodes.OP_PUSHDATA2]) + bfh('0802' + 520 * '42')))

    def test_add_number_to_script(self):
        """Minimal number pushes per BIP-62 rule 4."""
        # https://github.com/bitcoin/bips/blob/master/bip-0062.mediawiki#numbers
        self.assertEqual(add_number_to_script(0), bytes([opcodes.OP_0]))
        self.assertEqual(add_number_to_script(7), bytes([opcodes.OP_7]))
        self.assertEqual(add_number_to_script(16), bytes([opcodes.OP_16]))
        self.assertEqual(add_number_to_script(-1), bytes([opcodes.OP_1NEGATE]))
        self.assertEqual(add_number_to_script(-127), bfh('01ff'))
        self.assertEqual(add_number_to_script(-2), bfh('0182'))
        self.assertEqual(add_number_to_script(17), bfh('0111'))
        self.assertEqual(add_number_to_script(127), bfh('017f'))
        self.assertEqual(add_number_to_script(-32767), bfh('02ffff'))
        self.assertEqual(add_number_to_script(-128), bfh('028080'))
        self.assertEqual(add_number_to_script(128), bfh('028000'))
        self.assertEqual(add_number_to_script(32767), bfh('02ff7f'))
        self.assertEqual(add_number_to_script(-8388607), bfh('03ffffff'))
        self.assertEqual(add_number_to_script(-32768), bfh('03008080'))
        self.assertEqual(add_number_to_script(32768), bfh('03008000'))
        self.assertEqual(add_number_to_script(8388607), bfh('03ffff7f'))
        self.assertEqual(add_number_to_script(-2147483647), bfh('04ffffffff'))
        self.assertEqual(add_number_to_script(-8388608 ), bfh('0400008080'))
        self.assertEqual(add_number_to_script(8388608), bfh('0400008000'))
        self.assertEqual(add_number_to_script(2147483647), bfh('04ffffff7f'))

    def test_address_to_script(self):
        """scriptPubKey construction for mainnet bech32, P2PKH and P2SH addresses."""
        # bech32 native segwit
        # test vectors from BIP-0173
        self.assertEqual(address_to_script('BC1QW508D6QEJXTDG4Y5R3ZARVARY0C5XW7KV8F3T4'), '0014751e76e8199196d454941c45d1b3a323f1433bd6')
        self.assertEqual(address_to_script('bc1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k7grplx'), '5128751e76e8199196d454941c45d1b3a323f1433bd6751e76e8199196d454941c45d1b3a323f1433bd6')
        self.assertEqual(address_to_script('BC1SW50QA3JX3S'), '6002751e')
        self.assertEqual(address_to_script('bc1zw508d6qejxtdg4y5r3zarvaryvg6kdaj'), '5210751e76e8199196d454941c45d1b3a323')
        # base58 P2PKH
        self.assertEqual(address_to_script('14gcRovpkCoGkCNBivQBvw7eso7eiNAbxG'), '76a91428662c67561b95c79d2257d2a93d9d151c977e9188ac')
        self.assertEqual(address_to_script('1BEqfzh4Y3zzLosfGhw1AsqbEKVW6e1qHv'), '76a914704f4b81cadb7bf7e68c08cd3657220f680f863c88ac')
        # base58 P2SH
        self.assertEqual(address_to_script('35ZqQJcBQMZ1rsv8aSuJ2wkC7ohUCQMJbT'), 'a9142a84cf00d47f699ee7bbc1dea5ec1bdecb4ac15487')
        self.assertEqual(address_to_script('3PyjzJ3im7f7bcV724GR57edKDqoZvH7Ji'), 'a914f47c8954e421031ad04ecd8e7752c9479206b9d387')
class Test_bitcoin_testnet(TestCaseForTestnet):
    """scriptPubKey construction for testnet address formats."""

    def test_address_to_script(self):
        """address_to_script() must produce the expected script hex for
        testnet bech32 (BIP-0173 vectors), base58 P2PKH and base58 P2SH."""
        vectors = (
            # bech32 native segwit -- test vectors from BIP-0173
            ('tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7',
             '00201863143c14c5166804bd19203356da136c985678cd4d27a1b8c6329604903262'),
            ('tb1qqqqqp399et2xygdj5xreqhjjvcmzhxw4aywxecjdzew6hylgvsesrxh6hy',
             '0020000000c4a5cad46221b2a187905e5266362b99d5e91c6ce24d165dab93e86433'),
            # base58 P2PKH
            ('mutXcGt1CJdkRvXuN2xoz2quAAQYQ59bRX',
             '76a9149da64e300c5e4eb4aaffc9c2fd465348d5618ad488ac'),
            ('miqtaRTkU3U8rzwKbEHx3g8FSz8GJtPS3K',
             '76a914247d2d5b6334bdfa2038e85b20fc15264f8e5d2788ac'),
            # base58 P2SH
            ('2N3LSvr3hv5EVdfcrxg2Yzecf3SRvqyBE4p',
             'a9146eae23d8c4a941316017946fc761a7a6c85561fb87'),
            ('2NE4ZdmxFmUgwu5wtfoN2gVniyMgRDYq1kk',
             'a914e4567743d378957cd2ee7072da74b1203c1a7a0b87'),
        )
        for address, expected_script in vectors:
            self.assertEqual(address_to_script(address), expected_script)
class Test_xprv_xpub(SequentialTestCase):
    """BIP-32 extended key derivation, serialization and classification tests."""

    # (xprv, xpub, script type) triples used by several tests below.
    xprv_xpub = (
        # Taken from test vectors in https://en.bitcoin.it/wiki/BIP_0032_TestVectors
        {'xprv': 'xprvA41z7zogVVwxVSgdKUHDy1SKmdb533PjDz7J6N6mV6uS3ze1ai8FHa8kmHScGpWmj4WggLyQjgPie1rFSruoUihUZREPSL39UNdE3BBDu76',
         'xpub': 'xpub6H1LXWLaKsWFhvm6RVpEL9P4KfRZSW7abD2ttkWP3SSQvnyA8FSVqNTEcYFgJS2UaFcxupHiYkro49S8yGasTvXEYBVPamhGW6cFJodrTHy',
         'xtype': 'standard'},
        {'xprv': 'yprvAJEYHeNEPcyBoQYM7sGCxDiNCTX65u4ANgZuSGTrKN5YCC9MP84SBayrgaMyZV7zvkHrr3HVPTK853s2SPk4EttPazBZBmz6QfDkXeE8Zr7',
         'xpub': 'ypub6XDth9u8DzXV1tcpDtoDKMf6kVMaVMn1juVWEesTshcX4zUVvfNgjPJLXrD9N7AdTLnbHFL64KmBn3SNaTe69iZYbYCqLCCNPZKbLz9niQ4',
         'xtype': 'p2wpkh-p2sh'},
        {'xprv': 'zprvAWgYBBk7JR8GkraNZJeEodAp2UR1VRWJTXyV1ywuUVs1awUgTiBS1ZTDtLA5F3MFDn1LZzu8dUpSKdT7ToDpvEG6PQu4bJs7zQY47Sd3sEZ',
         'xpub': 'zpub6jftahH18ngZyLeqfLBFAm7YaWFVttE9pku5pNMX2qPzTjoq1FVgZMmhjecyB2nqFb31gHE9vNvbaggU6vvWpNZbXEWLLUjYjFqG95LNyT8',
         'xtype': 'p2wpkh'},
    )

    def _do_test_bip32(self, seed, sequence):
        """Derive along *sequence* (e.g. "m/0'/1/2") from hex *seed*;
        checks public derivation matches private derivation for
        non-hardened steps, and returns the final (xpub, xprv)."""
        xprv, xpub = bip32_root(bfh(seed), 'standard')
        self.assertEqual("m/", sequence[0:2])
        path = 'm'
        sequence = sequence[2:]
        for n in sequence.split('/'):
            child_path = path + '/' + n
            if n[-1] != "'":
                # non-hardened: can also derive the child xpub from the parent xpub
                xpub2 = bip32_public_derivation(xpub, path, child_path)
            xprv, xpub = bip32_private_derivation(xprv, path, child_path)
            if n[-1] != "'":
                self.assertEqual(xpub, xpub2)
            path = child_path
        return xpub, xprv

    @needs_test_with_all_ecc_implementations
    def test_bip32(self):
        """Full derivation chains against the official BIP-32 test vectors."""
        # see https://en.bitcoin.it/wiki/BIP_0032_TestVectors
        xpub, xprv = self._do_test_bip32("000102030405060708090a0b0c0d0e0f", "m/0'/1/2'/2/1000000000")
        self.assertEqual("xpub6H1LXWLaKsWFhvm6RVpEL9P4KfRZSW7abD2ttkWP3SSQvnyA8FSVqNTEcYFgJS2UaFcxupHiYkro49S8yGasTvXEYBVPamhGW6cFJodrTHy", xpub)
        self.assertEqual("xprvA41z7zogVVwxVSgdKUHDy1SKmdb533PjDz7J6N6mV6uS3ze1ai8FHa8kmHScGpWmj4WggLyQjgPie1rFSruoUihUZREPSL39UNdE3BBDu76", xprv)

        xpub, xprv = self._do_test_bip32("fffcf9f6f3f0edeae7e4e1dedbd8d5d2cfccc9c6c3c0bdbab7b4b1aeaba8a5a29f9c999693908d8a8784817e7b7875726f6c696663605d5a5754514e4b484542","m/0/2147483647'/1/2147483646'/2")
        self.assertEqual("xpub6FnCn6nSzZAw5Tw7cgR9bi15UV96gLZhjDstkXXxvCLsUXBGXPdSnLFbdpq8p9HmGsApME5hQTZ3emM2rnY5agb9rXpVGyy3bdW6EEgAtqt", xpub)
        self.assertEqual("xprvA2nrNbFZABcdryreWet9Ea4LvTJcGsqrMzxHx98MMrotbir7yrKCEXw7nadnHM8Dq38EGfSh6dqA9QWTyefMLEcBYJUuekgW4BYPJcr9E7j", xprv)

    @needs_test_with_all_ecc_implementations
    def test_xpub_from_xprv(self):
        """We can derive the xpub key from a xprv."""
        for xprv_details in self.xprv_xpub:
            result = xpub_from_xprv(xprv_details['xprv'])
            self.assertEqual(result, xprv_details['xpub'])

    @needs_test_with_all_ecc_implementations
    def test_is_xpub(self):
        """is_xpub() accepts valid keys and rejects malformed ones."""
        for xprv_details in self.xprv_xpub:
            xpub = xprv_details['xpub']
            self.assertTrue(is_xpub(xpub))
        self.assertFalse(is_xpub('xpub1nval1d'))
        self.assertFalse(is_xpub('xpub661MyMwAqRbcFWohJWt7PHsFEJfZAvw9ZxwQoDa4SoMgsDDM1T7WK3u9E4edkC4ugRnZ8E4xDZRpk8Rnts3Nbt97dPwT52WRONGBADWRONG'))

    @needs_test_with_all_ecc_implementations
    def test_xpub_type(self):
        """Version bytes map to the expected script type string."""
        for xprv_details in self.xprv_xpub:
            xpub = xprv_details['xpub']
            self.assertEqual(xprv_details['xtype'], xpub_type(xpub))

    @needs_test_with_all_ecc_implementations
    def test_is_xprv(self):
        """is_xprv() accepts valid keys and rejects malformed ones."""
        for xprv_details in self.xprv_xpub:
            xprv = xprv_details['xprv']
            self.assertTrue(is_xprv(xprv))
        self.assertFalse(is_xprv('xprv1nval1d'))
        self.assertFalse(is_xprv('xprv661MyMwAqRbcFWohJWt7PHsFEJfZAvw9ZxwQoDa4SoMgsDDM1T7WK3u9E4edkC4ugRnZ8E4xDZRpk8Rnts3Nbt97dPwT52WRONGBADWRONG'))

    def test_is_bip32_derivation(self):
        """Syntactic validation of "m/..." derivation path strings."""
        self.assertTrue(is_bip32_derivation("m/0'/1"))
        self.assertTrue(is_bip32_derivation("m/0'/0'"))
        self.assertTrue(is_bip32_derivation("m/44'/0'/0'/0/0"))
        self.assertTrue(is_bip32_derivation("m/49'/0'/0'/0/0"))
        self.assertFalse(is_bip32_derivation("mmmmmm"))
        self.assertFalse(is_bip32_derivation("n/"))
        self.assertFalse(is_bip32_derivation(""))
        self.assertFalse(is_bip32_derivation("m/q8462"))

    def test_version_bytes(self):
        """Each mainnet xkey version header must serialize (base58check)
        to its expected human-readable prefix, regardless of payload."""
        xprv_headers_b58 = {
            'standard': 'xprv',
            'p2wpkh-p2sh': 'yprv',
            'p2wsh-p2sh': 'Yprv',
            'p2wpkh': 'zprv',
            'p2wsh': 'Zprv',
        }
        xpub_headers_b58 = {
            'standard': 'xpub',
            'p2wpkh-p2sh': 'ypub',
            'p2wsh-p2sh': 'Ypub',
            'p2wpkh': 'zpub',
            'p2wsh': 'Zpub',
        }
        for xtype, xkey_header_bytes in constants.net.XPRV_HEADERS.items():
            xkey_header_bytes = bfh("%08x" % xkey_header_bytes)
            # check both all-zero and all-0xff payloads (prefix must be stable)
            xkey_bytes = xkey_header_bytes + bytes([0] * 74)
            xkey_b58 = EncodeBase58Check(xkey_bytes)
            self.assertTrue(xkey_b58.startswith(xprv_headers_b58[xtype]))
            xkey_bytes = xkey_header_bytes + bytes([255] * 74)
            xkey_b58 = EncodeBase58Check(xkey_bytes)
            self.assertTrue(xkey_b58.startswith(xprv_headers_b58[xtype]))
        for xtype, xkey_header_bytes in constants.net.XPUB_HEADERS.items():
            xkey_header_bytes = bfh("%08x" % xkey_header_bytes)
            xkey_bytes = xkey_header_bytes + bytes([0] * 74)
            xkey_b58 = EncodeBase58Check(xkey_bytes)
            self.assertTrue(xkey_b58.startswith(xpub_headers_b58[xtype]))
            xkey_bytes = xkey_header_bytes + bytes([255] * 74)
            xkey_b58 = EncodeBase58Check(xkey_bytes)
            self.assertTrue(xkey_b58.startswith(xpub_headers_b58[xtype]))
class Test_xprv_xpub_testnet(TestCaseForTestnet):
    """Version-byte serialization checks for testnet extended keys."""

    def test_version_bytes(self):
        """Each testnet xkey version header must serialize (base58check)
        to its expected human-readable prefix, regardless of payload."""
        xprv_headers_b58 = {
            'standard': 'tprv',
            'p2wpkh-p2sh': 'uprv',
            'p2wsh-p2sh': 'Uprv',
            'p2wpkh': 'vprv',
            'p2wsh': 'Vprv',
        }
        xpub_headers_b58 = {
            'standard': 'tpub',
            'p2wpkh-p2sh': 'upub',
            'p2wsh-p2sh': 'Upub',
            'p2wpkh': 'vpub',
            'p2wsh': 'Vpub',
        }
        # Run the same check for the private and public header tables.
        header_tables = (
            (constants.net.XPRV_HEADERS, xprv_headers_b58),
            (constants.net.XPUB_HEADERS, xpub_headers_b58),
        )
        for headers, expected_prefixes in header_tables:
            for xtype, header_int in headers.items():
                header_bytes = bfh("%08x" % header_int)
                # the prefix must be stable for both extreme payloads
                for filler_byte in (0, 255):
                    serialized = EncodeBase58Check(header_bytes + bytes([filler_byte] * 74))
                    self.assertTrue(serialized.startswith(expected_prefixes[xtype]))
class Test_keyImport(SequentialTestCase):
    """WIF private-key import/export, address derivation and classification,
    driven by the priv_pub_addr vector table."""

    # Each entry: WIF input, its canonical exported form, pubkey hex, address,
    # whether it is a minikey, its script (txin) type, compression flag,
    # address encoding family, and the electrum scripthash of the address.
    priv_pub_addr = (
           {'priv': 'KzMFjMC2MPadjvX5Cd7b8AKKjjpBSoRKUTpoAtN6B3J9ezWYyXS6',
            'exported_privkey': 'p2pkh:KzMFjMC2MPadjvX5Cd7b8AKKjjpBSoRKUTpoAtN6B3J9ezWYyXS6',
            'pub': '02c6467b7e621144105ed3e4835b0b4ab7e35266a2ae1c4f8baa19e9ca93452997',
            'address': '17azqT8T16coRmWKYFj3UjzJuxiYrYFRBR',
            'minikey' : False,
            'txin_type': 'p2pkh',
            'compressed': True,
            'addr_encoding': 'base58',
            'scripthash': 'c9aecd1fef8d661a42c560bf75c8163e337099800b8face5ca3d1393a30508a7'},
           {'priv': 'p2pkh:Kzj8VjwpZ99bQqVeUiRXrKuX9mLr1o6sWxFMCBJn1umC38BMiQTD',
            'exported_privkey': 'p2pkh:Kzj8VjwpZ99bQqVeUiRXrKuX9mLr1o6sWxFMCBJn1umC38BMiQTD',
            'pub': '0352d78b4b37e0f6d4e164423436f2925fa57817467178eca550a88f2821973c41',
            'address': '1GXgZ5Qi6gmXTHVSpUPZLy4Ci2nbfb3ZNb',
            'minikey': False,
            'txin_type': 'p2pkh',
            'compressed': True,
            'addr_encoding': 'base58',
            'scripthash': 'a9b2a76fc196c553b352186dfcca81fcf323a721cd8431328f8e9d54216818c1'},
           {'priv': '5Hxn5C4SQuiV6e62A1MtZmbSeQyrLFhu5uYks62pU5VBUygK2KD',
            'exported_privkey': 'p2pkh:5Hxn5C4SQuiV6e62A1MtZmbSeQyrLFhu5uYks62pU5VBUygK2KD',
            'pub': '04e5fe91a20fac945845a5518450d23405ff3e3e1ce39827b47ee6d5db020a9075422d56a59195ada0035e4a52a238849f68e7a325ba5b2247013e0481c5c7cb3f',
            'address': '1GPHVTY8UD9my6jyP4tb2TYJwUbDetyNC6',
            'minikey': False,
            'txin_type': 'p2pkh',
            'compressed': False,
            'addr_encoding': 'base58',
            'scripthash': 'f5914651408417e1166f725a5829ff9576d0dbf05237055bf13abd2af7f79473'},
           {'priv': 'p2pkh:5KhYQCe1xd5g2tqpmmGpUWDpDuTbA8vnpbiCNDwMPAx29WNQYfN',
            'exported_privkey': 'p2pkh:5KhYQCe1xd5g2tqpmmGpUWDpDuTbA8vnpbiCNDwMPAx29WNQYfN',
            'pub': '048f0431b0776e8210376c81280011c2b68be43194cb00bd47b7e9aa66284b713ce09556cde3fee606051a07613f3c159ef3953b8927c96ae3dae94a6ba4182e0e',
            'address': '147kiRHHm9fqeMQSgqf4k35XzuWLP9fmmS',
            'minikey': False,
            'txin_type': 'p2pkh',
            'compressed': False,
            'addr_encoding': 'base58',
            'scripthash': '6dd2e07ad2de9ba8eec4bbe8467eb53f8845acff0d9e6f5627391acc22ff62df'},
           {'priv': 'LHJnnvRzsdrTX2j5QeWVsaBkabK7gfMNqNNqxnbBVRaJYfk24iJz',
            'exported_privkey': 'p2wpkh-p2sh:Kz9XebiCXL2BZzhYJViiHDzn5iup1povWV8aqstzWU4sz1K5nVva',
            'pub': '0279ad237ca0d812fb503ab86f25e15ebd5fa5dd95c193639a8a738dcd1acbad81',
            'address': '3GeVJB3oKr7psgKR6BTXSxKtWUkfsHHhk7',
            'minikey': False,
            'txin_type': 'p2wpkh-p2sh',
            'compressed': True,
            'addr_encoding': 'base58',
            'scripthash': 'd7b04e882fa6b13246829ac552a2b21461d9152eb00f0a6adb58457a3e63d7c5'},
           {'priv': 'p2wpkh-p2sh:L3CZH1pm87X4bbE6mSGvZnAZ1KcFDRomBudUkrkBG7EZhDtBVXMW',
            'exported_privkey': 'p2wpkh-p2sh:L3CZH1pm87X4bbE6mSGvZnAZ1KcFDRomBudUkrkBG7EZhDtBVXMW',
            'pub': '0229da20a15b3363b2c28e3c5093c180b56c439df0b968a970366bb1f38435361e',
            'address': '3C79goMwT7zSTjXnPoCg6VFGAnUpZAkyus',
            'minikey': False,
            'txin_type': 'p2wpkh-p2sh',
            'compressed': True,
            'addr_encoding': 'base58',
            'scripthash': '714bf6bfe1083e69539f40d4c7a7dca85d187471b35642e55f20d7e866494cf7'},
           {'priv': 'L8g5V8kFFeg2WbecahRSdobARbHz2w2STH9S8ePHVSY4fmia7Rsj',
            'exported_privkey': 'p2wpkh:Kz6SuyPM5VktY5dr2d2YqdVgBA6LCWkiHqXJaC3BzxnMPSUuYzmF',
            'pub': '03e9f948421aaa89415dc5f281a61b60dde12aae3181b3a76cd2d849b164fc6d0b',
            'address': 'bc1qqmpt7u5e9hfznljta5gnvhyvfd2kdd0r90hwue',
            'minikey': False,
            'txin_type': 'p2wpkh',
            'compressed': True,
            'addr_encoding': 'bech32',
            'scripthash': '1929acaaef3a208c715228e9f1ca0318e3a6b9394ab53c8d026137f847ecf97b'},
           {'priv': 'p2wpkh:KyDWy5WbjLA58Zesh1o8m3pADGdJ3v33DKk4m7h8BD5zDKDmDFwo',
            'exported_privkey': 'p2wpkh:KyDWy5WbjLA58Zesh1o8m3pADGdJ3v33DKk4m7h8BD5zDKDmDFwo',
            'pub': '038c57657171c1f73e34d5b3971d05867d50221ad94980f7e87cbc2344425e6a1e',
            'address': 'bc1qpakeeg4d9ydyjxd8paqrw4xy9htsg532xzxn50',
            'minikey': False,
            'txin_type': 'p2wpkh',
            'compressed': True,
            'addr_encoding': 'bech32',
            'scripthash': '242f02adde84ebb2a7dd778b2f3a81b3826f111da4d8960d826d7a4b816cb261'},
           # from http://bitscan.com/articles/security/spotlight-on-mini-private-keys
           {'priv': 'SzavMBLoXU6kDrqtUVmffv',
            'exported_privkey': 'p2pkh:L53fCHmQhbNp1B4JipfBtfeHZH7cAibzG9oK19XfiFzxHgAkz6JK',
            'pub': '02588d202afcc1ee4ab5254c7847ec25b9a135bbda0f2bc69ee1a714749fd77dc9',
            'address': '19GuvDvMMUZ8vq84wT79fvnvhMd5MnfTkR',
            'minikey': True,
            'txin_type': 'p2pkh',
            'compressed': True,  # this is actually ambiguous... issue #2748
            'addr_encoding': 'base58',
            'scripthash': '60ad5a8b922f758cd7884403e90ee7e6f093f8d21a0ff24c9a865e695ccefdf1'},
    )

    @needs_test_with_all_ecc_implementations
    def test_public_key_from_private_key(self):
        """Deserializing each WIF key yields the expected pubkey, type and flag."""
        for priv_details in self.priv_pub_addr:
            txin_type, privkey, compressed = deserialize_privkey(priv_details['priv'])
            result = ecc.ECPrivkey(privkey).get_public_key_hex(compressed=compressed)
            self.assertEqual(priv_details['pub'], result)
            self.assertEqual(priv_details['txin_type'], txin_type)
            self.assertEqual(priv_details['compressed'], compressed)

    @needs_test_with_all_ecc_implementations
    def test_address_from_private_key(self):
        """Each WIF key derives the expected address for its script type."""
        for priv_details in self.priv_pub_addr:
            addr2 = address_from_private_key(priv_details['priv'])
            self.assertEqual(priv_details['address'], addr2)

    @needs_test_with_all_ecc_implementations
    def test_is_valid_address(self):
        """is_address/is_b58_address/is_segwit_address classify each vector
        correctly, and reject keys and garbage."""
        for priv_details in self.priv_pub_addr:
            addr = priv_details['address']
            self.assertFalse(is_address(priv_details['priv']))
            self.assertFalse(is_address(priv_details['pub']))
            self.assertTrue(is_address(addr))

            is_enc_b58 = priv_details['addr_encoding'] == 'base58'
            self.assertEqual(is_enc_b58, is_b58_address(addr))

            is_enc_bech32 = priv_details['addr_encoding'] == 'bech32'
            self.assertEqual(is_enc_bech32, is_segwit_address(addr))

        self.assertFalse(is_address("not an address"))

    @needs_test_with_all_ecc_implementations
    def test_is_private_key(self):
        """is_private_key() accepts both WIF forms, rejects pubkeys/addresses."""
        for priv_details in self.priv_pub_addr:
            self.assertTrue(is_private_key(priv_details['priv']))
            self.assertTrue(is_private_key(priv_details['exported_privkey']))
            self.assertFalse(is_private_key(priv_details['pub']))
            self.assertFalse(is_private_key(priv_details['address']))
        self.assertFalse(is_private_key("not a privkey"))

    @needs_test_with_all_ecc_implementations
    def test_serialize_privkey(self):
        """Deserialize-then-serialize round-trips to the canonical export form."""
        for priv_details in self.priv_pub_addr:
            txin_type, privkey, compressed = deserialize_privkey(priv_details['priv'])
            priv2 = serialize_privkey(privkey, compressed, txin_type)
            self.assertEqual(priv_details['exported_privkey'], priv2)

    @needs_test_with_all_ecc_implementations
    def test_address_to_scripthash(self):
        """Addresses hash to their expected electrum-protocol scripthash."""
        for priv_details in self.priv_pub_addr:
            sh = address_to_scripthash(priv_details['address'])
            self.assertEqual(priv_details['scripthash'], sh)

    @needs_test_with_all_ecc_implementations
    def test_is_minikey(self):
        """Only the Casascius-style minikey vector is detected as a minikey."""
        for priv_details in self.priv_pub_addr:
            minikey = priv_details['minikey']
            priv = priv_details['priv']
            self.assertEqual(minikey, is_minikey(priv))

    @needs_test_with_all_ecc_implementations
    def test_is_compressed(self):
        """is_compressed() matches each vector's compression flag."""
        for priv_details in self.priv_pub_addr:
            self.assertEqual(priv_details['compressed'],
                             is_compressed(priv_details['priv']))
class Test_seeds(SequentialTestCase):
    """ Test old and new seeds. """

    # (mnemonic, expected seed type) pairs; '' means "not a valid seed".
    # Covers case-insensitivity, surrounding whitespace, and hex-style old seeds.
    mnemonics = {
        ('cell dumb heartbeat north boom tease ship baby bright kingdom rare squeeze', 'old'),
        ('cell dumb heartbeat north boom tease ' * 4, 'old'),
        ('cell dumb heartbeat north boom tease ship baby bright kingdom rare badword', ''),
        ('cElL DuMb hEaRtBeAt nOrTh bOoM TeAsE ShIp bAbY BrIgHt kInGdOm rArE SqUeEzE', 'old'),
        ('   cElL  DuMb hEaRtBeAt nOrTh bOoM  TeAsE ShIp    bAbY BrIgHt kInGdOm rArE SqUeEzE   ', 'old'),
        # below seed is actually 'invalid old' as it maps to 33 hex chars
        ('hurry idiot prefer sunset mention mist jaw inhale impossible kingdom rare squeeze', 'old'),
        ('cram swing cover prefer miss modify ritual silly deliver chunk behind inform able', 'standard'),
        ('cram swing cover prefer miss modify ritual silly deliver chunk behind inform', ''),
        ('ostrich security deer aunt climb inner alpha arm mutual marble solid task', 'standard'),
        ('OSTRICH SECURITY DEER AUNT CLIMB INNER ALPHA ARM MUTUAL MARBLE SOLID TASK', 'standard'),
        ('   oStRiCh sEcUrItY DeEr aUnT ClImB  InNeR AlPhA ArM MuTuAl mArBlE SoLiD TaSk  ', 'standard'),
        ('x8', 'standard'),
        ('science dawn member doll dutch real can brick knife deny drive list', '2fa'),
        ('science dawn member doll dutch real ca brick knife deny drive list', ''),
        (' sCience dawn   member doll Dutch rEAl can brick knife deny drive lisT', '2fa'),
        ('frost pig brisk excite novel report camera enlist axis nation novel desert', 'segwit'),
        ('  fRoSt pig brisk excIte novel rePort CamEra enlist axis nation nOVeL dEsert ', 'segwit'),
        ('9dk', 'segwit'),
    }

    def test_new_seed(self):
        """A valid 13-word electrum seed passes; dropping a word fails."""
        seed = "cram swing cover prefer miss modify ritual silly deliver chunk behind inform able"
        self.assertTrue(is_new_seed(seed))

        seed = "cram swing cover prefer miss modify ritual silly deliver chunk behind inform"
        self.assertFalse(is_new_seed(seed))

    def test_old_seed(self):
        """Old-style seeds: 12 or 24 words, or 32/64 hex chars; 18 words invalid."""
        self.assertTrue(is_old_seed(" ".join(["like"] * 12)))
        self.assertFalse(is_old_seed(" ".join(["like"] * 18)))
        self.assertTrue(is_old_seed(" ".join(["like"] * 24)))
        self.assertFalse(is_old_seed("not a seed"))

        self.assertTrue(is_old_seed("0123456789ABCDEF" * 2))
        self.assertTrue(is_old_seed("0123456789ABCDEF" * 4))

    def test_seed_type(self):
        """seed_type() classifies every vector in self.mnemonics."""
        for seed_words, _type in self.mnemonics:
            self.assertEqual(_type, seed_type(seed_words), msg=seed_words)
| 52.664756 | 1,378 | 0.707671 | import base64
import unittest
import sys
from lib import bitcoin
from lib.bitcoin import (
public_key_to_p2pkh,
bip32_root, bip32_public_derivation, bip32_private_derivation,
Hash, address_from_private_key,
is_address, is_private_key, xpub_from_xprv, is_new_seed, is_old_seed,
var_int, op_push, address_to_script,
deserialize_privkey, serialize_privkey, is_segwit_address,
is_b58_address, address_to_scripthash, is_minikey, is_compressed, is_xpub,
xpub_type, is_xprv, is_bip32_derivation, seed_type, EncodeBase58Check,
script_num_to_hex, push_script, add_number_to_script)
from lib import ecc, crypto, ecc_fast
from lib.ecc import number_to_string, string_to_number
from lib.transaction import opcodes
from lib.util import bfh, bh2u
from lib import constants
from lib.storage import WalletStorage
from . import SequentialTestCase
from . import TestCaseForTestnet
from . import FAST_TESTS
try:
import ecdsa
except ImportError:
sys.exit("Error: python-ecdsa does not seem to be installed. Try 'sudo pip install ecdsa'")
def needs_test_with_all_ecc_implementations(func):
    """Function decorator to run a unit test twice:
    once when libsecp256k1 is not available, once when it is.

    NOTE: this is inherently sequential;
    tests running in parallel would break things.
    """
    def run_test(*args, **kwargs):
        if FAST_TESTS:  # if set, only run tests once, using fastest implementation
            func(*args, **kwargs)
            return
        ecc_fast.undo_monkey_patching_of_python_ecdsa_internals_with_libsecp256k1()
        try:
            # first pass: pure-python ecdsa code paths
            func(*args, **kwargs)
        finally:
            # if libsecp is not available, we are done.
            # NOTE(review): a `return` inside `finally` also swallows any
            # exception raised by the first pass -- confirm this is intended.
            if not ecc_fast._libsecp256k1:
                return
            ecc_fast.do_monkey_patching_of_python_ecdsa_internals_with_libsecp256k1()
        # second pass: libsecp256k1-accelerated implementation
        func(*args, **kwargs)
    return run_test
def needs_test_with_all_aes_implementations(func):
    """Function decorator to run a unit test twice:
    once when pycryptodomex is not available, once when it is.

    NOTE: this is inherently sequential;
    tests running in parallel would break things.
    """
    def run_test(*args, **kwargs):
        if FAST_TESTS:  # if set, only run tests once, using fastest implementation
            func(*args, **kwargs)
            return
        _aes = crypto.AES
        crypto.AES = None  # force the fallback AES implementation
        try:
            # first pass: without pycryptodomex
            func(*args, **kwargs)
        finally:
            # if pycryptodomex is not available, we are done.
            # NOTE(review): a `return` inside `finally` also swallows any
            # exception raised by the first pass -- confirm this is intended.
            if not _aes:
                return
            crypto.AES = _aes
        # second pass: with pycryptodomex restored
        func(*args, **kwargs)
    return run_test
class Test_bitcoin(SequentialTestCase):
    """Tests for crypto primitives and script/address serialization helpers:
    ECIES encrypt/decrypt, message signing, password-based AES, double-SHA256,
    var_int/op_push/script-number encodings, and address_to_script (mainnet).
    """

    def test_libsecp256k1_is_available(self):
        # sanity check: the fast ECC backend must be present in the test env
        self.assertTrue(bool(ecc_fast._libsecp256k1))

    def test_pycryptodomex_is_available(self):
        # sanity check: the fast AES backend must be present in the test env
        self.assertTrue(bool(crypto.AES))

    @needs_test_with_all_aes_implementations
    @needs_test_with_all_ecc_implementations
    def test_crypto(self):
        # exercise both a short ASCII message and a long high-byte payload
        for message in [b"Chancellor on brink of second bailout for banks", b'\xff'*512]:
            self._do_test_crypto(message)

    def _do_test_crypto(self, message):
        """ECIES round-trip and sign/verify with a freshly generated random key."""
        G = ecc.generator()
        _r = G.order()
        pvk = ecdsa.util.randrange(_r)
        Pub = pvk*G
        pubkey_c = Pub.get_public_key_bytes(True)
        addr_c = public_key_to_p2pkh(pubkey_c)
        eck = ecc.ECPrivkey(number_to_string(pvk,_r))
        enc = ecc.ECPubkey(pubkey_c).encrypt_message(message)
        dec = eck.decrypt_message(enc)
        self.assertEqual(message, dec)
        # decrypting twice must give the same plaintext (no internal state leak)
        dec2 = eck.decrypt_message(enc)
        self.assertEqual(message, dec2)
        signature = eck.sign_message(message, True)
        eck.verify_message_for_address(signature, message)

    @needs_test_with_all_ecc_implementations
    def test_msg_signing(self):
        """Deterministic message signatures for known WIF keys (compressed
        and uncompressed), checked against fixed base64 vectors."""
        msg1 = b'Chancellor on brink of second bailout for banks'
        msg2 = b'Electrum-Ganja'
        def sign_message_with_wif_privkey(wif_privkey, msg):
            txin_type, privkey, compressed = deserialize_privkey(wif_privkey)
            key = ecc.ECPrivkey(privkey)
            return key.sign_message(msg, compressed)
        sig1 = sign_message_with_wif_privkey(
            'L1TnU2zbNaAqMoVh65Cyvmcjzbrj41Gs9iTLcWbpJCMynXuap6UN', msg1)
        addr1 = '15hETetDmcXm1mM4sEf7U2KXC9hDHFMSzz'
        sig2 = sign_message_with_wif_privkey(
            '5Hxn5C4SQuiV6e62A1MtZmbSeQyrLFhu5uYks62pU5VBUygK2KD', msg2)
        addr2 = '1GPHVTY8UD9my6jyP4tb2TYJwUbDetyNC6'
        sig1_b64 = base64.b64encode(sig1)
        sig2_b64 = base64.b64encode(sig2)
        self.assertEqual(sig1_b64, b'H/9jMOnj4MFbH3d7t4yCQ9i7DgZU/VZ278w3+ySv2F4yIsdqjsc5ng3kmN8OZAThgyfCZOQxZCWza9V5XzlVY0Y=')
        self.assertEqual(sig2_b64, b'G84dmJ8TKIDKMT9qBRhpX2sNmR0y5t+POcYnFFJCs66lJmAs3T8A6Sbpx7KA6yTQ9djQMabwQXRrDomOkIKGn18=')
        self.assertTrue(ecc.verify_message_with_address(addr1, sig1, msg1))
        self.assertTrue(ecc.verify_message_with_address(addr2, sig2, msg2))
        # negative cases: malformed signature, and a valid sig for the wrong key
        self.assertFalse(ecc.verify_message_with_address(addr1, b'wrong', msg1))
        self.assertFalse(ecc.verify_message_with_address(addr1, sig2, msg1))

    @needs_test_with_all_aes_implementations
    @needs_test_with_all_ecc_implementations
    def test_decrypt_message(self):
        """Decrypt fixed ciphertexts produced with a password-derived key."""
        key = WalletStorage.get_eckey_from_password('pw123')
        self.assertEqual(b'me<(s_s)>age', key.decrypt_message(b'QklFMQMDFtgT3zWSQsa+Uie8H/WvfUjlu9UN9OJtTt3KlgKeSTi6SQfuhcg1uIz9hp3WIUOFGTLr4RNQBdjPNqzXwhkcPi2Xsbiw6UCNJncVPJ6QBg=='))
        self.assertEqual(b'me<(s_s)>age', key.decrypt_message(b'QklFMQKXOXbylOQTSMGfo4MFRwivAxeEEkewWQrpdYTzjPhqjHcGBJwdIhB7DyRfRQihuXx1y0ZLLv7XxLzrILzkl/H4YUtZB4uWjuOAcmxQH4i/Og=='))
        self.assertEqual(b'hey_there' * 100, key.decrypt_message(b'QklFMQLOOsabsXtGQH8edAa6VOUa5wX8/DXmxX9NyHoAx1a5bWgllayGRVPeI2bf0ZdWK0tfal0ap0ZIVKbd2eOJybqQkILqT6E1/Syzq0Zicyb/AA1eZNkcX5y4gzloxinw00ubCA8M7gcUjJpOqbnksATcJ5y2YYXcHMGGfGurWu6uJ/UyrNobRidWppRMW5yR9/6utyNvT6OHIolCMEf7qLcmtneoXEiz51hkRdZS7weNf9mGqSbz9a2NL3sdh1A0feHIjAZgcCKcAvksNUSauf0/FnIjzTyPRpjRDMeDC8Ci3sGiuO3cvpWJwhZfbjcS26KmBv2CHWXfRRNFYOInHZNIXWNAoBB47Il5bGSMd+uXiGr+SQ9tNvcu+BiJNmFbxYqg+oQ8dGAl1DtvY2wJVY8k7vO9BIWSpyIxfGw7EDifhc5vnOmGe016p6a01C3eVGxgl23UYMrP7+fpjOcPmTSF4rk5U5ljEN3MSYqlf1QEv0OqlI9q1TwTK02VBCjMTYxDHsnt04OjNBkNO8v5uJ4NR+UUDBEp433z53I59uawZ+dbk4v4ZExcl8EGmKm3Gzbal/iJ/F7KQuX2b/ySEhLOFVYFWxK73X1nBvCSK2mC2/8fCw8oI5pmvzJwQhcCKTdEIrz3MMvAHqtPScDUOjzhXxInQOCb3+UBj1PPIdqkYLvZss1TEaBwYZjLkVnK2MBj7BaqT6Rp6+5A/fippUKHsnB6eYMEPR2YgDmCHL+4twxHJG6UWdP3ybaKiiAPy2OHNP6PTZ0HrqHOSJzBSDD+Z8YpaRg29QX3UEWlqnSKaan0VYAsV1VeaN0XFX46/TWO0L5tjhYVXJJYGqo6tIQJymxATLFRF6AZaD1Mwd27IAL04WkmoQoXfO6OFfwdp/shudY/1gBkDBvGPICBPtnqkvhGF+ZF3IRkuPwiFWeXmwBxKHsRx/3+aJu32Ml9+za41zVk2viaxcGqwTc5KMexQFLAUwqhv+aIik7U+5qk/gEVSuRoVkihoweFzKolNF+BknH2oB4rZdPixag5Zje3DvgjsSFlOl69W/67t/Gs8htfSAaHlsB8vWRQr9+v/lxTbrAw+O0E+sYGoObQ4qQMyQshNZEHbpPg63eWiHtJJnrVBvOeIbIHzoLDnMDsWVWZSMzAQ1vhX1H5QLgSEbRlKSliVY03kDkh/Nk/KOn+B2q37Ialq4JcRoIYFGJ8AoYEAD0tRuTqFddIclE75HzwaNG7NyKW1plsa72ciOPwsPJsdd5F0qdSQ3OSKtooTn7uf6dXOc4lDkfrVYRlZ0PX'))

    @needs_test_with_all_aes_implementations
    @needs_test_with_all_ecc_implementations
    def test_encrypt_message(self):
        """Encrypt/decrypt round-trip; ciphertexts must differ per call
        (randomized encryption) while both decrypting to the plaintext."""
        key = WalletStorage.get_eckey_from_password('secret_password77')
        msgs = [
            bytes([0] * 555),
            b'cannot think of anything funny'
        ]
        for plaintext in msgs:
            ciphertext1 = key.encrypt_message(plaintext)
            ciphertext2 = key.encrypt_message(plaintext)
            self.assertEqual(plaintext, key.decrypt_message(ciphertext1))
            self.assertEqual(plaintext, key.decrypt_message(ciphertext2))
            self.assertNotEqual(ciphertext1, ciphertext2)

    @needs_test_with_all_ecc_implementations
    def test_sign_transaction(self):
        """Deterministic (RFC 6979-style) transaction signatures against
        fixed DER-encoded vectors."""
        eckey1 = ecc.ECPrivkey(bfh('7e1255fddb52db1729fc3ceb21a46f95b8d9fe94cc83425e936a6c5223bb679d'))
        sig1 = eckey1.sign_transaction(bfh('5a548b12369a53faaa7e51b5081829474ebdd9c924b3a8230b69aa0be254cd94'))
        self.assertEqual(bfh('3045022100902a288b98392254cd23c0e9a49ac6d7920f171b8249a48e484b998f1874a2010220723d844826828f092cf400cb210c4fa0b8cd1b9d1a7f21590e78e022ff6476b9'), sig1)
        eckey2 = ecc.ECPrivkey(bfh('c7ce8c1462c311eec24dff9e2532ac6241e50ae57e7d1833af21942136972f23'))
        sig2 = eckey2.sign_transaction(bfh('642a2e66332f507c92bda910158dfe46fc10afbf72218764899d3af99a043fac'))
        self.assertEqual(bfh('30440220618513f4cfc87dde798ce5febae7634c23e7b9254a1eabf486be820f6a7c2c4702204fef459393a2b931f949e63ced06888f35e286e446dc46feb24b5b5f81c6ed52'), sig2)

    @needs_test_with_all_aes_implementations
    def test_aes_homomorphic(self):
        # NOTE(review): name is a misnomer — this is a pw_encode/pw_decode
        # round-trip test (with non-ASCII payload), not a homomorphism test.
        payload = u'\u66f4\u7a33\u5b9a\u7684\u4ea4\u6613\u5e73\u53f0'
        password = u'secret'
        enc = crypto.pw_encode(payload, password)
        dec = crypto.pw_decode(enc, password)
        self.assertEqual(dec, payload)

    @needs_test_with_all_aes_implementations
    def test_aes_encode_without_password(self):
        """pw_encode with password=None is the identity."""
        payload = u'\u66f4\u7a33\u5b9a\u7684\u4ea4\u6613\u5e73\u53f0'
        enc = crypto.pw_encode(payload, None)
        self.assertEqual(payload, enc)

    @needs_test_with_all_aes_implementations
    def test_aes_deencode_without_password(self):
        """pw_decode with password=None is the identity."""
        payload = u'\u66f4\u7a33\u5b9a\u7684\u4ea4\u6613\u5e73\u53f0'
        enc = crypto.pw_decode(payload, None)
        self.assertEqual(payload, enc)

    @needs_test_with_all_aes_implementations
    def test_aes_decode_with_invalid_password(self):
        """Decrypting with the wrong password must raise, not return garbage."""
        payload = u"blah"
        password = u"uber secret"
        wrong_password = u"not the password"
        enc = crypto.pw_encode(payload, password)
        self.assertRaises(Exception, crypto.pw_decode, enc, wrong_password)

    def test_hash(self):
        """Hash() is double-SHA256; vector checked against a fixed digest."""
        payload = u"test"
        expected = b'\x95MZI\xfdp\xd9\xb8\xbc\xdb5\xd2R&x)\x95\x7f~\xf7\xfalt\xf8\x84\x19\xbd\xc5\xe8"\t\xf4'
        result = Hash(payload)
        self.assertEqual(expected, result)

    def test_var_int(self):
        """Bitcoin CompactSize encoding across all width boundaries
        (1-byte, 0xfd+2, 0xfe+4, 0xff+8)."""
        for i in range(0xfd):
            self.assertEqual(var_int(i), "{:02x}".format(i) )
        self.assertEqual(var_int(0xfd), "fdfd00")
        self.assertEqual(var_int(0xfe), "fdfe00")
        self.assertEqual(var_int(0xff), "fdff00")
        self.assertEqual(var_int(0x1234), "fd3412")
        self.assertEqual(var_int(0xffff), "fdffff")
        self.assertEqual(var_int(0x10000), "fe00000100")
        self.assertEqual(var_int(0x12345678), "fe78563412")
        self.assertEqual(var_int(0xffffffff), "feffffffff")
        self.assertEqual(var_int(0x100000000), "ff0000000001000000")
        self.assertEqual(var_int(0x0123456789abcdef), "ffefcdab8967452301")

    def test_op_push(self):
        """Script push-opcode prefix for a given data length, including the
        OP_PUSHDATA1/2 boundaries at 0x4c and 0x100."""
        self.assertEqual(op_push(0x00), '00')
        self.assertEqual(op_push(0x12), '12')
        self.assertEqual(op_push(0x4b), '4b')
        self.assertEqual(op_push(0x4c), '4c4c')
        self.assertEqual(op_push(0xfe), '4cfe')
        self.assertEqual(op_push(0xff), '4cff')
        self.assertEqual(op_push(0x100), '4d0001')
        self.assertEqual(op_push(0x1234), '4d3412')
        self.assertEqual(op_push(0xfffe), '4dfeff')
        self.assertEqual(op_push(0xffff), '4dffff')
        self.assertEqual(op_push(0x10000), '4e00000100')
        self.assertEqual(op_push(0x12345678), '4e78563412')

    def test_script_num_to_hex(self):
        # test vectors from https://github.com/btcsuite/btcd/blob/fdc2bc867bda6b351191b5872d2da8270df00d13/txscript/scriptnum.go#L77
        self.assertEqual(script_num_to_hex(127), '7f')
        self.assertEqual(script_num_to_hex(-127), 'ff')
        self.assertEqual(script_num_to_hex(128), '8000')
        self.assertEqual(script_num_to_hex(-128), '8080')
        self.assertEqual(script_num_to_hex(129), '8100')
        self.assertEqual(script_num_to_hex(-129), '8180')
        self.assertEqual(script_num_to_hex(256), '0001')
        self.assertEqual(script_num_to_hex(-256), '0081')
        self.assertEqual(script_num_to_hex(32767), 'ff7f')
        self.assertEqual(script_num_to_hex(-32767), 'ffff')
        self.assertEqual(script_num_to_hex(32768), '008000')
        self.assertEqual(script_num_to_hex(-32768), '008080')

    def test_push_script(self):
        # https://github.com/bitcoin/bips/blob/master/bip-0062.mediawiki#push-operators
        self.assertEqual(push_script(''), bh2u(bytes([opcodes.OP_0])))
        self.assertEqual(push_script('07'), bh2u(bytes([opcodes.OP_7])))
        self.assertEqual(push_script('10'), bh2u(bytes([opcodes.OP_16])))
        self.assertEqual(push_script('81'), bh2u(bytes([opcodes.OP_1NEGATE])))
        self.assertEqual(push_script('11'), '0111')
        self.assertEqual(push_script(75 * '42'), '4b' + 75 * '42')
        self.assertEqual(push_script(76 * '42'), bh2u(bytes([opcodes.OP_PUSHDATA1]) + bfh('4c' + 76 * '42')))
        self.assertEqual(push_script(100 * '42'), bh2u(bytes([opcodes.OP_PUSHDATA1]) + bfh('64' + 100 * '42')))
        self.assertEqual(push_script(255 * '42'), bh2u(bytes([opcodes.OP_PUSHDATA1]) + bfh('ff' + 255 * '42')))
        self.assertEqual(push_script(256 * '42'), bh2u(bytes([opcodes.OP_PUSHDATA2]) + bfh('0001' + 256 * '42')))
        self.assertEqual(push_script(520 * '42'), bh2u(bytes([opcodes.OP_PUSHDATA2]) + bfh('0802' + 520 * '42')))

    def test_add_number_to_script(self):
        # https://github.com/bitcoin/bips/blob/master/bip-0062.mediawiki#numbers
        self.assertEqual(add_number_to_script(0), bytes([opcodes.OP_0]))
        self.assertEqual(add_number_to_script(7), bytes([opcodes.OP_7]))
        self.assertEqual(add_number_to_script(16), bytes([opcodes.OP_16]))
        self.assertEqual(add_number_to_script(-1), bytes([opcodes.OP_1NEGATE]))
        self.assertEqual(add_number_to_script(-127), bfh('01ff'))
        self.assertEqual(add_number_to_script(-2), bfh('0182'))
        self.assertEqual(add_number_to_script(17), bfh('0111'))
        self.assertEqual(add_number_to_script(127), bfh('017f'))
        self.assertEqual(add_number_to_script(-32767), bfh('02ffff'))
        self.assertEqual(add_number_to_script(-128), bfh('028080'))
        self.assertEqual(add_number_to_script(128), bfh('028000'))
        self.assertEqual(add_number_to_script(32767), bfh('02ff7f'))
        self.assertEqual(add_number_to_script(-8388607), bfh('03ffffff'))
        self.assertEqual(add_number_to_script(-32768), bfh('03008080'))
        self.assertEqual(add_number_to_script(32768), bfh('03008000'))
        self.assertEqual(add_number_to_script(8388607), bfh('03ffff7f'))
        self.assertEqual(add_number_to_script(-2147483647), bfh('04ffffffff'))
        self.assertEqual(add_number_to_script(-8388608 ), bfh('0400008080'))
        self.assertEqual(add_number_to_script(8388608), bfh('0400008000'))
        self.assertEqual(add_number_to_script(2147483647), bfh('04ffffff7f'))

    def test_address_to_script(self):
        """Mainnet address -> scriptPubKey for bech32 (native segwit) and
        base58 (P2PKH / P2SH) encodings."""
        # bech32 native segwit
        # test vectors from BIP-0173
        self.assertEqual(address_to_script('BC1QW508D6QEJXTDG4Y5R3ZARVARY0C5XW7KV8F3T4'), '0014751e76e8199196d454941c45d1b3a323f1433bd6')
        self.assertEqual(address_to_script('bc1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k7grplx'), '5128751e76e8199196d454941c45d1b3a323f1433bd6751e76e8199196d454941c45d1b3a323f1433bd6')
        self.assertEqual(address_to_script('BC1SW50QA3JX3S'), '6002751e')
        self.assertEqual(address_to_script('bc1zw508d6qejxtdg4y5r3zarvaryvg6kdaj'), '5210751e76e8199196d454941c45d1b3a323')
        # base58 P2PKH
        self.assertEqual(address_to_script('14gcRovpkCoGkCNBivQBvw7eso7eiNAbxG'), '76a91428662c67561b95c79d2257d2a93d9d151c977e9188ac')
        self.assertEqual(address_to_script('1BEqfzh4Y3zzLosfGhw1AsqbEKVW6e1qHv'), '76a914704f4b81cadb7bf7e68c08cd3657220f680f863c88ac')
        # base58 P2SH
        self.assertEqual(address_to_script('35ZqQJcBQMZ1rsv8aSuJ2wkC7ohUCQMJbT'), 'a9142a84cf00d47f699ee7bbc1dea5ec1bdecb4ac15487')
        self.assertEqual(address_to_script('3PyjzJ3im7f7bcV724GR57edKDqoZvH7Ji'), 'a914f47c8954e421031ad04ecd8e7752c9479206b9d387')
class Test_bitcoin_testnet(TestCaseForTestnet):
    """Testnet counterpart of Test_bitcoin.test_address_to_script."""

    def test_address_to_script(self):
        """Testnet address -> scriptPubKey for bech32 and base58 encodings."""
        # bech32 native segwit
        # test vectors from BIP-0173
        self.assertEqual(address_to_script('tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7'), '00201863143c14c5166804bd19203356da136c985678cd4d27a1b8c6329604903262')
        self.assertEqual(address_to_script('tb1qqqqqp399et2xygdj5xreqhjjvcmzhxw4aywxecjdzew6hylgvsesrxh6hy'), '0020000000c4a5cad46221b2a187905e5266362b99d5e91c6ce24d165dab93e86433')
        # base58 P2PKH
        self.assertEqual(address_to_script('mutXcGt1CJdkRvXuN2xoz2quAAQYQ59bRX'), '76a9149da64e300c5e4eb4aaffc9c2fd465348d5618ad488ac')
        self.assertEqual(address_to_script('miqtaRTkU3U8rzwKbEHx3g8FSz8GJtPS3K'), '76a914247d2d5b6334bdfa2038e85b20fc15264f8e5d2788ac')
        # base58 P2SH
        self.assertEqual(address_to_script('2N3LSvr3hv5EVdfcrxg2Yzecf3SRvqyBE4p'), 'a9146eae23d8c4a941316017946fc761a7a6c85561fb87')
        self.assertEqual(address_to_script('2NE4ZdmxFmUgwu5wtfoN2gVniyMgRDYq1kk'), 'a914e4567743d378957cd2ee7072da74b1203c1a7a0b87')
class Test_xprv_xpub(SequentialTestCase):
    """BIP32 extended-key tests: derivation against the BIP-0032 test
    vectors, xprv->xpub conversion, validity/type checks, and the mainnet
    SLIP-132 version-byte prefixes (xprv/yprv/zprv etc.)."""

    # fixture: (xprv, xpub, script type) triples for each supported xtype
    xprv_xpub = (
        # Taken from test vectors in https://en.bitcoin.it/wiki/BIP_0032_TestVectors
        {'xprv': 'xprvA41z7zogVVwxVSgdKUHDy1SKmdb533PjDz7J6N6mV6uS3ze1ai8FHa8kmHScGpWmj4WggLyQjgPie1rFSruoUihUZREPSL39UNdE3BBDu76',
         'xpub': 'xpub6H1LXWLaKsWFhvm6RVpEL9P4KfRZSW7abD2ttkWP3SSQvnyA8FSVqNTEcYFgJS2UaFcxupHiYkro49S8yGasTvXEYBVPamhGW6cFJodrTHy',
         'xtype': 'standard'},
        {'xprv': 'yprvAJEYHeNEPcyBoQYM7sGCxDiNCTX65u4ANgZuSGTrKN5YCC9MP84SBayrgaMyZV7zvkHrr3HVPTK853s2SPk4EttPazBZBmz6QfDkXeE8Zr7',
         'xpub': 'ypub6XDth9u8DzXV1tcpDtoDKMf6kVMaVMn1juVWEesTshcX4zUVvfNgjPJLXrD9N7AdTLnbHFL64KmBn3SNaTe69iZYbYCqLCCNPZKbLz9niQ4',
         'xtype': 'p2wpkh-p2sh'},
        {'xprv': 'zprvAWgYBBk7JR8GkraNZJeEodAp2UR1VRWJTXyV1ywuUVs1awUgTiBS1ZTDtLA5F3MFDn1LZzu8dUpSKdT7ToDpvEG6PQu4bJs7zQY47Sd3sEZ',
         'xpub': 'zpub6jftahH18ngZyLeqfLBFAm7YaWFVttE9pku5pNMX2qPzTjoq1FVgZMmhjecyB2nqFb31gHE9vNvbaggU6vvWpNZbXEWLLUjYjFqG95LNyT8',
         'xtype': 'p2wpkh'},
    )

    def _do_test_bip32(self, seed, sequence):
        """Derive along `sequence` (e.g. "m/0'/1/...") from `seed` (hex).
        Also checks public derivation agrees with private derivation at
        every non-hardened step. Returns (xpub, xprv) at the final path."""
        xprv, xpub = bip32_root(bfh(seed), 'standard')
        self.assertEqual("m/", sequence[0:2])
        path = 'm'
        sequence = sequence[2:]
        for n in sequence.split('/'):
            child_path = path + '/' + n
            if n[-1] != "'":
                # non-hardened step: public derivation must match
                xpub2 = bip32_public_derivation(xpub, path, child_path)
            xprv, xpub = bip32_private_derivation(xprv, path, child_path)
            if n[-1] != "'":
                self.assertEqual(xpub, xpub2)
            path = child_path
        return xpub, xprv

    @needs_test_with_all_ecc_implementations
    def test_bip32(self):
        # see https://en.bitcoin.it/wiki/BIP_0032_TestVectors
        xpub, xprv = self._do_test_bip32("000102030405060708090a0b0c0d0e0f", "m/0'/1/2'/2/1000000000")
        self.assertEqual("xpub6H1LXWLaKsWFhvm6RVpEL9P4KfRZSW7abD2ttkWP3SSQvnyA8FSVqNTEcYFgJS2UaFcxupHiYkro49S8yGasTvXEYBVPamhGW6cFJodrTHy", xpub)
        self.assertEqual("xprvA41z7zogVVwxVSgdKUHDy1SKmdb533PjDz7J6N6mV6uS3ze1ai8FHa8kmHScGpWmj4WggLyQjgPie1rFSruoUihUZREPSL39UNdE3BBDu76", xprv)
        xpub, xprv = self._do_test_bip32("fffcf9f6f3f0edeae7e4e1dedbd8d5d2cfccc9c6c3c0bdbab7b4b1aeaba8a5a29f9c999693908d8a8784817e7b7875726f6c696663605d5a5754514e4b484542","m/0/2147483647'/1/2147483646'/2")
        self.assertEqual("xpub6FnCn6nSzZAw5Tw7cgR9bi15UV96gLZhjDstkXXxvCLsUXBGXPdSnLFbdpq8p9HmGsApME5hQTZ3emM2rnY5agb9rXpVGyy3bdW6EEgAtqt", xpub)
        self.assertEqual("xprvA2nrNbFZABcdryreWet9Ea4LvTJcGsqrMzxHx98MMrotbir7yrKCEXw7nadnHM8Dq38EGfSh6dqA9QWTyefMLEcBYJUuekgW4BYPJcr9E7j", xprv)

    @needs_test_with_all_ecc_implementations
    def test_xpub_from_xprv(self):
        for xprv_details in self.xprv_xpub:
            result = xpub_from_xprv(xprv_details['xprv'])
            self.assertEqual(result, xprv_details['xpub'])

    @needs_test_with_all_ecc_implementations
    def test_is_xpub(self):
        for xprv_details in self.xprv_xpub:
            xpub = xprv_details['xpub']
            self.assertTrue(is_xpub(xpub))
        # negative cases: garbage and a key with a corrupted checksum
        self.assertFalse(is_xpub('xpub1nval1d'))
        self.assertFalse(is_xpub('xpub661MyMwAqRbcFWohJWt7PHsFEJfZAvw9ZxwQoDa4SoMgsDDM1T7WK3u9E4edkC4ugRnZ8E4xDZRpk8Rnts3Nbt97dPwT52WRONGBADWRONG'))

    @needs_test_with_all_ecc_implementations
    def test_xpub_type(self):
        for xprv_details in self.xprv_xpub:
            xpub = xprv_details['xpub']
            self.assertEqual(xprv_details['xtype'], xpub_type(xpub))

    @needs_test_with_all_ecc_implementations
    def test_is_xprv(self):
        for xprv_details in self.xprv_xpub:
            xprv = xprv_details['xprv']
            self.assertTrue(is_xprv(xprv))
        # negative cases: garbage and a key with a corrupted checksum
        self.assertFalse(is_xprv('xprv1nval1d'))
        self.assertFalse(is_xprv('xprv661MyMwAqRbcFWohJWt7PHsFEJfZAvw9ZxwQoDa4SoMgsDDM1T7WK3u9E4edkC4ugRnZ8E4xDZRpk8Rnts3Nbt97dPwT52WRONGBADWRONG'))

    def test_is_bip32_derivation(self):
        self.assertTrue(is_bip32_derivation("m/0'/1"))
        self.assertTrue(is_bip32_derivation("m/0'/0'"))
        self.assertTrue(is_bip32_derivation("m/44'/0'/0'/0/0"))
        self.assertTrue(is_bip32_derivation("m/49'/0'/0'/0/0"))
        self.assertFalse(is_bip32_derivation("mmmmmm"))
        self.assertFalse(is_bip32_derivation("n/"))
        self.assertFalse(is_bip32_derivation(""))
        self.assertFalse(is_bip32_derivation("m/q8462"))

    def test_version_bytes(self):
        """Each configured XPRV/XPUB header must base58-encode to its expected
        human-readable prefix for both all-zero and all-0xff key bodies."""
        xprv_headers_b58 = {
            'standard': 'xprv',
            'p2wpkh-p2sh': 'yprv',
            'p2wsh-p2sh': 'Yprv',
            'p2wpkh': 'zprv',
            'p2wsh': 'Zprv',
        }
        xpub_headers_b58 = {
            'standard': 'xpub',
            'p2wpkh-p2sh': 'ypub',
            'p2wsh-p2sh': 'Ypub',
            'p2wpkh': 'zpub',
            'p2wsh': 'Zpub',
        }
        for xtype, xkey_header_bytes in constants.net.XPRV_HEADERS.items():
            xkey_header_bytes = bfh("%08x" % xkey_header_bytes)
            xkey_bytes = xkey_header_bytes + bytes([0] * 74)
            xkey_b58 = EncodeBase58Check(xkey_bytes)
            self.assertTrue(xkey_b58.startswith(xprv_headers_b58[xtype]))
            xkey_bytes = xkey_header_bytes + bytes([255] * 74)
            xkey_b58 = EncodeBase58Check(xkey_bytes)
            self.assertTrue(xkey_b58.startswith(xprv_headers_b58[xtype]))
        for xtype, xkey_header_bytes in constants.net.XPUB_HEADERS.items():
            xkey_header_bytes = bfh("%08x" % xkey_header_bytes)
            xkey_bytes = xkey_header_bytes + bytes([0] * 74)
            xkey_b58 = EncodeBase58Check(xkey_bytes)
            self.assertTrue(xkey_b58.startswith(xpub_headers_b58[xtype]))
            xkey_bytes = xkey_header_bytes + bytes([255] * 74)
            xkey_b58 = EncodeBase58Check(xkey_bytes)
            self.assertTrue(xkey_b58.startswith(xpub_headers_b58[xtype]))
class Test_xprv_xpub_testnet(TestCaseForTestnet):
    """Testnet counterpart of Test_xprv_xpub.test_version_bytes
    (tprv/uprv/vprv and tpub/upub/vpub prefixes)."""

    def test_version_bytes(self):
        xprv_headers_b58 = {
            'standard': 'tprv',
            'p2wpkh-p2sh': 'uprv',
            'p2wsh-p2sh': 'Uprv',
            'p2wpkh': 'vprv',
            'p2wsh': 'Vprv',
        }
        xpub_headers_b58 = {
            'standard': 'tpub',
            'p2wpkh-p2sh': 'upub',
            'p2wsh-p2sh': 'Upub',
            'p2wpkh': 'vpub',
            'p2wsh': 'Vpub',
        }
        # check both extreme key bodies (all-0x00 and all-0xff) so the
        # prefix is proven independent of the payload
        for xtype, xkey_header_bytes in constants.net.XPRV_HEADERS.items():
            xkey_header_bytes = bfh("%08x" % xkey_header_bytes)
            xkey_bytes = xkey_header_bytes + bytes([0] * 74)
            xkey_b58 = EncodeBase58Check(xkey_bytes)
            self.assertTrue(xkey_b58.startswith(xprv_headers_b58[xtype]))
            xkey_bytes = xkey_header_bytes + bytes([255] * 74)
            xkey_b58 = EncodeBase58Check(xkey_bytes)
            self.assertTrue(xkey_b58.startswith(xprv_headers_b58[xtype]))
        for xtype, xkey_header_bytes in constants.net.XPUB_HEADERS.items():
            xkey_header_bytes = bfh("%08x" % xkey_header_bytes)
            xkey_bytes = xkey_header_bytes + bytes([0] * 74)
            xkey_b58 = EncodeBase58Check(xkey_bytes)
            self.assertTrue(xkey_b58.startswith(xpub_headers_b58[xtype]))
            xkey_bytes = xkey_header_bytes + bytes([255] * 74)
            xkey_b58 = EncodeBase58Check(xkey_bytes)
            self.assertTrue(xkey_b58.startswith(xpub_headers_b58[xtype]))
class Test_keyImport(SequentialTestCase):
    """Private-key import/export tests driven by a fixture table covering
    compressed/uncompressed WIF, script-type-prefixed keys, and minikeys."""

    # fixture rows: WIF input, canonical export form, derived pubkey/address,
    # and expected classification flags
    priv_pub_addr = (
           {'priv': 'KzMFjMC2MPadjvX5Cd7b8AKKjjpBSoRKUTpoAtN6B3J9ezWYyXS6',
            'exported_privkey': 'p2pkh:KzMFjMC2MPadjvX5Cd7b8AKKjjpBSoRKUTpoAtN6B3J9ezWYyXS6',
            'pub': '02c6467b7e621144105ed3e4835b0b4ab7e35266a2ae1c4f8baa19e9ca93452997',
            'address': '17azqT8T16coRmWKYFj3UjzJuxiYrYFRBR',
            'minikey' : False,
            'txin_type': 'p2pkh',
            'compressed': True,
            'addr_encoding': 'base58',
            'scripthash': 'c9aecd1fef8d661a42c560bf75c8163e337099800b8face5ca3d1393a30508a7'},
           {'priv': 'p2pkh:Kzj8VjwpZ99bQqVeUiRXrKuX9mLr1o6sWxFMCBJn1umC38BMiQTD',
            'exported_privkey': 'p2pkh:Kzj8VjwpZ99bQqVeUiRXrKuX9mLr1o6sWxFMCBJn1umC38BMiQTD',
            'pub': '0352d78b4b37e0f6d4e164423436f2925fa57817467178eca550a88f2821973c41',
            'address': '1GXgZ5Qi6gmXTHVSpUPZLy4Ci2nbfb3ZNb',
            'minikey': False,
            'txin_type': 'p2pkh',
            'compressed': True,
            'addr_encoding': 'base58',
            'scripthash': 'a9b2a76fc196c553b352186dfcca81fcf323a721cd8431328f8e9d54216818c1'},
           {'priv': '5Hxn5C4SQuiV6e62A1MtZmbSeQyrLFhu5uYks62pU5VBUygK2KD',
            'exported_privkey': 'p2pkh:5Hxn5C4SQuiV6e62A1MtZmbSeQyrLFhu5uYks62pU5VBUygK2KD',
            'pub': '04e5fe91a20fac945845a5518450d23405ff3e3e1ce39827b47ee6d5db020a9075422d56a59195ada0035e4a52a238849f68e7a325ba5b2247013e0481c5c7cb3f',
            'address': '1GPHVTY8UD9my6jyP4tb2TYJwUbDetyNC6',
            'minikey': False,
            'txin_type': 'p2pkh',
            'compressed': False,
            'addr_encoding': 'base58',
            'scripthash': 'f5914651408417e1166f725a5829ff9576d0dbf05237055bf13abd2af7f79473'},
           {'priv': 'p2pkh:5KhYQCe1xd5g2tqpmmGpUWDpDuTbA8vnpbiCNDwMPAx29WNQYfN',
            'exported_privkey': 'p2pkh:5KhYQCe1xd5g2tqpmmGpUWDpDuTbA8vnpbiCNDwMPAx29WNQYfN',
            'pub': '048f0431b0776e8210376c81280011c2b68be43194cb00bd47b7e9aa66284b713ce09556cde3fee606051a07613f3c159ef3953b8927c96ae3dae94a6ba4182e0e',
            'address': '147kiRHHm9fqeMQSgqf4k35XzuWLP9fmmS',
            'minikey': False,
            'txin_type': 'p2pkh',
            'compressed': False,
            'addr_encoding': 'base58',
            'scripthash': '6dd2e07ad2de9ba8eec4bbe8467eb53f8845acff0d9e6f5627391acc22ff62df'},
           {'priv': 'LHJnnvRzsdrTX2j5QeWVsaBkabK7gfMNqNNqxnbBVRaJYfk24iJz',
            'exported_privkey': 'p2wpkh-p2sh:Kz9XebiCXL2BZzhYJViiHDzn5iup1povWV8aqstzWU4sz1K5nVva',
            'pub': '0279ad237ca0d812fb503ab86f25e15ebd5fa5dd95c193639a8a738dcd1acbad81',
            'address': '3GeVJB3oKr7psgKR6BTXSxKtWUkfsHHhk7',
            'minikey': False,
            'txin_type': 'p2wpkh-p2sh',
            'compressed': True,
            'addr_encoding': 'base58',
            'scripthash': 'd7b04e882fa6b13246829ac552a2b21461d9152eb00f0a6adb58457a3e63d7c5'},
           {'priv': 'p2wpkh-p2sh:L3CZH1pm87X4bbE6mSGvZnAZ1KcFDRomBudUkrkBG7EZhDtBVXMW',
            'exported_privkey': 'p2wpkh-p2sh:L3CZH1pm87X4bbE6mSGvZnAZ1KcFDRomBudUkrkBG7EZhDtBVXMW',
            'pub': '0229da20a15b3363b2c28e3c5093c180b56c439df0b968a970366bb1f38435361e',
            'address': '3C79goMwT7zSTjXnPoCg6VFGAnUpZAkyus',
            'minikey': False,
            'txin_type': 'p2wpkh-p2sh',
            'compressed': True,
            'addr_encoding': 'base58',
            'scripthash': '714bf6bfe1083e69539f40d4c7a7dca85d187471b35642e55f20d7e866494cf7'},
           {'priv': 'L8g5V8kFFeg2WbecahRSdobARbHz2w2STH9S8ePHVSY4fmia7Rsj',
            'exported_privkey': 'p2wpkh:Kz6SuyPM5VktY5dr2d2YqdVgBA6LCWkiHqXJaC3BzxnMPSUuYzmF',
            'pub': '03e9f948421aaa89415dc5f281a61b60dde12aae3181b3a76cd2d849b164fc6d0b',
            'address': 'bc1qqmpt7u5e9hfznljta5gnvhyvfd2kdd0r90hwue',
            'minikey': False,
            'txin_type': 'p2wpkh',
            'compressed': True,
            'addr_encoding': 'bech32',
            'scripthash': '1929acaaef3a208c715228e9f1ca0318e3a6b9394ab53c8d026137f847ecf97b'},
           {'priv': 'p2wpkh:KyDWy5WbjLA58Zesh1o8m3pADGdJ3v33DKk4m7h8BD5zDKDmDFwo',
            'exported_privkey': 'p2wpkh:KyDWy5WbjLA58Zesh1o8m3pADGdJ3v33DKk4m7h8BD5zDKDmDFwo',
            'pub': '038c57657171c1f73e34d5b3971d05867d50221ad94980f7e87cbc2344425e6a1e',
            'address': 'bc1qpakeeg4d9ydyjxd8paqrw4xy9htsg532xzxn50',
            'minikey': False,
            'txin_type': 'p2wpkh',
            'compressed': True,
            'addr_encoding': 'bech32',
            'scripthash': '242f02adde84ebb2a7dd778b2f3a81b3826f111da4d8960d826d7a4b816cb261'},
           # from http://bitscan.com/articles/security/spotlight-on-mini-private-keys
           {'priv': 'SzavMBLoXU6kDrqtUVmffv',
            'exported_privkey': 'p2pkh:L53fCHmQhbNp1B4JipfBtfeHZH7cAibzG9oK19XfiFzxHgAkz6JK',
            'pub': '02588d202afcc1ee4ab5254c7847ec25b9a135bbda0f2bc69ee1a714749fd77dc9',
            'address': '19GuvDvMMUZ8vq84wT79fvnvhMd5MnfTkR',
            'minikey': True,
            'txin_type': 'p2pkh',
            'compressed': True,  # this is actually ambiguous... issue #2748
            'addr_encoding': 'base58',
            'scripthash': '60ad5a8b922f758cd7884403e90ee7e6f093f8d21a0ff24c9a865e695ccefdf1'},
    )

    @needs_test_with_all_ecc_implementations
    def test_public_key_from_private_key(self):
        """deserialize_privkey + pubkey derivation must match the fixture."""
        for priv_details in self.priv_pub_addr:
            txin_type, privkey, compressed = deserialize_privkey(priv_details['priv'])
            result = ecc.ECPrivkey(privkey).get_public_key_hex(compressed=compressed)
            self.assertEqual(priv_details['pub'], result)
            self.assertEqual(priv_details['txin_type'], txin_type)
            self.assertEqual(priv_details['compressed'], compressed)

    @needs_test_with_all_ecc_implementations
    def test_address_from_private_key(self):
        for priv_details in self.priv_pub_addr:
            addr2 = address_from_private_key(priv_details['priv'])
            self.assertEqual(priv_details['address'], addr2)

    @needs_test_with_all_ecc_implementations
    def test_is_valid_address(self):
        """is_address must accept addresses only, and the base58/bech32
        sub-predicates must agree with the fixture's addr_encoding."""
        for priv_details in self.priv_pub_addr:
            addr = priv_details['address']
            self.assertFalse(is_address(priv_details['priv']))
            self.assertFalse(is_address(priv_details['pub']))
            self.assertTrue(is_address(addr))
            is_enc_b58 = priv_details['addr_encoding'] == 'base58'
            self.assertEqual(is_enc_b58, is_b58_address(addr))
            is_enc_bech32 = priv_details['addr_encoding'] == 'bech32'
            self.assertEqual(is_enc_bech32, is_segwit_address(addr))
        self.assertFalse(is_address("not an address"))

    @needs_test_with_all_ecc_implementations
    def test_is_private_key(self):
        for priv_details in self.priv_pub_addr:
            self.assertTrue(is_private_key(priv_details['priv']))
            self.assertTrue(is_private_key(priv_details['exported_privkey']))
            self.assertFalse(is_private_key(priv_details['pub']))
            self.assertFalse(is_private_key(priv_details['address']))
        self.assertFalse(is_private_key("not a privkey"))

    @needs_test_with_all_ecc_implementations
    def test_serialize_privkey(self):
        """deserialize then re-serialize must produce the canonical export form."""
        for priv_details in self.priv_pub_addr:
            txin_type, privkey, compressed = deserialize_privkey(priv_details['priv'])
            priv2 = serialize_privkey(privkey, compressed, txin_type)
            self.assertEqual(priv_details['exported_privkey'], priv2)

    @needs_test_with_all_ecc_implementations
    def test_address_to_scripthash(self):
        for priv_details in self.priv_pub_addr:
            sh = address_to_scripthash(priv_details['address'])
            self.assertEqual(priv_details['scripthash'], sh)

    @needs_test_with_all_ecc_implementations
    def test_is_minikey(self):
        for priv_details in self.priv_pub_addr:
            minikey = priv_details['minikey']
            priv = priv_details['priv']
            self.assertEqual(minikey, is_minikey(priv))

    @needs_test_with_all_ecc_implementations
    def test_is_compressed(self):
        for priv_details in self.priv_pub_addr:
            self.assertEqual(priv_details['compressed'],
                             is_compressed(priv_details['priv']))
class Test_seeds(SequentialTestCase):
    """Seed-phrase classification tests: new (Electrum), old (pre-2.0),
    2FA, and segwit seed types, including case/whitespace normalization."""

    # fixture: (mnemonic, expected seed_type) pairs; '' means "not a valid seed"
    mnemonics = {
        ('cell dumb heartbeat north boom tease ship baby bright kingdom rare squeeze', 'old'),
        ('cell dumb heartbeat north boom tease ' * 4, 'old'),
        ('cell dumb heartbeat north boom tease ship baby bright kingdom rare badword', ''),
        ('cElL DuMb hEaRtBeAt nOrTh bOoM TeAsE ShIp bAbY BrIgHt kInGdOm rArE SqUeEzE', 'old'),
        ('   cElL  DuMb hEaRtBeAt nOrTh bOoM  TeAsE ShIp    bAbY BrIgHt kInGdOm rArE SqUeEzE   ', 'old'),
        # below seed is actually 'invalid old' as it maps to 33 hex chars
        ('hurry idiot prefer sunset mention mist jaw inhale impossible kingdom rare squeeze', 'old'),
        ('cram swing cover prefer miss modify ritual silly deliver chunk behind inform able', 'standard'),
        ('cram swing cover prefer miss modify ritual silly deliver chunk behind inform', ''),
        ('ostrich security deer aunt climb inner alpha arm mutual marble solid task', 'standard'),
        ('OSTRICH SECURITY DEER AUNT CLIMB INNER ALPHA ARM MUTUAL MARBLE SOLID TASK', 'standard'),
        ('   oStRiCh sEcUrItY DeEr aUnT ClImB    InNeR AlPhA ArM MuTuAl mArBlE SoLiD TaSk  ', 'standard'),
        ('x8', 'standard'),
        ('science dawn member doll dutch real can brick knife deny drive list', '2fa'),
        ('science dawn member doll dutch real ca brick knife deny drive list', ''),
        (' sCience dawn member doll Dutch rEAl can brick knife deny drive lisT', '2fa'),
        ('frost pig brisk excite novel report camera enlist axis nation novel desert', 'segwit'),
        ('  fRoSt pig brisk excIte novel rePort CamEra enlist axis nation nOVeL dEsert ', 'segwit'),
        ('9dk', 'segwit'),
    }

    def test_new_seed(self):
        seed = "cram swing cover prefer miss modify ritual silly deliver chunk behind inform able"
        self.assertTrue(is_new_seed(seed))
        seed = "cram swing cover prefer miss modify ritual silly deliver chunk behind inform"
        self.assertFalse(is_new_seed(seed))

    def test_old_seed(self):
        # old seeds are either 12/24 words or 32/64 hex chars (16/32 bytes)
        self.assertTrue(is_old_seed(" ".join(["like"] * 12)))
        self.assertFalse(is_old_seed(" ".join(["like"] * 18)))
        self.assertTrue(is_old_seed(" ".join(["like"] * 24)))
        self.assertFalse(is_old_seed("not a seed"))
        self.assertTrue(is_old_seed("0123456789ABCDEF" * 2))
        self.assertTrue(is_old_seed("0123456789ABCDEF" * 4))

    def test_seed_type(self):
        for seed_words, _type in self.mnemonics:
            self.assertEqual(_type, seed_type(seed_words), msg=seed_words)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.