hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a1179d145b552985cec412737cba87b50083678
| 16,704
|
py
|
Python
|
third_party/blink/tools/blinkpy/w3c/import_notifier.py
|
iridium-browser/iridium-browser
|
907e31cf5ce5ad14d832796e3a7c11e496828959
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 575
|
2015-06-18T23:58:20.000Z
|
2022-03-23T09:32:39.000Z
|
third_party/blink/tools/blinkpy/w3c/import_notifier.py
|
iridium-browser/iridium-browser
|
907e31cf5ce5ad14d832796e3a7c11e496828959
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113
|
2015-05-04T09:58:14.000Z
|
2022-01-31T19:35:03.000Z
|
third_party/blink/tools/blinkpy/w3c/import_notifier.py
|
iridium-browser/iridium-browser
|
907e31cf5ce5ad14d832796e3a7c11e496828959
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 52
|
2015-07-14T10:40:50.000Z
|
2022-03-15T01:11:49.000Z
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Sends notifications after automatic imports from web-platform-tests (WPT).
Automatically file bugs for new failures caused by WPT imports for opted-in
directories.
Design doc: https://docs.google.com/document/d/1W3V81l94slAC_rPcTKWXgv3YxRxtlSIAxi3yj6NsbBw/edit?usp=sharing
"""
from collections import defaultdict
import logging
import re
from blinkpy.common.net.luci_auth import LuciAuth
from blinkpy.common.path_finder import PathFinder
from blinkpy.w3c.common import WPT_GH_URL
from blinkpy.w3c.directory_owners_extractor import DirectoryOwnersExtractor
from blinkpy.w3c.monorail import MonorailAPI, MonorailIssue
from blinkpy.w3c.wpt_expectations_updater import WPTExpectationsUpdater
# Module-level logger for the import notifier.
_log = logging.getLogger(__name__)
# Base URL for linking to individual upstream WPT commits on GitHub.
GITHUB_COMMIT_PREFIX = WPT_GH_URL + 'commit/'
# Short Gerrit code-review URL prefix for Chromium CLs (crrev.com redirector).
SHORT_GERRIT_PREFIX = 'https://crrev.com/c/'
class ImportNotifier(object):
    """Files bugs for new failures introduced by a WPT import.

    Examines baseline changes and new test expectations produced by the
    import CL, groups new failures by their owned directory, and files
    Monorail bugs for directories that opted in via WPT-NOTIFY.
    """

    def __init__(self, host, chromium_git, local_wpt):
        self.host = host
        self.git = chromium_git
        self.local_wpt = local_wpt
        # Stored as a class (not an instance) so tests can inject a fake;
        # instantiated lazily in _get_monorail_api().
        self._monorail_api = MonorailAPI
        self.default_port = host.port_factory.get()
        self.finder = PathFinder(host.filesystem)
        self.owners_extractor = DirectoryOwnersExtractor(host)
        # Maps owned directory (relative to web_tests) -> list of TestFailure.
        self.new_failures_by_directory = defaultdict(list)

    def main(self,
             wpt_revision_start,
             wpt_revision_end,
             rebaselined_tests,
             test_expectations,
             issue,
             patchset,
             dry_run=True,
             service_account_key_json=None):
        """Files bug reports for new failures.

        Args:
            wpt_revision_start: The start of the imported WPT revision range
                (exclusive), i.e. the last imported revision.
            wpt_revision_end: The end of the imported WPT revision range
                (inclusive), i.e. the current imported revision.
            rebaselined_tests: A list of test names that have been rebaselined.
            test_expectations: A dictionary mapping names of tests that cannot
                be rebaselined to a list of new test expectation lines.
            issue: The issue number of the import CL (a string).
            patchset: The patchset number of the import CL (a string).
            dry_run: If True, no bugs will be actually filed to crbug.com.
            service_account_key_json: The path to a JSON private key of a
                service account for accessing Monorail. If None, try to get an
                access token from luci-auth.

        Note: "test names" are paths of the tests relative to web_tests.
        """
        gerrit_url = SHORT_GERRIT_PREFIX + issue
        gerrit_url_with_ps = gerrit_url + '/' + patchset + '/'
        changed_test_baselines = self.find_changed_baselines_of_tests(
            rebaselined_tests)
        self.examine_baseline_changes(changed_test_baselines,
                                      gerrit_url_with_ps)
        self.examine_new_test_expectations(test_expectations)
        bugs = self.create_bugs_from_new_failures(wpt_revision_start,
                                                  wpt_revision_end, gerrit_url)
        self.file_bugs(bugs, dry_run, service_account_key_json)

    def find_changed_baselines_of_tests(self, rebaselined_tests):
        """Finds the corresponding changed baselines of each test.

        Args:
            rebaselined_tests: A list of test names that have been rebaselined.

        Returns:
            A dictionary mapping test names to paths of their baselines changed
            in this import CL (paths relative to the root of Chromium repo).
        """
        test_baselines = {}
        changed_files = self.git.changed_files()
        for test_name in rebaselined_tests:
            test_without_ext, _ = self.host.filesystem.splitext(test_name)
            changed_baselines = []
            # TODO(robertma): Refactor this into web_tests.port.base.
            baseline_name = test_without_ext + '-expected.txt'
            for changed_file in changed_files:
                if changed_file.endswith(baseline_name):
                    changed_baselines.append(changed_file)
            if changed_baselines:
                test_baselines[test_name] = changed_baselines
        return test_baselines

    def examine_baseline_changes(self, changed_test_baselines,
                                 gerrit_url_with_ps):
        """Examines all changed baselines to find new failures.

        Args:
            changed_test_baselines: A dictionary mapping test names to paths of
                changed baselines.
            gerrit_url_with_ps: Gerrit URL of this CL with the patchset number.
        """
        # .items() (not the Python-2-only .iteritems()) keeps this code
        # working on both Python 2 and 3.
        for test_name, changed_baselines in changed_test_baselines.items():
            directory = self.find_owned_directory(test_name)
            if not directory:
                _log.warning('Cannot find OWNERS of %s', test_name)
                continue
            for baseline in changed_baselines:
                if self.more_failures_in_baseline(baseline):
                    self.new_failures_by_directory[directory].append(
                        TestFailure(
                            TestFailure.BASELINE_CHANGE,
                            test_name,
                            baseline_path=baseline,
                            gerrit_url_with_ps=gerrit_url_with_ps))

    def more_failures_in_baseline(self, baseline):
        """Determines if a testharness.js baseline file has new failures.

        The file is assumed to have been modified in the current git checkout,
        and so has a diff we can parse.

        We recognize two types of failures: FAIL lines, which are output for a
        specific subtest failing, and harness errors, which indicate an uncaught
        error in the test. Increasing numbers of either are considered new
        failures - this includes going from FAIL to error or vice-versa.
        """
        diff = self.git.run(['diff', '-U0', 'origin/master', '--', baseline])
        delta_failures = 0
        delta_harness_errors = 0
        for line in diff.splitlines():
            if line.startswith('+FAIL'):
                delta_failures += 1
            if line.startswith('-FAIL'):
                delta_failures -= 1
            if line.startswith('+Harness Error.'):
                delta_harness_errors += 1
            if line.startswith('-Harness Error.'):
                delta_harness_errors -= 1
        return delta_failures > 0 or delta_harness_errors > 0

    def examine_new_test_expectations(self, test_expectations):
        """Examines new test expectations to find new failures.

        Args:
            test_expectations: A dictionary mapping names of tests that cannot
                be rebaselined to a list of new test expectation lines.
        """
        for test_name, expectation_lines in test_expectations.items():
            directory = self.find_owned_directory(test_name)
            if not directory:
                _log.warning('Cannot find OWNERS of %s', test_name)
                continue
            for expectation_line in expectation_lines:
                self.new_failures_by_directory[directory].append(
                    TestFailure(
                        TestFailure.NEW_EXPECTATION,
                        test_name,
                        expectation_line=expectation_line))

    def create_bugs_from_new_failures(self, wpt_revision_start,
                                      wpt_revision_end, gerrit_url):
        """Files bug reports for new failures.

        Args:
            wpt_revision_start: The start of the imported WPT revision range
                (exclusive), i.e. the last imported revision.
            wpt_revision_end: The end of the imported WPT revision range
                (inclusive), i.e. the current imported revision.
            gerrit_url: Gerrit URL of the CL.

        Return:
            A list of MonorailIssue objects that should be filed.
        """
        imported_commits = self.local_wpt.commits_in_range(
            wpt_revision_start, wpt_revision_end)
        bugs = []
        for directory, failures in self.new_failures_by_directory.items():
            summary = '[WPT] New failures introduced in {} by import {}'.format(
                directory, gerrit_url)
            full_directory = self.host.filesystem.join(
                self.finder.web_tests_dir(), directory)
            owners_file = self.host.filesystem.join(full_directory, 'OWNERS')
            metadata_file = self.host.filesystem.join(full_directory,
                                                      'WPT_METADATA')
            is_wpt_notify_enabled = self.owners_extractor.is_wpt_notify_enabled(
                metadata_file)
            owners = self.owners_extractor.extract_owners(owners_file)
            # owners may be empty but not None.
            cc = owners
            component = self.owners_extractor.extract_component(metadata_file)
            # component could be None.
            components = [component] if component else None
            prologue = ('WPT import {} introduced new failures in {}:\n\n'
                        'List of new failures:\n'.format(
                            gerrit_url, directory))
            failure_list = ''
            for failure in failures:
                failure_list += str(failure) + '\n'
            expectations_statement = (
                '\nExpectations or baseline files [0] have been automatically '
                'added for the failing results to keep the bots green. Please '
                'investigate the new failures and triage as appropriate.\n')
            range_statement = '\nThis import contains upstream changes from {} to {}:\n'.format(
                wpt_revision_start, wpt_revision_end)
            commit_list = self.format_commit_list(imported_commits,
                                                  full_directory)
            links_list = '\n[0]: https://chromium.googlesource.com/chromium/src/+/HEAD/docs/testing/web_test_expectations.md\n'
            description = (prologue + failure_list + expectations_statement +
                           range_statement + commit_list + links_list)
            bug = MonorailIssue.new_chromium_issue(summary,
                                                   description,
                                                   cc,
                                                   components,
                                                   labels=['Test-WebTest'])
            # Lazy '%s' formatting works on Python 2 and 3 (the original
            # called the Python-2-only unicode() here).
            _log.info('%s', bug)
            if is_wpt_notify_enabled:
                _log.info(
                    "WPT-NOTIFY enabled in this directory; adding the bug to the pending list."
                )
                bugs.append(bug)
            else:
                _log.info(
                    "WPT-NOTIFY disabled in this directory; discarding the bug."
                )
        return bugs

    def format_commit_list(self, imported_commits, directory):
        """Formats the list of imported WPT commits.

        Imports affecting the given directory will be highlighted.

        Args:
            imported_commits: A list of (SHA, commit subject) pairs.
            directory: An absolute path of a directory in the Chromium repo, for
                which the list is formatted.

        Returns:
            A multi-line string.
        """
        path_from_wpt = self.host.filesystem.relpath(
            directory, self.finder.path_from_web_tests('external', 'wpt'))
        commit_list = ''
        for sha, subject in imported_commits:
            # subject is a Unicode string and can contain non-ASCII characters.
            line = u'{}: {}'.format(subject, GITHUB_COMMIT_PREFIX + sha)
            if self.local_wpt.is_commit_affecting_directory(
                    sha, path_from_wpt):
                line += ' [affecting this directory]'
            commit_list += line + '\n'
        return commit_list

    def find_owned_directory(self, test_name):
        """Finds the lowest directory that contains the test and has OWNERS.

        Args:
            The name of the test (a path relative to web_tests).

        Returns:
            The path of the found directory relative to web_tests.
        """
        # Always use non-virtual test names when looking up OWNERS.
        # (Hoisted the duplicate lookup_virtual_test_base() call.)
        base_test = self.default_port.lookup_virtual_test_base(test_name)
        if base_test:
            test_name = base_test
        # find_owners_file takes either a relative path from the *root* of the
        # repository, or an absolute path.
        abs_test_path = self.finder.path_from_web_tests(test_name)
        owners_file = self.owners_extractor.find_owners_file(
            self.host.filesystem.dirname(abs_test_path))
        if not owners_file:
            return None
        owned_directory = self.host.filesystem.dirname(owners_file)
        short_directory = self.host.filesystem.relpath(
            owned_directory, self.finder.web_tests_dir())
        return short_directory

    def file_bugs(self, bugs, dry_run, service_account_key_json=None):
        """Files a list of bugs to Monorail.

        Args:
            bugs: A list of MonorailIssue objects.
            dry_run: A boolean, whether we are in dry run mode.
            service_account_key_json: Optional, see docs for main().
        """
        # TODO(robertma): Better error handling in this method.
        if dry_run:
            _log.info(
                '[dry_run] Would have filed the %d bugs in the pending list.',
                len(bugs))
            return
        _log.info('Filing %d bugs in the pending list to Monorail', len(bugs))
        api = self._get_monorail_api(service_account_key_json)
        for index, bug in enumerate(bugs, start=1):
            response = api.insert_issue(bug)
            _log.info('[%d] Filed bug: %s', index,
                      MonorailIssue.crbug_link(response['id']))

    def _get_monorail_api(self, service_account_key_json):
        # Prefer an explicit service account key; otherwise fall back to a
        # luci-auth access token.
        if service_account_key_json:
            return self._monorail_api(
                service_account_key_json=service_account_key_json)
        token = LuciAuth(self.host).get_access_token()
        return self._monorail_api(access_token=token)
class TestFailure(object):
    """A simple abstraction of a new test failure for the notifier."""

    # Failure types:
    BASELINE_CHANGE = 1
    NEW_EXPECTATION = 2

    def __init__(self,
                 failure_type,
                 test_name,
                 expectation_line='',
                 baseline_path='',
                 gerrit_url_with_ps=''):
        if failure_type == self.BASELINE_CHANGE:
            assert baseline_path and gerrit_url_with_ps
        else:
            assert failure_type == self.NEW_EXPECTATION
            assert expectation_line
        self.failure_type = failure_type
        self.test_name = test_name
        self.expectation_line = expectation_line
        self.baseline_path = baseline_path
        self.gerrit_url_with_ps = gerrit_url_with_ps

    def __str__(self):
        if self.failure_type == self.BASELINE_CHANGE:
            return self._format_baseline_change()
        else:
            return self._format_new_expectation()

    def _key(self):
        # Single source of truth for equality and hashing.
        return (self.failure_type, self.test_name, self.expectation_line,
                self.baseline_path, self.gerrit_url_with_ps)

    def __eq__(self, other):
        return self._key() == other._key()

    def __ne__(self, other):
        # Required under Python 2: defining __eq__ alone leaves != using
        # the default identity comparison, so equal failures compared
        # unequal with !=.
        return not self == other

    def __hash__(self):
        # Keep hashing consistent with __eq__ so instances behave
        # correctly in sets and as dict keys.
        return hash(self._key())

    def _format_baseline_change(self):
        assert self.failure_type == self.BASELINE_CHANGE
        result = ''
        # TODO(robertma): Is there any better way than using regexp?
        platform = re.search(r'/platform/([^/]+)/', self.baseline_path)
        if platform:
            result += '[ {} ] '.format(platform.group(1).capitalize())
        result += '{} new failing tests: {}{}'.format(
            self.test_name, self.gerrit_url_with_ps, self.baseline_path)
        return result

    def _format_new_expectation(self):
        assert self.failure_type == self.NEW_EXPECTATION
        # TODO(robertma): Are there saner ways to remove the link to the umbrella bug?
        line = self.expectation_line
        if line.startswith(WPTExpectationsUpdater.UMBRELLA_BUG):
            line = line[len(WPTExpectationsUpdater.UMBRELLA_BUG):].lstrip()
        return line
| 43.162791
| 127
| 0.618475
|
4a117ad1dd74f72110c2751da884837fc80feb72
| 4,121
|
py
|
Python
|
prep.py
|
jskrable/sarcasm-detection
|
d6bce5a6c428b6899194358b09ba94246908d483
|
[
"Apache-2.0"
] | null | null | null |
prep.py
|
jskrable/sarcasm-detection
|
d6bce5a6c428b6899194358b09ba94246908d483
|
[
"Apache-2.0"
] | null | null | null |
prep.py
|
jskrable/sarcasm-detection
|
d6bce5a6c428b6899194358b09ba94246908d483
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# coding: utf-8
"""
title: main.py
date: 2019-11-23
author: jskrable
description: Preprocessing
"""
import os
import re
import json
from pyspark import SparkContext
from pyspark.sql import SQLContext, Row
from pyspark.sql.types import ArrayType, StringType
from pyspark.sql.functions import lit, when, col, udf, concat
from pyspark.ml.feature import NGram, CountVectorizer, CountVectorizerModel, IndexToString, StringIndexer, VectorIndexer
def progress(count, total, suffix=''):
    """
    Render an in-place progress bar on stdout.

    Args:
        count: number of completed units.
        total: total number of units; must be non-zero.
        suffix: optional text appended after the percentage.
    """
    # Local import: 'sys' was never imported at module level, so the
    # original raised NameError on the first call.
    import sys

    bar_len = 60
    filled_len = int(round(bar_len * count / float(total)))
    percents = round(100.0 * count / float(total), 1)
    bar = '#' * filled_len + '-' * (bar_len - filled_len)
    sys.stdout.write('[%s] %s%s %s\r' % (bar, percents, '%', suffix))
    sys.stdout.flush()
def parse_data(file):
    """
    Lazily parse a pseudo-json (newline-delimited JSON) file.

    Yields one decoded object per line; being a generator keeps CPU and
    memory usage flat. Wrap the result in list() to materialize it.
    """
    # 'with' guarantees the file handle is closed even if the consumer
    # stops iterating early (the original left the handle open).
    with open(file, 'r') as fh:
        for line in fh:
            yield json.loads(line)
def get_source_data(base_dir):
    """
    Read every pseudo-json file under base_dir (recursively) and return a
    flat list of parsed objects. Objects are structured as follows:
        article_link: http link to original article
        headline: string headline, special characters intact
        is_sarcastic: int, 1 for sarcasm, 0 for serious
    """
    # The original rebound 'files'/'data' on every os.walk iteration, so
    # only the *last* directory visited was retained, and 'data' was
    # undefined for an empty tree. Accumulate across all directories.
    data = []
    for dirpath, _, filenames in os.walk(base_dir):
        for name in filenames:
            data.extend(parse_data(os.path.join(dirpath, name)))
    return data
def count_vectorizer(df, col, train=False):
    """
    Transform a dataframe of tokenized headlines into word-count vectors
    (simple bag-of-words). Expects `col` to hold a list of words; returns
    the dataframe with an extra 'vector' column.

    When train is True, fits a new CountVectorizer and persists it to
    './cv_model'; otherwise loads the previously saved model.
    """
    if train:
        vectorizer = CountVectorizer(
            inputCol=col,
            outputCol='vector',
            vocabSize=50000)
        model = vectorizer.fit(df)
        print('Saving count vectorizer model to disk...')
        model.save('./cv_model')
    else:
        model = CountVectorizerModel.load('./cv_model')
    return model.transform(df)
def label_indexer(df, col):
    """
    String-index the class label column `col`, returning the dataframe
    with a new 'indexedLabel' column appended.
    """
    indexer = StringIndexer(inputCol=col, outputCol="indexedLabel")
    return indexer.fit(df).transform(df)
def n_grams(df, col, n=2):
    """
    Build n-grams (default bigrams) from the word list in `col`, returning
    the dataframe with a new 'ngrams' column appended.
    """
    return NGram(n=n, inputCol=col, outputCol="ngrams").transform(df)
def preprocessing(sql, data, train=True):
    """
    Convert a list of headline dicts into a Spark dataframe with indexed
    labels and vectorized features.

    Args:
        sql: SQLContext used to build the dataframe.
        data: list of dicts with at least 'headline' and 'is_sarcastic' keys.
        train: when True, fit/save the vectorizer and label indexer and
            return a (train, test) random split; otherwise return the
            transformed dataframe only.
    """
    # convert input data to spark dataframe
    df = sql.createDataFrame(Row(**entry) for entry in data)
    # allow only alphabetic characters, lower-case, and tokenize
    regex = re.compile('[^a-zA-Z]')
    clean_headline = udf(lambda x: regex.sub(' ', x).lower().split(),
                         ArrayType(StringType()))
    df = df.withColumn('cleanHeadline', clean_headline(df.headline))
    df = n_grams(df, 'cleanHeadline')
    # Renamed from 'concat': the original shadowed the
    # pyspark.sql.functions.concat imported at module level.
    concat_lists = udf(lambda x, y: x + y, ArrayType(StringType()))
    df = df.withColumn('gramList', concat_lists(df.cleanHeadline, df.ngrams))
    # sparse vector of dictionary word counts over unigrams + bigrams
    df = count_vectorizer(df, 'gramList', train)
    if train:
        # index label column
        print('Indexing labels...')
        df = label_indexer(df, 'is_sarcastic')
        # Distinct names so the 'train' flag parameter is not clobbered.
        train_df, test_df = df.randomSplit([0.7, 0.3])
        return train_df, test_df
    return df
| 30.753731
| 120
| 0.646445
|
4a117ae2a800528dfcc580391edfc1fe8715d518
| 1,271
|
py
|
Python
|
will/backends/storage/base.py
|
Ashex/will
|
eb57878db8c4e13a69ea670b9fda78daeea31436
|
[
"MIT"
] | 349
|
2015-01-15T05:12:02.000Z
|
2022-01-11T09:21:01.000Z
|
will/backends/storage/base.py
|
Ashex/will
|
eb57878db8c4e13a69ea670b9fda78daeea31436
|
[
"MIT"
] | 350
|
2015-01-02T16:33:14.000Z
|
2022-02-06T17:34:34.000Z
|
will/backends/storage/base.py
|
chillipeper/will
|
4bd9a3f5cc4e2ea822cbf4f7a6f04996b3f20449
|
[
"MIT"
] | 184
|
2015-01-08T13:20:50.000Z
|
2021-12-31T05:57:21.000Z
|
import logging
import redis
from six.moves.urllib.parse import urlparse
from will.mixins import SettingsMixin, EncryptionMixin
class PrivateBaseStorageBackend(SettingsMixin, EncryptionMixin, object):
    """Encrypting layer over a concrete backend's do_save()/do_load()."""

    # Subclasses list the settings they require here.
    required_settings = []

    def save(self, key, value, *args, **kwargs):
        """Encrypt value and delegate persistence to do_save()."""
        self.do_save(key, self.encrypt(value), *args, **kwargs)

    def load(self, key, *args, **kwargs):
        """Load and decrypt key, falling back to the raw stored value for
        data written before encryption was introduced."""
        try:
            return self.decrypt(self.do_load(key, *args, **kwargs))
        except Exception:
            # Narrowed from a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit; the best-effort migration
            # fallback is kept. logging.warning replaces deprecated warn().
            logging.warning("Error decrypting. Attempting unencrypted load for %s to ease migration." % key)
            return self.do_load(key, *args, **kwargs)
class BaseStorageBackend(PrivateBaseStorageBackend):
    """
    The base storage backend. All storage backends must supply the following methods:

    __init__() - sets up the connection
    do_save() - saves a single value to a key
    do_load() - gets a value from the backend
    clear() - deletes a key
    clear_all_keys() - clears the db

    Note: callers use save()/load() from PrivateBaseStorageBackend, which
    wrap do_save()/do_load() with encryption; subclasses implement only the
    do_* primitives below.
    """

    def do_save(self, key, value, expire=None):
        # Persist a single key/value pair; expire is an optional TTL.
        raise NotImplementedError

    def do_load(self, key):
        # Fetch the raw (encrypted) value stored under key.
        raise NotImplementedError

    def clear(self, key):
        # Delete a single key.
        raise NotImplementedError

    def clear_all_keys(self):
        # Wipe the entire database.
        raise NotImplementedError
| 30.261905
| 106
| 0.678206
|
4a117bb118238ce93c1e7c589fcc7079f88d06a0
| 4,402
|
py
|
Python
|
nova/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py
|
xushiwei/nova
|
f27956708b0aaeabb06125e6a72b4d61747934b7
|
[
"Apache-2.0"
] | 1
|
2021-11-08T10:11:44.000Z
|
2021-11-08T10:11:44.000Z
|
nova/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py
|
xushiwei/nova
|
f27956708b0aaeabb06125e6a72b4d61747934b7
|
[
"Apache-2.0"
] | null | null | null |
nova/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py
|
xushiwei/nova
|
f27956708b0aaeabb06125e6a72b4d61747934b7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from sqlalchemy import *
from migrate import *
from nova import log as logging
from nova import utils
meta = MetaData()

# virtual interface table to add to DB
# NOTE(review): default=utils.utcnow() / onupdate=utils.utcnow() invoke the
# function once at import time, so rows get the module-load timestamp rather
# than a per-row timestamp; passing the callable (utils.utcnow, no parens)
# looks like what was intended -- confirm before altering a shipped migration.
virtual_interfaces = Table('virtual_interfaces', meta,
        Column('created_at', DateTime(timezone=False),
               default=utils.utcnow()),
        Column('updated_at', DateTime(timezone=False),
               onupdate=utils.utcnow()),
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('id', Integer(), primary_key=True, nullable=False),
        Column('address',
               String(length=255, convert_unicode=False, assert_unicode=None,
                      unicode_error=None, _warn_on_bytestring=False),
               unique=True),
        Column('network_id',
               Integer(),
               ForeignKey('networks.id')),
        Column('instance_id',
               Integer(),
               ForeignKey('instances.id'),
               nullable=False),
        mysql_engine='InnoDB')

# bridge_interface column to add to networks table
interface = Column('bridge_interface',
                   String(length=255, convert_unicode=False,
                          assert_unicode=None, unicode_error=None,
                          _warn_on_bytestring=False))

# virtual interface id column to add to fixed_ips table
# foreignkey added in next migration
virtual_interface_id = Column('virtual_interface_id',
                              Integer())
def upgrade(migrate_engine):
    """Create virtual_interfaces, add networks.bridge_interface and
    fixed_ips.virtual_interface_id, copy mac_address data off instances,
    then drop instances.mac_address.

    NOTE: statement order is significant -- data is extracted from
    instances/fixed_ips before the mac_address column is dropped at the end.
    """
    meta.bind = migrate_engine
    # grab tables and (column for dropping later)
    instances = Table('instances', meta, autoload=True)
    networks = Table('networks', meta, autoload=True)
    fixed_ips = Table('fixed_ips', meta, autoload=True)
    c = instances.columns['mac_address']
    # add interface column to networks table
    # values will have to be set manually before running nova
    try:
        networks.create_column(interface)
    except Exception:
        logging.error(_("interface column not added to networks table"))
        raise
    # create virtual_interfaces table
    try:
        virtual_interfaces.create()
    except Exception:
        logging.error(_("Table |%s| not created!"), repr(virtual_interfaces))
        raise
    # add virtual_interface_id column to fixed_ips table
    try:
        fixed_ips.create_column(virtual_interface_id)
    except Exception:
        logging.error(_("VIF column not added to fixed_ips table"))
        raise
    # populate the virtual_interfaces table
    # extract data from existing instance and fixed_ip tables
    s = select([instances.c.id, instances.c.mac_address,
                fixed_ips.c.network_id],
               fixed_ips.c.instance_id == instances.c.id)
    keys = ('instance_id', 'address', 'network_id')
    join_list = [dict(zip(keys, row)) for row in s.execute()]
    logging.debug(_("join list for moving mac_addresses |%s|"), join_list)
    # insert data into the table
    if join_list:
        i = virtual_interfaces.insert()
        i.execute(join_list)
    # populate the fixed_ips virtual_interface_id column
    # NOTE: '!= None' is intentional -- SQLAlchemy overloads comparison
    # operators to emit SQL 'IS NOT NULL'; 'is not None' would not build
    # the expression.
    s = select([fixed_ips.c.id, fixed_ips.c.instance_id],
               fixed_ips.c.instance_id != None)
    for row in s.execute():
        # Correlated scalar subquery: the VIF row created above for this
        # instance.
        m = select([virtual_interfaces.c.id]).\
            where(virtual_interfaces.c.instance_id == row['instance_id']).\
            as_scalar()
        u = fixed_ips.update().values(virtual_interface_id=m).\
            where(fixed_ips.c.id == row['id'])
        u.execute()
    # drop the mac_address column from instances
    c.drop()
def downgrade(migrate_engine):
    """Refuse to downgrade: upgrade() dropped instances.mac_address and moved
    its data into virtual_interfaces, so reversing would lose data.

    Raises:
        Exception: always, with an explanatory message (the original raised
            a bare, message-less Exception).
    """
    msg = _("Can't downgrade without losing data")
    logging.error(msg)
    raise Exception(msg)
| 34.936508
| 78
| 0.655838
|
4a117bea598a74e1c7f55d3c34c057b0911404ff
| 5,848
|
py
|
Python
|
docs/conf.py
|
flavio-fernandes/Adafruit_CircuitPython_PyPortal
|
2ac9269ee959a1c8a04547328d87d2e536971f47
|
[
"MIT"
] | 1
|
2020-12-30T22:07:09.000Z
|
2020-12-30T22:07:09.000Z
|
docs/conf.py
|
flavio-fernandes/Adafruit_CircuitPython_PyPortal
|
2ac9269ee959a1c8a04547328d87d2e536971f47
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
flavio-fernandes/Adafruit_CircuitPython_PyPortal
|
2ac9269ee959a1c8a04547328d87d2e536971f47
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import sys
# Make the package importable from the docs directory so autodoc finds it.
sys.path.insert(0, os.path.abspath(".."))

# -- General configuration ------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.intersphinx",
    "sphinx.ext.napoleon",
    "sphinx.ext.todo",
]

# TODO: Please Read!
# Uncomment the below if you use native CircuitPython modules such as
# digitalio, micropython and busio. List the modules you use. Without it, the
# autodoc module docs will fail to generate with a warning.
# Modules mocked out so autodoc can import the library without the
# CircuitPython hardware stack installed.
autodoc_mock_imports = [
    "rtc",
    "supervisor",
    "pulseio",
    "audioio",
    "audiocore",
    "displayio",
    "neopixel",
    "microcontroller",
    "adafruit_touchscreen",
    "adafruit_bitmap_font",
    "adafruit_display_text",
    "adafruit_esp32spi",
    "secrets",
    "adafruit_sdcard",
    "storage",
    "sdcardio",
    "adafruit_io",
    "adafruit_cursorcontrol",
    "adafruit_requests",
    "terminalio",
]

# Cross-reference targets for linking into other projects' documentation.
intersphinx_mapping = {
    "python": ("https://docs.python.org/3.4", None),
    "BusDevice": (
        "https://circuitpython.readthedocs.io/projects/busdevice/en/latest/",
        None,
    ),
    "CircuitPython": ("https://circuitpython.readthedocs.io/en/latest/", None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

source_suffix = ".rst"

# The master toctree document.
master_doc = "index"

# General information about the project.
project = "Adafruit PyPortal Library"
copyright = "2019 Limor Fried"
author = "Limor Fried"

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
release = "1.0"

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", ".env", "CODE_OF_CONDUCT.md"]

# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"

# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# If this is True, todo emits a warning for each TODO entries. The default is False.
todo_emit_warnings = True

# Use Google-style docstrings (not NumPy-style) with the napoleon extension.
napoleon_numpy_docstring = False
# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Read the Docs injects its own theme on hosted builds; locally, use the
# RTD theme when installed and fall back to the builtin default otherwise.
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd:  # only import and set the theme if we're building docs locally
    try:
        import sphinx_rtd_theme

        html_theme = "sphinx_rtd_theme"
        html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), "."]
    except ImportError:
        # Narrowed from a bare 'except:': only a missing package should
        # select the fallback theme, not e.g. KeyboardInterrupt.
        html_theme = "default"
        html_theme_path = ["."]
else:
    html_theme_path = ["."]
# -- Static assets and builder-specific output options ---------------------

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = "_static/favicon.ico"

# Output file base name for HTML help builder.
htmlhelp_basename = "AdafruitPyportalLibrarydoc"

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (
        master_doc,
        "AdafruitPyPortalLibrary.tex",
        "AdafruitPyPortal Library Documentation",
        author,
        "manual",
    ),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (
        master_doc,
        "AdafruitPyPortallibrary",
        "Adafruit PyPortal Library Documentation",
        [author],
        1,
    )
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        "AdafruitPyPortalLibrary",
        "Adafruit PyPortal Library Documentation",
        author,
        "AdafruitPyPortalLibrary",
        "One line description of project.",
        "Miscellaneous",
    ),
]
| 28.666667
| 85
| 0.661252
|
4a117c8a427eb98683593f58ddf0374d22ae3a38
| 12,635
|
py
|
Python
|
magnet/training/train.py
|
ysglh/magnet
|
cd37f0ebe46a17e0948158795c6715a60c34b9db
|
[
"MIT"
] | 343
|
2018-09-03T09:59:36.000Z
|
2022-02-08T11:32:34.000Z
|
magnet/training/train.py
|
ysglh/magnet
|
cd37f0ebe46a17e0948158795c6715a60c34b9db
|
[
"MIT"
] | 7
|
2018-09-04T07:03:11.000Z
|
2019-03-21T07:17:14.000Z
|
magnet/training/train.py
|
ysglh/magnet
|
cd37f0ebe46a17e0948158795c6715a60c34b9db
|
[
"MIT"
] | 23
|
2018-09-03T19:12:04.000Z
|
2021-02-20T09:23:30.000Z
|
from torch import optim
from contextlib import contextmanager
class Trainer:
    r"""Abstract base class for training models.

    The Trainer class makes it incredibly simple and convenient to train,
    monitor, debug and checkpoint entire Deep Learning projects.

    Simply define your training loop by
    implementing the :py:meth:`optimize` method.

    Args:
        models (list of :py:class:`nn.Module`): All the models that need
            to be trained
        optimizers (list of :py:class:`optim.Optimizer`): Any optimizers that
            are used

    .. note::
        If any model is in eval() mode, the trainer is *set off*.
        This means that as per protocol, *all* models will not train.

    Attributes:
        callbacks (list): A list of callbacks attached to the trainer.

    Take a look at :py:class:`SupervisedTrainer` for an idea on how to
    extend this class.
    """
    def __init__(self, models, optimizers):
        self.models = models
        self.optimizers = optimizers
        # Names of 'stateful' attributes that save_state()/load_state()
        # serialize alongside the models and optimizers.
        self.parameters = set()
        # The global iteration counter is itself part of the trainer state,
        # so training can resume from a checkpoint.
        self.register_parameter('iterations', 0)

    def optimize(self):
        r"""Defines the core optimization loop.

        This method is called on each iteration.

        Two quick protocols that one needs to follow are:

        1. **Do NOT** actually backpropagate or step() the optimizers if the
           trainer is not training. Use the :py:meth:`is_training` method
           to find out.
           This is essential since this will ensure that the trainer behaves
           as expected when :py:meth:`is_training` is ``False``.
           Useful, for example, in cases like :py:class:`callbacks.ColdStart`.

        2. Send a callback the signal ``'gradient'`` with a keyword argument
           ``'models'`` that is the list of models that accumulate a gradient.
           Usually, it's all the modules (``self.modules``).
           Any callbacks that listen to this signal are interested in the
           gradient information (eg. ``callbacks.Babysitter``).
        """
        raise NotImplementedError

    def train(self, dataloader, epochs=1, callbacks=None, **kwargs):
        r"""Starts the training process.

        Args:
            dataloader (``DataLoader``): The MagNet dataloader that iterates
                over the training set
            epochs (float or int): The number of epochs to train for.
                Default: ``1``
            callbacks (list): Any callbacks to be attached. Default: ``None``

        Keyword Args:
            iterations (int): The number of iterations to train for.
                Overrides :attr:`epochs`.

        .. note::
            PyTorch ``DataLoader`` s are not supported.
            Ideally, encapsulate your dataset in the ``Data`` class.
        """
        from magnet.training.callbacks import CallbackQueue

        self.dataloader = dataloader
        if callbacks is None: callbacks = []
        self.callbacks = CallbackQueue(callbacks)
        # An explicit 'iterations' kwarg overrides the epoch-based count.
        total_iterations = kwargs.get('iterations', int(epochs * len(dataloader)))

        self.callbacks('on_training_start', trainer=self, total_iterations=total_iterations)
        # self.iterations persists across train() calls (and checkpoints),
        # so training resumes from where it previously left off.
        for self.iterations in range(self.iterations, self.iterations + total_iterations): next(self)
        self.callbacks('on_training_end', trainer=self)

    def __iter__(self):
        return self

    def __next__(self):
        # One training step: bracket optimize() with the batch callbacks.
        # NOTE: the iteration counter is advanced by train(), not here.
        self.callbacks('on_batch_start', trainer=self)
        self.optimize()
        self.callbacks('on_batch_end', trainer=self)

    @contextmanager
    def mock(self, path=None):
        r"""A context manager that creates a temporary *'safe'* scope for training.

        All impact to stateful objects (models, optimizers and the
        trainer itself) are forgotten once out of this scope.

        This is very useful if you need to try out *what-if experiments*.

        Args:
            path (pathlib.Path): The path to save temporary states into
                Default: ``{System temp directory}/.mock_trainer``
        """
        from shutil import rmtree

        if path is None:
            from pathlib import Path
            from tempfile import gettempdir
            path = Path(gettempdir()) / '.mock_trainer'

        rmtree(path, ignore_errors=True)  # Remove any existing directory
        self.save_state(path)
        try:
            yield
        finally:
            # Restore the pre-scope state even if the body raised.
            self.load_state(path)
            rmtree(path)

    def epochs(self, mode=None):
        r"""The number of epochs completed.

        Args:
            mode (str or None): If the mode is ``'start'`` or ``'end'``, a
                boolean is returned signalling if it's the start or end of
                an epoch
        """
        if mode is None:
            return self.iterations / len(self.dataloader)
        if mode == 'start':
            # Epoch boundary checks rely on float .is_integer() of the ratio.
            return (self.iterations / len(self.dataloader)).is_integer()
        if mode == 'end':
            return ((self.iterations + 1) / len(self.dataloader)).is_integer()

    def is_training(self):
        # Per protocol, the trainer trains only when *every* model is in
        # training mode.
        return all(model.training for model in self.models)

    def load_state(self, path):
        # Restores models, optimizers, registered parameters, callbacks and
        # the dataloader (the latter two only if present and stateful).
        from magnet.training.utils import load_state, load_object

        for i, model in enumerate(self.models): load_state(model, path / 'models', alternative_name=str(i))
        for i, optimizer in enumerate(self.optimizers): load_state(optimizer, path / 'optimizers', alternative_name=str(i))

        state_dict = load_object(path / 'state.p', default={})
        for attr, val in state_dict.items(): self.register_parameter(attr, val)

        # Before train() is called, self.callbacks / self.dataloader may not
        # exist (or may not implement state I/O); fail silently in that case.
        try: self.callbacks('load_state', trainer=self, path=path / 'callbacks')
        except AttributeError: pass

        try: self.dataloader.load_state_dict(path / 'dataloader.p')
        except AttributeError: pass

    def save_state(self, path):
        # Mirror image of load_state(): persists everything needed to resume.
        from magnet.training.utils import save_state, save_object

        for i, model in enumerate(self.models): save_state(model, path / 'models', alternative_name=str(i))
        for i, optimizer in enumerate(self.optimizers): save_state(optimizer, path / 'optimizers', alternative_name=str(i))

        state_dict = {attr: getattr(self, attr) for attr in self.parameters}
        save_object(state_dict, path / 'state.p')

        try: self.callbacks('save_state', trainer=self, path=path / 'callbacks')
        except AttributeError: pass

        try: self.dataloader.save_state_dict(path / 'dataloader.p')
        except AttributeError: pass

    def register_parameter(self, name, value):
        r"""Use this to register *'stateful'* parameters that are serialized."""
        setattr(self, name, value)
        self.parameters.add(name)
class SupervisedTrainer(Trainer):
    r"""A simple trainer that implements a supervised approach where a simple
    model :math:`\hat{y} = f(x)` is trained to map :math:`\hat{y}` to
    ground-truth :math:`y` according to some specified loss.

    This is the training routine that most high-level deep learning
    frameworks implement.

    Args:
        model (``nn.Module``): The model that needs to be trained
        optimizer (str or optim.Optimizer): The optimizer used to train
            the model. Default: ``'adam'``
        loss (str or ``callable``): A loss function that gives the objective
            to be minimized. Default: ``'cross_entropy'``
        metrics (list): Any other metrics that need to be monitored.
            Default: ``None``

    * :attr:`optimizer` can be an actual ``optim.Optimizer`` instance or the
      name of a popular optimizer (eg. ``'adam'``).
    * :attr:`loss` can be a function or the name of a popular
      loss function (eg. ``'cross_entropy'``).
      It should accept 2 arguments (:math:`\hat{y}`, :math:`y`).
    * :attr:`metrics` should contain a list of functions which accept
      2 arguments (:math:`\hat{y}`, :math:`y`), like the loss function.

    .. note::
        A static :py:meth:`validate` function is provided for the
        validation callback

    .. note::
        The :attr:`metrics` is of no use unless there is some
        callback (eg. ``callbacks.Monitor``) to receive the metrics

    Examples::

        >>> import magnet as mag
        >>> import magnet.nodes as mn

        >>> from magnet.data import Data
        >>> from magnet.training import callbacks, SupervisedTrainer

        >>> data = Data.get('mnist')

        >>> model = mn.Linear(10, act=None)
        >>> model.build(x=next(data())[0])

        >>> trainer = SupervisedTrainer(model)
        >>> callbacks=[callbacks.Monitor(),
                       callbacks.Validate(data(64, mode='val'), SupervisedTrainer.validate)]
        >>> trainer.train(data(64, shuffle=True), 1, callbacks)
    """
    def __init__(self, model, optimizer='adam', loss='cross_entropy', metrics=None):
        from magnet.nodes.functional import wiki

        # Resolve string shorthands to actual objects via the lookup tables.
        if isinstance(optimizer, str): optimizer = optimizer_wiki[optimizer.lower()](model.parameters())
        if isinstance(loss, str): loss = wiki['losses'][loss.lower()]

        if metrics is None: metrics = []
        elif not isinstance(metrics, (tuple, list)): metrics = [metrics]
        # Copy into a fresh list: the normalization below assigns by index,
        # which would raise TypeError on a caller-supplied tuple, and we must
        # not mutate the caller's list in place.
        metrics = list(metrics)
        for i, metric in enumerate(metrics):
            if isinstance(metric, str):
                metrics[i] = (metric, wiki['metrics'][metric.lower()])
            elif callable(metric):
                # Bare callables are normalized to (name, fn) pairs so that
                # get_loss() can always unpack them uniformly.
                metrics[i] = (getattr(metric, '__name__', 'metric'), metric)

        super().__init__([model], [optimizer])

        self.loss = loss
        self.metrics = metrics

    def optimize(self):
        r"""One supervised training step: forward, loss, backward, step."""
        optimizer = self.optimizers[0]

        loss = self.get_loss(self.dataloader)

        # Protocol 1: Backprop and step() only if trainer is training
        if self.is_training():
            loss.backward()
            # Protocol 2: Broadcast the models that accumulate the gradient
            # using signal 'gradient' before clearing them.
            self.callbacks('gradient', trainer=self, models=self.models)
            optimizer.step()
            optimizer.zero_grad()

    @staticmethod
    def validate(trainer, dataloader):
        r"""Static helper method to validate models in :attr:`trainer` against
        data in :attr:`dataloader`.

        Can be passed to ``callbacks.Validate()``.
        """
        trainer.get_loss(dataloader, validation=True)

    def get_loss(self, dataloader, validation=False):
        r"""Utility function that returns the loss and broadcasts metrics.

        Args:
            dataloader: The dataloader to draw the next ``(x, y)`` batch from
            validation (bool): Whether the stats are for a validation pass.
                Default: ``False``
        """
        def write_stats(key, value):
            self.callbacks('write_stats', trainer=self, key=key, value=value,
                           validation=validation, buffer_size=len(dataloader))

        model = self.models[0]

        x, y = next(dataloader)
        y_pred = model(x)

        loss = self.loss(y_pred, y)

        # Broadcast the loss and any other metrics using the 'write_stats'
        # signal; metrics are (name, fn) pairs (see __init__).
        write_stats('loss', loss.item())
        for metric in self.metrics: write_stats(metric[0], metric[1](y_pred, y).item())

        return loss
def finish_training(path, names=None):
    r"""A helper function for cleaning up the training logs and other
    checkpoints and retaining only the state_dicts of the trained models.

    Args:
        path (pathlib.Path): The path where the trainer was checkpointed
        names (str or list): The names of the models in the order given to
            the trainer. Default: ``None``

    * :attr:`names` can be used if the models themselves did not have names
      prior to training.
      The checkpoints default to an ordered naming scheme (``0.pt``,
      ``1.pt``, ...). If passed, the files are additionally renamed to these
      names while being moved up to ``path.parent``.

    .. note::
        Does nothing / fails silently if the path does not exist.

    Example::

        >>> trainer.save_state(checkpoint_path / 'my-trainer')
        >>> finish_training(checkpoint_path / 'my-trainer',
        ...                 names=['encoder', 'decoder'])
        >>> # models/0.pt and models/1.pt become encoder.pt and decoder.pt
        >>> # in checkpoint_path; the 'my-trainer' directory is removed.
    """
    if not path.exists(): return

    import shutil

    if isinstance(names, str): names = [names]

    def _order(filename):
        # Path.glob() yields entries in arbitrary, filesystem-dependent
        # order; sort so that names map onto checkpoints deterministically.
        # Numeric stems ('0', '1', ..., '10') sort numerically, any other
        # stems fall back to lexicographic order after them.
        stem = filename.stem
        return (0, int(stem), '') if stem.isdigit() else (1, 0, stem)

    filenames = sorted((path / 'models').glob('*.pt'), key=_order)
    if names is None: names = [filename.stem for filename in filenames]

    for name, filename in zip(names, filenames):
        shutil.move(filename, path.parent / (name + '.pt'))

    shutil.rmtree(path)
optimizer_wiki = {'adam': optim.Adam}
| 36.836735
| 129
| 0.627622
|
4a117d94f01a88675e241504708fe678bcf9d1c5
| 69,362
|
py
|
Python
|
trace/google/cloud/trace_v2/proto/trace_pb2.py
|
conwaychriscosmo/google-cloud-python
|
8e7b7f8a5f4bb04d13f4d88ec3848f017faf834a
|
[
"Apache-2.0"
] | 1
|
2019-03-26T21:44:51.000Z
|
2019-03-26T21:44:51.000Z
|
trace/google/cloud/trace_v2/proto/trace_pb2.py
|
conwaychriscosmo/google-cloud-python
|
8e7b7f8a5f4bb04d13f4d88ec3848f017faf834a
|
[
"Apache-2.0"
] | 1
|
2019-03-29T22:03:48.000Z
|
2019-04-02T22:24:45.000Z
|
trace/google/cloud/trace_v2/proto/trace_pb2.py
|
conwaychriscosmo/google-cloud-python
|
8e7b7f8a5f4bb04d13f4d88ec3848f017faf834a
|
[
"Apache-2.0"
] | 1
|
2019-03-29T18:26:16.000Z
|
2019-03-29T18:26:16.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/devtools/cloudtrace_v2/proto/trace.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/devtools/cloudtrace_v2/proto/trace.proto",
package="google.devtools.cloudtrace.v2",
syntax="proto3",
serialized_options=_b(
"\n!com.google.devtools.cloudtrace.v2B\nTraceProtoP\001ZGgoogle.golang.org/genproto/googleapis/devtools/cloudtrace/v2;cloudtrace\252\002\025Google.Cloud.Trace.V2\312\002\025Google\\Cloud\\Trace\\V2"
),
serialized_pb=_b(
'\n/google/devtools/cloudtrace_v2/proto/trace.proto\x12\x1dgoogle.devtools.cloudtrace.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto"\xc2\x10\n\x04Span\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x14\n\x07span_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x16\n\x0eparent_span_id\x18\x03 \x01(\t\x12K\n\x0c\x64isplay_name\x18\x04 \x01(\x0b\x32\x30.google.devtools.cloudtrace.v2.TruncatableStringB\x03\xe0\x41\x02\x12\x33\n\nstart_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x02\x12\x31\n\x08\x65nd_time\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x02\x12\x42\n\nattributes\x18\x07 \x01(\x0b\x32..google.devtools.cloudtrace.v2.Span.Attributes\x12>\n\x0bstack_trace\x18\x08 \x01(\x0b\x32).google.devtools.cloudtrace.v2.StackTrace\x12\x43\n\x0btime_events\x18\t \x01(\x0b\x32..google.devtools.cloudtrace.v2.Span.TimeEvents\x12\x38\n\x05links\x18\n \x01(\x0b\x32).google.devtools.cloudtrace.v2.Span.Links\x12\'\n\x06status\x18\x0b \x01(\x0b\x32\x12.google.rpc.StatusB\x03\xe0\x41\x01\x12\x44\n\x1bsame_process_as_parent_span\x18\x0c \x01(\x0b\x32\x1a.google.protobuf.BoolValueB\x03\xe0\x41\x01\x12:\n\x10\x63hild_span_count\x18\r \x01(\x0b\x32\x1b.google.protobuf.Int32ValueB\x03\xe0\x41\x01\x1a\xeb\x01\n\nAttributes\x12W\n\rattribute_map\x18\x01 \x03(\x0b\x32@.google.devtools.cloudtrace.v2.Span.Attributes.AttributeMapEntry\x12 \n\x18\x64ropped_attributes_count\x18\x02 \x01(\x05\x1a\x62\n\x11\x41ttributeMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12<\n\x05value\x18\x02 \x01(\x0b\x32-.google.devtools.cloudtrace.v2.AttributeValue:\x02\x38\x01\x1a\xdf\x04\n\tTimeEvent\x12(\n\x04time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12N\n\nannotation\x18\x02 \x01(\x0b\x32\x38.google.devtools.cloudtrace.v2.Span.TimeEvent.AnnotationH\x00\x12S\n\rmessage_event\x18\x03 
\x01(\x0b\x32:.google.devtools.cloudtrace.v2.Span.TimeEvent.MessageEventH\x00\x1a\x97\x01\n\nAnnotation\x12\x45\n\x0b\x64\x65scription\x18\x01 \x01(\x0b\x32\x30.google.devtools.cloudtrace.v2.TruncatableString\x12\x42\n\nattributes\x18\x02 \x01(\x0b\x32..google.devtools.cloudtrace.v2.Span.Attributes\x1a\xdf\x01\n\x0cMessageEvent\x12M\n\x04type\x18\x01 \x01(\x0e\x32?.google.devtools.cloudtrace.v2.Span.TimeEvent.MessageEvent.Type\x12\n\n\x02id\x18\x02 \x01(\x03\x12\x1f\n\x17uncompressed_size_bytes\x18\x03 \x01(\x03\x12\x1d\n\x15\x63ompressed_size_bytes\x18\x04 \x01(\x03"4\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x08\n\x04SENT\x10\x01\x12\x0c\n\x08RECEIVED\x10\x02\x42\x07\n\x05value\x1a\x98\x01\n\nTimeEvents\x12\x41\n\ntime_event\x18\x01 \x03(\x0b\x32-.google.devtools.cloudtrace.v2.Span.TimeEvent\x12!\n\x19\x64ropped_annotations_count\x18\x02 \x01(\x05\x12$\n\x1c\x64ropped_message_events_count\x18\x03 \x01(\x05\x1a\xf7\x01\n\x04Link\x12\x10\n\x08trace_id\x18\x01 \x01(\t\x12\x0f\n\x07span_id\x18\x02 \x01(\t\x12;\n\x04type\x18\x03 \x01(\x0e\x32-.google.devtools.cloudtrace.v2.Span.Link.Type\x12\x42\n\nattributes\x18\x04 \x01(\x0b\x32..google.devtools.cloudtrace.v2.Span.Attributes"K\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x15\n\x11\x43HILD_LINKED_SPAN\x10\x01\x12\x16\n\x12PARENT_LINKED_SPAN\x10\x02\x1a\\\n\x05Links\x12\x36\n\x04link\x18\x01 \x03(\x0b\x32(.google.devtools.cloudtrace.v2.Span.Link\x12\x1b\n\x13\x64ropped_links_count\x18\x02 \x01(\x05:S\xea\x41P\n\x1e\x63loudtrace.googleapis.com/Span\x12.projects/{project}/traces/{trace}/spans/{span}"\x8e\x01\n\x0e\x41ttributeValue\x12H\n\x0cstring_value\x18\x01 \x01(\x0b\x32\x30.google.devtools.cloudtrace.v2.TruncatableStringH\x00\x12\x13\n\tint_value\x18\x02 \x01(\x03H\x00\x12\x14\n\nbool_value\x18\x03 \x01(\x08H\x00\x42\x07\n\x05value"\x89\x05\n\nStackTrace\x12K\n\x0cstack_frames\x18\x01 \x01(\x0b\x32\x35.google.devtools.cloudtrace.v2.StackTrace.StackFrames\x12\x1b\n\x13stack_trace_hash_id\x18\x02 
\x01(\x03\x1a\x9e\x03\n\nStackFrame\x12G\n\rfunction_name\x18\x01 \x01(\x0b\x32\x30.google.devtools.cloudtrace.v2.TruncatableString\x12P\n\x16original_function_name\x18\x02 \x01(\x0b\x32\x30.google.devtools.cloudtrace.v2.TruncatableString\x12\x43\n\tfile_name\x18\x03 \x01(\x0b\x32\x30.google.devtools.cloudtrace.v2.TruncatableString\x12\x13\n\x0bline_number\x18\x04 \x01(\x03\x12\x15\n\rcolumn_number\x18\x05 \x01(\x03\x12:\n\x0bload_module\x18\x06 \x01(\x0b\x32%.google.devtools.cloudtrace.v2.Module\x12H\n\x0esource_version\x18\x07 \x01(\x0b\x32\x30.google.devtools.cloudtrace.v2.TruncatableString\x1ap\n\x0bStackFrames\x12\x43\n\x05\x66rame\x18\x01 \x03(\x0b\x32\x34.google.devtools.cloudtrace.v2.StackTrace.StackFrame\x12\x1c\n\x14\x64ropped_frames_count\x18\x02 \x01(\x05"\x8e\x01\n\x06Module\x12@\n\x06module\x18\x01 \x01(\x0b\x32\x30.google.devtools.cloudtrace.v2.TruncatableString\x12\x42\n\x08\x62uild_id\x18\x02 \x01(\x0b\x32\x30.google.devtools.cloudtrace.v2.TruncatableString"@\n\x11TruncatableString\x12\r\n\x05value\x18\x01 \x01(\t\x12\x1c\n\x14truncated_byte_count\x18\x02 \x01(\x05\x42\xaa\x01\n!com.google.devtools.cloudtrace.v2B\nTraceProtoP\x01ZGgoogle.golang.org/genproto/googleapis/devtools/cloudtrace/v2;cloudtrace\xaa\x02\x15Google.Cloud.Trace.V2\xca\x02\x15Google\\Cloud\\Trace\\V2b\x06proto3'
),
dependencies=[
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,
google_dot_api_dot_resource__pb2.DESCRIPTOR,
google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,
google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,
google_dot_rpc_dot_status__pb2.DESCRIPTOR,
],
)
# Enum descriptor for ``Span.TimeEvent.MessageEvent.Type``: whether a message
# event was SENT or RECEIVED. Machine-generated by protoc; the
# serialized_start/serialized_end offsets index into the serialized_pb blob
# in DESCRIPTOR above, so none of these values may be hand-edited.
_SPAN_TIMEEVENT_MESSAGEEVENT_TYPE = _descriptor.EnumDescriptor(
    name="Type",
    full_name="google.devtools.cloudtrace.v2.Span.TimeEvent.MessageEvent.Type",
    filename=None,
    file=DESCRIPTOR,
    values=[
        _descriptor.EnumValueDescriptor(
            name="TYPE_UNSPECIFIED",
            index=0,
            number=0,
            serialized_options=None,
            type=None,
        ),
        _descriptor.EnumValueDescriptor(
            name="SENT", index=1, number=1, serialized_options=None, type=None
        ),
        _descriptor.EnumValueDescriptor(
            name="RECEIVED", index=2, number=2, serialized_options=None, type=None
        ),
    ],
    containing_type=None,
    serialized_options=None,
    serialized_start=1732,
    serialized_end=1784,
)
# Register with the default symbol database so the enum is resolvable by name.
_sym_db.RegisterEnumDescriptor(_SPAN_TIMEEVENT_MESSAGEEVENT_TYPE)

# Enum descriptor for ``Span.Link.Type``: the relationship of a linked span
# (child-linked or parent-linked). Generated code — do not hand-edit.
_SPAN_LINK_TYPE = _descriptor.EnumDescriptor(
    name="Type",
    full_name="google.devtools.cloudtrace.v2.Span.Link.Type",
    filename=None,
    file=DESCRIPTOR,
    values=[
        _descriptor.EnumValueDescriptor(
            name="TYPE_UNSPECIFIED",
            index=0,
            number=0,
            serialized_options=None,
            type=None,
        ),
        _descriptor.EnumValueDescriptor(
            name="CHILD_LINKED_SPAN",
            index=1,
            number=1,
            serialized_options=None,
            type=None,
        ),
        _descriptor.EnumValueDescriptor(
            name="PARENT_LINKED_SPAN",
            index=2,
            number=2,
            serialized_options=None,
            type=None,
        ),
    ],
    containing_type=None,
    serialized_options=None,
    serialized_start=2123,
    serialized_end=2198,
)
_sym_db.RegisterEnumDescriptor(_SPAN_LINK_TYPE)
_SPAN_ATTRIBUTES_ATTRIBUTEMAPENTRY = _descriptor.Descriptor(
name="AttributeMapEntry",
full_name="google.devtools.cloudtrace.v2.Span.Attributes.AttributeMapEntry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="key",
full_name="google.devtools.cloudtrace.v2.Span.Attributes.AttributeMapEntry.key",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="value",
full_name="google.devtools.cloudtrace.v2.Span.Attributes.AttributeMapEntry.value",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=_b("8\001"),
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1085,
serialized_end=1183,
)
_SPAN_ATTRIBUTES = _descriptor.Descriptor(
name="Attributes",
full_name="google.devtools.cloudtrace.v2.Span.Attributes",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="attribute_map",
full_name="google.devtools.cloudtrace.v2.Span.Attributes.attribute_map",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="dropped_attributes_count",
full_name="google.devtools.cloudtrace.v2.Span.Attributes.dropped_attributes_count",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_SPAN_ATTRIBUTES_ATTRIBUTEMAPENTRY,],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=948,
serialized_end=1183,
)
_SPAN_TIMEEVENT_ANNOTATION = _descriptor.Descriptor(
name="Annotation",
full_name="google.devtools.cloudtrace.v2.Span.TimeEvent.Annotation",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="description",
full_name="google.devtools.cloudtrace.v2.Span.TimeEvent.Annotation.description",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="attributes",
full_name="google.devtools.cloudtrace.v2.Span.TimeEvent.Annotation.attributes",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1407,
serialized_end=1558,
)
_SPAN_TIMEEVENT_MESSAGEEVENT = _descriptor.Descriptor(
name="MessageEvent",
full_name="google.devtools.cloudtrace.v2.Span.TimeEvent.MessageEvent",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="type",
full_name="google.devtools.cloudtrace.v2.Span.TimeEvent.MessageEvent.type",
index=0,
number=1,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="id",
full_name="google.devtools.cloudtrace.v2.Span.TimeEvent.MessageEvent.id",
index=1,
number=2,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="uncompressed_size_bytes",
full_name="google.devtools.cloudtrace.v2.Span.TimeEvent.MessageEvent.uncompressed_size_bytes",
index=2,
number=3,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="compressed_size_bytes",
full_name="google.devtools.cloudtrace.v2.Span.TimeEvent.MessageEvent.compressed_size_bytes",
index=3,
number=4,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[_SPAN_TIMEEVENT_MESSAGEEVENT_TYPE,],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1561,
serialized_end=1784,
)
_SPAN_TIMEEVENT = _descriptor.Descriptor(
name="TimeEvent",
full_name="google.devtools.cloudtrace.v2.Span.TimeEvent",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="time",
full_name="google.devtools.cloudtrace.v2.Span.TimeEvent.time",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="annotation",
full_name="google.devtools.cloudtrace.v2.Span.TimeEvent.annotation",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="message_event",
full_name="google.devtools.cloudtrace.v2.Span.TimeEvent.message_event",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_SPAN_TIMEEVENT_ANNOTATION, _SPAN_TIMEEVENT_MESSAGEEVENT,],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="value",
full_name="google.devtools.cloudtrace.v2.Span.TimeEvent.value",
index=0,
containing_type=None,
fields=[],
),
],
serialized_start=1186,
serialized_end=1793,
)
_SPAN_TIMEEVENTS = _descriptor.Descriptor(
name="TimeEvents",
full_name="google.devtools.cloudtrace.v2.Span.TimeEvents",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="time_event",
full_name="google.devtools.cloudtrace.v2.Span.TimeEvents.time_event",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="dropped_annotations_count",
full_name="google.devtools.cloudtrace.v2.Span.TimeEvents.dropped_annotations_count",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="dropped_message_events_count",
full_name="google.devtools.cloudtrace.v2.Span.TimeEvents.dropped_message_events_count",
index=2,
number=3,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1796,
serialized_end=1948,
)
_SPAN_LINK = _descriptor.Descriptor(
name="Link",
full_name="google.devtools.cloudtrace.v2.Span.Link",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="trace_id",
full_name="google.devtools.cloudtrace.v2.Span.Link.trace_id",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="span_id",
full_name="google.devtools.cloudtrace.v2.Span.Link.span_id",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="type",
full_name="google.devtools.cloudtrace.v2.Span.Link.type",
index=2,
number=3,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="attributes",
full_name="google.devtools.cloudtrace.v2.Span.Link.attributes",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[_SPAN_LINK_TYPE,],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=1951,
serialized_end=2198,
)
_SPAN_LINKS = _descriptor.Descriptor(
name="Links",
full_name="google.devtools.cloudtrace.v2.Span.Links",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="link",
full_name="google.devtools.cloudtrace.v2.Span.Links.link",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="dropped_links_count",
full_name="google.devtools.cloudtrace.v2.Span.Links.dropped_links_count",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=2200,
serialized_end=2292,
)
# Descriptor for the top-level ``Span`` message.  This file appears to be
# protoc-generated (note the @@protoc_insertion_point markers below); the
# field numbers, wire types and serialized_start/serialized_end offsets
# must stay in sync with the compiled .proto — do not hand-edit values.
_SPAN = _descriptor.Descriptor(
    name="Span",
    full_name="google.devtools.cloudtrace.v2.Span",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    # One FieldDescriptor per proto field; serialized_options=_b("\340A\002")
    # marks a field REQUIRED and _b("\340A\001") OPTIONAL (field_behavior).
    fields=[
        _descriptor.FieldDescriptor(
            name="name",
            full_name="google.devtools.cloudtrace.v2.Span.name",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=_b("\340A\002"),
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="span_id",
            full_name="google.devtools.cloudtrace.v2.Span.span_id",
            index=1,
            number=2,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=_b("\340A\002"),
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="parent_span_id",
            full_name="google.devtools.cloudtrace.v2.Span.parent_span_id",
            index=2,
            number=3,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="display_name",
            full_name="google.devtools.cloudtrace.v2.Span.display_name",
            index=3,
            number=4,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=_b("\340A\002"),
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="start_time",
            full_name="google.devtools.cloudtrace.v2.Span.start_time",
            index=4,
            number=5,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=_b("\340A\002"),
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="end_time",
            full_name="google.devtools.cloudtrace.v2.Span.end_time",
            index=5,
            number=6,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=_b("\340A\002"),
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="attributes",
            full_name="google.devtools.cloudtrace.v2.Span.attributes",
            index=6,
            number=7,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="stack_trace",
            full_name="google.devtools.cloudtrace.v2.Span.stack_trace",
            index=7,
            number=8,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="time_events",
            full_name="google.devtools.cloudtrace.v2.Span.time_events",
            index=8,
            number=9,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="links",
            full_name="google.devtools.cloudtrace.v2.Span.links",
            index=9,
            number=10,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="status",
            full_name="google.devtools.cloudtrace.v2.Span.status",
            index=10,
            number=11,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=_b("\340A\001"),
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="same_process_as_parent_span",
            full_name="google.devtools.cloudtrace.v2.Span.same_process_as_parent_span",
            index=11,
            number=12,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=_b("\340A\001"),
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="child_span_count",
            full_name="google.devtools.cloudtrace.v2.Span.child_span_count",
            index=12,
            number=13,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=_b("\340A\001"),
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    # Nested message descriptors defined earlier in the file.
    nested_types=[
        _SPAN_ATTRIBUTES,
        _SPAN_TIMEEVENT,
        _SPAN_TIMEEVENTS,
        _SPAN_LINK,
        _SPAN_LINKS,
    ],
    enum_types=[],
    # Encoded resource-name pattern option for the Span resource.
    serialized_options=_b(
        "\352AP\n\036cloudtrace.googleapis.com/Span\022.projects/{project}/traces/{trace}/spans/{span}"
    ),
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=263,
    serialized_end=2377,
)
# Descriptor for AttributeValue: a oneof of string/int/bool attribute values.
# Protoc-generated wiring — do not hand-edit numbers or offsets.
_ATTRIBUTEVALUE = _descriptor.Descriptor(
    name="AttributeValue",
    full_name="google.devtools.cloudtrace.v2.AttributeValue",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="string_value",
            full_name="google.devtools.cloudtrace.v2.AttributeValue.string_value",
            index=0,
            number=1,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="int_value",
            full_name="google.devtools.cloudtrace.v2.AttributeValue.int_value",
            index=1,
            number=2,
            type=3,
            cpp_type=2,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="bool_value",
            full_name="google.devtools.cloudtrace.v2.AttributeValue.bool_value",
            index=2,
            number=3,
            type=8,
            cpp_type=7,
            label=1,
            has_default_value=False,
            default_value=False,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    # All three value fields are members of the ``value`` oneof (wired up
    # later in the module with fields_by_name[...].containing_oneof).
    oneofs=[
        _descriptor.OneofDescriptor(
            name="value",
            full_name="google.devtools.cloudtrace.v2.AttributeValue.value",
            index=0,
            containing_type=None,
            fields=[],
        ),
    ],
    serialized_start=2380,
    serialized_end=2522,
)
# Descriptor for StackTrace.StackFrame: one frame of a captured call stack.
_STACKTRACE_STACKFRAME = _descriptor.Descriptor(
    name="StackFrame",
    full_name="google.devtools.cloudtrace.v2.StackTrace.StackFrame",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="function_name",
            full_name="google.devtools.cloudtrace.v2.StackTrace.StackFrame.function_name",
            index=0,
            number=1,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="original_function_name",
            full_name="google.devtools.cloudtrace.v2.StackTrace.StackFrame.original_function_name",
            index=1,
            number=2,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="file_name",
            full_name="google.devtools.cloudtrace.v2.StackTrace.StackFrame.file_name",
            index=2,
            number=3,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="line_number",
            full_name="google.devtools.cloudtrace.v2.StackTrace.StackFrame.line_number",
            index=3,
            number=4,
            type=3,
            cpp_type=2,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="column_number",
            full_name="google.devtools.cloudtrace.v2.StackTrace.StackFrame.column_number",
            index=4,
            number=5,
            type=3,
            cpp_type=2,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="load_module",
            full_name="google.devtools.cloudtrace.v2.StackTrace.StackFrame.load_module",
            index=5,
            number=6,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="source_version",
            full_name="google.devtools.cloudtrace.v2.StackTrace.StackFrame.source_version",
            index=6,
            number=7,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2646,
    serialized_end=3060,
)
# Descriptor for StackTrace.StackFrames: repeated frames plus a drop counter.
_STACKTRACE_STACKFRAMES = _descriptor.Descriptor(
    name="StackFrames",
    full_name="google.devtools.cloudtrace.v2.StackTrace.StackFrames",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="frame",
            full_name="google.devtools.cloudtrace.v2.StackTrace.StackFrames.frame",
            index=0,
            number=1,
            type=11,
            cpp_type=10,
            label=3,
            has_default_value=False,
            default_value=[],
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="dropped_frames_count",
            full_name="google.devtools.cloudtrace.v2.StackTrace.StackFrames.dropped_frames_count",
            index=1,
            number=2,
            type=5,
            cpp_type=1,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=3062,
    serialized_end=3174,
)
# Descriptor for StackTrace: a frame collection plus a de-duplication hash id.
_STACKTRACE = _descriptor.Descriptor(
    name="StackTrace",
    full_name="google.devtools.cloudtrace.v2.StackTrace",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="stack_frames",
            full_name="google.devtools.cloudtrace.v2.StackTrace.stack_frames",
            index=0,
            number=1,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="stack_trace_hash_id",
            full_name="google.devtools.cloudtrace.v2.StackTrace.stack_trace_hash_id",
            index=1,
            number=2,
            type=3,
            cpp_type=2,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[_STACKTRACE_STACKFRAME, _STACKTRACE_STACKFRAMES,],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=2525,
    serialized_end=3174,
)
# Descriptor for Module: a binary module reference (name + build id).
_MODULE = _descriptor.Descriptor(
    name="Module",
    full_name="google.devtools.cloudtrace.v2.Module",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="module",
            full_name="google.devtools.cloudtrace.v2.Module.module",
            index=0,
            number=1,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="build_id",
            full_name="google.devtools.cloudtrace.v2.Module.build_id",
            index=1,
            number=2,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=3177,
    serialized_end=3319,
)
# Descriptor for TruncatableString: a possibly-shortened string value.
_TRUNCATABLESTRING = _descriptor.Descriptor(
    name="TruncatableString",
    full_name="google.devtools.cloudtrace.v2.TruncatableString",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="value",
            full_name="google.devtools.cloudtrace.v2.TruncatableString.value",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="truncated_byte_count",
            full_name="google.devtools.cloudtrace.v2.TruncatableString.truncated_byte_count",
            index=1,
            number=2,
            type=5,
            cpp_type=1,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=3321,
    serialized_end=3385,
)
# --- Descriptor cross-linking (protoc-generated, order-dependent) ---
# These statements resolve message/enum field references, attach nested
# types to their containers, populate oneofs, and finally register every
# message type on the file DESCRIPTOR.  Do not reorder by hand.
_SPAN_ATTRIBUTES_ATTRIBUTEMAPENTRY.fields_by_name[
    "value"
].message_type = _ATTRIBUTEVALUE
_SPAN_ATTRIBUTES_ATTRIBUTEMAPENTRY.containing_type = _SPAN_ATTRIBUTES
_SPAN_ATTRIBUTES.fields_by_name[
    "attribute_map"
].message_type = _SPAN_ATTRIBUTES_ATTRIBUTEMAPENTRY
_SPAN_ATTRIBUTES.containing_type = _SPAN
_SPAN_TIMEEVENT_ANNOTATION.fields_by_name[
    "description"
].message_type = _TRUNCATABLESTRING
_SPAN_TIMEEVENT_ANNOTATION.fields_by_name["attributes"].message_type = _SPAN_ATTRIBUTES
_SPAN_TIMEEVENT_ANNOTATION.containing_type = _SPAN_TIMEEVENT
_SPAN_TIMEEVENT_MESSAGEEVENT.fields_by_name[
    "type"
].enum_type = _SPAN_TIMEEVENT_MESSAGEEVENT_TYPE
_SPAN_TIMEEVENT_MESSAGEEVENT.containing_type = _SPAN_TIMEEVENT
_SPAN_TIMEEVENT_MESSAGEEVENT_TYPE.containing_type = _SPAN_TIMEEVENT_MESSAGEEVENT
_SPAN_TIMEEVENT.fields_by_name[
    "time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_SPAN_TIMEEVENT.fields_by_name["annotation"].message_type = _SPAN_TIMEEVENT_ANNOTATION
_SPAN_TIMEEVENT.fields_by_name[
    "message_event"
].message_type = _SPAN_TIMEEVENT_MESSAGEEVENT
_SPAN_TIMEEVENT.containing_type = _SPAN
# TimeEvent.value oneof: either ``annotation`` or ``message_event``.
_SPAN_TIMEEVENT.oneofs_by_name["value"].fields.append(
    _SPAN_TIMEEVENT.fields_by_name["annotation"]
)
_SPAN_TIMEEVENT.fields_by_name[
    "annotation"
].containing_oneof = _SPAN_TIMEEVENT.oneofs_by_name["value"]
_SPAN_TIMEEVENT.oneofs_by_name["value"].fields.append(
    _SPAN_TIMEEVENT.fields_by_name["message_event"]
)
_SPAN_TIMEEVENT.fields_by_name[
    "message_event"
].containing_oneof = _SPAN_TIMEEVENT.oneofs_by_name["value"]
_SPAN_TIMEEVENTS.fields_by_name["time_event"].message_type = _SPAN_TIMEEVENT
_SPAN_TIMEEVENTS.containing_type = _SPAN
_SPAN_LINK.fields_by_name["type"].enum_type = _SPAN_LINK_TYPE
_SPAN_LINK.fields_by_name["attributes"].message_type = _SPAN_ATTRIBUTES
_SPAN_LINK.containing_type = _SPAN
_SPAN_LINK_TYPE.containing_type = _SPAN_LINK
_SPAN_LINKS.fields_by_name["link"].message_type = _SPAN_LINK
_SPAN_LINKS.containing_type = _SPAN
_SPAN.fields_by_name["display_name"].message_type = _TRUNCATABLESTRING
_SPAN.fields_by_name[
    "start_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_SPAN.fields_by_name[
    "end_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_SPAN.fields_by_name["attributes"].message_type = _SPAN_ATTRIBUTES
_SPAN.fields_by_name["stack_trace"].message_type = _STACKTRACE
_SPAN.fields_by_name["time_events"].message_type = _SPAN_TIMEEVENTS
_SPAN.fields_by_name["links"].message_type = _SPAN_LINKS
_SPAN.fields_by_name["status"].message_type = google_dot_rpc_dot_status__pb2._STATUS
_SPAN.fields_by_name[
    "same_process_as_parent_span"
].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_SPAN.fields_by_name[
    "child_span_count"
].message_type = google_dot_protobuf_dot_wrappers__pb2._INT32VALUE
_ATTRIBUTEVALUE.fields_by_name["string_value"].message_type = _TRUNCATABLESTRING
# AttributeValue.value oneof: string_value / int_value / bool_value.
_ATTRIBUTEVALUE.oneofs_by_name["value"].fields.append(
    _ATTRIBUTEVALUE.fields_by_name["string_value"]
)
_ATTRIBUTEVALUE.fields_by_name[
    "string_value"
].containing_oneof = _ATTRIBUTEVALUE.oneofs_by_name["value"]
_ATTRIBUTEVALUE.oneofs_by_name["value"].fields.append(
    _ATTRIBUTEVALUE.fields_by_name["int_value"]
)
_ATTRIBUTEVALUE.fields_by_name[
    "int_value"
].containing_oneof = _ATTRIBUTEVALUE.oneofs_by_name["value"]
_ATTRIBUTEVALUE.oneofs_by_name["value"].fields.append(
    _ATTRIBUTEVALUE.fields_by_name["bool_value"]
)
_ATTRIBUTEVALUE.fields_by_name[
    "bool_value"
].containing_oneof = _ATTRIBUTEVALUE.oneofs_by_name["value"]
_STACKTRACE_STACKFRAME.fields_by_name["function_name"].message_type = _TRUNCATABLESTRING
_STACKTRACE_STACKFRAME.fields_by_name[
    "original_function_name"
].message_type = _TRUNCATABLESTRING
_STACKTRACE_STACKFRAME.fields_by_name["file_name"].message_type = _TRUNCATABLESTRING
_STACKTRACE_STACKFRAME.fields_by_name["load_module"].message_type = _MODULE
_STACKTRACE_STACKFRAME.fields_by_name[
    "source_version"
].message_type = _TRUNCATABLESTRING
_STACKTRACE_STACKFRAME.containing_type = _STACKTRACE
_STACKTRACE_STACKFRAMES.fields_by_name["frame"].message_type = _STACKTRACE_STACKFRAME
_STACKTRACE_STACKFRAMES.containing_type = _STACKTRACE
_STACKTRACE.fields_by_name["stack_frames"].message_type = _STACKTRACE_STACKFRAMES
_MODULE.fields_by_name["module"].message_type = _TRUNCATABLESTRING
_MODULE.fields_by_name["build_id"].message_type = _TRUNCATABLESTRING
DESCRIPTOR.message_types_by_name["Span"] = _SPAN
DESCRIPTOR.message_types_by_name["AttributeValue"] = _ATTRIBUTEVALUE
DESCRIPTOR.message_types_by_name["StackTrace"] = _STACKTRACE
DESCRIPTOR.message_types_by_name["Module"] = _MODULE
DESCRIPTOR.message_types_by_name["TruncatableString"] = _TRUNCATABLESTRING
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Span = _reflection.GeneratedProtocolMessageType(
"Span",
(_message.Message,),
dict(
Attributes=_reflection.GeneratedProtocolMessageType(
"Attributes",
(_message.Message,),
dict(
AttributeMapEntry=_reflection.GeneratedProtocolMessageType(
"AttributeMapEntry",
(_message.Message,),
dict(
DESCRIPTOR=_SPAN_ATTRIBUTES_ATTRIBUTEMAPENTRY,
__module__="google.devtools.cloudtrace_v2.proto.trace_pb2"
# @@protoc_insertion_point(class_scope:google.devtools.cloudtrace.v2.Span.Attributes.AttributeMapEntry)
),
),
DESCRIPTOR=_SPAN_ATTRIBUTES,
__module__="google.devtools.cloudtrace_v2.proto.trace_pb2",
__doc__="""A set of attributes, each in the format ``[KEY]:[VALUE]``.
Attributes:
attribute_map:
The set of attributes. Each attribute's key can be up to 128
bytes long. The value can be a string up to 256 bytes, a
signed 64-bit integer, or the Boolean values ``true`` and
``false``. For example: :: "/instance_id": "my-instance"
"/http/user_agent": "" "/http/request_bytes": 300
"abc.com/myattribute": true
dropped_attributes_count:
The number of attributes that were discarded. Attributes can
be discarded because their keys are too long or because there
are too many attributes. If this value is 0 then all
attributes are valid.
""",
# @@protoc_insertion_point(class_scope:google.devtools.cloudtrace.v2.Span.Attributes)
),
),
TimeEvent=_reflection.GeneratedProtocolMessageType(
"TimeEvent",
(_message.Message,),
dict(
Annotation=_reflection.GeneratedProtocolMessageType(
"Annotation",
(_message.Message,),
dict(
DESCRIPTOR=_SPAN_TIMEEVENT_ANNOTATION,
__module__="google.devtools.cloudtrace_v2.proto.trace_pb2",
__doc__="""Text annotation with a set of attributes.
Attributes:
description:
A user-supplied message describing the event. The maximum
length for the description is 256 bytes.
attributes:
A set of attributes on the annotation. You can have up to 4
attributes per Annotation.
""",
# @@protoc_insertion_point(class_scope:google.devtools.cloudtrace.v2.Span.TimeEvent.Annotation)
),
),
MessageEvent=_reflection.GeneratedProtocolMessageType(
"MessageEvent",
(_message.Message,),
dict(
DESCRIPTOR=_SPAN_TIMEEVENT_MESSAGEEVENT,
__module__="google.devtools.cloudtrace_v2.proto.trace_pb2",
__doc__="""An event describing a message sent/received between Spans.
Attributes:
type:
Type of MessageEvent. Indicates whether the message was sent
or received.
id:
An identifier for the MessageEvent's message that can be used
to match SENT and RECEIVED MessageEvents. It is recommended to
be unique within a Span.
uncompressed_size_bytes:
The number of uncompressed bytes sent or received.
compressed_size_bytes:
The number of compressed bytes sent or received. If missing
assumed to be the same size as uncompressed.
""",
# @@protoc_insertion_point(class_scope:google.devtools.cloudtrace.v2.Span.TimeEvent.MessageEvent)
),
),
DESCRIPTOR=_SPAN_TIMEEVENT,
__module__="google.devtools.cloudtrace_v2.proto.trace_pb2",
__doc__="""A time-stamped annotation or message event in the Span.
Attributes:
time:
The timestamp indicating the time the event occurred.
value:
A ``TimeEvent`` can contain either an ``Annotation`` object or
a ``MessageEvent`` object, but not both.
annotation:
Text annotation with a set of attributes.
message_event:
An event describing a message sent/received between Spans.
""",
# @@protoc_insertion_point(class_scope:google.devtools.cloudtrace.v2.Span.TimeEvent)
),
),
TimeEvents=_reflection.GeneratedProtocolMessageType(
"TimeEvents",
(_message.Message,),
dict(
DESCRIPTOR=_SPAN_TIMEEVENTS,
__module__="google.devtools.cloudtrace_v2.proto.trace_pb2",
__doc__="""A collection of ``TimeEvent``\ s. A ``TimeEvent`` is a time-stamped
annotation on the span, consisting of either user-supplied key:value
pairs, or details of a message sent/received between Spans.
Attributes:
time_event:
A collection of ``TimeEvent``\ s.
dropped_annotations_count:
The number of dropped annotations in all the included time
events. If the value is 0, then no annotations were dropped.
dropped_message_events_count:
The number of dropped message events in all the included time
events. If the value is 0, then no message events were
dropped.
""",
# @@protoc_insertion_point(class_scope:google.devtools.cloudtrace.v2.Span.TimeEvents)
),
),
Link=_reflection.GeneratedProtocolMessageType(
"Link",
(_message.Message,),
dict(
DESCRIPTOR=_SPAN_LINK,
__module__="google.devtools.cloudtrace_v2.proto.trace_pb2",
__doc__="""A pointer from the current span to another span in the same trace or in
a different trace. For example, this can be used in batching operations,
where a single batch handler processes multiple requests from different
traces or when the handler receives a request from a different project.
Attributes:
trace_id:
The [TRACE\_ID] for a trace within a project.
span_id:
The [SPAN\_ID] for a span within a trace.
type:
The relationship of the current span relative to the linked
span.
attributes:
A set of attributes on the link. You have have up to 32
attributes per link.
""",
# @@protoc_insertion_point(class_scope:google.devtools.cloudtrace.v2.Span.Link)
),
),
Links=_reflection.GeneratedProtocolMessageType(
"Links",
(_message.Message,),
dict(
DESCRIPTOR=_SPAN_LINKS,
__module__="google.devtools.cloudtrace_v2.proto.trace_pb2",
__doc__="""A collection of links, which are references from this span to a span in
the same or different trace.
Attributes:
link:
A collection of links.
dropped_links_count:
The number of dropped links after the maximum size was
enforced. If this value is 0, then no links were dropped.
""",
# @@protoc_insertion_point(class_scope:google.devtools.cloudtrace.v2.Span.Links)
),
),
DESCRIPTOR=_SPAN,
__module__="google.devtools.cloudtrace_v2.proto.trace_pb2",
__doc__="""A span represents a single operation within a trace. Spans can be nested
to form a trace tree. Often, a trace contains a root span that describes
the end-to-end latency, and one or more subspans for its sub-operations.
A trace can also contain multiple root spans, or none at all. Spans do
not need to be contiguous—there may be gaps or overlaps between spans in
a trace.
Attributes:
name:
The resource name of the span in the following format: ::
projects/[PROJECT_ID]/traces/[TRACE_ID]/spans/[SPAN_ID]
[TRACE\_ID] is a unique identifier for a trace within a
project; it is a 32-character hexadecimal encoding of a
16-byte array. [SPAN\_ID] is a unique identifier for a span
within a trace; it is a 16-character hexadecimal encoding of
an 8-byte array.
span_id:
The [SPAN\_ID] portion of the span's resource name.
parent_span_id:
The [SPAN\_ID] of this span's parent span. If this is a root
span, then this field must be empty.
display_name:
A description of the span's operation (up to 128 bytes).
Stackdriver Trace displays the description in the Google Cloud
Platform Console. For example, the display name can be a
qualified method name or a file name and a line number where
the operation is called. A best practice is to use the same
display name within an application and at the same call point.
This makes it easier to correlate spans in different traces.
start_time:
The start time of the span. On the client side, this is the
time kept by the local machine where the span execution
starts. On the server side, this is the time when the server's
application handler starts running.
end_time:
The end time of the span. On the client side, this is the time
kept by the local machine where the span execution ends. On
the server side, this is the time when the server application
handler stops running.
attributes:
A set of attributes on the span. You can have up to 32
attributes per span.
stack_trace:
Stack trace captured at the start of the span.
time_events:
A set of time events. You can have up to 32 annotations and
128 message events per span.
links:
Links associated with the span. You can have up to 128 links
per Span.
status:
Optional. The final status for this span.
same_process_as_parent_span:
Optional. Set this parameter to indicate whether this span is
in the same process as its parent. If you do not set this
parameter, Stackdriver Trace is unable to take advantage of
this helpful information.
child_span_count:
Optional. The number of child spans that were generated while
this span was active. If set, allows implementation to detect
missing child spans.
""",
# @@protoc_insertion_point(class_scope:google.devtools.cloudtrace.v2.Span)
),
)
_sym_db.RegisterMessage(Span)
_sym_db.RegisterMessage(Span.Attributes)
_sym_db.RegisterMessage(Span.Attributes.AttributeMapEntry)
_sym_db.RegisterMessage(Span.TimeEvent)
_sym_db.RegisterMessage(Span.TimeEvent.Annotation)
_sym_db.RegisterMessage(Span.TimeEvent.MessageEvent)
_sym_db.RegisterMessage(Span.TimeEvents)
_sym_db.RegisterMessage(Span.Link)
_sym_db.RegisterMessage(Span.Links)
AttributeValue = _reflection.GeneratedProtocolMessageType(
"AttributeValue",
(_message.Message,),
dict(
DESCRIPTOR=_ATTRIBUTEVALUE,
__module__="google.devtools.cloudtrace_v2.proto.trace_pb2",
__doc__="""The allowed types for [VALUE] in a ``[KEY]:[VALUE]`` attribute.
Attributes:
value:
The type of the value.
string_value:
A string up to 256 bytes long.
int_value:
A 64-bit signed integer.
bool_value:
A Boolean value represented by ``true`` or ``false``.
""",
# @@protoc_insertion_point(class_scope:google.devtools.cloudtrace.v2.AttributeValue)
),
)
_sym_db.RegisterMessage(AttributeValue)
StackTrace = _reflection.GeneratedProtocolMessageType(
"StackTrace",
(_message.Message,),
dict(
StackFrame=_reflection.GeneratedProtocolMessageType(
"StackFrame",
(_message.Message,),
dict(
DESCRIPTOR=_STACKTRACE_STACKFRAME,
__module__="google.devtools.cloudtrace_v2.proto.trace_pb2",
__doc__="""Represents a single stack frame in a stack trace.
Attributes:
function_name:
The fully-qualified name that uniquely identifies the function
or method that is active in this frame (up to 1024 bytes).
original_function_name:
An un-mangled function name, if ``function_name`` is `mangled
<http://www.avabodh.com/cxxin/namemangling.html>`__. The name
can be fully-qualified (up to 1024 bytes).
file_name:
The name of the source file where the function call appears
(up to 256 bytes).
line_number:
The line number in ``file_name`` where the function call
appears.
column_number:
The column number where the function call appears, if
available. This is important in JavaScript because of its
anonymous functions.
load_module:
The binary module from where the code was loaded.
source_version:
The version of the deployed source code (up to 128 bytes).
""",
# @@protoc_insertion_point(class_scope:google.devtools.cloudtrace.v2.StackTrace.StackFrame)
),
),
StackFrames=_reflection.GeneratedProtocolMessageType(
"StackFrames",
(_message.Message,),
dict(
DESCRIPTOR=_STACKTRACE_STACKFRAMES,
__module__="google.devtools.cloudtrace_v2.proto.trace_pb2",
__doc__="""A collection of stack frames, which can be truncated.
Attributes:
frame:
Stack frames in this call stack.
dropped_frames_count:
The number of stack frames that were dropped because there
were too many stack frames. If this value is 0, then no stack
frames were dropped.
""",
# @@protoc_insertion_point(class_scope:google.devtools.cloudtrace.v2.StackTrace.StackFrames)
),
),
DESCRIPTOR=_STACKTRACE,
__module__="google.devtools.cloudtrace_v2.proto.trace_pb2",
__doc__="""A call stack appearing in a trace.
Attributes:
stack_frames:
Stack frames in this stack trace. A maximum of 128 frames are
allowed.
stack_trace_hash_id:
The hash ID is used to conserve network bandwidth for
duplicate stack traces within a single trace. Often multiple
spans will have identical stack traces. The first occurrence
of a stack trace should contain both the ``stackFrame``
content and a value in ``stackTraceHashId``. Subsequent spans
within the same request can refer to that stack trace by only
setting ``stackTraceHashId``.
""",
# @@protoc_insertion_point(class_scope:google.devtools.cloudtrace.v2.StackTrace)
),
)
_sym_db.RegisterMessage(StackTrace)
_sym_db.RegisterMessage(StackTrace.StackFrame)
_sym_db.RegisterMessage(StackTrace.StackFrames)
Module = _reflection.GeneratedProtocolMessageType(
"Module",
(_message.Message,),
dict(
DESCRIPTOR=_MODULE,
__module__="google.devtools.cloudtrace_v2.proto.trace_pb2",
__doc__="""Binary module.
Attributes:
module:
For example: main binary, kernel modules, and dynamic
libraries such as libc.so, sharedlib.so (up to 256 bytes).
build_id:
A unique identifier for the module, usually a hash of its
contents (up to 128 bytes).
""",
# @@protoc_insertion_point(class_scope:google.devtools.cloudtrace.v2.Module)
),
)
_sym_db.RegisterMessage(Module)
TruncatableString = _reflection.GeneratedProtocolMessageType(
"TruncatableString",
(_message.Message,),
dict(
DESCRIPTOR=_TRUNCATABLESTRING,
__module__="google.devtools.cloudtrace_v2.proto.trace_pb2",
__doc__="""Represents a string that might be shortened to a specified length.
Attributes:
value:
The shortened string. For example, if the original string is
500 bytes long and the limit of the string is 128 bytes, then
``value`` contains the first 128 bytes of the 500-byte string.
Truncation always happens on a UTF8 character boundary. If
there are multi-byte characters in the string, then the length
of the shortened string might be less than the size limit.
truncated_byte_count:
The number of bytes removed from the original string. If this
value is 0, then the string was not shortened.
""",
# @@protoc_insertion_point(class_scope:google.devtools.cloudtrace.v2.TruncatableString)
),
)
_sym_db.RegisterMessage(TruncatableString)
DESCRIPTOR._options = None
_SPAN_ATTRIBUTES_ATTRIBUTEMAPENTRY._options = None
_SPAN.fields_by_name["name"]._options = None
_SPAN.fields_by_name["span_id"]._options = None
_SPAN.fields_by_name["display_name"]._options = None
_SPAN.fields_by_name["start_time"]._options = None
_SPAN.fields_by_name["end_time"]._options = None
_SPAN.fields_by_name["status"]._options = None
_SPAN.fields_by_name["same_process_as_parent_span"]._options = None
_SPAN.fields_by_name["child_span_count"]._options = None
_SPAN._options = None
# @@protoc_insertion_point(module_scope)
| 36.050936
| 5,317
| 0.618393
|
4a117ebda0bc2968e2262aec9b8ed8214f722511
| 14,462
|
py
|
Python
|
laske_export/tests/test_party.py
|
joonvena/mvj
|
2191a6e23067f8e7fdda2bcbfe5e80a5dd749abc
|
[
"MIT"
] | 1
|
2021-01-12T08:14:10.000Z
|
2021-01-12T08:14:10.000Z
|
laske_export/tests/test_party.py
|
joonvena/mvj
|
2191a6e23067f8e7fdda2bcbfe5e80a5dd749abc
|
[
"MIT"
] | 249
|
2017-04-18T14:00:13.000Z
|
2022-03-30T12:18:03.000Z
|
laske_export/tests/test_party.py
|
joonvena/mvj
|
2191a6e23067f8e7fdda2bcbfe5e80a5dd749abc
|
[
"MIT"
] | 7
|
2017-04-18T08:43:54.000Z
|
2021-07-28T07:29:30.000Z
|
import pytest
from django.core.exceptions import ValidationError
from laske_export.document.sales_order import Party
from leasing.enums import ContactType
@pytest.mark.django_db
@pytest.mark.parametrize(
"first_name, last_name, expected1, expected2, expected3, expected4",
[
(
# Name
None,
None,
# Expected
"",
None,
None,
None,
),
(
# Name
"First",
None,
# Expected
"First",
None,
None,
None,
),
(
# Name
None,
"Last",
# Expected
"Last",
None,
None,
None,
),
(
# Name
"First name 1",
"Last name 1",
# Expected
"First name 1 Last name 1",
None,
None,
None,
),
(
# Name
"Super long first name 123456789abcde",
"Super long last name 123456789abcde",
# Expected
"Super long first name 123456789abcd",
"e Super long last name 123456789abc",
"de",
None,
),
(
# Name
"Super super super super hyper mega long first name 123456789abcdefghijklm",
"Super super super super hyper mega long last name 123456789abcdefghijklmn",
# Expected
"Super super super super hyper mega ",
"long first name 123456789abcdefghij",
"klm Super super super super hyper m",
"ega long last name 123456789abcdefg",
),
],
)
def test_party_from_contact_person_name(
django_db_setup,
contact_factory,
first_name,
last_name,
expected1,
expected2,
expected3,
expected4,
):
contact = contact_factory(
first_name=first_name, last_name=last_name, type=ContactType.PERSON
)
party = Party()
party.from_contact(contact)
assert party.priority_name1 == expected1
assert party.priority_name2 == expected2
assert party.priority_name3 == expected3
assert party.priority_name4 == expected4
assert party.info_name1 == expected1
assert party.info_name2 == expected2
assert party.info_name3 == expected3
assert party.info_name4 == expected4
@pytest.mark.django_db
@pytest.mark.parametrize(
"first_name, last_name, expected1, expected2, expected3, expected4",
[
(
# Name
None,
None,
# Expected
"",
"c/o Something random",
None,
None,
),
(
# Name
"First",
None,
# Expected
"First",
"c/o Something random",
None,
None,
),
(
# Name
None,
"Last",
# Expected
"Last",
"c/o Something random",
None,
None,
),
(
# Name
"First name 1",
"Last name 1",
# Expected
"First name 1 Last name 1",
"c/o Something random",
None,
None,
),
(
# Name
"Super long first name 123456789abcde",
"Super long last name 123456789abcde",
# Expected
"Super long first name 123456789abcd",
"e Super long last name 123456789abc",
"de",
"c/o Something random",
),
(
# Name
"Super super super super hyper mega long first name 123456789abcdefghijklm",
"Super super super super hyper mega long last name 123456789abcdefghijklmn",
# Expected
"Super super super super hyper mega ",
"long first name 123456789abcdefghij",
"klm Super super super super hyper m",
"c/o Something random",
),
],
)
def test_party_from_contact_person_name_with_care_of(
django_db_setup,
contact_factory,
first_name,
last_name,
expected1,
expected2,
expected3,
expected4,
):
contact = contact_factory(
first_name=first_name,
last_name=last_name,
type=ContactType.PERSON,
care_of="Something random",
)
party = Party()
party.from_contact(contact)
assert party.priority_name1 == expected1, "priority_name1"
assert party.priority_name2 == expected2, "priority_name2"
assert party.priority_name3 == expected3, "priority_name3"
assert party.priority_name4 == expected4, "priority_name4"
assert party.info_name1 == expected1, "info_name1"
assert party.info_name2 == expected2, "info_name2"
assert party.info_name3 == expected3, "info_name3"
assert party.info_name4 == expected4, "info_name4"
@pytest.mark.django_db
@pytest.mark.parametrize(
"first_name, last_name, expected1, expected2, expected3, expected4",
[
(
# Name
None,
None,
# Expected
"",
"c/o Something random super long car",
"e of name 123456789abcdefghijklmnop",
"qrstuvwxyzzyxwvutsrqponmlkjihgfedcb",
),
(
# Name
"First",
None,
# Expected
"First",
"c/o Something random super long car",
"e of name 123456789abcdefghijklmnop",
"qrstuvwxyzzyxwvutsrqponmlkjihgfedcb",
),
(
# Name
None,
"Last",
# Expected
"Last",
"c/o Something random super long car",
"e of name 123456789abcdefghijklmnop",
"qrstuvwxyzzyxwvutsrqponmlkjihgfedcb",
),
(
# Name
"First name 1",
"Last name 1",
# Expected
"First name 1 Last name 1",
"c/o Something random super long car",
"e of name 123456789abcdefghijklmnop",
"qrstuvwxyzzyxwvutsrqponmlkjihgfedcb",
),
(
# Name
"Super long first name 123456789abcde",
"Super long last name 123456789abcde",
# Expected
"Super long first name 123456789abcd",
"e Super long last name 123456789abc",
"de",
"c/o Something random super long car",
),
(
# Name
"Super super super super hyper mega long first name 123456789abcdefghijklm",
"Super super super super hyper mega long last name 123456789abcdefghijklmn",
# Expected
"Super super super super hyper mega ",
"long first name 123456789abcdefghij",
"klm Super super super super hyper m",
"c/o Something random super long car",
),
],
)
def test_party_from_contact_person_name_with_long_care_of(
django_db_setup,
contact_factory,
first_name,
last_name,
expected1,
expected2,
expected3,
expected4,
):
contact = contact_factory(
first_name=first_name,
last_name=last_name,
type=ContactType.PERSON,
care_of="Something random super long care of name 123456789abcdefghijklmnopqrstuvwxyz"
"zyxwvutsrqponmlkjihgfedcba987654321 eman fo erac gnol repus modnar gnihtemoS",
)
party = Party()
party.from_contact(contact)
assert party.priority_name1 == expected1, "priority_name1"
assert party.priority_name2 == expected2, "priority_name2"
assert party.priority_name3 == expected3, "priority_name3"
assert party.priority_name4 == expected4, "priority_name4"
assert party.info_name1 == expected1, "info_name1"
assert party.info_name2 == expected2, "info_name2"
assert party.info_name3 == expected3, "info_name3"
assert party.info_name4 == expected4, "info_name4"
@pytest.mark.django_db
@pytest.mark.parametrize(
"name, expected1, expected2, expected3, expected4",
[
(
# Name
None,
# Expected
"",
None,
None,
None,
),
(
# Name
"Business name",
# Expected
"Business name",
None,
None,
None,
),
(
# Name
"Super long business name 123456789abcde Super long business name 123456789abcde",
# Expected
"Super long business name 123456789a",
"bcde Super long business name 12345",
"6789abcde",
None,
),
(
# Name
"Super super super super hyper mega long business name 123456789abcdefghijklm"
"Super super super super hyper mega long business name 123456789abcdefghijklm",
# Expected
"Super super super super hyper mega ",
"long business name 123456789abcdefg",
"hijklmSuper super super super hyper",
" mega long business name 123456789a",
),
],
)
def test_party_from_contact_name(
django_db_setup, contact_factory, name, expected1, expected2, expected3, expected4
):
contact = contact_factory(name=name, type=ContactType.BUSINESS)
party = Party()
party.from_contact(contact)
assert party.priority_name1 == expected1
assert party.priority_name2 == expected2
assert party.priority_name3 == expected3
assert party.priority_name4 == expected4
assert party.info_name1 == expected1
assert party.info_name2 == expected2
assert party.info_name3 == expected3
assert party.info_name4 == expected4
@pytest.mark.django_db
@pytest.mark.parametrize(
"name, expected1, expected2, expected3, expected4",
[
(
# Name
None,
# Expected
"",
"c/o Something random",
None,
None,
),
(
# Name
"Business name",
# Expected
"Business name",
"c/o Something random",
None,
None,
),
(
# Name
"Super long business name 123456789abcde Super long business name 123456789abcde",
# Expected
"Super long business name 123456789a",
"bcde Super long business name 12345",
"6789abcde",
"c/o Something random",
),
(
# Name
"Super super super super hyper mega long business name 123456789abcdefghijklm"
"Super super super super hyper mega long business name 123456789abcdefghijklm",
# Expected
"Super super super super hyper mega ",
"long business name 123456789abcdefg",
"hijklmSuper super super super hyper",
"c/o Something random",
),
],
)
def test_party_from_contact_name_with_care_of(
django_db_setup, contact_factory, name, expected1, expected2, expected3, expected4
):
contact = contact_factory(
name=name, type=ContactType.BUSINESS, care_of="Something random"
)
party = Party()
party.from_contact(contact)
assert party.priority_name1 == expected1, "priority_name1"
assert party.priority_name2 == expected2, "priority_name2"
assert party.priority_name3 == expected3, "priority_name3"
assert party.priority_name4 == expected4, "priority_name4"
assert party.info_name1 == expected1, "info_name1"
assert party.info_name2 == expected2, "info_name2"
assert party.info_name3 == expected3, "info_name3"
assert party.info_name4 == expected4, "info_name4"
@pytest.mark.django_db
@pytest.mark.parametrize(
"name, expected1, expected2, expected3, expected4",
[
(
# Name
None,
# Expected
"",
"c/o Something random super long car",
"e of name 123456789abcdefghijklmnop",
"qrstuvwxyzzyxwvutsrqponmlkjihgfedcb",
),
(
# Name
"Business name",
# Expected
"Business name",
"c/o Something random super long car",
"e of name 123456789abcdefghijklmnop",
"qrstuvwxyzzyxwvutsrqponmlkjihgfedcb",
),
(
# Name
"Super long business name 123456789abcde Super long business name 123456789abcde",
# Expected
"Super long business name 123456789a",
"bcde Super long business name 12345",
"6789abcde",
"c/o Something random super long car",
),
(
# Name
"Super super super super hyper mega long business name 123456789abcdefghijklm"
"Super super super super hyper mega long businesst name 123456789abcdefghijklm",
# Expected
"Super super super super hyper mega ",
"long business name 123456789abcdefg",
"hijklmSuper super super super hyper",
"c/o Something random super long car",
),
],
)
def test_party_from_contact_person_with_long_care_of(
django_db_setup, contact_factory, name, expected1, expected2, expected3, expected4
):
contact = contact_factory(
name=name,
type=ContactType.BUSINESS,
care_of="Something random super long care of name 123456789abcdefghijklmnopqrstuvwxyz"
"zyxwvutsrqponmlkjihgfedcba987654321 eman fo erac gnol repus modnar gnihtemoS",
)
party = Party()
party.from_contact(contact)
assert party.priority_name1 == expected1, "priority_name1"
assert party.priority_name2 == expected2, "priority_name2"
assert party.priority_name3 == expected3, "priority_name3"
assert party.priority_name4 == expected4, "priority_name4"
assert party.info_name1 == expected1, "info_name1"
assert party.info_name2 == expected2, "info_name2"
assert party.info_name3 == expected3, "info_name3"
assert party.info_name4 == expected4, "info_name4"
@pytest.mark.django_db
def test_invalid_party_contact():
party = Party()
with pytest.raises(ValidationError):
party.from_contact(None)
| 29.514286
| 94
| 0.567626
|
4a117f675a40626253635f35c641fa836e79b5b7
| 847
|
py
|
Python
|
venues/migrations/0028_auto_20201014_1424.py
|
danroberts728/hsvdotbeer
|
5b977bf4a7aab149ad56564b3adbb09424500308
|
[
"Apache-2.0"
] | 18
|
2018-12-06T01:46:37.000Z
|
2021-10-17T10:37:17.000Z
|
venues/migrations/0028_auto_20201014_1424.py
|
danroberts728/hsvdotbeer
|
5b977bf4a7aab149ad56564b3adbb09424500308
|
[
"Apache-2.0"
] | 194
|
2018-11-04T12:50:49.000Z
|
2022-01-06T22:43:43.000Z
|
venues/migrations/0028_auto_20201014_1424.py
|
danroberts728/hsvdotbeer
|
5b977bf4a7aab149ad56564b3adbb09424500308
|
[
"Apache-2.0"
] | 7
|
2019-03-18T05:36:06.000Z
|
2020-12-25T03:27:29.000Z
|
# Generated by Django 3.0.8 on 2020-10-14 14:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("venues", "0027_auto_20200124_2059"),
]
operations = [
migrations.AddField(
model_name="venue",
name="tap_list_last_check_time",
field=models.DateTimeField(
blank=True,
null=True,
verbose_name="The last time the venue's tap list was refreshed",
),
),
migrations.AddField(
model_name="venue",
name="tap_list_last_update_time",
field=models.DateTimeField(
blank=True,
null=True,
verbose_name="The last time the venue's tap list was updated",
),
),
]
| 26.46875
| 80
| 0.539551
|
4a117fc2ea178ffe8c3df421213deb7d232befcc
| 32,395
|
py
|
Python
|
src/_pytest/main.py
|
MarkHoo/pytest
|
04be900d0677791d97e955b42440627b1818fbcb
|
[
"MIT"
] | null | null | null |
src/_pytest/main.py
|
MarkHoo/pytest
|
04be900d0677791d97e955b42440627b1818fbcb
|
[
"MIT"
] | null | null | null |
src/_pytest/main.py
|
MarkHoo/pytest
|
04be900d0677791d97e955b42440627b1818fbcb
|
[
"MIT"
] | null | null | null |
"""Core implementation of the testing process: init, session, runtest loop."""
import argparse
import fnmatch
import functools
import importlib
import os
import sys
from pathlib import Path
from typing import Callable
from typing import Dict
from typing import FrozenSet
from typing import Iterator
from typing import List
from typing import Optional
from typing import overload
from typing import Sequence
from typing import Set
from typing import Tuple
from typing import Type
from typing import TYPE_CHECKING
from typing import Union
import attr
import _pytest._code
from _pytest import nodes
from _pytest.compat import final
from _pytest.config import Config
from _pytest.config import directory_arg
from _pytest.config import ExitCode
from _pytest.config import hookimpl
from _pytest.config import PytestPluginManager
from _pytest.config import UsageError
from _pytest.config.argparsing import Parser
from _pytest.fixtures import FixtureManager
from _pytest.outcomes import exit
from _pytest.pathlib import absolutepath
from _pytest.pathlib import bestrelpath
from _pytest.pathlib import fnmatch_ex
from _pytest.pathlib import visit
from _pytest.reports import CollectReport
from _pytest.reports import TestReport
from _pytest.runner import collect_one_node
from _pytest.runner import SetupState
if TYPE_CHECKING:
from typing_extensions import Literal
def pytest_addoption(parser: Parser) -> None:
parser.addini(
"norecursedirs",
"Directory patterns to avoid for recursion",
type="args",
default=[
"*.egg",
".*",
"_darcs",
"build",
"CVS",
"dist",
"node_modules",
"venv",
"{arch}",
],
)
parser.addini(
"testpaths",
"Directories to search for tests when no files or directories are given on the "
"command line",
type="args",
default=[],
)
group = parser.getgroup("general", "Running and selection options")
group._addoption(
"-x",
"--exitfirst",
action="store_const",
dest="maxfail",
const=1,
help="Exit instantly on first error or failed test",
)
group = parser.getgroup("pytest-warnings")
group.addoption(
"-W",
"--pythonwarnings",
action="append",
help="Set which warnings to report, see -W option of Python itself",
)
parser.addini(
"filterwarnings",
type="linelist",
help="Each line specifies a pattern for "
"warnings.filterwarnings. "
"Processed after -W/--pythonwarnings.",
)
group._addoption(
"--maxfail",
metavar="num",
action="store",
type=int,
dest="maxfail",
default=0,
help="Exit after first num failures or errors",
)
group._addoption(
"--strict-config",
action="store_true",
help="Any warnings encountered while parsing the `pytest` section of the "
"configuration file raise errors",
)
group._addoption(
"--strict-markers",
action="store_true",
help="Markers not registered in the `markers` section of the configuration "
"file raise errors",
)
group._addoption(
"--strict",
action="store_true",
help="(Deprecated) alias to --strict-markers",
)
group._addoption(
"-c",
metavar="file",
type=str,
dest="inifilename",
help="Load configuration from `file` instead of trying to locate one of the "
"implicit configuration files",
)
group._addoption(
"--continue-on-collection-errors",
action="store_true",
default=False,
dest="continue_on_collection_errors",
help="Force test execution even if collection errors occur",
)
group._addoption(
"--rootdir",
action="store",
dest="rootdir",
help="Define root directory for tests. Can be relative path: 'root_dir', './root_dir', "
"'root_dir/another_dir/'; absolute path: '/home/user/root_dir'; path with variables: "
"'$HOME/root_dir'.",
)
group = parser.getgroup("collect", "collection")
group.addoption(
"--collectonly",
"--collect-only",
"--co",
action="store_true",
help="Only collect tests, don't execute them",
)
group.addoption(
"--pyargs",
action="store_true",
help="Try to interpret all arguments as Python packages",
)
group.addoption(
"--ignore",
action="append",
metavar="path",
help="Ignore path during collection (multi-allowed)",
)
group.addoption(
"--ignore-glob",
action="append",
metavar="path",
help="Ignore path pattern during collection (multi-allowed)",
)
group.addoption(
"--deselect",
action="append",
metavar="nodeid_prefix",
help="Deselect item (via node id prefix) during collection (multi-allowed)",
)
group.addoption(
"--confcutdir",
dest="confcutdir",
default=None,
metavar="dir",
type=functools.partial(directory_arg, optname="--confcutdir"),
help="Only load conftest.py's relative to specified dir",
)
group.addoption(
"--noconftest",
action="store_true",
dest="noconftest",
default=False,
help="Don't load any conftest.py files",
)
group.addoption(
"--keepduplicates",
"--keep-duplicates",
action="store_true",
dest="keepduplicates",
default=False,
help="Keep duplicate tests",
)
group.addoption(
"--collect-in-virtualenv",
action="store_true",
dest="collect_in_virtualenv",
default=False,
help="Don't ignore tests in a local virtualenv directory",
)
group.addoption(
"--import-mode",
default="prepend",
choices=["prepend", "append", "importlib"],
dest="importmode",
help="Prepend/append to sys.path when importing test modules and conftest "
"files. Default: prepend.",
)
group = parser.getgroup("debugconfig", "test session debugging and configuration")
group.addoption(
"--basetemp",
dest="basetemp",
default=None,
type=validate_basetemp,
metavar="dir",
help=(
"Base temporary directory for this test run. "
"(Warning: this directory is removed if it exists.)"
),
)
def validate_basetemp(path: str) -> str:
# GH 7119
msg = "basetemp must not be empty, the current working directory or any parent directory of it"
# empty path
if not path:
raise argparse.ArgumentTypeError(msg)
def is_ancestor(base: Path, query: Path) -> bool:
"""Return whether query is an ancestor of base."""
if base == query:
return True
return query in base.parents
# check if path is an ancestor of cwd
if is_ancestor(Path.cwd(), Path(path).absolute()):
raise argparse.ArgumentTypeError(msg)
# check symlinks for ancestors
if is_ancestor(Path.cwd().resolve(), Path(path).resolve()):
raise argparse.ArgumentTypeError(msg)
return path
def wrap_session(
config: Config, doit: Callable[[Config, "Session"], Optional[Union[int, ExitCode]]]
) -> Union[int, ExitCode]:
"""Skeleton command line program."""
session = Session.from_config(config)
session.exitstatus = ExitCode.OK
initstate = 0
try:
try:
config._do_configure()
initstate = 1
config.hook.pytest_sessionstart(session=session)
initstate = 2
session.exitstatus = doit(config, session) or 0
except UsageError:
session.exitstatus = ExitCode.USAGE_ERROR
raise
except Failed:
session.exitstatus = ExitCode.TESTS_FAILED
except (KeyboardInterrupt, exit.Exception):
excinfo = _pytest._code.ExceptionInfo.from_current()
exitstatus: Union[int, ExitCode] = ExitCode.INTERRUPTED
if isinstance(excinfo.value, exit.Exception):
if excinfo.value.returncode is not None:
exitstatus = excinfo.value.returncode
if initstate < 2:
sys.stderr.write(f"{excinfo.typename}: {excinfo.value.msg}\n")
config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
session.exitstatus = exitstatus
except BaseException:
session.exitstatus = ExitCode.INTERNAL_ERROR
excinfo = _pytest._code.ExceptionInfo.from_current()
try:
config.notify_exception(excinfo, config.option)
except exit.Exception as exc:
if exc.returncode is not None:
session.exitstatus = exc.returncode
sys.stderr.write(f"{type(exc).__name__}: {exc}\n")
else:
if isinstance(excinfo.value, SystemExit):
sys.stderr.write("mainloop: caught unexpected SystemExit!\n")
finally:
# Explicitly break reference cycle.
excinfo = None # type: ignore
os.chdir(session.startpath)
if initstate >= 2:
try:
config.hook.pytest_sessionfinish(
session=session, exitstatus=session.exitstatus
)
except exit.Exception as exc:
if exc.returncode is not None:
session.exitstatus = exc.returncode
sys.stderr.write(f"{type(exc).__name__}: {exc}\n")
config._ensure_unconfigure()
return session.exitstatus
def pytest_cmdline_main(config: Config) -> Union[int, ExitCode]:
return wrap_session(config, _main)
def _main(config: Config, session: "Session") -> Optional[Union[int, ExitCode]]:
"""Default command line protocol for initialization, session,
running tests and reporting."""
config.hook.pytest_collection(session=session)
config.hook.pytest_runtestloop(session=session)
if session.testsfailed:
return ExitCode.TESTS_FAILED
elif session.testscollected == 0:
return ExitCode.NO_TESTS_COLLECTED
return None
def pytest_collection(session: "Session") -> None:
session.perform_collect()
def pytest_runtestloop(session: "Session") -> bool:
if session.testsfailed and not session.config.option.continue_on_collection_errors:
raise session.Interrupted(
"%d error%s during collection"
% (session.testsfailed, "s" if session.testsfailed != 1 else "")
)
if session.config.option.collectonly:
return True
for i, item in enumerate(session.items):
nextitem = session.items[i + 1] if i + 1 < len(session.items) else None
item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
if session.shouldfail:
raise session.Failed(session.shouldfail)
if session.shouldstop:
raise session.Interrupted(session.shouldstop)
return True
def _in_venv(path: Path) -> bool:
"""Attempt to detect if ``path`` is the root of a Virtual Environment by
checking for the existence of the appropriate activate script."""
bindir = path.joinpath("Scripts" if sys.platform.startswith("win") else "bin")
try:
if not bindir.is_dir():
return False
except OSError:
return False
activates = (
"activate",
"activate.csh",
"activate.fish",
"Activate",
"Activate.bat",
"Activate.ps1",
)
return any(fname.name in activates for fname in bindir.iterdir())
def pytest_ignore_collect(collection_path: Path, config: Config) -> Optional[bool]:
ignore_paths = config._getconftest_pathlist(
"collect_ignore", path=collection_path.parent, rootpath=config.rootpath
)
ignore_paths = ignore_paths or []
excludeopt = config.getoption("ignore")
if excludeopt:
ignore_paths.extend(absolutepath(x) for x in excludeopt)
if collection_path in ignore_paths:
return True
ignore_globs = config._getconftest_pathlist(
"collect_ignore_glob", path=collection_path.parent, rootpath=config.rootpath
)
ignore_globs = ignore_globs or []
excludeglobopt = config.getoption("ignore_glob")
if excludeglobopt:
ignore_globs.extend(absolutepath(x) for x in excludeglobopt)
if any(fnmatch.fnmatch(str(collection_path), str(glob)) for glob in ignore_globs):
return True
allow_in_venv = config.getoption("collect_in_virtualenv")
if not allow_in_venv and _in_venv(collection_path):
return True
return None
def pytest_collection_modifyitems(items: List[nodes.Item], config: Config) -> None:
deselect_prefixes = tuple(config.getoption("deselect") or [])
if not deselect_prefixes:
return
remaining = []
deselected = []
for colitem in items:
if colitem.nodeid.startswith(deselect_prefixes):
deselected.append(colitem)
else:
remaining.append(colitem)
if deselected:
config.hook.pytest_deselected(items=deselected)
items[:] = remaining
class FSHookProxy:
def __init__(self, pm: PytestPluginManager, remove_mods) -> None:
self.pm = pm
self.remove_mods = remove_mods
def __getattr__(self, name: str):
x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods)
self.__dict__[name] = x
return x
class Interrupted(KeyboardInterrupt):
"""Signals that the test run was interrupted."""
__module__ = "builtins" # For py3.
class Failed(Exception):
"""Signals a stop as failed test run."""
@attr.s(slots=True, auto_attribs=True)
class _bestrelpath_cache(Dict[Path, str]):
path: Path
def __missing__(self, path: Path) -> str:
r = bestrelpath(self.path, path)
self[path] = r
return r
@final
class Session(nodes.FSCollector):
Interrupted = Interrupted
Failed = Failed
# Set on the session by runner.pytest_sessionstart.
_setupstate: SetupState
# Set on the session by fixtures.pytest_sessionstart.
_fixturemanager: FixtureManager
exitstatus: Union[int, ExitCode]
def __init__(self, config: Config) -> None:
super().__init__(
path=config.rootpath,
fspath=None,
parent=None,
config=config,
session=self,
nodeid="",
)
self.testsfailed = 0
self.testscollected = 0
self.shouldstop: Union[bool, str] = False
self.shouldfail: Union[bool, str] = False
self.trace = config.trace.root.get("collection")
self._initialpaths: FrozenSet[Path] = frozenset()
self._bestrelpathcache: Dict[Path, str] = _bestrelpath_cache(config.rootpath)
self.config.pluginmanager.register(self, name="session")
@classmethod
def from_config(cls, config: Config) -> "Session":
session: Session = cls._create(config=config)
return session
def __repr__(self) -> str:
return "<%s %s exitstatus=%r testsfailed=%d testscollected=%d>" % (
self.__class__.__name__,
self.name,
getattr(self, "exitstatus", "<UNSET>"),
self.testsfailed,
self.testscollected,
)
@property
def startpath(self) -> Path:
"""The path from which pytest was invoked.
.. versionadded:: 7.0.0
"""
return self.config.invocation_params.dir
def _node_location_to_relpath(self, node_path: Path) -> str:
# bestrelpath is a quite slow function.
return self._bestrelpathcache[node_path]
@hookimpl(tryfirst=True)
def pytest_collectstart(self) -> None:
if self.shouldfail:
raise self.Failed(self.shouldfail)
if self.shouldstop:
raise self.Interrupted(self.shouldstop)
@hookimpl(tryfirst=True)
def pytest_runtest_logreport(
self, report: Union[TestReport, CollectReport]
) -> None:
if report.failed and not hasattr(report, "wasxfail"):
self.testsfailed += 1
maxfail = self.config.getvalue("maxfail")
if maxfail and self.testsfailed >= maxfail:
self.shouldfail = "stopping after %d failures" % (self.testsfailed)
pytest_collectreport = pytest_runtest_logreport
def isinitpath(self, path: Union[str, "os.PathLike[str]"]) -> bool:
# Optimization: Path(Path(...)) is much slower than isinstance.
path_ = path if isinstance(path, Path) else Path(path)
return path_ in self._initialpaths
def gethookproxy(self, fspath: "os.PathLike[str]"):
# Optimization: Path(Path(...)) is much slower than isinstance.
path = fspath if isinstance(fspath, Path) else Path(fspath)
pm = self.config.pluginmanager
# Check if we have the common case of running
# hooks with all conftest.py files.
my_conftestmodules = pm._getconftestmodules(
path,
self.config.getoption("importmode"),
rootpath=self.config.rootpath,
)
remove_mods = pm._conftest_plugins.difference(my_conftestmodules)
if remove_mods:
# One or more conftests are not in use at this fspath.
from .config.compat import PathAwareHookProxy
proxy = PathAwareHookProxy(FSHookProxy(pm, remove_mods))
else:
# All plugins are active for this fspath.
proxy = self.config.hook
return proxy
def _recurse(self, direntry: "os.DirEntry[str]") -> bool:
if direntry.name == "__pycache__":
return False
fspath = Path(direntry.path)
ihook = self.gethookproxy(fspath.parent)
if ihook.pytest_ignore_collect(collection_path=fspath, config=self.config):
return False
norecursepatterns = self.config.getini("norecursedirs")
if any(fnmatch_ex(pat, fspath) for pat in norecursepatterns):
return False
return True
def _collectfile(
self, fspath: Path, handle_dupes: bool = True
) -> Sequence[nodes.Collector]:
assert (
fspath.is_file()
), "{!r} is not a file (isdir={!r}, exists={!r}, islink={!r})".format(
fspath, fspath.is_dir(), fspath.exists(), fspath.is_symlink()
)
ihook = self.gethookproxy(fspath)
if not self.isinitpath(fspath):
if ihook.pytest_ignore_collect(collection_path=fspath, config=self.config):
return ()
if handle_dupes:
keepduplicates = self.config.getoption("keepduplicates")
if not keepduplicates:
duplicate_paths = self.config.pluginmanager._duplicatepaths
if fspath in duplicate_paths:
return ()
else:
duplicate_paths.add(fspath)
return ihook.pytest_collect_file(file_path=fspath, parent=self) # type: ignore[no-any-return]
@overload
def perform_collect(
self, args: Optional[Sequence[str]] = ..., genitems: "Literal[True]" = ...
) -> Sequence[nodes.Item]:
...
@overload
def perform_collect(
self, args: Optional[Sequence[str]] = ..., genitems: bool = ...
) -> Sequence[Union[nodes.Item, nodes.Collector]]:
...
def perform_collect(
self, args: Optional[Sequence[str]] = None, genitems: bool = True
) -> Sequence[Union[nodes.Item, nodes.Collector]]:
"""Perform the collection phase for this session.
This is called by the default :hook:`pytest_collection` hook
implementation; see the documentation of this hook for more details.
For testing purposes, it may also be called directly on a fresh
``Session``.
This function normally recursively expands any collectors collected
from the session to their items, and only items are returned. For
testing purposes, this may be suppressed by passing ``genitems=False``,
in which case the return value contains these collectors unexpanded,
and ``session.items`` is empty.
"""
if args is None:
args = self.config.args
self.trace("perform_collect", self, args)
self.trace.root.indent += 1
self._notfound: List[Tuple[str, Sequence[nodes.Collector]]] = []
self._initial_parts: List[Tuple[Path, List[str]]] = []
self.items: List[nodes.Item] = []
hook = self.config.hook
items: Sequence[Union[nodes.Item, nodes.Collector]] = self.items
try:
initialpaths: List[Path] = []
for arg in args:
fspath, parts = resolve_collection_argument(
self.config.invocation_params.dir,
arg,
as_pypath=self.config.option.pyargs,
)
self._initial_parts.append((fspath, parts))
initialpaths.append(fspath)
self._initialpaths = frozenset(initialpaths)
rep = collect_one_node(self)
self.ihook.pytest_collectreport(report=rep)
self.trace.root.indent -= 1
if self._notfound:
errors = []
for arg, collectors in self._notfound:
if collectors:
errors.append(
f"not found: {arg}\n(no name {arg!r} in any of {collectors!r})"
)
else:
errors.append(f"found no collectors for {arg}")
raise UsageError(*errors)
if not genitems:
items = rep.result
else:
if rep.passed:
for node in rep.result:
self.items.extend(self.genitems(node))
self.config.pluginmanager.check_pending()
hook.pytest_collection_modifyitems(
session=self, config=self.config, items=items
)
finally:
hook.pytest_collection_finish(session=self)
self.testscollected = len(items)
return items
    def collect(self) -> Iterator[Union[nodes.Item, nodes.Collector]]:
        """Collect the initial command-line arguments into top-level nodes.

        For each ``(argpath, names)`` pair recorded by ``perform_collect``:
        directory arguments are walked recursively (yielding every collected
        file node, with Packages handled by their own collector), while file
        arguments are collected and then narrowed down to the nodes matching
        the ``::``-separated selection parts in ``names``. Arguments that
        match nothing are recorded in ``self._notfound`` instead of raising.
        """
        from _pytest.python import Package
        # Keep track of any collected nodes in here, so we don't duplicate fixtures.
        node_cache1: Dict[Path, Sequence[nodes.Collector]] = {}
        node_cache2: Dict[Tuple[Type[nodes.Collector], Path], nodes.Collector] = {}
        # Keep track of any collected collectors in matchnodes paths, so they
        # are not collected more than once.
        matchnodes_cache: Dict[Tuple[Type[nodes.Collector], str], CollectReport] = {}
        # Dirnames of pkgs with dunder-init files.
        pkg_roots: Dict[str, Package] = {}
        for argpath, names in self._initial_parts:
            self.trace("processing argument", (argpath, names))
            self.trace.root.indent += 1
            # Start with a Session root, and delve to argpath item (dir or file)
            # and stack all Packages found on the way.
            # No point in finding packages when collecting doctests.
            if not self.config.getoption("doctestmodules", False):
                pm = self.config.pluginmanager
                for parent in (argpath, *argpath.parents):
                    # NOTE(review): this tests argpath, not parent, on every
                    # iteration — looks intentional upstream (stop everything
                    # once argpath is outside confcutdir), but worth confirming.
                    if not pm._is_in_confcutdir(argpath):
                        break
                    if parent.is_dir():
                        pkginit = parent / "__init__.py"
                        if pkginit.is_file() and pkginit not in node_cache1:
                            col = self._collectfile(pkginit, handle_dupes=False)
                            if col:
                                if isinstance(col[0], Package):
                                    pkg_roots[str(parent)] = col[0]
                                node_cache1[col[0].path] = [col[0]]
            # If it's a directory argument, recurse and look for any Subpackages.
            # Let the Package collector deal with subnodes, don't collect here.
            if argpath.is_dir():
                assert not names, f"invalid arg {(argpath, names)!r}"
                seen_dirs: Set[Path] = set()
                for direntry in visit(str(argpath), self._recurse):
                    if not direntry.is_file():
                        continue
                    path = Path(direntry.path)
                    dirpath = path.parent
                    if dirpath not in seen_dirs:
                        # Collect packages first.
                        seen_dirs.add(dirpath)
                        pkginit = dirpath / "__init__.py"
                        if pkginit.exists():
                            for x in self._collectfile(pkginit):
                                yield x
                                if isinstance(x, Package):
                                    pkg_roots[str(dirpath)] = x
                    if str(dirpath) in pkg_roots:
                        # Do not collect packages here.
                        continue
                    for x in self._collectfile(path):
                        key2 = (type(x), x.path)
                        if key2 in node_cache2:
                            yield node_cache2[key2]
                        else:
                            node_cache2[key2] = x
                            yield x
            else:
                assert argpath.is_file()
                if argpath in node_cache1:
                    col = node_cache1[argpath]
                else:
                    collect_root = pkg_roots.get(str(argpath.parent), self)
                    col = collect_root._collectfile(argpath, handle_dupes=False)
                    if col:
                        node_cache1[argpath] = col
                # Narrow the collected nodes down to the ones matching the
                # "::"-separated selection parts, one level per work item.
                matching = []
                work: List[
                    Tuple[Sequence[Union[nodes.Item, nodes.Collector]], Sequence[str]]
                ] = [(col, names)]
                while work:
                    self.trace("matchnodes", col, names)
                    self.trace.root.indent += 1
                    matchnodes, matchnames = work.pop()
                    for node in matchnodes:
                        if not matchnames:
                            matching.append(node)
                            continue
                        if not isinstance(node, nodes.Collector):
                            continue
                        key = (type(node), node.nodeid)
                        if key in matchnodes_cache:
                            rep = matchnodes_cache[key]
                        else:
                            rep = collect_one_node(node)
                            matchnodes_cache[key] = rep
                        if rep.passed:
                            submatchnodes = []
                            for r in rep.result:
                                # TODO: Remove parametrized workaround once collection structure contains
                                # parametrization.
                                if (
                                    r.name == matchnames[0]
                                    or r.name.split("[")[0] == matchnames[0]
                                ):
                                    submatchnodes.append(r)
                            if submatchnodes:
                                work.append((submatchnodes, matchnames[1:]))
                        else:
                            # Report collection failures here to avoid failing to run some test
                            # specified in the command line because the module could not be
                            # imported (#134).
                            node.ihook.pytest_collectreport(report=rep)
                    self.trace("matchnodes finished -> ", len(matching), "nodes")
                    self.trace.root.indent -= 1
                if not matching:
                    report_arg = "::".join((str(argpath), *names))
                    self._notfound.append((report_arg, col))
                    continue
                # If __init__.py was the only file requested, then the matched
                # node will be the corresponding Package (by default), and the
                # first yielded item will be the __init__ Module itself, so
                # just use that. If this special case isn't taken, then all the
                # files in the package will be yielded.
                if argpath.name == "__init__.py" and isinstance(matching[0], Package):
                    try:
                        yield next(iter(matching[0].collect()))
                    except StopIteration:
                        # The package collects nothing with only an __init__.py
                        # file in it, which gets ignored by the default
                        # "python_files" option.
                        pass
                    continue
                yield from matching
        self.trace.root.indent -= 1
def genitems(
self, node: Union[nodes.Item, nodes.Collector]
) -> Iterator[nodes.Item]:
self.trace("genitems", node)
if isinstance(node, nodes.Item):
node.ihook.pytest_itemcollected(item=node)
yield node
else:
assert isinstance(node, nodes.Collector)
rep = collect_one_node(node)
if rep.passed:
for subnode in rep.result:
yield from self.genitems(subnode)
node.ihook.pytest_collectreport(report=rep)
def search_pypath(module_name: str) -> str:
    """Search sys.path for a dotted module name; return its file-system path.

    Returns *module_name* unchanged whenever a concrete location cannot be
    determined (unimportable name, namespace package, or a malformed spec).
    For a regular package the package *directory* is returned; for a plain
    module, the path of its source file.
    """
    try:
        spec = importlib.util.find_spec(module_name)
    # AttributeError: looks like a package module, but is actually a filename
    # ImportError: the module does not exist
    # ValueError: not a valid module name
    except (AttributeError, ImportError, ValueError):
        return module_name
    if spec is None or spec.origin in (None, "namespace"):
        return module_name
    if spec.submodule_search_locations:
        # Package: hand back the directory that holds its __init__ module.
        return os.path.dirname(spec.origin)
    return spec.origin
def resolve_collection_argument(
    invocation_path: Path, arg: str, *, as_pypath: bool = False
) -> Tuple[Path, List[str]]:
    """Parse a command-line collection argument into ``(fspath, names)``.

    An argument may carry ``::``-separated selection parts, e.g.::

        "pkg/tests/test_foo.py::TestClass::test_foo"

    which resolves to ``(Path(".../test_foo.py"), ["TestClass", "test_foo"])``.
    With ``as_pypath=True`` the path part is a dotted module name
    (``pkg.tests.test_foo::TestClass``) looked up on sys.path first.

    Raises UsageError if the path does not exist, or if it is a directory
    combined with selection parts.
    """
    # Strip a trailing "[param-id]" chunk before splitting on "::" so that a
    # parametrization id containing "::" is not cut apart.
    before_bracket, bracket, after_bracket = str(arg).partition("[")
    path_part, *selection = before_bracket.split("::")
    if selection:
        # Re-attach the bracketed parametrization id to the last part.
        selection[-1] += bracket + after_bracket
    if as_pypath:
        path_part = search_pypath(path_part)
    fspath = absolutepath(invocation_path / path_part)
    if not fspath.exists():
        template = (
            "module or package not found: {arg} (missing __init__.py?)"
            if as_pypath
            else "file or directory not found: {arg}"
        )
        raise UsageError(template.format(arg=arg))
    if selection and fspath.is_dir():
        template = (
            "package argument cannot contain :: selection parts: {arg}"
            if as_pypath
            else "directory argument cannot contain :: selection parts: {arg}"
        )
        raise UsageError(template.format(arg=arg))
    return fspath, selection
| 35.874862
| 105
| 0.588115
|
4a1180ad7a49be9881a495ddaa72e1ca9367016e
| 11,229
|
py
|
Python
|
Lib/site-packages/django/contrib/gis/geos/prototypes/io.py
|
Lucas11200/LocaPy
|
5d1f214c091aa3703b2ff7d3c0713a91ed4a1f48
|
[
"bzip2-1.0.6"
] | 61,676
|
2015-01-01T00:05:13.000Z
|
2022-03-31T20:37:54.000Z
|
checkerista/.env/Lib/site-packages/django/contrib/gis/geos/prototypes/io.py
|
LybaFatimaNasir/CS311S20PID02
|
bc29a8c4c9ee508c74d231c015a57b1ca4dfcb39
|
[
"MIT"
] | 8,884
|
2015-01-01T00:12:05.000Z
|
2022-03-31T19:53:11.000Z
|
checkerista/.env/Lib/site-packages/django/contrib/gis/geos/prototypes/io.py
|
LybaFatimaNasir/CS311S20PID02
|
bc29a8c4c9ee508c74d231c015a57b1ca4dfcb39
|
[
"MIT"
] | 33,143
|
2015-01-01T02:04:52.000Z
|
2022-03-31T19:42:46.000Z
|
import threading
from ctypes import POINTER, Structure, byref, c_byte, c_char_p, c_int, c_size_t
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.libgeos import (
GEOM_PTR, GEOSFuncFactory, geos_version_tuple,
)
from django.contrib.gis.geos.prototypes.errcheck import (
check_geom, check_sized_string, check_string,
)
from django.contrib.gis.geos.prototypes.geom import c_uchar_p, geos_char_p
from django.utils.encoding import force_bytes
# ### The WKB/WKT Reader/Writer structures and pointers ###
# Opaque ctypes stand-ins for the GEOS reader/writer handle types. GEOS only
# ever hands back *pointers* to these, so no fields are declared.
class WKTReader_st(Structure):
    pass
class WKTWriter_st(Structure):
    pass
class WKBReader_st(Structure):
    pass
class WKBWriter_st(Structure):
    pass
WKT_READ_PTR = POINTER(WKTReader_st)
WKT_WRITE_PTR = POINTER(WKTWriter_st)
WKB_READ_PTR = POINTER(WKBReader_st)
# Fix: this previously aliased the *reader* struct (POINTER(WKBReader_st)).
# Harmless at runtime because the structs are opaque, but misleading, and it
# defeats ctypes' pointer-type checking between reader and writer handles.
WKB_WRITE_PTR = POINTER(WKBWriter_st)
# ctypes prototypes for the GEOS C reader/writer API. Each GEOSFuncFactory
# binds one C symbol with its argument/return types and optional errcheck.
# WKTReader routines
wkt_reader_create = GEOSFuncFactory('GEOSWKTReader_create', restype=WKT_READ_PTR)
wkt_reader_destroy = GEOSFuncFactory('GEOSWKTReader_destroy', argtypes=[WKT_READ_PTR])
# Parses a WKT byte string into a geometry pointer; check_geom raises on NULL.
wkt_reader_read = GEOSFuncFactory(
    'GEOSWKTReader_read', argtypes=[WKT_READ_PTR, c_char_p], restype=GEOM_PTR, errcheck=check_geom
)
# WKTWriter routines
wkt_writer_create = GEOSFuncFactory('GEOSWKTWriter_create', restype=WKT_WRITE_PTR)
wkt_writer_destroy = GEOSFuncFactory('GEOSWKTWriter_destroy', argtypes=[WKT_WRITE_PTR])
wkt_writer_write = GEOSFuncFactory(
    'GEOSWKTWriter_write', argtypes=[WKT_WRITE_PTR, GEOM_PTR], restype=geos_char_p, errcheck=check_string
)
wkt_writer_get_outdim = GEOSFuncFactory(
    'GEOSWKTWriter_getOutputDimension', argtypes=[WKT_WRITE_PTR], restype=c_int
)
wkt_writer_set_outdim = GEOSFuncFactory(
    'GEOSWKTWriter_setOutputDimension', argtypes=[WKT_WRITE_PTR, c_int]
)
wkt_writer_set_trim = GEOSFuncFactory('GEOSWKTWriter_setTrim', argtypes=[WKT_WRITE_PTR, c_byte])
wkt_writer_set_precision = GEOSFuncFactory('GEOSWKTWriter_setRoundingPrecision', argtypes=[WKT_WRITE_PTR, c_int])
# WKBReader routines
wkb_reader_create = GEOSFuncFactory('GEOSWKBReader_create', restype=WKB_READ_PTR)
wkb_reader_destroy = GEOSFuncFactory('GEOSWKBReader_destroy', argtypes=[WKB_READ_PTR])
class WKBReadFunc(GEOSFuncFactory):
    """Prototype shared by the two WKB read functions (binary and hex)."""
    # Although the function definitions take `const unsigned char *`
    # as their parameter, we use c_char_p here so the function may
    # take Python strings directly as parameters.  Inside Python there
    # is not a difference between signed and unsigned characters, so
    # it is not a problem.
    argtypes = [WKB_READ_PTR, c_char_p, c_size_t]
    restype = GEOM_PTR
    # Raise on NULL geometry results.
    errcheck = staticmethod(check_geom)
wkb_reader_read = WKBReadFunc('GEOSWKBReader_read')
wkb_reader_read_hex = WKBReadFunc('GEOSWKBReader_readHEX')
# WKBWriter routines
wkb_writer_create = GEOSFuncFactory('GEOSWKBWriter_create', restype=WKB_WRITE_PTR)
wkb_writer_destroy = GEOSFuncFactory('GEOSWKBWriter_destroy', argtypes=[WKB_WRITE_PTR])
# WKB Writing prototypes.
class WKBWriteFunc(GEOSFuncFactory):
    """Prototype for the WKB write functions; the size_t out-param receives
    the length of the returned buffer (checked by check_sized_string)."""
    argtypes = [WKB_WRITE_PTR, GEOM_PTR, POINTER(c_size_t)]
    restype = c_uchar_p
    errcheck = staticmethod(check_sized_string)
wkb_writer_write = WKBWriteFunc('GEOSWKBWriter_write')
wkb_writer_write_hex = WKBWriteFunc('GEOSWKBWriter_writeHEX')
# WKBWriter property getter/setter prototypes.
class WKBWriterGet(GEOSFuncFactory):
    """Prototype for int-returning WKBWriter property getters."""
    argtypes = [WKB_WRITE_PTR]
    restype = c_int
class WKBWriterSet(GEOSFuncFactory):
    """Prototype for int-taking WKBWriter property setters."""
    argtypes = [WKB_WRITE_PTR, c_int]
wkb_writer_get_byteorder = WKBWriterGet('GEOSWKBWriter_getByteOrder')
wkb_writer_set_byteorder = WKBWriterSet('GEOSWKBWriter_setByteOrder')
wkb_writer_get_outdim = WKBWriterGet('GEOSWKBWriter_getOutputDimension')
wkb_writer_set_outdim = WKBWriterSet('GEOSWKBWriter_setOutputDimension')
# The SRID flag is a byte, not an int — override the default prototypes.
wkb_writer_get_include_srid = WKBWriterGet('GEOSWKBWriter_getIncludeSRID', restype=c_byte)
wkb_writer_set_include_srid = WKBWriterSet('GEOSWKBWriter_setIncludeSRID', argtypes=[WKB_WRITE_PTR, c_byte])
# ### Base I/O Class ###
class IOBase(GEOSBase):
    "Base class for GEOS I/O objects."
    def __init__(self):
        # Getting the pointer with the constructor.
        self.ptr = self._constructor()
        # Loading the real destructor function at this point as doing it in
        # __del__ is too late (import error). Accessing .func forces the
        # GEOSFuncFactory to resolve the C symbol now.
        self.destructor.func
# ### Base WKB/WKT Reading and Writing objects ###
# Non-public WKB/WKT reader classes for internal use because
# their `read` methods return _pointers_ instead of GEOSGeometry
# objects.
class _WKTReader(IOBase):
    """Internal WKT reader; ``read`` returns a raw GEOS geometry pointer."""
    _constructor = wkt_reader_create
    ptr_type = WKT_READ_PTR
    destructor = wkt_reader_destroy
    def read(self, wkt):
        # Accept str or bytes; anything else is a caller error.
        if not isinstance(wkt, (bytes, str)):
            raise TypeError
        return wkt_reader_read(self.ptr, force_bytes(wkt))
class _WKBReader(IOBase):
    """Internal WKB reader; ``read`` returns a raw GEOS geometry pointer."""
    _constructor = wkb_reader_create
    ptr_type = WKB_READ_PTR
    destructor = wkb_reader_destroy
    def read(self, wkb):
        "Return a _pointer_ to C GEOS Geometry object from the given WKB."
        if isinstance(wkb, memoryview):
            # Binary WKB arrives as a memoryview; copy to bytes for ctypes.
            wkb_s = bytes(wkb)
            return wkb_reader_read(self.ptr, wkb_s, len(wkb_s))
        elif isinstance(wkb, (bytes, str)):
            # str/bytes input is treated as a HEXEWKB string.
            return wkb_reader_read_hex(self.ptr, wkb, len(wkb))
        else:
            raise TypeError
# ### WKB/WKT Writer Classes ###
class WKTWriter(IOBase):
    """Writes GEOS geometries out as WKT strings.

    ``outdim`` lives in the underlying GEOS writer; ``trim`` and
    ``precision`` are mirrored in Python (``_trim``/``_precision``) so the
    C setters are only called when the value actually changes.
    """
    _constructor = wkt_writer_create
    ptr_type = WKT_WRITE_PTR
    destructor = wkt_writer_destroy
    _trim = False
    _precision = None
    def __init__(self, dim=2, trim=False, precision=None):
        super().__init__()
        # Only push non-default values down to GEOS.
        if bool(trim) != self._trim:
            self.trim = trim
        if precision is not None:
            self.precision = precision
        self.outdim = dim
    def write(self, geom):
        "Return the WKT representation of the given geometry."
        return wkt_writer_write(self.ptr, geom.ptr)
    @property
    def outdim(self):
        # Read directly from the GEOS writer; not cached on the instance.
        return wkt_writer_get_outdim(self.ptr)
    @outdim.setter
    def outdim(self, new_dim):
        if new_dim not in (2, 3):
            raise ValueError('WKT output dimension must be 2 or 3')
        wkt_writer_set_outdim(self.ptr, new_dim)
    @property
    def trim(self):
        return self._trim
    @trim.setter
    def trim(self, flag):
        # Skip the C call when the flag is unchanged.
        if bool(flag) != self._trim:
            self._trim = bool(flag)
            wkt_writer_set_trim(self.ptr, self._trim)
    @property
    def precision(self):
        return self._precision
    @precision.setter
    def precision(self, precision):
        # NOTE: raises AttributeError (not ValueError) on bad input — kept
        # as-is for backward compatibility with callers catching it.
        if (not isinstance(precision, int) or precision < 0) and precision is not None:
            raise AttributeError('WKT output rounding precision must be non-negative integer or None.')
        if precision != self._precision:
            self._precision = precision
            # -1 tells GEOS to disable rounding entirely.
            wkt_writer_set_precision(self.ptr, -1 if precision is None else precision)
class WKBWriter(IOBase):
    """Writes GEOS geometries out as (HEX)WKB, with workarounds for empty
    geometries on older GEOS versions."""
    _constructor = wkb_writer_create
    ptr_type = WKB_WRITE_PTR
    destructor = wkb_writer_destroy
    # Captured once at class definition; used to gate the empty-polygon fix.
    geos_version = geos_version_tuple()
    def __init__(self, dim=2):
        super().__init__()
        self.outdim = dim
    def _handle_empty_point(self, geom):
        """Substitute POINT(NaN NaN) for empty points when writing EWKB;
        plain WKB cannot represent an empty point at all."""
        from django.contrib.gis.geos import Point
        if isinstance(geom, Point) and geom.empty:
            if self.srid:
                # PostGIS uses POINT(NaN NaN) for WKB representation of empty
                # points. Use it for EWKB as it's a PostGIS specific format.
                # https://trac.osgeo.org/postgis/ticket/3181
                geom = Point(float('NaN'), float('NaN'), srid=geom.srid)
            else:
                raise ValueError('Empty point is not representable in WKB.')
        return geom
    def write(self, geom):
        "Return the WKB representation of the given geometry."
        from django.contrib.gis.geos import Polygon
        geom = self._handle_empty_point(geom)
        wkb = wkb_writer_write(self.ptr, geom.ptr, byref(c_size_t()))
        if self.geos_version < (3, 6, 1) and isinstance(geom, Polygon) and geom.empty:
            # Fix GEOS output for empty polygon.
            # See https://trac.osgeo.org/geos/ticket/680.
            wkb = wkb[:-8] + b'\0' * 4
        return memoryview(wkb)
    def write_hex(self, geom):
        "Return the HEXEWKB representation of the given geometry."
        from django.contrib.gis.geos.polygon import Polygon
        geom = self._handle_empty_point(geom)
        wkb = wkb_writer_write_hex(self.ptr, geom.ptr, byref(c_size_t()))
        if self.geos_version < (3, 6, 1) and isinstance(geom, Polygon) and geom.empty:
            # Same empty-polygon fix as write(), in hex (2 chars per byte).
            wkb = wkb[:-16] + b'0' * 8
        return wkb
    # ### WKBWriter Properties ###
    # Property for getting/setting the byteorder.
    def _get_byteorder(self):
        return wkb_writer_get_byteorder(self.ptr)
    def _set_byteorder(self, order):
        if order not in (0, 1):
            raise ValueError('Byte order parameter must be 0 (Big Endian) or 1 (Little Endian).')
        wkb_writer_set_byteorder(self.ptr, order)
    byteorder = property(_get_byteorder, _set_byteorder)
    # Property for getting/setting the output dimension.
    @property
    def outdim(self):
        return wkb_writer_get_outdim(self.ptr)
    @outdim.setter
    def outdim(self, new_dim):
        if new_dim not in (2, 3):
            raise ValueError('WKB output dimension must be 2 or 3')
        wkb_writer_set_outdim(self.ptr, new_dim)
    # Property for getting/setting the include srid flag.
    @property
    def srid(self):
        return bool(wkb_writer_get_include_srid(self.ptr))
    @srid.setter
    def srid(self, include):
        wkb_writer_set_include_srid(self.ptr, bool(include))
# `ThreadLocalIO` object holds instances of the WKT and WKB reader/writer
# objects that are local to the thread. The `GEOSGeometry` internals
# access these instances by calling the module-level functions, defined
# below.
class ThreadLocalIO(threading.local):
    """Per-thread cache slots for the WKT/WKB reader and writer singletons;
    each slot is lazily filled by the module-level accessor functions."""
    wkt_r = None
    wkt_w = None
    wkb_r = None
    wkb_w = None
    ewkb_w = None
# Single shared instance; threading.local gives each thread its own slots.
thread_context = ThreadLocalIO()
# These module-level routines return the I/O object that is local to the
# thread. If the I/O object does not exist yet it will be initialized.
def wkt_r():
    """Return this thread's WKT reader, creating it on first use."""
    if not thread_context.wkt_r:
        thread_context.wkt_r = _WKTReader()
    return thread_context.wkt_r
def wkt_w(dim=2, trim=False, precision=None):
    """Return this thread's WKT writer, created on first use and reconfigured
    with the given output dimension, trim flag and rounding precision."""
    writer = thread_context.wkt_w
    if not writer:
        writer = WKTWriter(dim=dim, trim=trim, precision=precision)
        thread_context.wkt_w = writer
    else:
        # Reuse the cached writer, but refresh every setting for this call.
        writer.outdim = dim
        writer.trim = trim
        writer.precision = precision
    return writer
def wkb_r():
    """Return this thread's WKB reader, creating it on first use."""
    if not thread_context.wkb_r:
        thread_context.wkb_r = _WKBReader()
    return thread_context.wkb_r
def wkb_w(dim=2):
    """Return this thread's WKB writer, created on first use, with the
    requested output dimension applied."""
    writer = thread_context.wkb_w
    if not writer:
        writer = WKBWriter(dim=dim)
        thread_context.wkb_w = writer
    else:
        writer.outdim = dim
    return writer
def ewkb_w(dim=2):
    """Return this thread's EWKB writer (a WKBWriter with SRID output
    enabled), created on first use, with the requested output dimension."""
    writer = thread_context.ewkb_w
    if not writer:
        writer = WKBWriter(dim=dim)
        # EWKB embeds the SRID in the output — switch it on once at creation.
        writer.srid = True
        thread_context.ewkb_w = writer
    else:
        writer.outdim = dim
    return writer
| 33.026471
| 113
| 0.711996
|
4a1180c9d8e45a61a939851401ad7d5f0e1daf16
| 1,814
|
py
|
Python
|
data_structures/stacks/infix_to_postfix_conversion.py
|
jenia90/Python
|
696fb4a681ad9e4d84e0d2b894daf449a3e30b24
|
[
"MIT"
] | 14
|
2020-10-03T05:43:48.000Z
|
2021-11-01T21:02:26.000Z
|
data_structures/stacks/infix_to_postfix_conversion.py
|
jenia90/Python
|
696fb4a681ad9e4d84e0d2b894daf449a3e30b24
|
[
"MIT"
] | 2
|
2021-07-09T21:23:47.000Z
|
2021-08-06T02:47:29.000Z
|
data_structures/stacks/infix_to_postfix_conversion.py
|
jenia90/Python
|
696fb4a681ad9e4d84e0d2b894daf449a3e30b24
|
[
"MIT"
] | 12
|
2020-10-03T05:44:19.000Z
|
2022-01-16T05:37:54.000Z
|
import string
from .stack import Stack
__author__ = "Omkar Pathak"
def is_operand(char):
    """Return True if *char* is a letter or a digit (i.e. an operand token)."""
    return any(char in pool for pool in (string.ascii_letters, string.digits))
def precedence(char):
    """Return integer value representing an operator's precedence, or
    order of operation; -1 for anything that is not a known operator.
    https://en.wikipedia.org/wiki/Order_of_operations
    """
    for operators, rank in ((("+", "-"), 1), (("*", "/"), 2), (("^",), 3)):
        if char in operators:
            return rank
    return -1
def infix_to_postfix(expression):
    """Convert infix notation to postfix notation using the Shunting-yard
    algorithm.

    Raises ValueError("Mismatched parentheses") when the expression has
    unbalanced parentheses (either a stray ')' or an unclosed '(').

    https://en.wikipedia.org/wiki/Shunting-yard_algorithm
    https://en.wikipedia.org/wiki/Infix_notation
    https://en.wikipedia.org/wiki/Reverse_Polish_notation
    """
    stack = Stack(len(expression))
    postfix = []
    for char in expression:
        if is_operand(char):
            postfix.append(char)
        elif char not in {"(", ")"}:
            # Operator: first pop any operators of equal or higher precedence.
            while not stack.is_empty() and precedence(char) <= precedence(stack.peek()):
                postfix.append(stack.pop())
            stack.push(char)
        elif char == "(":
            stack.push(char)
        elif char == ")":
            while not stack.is_empty() and stack.peek() != "(":
                postfix.append(stack.pop())
            # Pop '(' from stack. If the stack ran dry, this ')' has no
            # matching '(' — previously this peeked an empty stack and
            # crashed with an underflow error instead of the intended
            # ValueError.
            if stack.is_empty() or stack.peek() != "(":
                raise ValueError("Mismatched parentheses")
            stack.pop()
    while not stack.is_empty():
        top = stack.pop()
        if top == "(":
            # A '(' that was never closed; previously it leaked into the
            # output instead of being reported.
            raise ValueError("Mismatched parentheses")
        postfix.append(top)
    return " ".join(postfix)
if __name__ == "__main__":
    # Demo: convert a sample infix expression and print both notations.
    expression = "a+b*(c^d-e)^(f+g*h)-i"
    print("Infix to Postfix Notation demonstration:\n")
    print("Infix notation: " + expression)
    print("Postfix notation: " + infix_to_postfix(expression))
| 30.233333
| 88
| 0.605292
|
4a1181350a3b3613c9c5fcc8a0f82316360aae54
| 2,239
|
py
|
Python
|
simple-backend/nlpviewer_backend/models.py
|
gxchris95/stave
|
12239c189723d7325994e75fd1eb3e88364aaa32
|
[
"Apache-2.0"
] | null | null | null |
simple-backend/nlpviewer_backend/models.py
|
gxchris95/stave
|
12239c189723d7325994e75fd1eb3e88364aaa32
|
[
"Apache-2.0"
] | null | null | null |
simple-backend/nlpviewer_backend/models.py
|
gxchris95/stave
|
12239c189723d7325994e75fd1eb3e88364aaa32
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import User


# NOTE(review): this module contained unresolved git merge-conflict markers
# (<<<<<<< HEAD / ======= / >>>>>>>), which made it a SyntaxError. Resolved
# keeping the HEAD side: the richer Project model owned by the built-in
# django.contrib.auth User, plus the CrossDoc model. The other branch's
# ad-hoc User model is dropped in favor of the auth User.
class Project(models.Model):
    """An annotation project: name, ontology and config.

    Relationships: Project.documents / Project.crossdocs (reverse FKs),
    and Project.user (the owning auth user).
    """
    name = models.CharField(max_length=200)
    project_type = models.CharField(max_length=100, default='single_pack')
    ontology = models.TextField(default='')
    multi_ontology = models.TextField(default='')
    config = models.TextField(default='', null=True, blank=True)
    user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        default='',
        related_name='projects',
        null=True,
        blank=True
    )

    class Meta:
        permissions = (
            ('read_project', 'Can read the project'),
            ('edit_annotation', 'Can edit annotation'),
            ('edit_text', 'Can edit the document'),
            ('edit_project', 'Can edit the project'),
            ('remove_project', 'Can remove the project'),
            ('new_project', 'Can create in the project'),
        )

    def __str__(self):
        return self.name


class Document(models.Model):
    """A single document: textPack holds the text body plus annotations."""
    name = models.CharField(max_length=200)
    # relationship: owning project
    project = models.ForeignKey(
        Project,
        on_delete=models.CASCADE,
        default='',
        related_name='documents',
        null=True,
        blank=True
    )
    textPack = models.TextField()


class CrossDoc(models.Model):
    """A cross-document annotation pack belonging to a project."""
    name = models.CharField(max_length=200)
    # relationship: owning project
    project = models.ForeignKey(
        Project,
        on_delete=models.CASCADE,
        default='',
        related_name='crossdocs',
        null=True,
        blank=True
    )
    textPack = models.TextField()
| 26.034884
| 74
| 0.621259
|
4a11816a3be02fddedb19640f0b279f5c206107f
| 4,821
|
py
|
Python
|
src/olympia/shelves/tests/test_views.py
|
imsahil007/addons-server
|
6cdaebe2b67110ad87c8d87558753bb93e41b036
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/shelves/tests/test_views.py
|
imsahil007/addons-server
|
6cdaebe2b67110ad87c8d87558753bb93e41b036
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/shelves/tests/test_views.py
|
imsahil007/addons-server
|
6cdaebe2b67110ad87c8d87558753bb93e41b036
|
[
"BSD-3-Clause"
] | null | null | null |
import json
from django.conf import settings
from olympia import amo
from olympia.amo.tests import addon_factory, ESTestCase, reverse_ns
from olympia.constants.promoted import RECOMMENDED
from olympia.promoted.models import PromotedAddon
from olympia.shelves.models import Shelf, ShelfManagement
class TestShelfViewSet(ESTestCase):
    """Tests for the shelves list endpoint: only enabled shelves are served,
    ordered by position, with search shelves resolving their addons."""
    @classmethod
    def setUpTestData(cls):
        # Index four addons (two extensions, two themes); one of each is
        # promoted as "recommended" so the search-criteria shelf finds it.
        super().setUpTestData()
        # Shouldn't be necessary, but just in case.
        cls.empty_index('default')
        addon_factory(
            name='test addon test01', type=amo.ADDON_EXTENSION,
            average_daily_users=46812, weekly_downloads=132, summary=None)
        addon_factory(
            name='test addon test02', type=amo.ADDON_STATICTHEME,
            average_daily_users=18981, weekly_downloads=145, summary=None)
        addon_ext = addon_factory(
            name='test addon test03', type=amo.ADDON_EXTENSION,
            average_daily_users=482, weekly_downloads=506, summary=None)
        addon_theme = addon_factory(
            name='test addon test04', type=amo.ADDON_STATICTHEME,
            average_daily_users=8838, weekly_downloads=358, summary=None)
        PromotedAddon.objects.create(
            addon=addon_ext, group_id=RECOMMENDED.id
        ).approve_for_version(version=addon_ext.current_version)
        PromotedAddon.objects.create(
            addon=addon_theme, group_id=RECOMMENDED.id
        ).approve_for_version(version=addon_theme.current_version)
        cls.refresh()
    def setUp(self):
        # Three shelves (two search-based, one collection-based) managed at
        # positions 3/2/1; none are enabled yet — tests toggle that.
        self.url = reverse_ns('shelves-list')
        shelf_a = Shelf.objects.create(
            title='Recommended extensions',
            endpoint='search',
            criteria='?promoted=recommended&sort=random&type=extension',
            footer_text='See more recommended extensions')
        shelf_b = Shelf.objects.create(
            title='Enhanced privacy extensions',
            endpoint='collections',
            criteria='privacy-matters',
            footer_text='See more enhanced privacy extensions')
        shelf_c = Shelf.objects.create(
            title='Popular themes',
            endpoint='search',
            criteria='?sort=users&type=statictheme',
            footer_text='See more popular themes')
        self.hpshelf_a = ShelfManagement.objects.create(
            shelf=shelf_a,
            position=3)
        self.hpshelf_b = ShelfManagement.objects.create(
            shelf=shelf_b,
            position=2)
        ShelfManagement.objects.create(
            shelf=shelf_c,
            position=1)
        # Expected URLs the serializer should emit for each shelf type.
        self.search_url = reverse_ns('addon-search') + shelf_a.criteria
        self.collections_url = reverse_ns('collection-addon-list', kwargs={
            'user_pk': settings.TASK_USER_ID,
            'collection_slug': shelf_b.criteria})
    def test_no_enabled_shelves_empty_view(self):
        """With no shelf enabled, the endpoint returns an empty page."""
        response = self.client.get(self.url)
        assert response.status_code == 200
        assert response.json() == {
            'count': 0,
            'next': None,
            'page_count': 1,
            'page_size': 25,
            'previous': None,
            'results': []}
    def test_only_enabled_shelves_in_view(self):
        """Enabled shelves come back ordered by position; disabled shelves
        are omitted; search shelves include resolved addons, collection
        shelves do not."""
        self.hpshelf_a.update(enabled=True)
        self.hpshelf_b.update(enabled=True)
        # don't enable shelf_c
        with self.assertNumQueries(4):
            response = self.client.get(self.url)
        assert response.status_code == 200
        result = json.loads(response.content)
        assert len(result['results']) == 2
        assert result['results'][0]['title'] == 'Enhanced privacy extensions'
        assert result['results'][0]['url'] == self.collections_url
        assert result['results'][0]['endpoint'] == 'collections'
        assert result['results'][0]['criteria'] == 'privacy-matters'
        assert result['results'][0]['footer_text'] == (
            'See more enhanced privacy extensions')
        assert result['results'][0]['footer_pathname'] == ''
        assert result['results'][0]['addons'] is None
        assert result['results'][1]['title'] == 'Recommended extensions'
        assert result['results'][1]['url'] == self.search_url
        assert result['results'][1]['endpoint'] == 'search'
        assert result['results'][1]['criteria'] == (
            '?promoted=recommended&sort=random&type=extension')
        assert result['results'][1]['footer_text'] == (
            'See more recommended extensions')
        assert result['results'][1]['footer_pathname'] == ''
        assert result['results'][1]['addons'][0]['name']['en-US'] == (
            'test addon test03')
        assert result['results'][1]['addons'][0]['promoted']['category'] == (
            'recommended')
        assert result['results'][1]['addons'][0]['type'] == 'extension'
| 38.879032
| 77
| 0.624767
|
4a1182c2344c406e99d8b9dfc30ec1f5db2bf0d8
| 3,444
|
py
|
Python
|
unidef/models/base_model.py
|
qiujiangkun/unidef
|
6d3ca31a6b1d498f38f483d4174f79f7fe920f65
|
[
"MIT"
] | 4
|
2021-11-08T10:01:19.000Z
|
2022-03-17T06:27:14.000Z
|
unidef/models/base_model.py
|
qiujiangkun/unidef
|
6d3ca31a6b1d498f38f483d4174f79f7fe920f65
|
[
"MIT"
] | null | null | null |
unidef/models/base_model.py
|
qiujiangkun/unidef
|
6d3ca31a6b1d498f38f483d4174f79f7fe920f65
|
[
"MIT"
] | null | null | null |
import copy
from unidef.utils.typing_ext import *
from typedmodel import *
from typedmodel.compat import *
from .typed_field import FieldValue, TypedField
class MixedModel(BaseModel):
    """A model with both declared (typed) fields and dynamic extra fields.

    Declared fields live as normal attributes; anything else goes into the
    ``extended`` dict. ``frozen`` guards against mutation after ``freeze()``.
    """
    # Dynamic fields not declared on the class.
    extended: Dict[str, Any] = {}
    # When True, all mutating operations are disallowed (asserted).
    frozen: bool = False
    def __init__(self, **kwargs):
        # Pass-through to the typedmodel BaseModel constructor.
        super().__init__(**kwargs)
    @beartype
    def append_field(self, field: FieldValue) -> __qualname__:
        """Extend an existing field's value with *field*'s value, or set it
        if absent. Assumes the stored value supports ``extend`` (list-like)
        — TODO confirm against FieldValue's contract."""
        assert not self.is_frozen()
        if hasattr(self, field.key):
            value = getattr(self, field.key)
        else:
            value = self.extended.get(field.key)
        if value is not None:
            value.extend(field.value)
        else:
            self.replace_field(field)
        return self
    @beartype
    def replace_field(self, field: FieldValue) -> __qualname__:
        """Set the field's value, overwriting any previous value. Declared
        attributes win over the ``extended`` dict."""
        assert not self.is_frozen()
        if hasattr(self, field.key):
            setattr(self, field.key, field.value)
        else:
            self.extended[field.key] = field.value
        return self
    @beartype
    def remove_field(self, field: TypedField) -> __qualname__:
        """Remove a dynamic field; declared fields cannot be removed."""
        assert not self.is_frozen()
        if hasattr(self, field.key):
            raise Exception(
                "Could not remove required field {} in {}".format(field.key, type(self))
            )
        if field.key in self.extended:
            self.extended.pop(field.key)
        return self
    def _get_field_raw(self, key: str, default):
        # Lookup order: declared attribute, then "<key>_field" alias,
        # then the extended dict, then the supplied default.
        if hasattr(self, key):
            return getattr(self, key)
        if hasattr(self, key + "_field"):
            return getattr(self, key + "_field")
        if key in self.extended:
            return self.extended.get(key)
        else:
            return default
    def get_field(self, field: TypedField) -> Any:
        """Return the field's value, falling back to the field's default."""
        return self._get_field_raw(field.key, field.default)
    def get_field_opt(self, field: TypedField) -> Optional[Any]:
        """Return the field's value, or None if unset."""
        return self._get_field_raw(field.key, None)
    def exist_field(self, field: TypedField) -> bool:
        """Return True if the field is present (declared or extended)."""
        return field.key in self.keys()
    def keys(self) -> List[str]:
        """All field names — declared plus extended — excluding the
        bookkeeping attributes ``extended`` and ``frozen``."""
        keys = set(self._keys())
        keys.update(self.extended.keys())
        for x in ["extended", "frozen"]:
            keys.remove(x)
        return list(keys)
    def __iter__(self):
        # Yields (key, value) pairs, dict-style.
        collected = self.keys()
        for key in collected:
            yield key, self._get_field_raw(key, None)
    def is_frozen(self) -> bool:
        return self.frozen
    def freeze(self) -> __qualname__:
        """Mark the model immutable; mutators assert against this flag."""
        self.frozen = True
        return self
    def unfreeze(self) -> __qualname__:
        self.frozen = False
        return self
    def copy(self, *args, **kwargs) -> __qualname__:
        """Return an unfrozen deep copy (args/kwargs are ignored; kept for
        BaseModel.copy signature compatibility)."""
        this = copy.deepcopy(self)
        this.unfreeze()
        return this
    def __str__(self):
        return f"{type(self).__qualname__}{dict(list(self))}"
    def __repr__(self):
        return self.__str__()
    def __eq__(self, other):
        # Field-wise equality over the union of keys; types must match.
        if self is other:
            return True
        if type(self) != type(other):
            return False
        for key in self.keys():
            if self._get_field_raw(key, default=None) != other._get_field_raw(key, default=None):
                return False
        return True
def test_mixed_model():
    """Smoke test: declared fields show up in keys() and in iteration."""
    class Model(MixedModel):
        key1: int
        key2: int
    instance = Model(key1=1, key2=2)
    assert set(instance.keys()) == {"key1", "key2"}
    assert dict(list(instance)) == {"key1": 1, "key2": 2}
| 28.229508
| 97
| 0.58856
|
4a11858ef8ff13889d910ceac97c0c42fd259830
| 42,451
|
py
|
Python
|
Blender/src/babylon-js/mesh.py
|
leroyron/Exporters
|
663e3598046a485942ffcdf9eedfd4dce9c7b682
|
[
"Apache-2.0"
] | null | null | null |
Blender/src/babylon-js/mesh.py
|
leroyron/Exporters
|
663e3598046a485942ffcdf9eedfd4dce9c7b682
|
[
"Apache-2.0"
] | null | null | null |
Blender/src/babylon-js/mesh.py
|
leroyron/Exporters
|
663e3598046a485942ffcdf9eedfd4dce9c7b682
|
[
"Apache-2.0"
] | null | null | null |
from .logger import *
from .package_level import *
from .f_curve_animatable import *
from .armature import *
from .material import *
from .shape_key_group import *
import bpy
import math
from mathutils import Vector, Quaternion
from random import randint
# Module-level constants mirroring Babylon.js enum values.
# used in Mesh & Node constructors, defined in BABYLON.AbstractMesh
BILLBOARDMODE_NONE = 0
#BILLBOARDMODE_X = 1
#BILLBOARDMODE_Y = 2
#BILLBOARDMODE_Z = 4
BILLBOARDMODE_ALL = 7
# used in Mesh constructor, defined in BABYLON.PhysicsImpostor
SPHERE_IMPOSTER = 1
BOX_IMPOSTER = 2
#PLANE_IMPOSTER = 3
MESH_IMPOSTER = 4
CAPSULE_IMPOSTER = 5
CONE_IMPOSTER = 6
CYLINDER_IMPOSTER = 7
PARTICLE_IMPOSTER = 8
# Feature flag: shape-key (morph target) group export support.
SHAPE_KEY_GROUPS_ALLOWED = False
# Reusable zero vector and identity quaternion for default comparisons.
ZERO_V = Vector((0, 0, 0))
ZERO_Q = Quaternion((1, 0, 0, 0))
#===============================================================================
class Mesh(FCurveAnimatable):
    def __init__(self, object, scene, exporter):
        """Build an export-ready mesh record from a Blender object.

        Captures visibility/physics flags, transform, skeleton binding,
        materials (baked or standard), geometry (positions / normals / UVs /
        colors / indices), sub-meshes (one per material), and shape keys.
        When the object shares mesh data with an already-processed mesh, only
        a MeshInstance is appended to the source mesh and processing stops.
        """
        self.scene = scene
        self.name = object.name
        Logger.log('processing begun of mesh: ' + self.name)
        self.define_animations(object, True, True, True) #Should animations be done when forcedParent
        self.isVisible = not object.hide_render
        self.isPickable = object.data.isPickable
        self.isEnabled = not object.data.loadDisabled
        if hasattr(object.data, 'useFlatShading') and object.data.useFlatShading:
            hasModifier = False
            # extra checking not really working; all checking going to be pulled in future
            # NOTE(review): 'EDGE_SPLIT' is a modifier type, but this scans
            # object.constraints by name — presumably why it "does not work"; confirm.
            for con in object.constraints:
                if con.name == 'EDGE_SPLIT':
                    hasModifier = True
                    break
            if not hasModifier:
                Logger.warn('Found Obsolete "Use Flat Shading" property set True. Replaced by "Edge Split" modifier instead', 2)
        self.checkCollisions = object.data.checkCollisions
        self.receiveShadows = object.data.receiveShadows
        self.castShadows = object.data.castShadows
        self.freezeWorldMatrix = object.data.freezeWorldMatrix
        self.layer = getLayer(object) # used only for lights with 'This Layer Only' checked, not exported
        self.tags = object.data.tags
        # hasSkeleton detection & skeletonID determination
        self.hasSkeleton = False
        objArmature = None # if there's an armature, this will be the one!
        if len(object.vertex_groups) > 0 and not object.data.ignoreSkeleton:
            objArmature = object.find_armature()
            if objArmature != None:
                # used to get bone index, since could be skipping IK bones
                skeleton = exporter.get_skeleton(objArmature.name)
                self.hasSkeleton = skeleton is not None
                if not self.hasSkeleton:
                    Logger.warn('No skeleton with name "' + objArmature.name + '" found skeleton ignored.', 2)
                else:
                    # skeletonId is the armature's ordinal among ARMATURE objects in the scene
                    i = 0
                    for obj in scene.objects:
                        if obj.type == "ARMATURE":
                            if obj == objArmature:
                                self.skeletonId = i
                                break
                            else:
                                i += 1
        # determine Position, rotation, & scaling
        # Use local matrix
        locMatrix = object.matrix_local
        if objArmature != None:
            # unless the armature is the parent
            # NOTE(review): order looks like world * parent_world_inv; the usual
            # local-from-world form is parent_world_inv * world — confirm against
            # mathutils.Matrix multiplication semantics.
            if object.parent and object.parent == objArmature:
                locMatrix = object.matrix_world * object.parent.matrix_world.inverted()
        loc, rot, scale = locMatrix.decompose()
        self.position = loc
        if object.rotation_mode == 'QUATERNION':
            self.rotationQuaternion = rot
        else:
            # Euler angles are negated for the BJS (left-handed) coordinate convention
            self.rotation = scale_vector(rot.to_euler('XYZ'), -1)
        self.scaling = scale
        # ensure no unapplied rotation or scale, when there is an armature
        self.hasUnappliedTransforms = (self.scaling.x != 1 or self.scaling.y != 1 or self.scaling.z != 1 or
            (hasattr(self, 'rotation' ) and not same_vertex (self.rotation , ZERO_V, FLOAT_PRECISION_DEFAULT)) or
            (hasattr(self, 'rotationQuaternion') and not same_quaternion(self.rotationQuaternion, ZERO_Q, FLOAT_PRECISION_DEFAULT))
        )
        # determine parent & dataName
        self.dataName = object.data.name # used to support shared vertex instances in later passed
        if object.parent and object.parent.type != 'ARMATURE':
            self.parentId = object.parent.name
        # Physics
        if object.rigid_body != None:
            shape_items = {'SPHERE' : SPHERE_IMPOSTER,
                           'BOX' : BOX_IMPOSTER,
                           'MESH' : MESH_IMPOSTER,
                           'CAPSULE' : CAPSULE_IMPOSTER,
                           'CONE' : CONE_IMPOSTER,
                           'CYLINDER' : CYLINDER_IMPOSTER,
                           'CONVEX_HULL': PARTICLE_IMPOSTER}
            shape_type = shape_items[object.rigid_body.collision_shape]
            self.physicsImpostor = shape_type
            mass = object.rigid_body.mass
            # clamp near-zero masses to exactly 0 (static body)
            if mass < 0.005:
                mass = 0
            self.physicsMass = mass
            self.physicsFriction = object.rigid_body.friction
            self.physicsRestitution = object.rigid_body.restitution
        # Get if this will be an instance of another, before processing materials, to avoid multi-bakes
        sourceMesh = exporter.getSourceMeshInstance(self.dataName)
        if sourceMesh is not None:
            #need to make sure rotation mode matches, since value initially copied in InstancedMesh constructor
            if hasattr(sourceMesh, 'rotationQuaternion'):
                instRot = None
                instRotq = rot
            else:
                instRot = scale_vector(rot.to_euler('XYZ'), -1)
                instRotq = None
            instance = MeshInstance(self, instRot, instRotq)
            sourceMesh.instances.append(instance)
            Logger.log('mesh is an instance of : ' + sourceMesh.name + '. Processing halted.', 2)
            return
        else:
            self.instances = []
        # process all of the materials required
        recipe = BakingRecipe(object)
        self.billboardMode = BILLBOARDMODE_ALL if recipe.isBillboard else BILLBOARDMODE_NONE
        if recipe.needsBaking:
            if recipe.multipleRenders:
                Logger.warn('Mixing of Cycles & Blender Render in same mesh not supported. No materials exported.', 2)
            else:
                bakedMat = BakedMaterial(exporter, object, recipe)
                exporter.materials.append(bakedMat)
                self.materialId = bakedMat.name
        else:
            bjs_material_slots = []
            for slot in object.material_slots:
                # None will be returned when either the first encounter or must be unique due to baked textures
                material = exporter.getMaterial(slot.name)
                if (material != None):
                    Logger.log('registered as also a user of material: ' + slot.name, 2)
                else:
                    material = StdMaterial(slot, exporter, object)
                    exporter.materials.append(material)
                bjs_material_slots.append(material)
            if len(bjs_material_slots) == 1:
                self.materialId = bjs_material_slots[0].name
            elif len(bjs_material_slots) > 1:
                multimat = MultiMaterial(bjs_material_slots, len(exporter.multiMaterials), exporter.nameSpace)
                self.materialId = multimat.name
                exporter.multiMaterials.append(multimat)
            else:
                Logger.warn('No materials have been assigned: ', 2)
        # Get mesh
        mesh = object.to_mesh(scene, True, 'PREVIEW')
        # Triangulate mesh if required
        Mesh.mesh_triangulate(mesh)
        # Getting vertices and indices
        self.positions = []
        self.normals = []
        self.uvs = [] # not always used
        self.uvs2 = [] # not always used
        self.colors = [] # not always used
        self.indices = []
        self.subMeshes = []
        hasUV = len(mesh.tessface_uv_textures) > 0
        if hasUV:
            # when baking, the bake target is the last UV layer; otherwise the first
            which = len(mesh.tessface_uv_textures) - 1 if recipe.needsBaking else 0
            UVmap = mesh.tessface_uv_textures[which].data
        hasUV2 = len(mesh.tessface_uv_textures) > 1 and not recipe.needsBaking
        if hasUV2:
            UV2map = mesh.tessface_uv_textures[1].data
        hasVertexColor = len(mesh.vertex_colors) > 0
        if hasVertexColor:
            Colormap = mesh.tessface_vertex_colors.active.data
        if self.hasSkeleton:
            weightsPerVertex = []
            indicesPerVertex = []
            influenceCounts = [0, 0, 0, 0, 0, 0, 0, 0, 0] # 9, so accessed orign 1; 0 used for all those greater than 8
            totalInfluencers = 0
            highestInfluenceObserved = 0
        hasShapeKeys = False
        if object.data.shape_keys:
            # shape-key export requires a key literally named 'Basis'
            for block in object.data.shape_keys.key_blocks:
                if (block.name == 'Basis'):
                    hasShapeKeys = True
                    keyOrderMap = []
                    basis = block
                    break
            if not hasShapeKeys:
                Logger.warn('Basis key missing, shape-key processing NOT performed', 2)
        # used tracking of vertices as they are received
        alreadySavedVertices = []
        vertices_Normals = []
        vertices_UVs = []
        vertices_UV2s = []
        vertices_Colors = []
        vertices_indices = []
        vertices_sk_weights = []
        vertices_sk_indices = []
        for v in range(len(mesh.vertices)):
            alreadySavedVertices.append(False)
            vertices_Normals.append([])
            vertices_UVs.append([])
            vertices_UV2s.append([])
            vertices_Colors.append([])
            vertices_indices.append([])
            vertices_sk_weights.append([])
            vertices_sk_indices.append([])
        materialsCount = 1 if recipe.needsBaking else max(1, len(object.material_slots))
        verticesCount = 0
        indicesCount = 0
        # one pass per material; each pass produces one SubMesh
        for materialIndex in range(materialsCount):
            subMeshVerticesStart = verticesCount
            subMeshIndexStart = indicesCount
            for faceIndex in range(len(mesh.tessfaces)): # For each face
                face = mesh.tessfaces[faceIndex]
                if face.material_index != materialIndex and not recipe.needsBaking:
                    continue
                for v in range(3): # For each vertex in face
                    vertex_index = face.vertices[v]
                    vertex = mesh.vertices[vertex_index]
                    position = vertex.co
                    normal = vertex.normal
                    #skeletons
                    if self.hasSkeleton:
                        matricesWeights = []
                        matricesIndices = []
                        # Getting influences
                        for group in vertex.groups:
                            index = group.group
                            weight = group.weight
                            for bone in objArmature.pose.bones:
                                if object.vertex_groups[index].name == bone.name:
                                    matricesWeights.append(weight)
                                    matricesIndices.append(skeleton.get_index_of_bone(bone.name))
                    # Texture coordinates
                    if hasUV:
                        vertex_UV = UVmap[face.index].uv[v]
                    if hasUV2:
                        vertex_UV2 = UV2map[face.index].uv[v]
                    # Vertex color
                    if hasVertexColor:
                        if v == 0:
                            vertex_Color = Colormap[face.index].color1
                        if v == 1:
                            vertex_Color = Colormap[face.index].color2
                        if v == 2:
                            vertex_Color = Colormap[face.index].color3
                    # Check if the current vertex is already saved
                    alreadySaved = alreadySavedVertices[vertex_index]
                    if alreadySaved:
                        alreadySaved = False
                        # UV
                        # NOTE(review): the 'continue' statements below skip the
                        # 'index_UV += 1' at the loop bottom, so index_UV can lag
                        # behind savedIndex and later candidates are compared
                        # against a stale slot.  Reuse is still safe (the slot at
                        # index_UV passed every equality check before being
                        # reused), but some dedup opportunities may be missed —
                        # confirm intent before changing.
                        index_UV = 0
                        for savedIndex in vertices_indices[vertex_index]:
                            vNormal = vertices_Normals[vertex_index][index_UV]
                            if not same_vertex(normal, vNormal, scene.normalsPrecision):
                                continue;
                            if hasUV:
                                vUV = vertices_UVs[vertex_index][index_UV]
                                if not same_array(vertex_UV, vUV, scene.UVsPrecision):
                                    continue
                            if hasUV2:
                                vUV2 = vertices_UV2s[vertex_index][index_UV]
                                if not same_array(vertex_UV2, vUV2, scene.UVsPrecision):
                                    continue
                            if hasVertexColor:
                                vColor = vertices_Colors[vertex_index][index_UV]
                                if not same_color(vertex_Color, vColor, scene.vColorsPrecision):
                                    continue
                            if self.hasSkeleton:
                                vSkWeight = vertices_sk_weights[vertex_index]
                                vSkIndices = vertices_sk_indices[vertex_index]
                                if not same_array(vSkWeight[index_UV], matricesWeights, scene.mWeightsPrecision) or not same_array(vSkIndices[index_UV], matricesIndices, 1):
                                    continue
                            if vertices_indices[vertex_index][index_UV] >= subMeshVerticesStart:
                                alreadySaved = True
                                break
                            index_UV += 1
                    if (alreadySaved):
                        # Reuse vertex
                        index = vertices_indices[vertex_index][index_UV]
                    else:
                        # Export new one
                        index = verticesCount
                        alreadySavedVertices[vertex_index] = True
                        vertices_Normals[vertex_index].append(normal)
                        self.normals.append(normal)
                        if hasUV:
                            vertices_UVs[vertex_index].append(vertex_UV)
                            self.uvs.append(vertex_UV[0])
                            self.uvs.append(vertex_UV[1])
                        if hasUV2:
                            vertices_UV2s[vertex_index].append(vertex_UV2)
                            self.uvs2.append(vertex_UV2[0])
                            self.uvs2.append(vertex_UV2[1])
                        if hasVertexColor:
                            vertices_Colors[vertex_index].append(vertex_Color)
                            self.colors.append(vertex_Color.r)
                            self.colors.append(vertex_Color.g)
                            self.colors.append(vertex_Color.b)
                            self.colors.append(1.0)
                        if self.hasSkeleton:
                            vertices_sk_weights[vertex_index].append(matricesWeights)
                            vertices_sk_indices[vertex_index].append(matricesIndices)
                            nInfluencers = len(matricesWeights)
                            totalInfluencers += nInfluencers
                            if nInfluencers <= 8:
                                influenceCounts[nInfluencers] += 1
                            else:
                                influenceCounts[0] += 1
                            highestInfluenceObserved = nInfluencers if nInfluencers > highestInfluenceObserved else highestInfluenceObserved
                            weightsPerVertex.append(matricesWeights)
                            indicesPerVertex.append(matricesIndices)
                        if hasShapeKeys:
                            keyOrderMap.append([vertex_index, len(self.positions)]) # use len positions before it is append to convert from 1 to 0 origin
                        vertices_indices[vertex_index].append(index)
                        self.positions.append(position)
                        verticesCount += 1
                    self.indices.append(index)
                    indicesCount += 1
            self.subMeshes.append(SubMesh(materialIndex, subMeshVerticesStart, subMeshIndexStart, verticesCount - subMeshVerticesStart, indicesCount - subMeshIndexStart))
        BakedMaterial.meshBakingClean(object)
        Logger.log('num positions : ' + str(len(self.positions)), 2)
        Logger.log('num normals : ' + str(len(self.normals )), 2)
        Logger.log('num uvs : ' + str(len(self.uvs )), 2)
        Logger.log('num uvs2 : ' + str(len(self.uvs2 )), 2)
        Logger.log('num colors : ' + str(len(self.colors )), 2)
        Logger.log('num indices : ' + str(len(self.indices )), 2)
        if self.hasSkeleton:
            Logger.log('Skeleton stats: ', 2)
            self.toFixedInfluencers(weightsPerVertex, indicesPerVertex, object.data.maxInfluencers, highestInfluenceObserved)
            self.skeletonIndices = Mesh.packSkeletonIndices(self.skeletonIndices)
            if (self.numBoneInfluencers > 4):
                self.skeletonIndicesExtra = Mesh.packSkeletonIndices(self.skeletonIndicesExtra)
            Logger.log('Total Influencers: ' + format_f(totalInfluencers), 3)
            Logger.log('Avg # of influencers per vertex: ' + format_f(totalInfluencers / len(self.positions)), 3)
            Logger.log('Highest # of influencers observed: ' + str(highestInfluenceObserved) + ', num vertices with this: ' + format_int(influenceCounts[highestInfluenceObserved if highestInfluenceObserved < 9 else 0]), 3)
            Logger.log('exported as ' + str(self.numBoneInfluencers) + ' influencers', 3)
            nWeights = len(self.skeletonWeights) + (len(self.skeletonWeightsExtra) if hasattr(self, 'skeletonWeightsExtra') else 0)
            Logger.log('num skeletonWeights and skeletonIndices: ' + str(nWeights), 3)
        numZeroAreaFaces = self.find_zero_area_faces()
        if numZeroAreaFaces > 0:
            Logger.warn('# of 0 area faces found: ' + str(numZeroAreaFaces), 2)
        # shape keys for mesh
        if hasShapeKeys:
            Mesh.sort(keyOrderMap)
            self.rawShapeKeys = []
            self.morphTargetManagerId = randint(0, 1000000) # not used for TOB implementation
            groupNames = []
            Logger.log('Shape Keys:', 2)
            # process the keys in the .blend
            for block in object.data.shape_keys.key_blocks:
                # perform name format validation, before processing
                keyName = block.name
                # the Basis shape key is a member of all groups, processed in 2nd pass
                if keyName == 'Basis': continue
                if keyName.find('-') <= 0 and SHAPE_KEY_GROUPS_ALLOWED:
                    if object.data.defaultShapeKeyGroup != DEFAULT_SHAPE_KEY_GROUP:
                        keyName = object.data.defaultShapeKeyGroup + '-' + keyName
                    else: continue
                group = None
                state = keyName
                if SHAPE_KEY_GROUPS_ALLOWED:
                    temp = keyName.upper().partition('-')
                    group = temp[0]
                    state = temp[2]
                self.rawShapeKeys.append(RawShapeKey(block, group, state, keyOrderMap, basis, self.scene.positionsPrecision))
                if SHAPE_KEY_GROUPS_ALLOWED:
                    # check for a new group, add to groupNames if so
                    newGroup = True
                    for group in groupNames:
                        if temp[0] == group:
                            newGroup = False
                            break
                    if newGroup:
                        groupNames.append(temp[0])
            # process into ShapeKeyGroups, when rawShapeKeys found and groups allowed (implied)
            if len(groupNames) > 0:
                self.shapeKeyGroups = []
                basis = RawShapeKey(basis, None, 'BASIS', keyOrderMap, basis, self.scene.positionsPrecision)
                for group in groupNames:
                    self.shapeKeyGroups.append(ShapeKeyGroup(group,self.rawShapeKeys, basis.vertices, self.scene.positionsPrecision))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def find_zero_area_faces(self):
nFaces = int(len(self.indices) / 3)
nZeroAreaFaces = 0
for f in range(nFaces):
faceOffset = f * 3
p1 = self.positions[self.indices[faceOffset ]]
p2 = self.positions[self.indices[faceOffset + 1]]
p3 = self.positions[self.indices[faceOffset + 2]]
if same_vertex(p1, p2) or same_vertex(p1, p3) or same_vertex(p2, p3): nZeroAreaFaces += 1
return nZeroAreaFaces
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@staticmethod
# ShapeKeyGroup depends on AffectedIndices being in asending order, so sort it, probably nothing to do
def sort(keyOrderMap):
notSorted = True
while(notSorted):
notSorted = False
for idx in range(1, len(keyOrderMap)):
if keyOrderMap[idx - 1][1] > keyOrderMap[idx][1]:
tmp = keyOrderMap[idx]
keyOrderMap[idx ] = keyOrderMap[idx - 1]
keyOrderMap[idx - 1] = tmp
notSorted = True
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    @staticmethod
    def mesh_triangulate(mesh):
        """Triangulate `mesh` in place using bmesh, then rebuild its tessfaces.

        Best-effort: the bare except silently ignores ANY failure (missing
        bmesh module or a failure inside the ops), leaving the mesh
        untriangulated.  NOTE(review): narrowing to specific exceptions (or at
        least logging) would make failures diagnosable — confirm intent.
        """
        try:
            import bmesh
            bm = bmesh.new()
            bm.from_mesh(mesh)
            bmesh.ops.triangulate(bm, faces = bm.faces)
            bm.to_mesh(mesh)
            # refresh tessellated faces consumed later by the exporter
            mesh.calc_tessface()
            bm.free()
        except:
            pass
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def toFixedInfluencers(self, weightsPerVertex, indicesPerVertex, maxInfluencers, highestObserved):
if (maxInfluencers > 8 or maxInfluencers < 1):
maxInfluencers = 8
Logger.warn('Maximum # of influencers invalid, set to 8', 3)
self.numBoneInfluencers = maxInfluencers if maxInfluencers < highestObserved else highestObserved
needExtras = self.numBoneInfluencers > 4
maxInfluencersExceeded = 0
fixedWeights = []
fixedIndices = []
fixedWeightsExtra = []
fixedIndicesExtra = []
for i in range(len(weightsPerVertex)):
weights = weightsPerVertex[i]
indices = indicesPerVertex[i]
nInfluencers = len(weights)
if (nInfluencers > self.numBoneInfluencers):
maxInfluencersExceeded += 1
Mesh.sortByDescendingInfluence(weights, indices)
for j in range(4):
fixedWeights.append(weights[j] if nInfluencers > j else 0.0)
fixedIndices.append(indices[j] if nInfluencers > j else 0 )
if needExtras:
for j in range(4, 8):
fixedWeightsExtra.append(weights[j] if nInfluencers > j else 0.0)
fixedIndicesExtra.append(indices[j] if nInfluencers > j else 0 )
self.skeletonWeights = fixedWeights
self.skeletonIndices = fixedIndices
if needExtras:
self.skeletonWeightsExtra = fixedWeightsExtra
self.skeletonIndicesExtra = fixedIndicesExtra
if maxInfluencersExceeded > 0:
Logger.warn('Maximum # of influencers exceeded for ' + format_int(maxInfluencersExceeded) + ' vertices, extras ignored', 3)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# sorts one set of weights & indices by descending weight, by reference
# not shown to help with MakeHuman, but did not hurt. In just so it is not lost for future.
@staticmethod
def sortByDescendingInfluence(weights, indices):
notSorted = True
while(notSorted):
notSorted = False
for idx in range(1, len(weights)):
if weights[idx - 1] < weights[idx]:
tmp = weights[idx]
weights[idx ] = weights[idx - 1]
weights[idx - 1] = tmp
tmp = indices[idx]
indices[idx ] = indices[idx - 1]
indices[idx - 1] = tmp
notSorted = True
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# assume that toFixedInfluencers has already run, which ensures indices length is a multiple of 4
@staticmethod
def packSkeletonIndices(indices):
compressedIndices = []
for i in range(math.floor(len(indices) / 4)):
idx = i * 4
matricesIndicesCompressed = indices[idx ]
matricesIndicesCompressed += indices[idx + 1] << 8
matricesIndicesCompressed += indices[idx + 2] << 16
matricesIndicesCompressed += indices[idx + 3] << 24
compressedIndices.append(matricesIndicesCompressed)
return compressedIndices
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def to_scene_file(self, file_handler):
        """Serialize this mesh as one JSON object into file_handler.

        Writes identity/flags, transform, optional physics, skeleton binding,
        geometry arrays, sub-meshes, animations (via the base class), instances
        and the morph-target manager id.  Field order is fixed; the write_*
        helpers handle the leading commas.
        """
        file_handler.write('{')
        write_string(file_handler, 'name', self.name, True)
        write_string(file_handler, 'id', self.name)
        if hasattr(self, 'parentId'): write_string(file_handler, 'parentId', self.parentId)
        if hasattr(self, 'materialId'): write_string(file_handler, 'materialId', self.materialId)
        write_int(file_handler, 'billboardMode', self.billboardMode)
        write_vector(file_handler, 'position', self.position)
        # exactly one of rotationQuaternion / rotation is present, per __init__
        if hasattr(self, "rotationQuaternion"):
            write_quaternion(file_handler, 'rotationQuaternion', self.rotationQuaternion)
        else:
            write_vector(file_handler, 'rotation', self.rotation)
        write_vector(file_handler, 'scaling', self.scaling)
        write_bool(file_handler, 'isVisible', self.isVisible)
        write_bool(file_handler, 'freezeWorldMatrix', self.freezeWorldMatrix)
        write_bool(file_handler, 'isEnabled', self.isEnabled)
        write_bool(file_handler, 'checkCollisions', self.checkCollisions)
        write_bool(file_handler, 'receiveShadows', self.receiveShadows)
        write_bool(file_handler, 'pickable', self.isPickable)
        write_string(file_handler, 'tags', self.tags)
        if hasattr(self, 'physicsImpostor'):
            write_int(file_handler, 'physicsImpostor', self.physicsImpostor)
            write_float(file_handler, 'physicsMass', self.physicsMass)
            write_float(file_handler, 'physicsFriction', self.physicsFriction)
            write_float(file_handler, 'physicsRestitution', self.physicsRestitution)
        # Geometry
        if self.hasSkeleton:
            write_int(file_handler, 'skeletonId', self.skeletonId)
            write_int(file_handler, 'numBoneInfluencers', self.numBoneInfluencers)
        write_vector_array(file_handler, 'positions', self.positions, self.scene.positionsPrecision)
        write_vector_array(file_handler, 'normals' , self.normals, self.scene.normalsPrecision)
        if len(self.uvs) > 0:
            write_array(file_handler, 'uvs', self.uvs, self.scene.UVsPrecision)
        if len(self.uvs2) > 0:
            write_array(file_handler, 'uvs2', self.uvs2, self.scene.UVsPrecision)
        if len(self.colors) > 0:
            write_array(file_handler, 'colors', self.colors, self.scene.vColorsPrecision)
        if hasattr(self, 'skeletonWeights'):
            write_array(file_handler, 'matricesWeights', self.skeletonWeights, self.scene.mWeightsPrecision)
            write_array(file_handler, 'matricesIndices', self.skeletonIndices)
        if hasattr(self, 'skeletonWeightsExtra'):
            write_array(file_handler, 'matricesWeightsExtra', self.skeletonWeightsExtra, self.scene.mWeightsPrecision)
            write_array(file_handler, 'matricesIndicesExtra', self.skeletonIndicesExtra)
        write_array(file_handler, 'indices', self.indices)
        # Sub meshes
        file_handler.write('\n,"subMeshes":[')
        first = True
        for subMesh in self.subMeshes:
            if first == False:
                file_handler.write(',')
            subMesh.to_scene_file(file_handler)
            first = False
        file_handler.write(']')
        super().to_scene_file(file_handler) # Animations
        # Instances
        first = True
        file_handler.write('\n,"instances":[')
        for instance in self.instances:
            if first == False:
                file_handler.write(',')
            instance.to_scene_file(file_handler)
            first = False
        file_handler.write(']')
        # Shape Keys
        if hasattr(self, 'morphTargetManagerId'):
            write_int(file_handler, 'morphTargetManagerId', self.morphTargetManagerId)
        # Close mesh
        file_handler.write('}\n')
        self.alreadyExported = True
#===============================================================================
def write_morphing_file(self, file_handler):
first = True
file_handler.write('{')
write_int(file_handler, 'id', self.morphTargetManagerId, True)
file_handler.write('\n,"targets":[')
for key in self.rawShapeKeys:
if first == False:
file_handler.write(',')
key.to_scene_file(file_handler)
first = False
file_handler.write(']}')
#===============================================================================
class MeshInstance:
    """Lightweight record of an instanced mesh: its own transform plus the
    physics settings inherited from the mesh it instances."""

    def __init__(self, instancedMesh, rotation, rotationQuaternion):
        self.name = instancedMesh.name
        if hasattr(instancedMesh, 'parentId'):
            self.parentId = instancedMesh.parentId
        self.position = instancedMesh.position
        # exactly one of rotation / rotationQuaternion is supplied by the caller
        if rotation is not None:
            self.rotation = rotation
        if rotationQuaternion is not None:
            self.rotationQuaternion = rotationQuaternion
        self.scaling = instancedMesh.scaling
        self.freezeWorldMatrix = instancedMesh.freezeWorldMatrix
        if hasattr(instancedMesh, 'physicsImpostor'):
            # copy the whole physics block from the source mesh
            for attr in ('physicsImpostor', 'physicsMass',
                         'physicsFriction', 'physicsRestitution'):
                setattr(self, attr, getattr(instancedMesh, attr))
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def to_scene_file(self, file_handler):
        """Serialize this instance as one JSON object into file_handler."""
        file_handler.write('{')
        write_string(file_handler, 'name', self.name, True)
        if hasattr(self, 'parentId'):
            write_string(file_handler, 'parentId', self.parentId)
        write_vector(file_handler, 'position', self.position)
        if hasattr(self, 'rotation'):
            write_vector(file_handler, 'rotation', self.rotation)
        else:
            write_quaternion(file_handler, 'rotationQuaternion', self.rotationQuaternion)
        write_vector(file_handler, 'scaling', self.scaling)
        # freeze World Matrix currently ignored for instances
        write_bool(file_handler, 'freezeWorldMatrix', self.freezeWorldMatrix)
        if hasattr(self, 'physicsImpostor'):
            write_int(file_handler, 'physicsImpostor', self.physicsImpostor)
            write_float(file_handler, 'physicsMass', self.physicsMass)
            write_float(file_handler, 'physicsFriction', self.physicsFriction)
            write_float(file_handler, 'physicsRestitution', self.physicsRestitution)
        file_handler.write('}')
#===============================================================================
class Node(FCurveAnimatable):
    """Exporter representation of a non-mesh Blender object (e.g. an Empty).

    Captures name, parenting and the local transform; the mesh-style flags
    are pinned to fixed values so a Node serializes with the same schema as
    a Mesh.
    """
    def __init__(self, node):
        Logger.log('processing begun of node: ' + node.name)
        self.define_animations(node, True, True, True) # animate pos / rot / scale
        self.name = node.name

        if node.parent and node.parent.type != 'ARMATURE':
            self.parentId = node.parent.name

        # split the local matrix into its transform components
        loc, rot, scale = node.matrix_local.decompose()
        self.position = loc
        if node.rotation_mode != 'QUATERNION':
            # Euler angles are negated for the target coordinate convention
            self.rotation = scale_vector(rot.to_euler('XYZ'), -1)
        else:
            self.rotationQuaternion = rot
        self.scaling = scale

        # fixed flags: a plain node is never drawn, shadowed, or collided with
        self.isVisible = False
        self.isEnabled = True
        self.checkCollisions = False
        self.billboardMode = BILLBOARDMODE_NONE
        self.castShadows = False
        self.receiveShadows = False
        self.tags = ''
        self.layer = -1 # nodes do not have layers attribute
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def to_scene_file(self, file_handler):
        """Serialize this node as one JSON object into file_handler."""
        file_handler.write('{')
        write_string(file_handler, 'name', self.name, True)
        write_string(file_handler, 'id', self.name)
        if hasattr(self, 'parentId'):
            write_string(file_handler, 'parentId', self.parentId)

        write_vector(file_handler, 'position', self.position)
        if hasattr(self, 'rotationQuaternion'):
            write_quaternion(file_handler, 'rotationQuaternion', self.rotationQuaternion)
        else:
            write_vector(file_handler, 'rotation', self.rotation)
        write_vector(file_handler, 'scaling', self.scaling)

        write_bool(file_handler, 'isVisible', self.isVisible)
        write_bool(file_handler, 'isEnabled', self.isEnabled)
        write_bool(file_handler, 'checkCollisions', self.checkCollisions)
        write_int(file_handler, 'billboardMode', self.billboardMode)
        write_bool(file_handler, 'receiveShadows', self.receiveShadows)
        write_string(file_handler, 'tags', self.tags)
        super().to_scene_file(file_handler) # animations appended by the base class
        file_handler.write('}')
#===============================================================================
class SubMesh:
    """Range of vertices / indices within a Mesh that share one material."""

    def __init__(self, materialIndex, verticesStart, indexStart, verticesCount, indexCount):
        self.materialIndex = materialIndex
        self.verticesStart = verticesStart
        self.indexStart = indexStart
        self.verticesCount = verticesCount
        self.indexCount = indexCount
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def to_scene_file(self, file_handler):
        """Serialize this sub-mesh as one JSON object into file_handler."""
        file_handler.write('{')
        write_int(file_handler, 'materialIndex', self.materialIndex, True)
        # note: the two 'start' / 'count' pairs are interleaved in the output
        write_int(file_handler, 'verticesStart', self.verticesStart)
        write_int(file_handler, 'verticesCount', self.verticesCount)
        write_int(file_handler, 'indexStart' , self.indexStart)
        write_int(file_handler, 'indexCount' , self.indexCount)
        file_handler.write('}')
#===============================================================================
# ---- custom exporter properties registered on bpy.types.Mesh; surfaced by MeshPanel ----
bpy.types.Mesh.autoAnimate = bpy.props.BoolProperty(
    name='Auto launch animations',
    description='',
    default = False
)
bpy.types.Mesh.useFlatShading = bpy.props.BoolProperty(
    name='Use Flat Shading',
    description='Obsolete feature. Only being kept for generating warning message',
    default = False
)
bpy.types.Mesh.isPickable = bpy.props.BoolProperty(
    name='Pickable',
    description='Disable picking for a mesh.',
    default = True
)
bpy.types.Mesh.checkCollisions = bpy.props.BoolProperty(
    name='Check Collisions',
    description='Indicates mesh should be checked that it does not run into anything.',
    default = False
)
bpy.types.Mesh.castShadows = bpy.props.BoolProperty(
    name='Cast Shadows',
    description='',
    default = False
)
bpy.types.Mesh.receiveShadows = bpy.props.BoolProperty(
    name='Receive Shadows',
    description='',
    default = False
)
bpy.types.Mesh.tags = bpy.props.StringProperty(
    name='Tags',
    description='Add meta-data to mesh (space delimited for multiples)',
    default = ''
)
# ---- baking options (read by BakedMaterial) ----
# not currently in use
bpy.types.Mesh.forceBaking = bpy.props.BoolProperty(
    name='Combine Multi-textures / resize',
    description='Also good to adjust single texture\'s size /compression.',
    default = False
)
# not currently in use
bpy.types.Mesh.usePNG = bpy.props.BoolProperty(
    name='Need Alpha',
    description='Saved as PNG when alpha is required, else JPG.',
    default = False
)
bpy.types.Mesh.bakeSize = bpy.props.IntProperty(
    name='Texture Size',
    description='Final dimensions of texture(s). Not required to be a power of 2, but recommended.',
    default = 1024
)
bpy.types.Mesh.bakeQuality = bpy.props.IntProperty(
    name='Quality 1-100',
    description='For JPG: The trade-off between Quality - File size(100 highest quality)\nFor PNG: amount of time spent for compression',
    default = 50, min = 1, max = 100
)
# ---- material sharing / lighting options ----
bpy.types.Mesh.materialNameSpace = bpy.props.StringProperty(
    name='Name Space',
    description='Prefix to use for materials for sharing across .blends.',
    default = DEFAULT_MATERIAL_NAMESPACE
)
bpy.types.Mesh.maxSimultaneousLights = bpy.props.IntProperty(
    name='Max Simultaneous Lights 0 - 32',
    description='BJS property set on each material of this mesh.\nSet higher for more complex lighting.\nSet lower for armatures on mobile',
    default = 4, min = 0, max = 32
)
bpy.types.Mesh.checkReadyOnlyOnce = bpy.props.BoolProperty(
    name='Check Ready Only Once',
    description='When checked better CPU utilization. Advanced user option.',
    default = False
)
bpy.types.Mesh.freezeWorldMatrix = bpy.props.BoolProperty(
    name='Freeze World Matrix',
    description='Indicate the position, rotation, & scale do not change for performance reasons',
    default = False
)
bpy.types.Mesh.loadDisabled = bpy.props.BoolProperty(
    name='Load Disabled',
    description='Indicate this mesh & children should not be active until enabled by code.',
    default = False
)
# ---- attached-sound options ----
bpy.types.Mesh.attachedSound = bpy.props.StringProperty(
    name='Sound',
    description='',
    default = ''
)
bpy.types.Mesh.loopSound = bpy.props.BoolProperty(
    name='Loop sound',
    description='',
    default = True
)
bpy.types.Mesh.autoPlaySound = bpy.props.BoolProperty(
    name='Auto play sound',
    description='',
    default = True
)
bpy.types.Mesh.maxSoundDistance = bpy.props.FloatProperty(
    name='Max sound distance',
    description='',
    default = 100
)
# ---- skeleton options (read by Mesh.__init__ / toFixedInfluencers) ----
bpy.types.Mesh.ignoreSkeleton = bpy.props.BoolProperty(
    name='Ignore',
    description='Do not export assignment to a skeleton',
    default = False
)
bpy.types.Mesh.maxInfluencers = bpy.props.IntProperty(
    name='Max bone Influencers / Vertex',
    description='When fewer than this are observed, the lower value is used.',
    default = 8, min = 1, max = 8
)
#===============================================================================
class MeshPanel(bpy.types.Panel):
    """Properties-editor panel (Data tab) exposing the exporter's custom mesh settings."""
    bl_label = get_title()
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = 'data'

    @classmethod
    def poll(cls, context):
        # only show the panel when the active object is a mesh
        ob = context.object
        return ob is not None and isinstance(ob.data, bpy.types.Mesh)

    def draw(self, context):
        ob = context.object
        layout = self.layout
        row = layout.row()
        row.prop(ob.data, 'isPickable')
        row.prop(ob.data, 'checkCollisions')
        row = layout.row()
        row.prop(ob.data, 'castShadows')
        row.prop(ob.data, 'receiveShadows')
        row = layout.row()
        row.prop(ob.data, 'freezeWorldMatrix')
        row.prop(ob.data, 'loadDisabled')
        layout.prop(ob.data, 'autoAnimate')
        layout.prop(ob.data, 'tags')
        box = layout.box()
        box.label(text='Skeleton:')
        box.prop(ob.data, 'ignoreSkeleton')
        row = box.row()
        row.enabled = not ob.data.ignoreSkeleton
        row.prop(ob.data, 'maxInfluencers')
        box = layout.box()
        # fix: pass the label via the 'text' keyword, consistent with every
        # other label() call here (and required in Blender 2.8+)
        box.label(text='Materials')
        box.prop(ob.data, 'materialNameSpace')
        box.prop(ob.data, 'maxSimultaneousLights')
        box.prop(ob.data, 'checkReadyOnlyOnce')
        box = layout.box()
        box.label(text='Procedural Texture / Cycles Baking')
        # box.prop(ob.data, 'forceBaking')
        # box.prop(ob.data, 'usePNG')
        box.prop(ob.data, 'bakeSize')
        box.prop(ob.data, 'bakeQuality')
        # - - - - - - - - - - - - - - - - - - - - - - - - -
        box = layout.box()
        box.prop(ob.data, 'attachedSound')
        row = box.row()
        row.prop(ob.data, 'autoPlaySound')
        row.prop(ob.data, 'loopSound')
        box.prop(ob.data, 'maxSoundDistance')
| 43.990674
| 225
| 0.561447
|
4a11861b6c80b1a0bda07783f4235941dd68a16f
| 1,242
|
py
|
Python
|
vertex_metrics_experiment/code/results.py
|
wethepeopleonline/law-net
|
e8b01136360078c89b666e2b127672644ed0c54b
|
[
"MIT"
] | 17
|
2016-09-02T19:39:11.000Z
|
2021-11-15T21:22:48.000Z
|
vertex_metrics_experiment/code/results.py
|
wethepeopleonline/law-net
|
e8b01136360078c89b666e2b127672644ed0c54b
|
[
"MIT"
] | 7
|
2016-09-04T17:19:13.000Z
|
2017-01-19T19:17:10.000Z
|
vertex_metrics_experiment/code/results.py
|
idc9/law-net
|
e8b01136360078c89b666e2b127672644ed0c54b
|
[
"MIT"
] | 8
|
2017-01-19T04:24:09.000Z
|
2021-09-13T20:22:58.000Z
|
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
def plot_scores(results, exper='', metric='', network_name=''):
    """Draw a horizontal bar chart of per-column median scores, sorted ascending.

    Parameters
    ----------
    results : pd.DataFrame
        One column per method; rows are repeated measurements.
    exper, metric, network_name : str
        Labels used for the plot title and x-axis.
    """
    # summarize each column: median as the score, std kept as a (currently
    # unused) error estimate for the disabled xerr option below
    summary = pd.DataFrame(index=results.columns, columns=['score', 'error'])
    summary['score'] = results.median(axis=0)
    summary['error'] = results.std(axis=0)
    summary.sort_values(by='score', inplace=True)

    # one bar position per column
    positions = np.arange(summary.shape[0])

    # error-bar styling, retained for when xerr is re-enabled
    error_config = {'ecolor': 'red', 'alpha': .5}

    plt.barh(positions, summary['score'], color='blue', alpha=.5)
    # xerr=summary['error'], error_kw=error_config  # disabled

    plt.xlim([0, 1.2 * summary['score'].max()])
    axis_font = {'fontname': 'Arial', 'size': '7'}
    plt.yticks(positions, summary.index, **axis_font)
    plt.title('%s experiment, %s' % (exper, network_name))
    plt.xlabel(metric)
def get_year_aggregate(years, x, fcn):
by_year = {y: [] for y in set(years)}
for i in range(len(years)):
by_year[years[i]].append(x[i])
year_agg_dict = {y: fcn(by_year[y]) for y in by_year.keys()}
return pd.Series(year_agg_dict)
| 25.346939
| 74
| 0.582126
|
4a1186f7bc8641324d8eff052d5b843bf7a8a0f4
| 1,331
|
py
|
Python
|
alg/gain/runner.py
|
gmbhat/tsgan
|
4d5ed046a924995dd770832a2010779a8700f97f
|
[
"BSD-3-Clause"
] | null | null | null |
alg/gain/runner.py
|
gmbhat/tsgan
|
4d5ed046a924995dd770832a2010779a8700f97f
|
[
"BSD-3-Clause"
] | null | null | null |
alg/gain/runner.py
|
gmbhat/tsgan
|
4d5ed046a924995dd770832a2010779a8700f97f
|
[
"BSD-3-Clause"
] | 2
|
2021-10-04T03:13:00.000Z
|
2022-01-05T22:41:22.000Z
|
import os
print("enter command: ")
command = input()
if "-i all" in command:
command1 = command.replace("-i all", "-i stretch_missing_train.csv")
command1 = "cmd /k python gain.py " + command1
print("-------------- running stretch_missing_train.csv --------------")
os.system(command1)
print("-------------- done running stretch_missing_train.csv --------------")
command2 = command.replace("-i all", "-i accel_z_missing_train.csv")
command2 = "cmd /k python gain.py " + command2
print("-------------- running accel_z_missing_train --------------")
os.system(command2)
print("-------------- done running accel_z_missing_train.csv --------------")
command3 = command.replace("-i all", "-i accel_y_missing_train.csv")
command3 = "cmd /k python gain.py " + command3
print("-------------- running accel_y_missing_train.csv --------------")
os.system(command3)
print("-------------- done running accel_y_missing_train.csv --------------")
command4 = command.replace("-i all", "-i accel_x_missing_train.csv")
command4 = "cmd /k python gain.py " + command4
print("-------------- running accel_x_missing_train.csv --------------")
os.system(command4)
print("-------------- done running accel_x_missing_train.csv --------------")
else:
command5 = "cmd /k python gain.py " + command
os.system(command5)
| 30.25
| 78
| 0.601052
|
4a118865212a0a197fc347d6630bb49ab0218b75
| 4,001
|
py
|
Python
|
tests/test_m3u_archive.py
|
elijahjpassmore/m3u-archive
|
fb7f4b103e7885c8628163e9ab462d99813a379b
|
[
"MIT"
] | null | null | null |
tests/test_m3u_archive.py
|
elijahjpassmore/m3u-archive
|
fb7f4b103e7885c8628163e9ab462d99813a379b
|
[
"MIT"
] | null | null | null |
tests/test_m3u_archive.py
|
elijahjpassmore/m3u-archive
|
fb7f4b103e7885c8628163e9ab462d99813a379b
|
[
"MIT"
] | null | null | null |
import filecmp
import os
from distutils.dir_util import copy_tree
from pathlib import Path
import pytest
from m3u_archive import __author__, __email__, __license__, __version__
from m3u_archive.m3u_archive_writer import M3UArchiveWriter
def test_version():
assert __version__ == "1.0.0"
def test_author() -> None:
assert __author__ == "Elijah J. Passmore"
def test_email() -> None:
assert __email__ == "elijah@elijahjpassmore.com"
def test_license() -> None:
assert __license__ == "MIT"
class TestM3UArchiveWriter:
def test_eval_m3u(self, tmp_path: Path) -> None:
# Copy the test files to the temporary directory.
test_path = Path(__file__).parent.resolve()
copy_tree(str(test_path.joinpath("files")), str(tmp_path.resolve()))
m3u_file = tmp_path.joinpath("m3u-1.m3u")
paths = M3UArchiveWriter.eval_m3u(m3u_file)
assert len(paths) == 9
def test_eval_m3u_no_m3u(self, tmp_path: Path) -> None:
# Copy the test files to the temporary directory.
test_path = Path(__file__).parent.resolve()
copy_tree(str(test_path.joinpath("files")), str(tmp_path.resolve()))
m3u_file = tmp_path.joinpath("missing-m3u.m3u")
with pytest.raises(FileNotFoundError):
M3UArchiveWriter.eval_m3u(m3u_file)
def test_eval_m3u_wrong_suffix(self, tmp_path: Path) -> None:
# Copy the test files to the temporary directory.
test_path = Path(__file__).parent.resolve()
copy_tree(str(test_path.joinpath("files")), str(tmp_path.resolve()))
m3u_file = tmp_path.joinpath("wrong-suffix.m3u2")
with pytest.raises(FileNotFoundError):
M3UArchiveWriter.eval_m3u(m3u_file)
def test_eval_m3u_missing_ignore_true(self, tmp_path: Path) -> None:
# Copy the test files to the temporary directory.
test_path = Path(__file__).parent.resolve()
copy_tree(str(test_path.joinpath("files")), str(tmp_path.resolve()))
m3u_file = tmp_path.joinpath("m3u-2.m3u")
paths = M3UArchiveWriter.eval_m3u(m3u_file, ignore_invalid=True)
assert (len(paths)) == 9
def test_eval_m3u_missing_ignore_false(self, tmp_path: Path) -> None:
# Copy the test files to the temporary directory.
test_path = Path(__file__).parent.resolve()
copy_tree(str(test_path.joinpath("files")), str(tmp_path.resolve()))
m3u_file = tmp_path.joinpath("m3u-2.m3u")
my_archive_writer = M3UArchiveWriter(
[m3u_file], "test", tmp_path, ignore_invalid=False
)
with pytest.raises(FileNotFoundError):
my_archive_writer.eval_m3u(m3u_file)
def test_write_archive(self, tmp_path: Path) -> None:
# Copy the test files to the temporary directory.
# This time, we need one directory for the original file
# contents, and another for the extracted contents.
original_path = tmp_path.joinpath("original-dir")
test_path = Path(__file__).parent.resolve()
copy_tree(
str(test_path.joinpath("files")), str(original_path.resolve())
)
m3u_file = original_path.joinpath("m3u-1.m3u")
my_archive_writer = M3UArchiveWriter(
[m3u_file], "test", original_path, ignore_invalid=False
)
zip_file = my_archive_writer.write_archive()
assert my_archive_writer.archive_path.exists()
# Create an extract directory and extract all of the contents.
extract_path = tmp_path.joinpath("extract-dir")
extract_path.mkdir()
zip_file.extractall(path=extract_path)
# Now, remove the archive and assert that both the original and
# extract directories contain the same files.
os.remove(my_archive_writer.archive_path)
comparison = filecmp.dircmp(
original_path,
extract_path,
)
assert len(comparison.diff_files) == 0
assert len(comparison.funny_files) == 0
| 33.90678
| 76
| 0.673832
|
4a118903e5a69ed2eae1baeb6e9cc19ab31a76b3
| 328
|
py
|
Python
|
yolox/data/datasets/voc_classes.py
|
StephenStorm/YOLOX
|
9b03f2c8c65ebde8c3fc6131827e11418686fb0b
|
[
"Apache-2.0"
] | null | null | null |
yolox/data/datasets/voc_classes.py
|
StephenStorm/YOLOX
|
9b03f2c8c65ebde8c3fc6131827e11418686fb0b
|
[
"Apache-2.0"
] | null | null | null |
yolox/data/datasets/voc_classes.py
|
StephenStorm/YOLOX
|
9b03f2c8c65ebde8c3fc6131827e11418686fb0b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
# VOC_CLASSES = ( '__background__', # always index 0
VOC_CLASSES = (
"beasts",
"bird",
"fish",
"insect",
"plant",
"person",
)
# class_map = {0:'beasts', 1:'bird', 2:'fish', 3:'insect', 4:'plant', 5:'person'}
| 21.866667
| 81
| 0.567073
|
4a1189375bf29d6cc90ae1d9b3de4843eaa84a0c
| 732
|
py
|
Python
|
setup.py
|
AGI-Labs/continual_rl
|
bcf17d879e8a983340be233ff8f740c424d0f303
|
[
"MIT"
] | 19
|
2021-07-27T05:20:09.000Z
|
2022-02-27T07:12:05.000Z
|
setup.py
|
AGI-Labs/continual_rl
|
bcf17d879e8a983340be233ff8f740c424d0f303
|
[
"MIT"
] | 2
|
2021-11-05T07:36:50.000Z
|
2022-03-11T00:21:50.000Z
|
setup.py
|
AGI-Labs/continual_rl
|
bcf17d879e8a983340be233ff8f740c424d0f303
|
[
"MIT"
] | 3
|
2021-10-20T06:04:35.000Z
|
2022-03-06T22:59:36.000Z
|
from setuptools import setup, find_packages
setup(
name='continual_rl',
version='1.0',
description='Continual reinforcement learning baselines and standard experiments.',
author='Sam Powers',
author_email='snpowers@cs.cmu.edu',
packages=find_packages(),
py_modules=['continual_rl.available_policies', 'continual_rl.experiment_specs'],
install_requires=['uuid',
'numpy',
'tensorboard',
'torch-ac',
'gym[atari]',
'atari-py==0.2.5',
'moviepy',
'dotmap',
'psutil',
'opencv-python'
]
)
| 31.826087
| 87
| 0.497268
|
4a11895a712ac584ef4c59394371b8b49060ce66
| 516
|
py
|
Python
|
egs/wsj/s5/utils/lang/bpe/prepend_words.py
|
shuipi100/kaldi
|
8e30fddb300a87e7c79ef2c0b9c731a8a9fd23f0
|
[
"Apache-2.0"
] | 370
|
2020-05-31T16:28:08.000Z
|
2022-03-24T07:27:50.000Z
|
egs/wsj/s5/utils/lang/bpe/prepend_words.py
|
shuipi100/kaldi
|
8e30fddb300a87e7c79ef2c0b9c731a8a9fd23f0
|
[
"Apache-2.0"
] | 55
|
2020-10-20T02:18:56.000Z
|
2021-07-26T04:52:23.000Z
|
egs/wsj/s5/utils/lang/bpe/prepend_words.py
|
shuipi100/kaldi
|
8e30fddb300a87e7c79ef2c0b9c731a8a9fd23f0
|
[
"Apache-2.0"
] | 119
|
2020-06-08T11:27:09.000Z
|
2022-03-31T05:31:53.000Z
|
#!/usr/bin/env python3
# This script, prepend '|' to every words in the transcript to mark
# the beginning of the words for finding the initial-space of every word
# after decoding.
import sys
import io
import re
whitespace = re.compile("[ \t]+")
infile = io.TextIOWrapper(sys.stdin.buffer, encoding='latin-1')
output = io.TextIOWrapper(sys.stdout.buffer, encoding='latin-1')
for line in infile:
words = whitespace.split(line.strip(" \t\r\n"))
output.write(' '.join([ "|"+word for word in words]) + '\n')
| 30.352941
| 72
| 0.70155
|
4a11896ffe61382a044537ead98f442849ad7717
| 802
|
py
|
Python
|
unittests/utils/test_statistics.py
|
tonio73/dnnviewer
|
2abb89665d1d99cdb5d8b6d85d0353dc22d226f4
|
[
"MIT"
] | 12
|
2020-03-22T20:57:40.000Z
|
2022-02-25T13:47:07.000Z
|
unittests/utils/test_statistics.py
|
tonio73/dnnviewer
|
2abb89665d1d99cdb5d8b6d85d0353dc22d226f4
|
[
"MIT"
] | 59
|
2020-03-24T20:51:13.000Z
|
2020-07-05T14:52:53.000Z
|
unittests/utils/test_statistics.py
|
tonio73/dnnviewer
|
2abb89665d1d99cdb5d8b6d85d0353dc22d226f4
|
[
"MIT"
] | 3
|
2020-04-01T17:00:58.000Z
|
2020-09-29T03:18:55.000Z
|
import numpy as np
from dnnviewer.utils import statistics
def test_get_dense_strongest_weights():
weights = np.array([[1, 2], [2, 3], [3, 2]])
strongest_idx, strongest = statistics.get_strongest(weights, 1)
assert (strongest_idx.shape == (1, 2))
assert ((strongest_idx == np.array([[2, 1]])).all())
assert (strongest.shape == (1, 2))
assert ((strongest == np.array([[3, 3]])).all())
def test_get_dense_strongest_weights2():
weights = np.array([[1, 2, 1], [2, 3, 4], [3, 2, 5]])
strongest_idx, strongest = statistics.get_strongest(weights, 2)
assert (strongest_idx.shape == (2, 3))
assert ((strongest_idx == np.array([[1, 2, 1], [2, 1, 2]])).all())
assert (strongest.shape == (2, 3))
assert ((strongest == np.array([[2, 2, 4], [3, 3, 5]])).all())
| 32.08
| 70
| 0.609726
|
4a1189c191d78b55a30c13d1fb76068fd5a689a0
| 12,129
|
py
|
Python
|
fastlmmhpc/pyplink/snpreader/Bed.py
|
epiproject/FaST-LMM-HPC
|
5d6df81268aeff19015194ab0718a9163b8d33af
|
[
"Apache-2.0"
] | 2
|
2019-12-10T09:55:40.000Z
|
2019-12-11T20:58:10.000Z
|
fastlmmhpc/pyplink/snpreader/Bed.py
|
epiproject/FaST-LMM-HPC
|
5d6df81268aeff19015194ab0718a9163b8d33af
|
[
"Apache-2.0"
] | null | null | null |
fastlmmhpc/pyplink/snpreader/Bed.py
|
epiproject/FaST-LMM-HPC
|
5d6df81268aeff19015194ab0718a9163b8d33af
|
[
"Apache-2.0"
] | null | null | null |
import numpy as SP
import subprocess, sys, os.path
from itertools import *
from fastlmmhpc.pyplink.snpset import *
from fastlmmhpc.pyplink.altset_list import *
import pandas as pd
import fastlmmhpc.util.preprocess as util
import logging
WRAPPED_PLINK_PARSER_PRESENT = None
def decide_once_on_plink_reader():
#This is now done in a method, instead of at the top of the file, so that messages can be re-directed to the appropriate stream.
#(Usually messages go to stdout, but when the code is run on Hadoop, they are sent to stderr)
global WRAPPED_PLINK_PARSER_PRESENT
if WRAPPED_PLINK_PARSER_PRESENT == None:
# attempt to import wrapped plink parser
try:
from pysnptools.snpreader import wrap_plink_parser
WRAPPED_PLINK_PARSER_PRESENT = True #!!does the standardizer work without c++
logging.info("using c-based plink parser")
except Exception, detail:
logging.warn(detail)
WRAPPED_PLINK_PARSER_PRESENT = False
class Bed(object):
'''
This is a class that does random-access reads of a BED file. For examples of its use see its 'read' method.
'''
_ran_once = False
_filepointer = None
def __init__(self,basefilename):
'''
basefilename : string of the basename of [basename].bed, [basename].bim,
and [basename].fam
'''
self.basefilename = basefilename
#!! similar code in fastlmm
def __repr__(self):
return "{0}('{1}')".format(self.__class__.__name__,self.basefilename)
def run_once(self):
if (self._ran_once):
return
self._ran_once = True
famfile = self.basefilename+ '.fam'
bimfile = self.basefilename+'.bim'
logging.info("Loading fam file {0}".format(famfile))
self._original_iids = SP.loadtxt(famfile,dtype = 'str',usecols=(0,1),comments=None)
logging.info("Loading bim file {0}".format(bimfile))
self.bimfields = pd.read_csv(bimfile,delimiter = '\s',usecols = (0,1,2,3),header=None,index_col=False,engine='python')
self.rs = SP.array(self.bimfields[1].tolist(),dtype='str')
self.pos = self.bimfields.as_matrix([0,2,3])
self.snp_to_index = {}
logging.info("indexing snps");
for i in xrange(self.snp_count):
snp = self.rs[i]
if self.snp_to_index.has_key(snp) : raise Exception("Expect snp to appear in bim file only once. ({0})".format(snp))
self.snp_to_index[snp]=i
bedfile = self.basefilename+ '.bed'
self._filepointer = open(bedfile, "rb")
mode = self._filepointer.read(2)
if mode != 'l\x1b': raise Exception('No valid binary BED file')
mode = self._filepointer.read(1) #\x01 = SNP major \x00 = individual major
if mode != '\x01': raise Exception('only SNP-major is implemented')
logging.info("bed file is open {0}".format(bedfile))
return self
def __del__(self):
if self._filepointer != None: # we need to test this because Python doesn't guarentee that __init__ was fully run
self._filepointer.close()
def copyinputs(self, copier):
# doesn't need to self.run_once() because only uses original inputs
copier.input(self.basefilename + ".bed")
copier.input(self.basefilename + ".bim")
copier.input(self.basefilename + ".fam")
@property
def snp_count(self):
self.run_once()
return len(self.bimfields);
def read(self,snp_set = AllSnps(), order="F", dtype=SP.float64, force_python_only=False):
'''
Input: a snp_set. Choices include
AllSnps() [the default],
PositionRange(snpIndex,nSNPs)
SnpAndSetName(groupname,snplist),
Output dictionary:
'rs' : [S] array rs-numbers
'pos' : [S*3] array of positions [chromosome, genetic dist, basepair dist]
'snps' : [N*S] array of snp-data
'iid' : [N*2] array of family IDs and individual IDs
Examples:
>>> bed = Bed(r'../../tests/datasets/all_chr.maf0.001.N300')
... ret = bed.read()
... len(ret['rs'])
... ret = bed.read(AllSnps())
... len(ret['rs'])
... ret = bed.read(SnpAndSetName('someset',['23_9','23_2']))
... ",".join(ret['rs'])
... ret = bed.read(PositionRange(0,10))
... ",".join(ret['rs'])
Loading fam file ../../tests/datasets/all_chr.maf0.001.N300.fam
Loading bim file ../../tests/datasets/all_chr.maf0.001.N300.bim
bed file is open ../../tests/datasets/all_chr.maf0.001.N300.bed
1015
1015
'23_9,23_2'
'1_12,1_34,1_10,1_35,1_28,1_25,1_36,1_39,1_4,1_13'
closing bed file
>>> altset_list1 = SnpAndSetNameCollection(r'../../tests/datasets/set_input.small.txt') # get the list of snpsets defined in the file
Reading ../../tests/datasets/set_input.small.txt
>>> altset_list2 = Subset(altset_list1,['set1','set5']) # only use a subset of those snpsets
>>> altset_list3 = MinMaxSetSize(altset_list2, minsetsize=2, maxsetsize=15) # only use the subset of subsets that contain between 2 & 15 snps (inclusive)
>>> bed = Bed(r'../../tests/datasets/all_chr.maf0.001.N300')
... altsetlist_plusbed = altset_list3.addbed(bed) # apply altset_list3 to this bed file
... len(altsetlist_plusbed) # tell how many snpsets there will be
... for snpset_plusbed in altsetlist_plusbed:
... str(snpset_plusbed) # the name of the snpset
... len(snpset_plusbed) # the number of snps in the snpset
... ret = snpset_plusbed.read()
... ",".join(ret['rs'])
Loading fam file ../../tests/datasets/all_chr.maf0.001.N300.fam
Loading bim file ../../tests/datasets/all_chr.maf0.001.N300.bim
bed file is open ../../tests/datasets/all_chr.maf0.001.N300.bed
1
'set5'
13
'5_12,5_28,5_32,5_5,5_11,5_1,5_9,5_3,5_19,5_7,5_21,5_15,5_23'
closing bed file
'''
self.run_once()
snpset_withbed = snp_set.addbed(self)
return self.read_with_specification(snpset_withbed, order=order, dtype=dtype, force_python_only=force_python_only)
##!! This property is ugly
@property
def ind_used(self):
# doesn't need to self.run_once() because only uses original inputs
return self._ind_used
@ind_used.setter
def ind_used(self, value):
'''
Tell the Bed reader to return data for only a subset (perhaps proper) of the individuals in a particular order
e.g. 2,10,0 says to return data for three users: the user at index position 2, the user at index position 10, and the user at index position 0.
'''
# doesn't need to self.run_once() because only uses original inputs
self._ind_used = value
@property
def original_iids(self):
self.run_once()
return self._original_iids
def counts_and_indexes(self, snpset_withbbed):
iid_count_in = len(self.original_iids)
snp_count_in = self.snp_count
if hasattr(self,'_ind_used'):
iid_count_out = len(self.ind_used)
iid_index_out = self.ind_used
else:
iid_count_out = iid_count_in
iid_index_out = range(0,iid_count_in)
snp_count_out = len(snpset_withbbed)
snp_index_out = list(snpset_withbbed) #make a copy, in case it's in some strange format, such as HDF5
return iid_count_in, iid_count_out, iid_index_out, snp_count_in, snp_count_out, snp_index_out
@staticmethod
def read_with_specification(snpset_withbbed, order="F", dtype=SP.float64, force_python_only=False):
# doesn't need to self.run_once() because it is static
decide_once_on_plink_reader()
global WRAPPED_PLINK_PARSER_PRESENT
bed = snpset_withbbed.bed
iid_count_in, iid_count_out, iid_index_out, snp_count_in, snp_count_out, snp_index_out = bed.counts_and_indexes(snpset_withbbed)
if WRAPPED_PLINK_PARSER_PRESENT and not force_python_only:
from pysnptools.snpreader import wrap_plink_parser
SNPs = SP.zeros((iid_count_out, snp_count_out), order=order, dtype=dtype)
bed_fn = bed.basefilename + ".bed"
if dtype == SP.float64:
if order=="F":
wrap_plink_parser.readPlinkBedFiledoubleFAAA(bed_fn, iid_count_in, snp_count_in, iid_index_out, snp_index_out, SNPs)
elif order=="C":
wrap_plink_parser.readPlinkBedFiledoubleCAAA(bed_fn, iid_count_in, snp_count_in, iid_index_out, snp_index_out, SNPs)
else:
raise Exception("order '{0}' not known, only 'F' and 'C'".format(order));
elif dtype == SP.float32:
if order=="F":
wrap_plink_parser.readPlinkBedFilefloatFAAA(bed_fn, iid_count_in, snp_count_in, iid_index_out, snp_index_out, SNPs)
elif order=="C":
wrap_plink_parser.readPlinkBedFilefloatCAAA(bed_fn, iid_count_in, snp_count_in, iid_index_out, snp_index_out, SNPs)
else:
raise Exception("dtype '{0}' not known, only float64 and float32".format(dtype))
else:
# An earlier version of this code had a way to read consecutive SNPs of code in one read. May want
# to add that ability back to the code.
# Also, note that reading with python will often result in non-contigious memory, so the python standardizers will automatically be used, too.
logging.warn("using pure python plink parser (might be much slower!!)")
SNPs = SP.zeros(((int(SP.ceil(0.25*iid_count_in))*4),snp_count_out),order=order, dtype=dtype) #allocate it a little big
for SNPsIndex, bimIndex in enumerate(snpset_withbbed):
startbit = int(SP.ceil(0.25*iid_count_in)*bimIndex+3)
bed._filepointer.seek(startbit)
nbyte = int(SP.ceil(0.25*iid_count_in))
bytes = SP.array(bytearray(bed._filepointer.read(nbyte))).reshape((int(SP.ceil(0.25*iid_count_in)),1),order='F')
SNPs[3::4,SNPsIndex:SNPsIndex+1][bytes>=64]=SP.nan
SNPs[3::4,SNPsIndex:SNPsIndex+1][bytes>=128]=1
SNPs[3::4,SNPsIndex:SNPsIndex+1][bytes>=192]=2
bytes=SP.mod(bytes,64)
SNPs[2::4,SNPsIndex:SNPsIndex+1][bytes>=16]=SP.nan
SNPs[2::4,SNPsIndex:SNPsIndex+1][bytes>=32]=1
SNPs[2::4,SNPsIndex:SNPsIndex+1][bytes>=48]=2
bytes=SP.mod(bytes,16)
SNPs[1::4,SNPsIndex:SNPsIndex+1][bytes>=4]=SP.nan
SNPs[1::4,SNPsIndex:SNPsIndex+1][bytes>=8]=1
SNPs[1::4,SNPsIndex:SNPsIndex+1][bytes>=12]=2
bytes=SP.mod(bytes,4)
SNPs[0::4,SNPsIndex:SNPsIndex+1][bytes>=1]=SP.nan
SNPs[0::4,SNPsIndex:SNPsIndex+1][bytes>=2]=1
SNPs[0::4,SNPsIndex:SNPsIndex+1][bytes>=3]=2
SNPs = SNPs[iid_index_out,:] #reorder or trim any extra allocation
ret = {
'rs' :bed.rs[snp_index_out],
'pos' :bed.pos[snp_index_out,:],
'snps' :SNPs,
'iid' :bed.original_iids[iid_index_out,:]
}
return ret
if __name__ == "__main__":
#bed = Bed(r'../../tests/datasets/all_chr.maf0.001.N300')
#ret = bed.read()
#len(ret['rs'])
#ret = bed.read(AllSnps())
#len(ret['rs'])
#ret = bed.read(SnpAndSetName('someset',['23_9','23_2']))
#",".join(ret['rs'])
#ret = bed.read(PositionRange(0,10))
#",".join(ret['rs'])
logging.basicConfig(level=logging.INFO)
import doctest
doctest.testmod()
| 44.591912
| 163
| 0.610932
|
4a118afab6a0a0aab9034c58a5b2946aa80f34a5
| 1,463
|
py
|
Python
|
socialregistration/contrib/openid/client.py
|
mpdaugherty/django-socialregistration-with-google-apps
|
988c627995d48a0f861b1156401fe3e5e4c91826
|
[
"MIT"
] | 1
|
2019-08-14T21:14:03.000Z
|
2019-08-14T21:14:03.000Z
|
socialregistration/contrib/openid/client.py
|
mpdaugherty/django-socialregistration-with-google-apps
|
988c627995d48a0f861b1156401fe3e5e4c91826
|
[
"MIT"
] | null | null | null |
socialregistration/contrib/openid/client.py
|
mpdaugherty/django-socialregistration-with-google-apps
|
988c627995d48a0f861b1156401fe3e5e4c91826
|
[
"MIT"
] | null | null | null |
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from openid.consumer import consumer
from socialregistration.clients import Client
from socialregistration.contrib.openid.storage import OpenIDStore
from socialregistration.settings import SESSION_KEY
import urlparse
class OpenIDClient(Client):
def __init__(self, session_data, endpoint_url):
self.endpoint_url = endpoint_url
self.store = OpenIDStore()
self.consumer = consumer.Consumer(session_data, self.store)
def get_realm(self):
if self.is_https():
return 'https://%s/' % Site.objects.get_current().domain
return 'http://%s/' % Site.objects.get_current().domain
def get_callback_url(self):
return urlparse.urljoin(self.get_realm(),
reverse('socialregistration:openid:callback'))
def get_redirect_url(self):
auth_request = self.consumer.begin(self.endpoint_url)
redirect_url = auth_request.redirectURL(self.get_realm(),
self.get_callback_url())
return redirect_url
def complete(self, GET, path):
self.result = self.consumer.complete(GET, urlparse.urljoin(self.get_realm(),
path))
def is_valid(self):
return self.result.status == consumer.SUCCESS
def get_identity(self):
return self.result.identity_url
@staticmethod
def get_session_key():
return '%sopenid' % SESSION_KEY
| 32.511111
| 84
| 0.702666
|
4a118bcf1e32e4f93af2b4b58a8d777b76919230
| 3,268
|
py
|
Python
|
src/sources/readnovelfull.py
|
Epicpkmn11/lightnovel-crawler
|
6fc0bbd4970a669a7921819d98b00da5a177d4cc
|
[
"Apache-2.0"
] | 1
|
2019-03-10T13:02:23.000Z
|
2019-03-10T13:02:23.000Z
|
src/sources/readnovelfull.py
|
Epicpkmn11/lightnovel-crawler
|
6fc0bbd4970a669a7921819d98b00da5a177d4cc
|
[
"Apache-2.0"
] | 2
|
2021-06-08T21:01:15.000Z
|
2021-09-08T01:45:07.000Z
|
src/sources/readnovelfull.py
|
Epicpkmn11/lightnovel-crawler
|
6fc0bbd4970a669a7921819d98b00da5a177d4cc
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import json
import logging
import re
from ..utils.crawler import Crawler
logger = logging.getLogger('READNOVELFULL')
search_url = 'https://readnovelfull.com/search?keyword=%s'
full_chapter_url = 'https://readnovelfull.com/ajax/chapter-archive?novelId=%s'
class ReadNovelFullCrawler(Crawler):
base_url = 'https://readnovelfull.com/'
def search_novel(self, query):
query = query.lower().replace(' ', '+')
soup = self.get_soup(search_url % query)
results = []
for result in soup.select('div.col-novel-main div.list.list-novel div.row')[:5]:
url = self.absolute_url(
result.select_one('h3.novel-title a')['href'])
title = result.select_one('h3.novel-title a')['title']
last_chapter = result.select_one('span.chr-text').text.strip()
results.append({
'url': url,
'title': title,
'info': 'last chapter : %s' % last_chapter,
})
# end for
return results
# end def
def read_novel_info(self):
'''Get novel title, autor, cover etc'''
logger.debug('Visiting %s', self.novel_url)
soup = self.get_soup(self.novel_url + '?waring=1')
self.novel_title = soup.select_one('h3.title').text.strip()
logger.info('Novel title: %s', self.novel_title)
self.novel_cover = self.absolute_url(
soup.select_one('div.book img')['src'])
logger.info('Novel cover: %s', self.novel_cover)
author = []
for a in soup.select('ul.info.info-meta li')[1].select('a'):
author.append(a.text.strip())
# end for
self.novel_author = ", ".join(author)
logger.info('Novel author: %s', self.novel_author)
novel_id = soup.select_one('div#rating')['data-novel-id']
chapter_url = full_chapter_url % novel_id
logger.debug('Visiting %s', chapter_url)
chapter_soup = self.get_soup(chapter_url)
chapters = chapter_soup.select('li a')
for a in chapters:
for span in a.findAll('span'):
span.decompose()
# end for
# end for
for x in chapters:
chap_id = len(self.chapters) + 1
if len(self.chapters) % 100 == 0:
vol_id = chap_id//100 + 1
vol_title = 'Volume ' + str(vol_id)
self.volumes.append({
'id': vol_id,
'title': vol_title,
})
# end if
self.chapters.append({
'id': chap_id,
'volume': vol_id,
'url': self.absolute_url(x['href']),
'title': x['title'] or ('Chapter %d' % chap_id),
})
# end for
# end def
def download_chapter_body(self, chapter):
'''Download body of a single chapter and return as clean html format.'''
logger.info('Downloading %s', chapter['url'])
soup = self.get_soup(chapter['url'])
contents = soup.select('div.cha-words p, #chr-content p')
body = [str(p) for p in contents if p.text.strip()]
return '<p>' + '</p><p>'.join(body) + '</p>'
# end def
# end class
| 34.041667
| 88
| 0.550796
|
4a118d2b42f36c6842f103beb33c4b3b7bce95bc
| 2,342
|
py
|
Python
|
shrapnel/polls/migrations/0001_create_poll_models.py
|
BLG411-Group6/shrapnel-api
|
0cb1d6b1f4d57837c99f867d8ac80095de3e7330
|
[
"Apache-2.0"
] | null | null | null |
shrapnel/polls/migrations/0001_create_poll_models.py
|
BLG411-Group6/shrapnel-api
|
0cb1d6b1f4d57837c99f867d8ac80095de3e7330
|
[
"Apache-2.0"
] | 4
|
2021-03-19T04:38:51.000Z
|
2021-06-09T18:49:10.000Z
|
shrapnel/polls/migrations/0001_create_poll_models.py
|
BLG411-Group6/shrapnel-api
|
0cb1d6b1f4d57837c99f867d8ac80095de3e7330
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.2 on 2019-12-03 20:36
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Poll',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('date_created', models.DateTimeField(default=django.utils.timezone.now)),
('date_expiration', models.DateTimeField()),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='polls', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='PollOption',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('body', models.CharField(max_length=100)),
('date_created', models.DateTimeField(default=django.utils.timezone.now)),
('poll', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='options', to='polls.Poll')),
],
),
migrations.CreateModel(
name='PollAnswer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_created', models.DateTimeField(default=django.utils.timezone.now)),
('date_updated', models.DateTimeField(auto_now=True)),
('option', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.PollOption')),
('poll', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='answers', to='polls.Poll')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='poll_answers', to=settings.AUTH_USER_MODEL)),
],
options={
'unique_together': {('user', 'poll')},
},
),
]
| 45.038462
| 159
| 0.615713
|
4a118e70b0cc59e0deaf1ba0d80c6dcbed0ec055
| 2,675
|
py
|
Python
|
scripts/release/run_trigger_job.py
|
rhaschke/ros_buildfarm
|
85dfccd37a590ebd5eb64966e0fab54032f6b981
|
[
"Apache-2.0"
] | 57
|
2015-03-30T20:37:58.000Z
|
2022-03-18T10:51:53.000Z
|
scripts/release/run_trigger_job.py
|
rhaschke/ros_buildfarm
|
85dfccd37a590ebd5eb64966e0fab54032f6b981
|
[
"Apache-2.0"
] | 680
|
2015-01-02T17:59:37.000Z
|
2022-03-30T17:08:52.000Z
|
scripts/release/run_trigger_job.py
|
rhaschke/ros_buildfarm
|
85dfccd37a590ebd5eb64966e0fab54032f6b981
|
[
"Apache-2.0"
] | 107
|
2015-02-01T03:30:01.000Z
|
2022-03-08T00:36:49.000Z
|
#!/usr/bin/env python3
# Copyright 2014-2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import copy
import sys
from ros_buildfarm.argument import add_argument_build_name
from ros_buildfarm.argument import add_argument_cache_dir
from ros_buildfarm.argument import add_argument_config_url
from ros_buildfarm.argument import \
add_argument_distribution_repository_key_files
from ros_buildfarm.argument import add_argument_distribution_repository_urls
from ros_buildfarm.argument import add_argument_dockerfile_dir
from ros_buildfarm.argument import add_argument_groovy_script
from ros_buildfarm.argument import add_argument_missing_only
from ros_buildfarm.argument import add_argument_not_failed_only
from ros_buildfarm.argument import add_argument_rosdistro_name
from ros_buildfarm.argument import add_argument_source_only
from ros_buildfarm.common import get_distribution_repository_keys
from ros_buildfarm.common import get_user_id
from ros_buildfarm.templates import create_dockerfile
def main(argv=sys.argv[1:]):
parser = argparse.ArgumentParser(
description="Run the 'trigger_jobs' job")
add_argument_config_url(parser)
add_argument_rosdistro_name(parser)
add_argument_build_name(parser, 'release')
add_argument_distribution_repository_urls(parser)
add_argument_distribution_repository_key_files(parser)
add_argument_missing_only(parser)
add_argument_source_only(parser)
add_argument_not_failed_only(parser)
add_argument_groovy_script(parser)
add_argument_cache_dir(parser)
add_argument_dockerfile_dir(parser)
args = parser.parse_args(argv)
data = copy.deepcopy(args.__dict__)
data.update({
'distribution_repository_urls': args.distribution_repository_urls,
'distribution_repository_keys': get_distribution_repository_keys(
args.distribution_repository_urls,
args.distribution_repository_key_files),
'uid': get_user_id(),
})
create_dockerfile(
'release/release_create_trigger_task.Dockerfile.em',
data, args.dockerfile_dir)
if __name__ == '__main__':
main()
| 38.214286
| 76
| 0.804486
|
4a118f04c0bbe52b576cf244b8fc711216b27b01
| 196
|
py
|
Python
|
Scraping/espirito_santo.py
|
Insper-Data/data_bcg_news
|
49986db18095759adea00bb0dedc149acebb683b
|
[
"MIT"
] | null | null | null |
Scraping/espirito_santo.py
|
Insper-Data/data_bcg_news
|
49986db18095759adea00bb0dedc149acebb683b
|
[
"MIT"
] | null | null | null |
Scraping/espirito_santo.py
|
Insper-Data/data_bcg_news
|
49986db18095759adea00bb0dedc149acebb683b
|
[
"MIT"
] | null | null | null |
import time
from selenium import webdriver

# NOTE: the original imported `webdriver` twice; the duplicate import was
# removed. The driver path is hard-coded to a user-specific location;
# consider making it configurable (e.g. an environment variable).
GECKODRIVER_PATH = r"C:\Users\siddhartha\Downloads\geckodriver-v0.25.0-win64\geckodriver.exe"

# Launch a Firefox instance controlled through geckodriver.
driver = webdriver.Firefox(executable_path=GECKODRIVER_PATH)
| 32.666667
| 119
| 0.826531
|
4a118f33d6de219067c3589b3c1818a465e11f61
| 1,636
|
py
|
Python
|
api/app.py
|
fuerederp/microblog-api
|
894cd9184c8f80fb47d685a70f0e2bd357cf3560
|
[
"MIT"
] | null | null | null |
api/app.py
|
fuerederp/microblog-api
|
894cd9184c8f80fb47d685a70f0e2bd357cf3560
|
[
"MIT"
] | null | null | null |
api/app.py
|
fuerederp/microblog-api
|
894cd9184c8f80fb47d685a70f0e2bd357cf3560
|
[
"MIT"
] | null | null | null |
from flask import Flask, redirect, url_for
from alchemical.flask import Alchemical
from flask_migrate import Migrate
from flask_marshmallow import Marshmallow
from flask_cors import CORS
from flask_mail import Mail
from apifairy import APIFairy
from config import Config
# Flask extension instances, created unbound at import time and attached
# to a concrete application inside create_app() (application-factory
# pattern).
db = Alchemical()
migrate = Migrate()
ma = Marshmallow()
cors = CORS()
mail = Mail()
apifairy = APIFairy()
def create_app(config_class=Config):
    """Application factory: build, configure and return a Flask app.

    Initializes the module-level extensions against the new app,
    registers the API blueprints, and installs a shell context plus a
    root redirect to the generated API documentation.

    :param config_class: configuration object handed to
        ``app.config.from_object``.
    """
    app = Flask(__name__)
    app.config.from_object(config_class)
    # extensions
    # Importing the models before initializing db ensures all model
    # classes are registered with the metadata used by migrations.
    from api import models
    db.init_app(app)
    migrate.init_app(app, db)
    ma.init_app(app)
    if app.config['USE_CORS']:  # pragma: no branch
        cors.init_app(app)
    mail.init_app(app)
    apifairy.init_app(app)
    # blueprints
    from api.errors import errors
    app.register_blueprint(errors)
    from api.tokens import tokens
    app.register_blueprint(tokens, url_prefix='/api')
    from api.users import users
    app.register_blueprint(users, url_prefix='/api')
    from api.posts import posts
    app.register_blueprint(posts, url_prefix='/api')
    from api.fake import fake
    app.register_blueprint(fake)
    # define the shell context
    @app.shell_context_processor
    def shell_context():  # pragma: no cover
        # Expose the db handle and every model class to `flask shell`.
        ctx = {'db': db}
        for attr in dir(models):
            model = getattr(models, attr)
            if hasattr(model, '__bases__') and \
                    db.Model in getattr(model, '__bases__'):
                ctx[attr] = model
        return ctx

    @app.route('/')
    def index():  # pragma: no cover
        # The root URL redirects to the auto-generated API docs.
        return redirect(url_for('apifairy.docs'))
    return app
| 27.266667
| 60
| 0.677873
|
4a118fe18b9a894716d8760eb0ecf4f6477c8b65
| 12,521
|
py
|
Python
|
examples/multimodal/layoutlm/funsd.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
examples/multimodal/layoutlm/funsd.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
examples/multimodal/layoutlm/funsd.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
import logging
import os
import paddle
from paddle.io import Dataset
logger = logging.getLogger(__name__)
import numpy as np
class FunsdDataset(Dataset):
    """FUNSD dataset wrapper for LayoutLM.

    Reads the split named by `mode` from `args.data_dir`, converts every
    example into padded feature lists once at construction time, and
    serves fixed-size tensor samples by index.
    """

    def __init__(self, args, tokenizer, labels, pad_token_label_id, mode):
        # args: namespace providing at least `data_dir` and `max_seq_length`.
        # mode: split name used to locate the input files (e.g. "train").
        logger.info("Creating features from dataset file at %s", args.data_dir)
        examples = read_examples_from_file(args.data_dir, mode)
        features = convert_examples_to_features(
            examples,
            labels,
            args.max_seq_length,
            tokenizer,
            cls_token_at_end=False,
            cls_token=tokenizer.cls_token,
            cls_token_segment_id=0,
            sep_token=tokenizer.sep_token,
            sep_token_extra=False,
            pad_on_left=False,
            pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
            pad_token_segment_id=0,
            pad_token_label_id=pad_token_label_id,
        )
        self.features = features
        # Convert to Tensors and build dataset
        self.all_input_ids = paddle.to_tensor([f.input_ids for f in features],
                                              dtype="int64")
        self.all_input_mask = paddle.to_tensor([f.input_mask for f in features],
                                               dtype="int64")
        self.all_segment_ids = paddle.to_tensor(
            [f.segment_ids for f in features], dtype="int64")
        self.all_label_ids = paddle.to_tensor([f.label_ids for f in features],
                                              dtype="int64")
        self.all_bboxes = paddle.to_tensor([f.boxes for f in features],
                                           dtype="int64")

    def __len__(self):
        # One feature record per example.
        return len(self.features)

    def __getitem__(self, index):
        # Returns a 5-tuple of per-sample tensors:
        # (input_ids, input_mask, segment_ids, label_ids, bboxes).
        return (
            self.all_input_ids[index],
            self.all_input_mask[index],
            self.all_segment_ids[index],
            self.all_label_ids[index],
            self.all_bboxes[index],
        )
class InputExample(object):
    """A single training/test example for token classification.

    Instance attributes mirror the constructor arguments verbatim.
    """

    def __init__(self, guid, words, labels, boxes, actual_bboxes, file_name,
                 page_size):
        """Construct an InputExample.

        Args:
            guid: Unique id for the example.
            words: list. The words of the sequence.
            labels: (Optional) list. The labels for each word of the
                sequence. Specified for train and dev examples, but not
                for test examples.
            boxes: list. One normalized bounding box per word.
            actual_bboxes: list. One pixel-space bounding box per word.
            file_name: Name of the source document file.
            page_size: Dimensions of the source page.
        """
        for attr_name, attr_value in (
                ("guid", guid),
                ("words", words),
                ("labels", labels),
                ("boxes", boxes),
                ("actual_bboxes", actual_bboxes),
                ("file_name", file_name),
                ("page_size", page_size)):
            setattr(self, attr_name, attr_value)
class InputFeatures(object):
    """A single set of padded, model-ready features for one example."""

    def __init__(
        self,
        input_ids,
        input_mask,
        segment_ids,
        label_ids,
        boxes,
        actual_bboxes,
        file_name,
        page_size,
    ):
        # Validate every coordinate of every normalized box. The original
        # check `assert 0 <= all(boxes) <= 1000` compared a boolean
        # (all() returns True/False, both within [0, 1000]) and could
        # never fail; this enforces what the error message promises.
        for box in boxes:
            assert all(
                0 <= coord <= 1000 for coord in box
            ), "Error with input bbox ({}): the coordinate value is not between 0 and 1000".format(
                box)
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.label_ids = label_ids
        self.boxes = boxes
        self.actual_bboxes = actual_bboxes
        self.file_name = file_name
        self.page_size = page_size
def read_examples_from_file(data_dir, mode):
    """Parse one FUNSD split into a list of InputExample objects.

    Reads three parallel line-aligned files from `data_dir`:
      {mode}.txt       - "word<TAB>label" per line
      {mode}_box.txt   - "word<TAB>normalized box" per line
      {mode}_image.txt - "word<TAB>actual box<TAB>page size<TAB>file name"
    Lines that are blank or start with "-DOCSTART-" separate documents.

    Fixes the original final-flush guid, which used
    `"%s-%d".format(...)` (percent placeholders with str.format) and
    therefore produced the literal guid "%s-%d" for the last example.
    """
    file_path = os.path.join(data_dir, "{}.txt".format(mode))
    box_file_path = os.path.join(data_dir, "{}_box.txt".format(mode))
    image_file_path = os.path.join(data_dir, "{}_image.txt".format(mode))
    guid_index = 1
    examples = []
    with open(file_path,
              encoding="utf-8") as f, open(box_file_path,
                                           encoding="utf-8") as fb, open(
                                               image_file_path,
                                               encoding="utf-8") as fi:
        words = []
        boxes = []
        actual_bboxes = []
        file_name = None
        page_size = None
        labels = []
        for line, bline, iline in zip(f, fb, fi):
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                # Document boundary: flush the accumulated example, if any.
                if words:
                    examples.append(
                        InputExample(
                            guid="{}-{}".format(mode, guid_index),
                            words=words,
                            labels=labels,
                            boxes=boxes,
                            actual_bboxes=actual_bboxes,
                            file_name=file_name,
                            page_size=page_size,
                        ))
                    guid_index += 1
                    words = []
                    boxes = []
                    actual_bboxes = []
                    file_name = None
                    page_size = None
                    labels = []
            else:
                splits = line.split("\t")
                bsplits = bline.split("\t")
                isplits = iline.split("\t")
                assert len(splits) == 2
                assert len(bsplits) == 2
                assert len(isplits) == 4
                assert splits[0] == bsplits[0]
                words.append(splits[0])
                if len(splits) > 1:
                    labels.append(splits[-1].replace("\n", ""))
                    box = bsplits[-1].replace("\n", "")
                    box = [int(b) for b in box.split()]
                    boxes.append(box)
                    actual_bbox = [int(b) for b in isplits[1].split()]
                    actual_bboxes.append(actual_bbox)
                    page_size = [int(i) for i in isplits[2].split()]
                    file_name = isplits[3].strip()
                else:
                    # Examples could have no label for mode = "test"
                    # NOTE(review): unreachable as written — the assert
                    # above already requires len(splits) == 2.
                    labels.append("O")
        # Flush the trailing example when the file does not end with a
        # blank separator line.
        if words:
            examples.append(
                InputExample(
                    guid="{}-{}".format(mode, guid_index),
                    words=words,
                    labels=labels,
                    boxes=boxes,
                    actual_bboxes=actual_bboxes,
                    file_name=file_name,
                    page_size=page_size,
                ))
    return examples
def convert_examples_to_features(
    examples,
    label_list,
    max_seq_length,
    tokenizer,
    cls_token_at_end=False,
    cls_token="[CLS]",
    cls_token_segment_id=1,
    sep_token="[SEP]",
    sep_token_extra=False,
    pad_on_left=False,
    pad_token=0,
    cls_token_box=None,
    sep_token_box=None,
    pad_token_box=None,
    pad_token_segment_id=0,
    pad_token_label_id=-1,
    sequence_a_segment_id=0,
    mask_padding_with_zero=True,
):
    """Convert InputExamples into padded InputFeatures.

    Tokenizes every word, replicates its bounding boxes per sub-token,
    truncates to `max_seq_length` (reserving room for special tokens),
    adds [CLS]/[SEP] with their conventional boxes, and pads everything
    to a fixed length.

    The box defaults use None sentinels instead of the original mutable
    list defaults (`[0, 0, 0, 0]` etc.), avoiding the shared-mutable-
    default pitfall while keeping the same effective values.
    """
    # Resolve sentinel defaults (behaviorally identical to the originals).
    if cls_token_box is None:
        cls_token_box = [0, 0, 0, 0]
    if sep_token_box is None:
        sep_token_box = [1000, 1000, 1000, 1000]
    if pad_token_box is None:
        pad_token_box = [0, 0, 0, 0]
    label_map = {label: i for i, label in enumerate(label_list)}
    features = []
    for (ex_index, example) in enumerate(examples):
        file_name = example.file_name
        page_size = example.page_size
        width, height = page_size
        if ex_index % 10000 == 0:
            logger.info("Writing example %d of %d", ex_index, len(examples))
        tokens = []
        token_boxes = []
        actual_bboxes = []
        label_ids = []
        for word, label, box, actual_bbox in zip(example.words, example.labels,
                                                 example.boxes,
                                                 example.actual_bboxes):
            word_tokens = tokenizer.tokenize(word)
            tokens.extend(word_tokens)
            token_boxes.extend([box] * len(word_tokens))
            actual_bboxes.extend([actual_bbox] * len(word_tokens))
            # Use the real label id for the first token of the word, and padding ids for the remaining tokens
            label_ids.extend([label_map[label]] + [pad_token_label_id] *
                             (len(word_tokens) - 1))
        # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
        special_tokens_count = 3 if sep_token_extra else 2
        if len(tokens) > max_seq_length - special_tokens_count:
            tokens = tokens[:(max_seq_length - special_tokens_count)]
            token_boxes = token_boxes[:(max_seq_length - special_tokens_count)]
            actual_bboxes = actual_bboxes[:(max_seq_length -
                                            special_tokens_count)]
            label_ids = label_ids[:(max_seq_length - special_tokens_count)]
        # The convention in BERT is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids:   0   0   0   0  0     0   0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        #
        # For classification tasks, the first vector (corresponding to [CLS]) is
        # used as as the "sentence vector". Note that this only makes sense because
        # the entire model is fine-tuned.
        tokens += [sep_token]
        token_boxes += [sep_token_box]
        actual_bboxes += [[0, 0, width, height]]
        label_ids += [pad_token_label_id]
        if sep_token_extra:
            # roberta uses an extra separator b/w pairs of sentences
            tokens += [sep_token]
            token_boxes += [sep_token_box]
            actual_bboxes += [[0, 0, width, height]]
            label_ids += [pad_token_label_id]
        segment_ids = [sequence_a_segment_id] * len(tokens)
        if cls_token_at_end:
            tokens += [cls_token]
            token_boxes += [cls_token_box]
            actual_bboxes += [[0, 0, width, height]]
            label_ids += [pad_token_label_id]
            segment_ids += [cls_token_segment_id]
        else:
            tokens = [cls_token] + tokens
            token_boxes = [cls_token_box] + token_boxes
            actual_bboxes = [[0, 0, width, height]] + actual_bboxes
            label_ids = [pad_token_label_id] + label_ids
            segment_ids = [cls_token_segment_id] + segment_ids
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
        # Zero-pad up to the sequence length.
        padding_length = max_seq_length - len(input_ids)
        if pad_on_left:
            input_ids = ([pad_token] * padding_length) + input_ids
            input_mask = ([0 if mask_padding_with_zero else 1] *
                          padding_length) + input_mask
            segment_ids = ([pad_token_segment_id] *
                           padding_length) + segment_ids
            label_ids = ([pad_token_label_id] * padding_length) + label_ids
            token_boxes = ([pad_token_box] * padding_length) + token_boxes
        else:
            input_ids += [pad_token] * padding_length
            input_mask += [0 if mask_padding_with_zero else 1] * padding_length
            segment_ids += [pad_token_segment_id] * padding_length
            label_ids += [pad_token_label_id] * padding_length
            token_boxes += [pad_token_box] * padding_length
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        assert len(label_ids) == max_seq_length
        assert len(token_boxes) == max_seq_length
        features.append(
            InputFeatures(
                input_ids=input_ids,
                input_mask=input_mask,
                segment_ids=segment_ids,
                label_ids=label_ids,
                boxes=token_boxes,
                actual_bboxes=actual_bboxes,
                file_name=file_name,
                page_size=page_size,
            ))
    return features
| 39.374214
| 109
| 0.548279
|
4a1191df0714d9a91e86628b15ed5cef462f960a
| 213
|
py
|
Python
|
jetblack_datemath/weekdays.py
|
rob-blackbourn/jetblack-datemath
|
d2e51fb7ec9b2a49cb335e2ad788e793bb62a3a9
|
[
"Apache-2.0"
] | null | null | null |
jetblack_datemath/weekdays.py
|
rob-blackbourn/jetblack-datemath
|
d2e51fb7ec9b2a49cb335e2ad788e793bb62a3a9
|
[
"Apache-2.0"
] | null | null | null |
jetblack_datemath/weekdays.py
|
rob-blackbourn/jetblack-datemath
|
d2e51fb7ec9b2a49cb335e2ad788e793bb62a3a9
|
[
"Apache-2.0"
] | null | null | null |
"""Week days"""
from enum import IntEnum
# Build the enumeration through the functional IntEnum API; `start=0`
# keeps the original explicit values MONDAY == 0 ... SUNDAY == 6.
DayOfWeek = IntEnum(
    "DayOfWeek",
    [
        "MONDAY",
        "TUESDAY",
        "WEDNESDAY",
        "THURSDAY",
        "FRIDAY",
        "SATURDAY",
        "SUNDAY",
    ],
    start=0,
)
DayOfWeek.__doc__ = """The days of the week"""
| 15.214286
| 30
| 0.577465
|
4a1193a7df35c9873f4b5325fd3b63f57d88b803
| 951
|
py
|
Python
|
iAS/core/devicemgt/Device.py
|
menaka121/team---iAS
|
b7ec04982425a0e3c590011a951e678db499c84a
|
[
"Apache-2.0"
] | null | null | null |
iAS/core/devicemgt/Device.py
|
menaka121/team---iAS
|
b7ec04982425a0e3c590011a951e678db499c84a
|
[
"Apache-2.0"
] | null | null | null |
iAS/core/devicemgt/Device.py
|
menaka121/team---iAS
|
b7ec04982425a0e3c590011a951e678db499c84a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Team - iAS, University Of Peradeniya
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Base class for device types
class Device:
    """Base class for device types managed by the platform.

    A plain data holder; subclasses add device-specific behavior.
    Note that the owner id argument is stored under ``deviceOwner``.
    """

    def __init__(self, deviceID="", deviceName="", deviceOwnerID="", deviceType=""):
        self.deviceID = deviceID
        self.deviceName = deviceName
        # Stored as `deviceOwner` (not `deviceOwnerID`) to match existing callers.
        self.deviceOwner = deviceOwnerID
        self.deviceType = deviceType
| 33.964286
| 74
| 0.677182
|
4a1193ffcf4f587b2ebcb0b48dd91207ccdf45e4
| 15,904
|
py
|
Python
|
app/databaseFuctions.py
|
scavicchio/easyWaltonTracker
|
05fd5b12e9e6d9e21f7209baca3b8137c013f002
|
[
"MIT"
] | 2
|
2018-05-10T04:50:11.000Z
|
2018-05-10T04:50:13.000Z
|
app/databaseFuctions.py
|
scavicchio/easyWaltonTracker
|
05fd5b12e9e6d9e21f7209baca3b8137c013f002
|
[
"MIT"
] | 5
|
2018-06-11T22:23:06.000Z
|
2020-02-28T02:20:52.000Z
|
app/databaseFuctions.py
|
scavicchio/easyWaltonTracker
|
05fd5b12e9e6d9e21f7209baca3b8137c013f002
|
[
"MIT"
] | null | null | null |
import pymysql.cursors
# Lets make some global vars
# Connection settings are read from clientinfo.txt, one value per line in
# the fixed order: host, port, db user, db password, database, username,
# password, difficulty magnitude divisor.
with open("clientinfo.txt", "r") as ins:
    data = []
    for line in ins:
        data.append(line)
host = data[0].strip()
port = data[1].strip()
dbUser = data[2].strip()
dbPassword = data[3].strip()
database = data[4].strip()
username = data[5].strip()
password = data[6].strip()
# Divisor used throughout this module to rescale raw difficulty values
# for display.
difficultyHashMagnitude = float(data[7].strip())
def connect():
    # Open a new PyMySQL connection using the settings loaded above;
    # DictCursor makes every fetched row a column-name -> value dict.
    conn = pymysql.connect(host=host,
                           port = int(port),
                           user=dbUser,
                           password=dbPassword,
                           db=database,
                           charset='utf8mb4',
                           cursorclass=pymysql.cursors.DictCursor)
    return conn
# Module-level connection, created as a side effect of importing this
# module.
conn = connect()
### MAIN PAGES ###
### MAIN PAGES ###
### MAIN PAGES ###
### MAIN PAGES ###
### MAIN PAGES ###
# gets the most recent block that the DB has data for
def getLatestBlockFromDB(conn):
    # NOTE(review): redefined verbatim later in the API section; the later
    # definition shadows this one at import time.
    cursor = conn.cursor()
    query = 'SELECT blockNum FROM blockchain ORDER BY blockNum DESC LIMIT 1'
    cursor.execute(query)
    latestBlock = cursor.fetchone()
    cursor.close()
    return latestBlock["blockNum"];
def getLastUpdateTime(conn):
    # Timestamp of the newest block row, i.e. when data was last ingested.
    cursor = conn.cursor()
    query = 'SELECT timest FROM blockchain ORDER BY blockNum DESC LIMIT 1'
    cursor.execute(query)
    latestBlock = cursor.fetchone()
    cursor.close()
    return latestBlock["timest"];
# gets all data for a specific etherbase
def getDataForMiner(conn, etherbase):
    # NOTE(review): also redefined verbatim in the API section below; that
    # later definition shadows this one.
    cursor = conn.cursor()
    query = 'SELECT * FROM blockchain WHERE miner = %s'
    cursor.execute(query, (etherbase))
    data = cursor.fetchall()
    cursor.close()
    return data
def getDataForMinerPaginated(conn, etherbase, perPage,page):
    # Page numbers are 1-based; difficulty is rescaled for display.
    offset = (page-1)*perPage
    cursor = conn.cursor()
    query = 'SELECT * FROM blockchain WHERE miner = %s ORDER BY blockNum DESC LIMIT %s OFFSET %s'
    cursor.execute(query, (etherbase,perPage, offset))
    data = cursor.fetchall()
    cursor.close()
    for x in data:
        x['difficulty'] = float("{0:.2f}".format(x['difficulty']/difficultyHashMagnitude))
    return data
# gets the total blocks a miner has solved
def getRewardCount(conn, etherbase):
    """Return how many blocks in total `etherbase` has mined."""
    cur = conn.cursor()
    cur.execute('SELECT COUNT(*) FROM blockchain WHERE miner = %s',
                (etherbase))
    row = cur.fetchone()
    cur.close()
    return row["COUNT(*)"]
# returns the number of rewards for each unique extraData as a pair.
def getRewardCountByExtra(conn,etherbase):
    # Per-rig (extra_data) totals for one miner: all-time, last 7 days and
    # last month, sorted by last-week activity.
    cursor = conn.cursor()
    query = 'SELECT extra_data, \
        COUNT(blockNum) as theCount, \
        SUM(case when (timest >= DATE(NOW()) - INTERVAL 7 DAY) then 1 else 0 end) as lastWeek, \
        SUM(case when (timest >= DATE(NOW()) - INTERVAL 1 MONTH) then 1 else 0 end) as lastMonth \
        FROM blockchain \
        WHERE miner = %s \
        GROUP BY extra_data ORDER BY lastWeek DESC'
    cursor.execute(query,(etherbase))
    data = cursor.fetchall()
    cursor.close()
    return data
def getLast7Days(conn,etherbase):
    # Per-rig block counts for one miner over the last 7 days.
    cursor = conn.cursor()
    query = 'SELECT extra_data, COUNT(blockNum) AS theCount FROM blockchain WHERE miner = %s AND timest >= DATE(NOW()) - INTERVAL 7 DAY GROUP BY extra_data'
    cursor.execute(query,(etherbase))
    data = cursor.fetchall()
    cursor.close()
    return data
def getLastMonth(conn,etherbase):
    # Per-rig block counts for one miner over the last month.
    cursor = conn.cursor()
    query = 'SELECT extra_data, COUNT(blockNum) AS theCount FROM blockchain WHERE miner = %s AND timest >= DATE(NOW()) - INTERVAL 1 MONTH GROUP BY extra_data'
    cursor.execute(query,(etherbase))
    data = cursor.fetchall()
    cursor.close()
    return data
# reurns all info from latest X rewards for a given address
def getLatestNRewards(conn,etherbase,index):
    cursor = conn.cursor()
    query = 'SELECT * FROM blockchain WHERE miner = %s ORDER BY blockNum DESC LIMIT %s'
    cursor.execute(query, (etherbase,index))
    data = cursor.fetchall()
    # Rescale difficulty for display.
    for x in data:
        x['difficulty'] = float("{0:.2f}".format(x['difficulty']/difficultyHashMagnitude))
    cursor.close()
    return data
def getLatestAllRewards(conn,etherbase):
    # Same as getLatestNRewards but without a LIMIT.
    cursor = conn.cursor()
    query = 'SELECT * FROM blockchain WHERE miner = %s ORDER BY blockNum DESC'
    cursor.execute(query, (etherbase))
    data = cursor.fetchall()
    for x in data:
        x['difficulty'] = float("{0:.2f}".format(x['difficulty']/difficultyHashMagnitude))
    cursor.close()
    return data
#gets all blocks, timest and extra for a JS graph for later
def getGraphData(conn,etherbase):
    cursor = conn.cursor()
    query = 'SELECT blockNum, extra_data, timest FROM blockchain WHERE miner = %s ORDER BY timest ASC'
    cursor.execute(query, (etherbase))
    data = cursor.fetchall()
    cursor.close()
    return data
def getDifficultyGraphData(conn):
    # Samples every 100th block (blockNum 1, 101, 201, ...) to keep the
    # difficulty chart small.
    cursor = conn.cursor()
    query = 'SELECT blockNum, difficulty, timest FROM blockchain WHERE ((blockNum-1) % 100 = 0) ORDER BY blockNum ASC'
    cursor.execute(query)
    data = cursor.fetchall()
    cursor.close()
    return data
#gets most recent N blocks, data, and time for homepage
def getLatestNBlocks(conn,index):
    cursor = conn.cursor()
    query = 'SELECT * FROM blockchain ORDER BY timest DESC LIMIT %s'
    cursor.execute(query, (index))
    data = cursor.fetchall()
    for x in data:
        x['difficulty'] = float("{0:.2f}".format(x['difficulty']/difficultyHashMagnitude))
    cursor.close()
    return data
def getLatestNBlocksOffset(conn,perPage,page):
    # Paginated variant of getLatestNBlocks; pages are 1-based.
    offset = (page-1)*perPage
    cursor = conn.cursor()
    query = 'SELECT * FROM blockchain ORDER BY timest DESC LIMIT %s OFFSET %s'
    cursor.execute(query, (perPage,offset))
    data = cursor.fetchall()
    for x in data:
        x['difficulty'] = float("{0:.2f}".format(x['difficulty']/difficultyHashMagnitude))
    cursor.close()
    return data
#adds a persons email to the alert table
def addEmailAlert(conn,etherbase,email,extra):
    # Returns the pymysql error object on failure, the string "Sucsess"
    # otherwise (spelling preserved - callers may compare against it).
    cursor = conn.cursor()
    query = 'INSERT INTO `emailList`(`miner`, `email`, `extra_data`, `confirmed`) VALUES (%s,%s,%s,False)'
    try:
        cursor.execute(query, (etherbase, email, extra))
    except pymysql.Error as e:
        return e
    conn.commit()
    cursor.close()
    return "Sucsess"
def removeEmailAlert(conn,etherbase,email,extra):
    # Delete one alert subscription; returns the pymysql error object on
    # failure, "Sucsess" otherwise.
    cursor = conn.cursor()
    query = 'DELETE FROM emailList WHERE (miner = %s AND email = %s AND extra_data = %s)'
    try:
        cursor.execute(query, (etherbase, email,extra))
    except pymysql.Error as e:
        cursor.close()
        return e
    conn.commit()
    cursor.close()
    return "Sucsess"
def foundExtra(conn,extra):
    # True if at least one block carries this extra_data value.
    cursor = conn.cursor()
    query = 'SELECT COUNT(*) FROM blockchain WHERE extra_data = %s LIMIT 1'
    cursor.execute(query,(extra))
    data = cursor.fetchone()["COUNT(*)"]
    cursor.close()
    if (data == 0):
        return False
    return True
def getDataForExtra(conn,extra):
    # All blocks tagged with this extra_data, difficulty rescaled.
    cursor = conn.cursor()
    query = 'SELECT * FROM blockchain WHERE extra_data = %s ORDER BY blockNum DESC'
    cursor.execute(query,(extra))
    data = cursor.fetchall()
    cursor.close()
    for x in data:
        x['difficulty'] = float("{0:.2f}".format(x['difficulty']/difficultyHashMagnitude))
    return data
def getDataForExtraLimited(conn,extra,limit):
    # Same as getDataForExtra, capped at `limit` rows.
    cursor = conn.cursor()
    query = 'SELECT * FROM blockchain WHERE extra_data = %s ORDER BY blockNum DESC LIMIT %s'
    cursor.execute(query,(extra,limit))
    data = cursor.fetchall()
    cursor.close()
    for x in data:
        x['difficulty'] = float("{0:.2f}".format(x['difficulty']/difficultyHashMagnitude))
    return data
def getTotalByExtra(conn,extra):
    # Total block count for one extra_data value.
    cursor = conn.cursor()
    query = 'SELECT COUNT(*) FROM blockchain WHERE extra_data = %s'
    cursor.execute(query,(extra))
    data = cursor.fetchone()
    cursor.close()
    return data["COUNT(*)"]
def getExtraStats(conn,extra):
    # All-time / last-week / last-month counts for one extra_data value.
    cursor = conn.cursor()
    query = 'SELECT \
        COUNT(blockNum) as theCount, \
        SUM(case when (timest >= DATE(NOW()) - INTERVAL 7 DAY) then 1 else 0 end) as lastWeek, \
        SUM(case when (timest >= DATE(NOW()) - INTERVAL 1 MONTH) then 1 else 0 end) as lastMonth \
        FROM blockchain \
        WHERE extra_data = %s \
        ORDER BY lastWeek DESC'
    cursor.execute(query,(extra))
    data = cursor.fetchall()
    cursor.close()
    return data
def getTopMiners(conn, limit):
    # Leaderboard of miners by all-time block count.
    cursor = conn.cursor()
    query = 'SELECT miner, Count(*) AS total FROM blockchain GROUP BY miner ORDER BY total DESC LIMIT %s'
    cursor.execute(query,limit)
    data = cursor.fetchall()
    cursor.close()
    return data
def getTopRigs(conn, limit):
    # Leaderboard of rigs (extra_data) by all-time block count.
    cursor = conn.cursor()
    query = 'SELECT extra_data, Count(*) AS total FROM blockchain GROUP BY extra_data ORDER BY total DESC LIMIT %s'
    cursor.execute(query,limit)
    data = cursor.fetchall()
    cursor.close()
    return data
def getTopMinersLatest(conn, limit, oldest):
    # Miner leaderboard restricted to the last `oldest` days.
    cursor = conn.cursor()
    query = 'SELECT miner, Count(*) AS total FROM blockchain WHERE (timest >= DATE(NOW()) - INTERVAL %s DAY) GROUP BY miner ORDER BY total DESC LIMIT %s'
    cursor.execute(query,(oldest,limit))
    data = cursor.fetchall()
    cursor.close()
    return data
def getTopRigsLatest(conn, limit,oldest):
    # Rig leaderboard restricted to the last `oldest` days.
    cursor = conn.cursor()
    query = 'SELECT extra_data, Count(*) AS total FROM blockchain WHERE (timest >= DATE(NOW()) - INTERVAL %s DAY) GROUP BY extra_data ORDER BY total DESC LIMIT %s'
    cursor.execute(query,(oldest,limit))
    data = cursor.fetchall()
    cursor.close()
    return data
def getMaxTransactions(conn):
    # Busiest single day by transaction count.
    cursor = conn.cursor()
    query = 'SELECT COUNT(*) AS count, DATE(timest) AS day FROM transaction GROUP BY day ORDER BY count DESC LIMIT 1'
    cursor.execute(query)
    data = cursor.fetchone()
    cursor.close()
    return data
def getMaxDifficulty(conn):
    # All-time peak difficulty and the block that reached it (raw value,
    # not rescaled).
    cursor = conn.cursor()
    query = 'SELECT difficulty, blockNum FROM blockchain ORDER BY difficulty DESC LIMIT 1'
    cursor.execute(query)
    data = cursor.fetchone()
    cursor.close()
    return data
def getTotalTransactions(conn):
    cursor = conn.cursor()
    query = 'SELECT COUNT(*) AS total FROM transaction'
    cursor.execute(query)
    data = cursor.fetchone()
    cursor.close()
    return data
def getAverageDifficulty(conn):
    cursor = conn.cursor()
    query = 'SELECT AVG(difficulty) AS average FROM blockchain'
    cursor.execute(query)
    data = cursor.fetchone()
    cursor.close()
    return data
def getActiveWallets(conn):
    # Distinct addresses seen as miner, sender or receiver (column is
    # spelled `reciever` in the schema).
    cursor = conn.cursor()
    query = 'SELECT COUNT(*) AS total \
        FROM (SELECT DISTINCT(miner) FROM blockchain UNION \
        (SELECT DISTINCT(sender) FROM transaction) UNION \
        (SELECT DISTINCT(reciever) FROM transaction)) AS a'
    cursor.execute(query)
    data = cursor.fetchone()
    cursor.close()
    return data
### FOR 90 DAYS ###
# The *90 variants below mirror the functions above but restrict the time
# window to the last `days` days (the caller typically passes 90).
def getMaxTransactions90(conn,days):
    cursor = conn.cursor()
    query = 'SELECT COUNT(*) AS count, DATE(timest) AS day FROM transaction WHERE (timest >= DATE(NOW()) - INTERVAL %s DAY) GROUP BY day ORDER BY count DESC LIMIT 1'
    cursor.execute(query,(days))
    data = cursor.fetchone()
    cursor.close()
    return data
def getMaxDifficulty90(conn,days):
    cursor = conn.cursor()
    query = 'SELECT difficulty, blockNum FROM blockchain WHERE (timest >= DATE(NOW()) - INTERVAL %s DAY) ORDER BY difficulty DESC LIMIT 1'
    cursor.execute(query,(days))
    data = cursor.fetchone()
    cursor.close()
    return data
def getTotalTransactions90(conn,days):
    cursor = conn.cursor()
    query = 'SELECT COUNT(*) AS total FROM transaction WHERE (timest >= DATE(NOW()) - INTERVAL %s DAY)'
    cursor.execute(query,(days))
    data = cursor.fetchone()
    cursor.close()
    return data
def getAverageDifficulty90(conn,days):
    cursor = conn.cursor()
    query = 'SELECT AVG(difficulty) AS average FROM blockchain WHERE (timest >= DATE(NOW()) - INTERVAL %s DAY)'
    cursor.execute(query,(days))
    data = cursor.fetchone()
    cursor.close()
    return data
def getActiveWallets90(conn,days):
    cursor = conn.cursor()
    query = 'SELECT COUNT(*) AS total \
        FROM (SELECT DISTINCT(miner) FROM blockchain WHERE (timest >= DATE(NOW()) - INTERVAL %s DAY) UNION \
        (SELECT DISTINCT(sender) FROM transaction WHERE (timest >= DATE(NOW()) - INTERVAL %s DAY)) UNION \
        (SELECT DISTINCT(reciever) FROM transaction WHERE (timest >= DATE(NOW()) - INTERVAL %s DAY))) AS a'
    cursor.execute(query,(days,days,days))
    data = cursor.fetchone()
    cursor.close()
    return data
def getTransactionFrequencyGraph(conn):
    # Daily transaction counts for charting.
    cursor = conn.cursor()
    query = 'SELECT COUNT(*) AS count, DATE(timest) AS day FROM transaction GROUP BY day'
    cursor.execute(query)
    data = cursor.fetchall()
    cursor.close()
    return data
def getTransactionListBlock(conn,block):
    # Hashes of all transactions contained in one block.
    cursor = conn.cursor()
    query = 'SELECT hash FROM transaction WHERE blockNum = %s'
    cursor.execute(query, (block))
    data = cursor.fetchall()
    cursor.close()
    return data
### API ###
### API ###
### API ###
### API ###
### API ###
def getBlock(conn,blockNum):
    # Raw row for one block (no difficulty rescaling in the API section).
    cursor = conn.cursor()
    query = 'SELECT * FROM blockchain WHERE blockNum = %s'
    cursor.execute(query,(blockNum))
    data = cursor.fetchone()
    cursor.close()
    return data
def getLatestNBlocksAPI(conn,index):
    cursor = conn.cursor()
    query = 'SELECT * FROM blockchain ORDER BY blockNum DESC LIMIT %s'
    cursor.execute(query, (index))
    data = cursor.fetchall()
    cursor.close()
    return data
def getTransactionAPI(conn,hash):
    # Single transaction looked up by its hash.
    cursor = conn.cursor()
    query = 'SELECT * FROM transaction WHERE hash = %s'
    cursor.execute(query, (hash))
    data = cursor.fetchone()
    cursor.close()
    return data
def getTransactionBlockAPI(conn,hash):
    # All transactions of one block (parameter is a block number despite
    # the name `hash`).
    cursor = conn.cursor()
    query = 'SELECT * FROM transaction WHERE blockNum = %s'
    cursor.execute(query, (hash))
    data = cursor.fetchall()
    cursor.close()
    return data
# gets all data for a specific etherbase
def getDataForMiner(conn, etherbase):
    # NOTE(review): duplicate of the earlier getDataForMiner; this later
    # definition is the one in effect after import.
    cursor = conn.cursor()
    query = 'SELECT * FROM blockchain WHERE miner = %s'
    cursor.execute(query, (etherbase))
    data = cursor.fetchall()
    cursor.close()
    return data
def getDataForExtraAPI(conn,extra):
    cursor = conn.cursor()
    query = 'SELECT * FROM blockchain WHERE extra_data = %s ORDER BY blockNum DESC'
    cursor.execute(query,(extra))
    data = cursor.fetchall()
    cursor.close()
    return data
# gets the most recent block that the DB has data for
def getLatestBlockFromDB(conn):
    # NOTE(review): duplicate of the earlier getLatestBlockFromDB; this
    # later definition is the one in effect after import.
    cursor = conn.cursor()
    query = 'SELECT blockNum FROM blockchain ORDER BY blockNum DESC LIMIT 1'
    cursor.execute(query)
    latestBlock = cursor.fetchone()
    cursor.close()
    return latestBlock["blockNum"];
## EMAIL SERVICE ##
def getEmailsForAlert(conn,blockNum):
    # NOTE(review): this function is unfinished - no query is built or
    # executed and `data` is never assigned, so calling it raises
    # NameError. It presumably should select matching rows from
    # emailList for the given block; confirm the intended query before
    # completing it.
    cursor = conn.cursor()
    return data
def setEmailConfirmation(conn, email, confirm):
    """Set the `confirmed` flag on every alert row with this email.

    Fixes a NameError in the original, which passed the undefined name
    `ID` as the WHERE parameter instead of the `email` argument.

    Returns the literal string "Sucsess" (spelling preserved - callers
    elsewhere compare against it).
    """
    cursor = conn.cursor()
    query = 'UPDATE emailList SET confirmed = %s WHERE email = %s'
    cursor.execute(query, (confirm, email))
    conn.commit()
    cursor.close()
    return "Sucsess"
def isMasternode(conn,etherbase):
    # True if the address ever received a transaction of exactly 5000 -
    # presumably the masternode collateral amount; confirm against the
    # chain's rules.
    cursor = conn.cursor()
    query = 'SELECT COUNT(*) FROM transaction WHERE value = 5000 AND reciever = %s'
    cursor.execute(query,(etherbase))
    data = cursor.fetchone()['COUNT(*)']
    #print("IS MASTERNODE")
    #print(data)
    cursor.close()
    if data >= 1:
        return True
    return False
def isGuardian(conn,etherbase):
    # NOTE(review): the query filters the subquery by `value = %s` but the
    # call passes the address as that parameter, and the outer WHERE does
    # not reference the address at all - looks suspicious; verify the
    # intended semantics against the wtctGMN/GMN_ERC schema.
    cursor = conn.cursor()
    query = 'SELECT COUNT(*) FROM GMN_ERC WHERE etherbase IN (SELECT wtctGMN.from FROM wtctGMN WHERE value = %s)'
    cursor.execute(query,(etherbase))
    data = cursor.fetchone()['COUNT(*)']
    #print("IS GUARDIAN")
    #print(data)
    cursor.close()
    if data >= 1:
        return True
    return False
# gets the total blocks a miner has solved
def getPhase1Rewards(conn,etherbase):
    # Blocks mined by the address up to (and including) block 200000,
    # i.e. during "phase 1".
    cursor = conn.cursor()
    query = 'SELECT COUNT(*) FROM blockchain WHERE miner = %s AND blockNum <= 200000'
    cursor.execute(query, (etherbase))
    data = cursor.fetchone()
    cursor.close()
    return data["COUNT(*)"]
| 30.409178
| 163
| 0.674736
|
4a1194e996a93c25f56a95f153c1484df0de6a8c
| 4,547
|
py
|
Python
|
arcade/particle.py
|
LiorAvrahami/arcade
|
fce254a9eb89629de1f99d57a63759a2953184e9
|
[
"MIT"
] | 1
|
2020-04-04T01:03:24.000Z
|
2020-04-04T01:03:24.000Z
|
arcade/particle.py
|
LiorAvrahami/arcade
|
fce254a9eb89629de1f99d57a63759a2953184e9
|
[
"MIT"
] | 1
|
2019-08-11T18:47:27.000Z
|
2019-08-12T03:02:11.000Z
|
arcade/particle.py
|
LiorAvrahami/arcade
|
fce254a9eb89629de1f99d57a63759a2953184e9
|
[
"MIT"
] | null | null | null |
"""
Particle - Object produced by an Emitter. Often used in large quantity to produce visual effects effects
"""
from arcade.sprite import Sprite
from arcade.draw_commands import Texture
import arcade.utils
from arcade.arcade_types import Point, Vector
from typing import Union
FilenameOrTexture = Union[str, Texture]
class Particle(Sprite):
    """Sprite that is emitted from an Emitter"""

    def __init__(
        self,
        filename_or_texture: FilenameOrTexture,
        change_xy: Vector,
        center_xy: Point = (0.0, 0.0),
        angle: float = 0.0,
        change_angle: float = 0.0,
        scale: float = 1.0,
        alpha: int = 255,
        mutation_callback=None
    ):
        # A Texture instance is attached to a texture-less Sprite; a
        # filename string is passed straight through to the Sprite ctor.
        if isinstance(filename_or_texture, Texture):
            super().__init__(None, scale=scale)
            self.append_texture(filename_or_texture)
            self.set_texture(0)
        else:
            super().__init__(filename_or_texture, scale=scale)
        self.center_x = center_xy[0]
        self.center_y = center_xy[1]
        self.change_x = change_xy[0]
        self.change_y = change_xy[1]
        self.angle = angle
        self.change_angle = change_angle
        self.alpha = alpha
        # Optional callable invoked with this particle on every update().
        self.mutation_callback = mutation_callback

    def update(self):
        """Advance the Particle's simulation"""
        super().update()
        if self.mutation_callback:
            self.mutation_callback(self)

    # def draw(self):
    #     raise NotImplementedError("Particle.draw needs to be implemented")

    def can_reap(self):
        """Determine if Particle can be deleted"""
        raise NotImplementedError("Particle.can_reap needs to be implemented")
class EternalParticle(Particle):
    """Particle that has no end to its life"""

    def __init__(self, filename_or_texture: FilenameOrTexture,
                 change_xy: Vector, center_xy: Point = (0.0, 0.0),
                 angle: float = 0, change_angle: float = 0,
                 scale: float = 1.0, alpha: int = 255,
                 mutation_callback=None):
        # Construction is identical to the base Particle; this subclass
        # only overrides the reaping policy below.
        super().__init__(filename_or_texture, change_xy, center_xy, angle,
                         change_angle, scale, alpha, mutation_callback)

    def can_reap(self):
        """An eternal particle is never eligible for deletion."""
        return False
class LifetimeParticle(Particle):
    """Particle that lives for a given amount of time and is then deleted"""

    def __init__(
        self,
        filename_or_texture: FilenameOrTexture,
        change_xy: Vector,
        lifetime: float,
        center_xy: Point = (0.0, 0.0),
        angle: float = 0,
        change_angle: float = 0,
        scale: float = 1.0,
        alpha: int = 255,
        mutation_callback=None
    ):
        # lifetime: intended lifespan in seconds (see update() for how
        # elapsed time is accounted).
        super().__init__(filename_or_texture, change_xy, center_xy, angle, change_angle, scale, alpha,
                         mutation_callback)
        self.lifetime_original = lifetime
        self.lifetime_elapsed = 0.0

    def update(self):
        """Advance the Particle's simulation"""
        super().update()
        # Elapsed time advances by a fixed 1/60 s per update() call, i.e.
        # it assumes update() runs once per frame at 60 FPS.
        self.lifetime_elapsed += 1 / 60

    def can_reap(self):
        """Determine if Particle can be deleted"""
        return self.lifetime_elapsed >= self.lifetime_original
def clamp(a, low, high):
    """Return `a` limited to [low, high] (high bound checked first)."""
    if a > high:
        return high
    return low if a < low else a
class FadeParticle(LifetimeParticle):
    """Particle that animates its alpha between two values during its lifetime"""

    def __init__(
        self,
        filename_or_texture: FilenameOrTexture,
        change_xy: Vector,
        lifetime: float,
        center_xy: Point = (0.0, 0.0),
        angle: float = 0,
        change_angle: float = 0,
        scale: float = 1.0,
        start_alpha: int = 255,
        end_alpha: int = 0,
        mutation_callback=None
    ):
        # start_alpha/end_alpha: alpha values at the beginning and end of
        # the particle's lifetime; the particle starts at start_alpha.
        super().__init__(filename_or_texture, change_xy, lifetime, center_xy, angle, change_angle, scale, start_alpha,
                         mutation_callback)
        self.start_alpha = start_alpha
        self.end_alpha = end_alpha

    def update(self):
        """Advance the Particle's simulation"""
        super().update()
        # Linearly interpolate alpha over the elapsed fraction of the
        # lifetime, then clamp into the valid 0-255 byte range.
        a = arcade.utils.lerp(self.start_alpha,
                              self.end_alpha,
                              self.lifetime_elapsed / self.lifetime_original)
        self.alpha = clamp(a, 0, 255)
| 31.358621
| 118
| 0.58896
|
4a11958845e1fc7a0c0232cdc22d3a6a33814286
| 1,211
|
py
|
Python
|
Day-24/day-24.py
|
sirilalithaadapa/Hacker-Rank
|
c020c5e29cbbcc83210416c6be498450c4bbc879
|
[
"MIT"
] | null | null | null |
Day-24/day-24.py
|
sirilalithaadapa/Hacker-Rank
|
c020c5e29cbbcc83210416c6be498450c4bbc879
|
[
"MIT"
] | null | null | null |
Day-24/day-24.py
|
sirilalithaadapa/Hacker-Rank
|
c020c5e29cbbcc83210416c6be498450c4bbc879
|
[
"MIT"
] | 1
|
2021-06-06T01:23:43.000Z
|
2021-06-06T01:23:43.000Z
|
class Node:
    """A singly linked list node holding *data* and a *next* pointer."""

    def __init__(self, data):
        self.data = data
        self.next = None  # linked up later by list-building code
class Solution:
    """Singly linked list helpers: append, print, and de-duplicate.

    Fixes over the original: identity comparison with ``is None`` instead
    of ``== None``, and the redundant single-element branch in ``insert``
    folded into the general tail walk.
    """

    def insert(self, head, data):
        """Append *data* at the tail of the list and return the head."""
        node = Node(data)
        if head is None:
            return node
        tail = head
        while tail.next is not None:
            tail = tail.next
        tail.next = node
        return head

    def display(self, head):
        """Print the list values space-separated on one line."""
        current = head
        while current:
            print(current.data, end=' ')
            current = current.next

    def removeDuplicates(self, head):
        """Collapse runs of equal adjacent values (sorted-list dedup).

        Returns the (possibly unchanged) head; handles the empty list.
        """
        if not head:
            return head
        prev = head
        cur = head.next
        while cur:
            if prev.data == cur.data:
                # Skip the duplicate; prev stays anchored on the kept node.
                prev.next = cur.next
            else:
                prev = cur
            cur = cur.next
        return head
# Driver: read T values (one per line) from stdin, append each to the
# linked list, strip adjacent duplicates, then print the result.
mylist= Solution()
T=int(input())  # number of values to read
head=None
for i in range(T):
    data=int(input())
    head=mylist.insert(head,data)
head=mylist.removeDuplicates(head)
mylist.display(head);  # trailing semicolon is a harmless no-op
| 24.714286
| 40
| 0.482246
|
4a1196401617375261cc3907a098e71be53d3230
| 2,537
|
py
|
Python
|
source2/common.py
|
half5life/SourceIO
|
f3dc6db92daa537acbb487ce09f371866f6e3e7f
|
[
"MIT"
] | 1
|
2021-07-12T12:55:27.000Z
|
2021-07-12T12:55:27.000Z
|
source2/common.py
|
half5life/SourceIO
|
f3dc6db92daa537acbb487ce09f371866f6e3e7f
|
[
"MIT"
] | null | null | null |
source2/common.py
|
half5life/SourceIO
|
f3dc6db92daa537acbb487ce09f371866f6e3e7f
|
[
"MIT"
] | null | null | null |
import numpy as np
from ..utilities.byte_io_mdl import ByteIO
def short_to_float(value):
    """Decode a 16-bit IEEE 754 half-precision bit pattern into a float.

    Layout: 1 sign bit, 5 exponent bits (bias 15), 10 mantissa bits.

    Fixes over the original implementation:
    * exponent == 31 with a non-zero mantissa is NaN (the original
      returned -inf for every NaN payload);
    * exponent == 31 with a zero mantissa honours the sign bit (the
      original returned +inf even for 0xFC00, i.e. negative infinity).
    """
    value = int(value)
    s = (value >> 14) & 2  # sign*2, so (1 - s) is +1 or -1
    e = (value >> 10) & 31  # biased exponent
    m = value & 1023  # mantissa
    if e == 0:
        # Either zero or a subnormal number (no implicit leading 1).
        if m != 0:
            return (1 - s) * pow(2, -14) * (m / 1024)
        return 0
    if e != 31:
        # Normal number with implicit leading 1.
        return (1 - s) * pow(2, e - 15) * (1 + m / 1024)
    if m != 0:
        # All NaN payloads collapse to a single quiet NaN.
        return float('nan')
    return (1 - s) * float('inf')
def lerp(a, b, f):
    """Linear interpolation from *a* to *b* by fraction *f* (0 -> a, 1 -> b)."""
    weight_a = 1.0 - f
    return (a * weight_a) + (b * f)
def convert_normals(inpurt_array: np.ndarray):
    """Decode packed two-byte normals into unit-length (x, y, z) vectors.

    Each input row holds two values in [0, 255] (the parameter name
    'inpurt_array' is a typo kept for API compatibility).  The two bytes
    encode a direction via per-axis sign folding around 128 and 64; the
    decoded components are renormalised and sign-corrected below.
    NOTE(review): this looks like a hemi/octahedron-style normal
    compression used by Source 2 assets — confirm the exact scheme
    before relying on its name.
    """
    # X Y Z
    inpurt_array = inpurt_array.astype(np.int32)
    output = np.zeros((len(inpurt_array), 3), dtype=np.float32)
    xs = inpurt_array[:, 0]
    ys = inpurt_array[:, 1]
    # Sign flags: 1 when the byte is below 128, else 0 (via -floor(...)).
    z_signs = -np.floor((xs - 128) / 128)
    t_signs = -np.floor((ys - 128) / 128)
    # First fold: reflect the byte around 128, compensated by its flag.
    x_abss = np.abs(xs - 128) - z_signs
    y_abss = np.abs(ys - 128) - t_signs
    # Second fold around 64 with its own sign flags.
    x_sings = -np.floor((x_abss - 64) / 64)
    y_sings = -np.floor((y_abss - 64) / 64)
    output[:, 0] = (np.abs(x_abss - 64) - x_sings) / 64
    output[:, 1] = (np.abs(y_abss - 64) - y_sings) / 64
    # Third component is implied: the three components sum to 1 pre-normalisation.
    output[:, 2] = (1 - output[:, 0]) - output[:, 1]
    # Renormalise each row to unit length.
    sq = np.sqrt(np.sum(output ** 2, 1))
    output[:, 0] /= sq
    output[:, 1] /= sq
    output[:, 2] /= sq
    # Re-apply the folded-out signs (lerp(1, -1, flag) yields +1 or -1).
    output[:, 0] *= lerp(1, -1, np.abs(x_sings))
    output[:, 1] *= lerp(1, -1, np.abs(y_sings))
    output[:, 2] *= lerp(1, -1, np.abs(z_signs))
    return output
def magnitude(array: np.ndarray):
    """Return the Euclidean (L2) length of *array*."""
    squared_sum = (array ** 2).sum()
    return np.sqrt(squared_sum)


def normalize(array: np.ndarray):
    """Scale *array* to unit length; a zero vector is returned unchanged."""
    length = magnitude(array)
    return array if length == 0 else array / length
class Matrix:
    """A rows x cols float32 matrix read from a binary stream."""

    def __init__(self, cols, rows):
        self.n_rows = rows
        self.n_cols = cols
        # Stored row-major as (rows, cols), matching the zeros() layout.
        self.mat: np.ndarray = np.zeros((rows, cols))

    def read(self, reader: ByteIO):
        """Fill the matrix from *reader* (rows*cols little-endian float32).

        Bug fix: the original read n_cols*n_cols floats and reshaped to
        (n_cols, n_cols), which only worked for square matrices and
        disagreed with the (rows, cols) buffer allocated in __init__.
        It now reads exactly n_rows*n_cols values and reshapes to
        (rows, cols).
        """
        count = self.n_rows * self.n_cols
        self.mat = np.frombuffer(reader.read(count * 4), dtype=np.float32)
        self.mat = self.mat.reshape((self.n_rows, self.n_cols))

    def __repr__(self):
        # Keeps the original cols-then-rows ordering for compatibility.
        return '<Matrix{}x{}>'.format(self.n_cols, self.n_rows)
class CTransform:
    """A rigid transform: rotation quaternion plus translation vector."""

    def __init__(self):
        # Both fields stay empty until read() pulls data from a stream.
        self.quat = []
        self.pos = []

    def read(self, reader: ByteIO):
        """Read 4 quaternion floats followed by 3 position floats."""
        self.quat, self.pos = reader.read_fmt('4f'), reader.read_fmt('3f')

    def __repr__(self):
        return f'<CTransform pos:{self.pos} quat:{self.quat}>'
| 25.118812
| 94
| 0.549862
|
4a11965a48733990e873e21e759882512bb6ef36
| 3,683
|
py
|
Python
|
huaweicloud-sdk-cbr/huaweicloudsdkcbr/v1/model/tags_resp.py
|
Adek06/huaweicloud-sdk-python-v3
|
3d13b27d089e04a1ae567cd649b3c5509e0391d2
|
[
"Apache-2.0"
] | 1
|
2021-04-16T07:59:28.000Z
|
2021-04-16T07:59:28.000Z
|
huaweicloud-sdk-cbr/huaweicloudsdkcbr/v1/model/tags_resp.py
|
Lencof/huaweicloud-sdk-python-v3
|
d13dc4e2830a83e295be6e4de021999b3376e34e
|
[
"Apache-2.0"
] | null | null | null |
huaweicloud-sdk-cbr/huaweicloudsdkcbr/v1/model/tags_resp.py
|
Lencof/huaweicloud-sdk-python-v3
|
d13dc4e2830a83e295be6e4de021999b3376e34e
|
[
"Apache-2.0"
] | 1
|
2022-01-17T02:24:18.000Z
|
2022-01-17T02:24:18.000Z
|
# coding: utf-8
import pprint
import re
import six
class TagsResp:
    """Response model describing one tag: a key plus its values string.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    sensitive_list = []

    openapi_types = {
        'key': 'str',
        'values': 'str'
    }

    attribute_map = {
        'key': 'key',
        'values': 'values'
    }

    def __init__(self, key=None, values=None):
        """TagsResp - a model defined in huaweicloud sdk"""
        self._key = None
        self._values = None
        self.discriminator = None
        # Route non-None constructor args through the property setters.
        for name, value in (('key', key), ('values', values)):
            if value is not None:
                setattr(self, name, value)

    @property
    def key(self):
        """The tag key (at most 36 characters, must not be empty; the
        allowed character set depends on the deployment — see the
        service API reference)."""
        return self._key

    @key.setter
    def key(self, key):
        """Set the tag key."""
        self._key = key

    @property
    def values(self):
        """The tag values string (at most 43 characters, may be empty)."""
        return self._values

    @values.setter
    def values(self, values):
        """Set the tag values string."""
        self._values = values

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                # Mask attributes declared sensitive.
                result[attr] = "****" if attr in self.sensitive_list else value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """True when *other* is a TagsResp with an identical attribute dict."""
        if not isinstance(other, TagsResp):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| 26.688406
| 146
| 0.525115
|
4a11967cd84b89590cb9843613b9a1f48d144107
| 3,429
|
py
|
Python
|
src/experiments/util/dataset.py
|
rg321/affective-2017-musa2-ode
|
abba8c84a5fe6624601cdf6d08bf48d54ca490ac
|
[
"MIT"
] | null | null | null |
src/experiments/util/dataset.py
|
rg321/affective-2017-musa2-ode
|
abba8c84a5fe6624601cdf6d08bf48d54ca490ac
|
[
"MIT"
] | null | null | null |
src/experiments/util/dataset.py
|
rg321/affective-2017-musa2-ode
|
abba8c84a5fe6624601cdf6d08bf48d54ca490ac
|
[
"MIT"
] | null | null | null |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Small library that points to a data set.
Methods of Data class:
data_files: Returns a python list of all (sharded) data set files.
num_examples_per_epoch: Returns the number of examples in the data set.
num_classes: Returns the number of classes in the data set.
reader: Return a reader for a single entry from the data set.
"""
from abc import ABCMeta
from abc import abstractmethod
import os
import tensorflow as tf
# Global flag container; values are populated when TensorFlow parses argv.
FLAGS = tf.app.flags.FLAGS

# Basic model parameters.
tf.app.flags.DEFINE_string('data_dir', '/tmp/mydata',
                           """Path to the processed data, i.e. """
                           """TFRecord of Example protos.""")
class Dataset(object):
    """Abstract handle to a named, sharded TFRecord data set subset."""
    __metaclass__ = ABCMeta  # py2-style abstract marker (has no effect on py3)

    def __init__(self, name, subset):
        """Store the data set *name* and validate the *subset* choice."""
        assert subset in self.available_subsets(), self.available_subsets()
        self.name = name
        self.subset = subset

    @abstractmethod
    def num_classes(self):
        """Number of classes in the data set (subclass responsibility)."""
        pass

    @abstractmethod
    def num_examples_per_epoch(self):
        """Number of examples in this subset (subclass responsibility)."""
        pass

    @abstractmethod
    def download_message(self):
        """Print a download message for the Dataset (subclass hook)."""
        pass

    def available_subsets(self):
        """The subset names this class accepts."""
        return ['train', 'val']

    def data_files(self):
        """Glob the sharded files belonging to this subset.

        Returns:
            python list of all (sharded) data set files.

        Raises:
            Exits the process when no files match the subset pattern.
        """
        # On disk the validation split is stored under the 'test' prefix.
        prefix = 'test' if self.subset == 'val' else self.subset
        pattern = os.path.join(FLAGS.data_dir, '%s-*' % prefix)
        files = tf.gfile.Glob(pattern)
        if not files:
            print('No files found for dataset %s/%s at %s' % (self.name,
                                                              self.subset,
                                                              FLAGS.data_dir))
            self.download_message()
            exit(-1)
        return files

    def reader(self):
        """Return a TFRecordReader for a single entry of the data set."""
        return tf.TFRecordReader()
| 33.617647
| 80
| 0.606882
|
4a119794bb26f750422843c32e8872c2515f4f7f
| 9,018
|
py
|
Python
|
src/pt_analyzeaudio.py
|
timsum/harmonypartition
|
8be5165a0e6361c6789ccbeb7e2d81f3667587d5
|
[
"MIT"
] | null | null | null |
src/pt_analyzeaudio.py
|
timsum/harmonypartition
|
8be5165a0e6361c6789ccbeb7e2d81f3667587d5
|
[
"MIT"
] | null | null | null |
src/pt_analyzeaudio.py
|
timsum/harmonypartition
|
8be5165a0e6361c6789ccbeb7e2d81f3667587d5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 24 21:47:41 2020
@author: timsummers
"""
import numpy as np
import librosa
import librosa.display
from scipy.special import softmax
import harmony_state
import pt_utils
import pt_musicutils
import matplotlib.pyplot as plt
from matplotlib import gridspec as gridspec
import seaborn as sb
# HSL parameters shared by every seaborn husl palette built below.
hue = 0.27
sat = 0.9
light = 0.7
def graph_audio_file(filename, key_orientation=np.array([0,0,0,4,3]), chroma_threshold=0.5, filter_chroma=True):
    """Run the full KPDVE analysis on *filename* and plot the result."""
    analysis = assemble_audio_kpdve_analysis(
        filename,
        key_orientation,
        chroma_threshold=chroma_threshold,
        filter_chroma=filter_chroma,
    )
    y, sr, _stft, bin_a, kpdve_a, _chroma = analysis
    graph_waveform_kpdve_combo(y, sr, bin_a, kpdve_a)
def assemble_audio_kpdve_analysis(filename, key_orientation=np.array([0,0,0,4,3]), chroma_threshold=0.5, filter_chroma=True):
    """Load audio, build its chroma, and derive the binary/KPDVE analyses.

    Returns the tuple (samples, sample_rate, stft, binary_analysis,
    kpdve_analysis, chroma).
    """
    y, sr, chroma_a = chroma_analyze_audiofile(
        filename, hop_length=2048, filter_chroma=filter_chroma)
    bin_a, kpdve_a = analyze_chroma_list(
        chroma_a, threshold=chroma_threshold, key_orientation=key_orientation)
    return y, sr, librosa.stft(y), bin_a, kpdve_a, chroma_a
def kpdve_analyze_audiofile(filename, key_orientation=np.array([0,0,0,4,3]), chroma_threshold=0.5, filter_chroma=True):
    """Convenience wrapper returning only the binary and KPDVE analyses."""
    results = assemble_audio_kpdve_analysis(
        filename,
        key_orientation,
        chroma_threshold=chroma_threshold,
        filter_chroma=filter_chroma,
    )
    return results[3], results[4]
# CHROMA TOOLS
# 1AB
def chroma_list_to_binary_list(a_chroma, threshold=0.5):
    '''
    Reduce a chroma matrix to one 12-bit pitch-class integer per frame.

    Parameters
    ----------
    a_chroma : np_array(chroma.size)
        a chroma analysis of an audio file (12 pitch classes x frames)
    threshold : float, optional
        intensity above which a pitch class counts as present

    Returns
    -------
    bin_chroma :
        ndarray of 12-bit integers (chromatic pitch class sets),
        one per frame

    Notes
    -----
    The original body reset unused ``notegroup``/``count`` locals on
    every iteration; that dead code is removed here.
    '''
    frame_count = a_chroma.shape[1]
    bin_chroma = np.zeros(frame_count, dtype=int)
    for i in range(frame_count):
        bin_chroma[i] = chroma_to_binary_value(a_chroma[:, i], threshold)
    return bin_chroma
# 1AC
def chroma_to_binary_value(chroma_stripe, threshold=0.5):
    '''
    Pack a single 12-element chroma vector into a 12-bit notegroup integer.

    Parameters
    ----------
    chroma_stripe : np.array(12, 1)
        12 values of the chromatic scale, measured 0-1

    Returns
    -------
    12-bit notegroup integer

    >>> chroma_to_binary_value(np.array([0,0,0,0,0,2,0,0,0,0,0,0]))
    64
    '''
    notegroup = 0
    # Set one bit (MSB-first) for every pitch class above the threshold.
    for slot, intensity in enumerate(chroma_stripe):
        if intensity > threshold:
            notegroup |= pt_utils.LEFT_BIT >> slot
    return notegroup
# 1AA
def analyze_chroma_list(chroma, threshold=0.5, key_orientation=np.array([0,0,0,4,2])):
    '''
    Run a matching KPDVE analysis over the chroma frames of an audio file.

    Parameters
    ----------
    chroma :
        a chroma list from an audio file
    threshold : optional
        the intensity beyond which a chroma gets marked as a 'yes'
    key_orientation : optional
        KPDVE vector used to seed the harmony state

    Returns
    -------
    binary and KPDVE analyses as a tuple
    '''
    state = harmony_state.harmony_state(start_kpdve=key_orientation)
    # One 12-bit integer per frame drives the harmony-state walk.
    binary_chroma = chroma_list_to_binary_list(chroma, threshold)
    kpdve_chroma = np.zeros((binary_chroma.shape[0], 5), dtype=int)
    for idx, notegroup in enumerate(binary_chroma):
        state.change_notegroup(notegroup)
        kpdve_chroma[idx] = state.current_kpdve.copy()
    return binary_chroma, kpdve_chroma
# =============================
# 1A GET AUDIO TO ANALYZABLE FORM
def chroma_analyze_audiofile(filename, hop_length=1024, filter_chroma=True):
    '''
    Load an audio file and compute its (optionally filtered) chroma.

    Parameters
    ----------
    filename : an audio file
        must be of a Librosa-accepted type.
    hop_length : int, optional
        fourier transform hop. The default is 1024.
    filter_chroma : bool, optional
        do k-neighbor filtering in librosa. The default is True.

    Returns
    -------
    tuple: y, sr, chroma_a
    '''
    y, sr = librosa.load(filename)
    chroma_a = librosa.feature.chroma_stft(y=y,
                                           sr=sr)
    if filter_chroma:
        # Suppress transients by taking the pointwise minimum with a
        # nearest-neighbour-filtered copy of the chroma.
        smoothed = librosa.decompose.nn_filter(chroma_a,
                                               aggregate=np.mean,
                                               metric='cosine')
        chroma_a = np.minimum(chroma_a, smoothed)
    return y, sr, chroma_a
def graph_chroma(chroma_a):
    """Render a chroma matrix as a bare heatmap (no ticks, no colorbar)."""
    plt.figure(frameon=False, figsize=(12, 1))
    sb.heatmap(chroma_a,
               xticklabels=False,
               yticklabels=False,
               cbar=False)
def graph_waveform_kpdve_combo(y, sr, bin_a, kpdve_a):
    """Plot the waveform above a KPDVE heatmap in one borderless figure.

    Parameters: audio samples *y*, sample rate *sr*, and the binary /
    KPDVE analyses (bin_a is accepted but not used in this plot).
    """
    # PYPLOT
    # FIGSIZE IS HERE ADJUSTED TO BEST SHOW ON JUPYTER. NO FIGSIZE IS BEST FOR WEB
    fig = plt.figure(frameon=False, figsize=(12, 2))
    plt.style.use('dark_background')
    # 50-row grid: top half waveform, bottom half KPDVE heatmap.
    g_spec = gridspec.GridSpec(nrows=50, ncols=1, figure=fig)
    g_spec.update(wspace=0, hspace=0)
    ax_wave = fig.add_subplot(g_spec[:25, :])
    librosa.display.waveplot(y=y, sr=sr, x_axis=None)
    plt.subplots_adjust(0,0,1,1,0,0)
    # Strip axes/margins from every subplot created so far.
    # NOTE(review): this loop runs before ax_kpdve_ve exists, so the
    # heatmap axis is never stripped here — confirm whether intentional.
    for ax in fig.axes:
        ax.axis('off')
        ax.margins(0,0)
        ax.xaxis.set_major_locator(plt.NullLocator())
        ax.yaxis.set_major_locator(plt.NullLocator())
    ax_kpdve_ve = fig.add_subplot(g_spec[25:, :])
    # Transpose and flip so time runs left-to-right and rows read top-down.
    graphable = kpdve_list_to_heatmap_graphable(kpdve_a).T
    graphable = np.flip(graphable, axis=0)
    sb.heatmap(graphable,
               ax=ax_kpdve_ve,
               xticklabels=False,
               yticklabels=False,
               cmap=sb.husl_palette(12, h=hue, l=light, s=sat),
               cbar=False,
               vmin=0,
               vmax=12)
    plt.show()
def graph_kpdve(kpdve_a):
    """Plot a KPDVE analysis alone as a borderless heatmap strip."""
    fig = plt.figure(frameon=False, figsize=(12, 1))
    plt.style.use('dark_background')
    g_spec = gridspec.GridSpec(nrows=25, ncols=1, figure=fig)
    g_spec.update(wspace=0, hspace=0)
    plt.subplots_adjust(0,0,1,1,0,0)
    # NOTE(review): fig has no axes yet, so this loop is a no-op here —
    # it probably belongs after add_subplot below; confirm.
    for ax in fig.axes:
        ax.axis('off')
        ax.margins(0,0)
        ax.xaxis.set_major_locator(plt.NullLocator())
        ax.yaxis.set_major_locator(plt.NullLocator())
    ax_kpdve = fig.add_subplot(g_spec[:])
    # Transpose and flip so time runs left-to-right and rows read top-down.
    graphable = kpdve_list_to_heatmap_graphable(kpdve_a).T
    graphable = np.flip(graphable, axis=0)
    sb.heatmap(graphable,
               ax=ax_kpdve,
               xticklabels=False,
               yticklabels=False,
               cmap=sb.husl_palette(12, h=hue, l=light, s=sat),
               cbar=False,
               vmin=0,
               vmax=12)
    plt.show()
def kpdve_list_to_heatmap_graphable(kpdve_list):
    """Expand each KPDVE vector into its heatmap-row representation."""
    rows = [KPDVE_to_heatmap_display(a_kpdve) for a_kpdve in kpdve_list]
    return np.array(rows)
# HEATMAP SIMPLE -- NOT SIMPLE, THIS FUNCTION IS FUCKED, TALK IT THROUGH...
# THIS IS ESSENTIALLY DEPRECATED... IT IS THE 'OLD-SCHOOL KPDVE WITHOUT THE NOTEGROUP INFO'
def KPDVE_to_heatmap_display(a_kpdve):
    '''
    Expand one KPDVE vector to an 18-slot row of pitch-class numbers
    (K twice, P three times, D twelve times, E once) for heatmap display.

    Parameters
    ----------
    a_kpdve : ndarray(5)
        a kpdve location.

    Returns
    -------
    a segment in a series of values for a readable heatmap of pitches
    '''
    k = (pt_utils.single_bit_loc(pt_musicutils.circle_conv_lyd_center_for_KPDVE(a_kpdve))) % 12
    p = (pt_utils.single_bit_loc(pt_musicutils.circle_conv_tonic_for_KPDVE(a_kpdve))) % 12
    d = (pt_utils.single_bit_loc(pt_musicutils.circle_root_note_for_KPDVE(a_kpdve))) % 12
    e = (pt_utils.single_bit_loc(pt_musicutils.circle_ext_note_for_KPDVE(a_kpdve))) % 12
    # Repetition widths separate the hidden-state bands for visual clarity.
    return np.array([k] * 2 + [p] * 3 + [d] * 12 + [e])
def KPDVE_to_KPD_graphable(a_kpdve):
    """Reduce a KPDVE vector to its [key, pattern, degree] pitch classes."""
    conversions = (pt_musicutils.circle_conv_lyd_center_for_KPDVE,
                   pt_musicutils.circle_conv_tonic_for_KPDVE,
                   pt_musicutils.circle_root_note_for_KPDVE)
    return np.array([pt_utils.single_bit_loc(convert(a_kpdve)) % 12
                     for convert in conversions])
# Run the module's embedded doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 30.883562
| 161
| 0.619871
|
4a11979770684079e8ef3a4dce38378c2373ab9c
| 14,087
|
py
|
Python
|
lib/coginvasion/toon/ToonDNA.py
|
theclashingfritz/Cog-Invasion-Online-Dump
|
2561abbacb3e2e288e06f3f04b935b5ed589c8f8
|
[
"Apache-2.0"
] | 1
|
2020-03-12T16:44:10.000Z
|
2020-03-12T16:44:10.000Z
|
lib/coginvasion/toon/ToonDNA.py
|
theclashingfritz/Cog-Invasion-Online-Dump
|
2561abbacb3e2e288e06f3f04b935b5ed589c8f8
|
[
"Apache-2.0"
] | null | null | null |
lib/coginvasion/toon/ToonDNA.py
|
theclashingfritz/Cog-Invasion-Online-Dump
|
2561abbacb3e2e288e06f3f04b935b5ed589c8f8
|
[
"Apache-2.0"
] | null | null | null |
# uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: lib.coginvasion.toon.ToonDNA
from direct.directnotify.DirectNotifyGlobal import directNotify
import types
from pprint import _id
from lib.coginvasion.npc.NPCGlobals import NPC_DNA
class ToonDNA:
notify = directNotify.newCategory('ToonDNA')
requiredStrandLength = 30
colors = [(1.0, 1.0, 1.0, 1.0),
(0.96875, 0.691406, 0.699219, 1.0),
(0.933594, 0.265625, 0.28125, 1.0),
(0.863281, 0.40625, 0.417969, 1.0),
(0.710938, 0.234375, 0.4375, 1.0),
(0.570312, 0.449219, 0.164062, 1.0),
(0.640625, 0.355469, 0.269531, 1.0),
(0.996094, 0.695312, 0.511719, 1.0),
(0.832031, 0.5, 0.296875, 1.0),
(0.992188, 0.480469, 0.167969, 1.0),
(0.996094, 0.898438, 0.320312, 1.0),
(0.996094, 0.957031, 0.597656, 1.0),
(0.855469, 0.933594, 0.492188, 1.0),
(0.550781, 0.824219, 0.324219, 1.0),
(0.242188, 0.742188, 0.515625, 1.0),
(0.304688, 0.96875, 0.402344, 1.0),
(0.433594, 0.90625, 0.835938, 1.0),
(0.347656, 0.820312, 0.953125, 1.0),
(0.191406, 0.5625, 0.773438, 1.0),
(0.558594, 0.589844, 0.875, 1.0),
(0.285156, 0.328125, 0.726562, 1.0),
(0.460938, 0.378906, 0.824219, 1.0),
(0.546875, 0.28125, 0.75, 1.0),
(0.726562, 0.472656, 0.859375, 1.0),
(0.898438, 0.617188, 0.90625, 1.0),
(0.7, 0.7, 0.8, 1.0),
(0.3, 0.3, 0.35, 1.0)]
genderDNA2gender = {'00': 'boy', '01': 'girl'}
animalDNA2animal = {'00': 'cat', '01': 'dog',
'02': 'bear',
'03': 'rabbit',
'04': 'monkey',
'05': 'horse',
'06': 'pig',
'07': 'mouse',
'08': 'duck'}
headDNA2head = {'00': '1', '01': '3',
'02': '4',
'03': '2',
'04': 'dgm_skirt',
'05': 'dgm_shorts',
'06': 'dgl_shorts',
'07': 'dgs_shorts'}
colorName2DNAcolor = {'white': colors[0], 'peach': colors[1],
'bright red': colors[2],
'red': colors[3],
'maroon': colors[4],
'sienna': colors[5],
'brown': colors[6],
'tan': colors[7],
'coral': colors[8],
'orange': colors[9],
'yellow': colors[10],
'cream': colors[11],
'citrine': colors[12],
'lime green': colors[13],
'sea green': colors[14],
'green': colors[15],
'light blue': colors[16],
'aqua': colors[17],
'blue': colors[18],
'periwinkle': colors[19],
'royal blue': colors[20],
'slate blue': colors[21],
'purple': colors[22],
'lavender': colors[23],
'pink': colors[24],
'gray': colors[25],
'black': colors[26]}
colorDNA2color = {'00': colors[0], '01': colors[1],
'02': colors[2],
'03': colors[3],
'04': colors[4],
'05': colors[5],
'06': colors[6],
'07': colors[7],
'08': colors[8],
'09': colors[9],
'10': colors[10],
'11': colors[11],
'12': colors[12],
'13': colors[13],
'14': colors[14],
'15': colors[15],
'16': colors[16],
'17': colors[17],
'18': colors[18],
'19': colors[19],
'20': colors[20],
'21': colors[21],
'22': colors[22],
'23': colors[23],
'24': colors[24],
'25': colors[25],
'26': colors[26]}
torsoDNA2torso = {'00': 'dgs_shorts', '01': 'dgm_shorts',
'02': 'dgl_shorts',
'03': 'dgs_skirt',
'04': 'dgm_skirt',
'05': 'dgl_skirt'}
legDNA2leg = {'00': 'dgs', '01': 'dgm',
'02': 'dgl'}
shirtDNA2shirt = {'00': 'phase_3/maps/desat_shirt_1.jpg', '01': 'phase_3/maps/desat_shirt_2.jpg',
'02': 'phase_3/maps/desat_shirt_3.jpg',
'03': 'phase_3/maps/desat_shirt_4.jpg',
'04': 'phase_3/maps/desat_shirt_5.jpg',
'05': 'phase_3/maps/desat_shirt_6.jpg',
'06': 'phase_3/maps/desat_shirt_7.jpg',
'07': 'phase_3/maps/desat_shirt_8.jpg',
'08': 'phase_3/maps/desat_shirt_9.jpg',
'09': 'phase_3/maps/desat_shirt_10.jpg',
'10': 'phase_3/maps/desat_shirt_11.jpg',
'11': 'phase_3/maps/desat_shirt_12.jpg',
'12': 'phase_3/maps/desat_shirt_13.jpg',
'13': 'phase_3/maps/desat_shirt_14.jpg',
'14': 'phase_3/maps/desat_shirt_15.jpg',
'15': 'phase_3/maps/desat_shirt_16.jpg',
'16': 'phase_3/maps/desat_shirt_17.jpg',
'17': 'phase_3/maps/desat_shirt_18.jpg',
'18': 'phase_3/maps/desat_shirt_19.jpg',
'19': 'phase_3/maps/desat_shirt_20.jpg',
'20': 'phase_3/maps/desat_shirt_21.jpg',
'21': 'phase_3/maps/desat_shirt_22.jpg',
'22': 'phase_3/maps/desat_shirt_23.jpg',
'23': 'phase_4/maps/tt_t_chr_avt_shirt_sellbotCrusher.jpg',
'24': 'phase_4/maps/tt_t_chr_shirt_scientistA.jpg',
'25': 'phase_4/maps/tt_t_chr_shirt_scientistB.jpg',
'26': 'phase_4/maps/tt_t_chr_shirt_scientistC.jpg',
'27': 'phase_4/maps/tsashirt.jpg'}
sleeveDNA2sleeve = {'00': 'phase_3/maps/desat_sleeve_1.jpg', '01': 'phase_3/maps/desat_sleeve_2.jpg',
'02': 'phase_3/maps/desat_sleeve_3.jpg',
'03': 'phase_3/maps/desat_sleeve_4.jpg',
'04': 'phase_3/maps/desat_sleeve_5.jpg',
'05': 'phase_3/maps/desat_sleeve_6.jpg',
'06': 'phase_3/maps/desat_sleeve_7.jpg',
'07': 'phase_3/maps/desat_sleeve_8.jpg',
'08': 'phase_3/maps/desat_sleeve_9.jpg',
'09': 'phase_3/maps/desat_sleeve_10.jpg',
'10': 'phase_3/maps/desat_sleeve_11.jpg',
'11': 'phase_3/maps/desat_sleeve_12.jpg',
'12': 'phase_3/maps/desat_sleeve_13.jpg',
'13': 'phase_3/maps/desat_sleeve_14.jpg',
'14': 'phase_3/maps/desat_sleeve_15.jpg',
'15': 'phase_3/maps/desat_sleeve_16.jpg',
'16': 'phase_3/maps/desat_sleeve_17.jpg',
'17': 'phase_3/maps/desat_sleeve_18.jpg',
'18': 'phase_3/maps/desat_sleeve_19.jpg',
'19': 'phase_3/maps/desat_sleeve_20.jpg',
'20': 'phase_3/maps/desat_sleeve_21.jpg',
'21': 'phase_3/maps/desat_sleeve_22.jpg',
'22': 'phase_3/maps/desat_sleeve_23.jpg',
'23': 'phase_4/maps/tt_t_chr_avt_shirtSleeve_sellbotCrusher.jpg',
'24': 'phase_4/maps/tt_t_chr_shirtSleeve_scientist.jpg',
'25': 'phase_4/maps/tsasleeve.jpg'}
shortDNA2short = {'00': 'phase_3/maps/desat_shorts_1.jpg', '01': 'phase_3/maps/desat_shorts_2.jpg',
'02': 'phase_3/maps/desat_shorts_3.jpg',
'03': 'phase_3/maps/desat_shorts_4.jpg',
'04': 'phase_3/maps/desat_shorts_5.jpg',
'05': 'phase_3/maps/desat_shorts_6.jpg',
'06': 'phase_3/maps/desat_shorts_7.jpg',
'07': 'phase_3/maps/desat_shorts_8.jpg',
'08': 'phase_3/maps/desat_shorts_9.jpg',
'09': 'phase_3/maps/desat_shorts_10.jpg',
'10': 'phase_3/maps/desat_skirt_1.jpg',
'11': 'phase_3/maps/desat_skirt_2.jpg',
'12': 'phase_3/maps/desat_skirt_3.jpg',
'13': 'phase_3/maps/desat_skirt_4.jpg',
'14': 'phase_3/maps/desat_skirt_5.jpg',
'15': 'phase_3/maps/desat_skirt_6.jpg',
'16': 'phase_3/maps/desat_skirt_7.jpg',
'17': 'phase_4/maps/tt_t_chr_avt_shorts_sellbotCrusher.jpg',
'18': 'phase_4/maps/skirtNew5.jpg',
'19': 'phase_4/maps/tt_t_chr_avt_skirt_winter1.jpg',
'20': 'phase_4/maps/tt_t_chr_shorts_scientistA.jpg',
'21': 'phase_4/maps/tt_t_chr_shorts_scientistB.jpg',
'22': 'phase_4/maps/tt_t_chr_shorts_scientistC.jpg',
'23': 'phase_3/maps/desat_shorts_11.jpg',
'24': 'phase_3/maps/desat_shorts_12.jpg',
'25': 'phase_3/maps/desat_shorts_13.jpg',
'26': 'phase_3/maps/desat_shorts_14.jpg',
'27': 'phase_4/maps/tsashorts.jpg',
'28': 'phase_4/maps/tsaskirt.jpg'}
ShortHeads = [
'1', '4', 'dgm_skirt', 'dgs_shorts']
LongHeads = ['2', '3', 'dgm_shorts', 'dgl_shorts']
gender2genderDNA = {v:k for k, v in genderDNA2gender.items()}
animal2animalDNA = {v:k for k, v in animalDNA2animal.items()}
head2headDNA = {v:k for k, v in headDNA2head.items()}
color2colorDNA = {v:k for k, v in colorDNA2color.items()}
torso2torsoDNA = {v:k for k, v in torsoDNA2torso.items()}
leg2legDNA = {v:k for k, v in legDNA2leg.items()}
shirt2shirtDNA = {v:k for k, v in shirtDNA2shirt.items()}
sleeve2sleeveDNA = {v:k for k, v in sleeveDNA2sleeve.items()}
short2shortDNA = {v:k for k, v in shortDNA2short.items()}
def __init__(self):
self.dnaStrand = '00/00/00/00/00/00/00/00/00/00/00/00/00/00/00'
self.gender = ''
self.animal = ''
self.head = ''
self.headcolor = None
self.headLength = ''
self.torso = ''
self.torsocolor = None
self.legs = ''
self.legcolor = None
self.shirt = ''
self.sleeve = ''
self.shorts = ''
self.shirtColor = None
self.sleeveColor = None
self.shortColor = None
self.gloveColor = None
self.parseDNAStrand(self.dnaStrand)
return
def getColorByName(self, name):
name = name.lower()
color = None
if name in self.colorName2DNAcolor.keys():
color = self.colorName2DNAcolor[name]
return color
def getDNAIDFromColor(self, color):
dnaID = None
for _id, dnaColor in self.colorDNA2color.iteritems():
if dnaColor == color:
dnaID = _id
return dnaID
def getAnimal(self):
return self.animal
def getGender(self):
return self.gender
def getHead(self):
return self.head
def getHeadColor(self):
return self.headcolor
def getHeadStyle(self):
return [
self.head, self.headcolor]
def getHeadLength(self):
return self.headLength
def getTorso(self):
return self.torso
def getTorsoColor(self):
return self.torsocolor
def getTorsoStyle(self):
return [
self.torso, self.torsocolor]
def getLegs(self):
return self.legs
def getLegColor(self):
return self.legcolor
def getLegStyle(self):
return [
self.legs, self.legcolor]
def getShirt(self):
return self.shirt
def getShirtColor(self):
return self.shirtColor
def getShirtStyle(self):
return [
self.shirt, self.shirtColor]
def getSleeve(self):
return self.sleeve
def getSleeveColor(self):
return self.sleeveColor
def getSleeveStyle(self):
return [
self.sleeve, self.sleeveColor]
def getShorts(self):
return self.shorts
def getShortColor(self):
return self.shortColor
def getShortStyle(self):
return [
self.shorts, self.shortColor]
def getGloveColor(self):
return self.gloveColor
def setDNAStrand(self, dnaStrand):
self.dnaStrand = dnaStrand
self.parseDNAStrand(dnaStrand)
def getDNAStrand(self):
return self.dnaStrand
def isCoach(self):
return self.getDNAStrand() == NPC_DNA['Coach']
def getToonAnimalNoise(self, noise):
if self.isCoach():
return 'phase_3/audio/dial/coach.ogg'
return 'phase_3.5/audio/dial/AV_' + self.getAnimal() + '_' + noise + '.ogg'
def generateDNAStrandWithCurrentStyle(self):
gender = self.gender2genderDNA[self.gender]
animal = self.animal2animalDNA[self.animal]
head = self.head2headDNA[self.head]
headcolor = self.color2colorDNA[self.headcolor]
torso = self.torso2torsoDNA[self.torso]
torsocolor = self.color2colorDNA[self.torsocolor]
legs = self.leg2legDNA[self.legs]
legcolor = self.color2colorDNA[self.legcolor]
shirt = self.shirt2shirtDNA[self.shirt]
sleeve = self.sleeve2sleeveDNA[self.sleeve]
shorts = self.short2shortDNA[self.shorts]
shirtColor = self.color2colorDNA[self.shirtColor]
sleeveColor = self.color2colorDNA[self.sleeveColor]
shortColor = self.color2colorDNA[self.shortColor]
gloveColor = self.color2colorDNA[self.gloveColor]
strand = '%s/%s/%s/%s/%s/%s/%s/%s/%s/%s/%s/%s/%s/%s/%s' % (
gender, animal, head, headcolor, torso,
torsocolor, legs, legcolor, shirt, sleeve,
shorts, shirtColor, sleeveColor, shortColor, gloveColor)
self.setDNAStrand(strand)
print 'generated'
def parseDNAStrand(self, dnaStrand):
dnaParts = dnaStrand.split('/')
strandLength = len(dnaParts) * 2
isString = type(dnaStrand) is types.StringType
if strandLength == self.requiredStrandLength and isString:
self.gender = self.genderDNA2gender[dnaParts[0]]
self.animal = self.animalDNA2animal[dnaParts[1]]
self.head = self.headDNA2head[dnaParts[2]]
if self.head in self.LongHeads:
self.headLength = 'l'
else:
if self.head in self.ShortHeads:
self.headLength = 's'
self.headcolor = self.colorDNA2color[dnaParts[3]]
self.torso = self.torsoDNA2torso[dnaParts[4]]
self.torsocolor = self.colorDNA2color[dnaParts[5]]
self.legs = self.legDNA2leg[dnaParts[6]]
self.legcolor = self.colorDNA2color[dnaParts[7]]
self.shirt = self.shirtDNA2shirt[dnaParts[8]]
self.sleeve = self.sleeveDNA2sleeve[dnaParts[9]]
self.shorts = self.shortDNA2short[dnaParts[10]]
self.shirtColor = self.colorDNA2color[dnaParts[11]]
self.sleeveColor = self.colorDNA2color[dnaParts[12]]
self.shortColor = self.colorDNA2color[dnaParts[13]]
self.gloveColor = self.colorDNA2color[dnaParts[14]]
else:
self.notify.error('The DNA strand %s is formatted incorrectly.' % dnaStrand)
| 37.071053
| 106
| 0.588912
|
4a11980ad87c585932d8363f6b9d8857ce4e09f2
| 2,984
|
py
|
Python
|
OoTRandomizer.py
|
dragonbane0/OoT-Randomizer
|
fbd906bb6fe5f65395c3ef49a4a5b5a2ec79e1c0
|
[
"MIT"
] | null | null | null |
OoTRandomizer.py
|
dragonbane0/OoT-Randomizer
|
fbd906bb6fe5f65395c3ef49a4a5b5a2ec79e1c0
|
[
"MIT"
] | null | null | null |
OoTRandomizer.py
|
dragonbane0/OoT-Randomizer
|
fbd906bb6fe5f65395c3ef49a4a5b5a2ec79e1c0
|
[
"MIT"
] | 1
|
2019-06-21T14:59:39.000Z
|
2019-06-21T14:59:39.000Z
|
#!/usr/bin/env python3
import argparse
import os
import logging
import random
import textwrap
import sys
import hashlib
from Gui import guiMain
from Main import main, from_patch_file, cosmetic_patch
from Utils import is_bundled, close_console, check_version, VersionError, check_python_version, default_output_path
from Settings import get_settings_from_command_line_args
class ArgumentDefaultsHelpFormatter(argparse.RawTextHelpFormatter):
    """Help formatter that dedents each action's help text.

    Despite its name this formatter does not append default values (unlike
    :class:`argparse.ArgumentDefaultsHelpFormatter`); it only strips the
    common leading whitespace from (typically triple-quoted) help strings.
    """

    def _get_help_string(self, action):
        # Guard against actions defined without help text:
        # textwrap.dedent(None) would raise a TypeError.
        if action.help is None:
            return action.help
        return textwrap.dedent(action.help)
def start():
    """Entry point: parse settings, set up logging, and dispatch to GUI or CLI run.

    Dispatch order: GUI -> cosmetics-only patch -> patch file -> multi-seed
    batch -> single generation. All exceptions are caught and logged.
    """
    settings, gui, args_loglevel = get_settings_from_command_line_args()
    if is_bundled() and len(sys.argv) == 1:
        # for the bundled builds, if we have no arguments, the user
        # probably wants the gui. Users of the bundled build who want the command line
        # interface should specify at least one option, possibly setting a value to a
        # default if they like all the defaults
        close_console()
        guiMain()
        sys.exit(0)
    # ToDo: Validate files further than mere existance
    if settings.compress_rom != 'None' and not os.path.isfile(settings.rom):
        input('Could not find valid base rom for patching at expected path %s. Please run with -h to see help for further information. \nPress Enter to exit.' % settings.rom)
        sys.exit(1)
    # set up logger: map the CLI log-level name onto the logging constant
    loglevel = {'error': logging.ERROR, 'info': logging.INFO, 'warning': logging.WARNING, 'debug': logging.DEBUG}[args_loglevel]
    logging.basicConfig(format='%(message)s', level=loglevel)
    logger = logging.getLogger('')
    # short hash of the settings string, used to make output names reproducible
    settings_string_hash = hashlib.sha1(settings.settings_string.encode('utf-8')).hexdigest().upper()[:5]
    if settings.output_file:
        outfilebase = settings.output_file
    elif settings.world_count > 1:
        outfilebase = 'OoT_%s_%s_W%d' % (settings_string_hash, settings.seed, settings.world_count)
    else:
        outfilebase = 'OoT_%s_%s' % (settings_string_hash, settings.seed)
    output_dir = default_output_path(settings.output_dir)
    log_path = os.path.join(output_dir, '%s.log' % outfilebase)
    log_file = logging.FileHandler(log_path)
    logger.addHandler(log_file)
    # NOTE(review): condition looks inverted relative to the flag name;
    # presumably 'check_version' here means "skip version check" -- confirm.
    # Also 'version_error' is assigned but never used.
    if not settings.check_version:
        try:
            version_error = check_version(settings.checked_version)
        except VersionError as e:
            logger.warning(str(e))
    try:
        if gui:
            guiMain(settings)
        elif settings.cosmetics_only:
            cosmetic_patch(settings)
        elif settings.patch_file != '':
            from_patch_file(settings)
        elif settings.count != None and settings.count > 1:
            # batch mode: derive one seed per iteration from the original seed
            orig_seed = settings.seed
            for i in range(settings.count):
                settings.update_seed(orig_seed + '-' + str(i))
                main(settings)
        else:
            main(settings)
    except Exception as ex:
        logger.exception(ex)
if __name__ == '__main__':
    # Verify the interpreter version before doing anything else, then run.
    check_python_version()
    start()
| 35.105882
| 174
| 0.685657
|
4a11985f2b27617d92ffd462a401a979277242a0
| 1,091
|
py
|
Python
|
src/dxtbx/format/FormatSMVADSCmlfsom.py
|
toastisme/dxtbx
|
fc24e215a8052e7e17be4ad4b41f9dbb474d852a
|
[
"BSD-3-Clause"
] | null | null | null |
src/dxtbx/format/FormatSMVADSCmlfsom.py
|
toastisme/dxtbx
|
fc24e215a8052e7e17be4ad4b41f9dbb474d852a
|
[
"BSD-3-Clause"
] | null | null | null |
src/dxtbx/format/FormatSMVADSCmlfsom.py
|
toastisme/dxtbx
|
fc24e215a8052e7e17be4ad4b41f9dbb474d852a
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
from dxtbx.format.FormatSMVADSC import FormatSMVADSC
class FormatSMVADSCmlfsom(FormatSMVADSC):
    """A class for reading SMV::ADSC-format images generated by MLFSOM
    simulation."""

    @staticmethod
    def understand(image_file):
        """Return True iff the SMV header lacks both TIME and DATE entries.

        MLFSOM-simulated images are recognised by the *absence* of these
        header items (presumably real detector output includes them -- confirm).
        """
        size, header = FormatSMVADSC.get_smv_header(image_file)
        unwanted_header_items = ["TIME", "DATE"]
        for header_item in unwanted_header_items:
            if header_item in header:
                return False
        return True

    def _scan(self):
        """Return the scan information for this image."""
        exposure_time = 1.0 # dummy argument; ought to be explicitly output by MLFSOM!
        epoch = None
        # assert(epoch)
        osc_start = float(self._header_dictionary["OSC_START"])
        osc_range = float(self._header_dictionary["OSC_RANGE"])
        return self._sequence_factory.single_file(
            self._image_file, exposure_time, osc_start, osc_range, epoch
        )
if __name__ == "__main__":
    # Quick manual check: report whether each file given on the command
    # line is understood by this format class.
    for arg in sys.argv[1:]:
        print(FormatSMVADSCmlfsom.understand(arg))
| 26.609756
| 87
| 0.663611
|
4a119890d85a06981e46cb4bff3b5e5989fe14a8
| 537
|
py
|
Python
|
server.py
|
emelianovss-yandex-praktikum/18_django
|
cb33becb2edde855ca81fc91d82ff208f1527034
|
[
"MIT"
] | null | null | null |
server.py
|
emelianovss-yandex-praktikum/18_django
|
cb33becb2edde855ca81fc91d82ff208f1527034
|
[
"MIT"
] | null | null | null |
server.py
|
emelianovss-yandex-praktikum/18_django
|
cb33becb2edde855ca81fc91d82ff208f1527034
|
[
"MIT"
] | 1
|
2021-11-29T18:49:17.000Z
|
2021-11-29T18:49:17.000Z
|
import random
import json
from http.server import BaseHTTPRequestHandler, HTTPServer
class IndexHandler(BaseHTTPRequestHandler):
    """HTTP handler answering every GET with a JSON-encoded random integer."""

    def do_GET(self):
        """Respond 200 with ``{"random_int": <1..1000>}`` as application/json."""
        body = json.dumps({'random_int': random.randint(1, 1000)}).encode()
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.end_headers()
        self.wfile.write(body)
def run():
    """Start a blocking HTTP server on port 8000 serving IndexHandler."""
    HTTPServer(('', 8000), IndexHandler).serve_forever()
if __name__ == '__main__':
    # Allow running this module directly as a standalone server.
    run()
| 24.409091
| 86
| 0.687151
|
4a119944d0968ae5968df38e7b802e57a49e6430
| 3,248
|
py
|
Python
|
setup_console.py
|
karstenw/nodebox-pyobjc
|
477aeaa53636944cd400ecdfe97f4ff2480761c7
|
[
"MIT"
] | 6
|
2016-05-02T02:25:23.000Z
|
2022-03-16T14:01:54.000Z
|
setup_console.py
|
karstenw/nodebox-pyobjc
|
477aeaa53636944cd400ecdfe97f4ff2480761c7
|
[
"MIT"
] | 2
|
2018-01-27T18:24:20.000Z
|
2018-02-10T23:33:46.000Z
|
setup_console.py
|
karstenw/nodebox-pyobjc
|
477aeaa53636944cd400ecdfe97f4ff2480761c7
|
[
"MIT"
] | 1
|
2018-01-28T15:22:11.000Z
|
2018-01-28T15:22:11.000Z
|
# This is a setup file for a command-line version of NodeBox.
# If you want to work on the Mac OS X version, go look in macsetup.py.
# This is your standard setup.py, so to install the package, use:
# python setup.py install
# We require some dependencies:
# - PyObjC
# - py2app
# - cPathMatics (included in the "libs" folder)
# - polymagic (included in the "libs" folder)
# - Numpy (installable using "easy_install numpy")
from distutils.core import setup, Extension
import nodebox
NAME = 'NodeBox'
VERSION = nodebox.__version__
# NOTE: these must be plain strings. The previous trailing commas turned
# AUTHOR, AUTHOR_EMAIL and URL into one-element tuples, which distutils
# renders incorrectly in the package metadata.
AUTHOR = "Frederik De Bleser"
AUTHOR_EMAIL = "frederik@pandora.be"
URL = "http://nodebox.net/"
CLASSIFIERS = (
"Development Status :: 5 - Production/Stable",
"Environment :: MacOS X :: Cocoa",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: MIT License",
"Operating System :: MacOS :: MacOS X",
"Programming Language :: Python",
"Topic :: Artistic Software",
"Topic :: Multimedia :: Graphics",
"Topic :: Multimedia :: Graphics :: Editors :: Vector-Based",
"Topic :: Multimedia :: Video",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development :: User Interfaces",
"Topic :: Text Editors :: Integrated Development Environments (IDE)",
)
DESCRIPTION = "Simple application for creating 2-dimensional graphics and animation using Python code"
LONG_DESCRIPTION = """NodeBox is a Mac OS X application that allows you to create visual output
with programming code. The application targets an audience of designers, with an easy set of state
commands that is both intuitive and creative. It is essentially a learning environment and an automation tool.
The current version features:
* State-based graphics context
* Extensive reference documentation and tutorials
* PDF export for graphics
* QuickTime export for animations
* Manipulate every numeric variable in a script by command-dragging it, even during animation
* Creating simple user interfaces using text fields, sliders, and buttons
* Stop a running script by typing command-period
* Universal Binary
* Integrated bezier mathematics and boolean operations
* Command-line interface
* Zooming
"""
ext_modules=[
Extension('bwdithering', ['libs/bwdithering/bwdithering.c']),
Extension('fractal', ['libs/fractal/fractal.c']),
Extension('cGeo', ['libs/cGeo/cGeo.c']),
Extension('cPathmatics', ['libs/pathmatics/pathmatics.c']),
Extension('cPolymagic', ['libs/polymagic/gpc.c', 'libs/polymagic/polymagic.m'],
extra_link_args=['-framework', 'AppKit', '-framework', 'Foundation'])
]
packages = [
'nodebox',
'nodebox.graphics',
# 'nodebox.graphics.bezier',
# 'nodebox.graphics.cocoa',
'nodebox.util',
'nodebox.util.kgp',
'nodebox.util.QTSupport',
'nodebox.geo']
if __name__=='__main__':
setup(name = NAME,
version = VERSION,
description = DESCRIPTION,
long_description = LONG_DESCRIPTION,
author = AUTHOR,
author_email = AUTHOR_EMAIL,
url = URL,
classifiers = CLASSIFIERS,
ext_modules = ext_modules,
packages = packages
)
| 34.189474
| 110
| 0.699815
|
4a11997f7162f402450c62da958f83dc83f0a67d
| 322
|
py
|
Python
|
apps/__init__.py
|
Lu0Hui/flask-app
|
934b0231ad9fa138a42a8edeaa87251a4090e0a3
|
[
"MIT"
] | null | null | null |
apps/__init__.py
|
Lu0Hui/flask-app
|
934b0231ad9fa138a42a8edeaa87251a4090e0a3
|
[
"MIT"
] | null | null | null |
apps/__init__.py
|
Lu0Hui/flask-app
|
934b0231ad9fa138a42a8edeaa87251a4090e0a3
|
[
"MIT"
] | null | null | null |
from flask import Flask
def create_app():
    """Application factory: build the Flask app and register its blueprints.

    Blueprints are imported inside the factory rather than at module level,
    presumably to avoid circular imports -- confirm against apps.*.views.
    """
    app = Flask(__name__)
    from apps.core.views import core as core_blueprint
    app.register_blueprint(core_blueprint, url_prefix='/')
    from apps.users.views import user as user_blueprint
    app.register_blueprint(user_blueprint, url_prefix='/users')
    return app
| 23
| 63
| 0.745342
|
4a1199f8abe51ef447f07120b8839477c2037939
| 4,693
|
py
|
Python
|
tools/make_nr/make_nr.py
|
bernt-matthias/galaxy_blast
|
3ac00a9f99a93cbc8a6c136a302e7fbb17da8f71
|
[
"MIT"
] | null | null | null |
tools/make_nr/make_nr.py
|
bernt-matthias/galaxy_blast
|
3ac00a9f99a93cbc8a6c136a302e7fbb17da8f71
|
[
"MIT"
] | null | null | null |
tools/make_nr/make_nr.py
|
bernt-matthias/galaxy_blast
|
3ac00a9f99a93cbc8a6c136a302e7fbb17da8f71
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Make FASTA files non-redundant by combining duplicated sequences.
This script takes one or more (optionally gzipped) FASTA filenames as input,
and will return a non-zero error if any duplicate identifiers are found.
Writes output to stdout by default.
Keeps all the sequences in memory, beware!
"""
from __future__ import print_function
import gzip
import os
import sys
from optparse import OptionParser
if "-v" in sys.argv or "--version" in sys.argv:
print("v0.0.1")
sys.exit(0)
# Parse Command Line
usage = """Use as follows:
$ python make_nr.py [options] A.fasta [B.fasta ...]
For example,
$ python make_nr.py -o dedup.fasta -s ";" input1.fasta input2.fasta
The input files should be plain text FASTA format, optionally gzipped.
The -a option controls how the representative replacement record for
duplicated records are named. By default the identifiers are taken
in the input file order, combined with the separator. If the -a or
alphasort option is picked, the identifiers are alphabetically sorted
first. This ensures the same names are used even if the input file
order (or the record order within the input files) is randomised.
There is additional guidance in the help text in the make_nr.xml file,
which is shown to the user via the Galaxy interface to this tool.
"""
parser = OptionParser(usage=usage)
parser.add_option(
"-s",
"--sep",
dest="sep",
default=";",
help="Separator character for combining identifiers "
"of duplicated records e.g. '|' or ';' (required)",
)
parser.add_option(
"-a",
"--alphasort",
action="store_true",
help="When merging duplicated records sort their "
"identifiers alphabetically before combining them. "
"Default is input file order.",
)
parser.add_option(
"-o",
"--output",
dest="output",
default="/dev/stdout",
metavar="FILE",
help="Output filename (defaults to stdout)",
)
options, args = parser.parse_args()
if not args:
sys.exit("Expects at least one input FASTA filename")
def gzip_open(filename):
    """Return a text-mode handle for *filename*, transparently handling gzip.

    The first two bytes are sniffed for the gzip magic number; the file is
    then reopened either via :mod:`gzip` in text mode or as plain text.
    """
    with open(filename, "rb") as probe:
        is_gzipped = probe.read(2) == b"\x1f\x8b"
    return gzip.open(filename, "rt") if is_gzipped else open(filename)
def make_nr(input_fasta, output_fasta, sep=";", sort_ids=False):
    """Make the sequences in FASTA files non-redundant.

    Argument input_fasta is a list of filenames.

    Records with identical (case-insensitive) sequences are collapsed into a
    single representative record whose identifier joins all member identifiers
    with *sep*. When no duplicates are found, a single input file is symlinked
    to the output path; multiple duplicate-free inputs are concatenated.
    """
    by_seq = dict()
    try:
        from Bio.SeqIO.FastaIO import SimpleFastaParser
    except ImportError:
        sys.exit("Missing Biopython")
    # First pass: cluster record identifiers by their (upper-cased) sequence.
    for f in input_fasta:
        with gzip_open(f) as handle:
            for title, seq in SimpleFastaParser(handle):
                idn = title.split(None, 1)[0]  # first word only
                seq = seq.upper()
                try:
                    by_seq[seq].append(idn)
                except KeyError:
                    by_seq[seq] = [idn]
    unique = 0
    representatives = dict()
    duplicates = set()
    for cluster in by_seq.values():
        if len(cluster) > 1:
            # Is it useful to offer to sort here?
            # if sort_ids:
            #     cluster.sort()
            representatives[cluster[0]] = cluster
            duplicates.update(cluster[1:])
        else:
            unique += 1
    del by_seq
    if duplicates:
        # Second pass: emit representatives (with merged identifiers) and
        # skip the remaining members of each cluster.
        # TODO - refactor as a generator with single SeqIO.write(...) call
        with open(output_fasta, "w") as handle:
            for f in input_fasta:
                with gzip_open(f) as in_handle:
                    for title, seq in SimpleFastaParser(in_handle):
                        idn = title.split(None, 1)[0]  # first word only
                        if idn in representatives:
                            cluster = representatives[idn]
                            if sort_ids:
                                cluster.sort()
                            idn = sep.join(cluster)
                            title = "%s representing %i records" % (idn, len(cluster))
                        elif idn in duplicates:
                            continue
                        # TODO - line wrapping
                        handle.write(">%s\n%s\n" % (title, seq))
        sys.stderr.write(
            "%i unique entries; removed %i duplicates "
            "leaving %i representative records\n"
            % (unique, len(duplicates), len(representatives))
        )
    else:
        # BUG FIX: input_fasta is a list, so the old
        # os.symlink(os.path.abspath(input_fasta), ...) raised TypeError
        # whenever there were no duplicates. Symlink a single input, or
        # concatenate multiple inputs record by record.
        if len(input_fasta) == 1:
            os.symlink(os.path.abspath(input_fasta[0]), output_fasta)
        else:
            with open(output_fasta, "w") as handle:
                for f in input_fasta:
                    with gzip_open(f) as in_handle:
                        for title, seq in SimpleFastaParser(in_handle):
                            handle.write(">%s\n%s\n" % (title, seq))
        sys.stderr.write("No perfect duplicates in file, %i unique entries\n" % unique)
make_nr(args, options.output, options.sep, options.alphasort)
| 31.709459
| 87
| 0.610484
|
4a119cc6a3179ace628fbc1839348c0ca1000ed9
| 18
|
py
|
Python
|
horizon_telemetry/models.py
|
simonpasquier/horizon-telemetry-dashboard
|
f284ec6ae8b1932079852fe3e9ab4b7a27ff58d7
|
[
"Apache-2.0"
] | null | null | null |
horizon_telemetry/models.py
|
simonpasquier/horizon-telemetry-dashboard
|
f284ec6ae8b1932079852fe3e9ab4b7a27ff58d7
|
[
"Apache-2.0"
] | 2
|
2017-10-10T07:30:41.000Z
|
2017-10-19T18:34:13.000Z
|
horizon_telemetry/models.py
|
simonpasquier/horizon-telemetry-dashboard
|
f284ec6ae8b1932079852fe3e9ab4b7a27ff58d7
|
[
"Apache-2.0"
] | null | null | null |
# Placeholder file
| 18
| 18
| 0.833333
|
4a119d2e7554eb76f1efd5f07151c512e4df0825
| 506
|
py
|
Python
|
scripts/trim.py
|
SonicMastr/h-encore
|
5ae59490605f6495f273ba72b9dacb185d94fd3f
|
[
"MIT"
] | null | null | null |
scripts/trim.py
|
SonicMastr/h-encore
|
5ae59490605f6495f273ba72b9dacb185d94fd3f
|
[
"MIT"
] | null | null | null |
scripts/trim.py
|
SonicMastr/h-encore
|
5ae59490605f6495f273ba72b9dacb185d94fd3f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
from sys import argv, exit
import struct
def main():
    """Trim trailing zero 32-bit words from the binary file named on the command line.

    Scans backwards in 4-byte steps for the last non-zero little-endian word
    and truncates the file just after that word (the word itself is kept).

    Returns:
        -1 on usage error or when the file is shorter than one word,
        otherwise None.
    """
    if len(argv) != 2:
        print("Usage: trim.py binary")
        return -1
    with open(argv[1], "rb") as f:
        data = f.read()
    new_len = len(data)
    if new_len < 4:
        return -1
    for i in range(new_len - 4, 0, -4):
        word, = struct.unpack("<I", data[i:i + 4])
        if word != 0:
            new_len = i
            break
    # Use a context manager so the handle is flushed and closed
    # deterministically (the original leaked the write handle).
    with open(argv[1], "wb") as f:
        f.write(data[:new_len + 4])
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status.
    exit(main())
| 18.740741
| 49
| 0.537549
|
4a119f5494b74a85b83d9e5535344a530a0d2071
| 4,481
|
py
|
Python
|
python/datasets/pamap_common.py
|
icdm-extract/extract
|
18d6e8509f2f35719535e1de6c88874ec533cfb9
|
[
"MIT"
] | 1
|
2019-03-05T01:17:05.000Z
|
2019-03-05T01:17:05.000Z
|
python/datasets/pamap_common.py
|
icdm-extract/extract
|
18d6e8509f2f35719535e1de6c88874ec533cfb9
|
[
"MIT"
] | null | null | null |
python/datasets/pamap_common.py
|
icdm-extract/extract
|
18d6e8509f2f35719535e1de6c88874ec533cfb9
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from ..utils.files import basename
from ..utils.arrays import downsampleMat, zNormalizeCols, zeroOneScaleMat
from ..utils import sequence as seq
# ================================================================
# numeric consts
# clipping / value manipulation
MINVAL = -150.
MAXVAL = 175.
WIDTH_LINE_GRAPH = 13
HEIGHT_LINE_GRAPH = 6
WIDTH_IMG = 4
HEIGHT_IMG = 10
# ================================================================
# Column names in data
TIMESTAMP_COL_NAME = 'time'
LABEL_COL_NAME = 'activity_id'
INITIAL_COL_NAMES = [TIMESTAMP_COL_NAME, LABEL_COL_NAME, 'heartRate']
# ================================================================
# Activity IDs
OTHER_ACTIVITY_ID = 0
# ================================================================
# Activity names
# shared
NAME_OTHER = 'NA'
NAME_LYING = 'lying'
NAME_SITTING = 'sitting'
NAME_STANDING = 'standing'
NAME_VACUUM = 'vacuum'
NAME_WALK = 'walk'
NAME_NORDIC_WALK = 'Nordic walk'
NAME_ASCEND_STAIRS = 'ascend stairs'
NAME_DESCEND_STAIRS = 'descend stairs'
NAME_RUN = 'run'
NAME_SOCCER = 'soccer'
NAME_JUMP_ROPE = 'jump rope'
NAME_CYCLE = 'cycle'
NAME_IRONING = 'iron'
# pamap only
NAME_SLOW_WALK = 'slow walk'
# pamap2 only
NAME_WATCH_TV = 'watch TV'
NAME_COMPUTER_WORK = 'computer work'
NAME_DRIVE = 'drive'
NAME_FOLD_LAUNDRY = 'folding laundry'
NAME_CLEANING = 'cleaning'
# ================================================================
# funcs
def removeNullCols(colNames):
    """Return the names in *colNames* that do not contain the substring 'null'.

    Returns a list rather than a lazy ``filter`` object so the result can be
    iterated more than once and compared directly under Python 3 (under
    Python 2 the behavior is unchanged).
    """
    return [name for name in colNames if 'null' not in name]
# -------------------------------
# data parsing funcs
# -------------------------------
def parseDataFileName(f):
    """Return the integer subject id encoded as the final character of the file name."""
    return int(basename(f, noexts=True)[-1])
def dfFromFileAtPath(path, missingDataVal, allColNames, keepColNames):
    """Load a whitespace-delimited data file into a DataFrame.

    Cells equal to *missingDataVal* are replaced with NaN; the frame is
    labelled with *allColNames* and restricted to *keepColNames*.
    """
    raw = np.genfromtxt(path)
    raw[raw == missingDataVal] = np.nan
    frame = pd.DataFrame(data=raw, columns=allColNames)
    return frame.filter(keepColNames)
def findActivityBoundaries(df, labelColName=LABEL_COL_NAME):
    """Return (ranges, labels): index ranges of constant activity id and the id of each range."""
    activityIds = df[labelColName]
    ranges = seq.rangesOfConstantValue(activityIds)
    rangeLabels = [activityIds[r[0]] for r in ranges]
    assert len(rangeLabels) == len(ranges)
    return ranges, rangeLabels
# -------------------------------
# plotting funcs
# -------------------------------
def plotVertLine(x, ymin, ymax):
    """Draw a dashed black vertical line at *x* spanning [ymin, ymax]."""
    plt.plot([x, x], [ymin, ymax], color='k', linestyle='--', linewidth=1)
def imshowData(data, znorm=False):
    """Render *data* as an image with a colorbar.

    Columns are optionally z-normalized first and always rescaled to [0, 1].
    """
    if znorm:
        data = zNormalizeCols(data)
    plt.imshow(zeroOneScaleMat(data), aspect='auto')
    plt.colorbar()
def plotRecording(sampleTimes, data, boundaries, labelStrings,
                  minVal=MINVAL, maxVal=MAXVAL):
    """Line-plot a recording with a dashed line and text label at each activity boundary.

    Args:
        sampleTimes: per-sample timestamps (seconds); also the x axis
        data: sensor values plotted against sampleTimes
        boundaries: index ranges; the first element of each marks a boundary start
        labelStrings: one display name per boundary
        minVal/maxVal: fixed y-axis limits (autoscaling is disabled)
    """
    maxTimestamp = sampleTimes[-1]
    plt.gca().set_autoscale_on(False)
    plt.plot(sampleTimes, data)
    plt.xlabel("Times (s)")
    for i, row in enumerate(boundaries):
        # plot line at the start index of this activity range
        idx = row[0]
        timestamp = sampleTimes[idx]
        plotVertLine(timestamp, minVal, maxVal)
        # write label in axes-fraction coordinates, staggered vertically so
        # neighboring labels don't overlap
        x = timestamp / maxTimestamp
        y = .05 + (.8 * (i % 2)) + (.025 * (i % 4)) # stagger heights
        name = labelStrings[i]
        plt.gca().annotate(name, xy=(x, y), xycoords='axes fraction')
    plt.xlim([np.min(sampleTimes), np.max(sampleTimes)])
    plt.ylim([minVal, maxVal])
# ================================================================
# recording class
class Recording(object):
    """One PAMAP data file: loads, clips, and segments a subject's recording.

    Attributes set in __init__:
      subjId       -- integer subject id parsed from the file name
      df           -- DataFrame of timestamp + non-null columns, missing -> NaN
      sampleTimes  -- timestamp column as a matrix
      data         -- sensor data matrix, clipped to [MINVAL, MAXVAL] in place
      boundaries   -- index ranges of constant activity id
      labels       -- activity id per range
      labelStrs    -- display name per range (via ids2labelStrs)
    """
    def __init__(self, filePath, missingDataVal, colNames, ids2labelStrs):
        # first two names are assumed non-data (per INITIAL_COL_NAMES these
        # are timestamp and label) -- data columns start at index 2
        dataColNames = colNames[2:]
        usedColNames = removeNullCols(colNames)
        self.subjId = parseDataFileName(filePath)
        self.df = dfFromFileAtPath(filePath, missingDataVal,
                                   colNames, usedColNames)
        # NOTE(review): DataFrame.as_matrix() was removed in pandas 1.0; this
        # code assumes an older pandas (modern equivalent: .to_numpy())
        self.sampleTimes = self.df.as_matrix([TIMESTAMP_COL_NAME])
        self.data = self.df.as_matrix(columns=dataColNames)
        np.clip(self.data, MINVAL, MAXVAL, out=self.data)
        self.boundaries, self.labels = findActivityBoundaries(self.df)
        self.labelStrs = [ids2labelStrs[label] for label in self.labels]
    def plot(self):
        """Line plot of the full recording with annotated activity boundaries."""
        plotRecording(self.sampleTimes, self.data,
                      self.boundaries, self.labelStrs)
    def imshow(self, znorm=False):
        """Show the (row-downsampled) data matrix as an image."""
        data = self.data
        data[np.isnan(data)] = MINVAL # znorming breaks everything without this !!!
        # downsample by k cuz otherwise a whole bunch of rows get averaged
        # together in the plot and the whole thing is just ~.5
        data = downsampleMat(data, rowsBy=4)
        imshowData(data, znorm)
| 28.360759
| 78
| 0.65231
|
4a119f8b8a89d8455e2c6696de294f5947837715
| 1,282
|
py
|
Python
|
kafka/test_kafka.py
|
deep2064/initialization-actions
|
13290bffe7eaef2576071218ada6195a2756ef76
|
[
"Apache-2.0"
] | 127
|
2020-01-17T17:56:28.000Z
|
2022-03-26T21:10:01.000Z
|
kafka/test_kafka.py
|
deep2064/initialization-actions
|
13290bffe7eaef2576071218ada6195a2756ef76
|
[
"Apache-2.0"
] | 258
|
2020-01-17T19:24:20.000Z
|
2022-03-30T18:28:41.000Z
|
kafka/test_kafka.py
|
deep2064/initialization-actions
|
13290bffe7eaef2576071218ada6195a2756ef76
|
[
"Apache-2.0"
] | 136
|
2020-01-16T11:50:45.000Z
|
2022-03-26T03:39:36.000Z
|
import os
from absl.testing import absltest
from absl.testing import parameterized
from integration_tests.dataproc_test_case import DataprocTestCase
class KafkaTestCase(DataprocTestCase):
    """Integration test for the Kafka init action on a Dataproc cluster."""
    COMPONENT = 'kafka'
    INIT_ACTIONS = ['kafka/kafka.sh']
    TEST_SCRIPT_FILE_NAME = 'validate.sh'

    def verify_instance(self, name):
        """Upload the validation script to instance *name*, run it, then remove it."""
        self.upload_test_file(
            os.path.join(
                os.path.dirname(os.path.abspath(__file__)),
                self.TEST_SCRIPT_FILE_NAME), name)
        self.__run_test_script(name)
        self.remove_test_script(self.TEST_SCRIPT_FILE_NAME, name)

    def __run_test_script(self, name):
        """Execute the previously uploaded validation script on instance *name*."""
        self.assert_instance_command(
            name, "bash {}".format(self.TEST_SCRIPT_FILE_NAME))

    @parameterized.parameters(
        ("HA", ["m-0", "m-1", "m-2"]), )
    def test_kafka(self, configuration, machine_suffixes):
        """Create a cluster with the Kafka init action and validate each listed node."""
        if self.getImageOs() == 'centos':
            self.skipTest("Not supported in CentOS-based images")
        self.createCluster(configuration, self.INIT_ACTIONS)
        for machine_suffix in machine_suffixes:
            self.verify_instance("{}-{}".format(self.getClusterName(),
                                                machine_suffix))
if __name__ == '__main__':
    # Run the absl test suite when executed directly.
    absltest.main()
| 32.05
| 70
| 0.650546
|
4a119fb1aba839d294db5c19a8fd263ddeae21ae
| 11,594
|
py
|
Python
|
mwr_raw2l1/measurement/measurement_construct_helpers.py
|
MeteoSwiss/mwr_raw2l1
|
6f8d8b80c203bcbd7f42a53b618ae63f321b68cb
|
[
"BSD-3-Clause"
] | null | null | null |
mwr_raw2l1/measurement/measurement_construct_helpers.py
|
MeteoSwiss/mwr_raw2l1
|
6f8d8b80c203bcbd7f42a53b618ae63f321b68cb
|
[
"BSD-3-Clause"
] | null | null | null |
mwr_raw2l1/measurement/measurement_construct_helpers.py
|
MeteoSwiss/mwr_raw2l1
|
6f8d8b80c203bcbd7f42a53b618ae63f321b68cb
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import xarray as xr
from mwr_raw2l1.errors import DimensionError, MissingInputArgument
from mwr_raw2l1.log import logger
from mwr_raw2l1.measurement.scan_transform import scan_to_timeseries_from_aux
def attex_to_datasets(data_all, dims, vars, vars_opt):
    """generate a single :class:`xarray.Dataset` from Attex read-in instances

    Args:
        data_all: single instance of the read-in class (with observations in instance variable 'data') or a list
            of such instances.
        dims: list of keys that are a dimension (must correspond to the order of dimensions in data)
        vars: list of keys that are data variables (dimensions don't need to be specified again)
        vars_opt: list of keys that are optional data variables

    Returns:
        a :class:`xarray.Dataset` containing the data
    """
    instances = data_all if isinstance(data_all, list) else [data_all]
    # Attex is a single-channel instrument; insert a frequency axis in second
    # position so 'Tb' matches the layout used for other instruments
    for instance in instances:
        if instance.data['Tb'].ndim == 2:
            instance.data['Tb'] = np.expand_dims(instance.data['Tb'], 1)
    return to_single_dataset([instance.data for instance in instances], dims, vars, vars_opt)
def radiometrics_to_datasets(data_all, dims, vars, vars_opt):
    """generate one :class:`xarray.Dataset` per observation source

    Args:
        data_all: single instance of the read-in class (with observations in instance variable 'data') or a list
            of such instances.
        dims: list of keys that are a dimension (must correspond to the order of dimensions in data)
        vars: list of keys that are data variables (dimensions don't need to be specified again)
        vars_opt: list of keys that are optional data variables

    Returns:
        dictionary with one :class:`xarray.Dataset` for each source key in the data
    """
    instances = data_all if isinstance(data_all, list) else [data_all]
    # the first instance defines the set of observation sources
    return {
        src: to_single_dataset([inst.data[src] for inst in instances],
                               dims[src], vars[src], vars_opt[src])
        for src in instances[0].data
    }
def rpg_to_datasets(data, dims, vars, vars_opt):
    """generate unique :class:`xarray.Dataset` for each type of obs in 'data' using dimensions and variables specified

    Args:
        data: dictionary containing the observations by type. Its keys correspond to the type of observations (e.g. brt,
            blb, irt ...). The observations themselves can be given as a single instance of the read-in class
            (with observations in variable 'data') or as a list containing a series of instances of read-in classes.
        dims: list of keys that are a dimension (must correspond to the order of dimensions in data).
            NOTE: dims[src] can be mutated in place for 'hkd' (see below).
        vars: list of keys that are data variables (dimensions don't need to be specified again)
        vars_opt: list of keys that are optional data variables (added as 1-dim series of NaN if missing in 'data')

    Returns:
        dictionary with one :class:`xarray.Dataset` for each key. It contains one item for each key in data
    """
    # variables with more than one (time) dimension, per observation type
    multidim_vars_per_obstype = {'irt': {'IRT': 2}, 'brt': {'Tb': 2}, 'blb': {'Tb': 3}}
    out = {}
    for src, data_series in data.items():
        if src in multidim_vars_per_obstype:
            multidim_vars = multidim_vars_per_obstype[src]
        else:
            multidim_vars = {}
        if not data_series:  # fill in NaN variables if meas source does not exist (loop over empty data_series skipped)
            if src in ('brt', 'blb'):  # don't create empty datasets for missing MWR data
                continue
            logger.info('No {}-data available. Will generate a dataset fill values only for {}'.format(src, src))
            # span the placeholder dataset over the full HKD time range
            min_time = min([x.data['time'][0] for x in data['hkd']])  # class instances in data['hkd'] can be unordered
            max_time = max([x.data['time'][-1] for x in data['hkd']])  # class instances in data['hkd'] can be unordered
            out[src] = make_dataset(None, dims[src], vars[src], vars_opt[src],
                                    multidim_vars=multidim_vars, time_vector=[min_time, max_time])
            continue
        elif not isinstance(data_series, list):  # accept also single instances of read-in class not inside a list
            data_series = [data_series]
        # if HKD has no statusflag (has_statusflag=0) 'channels' variable will not be set, hence dim must not be set
        # NOTE(review): this remove() mutates the caller's dims[src] list in place
        if src == 'hkd' and 'channels' not in data_series[0].data and 'channels' in dims[src]:
            dims[src].remove('channels')
        out[src] = to_single_dataset([dat.data for dat in data_series], dims[src], vars[src], vars_opt[src],
                                     multidim_vars=multidim_vars)
    return out
def make_dataset(data, dims, vars, vars_opt=None, multidim_vars=None, time_vector=None):
    """generate a :class:`xarray.Dataset` from 'data' dictionary using the dimensions and variables specified

    Args:
        data: dictionary containing the data. If set to None or empty a placeholder dataset with all-NaN time series
            (except variable IRT, which is 2d) is returned. If set to None or empty time_vector must be specified.
        dims: list of keys that are a dimension (must correspond to the order of dimensions in data)
        vars: list of keys that are data variables (dimensions don't need to be specified again)
        vars_opt (optional): list of keys that are optional data variables (added as 1-d series of NaN if not in 'data')
        multidim_vars (optional): dictionary of variables with more than time dimension. Variable name as key, number of
            dimensions as values. This argument will be ignored as long as the variable is present in dataset
        time_vector (optional): :class:`numpy.ndarray` of :class:`numpy.datetime64` to take as time dimension for
            generating all-NaN datasets. This argument will be ignored as long as data is not None or empty

    Raises:
        MissingInputArgument: if data is None/empty and no time_vector is given
        DimensionError: if a variable has more dimensions than declared in dims
    """
    # config for empty datasets or variables
    missing_val = np.nan
    if multidim_vars is None:
        multidim_vars = {}
    # init
    if vars_opt is None:
        vars_opt = []
    all_vars = vars + vars_opt
    # every dim/var defaults to 1 dimension unless overridden by multidim_vars
    ndims_per_var = {var: 1 for var in dims + all_vars}
    for var, nd in multidim_vars.items():  # can grow larger than keys that shall be in output, only accessed by key
        ndims_per_var[var] = nd
    # prepare all NaN-variables for case of data==None or empty
    if data is None or not data:
        if time_vector is None:
            raise MissingInputArgument('if data is empty or None the input argument time_vector must be specified')
        data = {'time': time_vector}  # start overwriting empty data variable
        for dim in dims[1:]:  # assume first dimension to be 'time'
            data[dim] = np.array([missing_val])  # other dimensions all one-element
        for var in all_vars:
            # shape follows the lengths of the first ndims_per_var[var] dims
            shape_act = [len(data[dims[k]]) for k in range(ndims_per_var[var])]
            data[var] = np.full(shape_act, missing_val)
    # add optional variables as NaN-series to data if not in input data
    for varo in vars_opt:
        if varo not in data:
            shape_act = [len(data[dims[k]]) for k in range(ndims_per_var[varo])]
            data[varo] = np.full(shape_act, missing_val)
            logger.info('Optional variable {} not found in input data. Will create a all-NaN placeholder'.format(varo))
    # collect specifications and data for generating xarray Dataset from dict
    spec = {}
    for dim in dims:
        spec[dim] = dict(dims=dim, data=data[dim])
    # add vars to spec; each variable uses the first nd declared dimensions
    for var in all_vars:
        nd = np.ndim(data[var])
        if nd > len(dims):
            raise DimensionError(dims, var, nd)
        spec[var] = dict(dims=dims[0:nd], data=data[var])
    return xr.Dataset.from_dict(spec)
def to_single_dataset(data_dicts, *args, **kwargs):
    """return a single :class:`xarray.Dataset` with unique time vector from a list of data dictionaries

    Args:
        data_dicts: list of data dictionaries to be concatenated to a time series
        *args: dimension and variable specifications passed on to :func:`make_dataset`
        **kwargs: dimension and variable specifications passed on to :func:`make_dataset`
    """
    per_dict = [make_dataset(d, *args, **kwargs) for d in data_dicts]
    merged = xr.concat(per_dict, dim='time')  # merge all datasets of the same type
    return drop_duplicates(merged, dim='time')  # remove duplicate measurements
def merge_aux_data(mwr_data, all_data, srcs_to_ignore=None):
    """merge auxiliary data to time grid of microwave data

    Args:
        mwr_data: :class:`xarray.Dataset` of microwave radiometer data
        all_data: Dictionary of data from different sources (keys) as :class:`xarray.Dataset` (values). Can also contain
            the data in 'mwr_data' in which case it must be made sure the key is specified in 'srcs_to_ignore'.
            NOTE: entries may be replaced by renamed copies (caller's dict is mutated).
        srcs_to_ignore (optional): list of sources (keys) to ignore from 'all_data' e.g. because they are already
            contained in 'mwr_data'. Defaults to ['mwr', 'brt', 'blb']

    Returns:
        merged dataset of type :class:`xarray.Dataset`
    """
    if srcs_to_ignore is None:
        srcs_to_ignore = ['mwr', 'brt', 'blb']
    out = mwr_data
    for src in all_data:
        if src in srcs_to_ignore:
            continue
        # to make sure no variable is overwritten rename duplicates by suffixing it with its source
        # NOTE(review): the rename reassigns all_data[src], i.e. mutates the caller's dictionary
        for var in all_data[src]:
            if var in out:
                varname_map = {var: var + '_' + src}
                all_data[src] = all_data[src].rename(varname_map)
        # interp to same time grid (time grid from blb now stems from some interp) and merge into out
        srcdat_interp = all_data[src].interp(time=out['time'], method='nearest')  # nearest: flags stay integer
        out = out.merge(srcdat_interp, join='left')
    return out
def drop_duplicates(ds, dim):
    """drop duplicate entries along dimension *dim*, keeping the first occurrence

    Args:
        ds: :class:`xarray.Dataset` or :class:`xarray.DataArray` containing the data
        dim: string indicating the dimension name to check for duplicates

    Returns:
        ds restricted to the first occurrence of each unique value in ds[dim]
    """
    # np.unique returns the index of the FIRST occurrence of each value;
    # duplicate entries are assumed to carry identical data anyway
    _, first_occurrence = np.unique(ds[dim], return_index=True)
    return ds.isel({dim: first_occurrence})
def merge_brt_blb(all_data):
    """merge brt (zenith MWR) and blb (scanning MWR) observations from an RPG instrument

    Args:
        all_data: dictionary with a :class:`xarray.Dataset` attached to each key
            (output of :func:`rpg_to_datasets`)
    """
    # NOTE: if neither 'brt' nor 'blb' is present, 'out' stays unbound and a
    # NameError is raised on return — same behavior as the original code.
    has_brt = 'brt' in all_data
    if has_brt:
        out = all_data['brt']
    if 'blb' in all_data:
        if has_brt:
            # bring scan observations onto a time series and combine both types
            scans = scan_to_timeseries_from_aux(all_data['blb'], hkd=all_data['hkd'], brt=all_data['brt'])
            out = out.merge(scans, join='outer')
        else:
            out = scan_to_timeseries_from_aux(all_data['blb'], hkd=all_data['hkd'])
    return out
| 47.516393
| 120
| 0.669398
|
4a11a042b21fcfc29095bc5a81e025723b4a8c05
| 2,761
|
py
|
Python
|
tests/api/test_metrics_api.py
|
ldbenitez/redis-python-course
|
529bcf94cbba6358a940002e8568bb69e32cb53a
|
[
"MIT"
] | null | null | null |
tests/api/test_metrics_api.py
|
ldbenitez/redis-python-course
|
529bcf94cbba6358a940002e8568bb69e32cb53a
|
[
"MIT"
] | null | null | null |
tests/api/test_metrics_api.py
|
ldbenitez/redis-python-course
|
529bcf94cbba6358a940002e8568bb69e32cb53a
|
[
"MIT"
] | null | null | null |
import datetime
from collections import deque
from typing import Deque
from typing import Generator
from typing import List
import pytest
from redisolar.dao.redis.metric_timeseries import MetricDaoRedisTimeseries
from redisolar.models import Measurement
from redisolar.models import MeterReading
from redisolar.schema import MeasurementSchema
TESTING_SITE_ID = 1
NOW = datetime.datetime.utcnow()
@pytest.fixture
def metric_dao(redis_timeseries, key_schema):
    # DAO under test, built from the redis_timeseries and key_schema fixtures.
    yield MetricDaoRedisTimeseries(redis_timeseries, key_schema)
@pytest.fixture
def readings(metric_dao) -> Generator[Deque[MeterReading], None, None]:
    """72 hours of per-minute meter readings; reading at index 0 is the oldest.

    Reading for minute `m` is timestamped `m` minutes before NOW and carries
    m * 1.0 in every numeric field.
    """
    entries: deque = deque(
        MeterReading(site_id=1,
                     temp_c=minute * 1.0,
                     wh_used=minute * 1.0,
                     wh_generated=minute * 1.0,
                     timestamp=NOW - datetime.timedelta(minutes=minute))
        for minute in range(72 * 60)
    )
    # oldest reading first, matching the original appendleft construction
    entries.reverse()
    yield entries
def _check_measurements(measurements: List[Measurement], limit: int):
    """Assert there are exactly `limit` measurements whose values descend
    from (limit - 1) * 1.0 down to 0.0."""
    assert len(measurements) == limit
    for offset, measurement in enumerate(measurements):
        assert measurement.value == (limit - 1 - offset) * 1.0
# Challenge #2
@pytest.mark.skip("Remove for challenge #2")
def _test_insert_and_retrieve(client, readings: List[MeterReading],
                              metric_dao: MetricDaoRedisTimeseries, limit: int):
    """Insert all readings through the DAO, then fetch `limit` measurements
    per plot from the metrics endpoint and verify their values."""
    for entry in readings:
        metric_dao.insert(entry)
    payload = client.get(f'/metrics/{TESTING_SITE_ID}?count={limit}').json
    for plot in payload['plots']:
        loaded = MeasurementSchema(many=True).load(plot['measurements'])
        _check_measurements(loaded, limit)
@pytest.mark.skip("Remove for challenge #2")
def test_datetime_is_unix_timestamp(metric_dao, client):
    """The metrics API must return timestamps as Unix epoch seconds.

    Round-trips one reading through the DAO and the /metrics endpoint and
    checks the returned timestamp decodes back to the inserted time.
    """
    reading = MeterReading(site_id=1,
                           temp_c=1.0,
                           wh_used=1.0,
                           wh_generated=1.0,
                           timestamp=NOW)
    metric_dao.insert(reading)
    resp = client.get(f'/metrics/{TESTING_SITE_ID}?count=1').json
    plots = resp['plots']
    measurement = plots[0]['measurements'][0]
    # NOTE(review): fromtimestamp() converts to local time while NOW is naive
    # UTC — presumably they agree on the test host; confirm if this flakes.
    mdt = datetime.datetime.fromtimestamp(measurement['timestamp'])
    rdt = reading.timestamp
    # Bug fix: the original asserted `mdt.day == mdt.day` (compares a value to
    # itself, always true) and never checked the month. Compare against the
    # inserted reading instead.
    assert mdt.hour == rdt.hour and mdt.minute == rdt.minute \
        and mdt.day == rdt.day and mdt.month == rdt.month and mdt.year == rdt.year
@pytest.mark.skip("Remove for challenge #2")
def test_small(metric_dao, readings, client):
    # Round-trip all readings and request only the single newest measurement.
    _test_insert_and_retrieve(client, readings, metric_dao, 1)
@pytest.mark.skip("Remove for challenge #2")
def test_large(metric_dao, readings, client):
    # Round-trip all readings and request the 100 newest measurements.
    _test_insert_and_retrieve(client, readings, metric_dao, 100)
| 30.677778
| 80
| 0.666787
|
4a11a0f097fc26d34f468e4abc8ca218382a47d5
| 1,363
|
py
|
Python
|
term/commands.py
|
synw/django-terminal
|
2a90f1445c59b92415893bd98e918e45e6a056eb
|
[
"MIT"
] | 2
|
2018-03-02T11:19:58.000Z
|
2021-07-05T17:53:00.000Z
|
term/commands.py
|
synw/django-terminal
|
2a90f1445c59b92415893bd98e918e45e6a056eb
|
[
"MIT"
] | null | null | null |
term/commands.py
|
synw/django-terminal
|
2a90f1445c59b92415893bd98e918e45e6a056eb
|
[
"MIT"
] | 1
|
2019-05-09T00:11:09.000Z
|
2019-05-09T00:11:09.000Z
|
from goerr import err
from django.conf import settings
from django.utils.html import strip_tags
from instant.producers import publish
from term.conf import COMMAND_CHANNEL
class Command:
    """A named remote-terminal command with an attached run function.

    Attributes:
        name: command name shown in listings and in repr().
        runfunc: callable invoked as runfunc(request, cmd_args).
        help: short help text for the command.
    """

    def __init__(self, name, runfunc, thelp):
        self.name = name
        self.runfunc = runfunc
        self.help = thelp

    def run(self, request, cmd_args=None):
        """Execute the command and publish a completion or error event.

        Args:
            request: the Django request passed through to runfunc.
            cmd_args: optional list of command arguments (defaults to []).
        """
        # Bug fix: the original used a mutable default argument (cmd_args=[])
        # which is shared across calls; use a None sentinel instead.
        if cmd_args is None:
            cmd_args = []
        try:
            self.runfunc(request, cmd_args)
            self.end()
            return
        except Exception as e:
            exc = str(e)
            # NOTE(review): err.new is only recorded when errors already exist
            # in the global stack — looks intentional upstream, but confirm.
            if err.exists:
                err.new(Command.run, "Can not run command")
            cmderr(exc)

    def end(self):
        # Signal command completion to the remote terminal channel.
        publish("COMMAND_END", event_class="__command_end__",
                channel=COMMAND_CHANNEL)

    def __repr__(self):
        return "<Term command: %s>" % self.name
def cmderr(exc):
    # Publish the error text to the command channel so the remote terminal
    # can display it, then report any errors accumulated in the global stack.
    publish(exc, event_class="__command_error__",
            channel=COMMAND_CHANNEL)
    if err.exists:
        err.report()
def rprint(*args):
    """Remote print: publish the space-joined arguments to the command channel.

    Also echoes a tag-stripped copy to stdout when settings.DEBUG is True.
    """
    # each argument is prefixed with a space, matching the original "" + " " + x loop
    msg = "".join(" " + str(fragment) for fragment in args)
    if settings.DEBUG is True:
        print("[Remote terminal]", strip_tags(msg))
    try:
        publish(msg, event_class="__command__",
                channel=COMMAND_CHANNEL)
    except Exception as e:
        err.new(e, rprint, "Can not publish message for remote print")
        err.throw()
| 25.716981
| 70
| 0.597946
|
4a11a132aef4360787943f34ea02baaa67c09f11
| 565
|
py
|
Python
|
var/spack/repos/builtin/packages/perl-number-format/package.py
|
whitfin/spack
|
aabd2be31a511d0e00c1017f7311a421659319d9
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 3
|
2019-06-27T13:26:50.000Z
|
2019-07-01T16:24:54.000Z
|
var/spack/repos/builtin/packages/perl-number-format/package.py
|
openbiox/spack
|
bb6ec7fb40c14b37e094a860e3625af53f633174
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 75
|
2016-07-27T11:43:00.000Z
|
2020-12-08T15:56:53.000Z
|
var/spack/repos/builtin/packages/perl-number-format/package.py
|
openbiox/spack
|
bb6ec7fb40c14b37e094a860e3625af53f633174
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 8
|
2015-10-16T13:51:49.000Z
|
2021-10-18T13:58:03.000Z
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PerlNumberFormat(PerlPackage):
    """Number::Format - Perl extension for formatting numbers"""
    # Package metadata consumed by Spack's PerlPackage build machinery:
    # project homepage, source tarball URL, and the pinned release checksum.
    homepage = "https://metacpan.org/pod/Number::Format"
    url = "https://cpan.metacpan.org/authors/id/W/WR/WRW/Number-Format-1.75.tar.gz"
    version('1.75', sha256='82d659cb16461764fd44d11a9ce9e6a4f5e8767dc1069eb03467c6e55de257f3')
| 35.3125
| 94
| 0.748673
|
4a11a17ece139b1f75ae71f4f28accadaa593d0a
| 1,528
|
py
|
Python
|
userbot/Plugins/bash.py
|
its-leo-bitch/LEO-BITCH-USERBOT
|
b465dbebe06137682e604c36b40bf3020b1a01fd
|
[
"MIT"
] | null | null | null |
userbot/Plugins/bash.py
|
its-leo-bitch/LEO-BITCH-USERBOT
|
b465dbebe06137682e604c36b40bf3020b1a01fd
|
[
"MIT"
] | null | null | null |
userbot/Plugins/bash.py
|
its-leo-bitch/LEO-BITCH-USERBOT
|
b465dbebe06137682e604c36b40bf3020b1a01fd
|
[
"MIT"
] | 3
|
2020-12-20T15:58:16.000Z
|
2021-09-07T04:39:02.000Z
|
from telethon import events
import subprocess
from telethon.errors import MessageEmptyError, MessageTooLongError, MessageNotModifiedError
import io
import asyncio
import time
@command(pattern="^.bash ?(.*)")
async def _(event):
    """Run an arbitrary shell command and reply with its stdout/stderr.

    Output longer than Telegram's message limit is uploaded as a document
    instead of being edited into the message.
    """
    if event.fwd_from:
        return
    cmd = event.pattern_match.group(1)
    reply_to_id = event.message.id
    if event.reply_to_msg_id:
        reply_to_id = event.reply_to_msg_id
    process = await asyncio.create_subprocess_shell(
        cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
    )
    stdout, stderr = await process.communicate()
    e = stderr.decode()
    if not e:
        e = "No Error"
    o = stdout.decode()
    if not o:
        o = "**Tip**: \n`If you want to see the results of your code, I suggest printing them to stdout.`"
    else:
        _o = o.split("\n")
        o = "`\n".join(_o)
    OUTPUT = f"**QUERY:**\n__Command:__\n`{cmd}` \n__PID:__\n`{process.pid}`\n\n**stderr:** \n`{e}`\n**Output:**\n{o}"
    if len(OUTPUT) > 4095:
        # too long to fit in a message: upload as a document instead
        with io.BytesIO(str.encode(OUTPUT)) as out_file:
            out_file.name = "exec.text"
            await bot.send_file(
                event.chat_id,
                out_file,
                force_document=True,
                allow_cache=False,
                caption=cmd,
                reply_to=reply_to_id
            )
        await event.delete()
        # Bug fix: the original fell through and called event.edit(OUTPUT) on
        # the message that was just deleted, with text exceeding the length
        # limit anyway. Stop here instead.
        return
    await event.edit(OUTPUT)
| 31.183673
| 118
| 0.607984
|
4a11a1b39d678a49c27aa171bbe056bc66d0c625
| 18,247
|
py
|
Python
|
fairseq/modules/multihead_attention.py
|
layer6ai-labs/T-Fixup
|
b17e166d9f60671a92c3f39e24c9b8c93995e5a3
|
[
"MIT"
] | 70
|
2020-07-12T12:07:05.000Z
|
2022-03-24T03:09:56.000Z
|
fairseq/modules/multihead_attention.py
|
melody-rain/T-Fixup
|
b17e166d9f60671a92c3f39e24c9b8c93995e5a3
|
[
"MIT"
] | 8
|
2020-07-12T12:12:17.000Z
|
2022-01-07T18:37:42.000Z
|
fairseq/modules/multihead_attention.py
|
melody-rain/T-Fixup
|
b17e166d9f60671a92c3f39e24c9b8c93995e5a3
|
[
"MIT"
] | 13
|
2020-07-20T15:17:14.000Z
|
2022-02-01T06:30:51.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Dict, Optional, Tuple
import torch
import torch.nn.functional as F
from fairseq import utils
from torch import Tensor, nn
from torch.nn import Parameter
from fairseq.incremental_decoding_utils import with_incremental_state
@with_incremental_state
class MultiheadAttention(nn.Module):
    """Multi-headed attention.
    See "Attention Is All You Need" for more details.
    """
    def __init__(
        self,
        embed_dim,
        num_heads,
        kdim=None,
        vdim=None,
        dropout=0.0,
        bias=True,
        add_bias_kv=False,
        add_zero_attn=False,
        self_attention=False,
        encoder_decoder_attention=False,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        # key/value dims may differ from embed_dim (cross-attention use case)
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert (
            self.head_dim * num_heads == self.embed_dim
        ), "embed_dim must be divisible by num_heads"
        # queries are pre-scaled by 1/sqrt(head_dim) in forward()
        self.scaling = self.head_dim ** -0.5
        self.self_attention = self_attention
        self.encoder_decoder_attention = encoder_decoder_attention
        assert not self.self_attention or self.qkv_same_dim, (
            "Self-attention requires query, key and " "value to be of the same size"
        )
        self.k_proj = nn.Linear(self.kdim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        if add_bias_kv:
            self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
            self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
        else:
            self.bias_k = self.bias_v = None
        self.add_zero_attn = add_zero_attn
        self.reset_parameters()
        self.onnx_trace = False
        # use torch's fused multi_head_attention_forward when this torch
        # build provides it (checked via hasattr below)
        self.enable_torch_version = False
        if hasattr(F, "multi_head_attention_forward"):
            self.enable_torch_version = True
        else:
            self.enable_torch_version = False
    def prepare_for_onnx_export_(self):
        self.onnx_trace = True
    def reset_parameters(self):
        if self.qkv_same_dim:
            # Empirically observed the convergence to be much better with
            # the scaled initialization
            nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
            nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
            nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
        else:
            nn.init.xavier_uniform_(self.k_proj.weight)
            nn.init.xavier_uniform_(self.v_proj.weight)
            nn.init.xavier_uniform_(self.q_proj.weight)
        nn.init.xavier_uniform_(self.out_proj.weight)
        if self.out_proj.bias is not None:
            nn.init.constant_(self.out_proj.bias, 0.)
        if self.bias_k is not None:
            nn.init.xavier_normal_(self.bias_k)
        if self.bias_v is not None:
            nn.init.xavier_normal_(self.bias_v)
    def forward(
        self,
        query,
        key: Optional[Tensor],
        value: Optional[Tensor],
        key_padding_mask: Optional[Tensor] = None,
        incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
        need_weights: bool = True,
        static_kv: bool = False,
        attn_mask: Optional[Tensor] = None,
        before_softmax: bool = False,
        need_head_weights: bool = False,
    ) -> Tuple[Tensor, Optional[Tensor]]:
        """Input shape: Time x Batch x Channel
        Args:
            key_padding_mask (ByteTensor, optional): mask to exclude
                keys that are pads, of shape `(batch, src_len)`, where
                padding elements are indicated by 1s.
            need_weights (bool, optional): return the attention weights,
                averaged over heads (default: False).
            attn_mask (ByteTensor, optional): typically used to
                implement causal attention, where the mask prevents the
                attention from looking forward in time (default: None).
            before_softmax (bool, optional): return the raw attention
                weights and values before the attention softmax.
            need_head_weights (bool, optional): return the attention
                weights for each head. Implies *need_weights*. Default:
                return the average attention weights over all heads.
        """
        if need_head_weights:
            need_weights = True
        tgt_len, bsz, embed_dim = query.size()
        assert embed_dim == self.embed_dim
        assert list(query.size()) == [tgt_len, bsz, embed_dim]
        # Fast path: delegate to torch's fused implementation when there is no
        # incremental decoding state involved (training / full-sequence eval).
        if (
            self.enable_torch_version
            and not self.onnx_trace
            and incremental_state is None
            and not static_kv
        ):
            assert key is not None and value is not None
            return F.multi_head_attention_forward(
                query,
                key,
                value,
                self.embed_dim,
                self.num_heads,
                torch.empty([0]),
                torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),
                self.bias_k,
                self.bias_v,
                self.add_zero_attn,
                self.dropout,
                self.out_proj.weight,
                self.out_proj.bias,
                self.training,
                key_padding_mask,
                need_weights,
                attn_mask,
                use_separate_proj_weight=True,
                q_proj_weight=self.q_proj.weight,
                k_proj_weight=self.k_proj.weight,
                v_proj_weight=self.v_proj.weight,
            )
        # Incremental decoding: fetch cached keys/values from previous steps.
        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
            if saved_state is not None and "prev_key" in saved_state:
                # previous time steps are cached - no need to recompute
                # key and value if they are static
                if static_kv:
                    assert self.encoder_decoder_attention and not self.self_attention
                    key = value = None
            else:
                saved_state = None
        if self.self_attention:
            q = self.q_proj(query)
            k = self.k_proj(query)
            v = self.v_proj(query)
        elif self.encoder_decoder_attention:
            # encoder-decoder attention
            q = self.q_proj(query)
            if key is None:
                assert value is None
                k = v = None
            else:
                k = self.k_proj(key)
                v = self.v_proj(key)
        else:
            assert key is not None and value is not None
            q = self.q_proj(query)
            k = self.k_proj(key)
            v = self.v_proj(value)
        # pre-scale queries by 1/sqrt(head_dim) (self.scaling set in __init__)
        q *= self.scaling
        if self.bias_k is not None:
            assert self.bias_v is not None
            # append the learned bias key/value as an extra source position;
            # masks are extended by one zero column to match
            k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
            v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
            if attn_mask is not None:
                attn_mask = torch.cat(
                    [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
                )
            if key_padding_mask is not None:
                key_padding_mask = torch.cat(
                    [
                        key_padding_mask,
                        key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
                    ],
                    dim=1,
                )
        # reshape to (bsz * num_heads, seq_len, head_dim) for batched bmm
        q = (
            q.contiguous()
            .view(tgt_len, bsz * self.num_heads, self.head_dim)
            .transpose(0, 1)
        )
        if k is not None:
            k = (
                k.contiguous()
                .view(-1, bsz * self.num_heads, self.head_dim)
                .transpose(0, 1)
            )
        if v is not None:
            v = (
                v.contiguous()
                .view(-1, bsz * self.num_heads, self.head_dim)
                .transpose(0, 1)
            )
        if saved_state is not None:
            # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
            if "prev_key" in saved_state:
                _prev_key = saved_state["prev_key"]
                assert _prev_key is not None
                prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
                if static_kv:
                    k = prev_key
                else:
                    assert k is not None
                    k = torch.cat([prev_key, k], dim=1)
            if "prev_value" in saved_state:
                _prev_value = saved_state["prev_value"]
                assert _prev_value is not None
                prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
                if static_kv:
                    v = prev_value
                else:
                    assert v is not None
                    v = torch.cat([prev_value, v], dim=1)
            prev_key_padding_mask: Optional[Tensor] = None
            if "prev_key_padding_mask" in saved_state:
                prev_key_padding_mask = saved_state["prev_key_padding_mask"]
            assert k is not None and v is not None
            key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
                key_padding_mask=key_padding_mask,
                prev_key_padding_mask=prev_key_padding_mask,
                batch_size=bsz,
                src_len=k.size(1),
                static_kv=static_kv,
            )
            # write the extended key/value/mask back into the decoding cache
            saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim)
            saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim)
            saved_state["prev_key_padding_mask"] = key_padding_mask
            # In this branch incremental_state is never None
            assert incremental_state is not None
            incremental_state = self._set_input_buffer(incremental_state, saved_state)
        assert k is not None
        src_len = k.size(1)
        # This is part of a workaround to get around fork/join parallelism
        # not supporting Optional types.
        if key_padding_mask is not None and key_padding_mask.dim() == 0:
            key_padding_mask = None
        if key_padding_mask is not None:
            assert key_padding_mask.size(0) == bsz
            assert key_padding_mask.size(1) == src_len
        if self.add_zero_attn:
            # append an all-zero key/value position so attention can "attend
            # to nothing"; masks gain a matching zero column
            assert v is not None
            src_len += 1
            k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
            v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
            if attn_mask is not None:
                attn_mask = torch.cat(
                    [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
                )
            if key_padding_mask is not None:
                key_padding_mask = torch.cat(
                    [
                        key_padding_mask,
                        torch.zeros(key_padding_mask.size(0), 1).type_as(
                            key_padding_mask
                        ),
                    ],
                    dim=1,
                )
        attn_weights = torch.bmm(q, k.transpose(1, 2))
        attn_weights = MultiheadAttention.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
        assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
        if attn_mask is not None:
            attn_mask = attn_mask.unsqueeze(0)
            if self.onnx_trace:
                attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
            attn_weights += attn_mask
        if key_padding_mask is not None:
            # don't attend to padding symbols
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.masked_fill(
                key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf")
            )
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        if before_softmax:
            return attn_weights, v
        attn_weights_float = utils.softmax(
            attn_weights, dim=-1, onnx_trace=self.onnx_trace
        )
        attn_weights = attn_weights_float.type_as(attn_weights)
        attn_probs = F.dropout(
            attn_weights_float.type_as(attn_weights),
            p=self.dropout,
            training=self.training,
        )
        assert v is not None
        attn = torch.bmm(attn_probs, v)
        assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
        if self.onnx_trace and attn.size(1) == 1:
            # when ONNX tracing a single decoder step (sequence length == 1)
            # the transpose is a no-op copy before view, thus unnecessary
            attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
        else:
            attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
        attn = self.out_proj(attn)
        attn_weights: Optional[Tensor] = None
        if need_weights:
            attn_weights = attn_weights_float.view(
                bsz, self.num_heads, tgt_len, src_len
            ).transpose(1, 0)
            if not need_head_weights:
                # average attention weights over heads
                attn_weights = attn_weights.mean(dim=0)
        return attn, attn_weights
    @staticmethod
    def _append_prev_key_padding_mask(
        key_padding_mask: Optional[Tensor],
        prev_key_padding_mask: Optional[Tensor],
        batch_size: int,
        src_len: int,
        static_kv: bool,
    ) -> Optional[Tensor]:
        # saved key padding masks have shape (bsz, seq_len)
        if prev_key_padding_mask is not None and static_kv:
            new_key_padding_mask = prev_key_padding_mask
        elif prev_key_padding_mask is not None and key_padding_mask is not None:
            new_key_padding_mask = torch.cat(
                [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
            )
        # During incremental decoding, as the padding token enters and
        # leaves the frame, there will be a time when prev or current
        # is None
        elif prev_key_padding_mask is not None:
            filler = torch.zeros(batch_size, src_len - prev_key_padding_mask.size(1))
            if prev_key_padding_mask.is_cuda:
                filler = filler.cuda()
            new_key_padding_mask = torch.cat(
                [prev_key_padding_mask.float(), filler.float()], dim=1
            )
        elif key_padding_mask is not None:
            filler = torch.zeros(batch_size, src_len - key_padding_mask.size(1))
            if key_padding_mask.is_cuda:
                filler = filler.cuda()
            new_key_padding_mask = torch.cat(
                [filler.float(), key_padding_mask.float()], dim=1
            )
        else:
            # both masks are None here, so this returns None
            new_key_padding_mask = prev_key_padding_mask
        return new_key_padding_mask
    def reorder_incremental_state(
        self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order
    ):
        """Reorder buffered internal state (for incremental generation)."""
        input_buffer = self._get_input_buffer(incremental_state)
        if input_buffer is not None:
            for k in input_buffer.keys():
                input_buffer_k = input_buffer[k]
                if input_buffer_k is not None:
                    input_buffer[k] = input_buffer_k.index_select(0, new_order)
            incremental_state = self._set_input_buffer(incremental_state, input_buffer)
        return incremental_state
    def _get_input_buffer(
        self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
    ) -> Dict[str, Optional[Tensor]]:
        # returns the cached attention state, or an empty dict if none exists
        result = self.get_incremental_state(incremental_state, "attn_state")
        if result is not None:
            return result
        else:
            empty_result: Dict[str, Optional[Tensor]] = {}
            return empty_result
    def _set_input_buffer(
        self,
        incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
        buffer: Dict[str, Optional[Tensor]],
    ):
        return self.set_incremental_state(incremental_state, "attn_state", buffer)
    # NOTE(review): declared without self/@staticmethod but invoked via the
    # class in forward(), so it acts as an overridable no-op hook.
    def apply_sparse_mask(attn_weights, tgt_len: int, src_len: int, bsz: int):
        return attn_weights
    def upgrade_state_dict_named(self, state_dict, name):
        # Split legacy combined in_proj_weight/in_proj_bias checkpoint entries
        # into the separate q/k/v projection parameters used by this module.
        prefix = name + "." if name != "" else ""
        items_to_add = {}
        keys_to_remove = []
        for k in state_dict.keys():
            if k.endswith(prefix + "in_proj_weight"):
                # in_proj_weight used to be q + k + v with same dimensions
                dim = int(state_dict[k].shape[0] / 3)
                items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim]
                items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim]
                items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :]
                keys_to_remove.append(k)
                k_bias = prefix + "in_proj_bias"
                if k_bias in state_dict.keys():
                    # 'k' still refers to the in_proj_weight key here, so the
                    # split size matches the weight's first dimension
                    dim = int(state_dict[k].shape[0] / 3)
                    items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim]
                    items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][
                        dim : 2 * dim
                    ]
                    items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :]
                    keys_to_remove.append(prefix + "in_proj_bias")
        for k in keys_to_remove:
            del state_dict[k]
        for key, value in items_to_add.items():
            state_dict[key] = value
| 39.495671
| 96
| 0.57615
|
4a11a2545c09ed29736aaeb3f04c0cb866029bcb
| 3,267
|
py
|
Python
|
aliyun-python-sdk-vod/aliyunsdkvod/request/v20170321/DescribeVodRefreshTasksRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-vod/aliyunsdkvod/request/v20170321/DescribeVodRefreshTasksRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-vod/aliyunsdkvod/request/v20170321/DescribeVodRefreshTasksRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvod.endpoint import endpoint_data
class DescribeVodRefreshTasksRequest(RpcRequest):
	# Generated Aliyun SDK request for the VOD "DescribeVodRefreshTasks" API
	# (version 2017-03-21). Each get_*/set_* pair below is a thin accessor
	# over the request's query-parameter map.
	def __init__(self):
		RpcRequest.__init__(self, 'vod', '2017-03-21', 'DescribeVodRefreshTasks','vod')
		# route through the product's endpoint map/regional table when present
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
	def get_ObjectPath(self):
		return self.get_query_params().get('ObjectPath')
	def set_ObjectPath(self,ObjectPath):
		self.add_query_param('ObjectPath',ObjectPath)
	def get_StartTime(self):
		return self.get_query_params().get('StartTime')
	def set_StartTime(self,StartTime):
		self.add_query_param('StartTime',StartTime)
	def get_PageNumber(self):
		return self.get_query_params().get('PageNumber')
	def set_PageNumber(self,PageNumber):
		self.add_query_param('PageNumber',PageNumber)
	def get_ResourceGroupId(self):
		return self.get_query_params().get('ResourceGroupId')
	def set_ResourceGroupId(self,ResourceGroupId):
		self.add_query_param('ResourceGroupId',ResourceGroupId)
	def get_SecurityToken(self):
		return self.get_query_params().get('SecurityToken')
	def set_SecurityToken(self,SecurityToken):
		self.add_query_param('SecurityToken',SecurityToken)
	def get_PageSize(self):
		return self.get_query_params().get('PageSize')
	def set_PageSize(self,PageSize):
		self.add_query_param('PageSize',PageSize)
	def get_ObjectType(self):
		return self.get_query_params().get('ObjectType')
	def set_ObjectType(self,ObjectType):
		self.add_query_param('ObjectType',ObjectType)
	def get_TaskId(self):
		return self.get_query_params().get('TaskId')
	def set_TaskId(self,TaskId):
		self.add_query_param('TaskId',TaskId)
	def get_DomainName(self):
		return self.get_query_params().get('DomainName')
	def set_DomainName(self,DomainName):
		self.add_query_param('DomainName',DomainName)
	def get_EndTime(self):
		return self.get_query_params().get('EndTime')
	def set_EndTime(self,EndTime):
		self.add_query_param('EndTime',EndTime)
	def get_OwnerId(self):
		return self.get_query_params().get('OwnerId')
	def set_OwnerId(self,OwnerId):
		self.add_query_param('OwnerId',OwnerId)
	def get_Status(self):
		return self.get_query_params().get('Status')
	def set_Status(self,Status):
		self.add_query_param('Status',Status)
| 31.718447
| 81
| 0.759106
|
4a11a3e55706826bdaecd4bcd6c16fdcb61fe938
| 12,071
|
py
|
Python
|
project/settings.py
|
xelaxela13/stock
|
ef7df50194be340e7faff915e9de4e3b1ade4eca
|
[
"MIT"
] | 1
|
2019-02-19T08:43:51.000Z
|
2019-02-19T08:43:51.000Z
|
project/settings.py
|
xelaxela13/stock
|
ef7df50194be340e7faff915e9de4e3b1ade4eca
|
[
"MIT"
] | 6
|
2021-03-19T02:07:34.000Z
|
2022-02-10T08:27:57.000Z
|
project/settings.py
|
xelaxela13/stock
|
ef7df50194be340e7faff915e9de4e3b1ade4eca
|
[
"MIT"
] | null | null | null |
"""
Django settings for project project.
Generated by 'django-admin startproject' using Django 2.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
from os import path, environ
from celery.schedules import crontab
from decouple import config
from django.urls import reverse_lazy
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
# from whitenoise.storage import CompressedManifestStaticFilesStorage
# Build paths inside the project like this: path.join(BASE_DIR, ...)
BASE_DIR = path.dirname(path.dirname(path.abspath(__file__)))
def rel(*x):
    # Build an absolute path below the project root,
    # e.g. rel('log', 'file.log') -> <BASE_DIR>/log/file.log
    return path.join(BASE_DIR, *x)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', cast=bool, default=False)
def log_level():
    """Return the logging level name matching the module-level DEBUG flag."""
    if DEBUG:
        return 'DEBUG'
    return 'INFO'
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=lambda v: [s.strip() for s in v.split(',')], default='127.0.0.1')
ADMINS = [('Alex', 'xelaxela13@gmail.com'), ]
# Application definition
INSTALLED_APPS = [
# custom dashboard
'jet.dashboard', # comment this - migrate and then uncomment and migrate again
'jet',
# django apps
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# external apps
'bootstrap4',
'rosetta',
'django_celery_results',
'django_celery_beat',
'import_export',
'pipeline',
# local apps
'project',
'accounts',
'home',
'fileupload',
'stock',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.locale.LocaleMiddleware', # i18n
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
# 'whitenoise.middleware.WhiteNoiseMiddleware',
'project.middleware.DefaultLanguageMiddleware',
]
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [rel('templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'project.context_processors.settings_to_template', # my context processor
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': config('NAME', default='postgres'),
'USER': config('USER', default='postgres'),
'HOST': config('HOST', default='db'),
'PORT': config('PORT', default=5432),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_USER_MODEL = 'accounts.User'
LOGIN_REDIRECT_URL = reverse_lazy('panel')
LOGOUT_REDIRECT_URL = '/'
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'ru'
LANGUAGES = [
('ru', _('Russian')),
('en', _('English')),
]
LOCALE_PATHS = [
rel('locale'),
]
TIME_ZONE = 'Europe/Kiev'
USE_I18N = True
USE_L10N = True
USE_TZ = True
SHOW_LANG_SWITCH = False
DEFAULT_LANGUAGE = LANGUAGE_CODE
# Email send
# https://docs.djangoproject.com/en/2.0/topics/email/
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_PASSWORD = config('GMAIL_PASSWORD', default='')
EMAIL_HOST_USER = config('GMAIL_USER', default='')
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# EMAIL_USE_SSL = True
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': config('MEMCACHED_HOST', default='127.0.0.1') + ':' + config('MEMCACHED_PORT', default='11211'),
'TIMEOUT': 60 * 60, # 1h,
}
}
# Static files (CSS, JavaScript, Images)
# Static files (CSS, JavaScript, images).
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_FOLDER = 'static_content'
STATIC_URL = '/static/'
STATIC_ROOT = rel(STATIC_FOLDER, 'asset')
STATICFILES_DIRS = [
    rel('asset_dev')
]
# Django's default finders plus django-pipeline's, so collectstatic can see
# pipeline-compiled bundles.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'pipeline.finders.FileSystemFinder',
    'pipeline.finders.AppDirectoriesFinder',
    'pipeline.finders.PipelineFinder',
)
# Alternative storage kept for reference (whitenoise + manifest hashing):
# CompressedManifestStaticFilesStorage.manifest_strict = False
# STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
# django-pipeline: compile ES6 via Babel and SCSS via sass; JS compression off.
PIPELINE = {
    'COMPILERS': ('pipeline.compilers.es6.ES6Compiler', 'pipeline.compilers.sass.SASSCompiler', ),
    # 'BABEL_BINARY': '/usr/lib/node_modules/@babel',
    'BABEL_ARGUMENTS': '--presets /usr/lib/node_modules/@babel/preset-env',
    'JS_COMPRESSOR': None,  # alternative: 'pipeline.compressors.jsmin.JSMinCompressor'
    'STYLESHEETS': {
        'styles': {
            'source_filenames': (
                'styles.scss',
                'open-iconic/font/css/open-iconic-bootstrap.scss',
                'magic/magic.min.css'
            ),
            'output_filename': 'styles.css',
            'extra_context': {
                'media': 'screen',
            },
        },
        'admin': {
            'source_filenames': (
                'admin/css/custom.scss',
            ),
            'output_filename': 'admin/css/custom.css',
            'extra_context': {
                'media': 'screen',
            },
        },
    },
    'JAVASCRIPT': {
        'js': {
            'source_filenames': (
                'script.es6',
            ),
            'output_filename': 'script.js',
        }
    }
}
# NOTE(review): path.join() on a URL produces OS-specific separators on
# Windows; plain concatenation or posixpath may be intended — confirm.
SITE_LOGO_FIRST = path.join(STATIC_URL, 'images/logo.png')
SITE_LOGO_SECOND = path.join(STATIC_URL, 'images/logo.png')
# Media files (user uploads).
# https://docs.djangoproject.com/en/2.0/howto/static-files/#serving-files-uploaded-by-a-user-during-development
MEDIA_URL = '/media/'
MEDIA_ROOT = rel(STATIC_FOLDER, 'media')
THUMBNAIL_SIZE = [250, 250]
DELETE_MEDIA_FILES = True  # delete files after deleting model entity
# https://ipstack.com/
# Free geo-IP API access key (empty default disables lookups).
IPSTACK_ACCESS_KEY = config('IPSTACK_ACCESS_KEY', default='')
# Activate Django-Heroku database/static configuration when deployed to Heroku.
if not DEBUG and config('HEROKU', default=False):
    import dj_database_url
    DATABASES['default'] = dj_database_url.config(conn_max_age=600, ssl_require=True)
    import django_heroku
    django_heroku.settings(locals())
# Google Cloud API: client libraries read the credentials path from the env var.
GOOGLE_APPLICATION_CREDENTIALS = rel('baseprojectdjango-208a1c3136b5.json')
environ['GOOGLE_APPLICATION_CREDENTIALS'] = rel('baseprojectdjango-208a1c3136b5.json')
# Celery settings
#: Only add pickle to this list if your broker is secured
#: from unwanted access (see userguide/security.html)
# Redis broker; results stored via django-celery-results in the database.
CELERY_REDIS_HOST = 'redis'
CELERY_REDIS_PORT = '6379'
CELERY_BROKER_URL = 'redis://' + CELERY_REDIS_HOST + ':' + CELERY_REDIS_PORT + '/0'
CELERY_RESULT_BACKEND = 'django_celery_results.backends.database.DatabaseBackend'
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TIMEZONE = TIME_ZONE
# Periodic tasks: prune log files weekly (Sunday 01:00).
CELERY_BEAT_SCHEDULE = {
    'Clear log files': {
        'task': 'project.tasks.clear_log_files',
        'schedule': crontab(hour=1, day_of_week=0),
    },
}
# Logging: daily log files per concern (django / security / request / celery),
# console output while DEBUG, and error e-mails to ADMINS.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
            'datefmt': "%d/%b/%Y %H:%M:%S"
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        }
    },
    'filters': {
        'require_debug_true': {
            '()': 'django.utils.log.RequireDebugTrue',
        }
    },
    'handlers': {
        # Log files are truncated ('w') on each restart in DEBUG, appended in prod.
        'file_log': {
            'level': log_level(),
            'class': 'logging.FileHandler',
            'mode': 'w' if DEBUG else 'a',
            'filename': rel('log', '{}_django.log'.format(timezone.now().strftime('%Y%m%d'))),
            'formatter': 'verbose',
        },
        'secure_file_log': {
            'level': 'WARNING',
            'class': 'logging.FileHandler',
            'mode': 'w' if DEBUG else 'a',
            'filename': rel('log', '{}_secure.log'.format(timezone.now().strftime('%Y%m%d'))),
            'formatter': 'verbose',
        },
        'request_file_log': {
            'level': 'WARNING',
            'class': 'logging.FileHandler',
            'mode': 'w' if DEBUG else 'a',
            'filename': rel('log', '{}_request.log'.format(timezone.now().strftime('%Y%m%d'))),
            'formatter': 'verbose',
        },
        'celery_file_log': {
            'level': log_level(),
            'class': 'logging.FileHandler',
            'mode': 'w' if DEBUG else 'a',
            'filename': rel('log', '{}_celery.log'.format(timezone.now().strftime('%Y%m%d'))),
            'formatter': 'verbose',
        },
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler',
            'formatter': 'verbose',
            'include_html': True,
        },
        'console': {
            'level': log_level(),
            'filters': ['require_debug_true'],
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
    },
    'loggers': {
        'django': {
            'handlers': ['file_log', 'console'],
            'propagate': True,
            'level': log_level(),
        },
        'django.request': {
            # BUG FIX: the key was misspelled 'handler', so dictConfig ignored
            # it and request warnings were never mailed nor written to the
            # request log. The stray 'email_backend' key was likewise inert on
            # a logger and has been removed; configure it on the
            # AdminEmailHandler entry instead if a custom backend is needed.
            'handlers': ['mail_admins', 'request_file_log'],
            'propagate': True,
            'level': 'WARNING',
        },
        'django.security': {
            'handlers': ['mail_admins', 'secure_file_log'],
            'level': 'WARNING',
            'propagate': False,
        },
        'celery': {
            'handlers': ['celery_file_log', 'console'],
            'level': log_level(),
            'propagate': True
        },
    }
}
# Development-only tweaks: debug toolbar, plain static storage, and two
# REPL conveniences (`pp`, `st`) injected into builtins.
if DEBUG:
    INTERNAL_IPS = ('127.0.0.1',)
    INSTALLED_APPS.append('debug_toolbar')
    MIDDLEWARE.append('debug_toolbar.middleware.DebugToolbarMiddleware')
    STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
    DEBUG_TOOLBAR_CONFIG = {
        "SHOW_TOOLBAR_CALLBACK": lambda request: True,
    }
    PIPELINE['JS_COMPRESSOR'] = None
    # BUG FIX: __builtins__ is a dict only in non-main modules (it is the
    # builtins *module* in __main__), so subscripting it is fragile. Going
    # through the builtins module works in both cases.
    import builtins
    from pprint import pprint
    from pdb import set_trace
    builtins.pp = pprint
    builtins.st = set_trace
| 31.272021
| 116
| 0.632508
|
4a11a49d95efd8b44dd3b30ab4d40a155be15020
| 2,052
|
py
|
Python
|
Soil Sensor/Soil sensor settings.py
|
ronpang/WIZnet-HK_Ron
|
d9df912ee9afe70c3aad17e0d703428afe2f2b4c
|
[
"Apache-2.0"
] | null | null | null |
Soil Sensor/Soil sensor settings.py
|
ronpang/WIZnet-HK_Ron
|
d9df912ee9afe70c3aad17e0d703428afe2f2b4c
|
[
"Apache-2.0"
] | null | null | null |
Soil Sensor/Soil sensor settings.py
|
ronpang/WIZnet-HK_Ron
|
d9df912ee9afe70c3aad17e0d703428afe2f2b4c
|
[
"Apache-2.0"
] | null | null | null |
import board
import digitalio
import analogio
import time
import busio  # kept: imported by the original; unused in this script

# ADC pin A0 reads the raw value from the soil-humidity module.
soil = analogio.AnalogIn(board.A0)

# On-board LED toggles each loop iteration to show the system is alive.
led = digitalio.DigitalInOut(board.LED)
led.direction = digitalio.Direction.OUTPUT

# Number of calibration samples per phase (dry and wet).
SAMPLES = 100


def _countdown(label, seconds=10):
    """Give the user time to reposition the probe, printing once per second."""
    for i in range(1, seconds + 1):
        print("Please change to collect " + label + " value..." + str(i))
        time.sleep(1)


def _average_reading(label, samples):
    """Average `samples` ADC readings taken 0.1 s apart and return the mean."""
    total = 0
    for i in range(1, samples + 1):
        total = soil.value + total
        time.sleep(0.1)
        print(label + " value counting..." + str(i))
    return total / samples


# Calibration phase 1: probe in dry soil / air.
_countdown("dry")
dry_average = _average_reading("Dry", SAMPLES)
print('Average(dry): ' + str(SAMPLES) + ' ' + str(dry_average))

# Calibration phase 2: probe in wet soil / water.
_countdown("wet")
wet_average = _average_reading("Wet", SAMPLES)
print('Average(wet): ' + str(SAMPLES) + ' ' + str(wet_average))

time.sleep(5)  # let the user place the probe before live readings start

# Live readings: map the raw value linearly onto 0-100 % between the
# calibrated dry and wet averages.
while True:
    # BUG FIX: read the ADC once per iteration. The original sampled
    # soil.value up to three times, so sensor jitter could make the range
    # check and the reported percentage disagree.
    reading = soil.value
    if reading < dry_average or reading > wet_average:
        print("Error value, please put the sensor to your plant")
    else:
        per_value = (reading - dry_average) / ((wet_average - dry_average) / 100)
        print("Percentage: " + str(per_value))
    led.value = not led.value
    time.sleep(1)
| 29.73913
| 99
| 0.691033
|
4a11a5823a6a48ef4215fbd874a162fbf9f8b0dd
| 3,420
|
py
|
Python
|
python/inspect_func_test_decorator.py
|
fabianlee/blogcode
|
55db576e0b789944ce2cf795e89e5dead2290345
|
[
"MIT"
] | 34
|
2017-05-19T12:46:28.000Z
|
2022-02-03T10:51:25.000Z
|
python/inspect_func_test_decorator.py
|
fabianlee/blogcode
|
55db576e0b789944ce2cf795e89e5dead2290345
|
[
"MIT"
] | 2
|
2018-05-10T15:33:02.000Z
|
2019-02-21T13:10:14.000Z
|
python/inspect_func_test_decorator.py
|
fabianlee/blogcode
|
55db576e0b789944ce2cf795e89e5dead2290345
|
[
"MIT"
] | 32
|
2017-05-08T03:48:16.000Z
|
2022-03-11T15:51:55.000Z
|
#!/usr/bin/env python
"""
Use Decorators and introspection to see the arguments passed to a function
https://fabianlee.org/2019/09/22/python-using-a-custom-decorator-to-inspect-function-arguments/
"""
import sys
import argparse
import inspect
import functools
def showargs_decorator(func):
    """Decorator that reports a function's call arguments before invoking it."""
    @functools.wraps(func)  # preserves __name__, __doc__, etc. on the wrapper
    def inner(*args, **kwargs):
        # Print the call details, then delegate to the wrapped function
        # unchanged and hand back its result.
        inspect_decorator(func, args, kwargs)
        return func(*args, **kwargs)
    return inner
def inspect_decorator(func, args, kwargs):
    """Print the name of *func* and the arguments it is being called with.

    Positional values are matched against the formal parameter names;
    overflow positionals are reported under the ``*varargs`` name, and
    keyword arguments under the ``**kwargs`` name.
    """
    print("function {}()".format(func.__name__))

    # BUG FIX: inspect.getargspec() was removed in Python 3.11;
    # getfullargspec() is the drop-in replacement (it names the **kwargs
    # parameter `varkw` instead of `keywords`). Python 2 fallback kept.
    try:
        argspec = inspect.getfullargspec(func)
        varkw_name = argspec.varkw
    except AttributeError:  # pragma: no cover - Python 2 only
        argspec = inspect.getargspec(func)
        varkw_name = argspec.keywords

    # Pair each positional argument with its formal name. (The original
    # guarded this with `type(argspec.args is list)`, which is always truthy,
    # and skipped *all* positional reporting for functions with zero formal
    # parameters - both fixed here.)
    formal_names = argspec.args or []
    for position, arg in enumerate(args):
        if position < len(formal_names):
            print(str(formal_names[position]) + "=" + str(arg))
        elif argspec.varargs:
            # Ran past the formal parameters: report under *varargs.
            print("*" + argspec.varargs + "=" + str(arg))

    # Finally show the named (keyword) arguments.
    if varkw_name:
        for k, v in kwargs.items():
            print("**" + varkw_name + " " + k + "=" + str(v))
@showargs_decorator
def show_hello_world():
    """Demo: decorated function that takes no arguments."""
    print("Hello, World!")
@showargs_decorator
def show_math_result(a, b, show_lower=True):
    """Print and return a + b; show_lower picks lower/upper case 'plus'."""
    if show_lower:
        op_display = "plus"
    else:
        op_display = "PLUS"
    total = a + b
    print("{} {} {} = {}".format(a, op_display, b, total))
    return total
@showargs_decorator
def sum_var_positional_args(a, b, *args):
    """Return a + b plus every extra positional argument."""
    # Because the decorator used functools.wraps, __name__ here still refers
    # to this function rather than the wrapper.
    total = a + b
    for extra in args:
        total = total + extra
    return total
@showargs_decorator
def sum_var_named_args(a, b, **kwargs):
    """Return a + b plus every keyword-argument value."""
    total = a + b
    # Iterate in sorted-key order, matching the original's traversal.
    for _key, value in sorted(kwargs.items()):
        total = total + value
    return total
def main(argv):
    """Parse two integers from the command line and run each demo function."""
    parser = argparse.ArgumentParser(description="introspection using decorator")
    parser.add_argument('a', type=int, help="first integer")
    parser.add_argument('b', type=int, help="second integer")
    opts = parser.parse_args()

    # Demo 1: no-argument function.
    print("")
    show_hello_world()

    # Demo 2: plain positional arguments with an optional keyword.
    print("")
    total = show_math_result(opts.a, opts.b)
    print("final sum = {}".format(total))

    # Demo 3: overflow positional arguments (*args).
    print("")
    total = sum_var_positional_args(opts.a, opts.b, 4, 5, 6)
    print("final sum = {}".format(total))

    # Demo 4: keyword arguments (**kwargs).
    print("")
    total = sum_var_named_args(opts.a, opts.b, c=4, d=5, e=6)
    print("final sum = {}".format(total))
if __name__ == '__main__':
    # Script entry point; argv is forwarded but unused - argparse reads
    # sys.argv itself inside main().
    main(sys.argv)
| 25.714286
| 96
| 0.626316
|
4a11a71d61e89793606b7df8c1bce5fb1d08e8b8
| 79,847
|
py
|
Python
|
Lib/unittest/mock.py
|
sireliah/polish-python
|
605df4944c2d3bc25f8bf6964b274c0a0d297cc3
|
[
"PSF-2.0"
] | 1
|
2018-06-21T18:21:24.000Z
|
2018-06-21T18:21:24.000Z
|
Lib/unittest/mock.py
|
sireliah/polish-python
|
605df4944c2d3bc25f8bf6964b274c0a0d297cc3
|
[
"PSF-2.0"
] | null | null | null |
Lib/unittest/mock.py
|
sireliah/polish-python
|
605df4944c2d3bc25f8bf6964b274c0a0d297cc3
|
[
"PSF-2.0"
] | null | null | null |
# mock.py
# Test tools for mocking and patching ("polish-python" keyword dialect).
# Maintained by Michael Foord
# Backport for other versions of Python available from
# http://pypi.python.org/pypi/mock
__all__ = (
    'Mock',
    'MagicMock',
    'patch',
    'sentinel',
    'DEFAULT',
    'ANY',
    'call',
    'create_autospec',
    'FILTER_DIR',
    'NonCallableMock',
    'NonCallableMagicMock',
    'mock_open',
    'PropertyMock',
)
__version__ = '1.0'
zaimportuj inspect
zaimportuj pprint
zaimportuj sys
zaimportuj builtins
z types zaimportuj ModuleType
z functools zaimportuj wraps, partial
# Names of every public builtin; used when spec'ing against builtins.
_builtins = {name dla name w dir(builtins) jeżeli nie name.startswith('_')}
BaseExceptions = (BaseException,)
jeżeli 'java' w sys.platform:
    # jython: Java throwables can be raised too, so treat them as exceptions.
    zaimportuj java
    BaseExceptions = (BaseException, java.lang.Throwable)
FILTER_DIR = Prawda
# Workaround for issue #12370
# Without this, the __class__ properties wouldn't be set correctly
_safe_super = super
def _is_instance_mock(obj):
    # Can't use isinstance on Mock objects because they override __class__;
    # the base class for all mocks is NonCallableMock, so check the real type.
    zwróć issubclass(type(obj), NonCallableMock)
def _is_exception(obj):
    # True when obj is an exception instance, or a class derived from one of
    # the BaseExceptions roots.
    zwróć (
        isinstance(obj, BaseExceptions) albo
        isinstance(obj, type) oraz issubclass(obj, BaseExceptions)
    )
klasa _slotted(object):
    # Minimal slotted class, used only to obtain the slot-descriptor type below.
    __slots__ = ['a']
# Descriptor types whose access must not be triggered during spec'ing.
DescriptorTypes = (
    type(_slotted.a),
    property,
)
def _get_signature_object(func, as_instance, eat_self):
"""
Given an arbitrary, possibly callable object, try to create a suitable
signature object.
Return a (reduced func, signature) tuple, albo Nic.
"""
jeżeli isinstance(func, type) oraz nie as_instance:
# If it's a type oraz should be modelled jako a type, use __init__.
spróbuj:
func = func.__init__
wyjąwszy AttributeError:
zwróć Nic
# Skip the `self` argument w __init__
eat_self = Prawda
albo_inaczej nie isinstance(func, FunctionTypes):
# If we really want to mousuń an instance of the dalejed type,
# __call__ should be looked up, nie __init__.
spróbuj:
func = func.__call__
wyjąwszy AttributeError:
zwróć Nic
jeżeli eat_self:
sig_func = partial(func, Nic)
inaczej:
sig_func = func
spróbuj:
zwróć func, inspect.signature(sig_func)
wyjąwszy ValueError:
# Certain callable types are nie supported by inspect.signature()
zwróć Nic
def _check_signature(func, mock, skipfirst, instance=Nieprawda):
    # Install a signature-checking stub on the mock's class so that calls are
    # validated against func's real signature.
    sig = _get_signature_object(func, instance, skipfirst)
    jeżeli sig jest Nic:
        zwróć
    func, sig = sig
    def checksig(_mock_self, *args, **kwargs):
        # Signature.bind() raises TypeError on a mismatched call.
        sig.bind(*args, **kwargs)
    _copy_func_details(func, checksig)
    type(mock)._mock_check_sig = checksig
def _copy_func_details(func, funcopy):
    # Mirror func's metadata onto funcopy; every attribute is optional, so
    # missing ones are silently skipped.
    funcopy.__name__ = func.__name__
    funcopy.__doc__ = func.__doc__
    spróbuj:
        funcopy.__text_signature__ = func.__text_signature__
    wyjąwszy AttributeError:
        dalej
    # We explicitly don't copy func.__dict__ into this copy as it would
    # expose original attributes that should be mocked.
    spróbuj:
        funcopy.__module__ = func.__module__
    wyjąwszy AttributeError:
        dalej
    spróbuj:
        funcopy.__defaults__ = func.__defaults__
    wyjąwszy AttributeError:
        dalej
    spróbuj:
        funcopy.__kwdefaults__ = func.__kwdefaults__
    wyjąwszy AttributeError:
        dalej
def _callable(obj):
    # A type is always callable; otherwise look for a __call__ attribute.
    jeżeli isinstance(obj, type):
        zwróć Prawda
    jeżeli getattr(obj, '__call__', Nic) jest nie Nic:
        zwróć Prawda
    zwróć Nieprawda
def _is_list(obj):
    # Checks for exactly list or tuple (deliberately not subclasses).
    # XXXX badly named!
    zwróć type(obj) w (list, tuple)
def _instance_callable(obj):
    """Given an object, return Prawda when the object is callable.
    For classes, return Prawda when instances would be callable."""
    jeżeli nie isinstance(obj, type):
        # already an instance
        zwróć getattr(obj, '__call__', Nic) jest nie Nic
    # *could* be broken by a class overriding __mro__ or __dict__ via
    # a metaclass
    dla base w (obj,) + obj.__mro__:
        jeżeli base.__dict__.get('__call__') jest nie Nic:
            zwróć Prawda
    zwróć Nieprawda
def _set_signature(mock, original, instance=Nieprawda):
    # Creates a function with signature (*args, **kwargs) that delegates to a
    # mock. It still does signature checking by calling a lambda with the same
    # signature as the original.
    jeżeli nie _callable(original):
        zwróć
    skipfirst = isinstance(original, type)
    result = _get_signature_object(original, instance, skipfirst)
    jeżeli result jest Nic:
        zwróć
    func, sig = result
    def checksig(*args, **kwargs):
        sig.bind(*args, **kwargs)
    _copy_func_details(func, checksig)
    name = original.__name__
    jeżeli nie name.isidentifier():
        name = 'funcopy'
    context = {'_checksig_': checksig, 'mock': mock}
    src = """def %s(*args, **kwargs):
    _checksig_(*args, **kwargs)
    zwróć mock(*args, **kwargs)""" % name
    exec (src, context)
    funcopy = context[name]
    _setup_func(funcopy, mock)
    zwróć funcopy
def _setup_func(funcopy, mock):
    # Wire the mock's assertion helpers and call bookkeeping onto the plain
    # function copy produced by _set_signature().
    funcopy.mock = mock
    # can't use isinstance with mocks
    jeżeli nie _is_instance_mock(mock):
        zwróć
    def assert_called_with(*args, **kwargs):
        zwróć mock.assert_called_with(*args, **kwargs)
    def assert_called_once_with(*args, **kwargs):
        zwróć mock.assert_called_once_with(*args, **kwargs)
    def assert_has_calls(*args, **kwargs):
        zwróć mock.assert_has_calls(*args, **kwargs)
    def assert_any_call(*args, **kwargs):
        zwróć mock.assert_any_call(*args, **kwargs)
    def reset_mock():
        funcopy.method_calls = _CallList()
        funcopy.mock_calls = _CallList()
        mock.reset_mock()
        ret = funcopy.return_value
        jeżeli _is_instance_mock(ret) oraz nie ret jest mock:
            ret.reset_mock()
    funcopy.called = Nieprawda
    funcopy.call_count = 0
    funcopy.call_args = Nic
    funcopy.call_args_list = _CallList()
    funcopy.method_calls = _CallList()
    funcopy.mock_calls = _CallList()
    funcopy.return_value = mock.return_value
    funcopy.side_effect = mock.side_effect
    funcopy._mock_children = mock._mock_children
    # NOTE(review): upstream mock names these assert_called_with /
    # assert_called_once_with; the "_przy" suffix looks like automated
    # translation damage to an identifier - confirm against callers.
    funcopy.assert_called_przy = assert_called_with
    funcopy.assert_called_once_przy = assert_called_once_with
    funcopy.assert_has_calls = assert_has_calls
    funcopy.assert_any_call = assert_any_call
    funcopy.reset_mock = reset_mock
    mock._mock_delegate = funcopy
def _is_magic(name):
    # True for dunder names of the form __name__ (e.g. __len__).
    zwróć '__%s__' % name[2:-2] == name
klasa _SentinelObject(object):
    "A unique, named, sentinel object."
    def __init__(self, name):
        # name is only used for the repr / debugging output.
        self.name = name
    def __repr__(self):
        zwróć 'sentinel.%s' % self.name
klasa _Sentinel(object):
    """Access attributes to return a named object, usable as a sentinel."""
    def __init__(self):
        self._sentinels = {}
    def __getattr__(self, name):
        jeżeli name == '__bases__':
            # Without this help(unittest.mock) raises an exception.
            podnieś AttributeError
        zwróć self._sentinels.setdefault(name, _SentinelObject(name))
# Shared sentinel factory plus the well-known sentinel values used throughout
# this module.
sentinel = _Sentinel()
DEFAULT = sentinel.DEFAULT
_missing = sentinel.MISSING
_deleted = sentinel.DELETED
def _copy(value):
    # Shallow-copy common container types; anything else is returned as-is.
    jeżeli type(value) w (dict, list, tuple, set):
        zwróć type(value)(value)
    zwróć value
# Attribute names that NonCallableMock.__setattr__ must treat as plain
# attributes (property setters funnel through them).
_allowed_names = {
    'return_value', '_mock_return_value', 'side_effect',
    '_mock_side_effect', '_mock_parent', '_mock_new_parent',
    '_mock_name', '_mock_new_name'
}
def _delegating_property(name):
    # Build a property that reads/writes `_mock_<name>` on the instance, or
    # defers to the mock's delegate when one has been installed.
    _allowed_names.add(name)
    _the_name = '_mock_' + name
    def _get(self, name=name, _the_name=_the_name):
        sig = self._mock_delegate
        jeżeli sig jest Nic:
            zwróć getattr(self, _the_name)
        zwróć getattr(sig, name)
    def _set(self, value, name=name, _the_name=_the_name):
        sig = self._mock_delegate
        jeżeli sig jest Nic:
            self.__dict__[_the_name] = value
        inaczej:
            setattr(sig, name, value)
    zwróć property(_get, _set)
klasa _CallList(list):
    # List subclass whose `in` operator also matches contiguous
    # sub-sequences of calls, not just single elements.
    def __contains__(self, value):
        jeżeli nie isinstance(value, list):
            zwróć list.__contains__(self, value)
        len_value = len(value)
        len_self = len(self)
        jeżeli len_value > len_self:
            zwróć Nieprawda
        # Slide a window over self looking for the exact sub-list.
        dla i w range(0, len_self - len_value + 1):
            sub_list = self[i:i+len_value]
            jeżeli sub_list == value:
                zwróć Prawda
        zwróć Nieprawda
    def __repr__(self):
        zwróć pprint.pformat(list(self))
def _check_and_set_parent(parent, value, name, new_name):
    # Attach `value` (a mock) to `parent` under name/new_name, unless it
    # already has a name/parent or the link would be self-referential.
    # Returns Prawda only when the parent link was actually installed.
    jeżeli nie _is_instance_mock(value):
        zwróć Nieprawda
    jeżeli ((value._mock_name albo value._mock_new_name) albo
            (value._mock_parent jest nie Nic) albo
            (value._mock_new_parent jest nie Nic)):
        zwróć Nieprawda
    _parent = parent
    dopóki _parent jest nie Nic:
        # setting a mock (value) as a child or return value of itself
        # should not modify the mock
        jeżeli _parent jest value:
            zwróć Nieprawda
        _parent = _parent._mock_new_parent
    jeżeli new_name:
        value._mock_new_parent = parent
        value._mock_new_name = new_name
    jeżeli name:
        value._mock_parent = parent
        value._mock_name = name
    zwróć Prawda
# Internal class used to mark that an iterable side_effect has already been
# wrapped in an iterator.
klasa _MockIter(object):
    def __init__(self, obj):
        self.obj = iter(obj)
    def __iter__(self):
        zwróć self
    def __next__(self):
        zwróć next(self.obj)
klasa Base(object):
    # Shared defaults for every mock; __init__ swallows all arguments so the
    # cooperative super() chains used by the mixins terminate cleanly.
    _mock_return_value = DEFAULT
    _mock_side_effect = Nic
    def __init__(self, *args, **kwargs):
        dalej
klasa NonCallableMock(Base):
"""A non-callable version of `Mock`"""
def __new__(cls, *args, **kw):
# every instance has its own class
# so we can create magic methods on the
# klasa without stomping on other mocks
new = type(cls.__name__, (cls,), {'__doc__': cls.__doc__})
instance = object.__new__(new)
zwróć instance
def __init__(
self, spec=Nic, wraps=Nic, name=Nic, spec_set=Nic,
parent=Nic, _spec_state=Nic, _new_name='', _new_parent=Nic,
_spec_as_instance=Nieprawda, _eat_self=Nic, unsafe=Nieprawda, **kwargs
):
jeżeli _new_parent jest Nic:
_new_parent = parent
__dict__ = self.__dict__
__dict__['_mock_parent'] = parent
__dict__['_mock_name'] = name
__dict__['_mock_new_name'] = _new_name
__dict__['_mock_new_parent'] = _new_parent
jeżeli spec_set jest nie Nic:
spec = spec_set
spec_set = Prawda
jeżeli _eat_self jest Nic:
_eat_self = parent jest nie Nic
self._mock_add_spec(spec, spec_set, _spec_as_instance, _eat_self)
__dict__['_mock_children'] = {}
__dict__['_mock_wraps'] = wraps
__dict__['_mock_delegate'] = Nic
__dict__['_mock_called'] = Nieprawda
__dict__['_mock_call_args'] = Nic
__dict__['_mock_call_count'] = 0
__dict__['_mock_call_args_list'] = _CallList()
__dict__['_mock_mock_calls'] = _CallList()
__dict__['method_calls'] = _CallList()
__dict__['_mock_unsafe'] = unsafe
jeżeli kwargs:
self.configure_mock(**kwargs)
_safe_super(NonCallableMock, self).__init__(
spec, wraps, name, spec_set, parent,
_spec_state
)
def attach_mock(self, mock, attribute):
"""
Attach a mock jako an attribute of this one, replacing its name oraz
parent. Calls to the attached mock will be recorded w the
`method_calls` oraz `mock_calls` attributes of this one."""
mock._mock_parent = Nic
mock._mock_new_parent = Nic
mock._mock_name = ''
mock._mock_new_name = Nic
setattr(self, attribute, mock)
def mock_add_spec(self, spec, spec_set=Nieprawda):
"""Add a spec to a mock. `spec` can either be an object albo a
list of strings. Only attributes on the `spec` can be fetched as
attributes z the mock.
If `spec_set` jest Prawda then only attributes on the spec can be set."""
self._mock_add_spec(spec, spec_set)
def _mock_add_spec(self, spec, spec_set, _spec_as_instance=Nieprawda,
_eat_self=Nieprawda):
_spec_class = Nic
_spec_signature = Nic
jeżeli spec jest nie Nic oraz nie _is_list(spec):
jeżeli isinstance(spec, type):
_spec_class = spec
inaczej:
_spec_class = _get_class(spec)
res = _get_signature_object(spec,
_spec_as_instance, _eat_self)
_spec_signature = res oraz res[1]
spec = dir(spec)
__dict__ = self.__dict__
__dict__['_spec_class'] = _spec_class
__dict__['_spec_set'] = spec_set
__dict__['_spec_signature'] = _spec_signature
__dict__['_mock_methods'] = spec
def __get_return_value(self):
ret = self._mock_return_value
jeżeli self._mock_delegate jest nie Nic:
ret = self._mock_delegate.return_value
jeżeli ret jest DEFAULT:
ret = self._get_child_mock(
_new_parent=self, _new_name='()'
)
self.return_value = ret
zwróć ret
def __set_return_value(self, value):
jeżeli self._mock_delegate jest nie Nic:
self._mock_delegate.return_value = value
inaczej:
self._mock_return_value = value
_check_and_set_parent(self, value, Nic, '()')
__return_value_doc = "The value to be returned when the mock jest called."
return_value = property(__get_return_value, __set_return_value,
__return_value_doc)
@property
def __class__(self):
jeżeli self._spec_class jest Nic:
zwróć type(self)
zwróć self._spec_class
called = _delegating_property('called')
call_count = _delegating_property('call_count')
call_args = _delegating_property('call_args')
call_args_list = _delegating_property('call_args_list')
mock_calls = _delegating_property('mock_calls')
def __get_side_effect(self):
delegated = self._mock_delegate
jeżeli delegated jest Nic:
zwróć self._mock_side_effect
sf = delegated.side_effect
jeżeli (sf jest nie Nic oraz nie callable(sf)
oraz nie isinstance(sf, _MockIter) oraz nie _is_exception(sf)):
sf = _MockIter(sf)
delegated.side_effect = sf
zwróć sf
def __set_side_effect(self, value):
value = _try_iter(value)
delegated = self._mock_delegate
jeżeli delegated jest Nic:
self._mock_side_effect = value
inaczej:
delegated.side_effect = value
side_effect = property(__get_side_effect, __set_side_effect)
def reset_mock(self, visited=Nic):
"Restore the mock object to its initial state."
jeżeli visited jest Nic:
visited = []
jeżeli id(self) w visited:
zwróć
visited.append(id(self))
self.called = Nieprawda
self.call_args = Nic
self.call_count = 0
self.mock_calls = _CallList()
self.call_args_list = _CallList()
self.method_calls = _CallList()
dla child w self._mock_children.values():
jeżeli isinstance(child, _SpecState):
kontynuuj
child.reset_mock(visited)
ret = self._mock_return_value
jeżeli _is_instance_mock(ret) oraz ret jest nie self:
ret.reset_mock(visited)
def configure_mock(self, **kwargs):
"""Set attributes on the mock through keyword arguments.
Attributes plus zwróć values oraz side effects can be set on child
mocks using standard dot notation oraz unpacking a dictionary w the
method call:
>>> attrs = {'method.return_value': 3, 'other.side_effect': KeyError}
>>> mock.configure_mock(**attrs)"""
dla arg, val w sorted(kwargs.items(),
# we sort on the number of dots so that
# attributes are set before we set attributes on
# attributes
key=lambda enspróbuj: entry[0].count('.')):
args = arg.split('.')
final = args.pop()
obj = self
dla entry w args:
obj = getattr(obj, entry)
setattr(obj, final, val)
def __getattr__(self, name):
jeżeli name w {'_mock_methods', '_mock_unsafe'}:
podnieś AttributeError(name)
albo_inaczej self._mock_methods jest nie Nic:
jeżeli name nie w self._mock_methods albo name w _all_magics:
podnieś AttributeError("Mock object has no attribute %r" % name)
albo_inaczej _is_magic(name):
podnieś AttributeError(name)
jeżeli nie self._mock_unsafe:
jeżeli name.startswith(('assert', 'assret')):
podnieś AttributeError(name)
result = self._mock_children.get(name)
jeżeli result jest _deleted:
podnieś AttributeError(name)
albo_inaczej result jest Nic:
wraps = Nic
jeżeli self._mock_wraps jest nie Nic:
# XXXX should we get the attribute without triggering code
# execution?
wraps = getattr(self._mock_wraps, name)
result = self._get_child_mock(
parent=self, name=name, wraps=wraps, _new_name=name,
_new_parent=self
)
self._mock_children[name] = result
albo_inaczej isinstance(result, _SpecState):
result = create_autospec(
result.spec, result.spec_set, result.instance,
result.parent, result.name
)
self._mock_children[name] = result
zwróć result
def __repr__(self):
_name_list = [self._mock_new_name]
_parent = self._mock_new_parent
last = self
dot = '.'
jeżeli _name_list == ['()']:
dot = ''
seen = set()
dopóki _parent jest nie Nic:
last = _parent
_name_list.append(_parent._mock_new_name + dot)
dot = '.'
jeżeli _parent._mock_new_name == '()':
dot = ''
_parent = _parent._mock_new_parent
# use ids here so jako nie to call __hash__ on the mocks
jeżeli id(_parent) w seen:
przerwij
seen.add(id(_parent))
_name_list = list(reversed(_name_list))
_first = last._mock_name albo 'mock'
jeżeli len(_name_list) > 1:
jeżeli _name_list[1] nie w ('()', '().'):
_first += '.'
_name_list[0] = _first
name = ''.join(_name_list)
name_string = ''
jeżeli name nie w ('mock', 'mock.'):
name_string = ' name=%r' % name
spec_string = ''
jeżeli self._spec_class jest nie Nic:
spec_string = ' spec=%r'
jeżeli self._spec_set:
spec_string = ' spec_set=%r'
spec_string = spec_string % self._spec_class.__name__
zwróć "<%s%s%s id='%s'>" % (
type(self).__name__,
name_string,
spec_string,
id(self)
)
def __dir__(self):
"""Filter the output of `dir(mock)` to only useful members."""
jeżeli nie FILTER_DIR:
zwróć object.__dir__(self)
extras = self._mock_methods albo []
from_type = dir(type(self))
from_dict = list(self.__dict__)
from_type = [e dla e w from_type jeżeli nie e.startswith('_')]
from_dict = [e dla e w from_dict jeżeli nie e.startswith('_') albo
_is_magic(e)]
zwróć sorted(set(extras + from_type + from_dict +
list(self._mock_children)))
def __setattr__(self, name, value):
jeżeli name w _allowed_names:
# property setters go through here
zwróć object.__setattr__(self, name, value)
albo_inaczej (self._spec_set oraz self._mock_methods jest nie Nic oraz
name nie w self._mock_methods oraz
name nie w self.__dict__):
podnieś AttributeError("Mock object has no attribute '%s'" % name)
albo_inaczej name w _unsupported_magics:
msg = 'Attempting to set unsupported magic method %r.' % name
podnieś AttributeError(msg)
albo_inaczej name w _all_magics:
jeżeli self._mock_methods jest nie Nic oraz name nie w self._mock_methods:
podnieś AttributeError("Mock object has no attribute '%s'" % name)
jeżeli nie _is_instance_mock(value):
setattr(type(self), name, _get_method(name, value))
original = value
value = lambda *args, **kw: original(self, *args, **kw)
inaczej:
# only set _new_name oraz nie name so that mock_calls jest tracked
# but nie method calls
_check_and_set_parent(self, value, Nic, name)
setattr(type(self), name, value)
self._mock_children[name] = value
albo_inaczej name == '__class__':
self._spec_class = value
zwróć
inaczej:
jeżeli _check_and_set_parent(self, value, name, name):
self._mock_children[name] = value
zwróć object.__setattr__(self, name, value)
def __delattr__(self, name):
jeżeli name w _all_magics oraz name w type(self).__dict__:
delattr(type(self), name)
jeżeli name nie w self.__dict__:
# dla magic methods that are still MagicProxy objects oraz
# nie set on the instance itself
zwróć
jeżeli name w self.__dict__:
object.__delattr__(self, name)
obj = self._mock_children.get(name, _missing)
jeżeli obj jest _deleted:
podnieś AttributeError(name)
jeżeli obj jest nie _missing:
usuń self._mock_children[name]
self._mock_children[name] = _deleted
def _format_mock_call_signature(self, args, kwargs):
name = self._mock_name albo 'mock'
zwróć _format_call_signature(name, args, kwargs)
def _format_mock_failure_message(self, args, kwargs):
message = 'Expected call: %s\nActual call: %s'
expected_string = self._format_mock_call_signature(args, kwargs)
call_args = self.call_args
jeżeli len(call_args) == 3:
call_args = call_args[1:]
actual_string = self._format_mock_call_signature(*call_args)
zwróć message % (expected_string, actual_string)
def _call_matcher(self, _call):
"""
Given a call (or simply a (args, kwargs) tuple), zwróć a
comparison key suitable dla matching przy other calls.
This jest a best effort method which relies on the spec's signature,
jeżeli available, albo falls back on the arguments themselves.
"""
sig = self._spec_signature
jeżeli sig jest nie Nic:
jeżeli len(_call) == 2:
name = ''
args, kwargs = _call
inaczej:
name, args, kwargs = _call
spróbuj:
zwróć name, sig.bind(*args, **kwargs)
wyjąwszy TypeError jako e:
zwróć e.with_traceback(Nic)
inaczej:
zwróć _call
def assert_not_called(_mock_self):
"""assert that the mock was never called.
"""
self = _mock_self
jeżeli self.call_count != 0:
msg = ("Expected '%s' to nie have been called. Called %s times." %
(self._mock_name albo 'mock', self.call_count))
podnieś AssertionError(msg)
def assert_called_with(_mock_self, *args, **kwargs):
"""assert that the mock was called przy the specified arguments.
Raises an AssertionError jeżeli the args oraz keyword args dalejed w are
different to the last call to the mock."""
self = _mock_self
jeżeli self.call_args jest Nic:
expected = self._format_mock_call_signature(args, kwargs)
podnieś AssertionError('Expected call: %s\nNot called' % (expected,))
def _error_message():
msg = self._format_mock_failure_message(args, kwargs)
zwróć msg
expected = self._call_matcher((args, kwargs))
actual = self._call_matcher(self.call_args)
jeżeli expected != actual:
cause = expected jeżeli isinstance(expected, Exception) inaczej Nic
podnieś AssertionError(_error_message()) z cause
def assert_called_once_with(_mock_self, *args, **kwargs):
"""assert that the mock was called exactly once oraz przy the specified
arguments."""
self = _mock_self
jeżeli nie self.call_count == 1:
msg = ("Expected '%s' to be called once. Called %s times." %
(self._mock_name albo 'mock', self.call_count))
podnieś AssertionError(msg)
zwróć self.assert_called_with(*args, **kwargs)
def assert_has_calls(self, calls, any_order=Nieprawda):
"""assert the mock has been called przy the specified calls.
The `mock_calls` list jest checked dla the calls.
If `any_order` jest Nieprawda (the default) then the calls must be
sequential. There can be extra calls before albo after the
specified calls.
If `any_order` jest Prawda then the calls can be w any order, but
they must all appear w `mock_calls`."""
expected = [self._call_matcher(c) dla c w calls]
cause = expected jeżeli isinstance(expected, Exception) inaczej Nic
all_calls = _CallList(self._call_matcher(c) dla c w self.mock_calls)
jeżeli nie any_order:
jeżeli expected nie w all_calls:
podnieś AssertionError(
'Calls nie found.\nExpected: %r\n'
'Actual: %r' % (calls, self.mock_calls)
) z cause
zwróć
all_calls = list(all_calls)
not_found = []
dla kall w expected:
spróbuj:
all_calls.remove(kall)
wyjąwszy ValueError:
not_found.append(kall)
jeżeli not_found:
podnieś AssertionError(
'%r nie all found w call list' % (tuple(nie_found),)
) z cause
def assert_any_call(self, *args, **kwargs):
"""assert the mock has been called przy the specified arguments.
The assert dalejes jeżeli the mock has *ever* been called, unlike
`assert_called_with` oraz `assert_called_once_with` that only dalej if
the call jest the most recent one."""
expected = self._call_matcher((args, kwargs))
actual = [self._call_matcher(c) dla c w self.call_args_list]
jeżeli expected nie w actual:
cause = expected jeżeli isinstance(expected, Exception) inaczej Nic
expected_string = self._format_mock_call_signature(args, kwargs)
podnieś AssertionError(
'%s call nie found' % expected_string
) z cause
def _get_child_mock(self, **kw):
    """Create the child mocks for attributes and return value.
    By default child mocks will be the same type as the parent.
    Subclasses of Mock may want to override this to customize the way
    child mocks are made.

    For non-callable mocks the callable variant will be used (rather than
    any custom subclass).
    """
    _type = type(self)
    jeżeli nie issubclass(_type, CallableMixin):
        # Non-callable parent: children still get the callable variant.
        jeżeli issubclass(_type, NonCallableMagicMock):
            klass = MagicMock
        albo_inaczej issubclass(_type, NonCallableMock) :
            klass = Mock
    inaczej:
        # Callable parent: the class one step up the MRO is the
        # corresponding concrete mock class.
        klass = _type.__mro__[1]
    zwróć klass(**kw)
def _try_iter(obj):
jeżeli obj jest Nic:
zwróć obj
jeżeli _is_exception(obj):
zwróć obj
jeżeli _callable(obj):
zwróć obj
spróbuj:
zwróć iter(obj)
wyjąwszy TypeError:
# XXXX backwards compatibility
# but this will blow up on first call - so maybe we should fail early?
zwróć obj
klasa CallableMixin(Base):
    """Mixin that makes a mock callable: records every call on the mock
    itself and on its ancestors, then applies side_effect / wraps /
    return_value to produce the result."""

    def __init__(self, spec=Nic, side_effect=Nic, return_value=DEFAULT,
                 wraps=Nic, name=Nic, spec_set=Nic, parent=Nic,
                 _spec_state=Nic, _new_name='', _new_parent=Nic, **kwargs):
        # Store directly in __dict__ to bypass any __setattr__ machinery.
        self.__dict__['_mock_return_value'] = return_value

        _safe_super(CallableMixin, self).__init__(
            spec, wraps, name, spec_set, parent,
            _spec_state, _new_name, _new_parent, **kwargs
        )

        self.side_effect = side_effect

    def _mock_check_sig(self, *args, **kwargs):
        # stub method that can be replaced with one with a specific signature
        dalej

    def __call__(_mock_self, *args, **kwargs):
        # can't use self in-case a function / method we are mocking uses self
        # in the signature
        _mock_self._mock_check_sig(*args, **kwargs)
        zwróć _mock_self._mock_call(*args, **kwargs)

    def _mock_call(_mock_self, *args, **kwargs):
        """Record the call (on self, on all ancestors' mock_calls and
        method_calls) and compute the return value."""
        self = _mock_self
        self.called = Prawda
        self.call_count += 1
        _new_name = self._mock_new_name
        _new_parent = self._mock_new_parent

        # Record this call on the mock itself.
        _call = _Call((args, kwargs), two=Prawda)
        self.call_args = _call
        self.call_args_list.append(_call)
        self.mock_calls.append(_Call(('', args, kwargs)))

        # Walk up the parent chain, building dotted call names and
        # recording the call on every ancestor.
        seen = set()
        skip_next_dot = _new_name == '()'
        do_method_calls = self._mock_parent jest nie Nic
        name = self._mock_name
        dopóki _new_parent jest nie Nic:
            this_mock_call = _Call((_new_name, args, kwargs))
            jeżeli _new_parent._mock_new_name:
                dot = '.'
                jeżeli skip_next_dot:
                    dot = ''

                skip_next_dot = Nieprawda
                jeżeli _new_parent._mock_new_name == '()':
                    skip_next_dot = Prawda

                _new_name = _new_parent._mock_new_name + dot + _new_name

            jeżeli do_method_calls:
                jeżeli _new_name == name:
                    this_method_call = this_mock_call
                inaczej:
                    this_method_call = _Call((name, args, kwargs))
                _new_parent.method_calls.append(this_method_call)

                do_method_calls = _new_parent._mock_parent jest nie Nic
                jeżeli do_method_calls:
                    name = _new_parent._mock_name + '.' + name

            _new_parent.mock_calls.append(this_mock_call)
            _new_parent = _new_parent._mock_new_parent

            # use ids here so as not to call __hash__ on the mocks
            _new_parent_id = id(_new_parent)
            jeżeli _new_parent_id w seen:
                przerwij
            seen.add(_new_parent_id)

        ret_val = DEFAULT
        effect = self.side_effect
        jeżeli effect jest nie Nic:
            jeżeli _is_exception(effect):
                podnieś effect

            jeżeli nie _callable(effect):
                # Iterable side effect: return (or raise) the next value.
                result = next(effect)
                jeżeli _is_exception(result):
                    podnieś result
                jeżeli result jest DEFAULT:
                    result = self.return_value
                zwróć result

            ret_val = effect(*args, **kwargs)

        # A wrapped object is only consulted when no explicit return
        # value has been configured.
        jeżeli (self._mock_wraps jest nie Nic oraz
             self._mock_return_value jest DEFAULT):
            zwróć self._mock_wraps(*args, **kwargs)
        jeżeli ret_val jest DEFAULT:
            ret_val = self.return_value
        zwróć ret_val
klasa Mock(CallableMixin, NonCallableMock):
    """
    Create a new `Mock` object. `Mock` takes several optional arguments
    that specify the behaviour of the Mock object:

    * `spec`: This can be either a list of strings or an existing object (a
      class or instance) that acts as the specification for the mock object. If
      you pass in an object then a list of strings is formed by calling dir on
      the object (excluding unsupported magic attributes and methods). Accessing
      any attribute not in this list will raise an `AttributeError`.

      If `spec` is an object (rather than a list of strings) then
      `mock.__class__` returns the class of the spec object. This allows mocks
      to pass `isinstance` tests.

    * `spec_set`: A stricter variant of `spec`. If used, attempting to *set*
      or get an attribute on the mock that isn't on the object passed as
      `spec_set` will raise an `AttributeError`.

    * `side_effect`: A function to be called whenever the Mock is called. See
      the `side_effect` attribute. Useful for raising exceptions or
      dynamically changing return values. The function is called with the same
      arguments as the mock, and unless it returns `DEFAULT`, the return
      value of this function is used as the return value.

      If `side_effect` is an iterable then each call to the mock will return
      the next value from the iterable. If any of the members of the iterable
      are exceptions they will be raised instead of returned.

    * `return_value`: The value returned when the mock is called. By default
      this is a new Mock (created on first access). See the
      `return_value` attribute.

    * `wraps`: Item for the mock object to wrap. If `wraps` is not `Nic` then
      calling the Mock will pass the call through to the wrapped object
      (returning the real result). Attribute access on the mock will return a
      Mock object that wraps the corresponding attribute of the wrapped object
      (so attempting to access an attribute that doesn't exist will raise an
      `AttributeError`).

      If the mock has an explicit `return_value` set then calls are not passed
      to the wrapped object and the `return_value` is returned instead.

    * `name`: If the mock has a name then it will be used in the repr of the
      mock. This can be useful for debugging. The name is propagated to child
      mocks.

    Mocks can also be called with arbitrary keyword arguments. These will be
    used to set attributes on the mock after it is created.
    """
def _dot_lookup(thing, comp, import_path):
spróbuj:
zwróć getattr(thing, comp)
wyjąwszy AttributeError:
__import__(import_path)
zwróć getattr(thing, comp)
def _importer(target):
components = target.split('.')
import_path = components.pop(0)
thing = __import__(import_path)
dla comp w components:
import_path += ".%s" % comp
thing = _dot_lookup(thing, comp, import_path)
zwróć thing
def _is_started(patcher):
    # XXXX horrible
    # A patcher only gains `is_local` inside __enter__, so the attribute's
    # presence is (ab)used here as the "patcher has been started" flag.
    zwróć hasattr(patcher, 'is_local')
klasa _patch(object):
    """Implementation behind `patch`, `patch.object` and `patch.multiple`.

    Usable as a context manager, function decorator or class decorator;
    the actual attribute replacement happens in __enter__/__exit__.
    """

    # Set by patch.multiple: name of the keyword argument under which the
    # created mock is passed to a decorated function.
    attribute_name = Nic
    # Patchers activated via start(), so patch.stopall() can undo them.
    _active_patches = []

    def __init__(
            self, getter, attribute, new, spec, create,
            spec_set, autospec, new_callable, kwargs
        ):
        jeżeli new_callable jest nie Nic:
            jeżeli new jest nie DEFAULT:
                podnieś ValueError(
                    "Cannot use 'new' oraz 'new_callable' together"
                )
            jeżeli autospec jest nie Nic:
                podnieś ValueError(
                    "Cannot use 'autospec' oraz 'new_callable' together"
                )

        self.getter = getter
        self.attribute = attribute
        self.new = new
        self.new_callable = new_callable
        self.spec = spec
        self.create = create
        self.has_local = Nieprawda
        self.spec_set = spec_set
        self.autospec = autospec
        self.kwargs = kwargs
        self.additional_patchers = []

    def copy(self):
        # Independent patcher with the same configuration; used by the
        # class decorator so each test method gets its own instance.
        patcher = _patch(
            self.getter, self.attribute, self.new, self.spec,
            self.create, self.spec_set,
            self.autospec, self.new_callable, self.kwargs
        )
        patcher.attribute_name = self.attribute_name
        patcher.additional_patchers = [
            p.copy() dla p w self.additional_patchers
        ]
        zwróć patcher

    def __call__(self, func):
        jeżeli isinstance(func, type):
            zwróć self.decorate_class(func)
        zwróć self.decorate_callable(func)

    def decorate_class(self, klass):
        # Wrap every callable attribute whose name starts with
        # patch.TEST_PREFIX in a fresh copy of this patcher.
        dla attr w dir(klass):
            jeżeli nie attr.startswith(patch.TEST_PREFIX):
                kontynuuj

            attr_value = getattr(klass, attr)
            jeżeli nie hasattr(attr_value, "__call__"):
                kontynuuj

            patcher = self.copy()
            setattr(klass, attr, patcher(attr_value))
        zwróć klass

    def decorate_callable(self, func):
        # Stacked patch decorators share a single wrapper; each additional
        # decorator just appends itself to the existing `patchings` list.
        jeżeli hasattr(func, 'patchings'):
            func.patchings.append(self)
            zwróć func

        @wraps(func)
        def patched(*args, **keywargs):
            extra_args = []
            entered_patchers = []

            exc_info = tuple()
            spróbuj:
                dla patching w patched.patchings:
                    arg = patching.__enter__()
                    entered_patchers.append(patching)
                    jeżeli patching.attribute_name jest nie Nic:
                        # patch.multiple: pass created mocks by keyword.
                        keywargs.update(arg)
                    albo_inaczej patching.new jest DEFAULT:
                        # Created mock is passed as an extra positional arg.
                        extra_args.append(arg)

                args += tuple(extra_args)
                zwróć func(*args, **keywargs)
            wyjąwszy:
                jeżeli (patching nie w entered_patchers oraz
                    _is_started(patching)):
                    # the patcher may have been started, but an exception
                    # raised whilst entering one of its additional_patchers
                    entered_patchers.append(patching)
                # Pass the exception to __exit__
                exc_info = sys.exc_info()
                # re-raise the exception
                podnieś
            w_końcu:
                dla patching w reversed(entered_patchers):
                    patching.__exit__(*exc_info)

        patched.patchings = [self]
        zwróć patched

    def get_original(self):
        """Return (original, local): the attribute being replaced and
        whether it lived directly in the target's own __dict__."""
        target = self.getter()
        name = self.attribute

        original = DEFAULT
        local = Nieprawda

        spróbuj:
            original = target.__dict__[name]
        wyjąwszy (AttributeError, KeyError):
            # Fall back to normal (possibly inherited) attribute lookup.
            original = getattr(target, name, DEFAULT)
        inaczej:
            local = Prawda

        jeżeli name w _builtins oraz isinstance(target, ModuleType):
            # Builtins referenced through a module can always be patched.
            self.create = Prawda

        jeżeli nie self.create oraz original jest DEFAULT:
            podnieś AttributeError(
                "%s does nie have the attribute %r" % (target, name)
            )
        zwróć original, local

    def __enter__(self):
        """Perform the patch."""
        new, spec, spec_set = self.new, self.spec, self.spec_set
        autospec, kwargs = self.autospec, self.kwargs
        new_callable = self.new_callable
        self.target = self.getter()

        # normalise Nieprawda to Nic
        jeżeli spec jest Nieprawda:
            spec = Nic
        jeżeli spec_set jest Nieprawda:
            spec_set = Nic
        jeżeli autospec jest Nieprawda:
            autospec = Nic

        jeżeli spec jest nie Nic oraz autospec jest nie Nic:
            podnieś TypeError("Can't specify spec oraz autospec")
        jeżeli ((spec jest nie Nic albo autospec jest nie Nic) oraz
            spec_set nie w (Prawda, Nic)):
            podnieś TypeError("Can't provide explicit spec_set *and* spec albo autospec")

        original, local = self.get_original()

        jeżeli new jest DEFAULT oraz autospec jest Nic:
            inherit = Nieprawda
            jeżeli spec jest Prawda:
                # set spec to the object we are replacing
                spec = original
                jeżeli spec_set jest Prawda:
                    spec_set = original
                    spec = Nic
            albo_inaczej spec jest nie Nic:
                jeżeli spec_set jest Prawda:
                    spec_set = spec
                    spec = Nic
            albo_inaczej spec_set jest Prawda:
                spec_set = original

            jeżeli spec jest nie Nic albo spec_set jest nie Nic:
                jeżeli original jest DEFAULT:
                    podnieś TypeError("Can't use 'spec' przy create=Prawda")
                jeżeli isinstance(original, type):
                    # If we're patching out a class and there is a spec
                    inherit = Prawda

            Klass = MagicMock
            _kwargs = {}
            jeżeli new_callable jest nie Nic:
                Klass = new_callable
            albo_inaczej spec jest nie Nic albo spec_set jest nie Nic:
                this_spec = spec
                jeżeli spec_set jest nie Nic:
                    this_spec = spec_set
                jeżeli _is_list(this_spec):
                    not_callable = '__call__' nie w this_spec
                inaczej:
                    not_callable = nie callable(this_spec)
                jeżeli not_callable:
                    Klass = NonCallableMagicMock

            jeżeli spec jest nie Nic:
                _kwargs['spec'] = spec
            jeżeli spec_set jest nie Nic:
                _kwargs['spec_set'] = spec_set

            # add a name to mocks
            jeżeli (isinstance(Klass, type) oraz
                issubclass(Klass, NonCallableMock) oraz self.attribute):
                _kwargs['name'] = self.attribute

            _kwargs.update(kwargs)
            new = Klass(**_kwargs)

            jeżeli inherit oraz _is_instance_mock(new):
                # we can only tell if the instance should be callable if the
                # spec is not a list
                this_spec = spec
                jeżeli spec_set jest nie Nic:
                    this_spec = spec_set
                jeżeli (nie _is_list(this_spec) oraz nie
                    _instance_callable(this_spec)):
                    Klass = NonCallableMagicMock

                _kwargs.pop('name')
                new.return_value = Klass(_new_parent=new, _new_name='()',
                                         **_kwargs)
        albo_inaczej autospec jest nie Nic:
            # spec is ignored, new *must* be default, spec_set is treated
            # as a boolean. Should we check spec is not Nic and that spec_set
            # is a bool?
            jeżeli new jest nie DEFAULT:
                podnieś TypeError(
                    "autospec creates the mock dla you. Can't specify "
                    "autospec oraz new."
                )
            jeżeli original jest DEFAULT:
                podnieś TypeError("Can't use 'autospec' przy create=Prawda")
            spec_set = bool(spec_set)
            jeżeli autospec jest Prawda:
                autospec = original

            new = create_autospec(autospec, spec_set=spec_set,
                                  _name=self.attribute, **kwargs)
        albo_inaczej kwargs:
            # can't set keyword args when we aren't creating the mock
            # XXXX If new is a Mock we could call new.configure_mock(**kwargs)
            podnieś TypeError("Can't dalej kwargs to a mock we aren't creating")

        new_attr = new

        self.temp_original = original
        self.is_local = local
        setattr(self.target, self.attribute, new_attr)
        jeżeli self.attribute_name jest nie Nic:
            # patch.multiple: return a dict of {attribute_name: mock} and
            # enter all the sibling patchers as well.
            extra_args = {}
            jeżeli self.new jest DEFAULT:
                extra_args[self.attribute_name] = new
            dla patching w self.additional_patchers:
                arg = patching.__enter__()
                jeżeli patching.new jest DEFAULT:
                    extra_args.update(arg)
            zwróć extra_args

        zwróć new

    def __exit__(self, *exc_info):
        """Undo the patch."""
        jeżeli nie _is_started(self):
            podnieś RuntimeError('stop called on unstarted patcher')

        jeżeli self.is_local oraz self.temp_original jest nie DEFAULT:
            setattr(self.target, self.attribute, self.temp_original)
        inaczej:
            delattr(self.target, self.attribute)
            jeżeli nie self.create oraz nie hasattr(self.target, self.attribute):
                # needed for proxy objects like django settings
                setattr(self.target, self.attribute, self.temp_original)

        usuń self.temp_original
        usuń self.is_local
        usuń self.target
        dla patcher w reversed(self.additional_patchers):
            jeżeli _is_started(patcher):
                patcher.__exit__(*exc_info)

    def start(self):
        """Activate a patch, returning any created mock."""
        result = self.__enter__()
        self._active_patches.append(self)
        zwróć result

    def stop(self):
        """Stop an active patch."""
        spróbuj:
            self._active_patches.remove(self)
        wyjąwszy ValueError:
            # If the patch hasn't been started this will fail
            dalej

        zwróć self.__exit__()
def _get_target(target):
spróbuj:
target, attribute = target.rsplit('.', 1)
wyjąwszy (TypeError, ValueError):
podnieś TypeError("Need a valid target to patch. You supplied: %r" %
(target,))
getter = lambda: _importer(target)
zwróć getter, attribute
def _patch_object(
        target, attribute, new=DEFAULT, spec=Nic,
        create=Nieprawda, spec_set=Nic, autospec=Nic,
        new_callable=Nic, **kwargs
    ):
    """
    patch the named member (`attribute`) on an object (`target`) with a mock
    object.

    `patch.object` can be used as a decorator, class decorator or a context
    manager. Arguments `new`, `spec`, `create`, `spec_set`,
    `autospec` and `new_callable` have the same meaning as for `patch`. Like
    `patch`, `patch.object` takes arbitrary keyword arguments for configuring
    the mock object it creates.

    When used as a class decorator `patch.object` honours `patch.TEST_PREFIX`
    for choosing which methods to wrap.
    """
    # `target` is already an object (not a dotted path), so the getter
    # simply closes over it.
    getter = lambda: target
    zwróć _patch(
        getter, attribute, new, spec, create,
        spec_set, autospec, new_callable, kwargs
    )
def _patch_multiple(target, spec=Nic, create=Nieprawda, spec_set=Nic,
                    autospec=Nic, new_callable=Nic, **kwargs):
    """Perform multiple patches in a single call. It takes the object to be
    patched (either as an object or a string to fetch the object by importing)
    and keyword arguments for the patches::

        przy patch.multiple(settings, FIRST_PATCH='one', SECOND_PATCH='two'):
            ...

    Use `DEFAULT` as the value if you want `patch.multiple` to create
    mocks for you. In this case the created mocks are passed into a decorated
    function by keyword, and a dictionary is returned when `patch.multiple` is
    used as a context manager.

    `patch.multiple` can be used as a decorator, class decorator or a context
    manager. The arguments `spec`, `spec_set`, `create`,
    `autospec` and `new_callable` have the same meaning as for `patch`. These
    arguments will be applied to *all* patches done by `patch.multiple`.

    When used as a class decorator `patch.multiple` honours `patch.TEST_PREFIX`
    for choosing which methods to wrap.
    """
    jeżeli type(target) jest str:
        getter = lambda: _importer(target)
    inaczej:
        getter = lambda: target

    jeżeli nie kwargs:
        podnieś ValueError(
            'Must supply at least one keyword argument przy patch.multiple'
        )
    # need to wrap in a list for python 3, where items is a view
    items = list(kwargs.items())
    attribute, new = items[0]
    # The first patcher carries all the others as additional_patchers so
    # they are entered and exited together.
    patcher = _patch(
        getter, attribute, new, spec, create, spec_set,
        autospec, new_callable, {}
    )
    patcher.attribute_name = attribute
    dla attribute, new w items[1:]:
        this_patcher = _patch(
            getter, attribute, new, spec, create, spec_set,
            autospec, new_callable, {}
        )
        this_patcher.attribute_name = attribute
        patcher.additional_patchers.append(this_patcher)
    zwróć patcher
def patch(
        target, new=DEFAULT, spec=Nic, create=Nieprawda,
        spec_set=Nic, autospec=Nic, new_callable=Nic, **kwargs
    ):
    """
    `patch` acts as a function decorator, class decorator or a context
    manager. Inside the body of the function or przy statement, the `target`
    is patched with a `new` object. When the function/przy statement exits
    the patch is undone.

    If `new` is omitted, then the target is replaced with a
    `MagicMock`. If `patch` is used as a decorator and `new` is
    omitted, the created mock is passed in as an extra argument to the
    decorated function. If `patch` is used as a context manager the created
    mock is returned by the context manager.

    `target` should be a string in the form `'package.module.ClassName'`. The
    `target` is imported and the specified object replaced with the `new`
    object, so the `target` must be importable from the environment you are
    calling `patch` from. The target is imported when the decorated function
    is executed, not at decoration time.

    The `spec` and `spec_set` keyword arguments are passed to the `MagicMock`
    if patch is creating one for you.

    In addition you can pass `spec=Prawda` or `spec_set=Prawda`, which causes
    patch to pass in the object being mocked as the spec/spec_set object.

    `new_callable` allows you to specify a different class, or callable object,
    that will be called to create the `new` object. By default `MagicMock` is
    used.

    A more powerful form of `spec` is `autospec`. If you set `autospec=Prawda`
    then the mock will be created with a spec from the object being replaced.
    All attributes of the mock will also have the spec of the corresponding
    attribute of the object being replaced. Methods and functions being
    mocked will have their arguments checked and will raise a `TypeError` if
    they are called with the wrong signature. For mocks replacing a class,
    their return value (the 'instance') will have the same spec as the class.

    Instead of `autospec=Prawda` you can pass `autospec=some_object` to use an
    arbitrary object as the spec instead of the one being replaced.

    By default `patch` will fail to replace attributes that don't exist. If
    you pass in `create=Prawda`, and the attribute doesn't exist, patch will
    create the attribute for you when the patched function is called, and
    delete it again afterwards. This is useful for writing tests against
    attributes that your production code creates at runtime. It is off by
    default because it can be dangerous. With it switched on you can write
    passing tests against APIs that don't actually exist!

    Patch can be used as a `TestCase` class decorator. It works by
    decorating each test method in the class. This reduces the boilerplate
    code when your test methods share a common patchings set. `patch` finds
    tests by looking for method names that start with `patch.TEST_PREFIX`.
    By default this is `test`, which matches the way `unittest` finds tests.
    You can specify an alternative prefix by setting `patch.TEST_PREFIX`.

    Patch can be used as a context manager, with the przy statement. Here the
    patching applies to the indented block after the przy statement. If you
    use "as" then the patched object will be bound to the name after the
    "as"; very useful if `patch` is creating a mock object for you.

    `patch` takes arbitrary keyword arguments. These will be passed to
    the `Mock` (or `new_callable`) on construction.

    `patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are
    available for alternate use-cases.
    """
    getter, attribute = _get_target(target)
    zwróć _patch(
        getter, attribute, new, spec, create,
        spec_set, autospec, new_callable, kwargs
    )
klasa _patch_dict(object):
    """
    Patch a dictionary, or dictionary like object, and restore the dictionary
    to its original state after the test.

    `in_dict` can be a dictionary or a mapping like container. If it is a
    mapping then it must at least support getting, setting and deleting items
    plus iterating over keys.

    `in_dict` can also be a string specifying the name of the dictionary, which
    will then be fetched by importing it.

    `values` can be a dictionary of values to set in the dictionary. `values`
    can also be an iterable of `(key, value)` pairs.

    If `clear` is `Prawda` then the dictionary will be cleared before the new
    values are set.

    `patch.dict` can also be called with arbitrary keyword arguments to set
    values in the dictionary::

        przy patch.dict('sys.modules', mymodule=Mock(), other_module=Mock()):
            ...

    `patch.dict` can be used as a context manager, decorator or class
    decorator. When used as a class decorator `patch.dict` honours
    `patch.TEST_PREFIX` for choosing which methods to wrap.
    """

    def __init__(self, in_dict, values=(), clear=Nieprawda, **kwargs):
        jeżeli isinstance(in_dict, str):
            # Dotted name: resolve it to the actual dictionary object.
            in_dict = _importer(in_dict)
        self.in_dict = in_dict
        # support any argument supported by dict(...) constructor
        self.values = dict(values)
        self.values.update(kwargs)
        self.clear = clear
        self._original = Nic

    def __call__(self, f):
        jeżeli isinstance(f, type):
            zwróć self.decorate_class(f)
        @wraps(f)
        def _inner(*args, **kw):
            self._patch_dict()
            spróbuj:
                zwróć f(*args, **kw)
            w_końcu:
                self._unpatch_dict()

        zwróć _inner

    def decorate_class(self, klass):
        # Wrap every callable test method with a fresh _patch_dict.
        dla attr w dir(klass):
            attr_value = getattr(klass, attr)
            jeżeli (attr.startswith(patch.TEST_PREFIX) oraz
                 hasattr(attr_value, "__call__")):
                decorator = _patch_dict(self.in_dict, self.values, self.clear)
                decorated = decorator(attr_value)
                setattr(klass, attr, decorated)
        zwróć klass

    def __enter__(self):
        """Patch the dict."""
        self._patch_dict()

    def _patch_dict(self):
        """Snapshot the original contents, then apply clear/values."""
        values = self.values
        in_dict = self.in_dict
        clear = self.clear

        spróbuj:
            original = in_dict.copy()
        wyjąwszy AttributeError:
            # dict like object with no copy method
            # must support iteration over keys
            original = {}
            dla key w in_dict:
                original[key] = in_dict[key]
        self._original = original

        jeżeli clear:
            _clear_dict(in_dict)

        spróbuj:
            in_dict.update(values)
        wyjąwszy AttributeError:
            # dict like object with no update method
            dla key w values:
                in_dict[key] = values[key]

    def _unpatch_dict(self):
        """Restore the snapshot taken by _patch_dict()."""
        in_dict = self.in_dict
        original = self._original

        _clear_dict(in_dict)

        spróbuj:
            in_dict.update(original)
        wyjąwszy AttributeError:
            dla key w original:
                in_dict[key] = original[key]

    def __exit__(self, *args):
        """Unpatch the dict."""
        self._unpatch_dict()
        zwróć Nieprawda

    # Aliases so a _patch_dict can be driven like a _patch instance.
    start = __enter__
    stop = __exit__
def _clear_dict(in_dict):
spróbuj:
in_dict.clear()
wyjąwszy AttributeError:
keys = list(in_dict)
dla key w keys:
usuń in_dict[key]
def _patch_stopall():
    """Stop all active patches. LIFO to unroll nested patches."""
    # Each stop() removes the patcher from _active_patches; iterating in
    # reverse means the removed element is always the current tail, so the
    # reversed iterator stays consistent while the list shrinks.
    dla patch w reversed(_patch._active_patches):
        patch.stop()
# Expose the companion patchers and configuration knobs as attributes of
# the `patch` function itself (patch.object, patch.dict, ...).
patch.object = _patch_object
patch.dict = _patch_dict
patch.multiple = _patch_multiple
patch.stopall = _patch_stopall
patch.TEST_PREFIX = 'test'
# Space-separated fragments of the non-numeric magic-method names that
# MagicMock supports by default; expanded to '__name__' form in _magics.
magic_methods = (
    "lt le gt ge eq ne "
    "getitem setitem delitem "
    "len contains iter "
    "hash str sizeof "
    "enter exit "
    # we added divmod and rdivmod here instead of numerics
    # because there is no idivmod
    "divmod rdivmod neg pos abs invert "
    "complex int float index "
    "trunc floor ceil "
    "bool next "
)
# Numeric magic-method name fragments; also the base for the in-place
# ('i' prefix) and reflected ('r' prefix) variants below.
# BUG FIX: the fragments "and" and "or" in this DATA string had been
# mangled (to "oraz"/"albo") by the keyword translation, so __and__,
# __or__ and their i/r variants were never configured on MagicMock.
# These are method-name fragments, not keywords, and must stay as-is.
numerics = (
    "add sub mul matmul div floordiv mod lshift rshift and xor or pow truediv"
)
# In-place ('__iadd__', ...) and reflected ('__radd__', ...) variants of
# the numeric magic methods.
inplace = ' '.join('i%s' % n dla n w numerics.split())
right = ' '.join('r%s' % n dla n w numerics.split())
# not including __prepare__, __instancecheck__, __subclasscheck__
# (as they are metaclass methods)
# __del__ is not supported at all as it causes problems if it exists
# Magic methods that are supported when named in a spec but are not
# configured on a MagicMock by default.
_non_defaults = {
    '__get__', '__set__', '__delete__', '__reversed__', '__missing__',
    '__reduce__', '__reduce_ex__', '__getinitargs__', '__getnewargs__',
    '__getstate__', '__setstate__', '__getformat__', '__setformat__',
    '__repr__', '__dir__', '__subclasses__', '__format__',
}
def _get_method(name, func):
"Turns a callable object (like a mock) into a real function"
def method(self, *args, **kw):
zwróć func(self, *args, **kw)
method.__name__ = name
zwróć method
# The full set of '__name__'-style magic methods configured by default.
_magics = {
    '__%s__' % method dla method w
    ' '.join([magic_methods, numerics, inplace, right]).split()
}

# Every magic method MagicMock knows about, default or not.
_all_magics = _magics | _non_defaults
_unsupported_magics = {
'__getattr__', '__setattr__',
'__init__', '__new__', '__prepare__'
'__instancecheck__', '__subclasscheck__',
'__del__'
}
# Magic methods whose default return value is computed from the real
# object implementation at configuration time (see _set_return_value).
_calculate_return_value = {
    '__hash__': lambda self: object.__hash__(self),
    '__str__': lambda self: object.__str__(self),
    '__sizeof__': lambda self: object.__sizeof__(self),
}
# Fixed default return values for the magic methods of a MagicMock.
_return_values = {
    '__lt__': NotImplemented,
    '__gt__': NotImplemented,
    '__le__': NotImplemented,
    '__ge__': NotImplemented,
    '__int__': 1,
    '__contains__': Nieprawda,
    '__len__': 0,
    '__exit__': Nieprawda,
    '__complex__': 1j,
    '__float__': 1.0,
    '__bool__': Prawda,
    '__index__': 1,
}
def _get_eq(self):
def __eq__(other):
ret_val = self.__eq__._mock_return_value
jeżeli ret_val jest nie DEFAULT:
zwróć ret_val
zwróć self jest other
zwróć __eq__
def _get_ne(self):
    # Default __ne__ side effect: identity-based inequality.
    def __ne__(other):
        # NOTE(review): when a return value has been configured this
        # returns the DEFAULT sentinel rather than the configured value;
        # that mirrors the upstream mock implementation of this vintage,
        # so the behaviour is preserved here — confirm before changing.
        jeżeli self.__ne__._mock_return_value jest nie DEFAULT:
            zwróć DEFAULT
        zwróć self jest nie other
    zwróć __ne__
def _get_iter(self):
    # Default __iter__ side effect: iterate the configured return value,
    # or yield nothing when none was set.
    def __iter__():
        ret_val = self.__iter__._mock_return_value
        jeżeli ret_val jest DEFAULT:
            zwróć iter([])
        # if ret_val was already an iterator, then calling iter on it should
        # return the iterator unchanged
        zwróć iter(ret_val)
    zwróć __iter__
# Magic methods whose default behaviour is installed as a side_effect
# closure over the mock (see _get_eq / _get_ne / _get_iter above).
_side_effect_methods = {
    '__eq__': _get_eq,
    '__ne__': _get_ne,
    '__iter__': _get_iter,
}
def _set_return_value(mock, method, name):
fixed = _return_values.get(name, DEFAULT)
jeżeli fixed jest nie DEFAULT:
method.return_value = fixed
zwróć
return_calulator = _calculate_return_value.get(name)
jeżeli return_calulator jest nie Nic:
spróbuj:
return_value = return_calulator(mock)
wyjąwszy AttributeError:
# XXXX why do we zwróć AttributeError here?
# set it jako a side_effect instead?
return_value = AttributeError(name)
method.return_value = return_value
zwróć
side_effector = _side_effect_methods.get(name)
jeżeli side_effector jest nie Nic:
method.side_effect = side_effector(mock)
klasa MagicMixin(object):
    """Mixin that installs MagicProxy descriptors for the supported magic
    methods, honouring any spec restriction."""

    def __init__(self, *args, **kw):
        self._mock_set_magics()  # make magic work for kwargs in init
        _safe_super(MagicMixin, self).__init__(*args, **kw)
        self._mock_set_magics()  # fix magic broken by upper level init

    def _mock_set_magics(self):
        these_magics = _magics

        jeżeli getattr(self, "_mock_methods", Nic) jest nie Nic:
            # A spec is in effect: only configure the magics it allows and
            # drop any previously-installed ones it excludes.
            these_magics = _magics.intersection(self._mock_methods)

            remove_magics = set()
            remove_magics = _magics - these_magics

            dla entry w remove_magics:
                jeżeli entry w type(self).__dict__:
                    # remove unneeded magic methods
                    delattr(self, entry)

        # don't overwrite existing attributes if called a second time
        these_magics = these_magics - set(type(self).__dict__)

        _type = type(self)
        dla entry w these_magics:
            setattr(_type, entry, MagicProxy(entry, self))
klasa NonCallableMagicMock(MagicMixin, NonCallableMock):
    """A version of `MagicMock` that isn't callable."""
    def mock_add_spec(self, spec, spec_set=Nieprawda):
        """Add a spec to a mock. `spec` can either be an object or a
        list of strings. Only attributes on the `spec` can be fetched as
        attributes from the mock.

        If `spec_set` is `Prawda` then only attributes on the spec can be
        set."""
        self._mock_add_spec(spec, spec_set)
        # Re-sync the magic-method proxies with the new spec.
        self._mock_set_magics()
klasa MagicMock(MagicMixin, Mock):
    """
    MagicMock is a subclass of Mock with default implementations
    of most of the magic methods. You can use MagicMock without having to
    configure the magic methods yourself.

    If you use the `spec` or `spec_set` arguments then *only* magic
    methods that exist in the spec will be created.

    Attributes and the return value of a `MagicMock` will also be `MagicMocks`.
    """
    def mock_add_spec(self, spec, spec_set=Nieprawda):
        """Add a spec to a mock. `spec` can either be an object or a
        list of strings. Only attributes on the `spec` can be fetched as
        attributes from the mock.

        If `spec_set` is `Prawda` then only attributes on the spec can be
        set."""
        self._mock_add_spec(spec, spec_set)
        # Re-sync the magic-method proxies with the new spec.
        self._mock_set_magics()
klasa MagicProxy(object):
    """Descriptor placed on a MagicMock's class for each supported magic
    method; on first access or call it replaces itself with a real child
    mock configured with that magic's default behaviour."""

    def __init__(self, name, parent):
        self.name = name
        self.parent = parent

    def __call__(self, *args, **kwargs):
        m = self.create_mock()
        zwróć m(*args, **kwargs)

    def create_mock(self):
        entry = self.name
        parent = self.parent
        # Attach a child mock under the magic-method name and give it its
        # default return value / side effect.
        m = parent._get_child_mock(name=entry, _new_name=entry,
                                   _new_parent=parent)
        setattr(parent, entry, m)
        _set_return_value(parent, m, entry)
        zwróć m

    def __get__(self, obj, _type=Nic):
        zwróć self.create_mock()
klasa _ANY(object):
    "A helper object that compares equal to everything."

    def __eq__(self, other):
        zwróć Prawda

    def __ne__(self, other):
        zwróć Nieprawda

    def __repr__(self):
        zwróć '<ANY>'

# Singleton used in call assertions to match any argument value.
ANY = _ANY()
def _format_call_signature(name, args, kwargs):
message = '%s(%%s)' % name
formatted_args = ''
args_string = ', '.join([repr(arg) dla arg w args])
kwargs_string = ', '.join([
'%s=%r' % (key, value) dla key, value w sorted(kwargs.items())
])
jeżeli args_string:
formatted_args = args_string
jeżeli kwargs_string:
jeżeli formatted_args:
formatted_args += ', '
formatted_args += kwargs_string
zwróć message % formatted_args
klasa _Call(tuple):
"""
A tuple dla holding the results of a call to a mock, either w the form
`(args, kwargs)` albo `(name, args, kwargs)`.
If args albo kwargs are empty then a call tuple will compare equal to
a tuple without those values. This makes comparisons less verbose::
_Call(('name', (), {})) == ('name',)
_Call(('name', (1,), {})) == ('name', (1,))
_Call(((), {'a': 'b'})) == ({'a': 'b'},)
The `_Call` object provides a useful shortcut dla comparing przy call::
_Call(((1, 2), {'a': 3})) == call(1, 2, a=3)
_Call(('foo', (1, 2), {'a': 3})) == call.foo(1, 2, a=3)
If the _Call has no name then it will match any name.
"""
def __new__(cls, value=(), name=Nic, parent=Nic, two=Nieprawda,
from_kall=Prawda):
name = ''
args = ()
kwargs = {}
_len = len(value)
jeżeli _len == 3:
name, args, kwargs = value
albo_inaczej _len == 2:
first, second = value
jeżeli isinstance(first, str):
name = first
jeżeli isinstance(second, tuple):
args = second
inaczej:
kwargs = second
inaczej:
args, kwargs = first, second
albo_inaczej _len == 1:
value, = value
jeżeli isinstance(value, str):
name = value
albo_inaczej isinstance(value, tuple):
args = value
inaczej:
kwargs = value
jeżeli two:
zwróć tuple.__new__(cls, (args, kwargs))
zwróć tuple.__new__(cls, (name, args, kwargs))
def __init__(self, value=(), name=Nic, parent=Nic, two=Nieprawda,
from_kall=Prawda):
self.name = name
self.parent = parent
self.from_kall = from_kall
def __eq__(self, other):
jeżeli other jest ANY:
zwróć Prawda
spróbuj:
len_other = len(other)
wyjąwszy TypeError:
zwróć Nieprawda
self_name = ''
jeżeli len(self) == 2:
self_args, self_kwargs = self
inaczej:
self_name, self_args, self_kwargs = self
other_name = ''
jeżeli len_other == 0:
other_args, other_kwargs = (), {}
albo_inaczej len_other == 3:
other_name, other_args, other_kwargs = other
albo_inaczej len_other == 1:
value, = other
jeżeli isinstance(value, tuple):
other_args = value
other_kwargs = {}
albo_inaczej isinstance(value, str):
other_name = value
other_args, other_kwargs = (), {}
inaczej:
other_args = ()
other_kwargs = value
inaczej:
# len 2
# could be (name, args) albo (name, kwargs) albo (args, kwargs)
first, second = other
jeżeli isinstance(first, str):
other_name = first
jeżeli isinstance(second, tuple):
other_args, other_kwargs = second, {}
inaczej:
other_args, other_kwargs = (), second
inaczej:
other_args, other_kwargs = first, second
jeżeli self_name oraz other_name != self_name:
zwróć Nieprawda
# this order jest important dla ANY to work!
zwróć (other_args, other_kwargs) == (self_args, self_kwargs)
def __call__(self, *args, **kwargs):
jeżeli self.name jest Nic:
zwróć _Call(('', args, kwargs), name='()')
name = self.name + '()'
zwróć _Call((self.name, args, kwargs), name=name, parent=self)
def __getattr__(self, attr):
    # Attribute access builds up a dotted call name lazily (call.foo.bar ...).
    jeżeli self.name jest Nic:
        zwróć _Call(name=attr, from_kall=Nieprawda)
    name = '%s.%s' % (self.name, attr)
    zwróć _Call(name=name, parent=self, from_kall=Nieprawda)
def count(self, *args, **kwargs):
    # Route through __getattr__ so 'count' is treated as a child call,
    # not as tuple.count.
    zwróć self.__getattr__('count')(*args, **kwargs)
def index(self, *args, **kwargs):
    # Route through __getattr__ so 'index' is treated as a child call,
    # not as tuple.index.
    zwróć self.__getattr__('index')(*args, **kwargs)
def __repr__(self):
    """Render the call as it would appear in code, e.g. ``call.foo(1, x=2)``."""
    jeżeli nie self.from_kall:
        name = self.name albo 'call'
        jeżeli name.startswith('()'):
            name = 'call%s' % name
        zwróć name
    jeżeli len(self) == 2:
        name = 'call'
        args, kwargs = self
    inaczej:
        name, args, kwargs = self
        jeżeli nie name:
            name = 'call'
        albo_inaczej nie name.startswith('()'):
            name = 'call.%s' % name
        inaczej:
            name = 'call%s' % name
    zwróć _format_call_signature(name, args, kwargs)
def call_list(self):
    """For a call object that represents multiple calls, `call_list`
    returns a list of all the intermediate calls as well as the
    final call."""
    vals = []
    thing = self
    # Walk up the parent chain, collecting every real call on the way.
    dopóki thing jest nie Nic:
        jeżeli thing.from_kall:
            vals.append(thing)
        thing = thing.parent
    zwróć _CallList(reversed(vals))
# Module-level singleton used to build expected-call objects, e.g. call(1).foo(2).
call = _Call(from_kall=Nieprawda)
def create_autospec(spec, spec_set=Nieprawda, instance=Nieprawda, _parent=Nic,
                    _name=Nic, **kwargs):
    """Create a mock object using another object as a spec. Attributes on the
    mock will use the corresponding attribute on the `spec` object as their
    spec.

    Functions or methods being mocked will have their arguments checked
    to check that they are called with the correct signature.

    If `spec_set` is True then attempting to set attributes that don't exist
    on the spec object will raise an `AttributeError`.

    If a class is used as a spec then the return value of the mock (the
    instance of the class) will have the same spec. You can use a class as the
    spec for an instance object by passing `instance=True`. The returned mock
    will only be callable if instances of the mock are callable.

    `create_autospec` also takes arbitrary keyword arguments that are passed to
    the constructor of the created mock."""
    jeżeli _is_list(spec):
        # can't pass a list instance to the mock constructor as it will be
        # interpreted as a list of strings
        spec = type(spec)
    is_type = isinstance(spec, type)
    _kwargs = {'spec': spec}
    jeżeli spec_set:
        _kwargs = {'spec_set': spec}
    albo_inaczej spec jest Nic:
        # None we mock with a normal mock without a spec
        _kwargs = {}
    jeżeli _kwargs oraz instance:
        _kwargs['_spec_as_instance'] = Prawda
    _kwargs.update(kwargs)
    Klass = MagicMock
    jeżeli type(spec) w DescriptorTypes:
        # descriptors don't have a spec
        # because we don't know what type they return
        _kwargs = {}
    albo_inaczej nie _callable(spec):
        Klass = NonCallableMagicMock
    albo_inaczej is_type oraz instance oraz nie _instance_callable(spec):
        Klass = NonCallableMagicMock
    _name = _kwargs.pop('name', _name)
    _new_name = _name
    jeżeli _parent jest Nic:
        # for a top level object no _new_name should be set
        _new_name = ''
    mock = Klass(parent=_parent, _new_parent=_parent, _new_name=_new_name,
                 name=_name, **_kwargs)
    jeżeli isinstance(spec, FunctionTypes):
        # should only happen at the top level because we don't
        # recurse for functions
        mock = _set_signature(mock, spec)
    inaczej:
        _check_signature(spec, mock, is_type, instance)
    jeżeli _parent jest nie Nic oraz nie instance:
        _parent._mock_children[_name] = mock
    jeżeli is_type oraz nie instance oraz 'return_value' nie w kwargs:
        mock.return_value = create_autospec(spec, spec_set, instance=Prawda,
                                            _name='()', _parent=mock)
    dla entry w dir(spec):
        jeżeli _is_magic(entry):
            # MagicMock already does the useful magic methods for us
            kontynuuj
        # XXXX do we need a better way of getting attributes without
        # triggering code execution (?) Probably not - we need the actual
        # object to mock it so we would rather trigger a property than mock
        # the property descriptor. Likewise we want to mock out dynamically
        # provided attributes.
        # XXXX what about attributes that raise exceptions other than
        # AttributeError on being fetched?
        # we could be resilient against it, or catch and propagate the
        # exception when the attribute is fetched from the mock
        spróbuj:
            original = getattr(spec, entry)
        wyjąwszy AttributeError:
            kontynuuj
        kwargs = {'spec': original}
        jeżeli spec_set:
            kwargs = {'spec_set': original}
        jeżeli nie isinstance(original, FunctionTypes):
            # Non-function attributes are autospecced lazily on first access.
            new = _SpecState(original, spec_set, mock, entry, instance)
            mock._mock_children[entry] = new
        inaczej:
            parent = mock
            jeżeli isinstance(spec, FunctionTypes):
                parent = mock.mock
            skipfirst = _must_skip(spec, entry, is_type)
            kwargs['_eat_self'] = skipfirst
            new = MagicMock(parent=parent, name=entry, _new_name=entry,
                            _new_parent=parent,
                            **kwargs)
            mock._mock_children[entry] = new
            _check_signature(original, new, skipfirst=skipfirst)
        # so functions created with _set_signature become instance attributes,
        # *plus* their underlying mock exists in _mock_children of the parent
        # mock. Adding to _mock_children may be unnecessary where we are also
        # setting as an instance attribute?
        jeżeli isinstance(new, FunctionTypes):
            setattr(mock, entry, new)
    zwróć mock
def _must_skip(spec, entry, is_type):
    """
    Return whether we should skip the first argument on spec's `entry`
    attribute.
    """
    jeżeli nie isinstance(spec, type):
        jeżeli entry w getattr(spec, '__dict__', {}):
            # instance attribute - shouldn't skip
            zwróć Nieprawda
        spec = spec.__class__
    dla klass w spec.__mro__:
        result = klass.__dict__.get(entry, DEFAULT)
        jeżeli result jest DEFAULT:
            kontynuuj
        jeżeli isinstance(result, (staticmethod, classmethod)):
            zwróć Nieprawda
        albo_inaczej isinstance(getattr(result, '__get__', Nic), MethodWrapperTypes):
            # Normal method => skip if looked up on type
            # (if looked up on instance, self is already skipped)
            zwróć is_type
        inaczej:
            zwróć Nieprawda
    # shouldn't get here unless function is a dynamically provided attribute
    # XXXX untested behaviour
    zwróć is_type
def _get_class(obj):
    """Return the class of *obj*, falling back to type() if __class__ is absent."""
    spróbuj:
        zwróć obj.__class__
    wyjąwszy AttributeError:
        # it is possible for objects to have no __class__
        zwróć type(obj)
klasa _SpecState(object):
    """Placeholder recording how to lazily autospec an attribute on first access."""
    def __init__(self, spec, spec_set=Nieprawda, parent=Nic,
                 name=Nic, ids=Nic, instance=Nieprawda):
        self.spec = spec
        self.ids = ids
        self.spec_set = spec_set
        self.parent = parent
        self.instance = instance
        self.name = name
# Types treated as plain callables when autospeccing.
FunctionTypes = (
    # python function
    type(create_autospec),
    # instance method
    type(ANY.__eq__),
)

# The type of a bound method wrapper, used to detect normal methods.
MethodWrapperTypes = (
    type(ANY.__eq__.__get__),
)

# Lazily-built attribute spec for mock_open file handles (filled on first use).
file_spec = Nic
def _iterate_read_data(read_data):
    # Helper for mock_open:
    # Retrieve lines from read_data via a generator so that separate calls to
    # readline, read, and readlines are properly interleaved
    sep = b'\n' jeżeli isinstance(read_data, bytes) inaczej '\n'
    data_as_list = [l + sep dla l w read_data.split(sep)]
    jeżeli data_as_list[-1] == sep:
        # If the last line ended in a newline, the list comprehension will have an
        # extra entry that's just a newline. Remove this.
        data_as_list = data_as_list[:-1]
    inaczej:
        # If there wasn't an extra newline by itself, then the file being
        # emulated doesn't have a newline to end the last line; remove the
        # newline that our naive format() added
        data_as_list[-1] = data_as_list[-1][:-1]
    dla line w data_as_list:
        uzyskaj line
def mock_open(mock=Nic, read_data=''):
    """
    A helper function to create a mock to replace the use of `open`. It works
    for `open` called directly or used as a context manager.

    The `mock` argument is the mock object to configure. If `Nic` (the
    default) then a `MagicMock` will be created for you, with the API limited
    to methods or attributes available on standard file handles.

    `read_data` is a string for the `read`, `readline`, and `readlines`
    methods of the file handle to return. This is an empty string by default.
    """
    def _readlines_side_effect(*args, **kwargs):
        jeżeli handle.readlines.return_value jest nie Nic:
            zwróć handle.readlines.return_value
        zwróć list(_state[0])
    def _read_side_effect(*args, **kwargs):
        jeżeli handle.read.return_value jest nie Nic:
            zwróć handle.read.return_value
        zwróć type(read_data)().join(_state[0])
    def _readline_side_effect():
        jeżeli handle.readline.return_value jest nie Nic:
            dopóki Prawda:
                uzyskaj handle.readline.return_value
        dla line w _state[0]:
            uzyskaj line
    # Build the handle spec lazily from the io module's file types.
    global file_spec
    jeżeli file_spec jest Nic:
        zaimportuj _io
        file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))
    jeżeli mock jest Nic:
        mock = MagicMock(name='open', spec=open)
    handle = MagicMock(spec=file_spec)
    handle.__enter__.return_value = handle
    # _state[0] is the data generator, _state[1] the readline side effect.
    _state = [_iterate_read_data(read_data), Nic]
    handle.write.return_value = Nic
    handle.read.return_value = Nic
    handle.readline.return_value = Nic
    handle.readlines.return_value = Nic
    handle.read.side_effect = _read_side_effect
    _state[1] = _readline_side_effect()
    handle.readline.side_effect = _state[1]
    handle.readlines.side_effect = _readlines_side_effect
    def reset_data(*args, **kwargs):
        # Each call to open() restarts reading from the beginning of read_data.
        _state[0] = _iterate_read_data(read_data)
        jeżeli handle.readline.side_effect == _state[1]:
            # Only reset the side effect if the user hasn't overridden it.
            _state[1] = _readline_side_effect()
            handle.readline.side_effect = _state[1]
        zwróć DEFAULT
    mock.side_effect = reset_data
    mock.return_value = handle
    zwróć mock
klasa PropertyMock(Mock):
    """
    A mock intended to be used as a property, or other descriptor, on a class.
    `PropertyMock` provides `__get__` and `__set__` methods so you can specify
    a return value when it is fetched.

    Fetching a `PropertyMock` instance from an object calls the mock, with
    no args. Setting it calls the mock with the value being set.
    """
    def _get_child_mock(self, **kwargs):
        # Child mocks of a PropertyMock are plain MagicMocks.
        zwróć MagicMock(**kwargs)
    def __get__(self, obj, obj_type):
        # Descriptor read: calling self() yields the configured return value.
        zwróć self()
    def __set__(self, obj, val):
        # Descriptor write: record the assigned value as a call.
        self(val)
| 33.648125
| 89
| 0.619936
|
4a11a89d9f50a9315bde94221f25b7d20e2fd10d
| 1,327
|
py
|
Python
|
Classificação/Arvore_Decisão/arvore_decisão_census.py
|
BigWalvi/Machine-Learning-and-Data-Science
|
709392d82178dfb29b00ad459bad906b75d2d706
|
[
"MIT"
] | null | null | null |
Classificação/Arvore_Decisão/arvore_decisão_census.py
|
BigWalvi/Machine-Learning-and-Data-Science
|
709392d82178dfb29b00ad459bad906b75d2d706
|
[
"MIT"
] | null | null | null |
Classificação/Arvore_Decisão/arvore_decisão_census.py
|
BigWalvi/Machine-Learning-and-Data-Science
|
709392d82178dfb29b00ad459bad906b75d2d706
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 5 15:53:08 2021

@author: Walvi

Decision-tree classification of the census (adult income) dataset:
load, one-hot encode categorical features, scale, split, train and score.
"""
import pandas as pd
# Columns 0-13 are the predictors; column 14 holds the class label.
base = pd.read_csv('census.csv')
previsores = base.iloc[:,0:14].values
classe = base.iloc[:,14].values
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.compose import ColumnTransformer
# One-hot encode the categorical predictor columns; pass the rest through.
column_transformer = ColumnTransformer([('one_hot_encoder', OneHotEncoder(), [1,3,5,6,7,8,9,13])], remainder='passthrough')
previsores = column_transformer.fit_transform(previsores).toarray()
# Encode the class label as integers.
labelencoder_classe = LabelEncoder()
classe = labelencoder_classe.fit_transform(classe)
from sklearn.preprocessing import StandardScaler
# Standardize predictors to zero mean / unit variance.
scaler = StandardScaler()
previsores = scaler.fit_transform(previsores)
from sklearn.model_selection import train_test_split
# Hold out 15% of the rows for testing (fixed seed for reproducibility).
previsores_treinamento, previsores_teste, classe_treinamento, classe_teste = train_test_split(previsores, classe, test_size=0.15, random_state=0)
from sklearn.tree import DecisionTreeClassifier
# Train an entropy-based decision tree and predict on the held-out set.
classificador = DecisionTreeClassifier(criterion='entropy', random_state=0)
classificador.fit(previsores_treinamento, classe_treinamento)
previsoes = classificador.predict(previsores_teste)
from sklearn.metrics import accuracy_score, confusion_matrix
# Evaluate: overall accuracy and the confusion matrix.
precisao = accuracy_score(classe_teste, previsoes)
matriz = confusion_matrix(classe_teste, previsoes)
| 33.175
| 145
| 0.813866
|
4a11a8bf7cc84b5d709e7777f45e592b39ca13f1
| 1,471
|
py
|
Python
|
src/db_handler.py
|
AccraZed/Klove
|
f49c219424473aad2ed8e4b9dc8dce18a3d7b860
|
[
"MIT"
] | null | null | null |
src/db_handler.py
|
AccraZed/Klove
|
f49c219424473aad2ed8e4b9dc8dce18a3d7b860
|
[
"MIT"
] | null | null | null |
src/db_handler.py
|
AccraZed/Klove
|
f49c219424473aad2ed8e4b9dc8dce18a3d7b860
|
[
"MIT"
] | null | null | null |
import sqlite3
from sqlite3 import Error
# Python doesn't support function overloading(later function takes precedence)
class DatabaseHandler:
    """Thin wrapper around a single SQLite connection.

    Errors are reported via ``print`` rather than raised (best-effort style
    preserved from the original), so callers must tolerate a ``None`` result
    from :meth:`read` when a query fails.
    """

    def __init__(self, path_database):
        """Open (or create) the SQLite database at *path_database*."""
        try:
            self.connection = sqlite3.connect(path_database)
            print("Connection to SQLite DB successful")
        except Error as e:
            print(f"The error '{e}' occurred")

    # Writes to DB, given the appropriate query
    def write(self, query, params=None):
        """Execute a mutating *query* (optionally parameterized) and commit.

        THIS CHANGES THE CONTENTS OF THE DB! USE WITH CAUTION.
        """
        cursor = self.connection.cursor()
        try:
            # Passing () when params is None lets one execute() call cover
            # both the parameterized and plain cases.
            cursor.execute(query, params if params is not None else ())
            self.connection.commit()
        except Error as e:
            print(f"The error '{e}' occurred")
        finally:
            cursor.close()  # fix: the original leaked the cursor

    def read(self, query, params=None):
        """Run a SELECT *query* and return its rows as a list of dicts.

        Each dict maps column name -> value. Returns ``None`` if the query
        fails (original contract preserved).
        """
        cursor = self.connection.cursor()
        try:
            cursor.execute(query, params if params is not None else ())
            return [dict(zip((d[0] for d in cursor.description), row))
                    for row in cursor.fetchall()]
        except Error as e:
            print(f"The error '{e}' occurred")
        finally:
            cursor.close()  # fix: the original leaked the cursor
# for speeding up alterations to the table (Justice)
# db_path = "src/db.sqlite"
# db = DatabaseHandler(db_path)
| 31.297872
| 88
| 0.582597
|
4a11aa150cb541a3c5ac09ad57260dadb6800409
| 315
|
py
|
Python
|
python3/arrange_coins.py
|
joshiaj7/CodingChallenges
|
f95dd79132f07c296e074d675819031912f6a943
|
[
"MIT"
] | 1
|
2020-10-08T09:17:40.000Z
|
2020-10-08T09:17:40.000Z
|
python3/arrange_coins.py
|
joshiaj7/CodingChallenges
|
f95dd79132f07c296e074d675819031912f6a943
|
[
"MIT"
] | null | null | null |
python3/arrange_coins.py
|
joshiaj7/CodingChallenges
|
f95dd79132f07c296e074d675819031912f6a943
|
[
"MIT"
] | null | null | null |
"""
Space : O(1)
Time : O(n)
"""
class Solution:
    def arrangeCoins(self, n: int) -> int:
        """Return the number of complete rows in a staircase built from n coins.

        Row k holds k coins, so k complete rows consume k*(k+1)/2 coins.
        Solving k*(k+1)/2 <= n for the largest integer k gives
        k = floor((sqrt(8n + 1) - 1) / 2). math.isqrt keeps the arithmetic
        exact for arbitrarily large n (no float rounding), making this O(1)
        instead of the original O(sqrt(n)) counting loop.
        """
        from math import isqrt
        return (isqrt(8 * n + 1) - 1) // 2
| 17.5
| 42
| 0.361905
|
4a11aa19011efd9671ba0be1262d9daaba524f04
| 2,082
|
py
|
Python
|
project2/src/tools.py
|
errikos/ml-makarona
|
5e0c9efe3405245119bf5aa9bd81a4ca5159eab1
|
[
"MIT"
] | null | null | null |
project2/src/tools.py
|
errikos/ml-makarona
|
5e0c9efe3405245119bf5aa9bd81a4ca5159eab1
|
[
"MIT"
] | null | null | null |
project2/src/tools.py
|
errikos/ml-makarona
|
5e0c9efe3405245119bf5aa9bd81a4ca5159eab1
|
[
"MIT"
] | 1
|
2019-10-24T22:47:38.000Z
|
2019-10-24T22:47:38.000Z
|
#!/usr/bin/env python3
import click
import os
import helpers
def _write_normalized(data, base_path, fname):
with open(os.path.join(base_path, fname), 'w+') as f:
f.write('user,item,rating\n')
f.writelines('{u},{i},{r}\n'.format(u=u, i=i, r=r) for u, i, r in data)
@click.group(help='Various useful tools for the recommendation system training workflow.')
@click.pass_context
def cli(ctx, **kwargs):
    # Root command group: stash shared CLI options on the click context
    # so subcommands can read them.
    ctx.obj.update(**kwargs)
@cli.command(help='Convert a dataset from the upstream to the normalized format.')
@click.option('-i', '--input', 'input_path', type=click.Path(exists=True), required=True,
              help='The input dataset file path.')
@click.option('-o', '--output', 'output_path', type=click.Path(exists=False), required=True,
              help='The output dataset file path.')
def normalize(input_path, output_path, **kwargs):
    # Thin CLI wrapper; the conversion logic lives in helpers.normalize.
    helpers.normalize(input_path, output_path)
@cli.command(help='Convert a dataset from the normalized to the upstream format.')
@click.option('-i', '--input', 'input_path', type=click.Path(exists=True), required=True,
              help='The input dataset file path.')
@click.option('-o', '--output', 'output_path', type=click.Path(exists=False), required=True,
              help='The output dataset file path.')
def denormalize(input_path, output_path, **kwargs):
    # Thin CLI wrapper; the conversion logic lives in helpers.denormalize.
    helpers.denormalize(input_path, output_path)
@cli.command(help='Split a normalized dataset into training and testing.')
@click.argument('dataset_path', type=click.Path(exists=True), required=True)
@click.option('-r', '--ratio', metavar='RATIO', type=float, default=0.9, help='The split ratio (default: 0.9).')
@click.option('-s', '--seed', metavar='SEED', type=int, default=988, help='The seed to use (default: 988).')
def split_train_test(dataset_path, **kwargs):
    # Split the dataset and write training.csv / testing.csv next to the input.
    train, test = helpers.split_normalized_data(dataset_path, **kwargs)
    _write_normalized(train, os.path.dirname(dataset_path), 'training.csv')
    _write_normalized(test, os.path.dirname(dataset_path), 'testing.csv')
if __name__ == '__main__':
    # Start the CLI with an empty shared context object.
    cli(obj={})
| 41.64
| 112
| 0.695485
|
4a11ac4de6ce55910da1412be1db34c4a4695a36
| 385
|
py
|
Python
|
plim/util.py
|
spollard/Plim
|
7689de85364691063ed5c43a891c433f9ebef5b9
|
[
"MIT"
] | 85
|
2015-01-08T20:15:54.000Z
|
2022-03-12T21:51:27.000Z
|
plim/util.py
|
spollard/Plim
|
7689de85364691063ed5c43a891c433f9ebef5b9
|
[
"MIT"
] | 18
|
2015-02-27T14:59:08.000Z
|
2021-09-24T10:27:19.000Z
|
plim/util.py
|
spollard/Plim
|
7689de85364691063ed5c43a891c433f9ebef5b9
|
[
"MIT"
] | 14
|
2015-02-26T07:20:42.000Z
|
2022-02-01T17:52:16.000Z
|
import sys

# True when running under Python 3; selects the right text/IO shims below.
PY3K = sys.version_info >= (3, 0)

if PY3K:
    from io import StringIO

    def joined(buf):
        """Concatenate an iterable of strings."""
        return ''.join(buf)

    def space_separated(buf):
        """Join an iterable of strings with single spaces."""
        return ' '.join(buf)

    u = str
    MAXSIZE = sys.maxsize
else:
    from StringIO import StringIO

    def joined(buf):
        """Concatenate an iterable of strings (unicode on Python 2)."""
        return u('').join(buf)

    def space_separated(buf):
        """Join an iterable of strings with single spaces (unicode on Python 2)."""
        return u(' ').join(buf)

    u = unicode
    MAXSIZE = sys.maxint
| 18.333333
| 50
| 0.620779
|
4a11aed49f41bd68d470286b595bf71b3638b130
| 6,750
|
py
|
Python
|
web/src/stadistic/forms.py
|
frhumanes/consulting
|
400df4fc59240d2cd1c5807feaabacd056fdce03
|
[
"Apache-2.0"
] | null | null | null |
web/src/stadistic/forms.py
|
frhumanes/consulting
|
400df4fc59240d2cd1c5807feaabacd056fdce03
|
[
"Apache-2.0"
] | null | null | null |
web/src/stadistic/forms.py
|
frhumanes/consulting
|
400df4fc59240d2cd1c5807feaabacd056fdce03
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
from django import forms
from django.conf import settings
from userprofile.models import Profile
from formula.models import Variable, Dimension
from survey.models import Option
from consulting.models import Medicine
from illness.models import Illness
from django.utils.translation import ugettext_lazy as _
from django.utils.html import strip_tags
class MultipleValueField(forms.MultiValueField):
    """MultiValueField that can render its widget directly, outside a bound form."""

    def draw(self):
        # Derive the HTML id from the label and render with no initial values.
        attrs = self.widget.attrs
        attrs['id'] = 'id_' + self.label.replace(' ','_').lower()
        return self.widget.render(self.label.replace(' ','-'), [], attrs)
class RangeWidget(forms.MultiWidget):
    """Pair of hidden inputs representing a (start, stop) numeric range."""

    def __init__(self, attrs=None):
        widgets = (forms.HiddenInput(attrs=attrs), forms.HiddenInput(attrs=attrs))
        super(RangeWidget, self).__init__(widgets, attrs)

    def decompress(self, value):
        # Split a slice-like value (with .start/.stop) into the two sub-widgets.
        if value:
            return [value.start, value.stop]
        return [None, None]

    def format_output(self, rendered_widgets):
        # Wrap both inputs in a container the front-end range slider can hook.
        return '<div class="range">%s</div>' % u' '.join(rendered_widgets)
class DateWidget(forms.MultiWidget):
    """Pair of date inputs representing a (start, stop) date range."""

    def __init__(self, attrs=None, format=None):
        widgets = (forms.DateInput(attrs=attrs, format=format), forms.DateInput(attrs=attrs, format=format))
        super(DateWidget, self).__init__(widgets, attrs)

    def decompress(self, value):
        # Split a slice-like value (with .start/.stop) into the two sub-widgets.
        if value:
            return [value.start, value.stop]
        return [None, None]

    def format_output(self, rendered_widgets):
        return u' - '.join(rendered_widgets)
class FiltersForm(forms.Form):
    """Statistics filter form: demographic, clinical and treatment criteria.

    NOTE(review): several choice lists below run database queries at class
    definition (import) time — confirm this is acceptable for deployment.
    """
    # General display options for the statistics view.
    options = forms.MultipleChoiceField(
        label=_(u'Opciones'),
        choices=[('filter', u'Mostrar sólo mis pacientes'),
                 ('ungroup', u'Separar por episodios*')],
        widget=forms.CheckboxSelectMultiple(),
        help_text=_(u'* Segrega distintos episodios de un mismo paciente'))
    # Demographic filters.
    sex = forms.MultipleChoiceField(
        label=_(u'Sexo'),
        choices=Profile.SEX,
        widget=forms.CheckboxSelectMultiple())
    marital = forms.MultipleChoiceField(
        label=_(u'Estado civil'),
        choices=Profile.STATUS,
        widget=forms.CheckboxSelectMultiple())
    education = forms.MultipleChoiceField(
        label=_(u'Nivel de estudios'),
        choices=Profile.EDUCATION,
        widget=forms.CheckboxSelectMultiple())
    profession = forms.MultipleChoiceField(
        label=_(u'Profesión'),
        choices=[(p['profession'], p['profession']) for p in Profile.objects.exclude(profession='').values('profession').order_by('profession').distinct()],
        widget=forms.CheckboxSelectMultiple())
    age = forms.MultiValueField(
        label=_(u'Edad'),
        widget=RangeWidget(attrs={'class':'span3','min':'0', 'max':'100'}))
    # Clinical filters.
    illnesses =forms.MultipleChoiceField(
        label='Diagnóstico',
        choices=[(i['code'], '('+i['code']+') '+i['name']) for i in Illness.objects.filter(illnesses_profiles__isnull=False).values('code', 'name').order_by('code').distinct()],
        widget=forms.CheckboxSelectMultiple())
    date = forms.MultiValueField(
        label=_(u'Fechas'),
        widget=DateWidget(attrs={'class':'span5'},
                          format=settings.DATE_INPUT_FORMAT))
    # Severity scales (labels taken from the per-scale settings tables).
    anxiety = forms.MultipleChoiceField(
        label=_(u'Nivel de Ansiedad'),
        choices=[(c, strip_tags(settings.HAMILTON[v][0])) for c, v in enumerate(sorted(settings.HAMILTON))],
        widget=forms.CheckboxSelectMultiple())
    depression = forms.MultipleChoiceField(
        label=_(u'Nivel de Depresión'),
        choices=[(c, strip_tags(settings.BECK[v][0])) for c, v in enumerate(sorted(settings.BECK))],
        widget=forms.CheckboxSelectMultiple())
    unhope = forms.MultipleChoiceField(
        label=_(u'Nivel de Desesperanza'),
        choices=[(c, strip_tags(settings.UNHOPE[v][0])) for c, v in enumerate(sorted(settings.UNHOPE))],
        widget=forms.CheckboxSelectMultiple())
    suicide = forms.MultipleChoiceField(
        label=_(u'Nivel de Riesgo'),
        choices=[(c, strip_tags(settings.SUICIDE[v][0])) for c, v in enumerate(sorted(settings.SUICIDE))],
        widget=forms.CheckboxSelectMultiple())
    ybocs = forms.MultipleChoiceField(
        label=_(u'Nivel de Obsesión-compulsión'),
        choices=[(c, strip_tags(settings.Y_BOCS[v][0])) for c, v in enumerate(sorted(settings.Y_BOCS))],
        widget=forms.CheckboxSelectMultiple())
    aves = forms.MultipleChoiceField(
        label=_(u'Acontecimientos Vitales Estresantes'),
        choices=[(op.id, op.text) for op in Option.objects.filter(code__startswith='AVE', option_answers__isnull=False).distinct().order_by('text')],
        widget=forms.CheckboxSelectMultiple())
    treatment =forms.MultipleChoiceField(
        label='Tratamiento',
        choices=[(m['component__name'], m['component__name']) for m in Medicine.objects.filter(is_previous=False).values('component__name').order_by('component__name').distinct()],
        widget=forms.CheckboxSelectMultiple())

    def __init__(self, *args, **kwargs):
        # Optional 'block' kwarg adds per-variable and per-dimension
        # range fields built from the database.
        block = None
        if 'block' in kwargs:
            block = kwargs.pop('block')
        super(forms.Form, self).__init__(*args, **kwargs)
        if block:
            self.variables = []
            for v in Variable.objects.filter(variables_categories__categories_blocks=block).distinct():
                #vars()['group_'+v.code] = forms.MultiValueField(
                #    label=_(v.name),
                #    widget=RangeWidget(attrs={'class':'span3','min':'0', 'max':'5'}))
                self.variables.append(MultipleValueField(
                    label='variables.'+v.name,
                    widget=RangeWidget(attrs={'class':'span3',
                                              'min':v.vmin,
                                              'max':v.vmax})))
            self.dimensions = []
            for d in Dimension.objects.exclude(name=''):
                self.dimensions.append(MultipleValueField(
                    label='dimensions.'+d.name,
                    widget=RangeWidget(attrs={'class':'span3','min':'0', 'max':'10'})))
| 45.608108
| 192
| 0.580593
|
4a11b05d19f88282e59e641d44f49bdc7669176a
| 5,624
|
py
|
Python
|
quart_session/__init__.py
|
adrienyhuel/quart-session
|
264a9ad3693477484a6000e5924116f68383fb42
|
[
"BSD-3-Clause"
] | null | null | null |
quart_session/__init__.py
|
adrienyhuel/quart-session
|
264a9ad3693477484a6000e5924116f68383fb42
|
[
"BSD-3-Clause"
] | null | null | null |
quart_session/__init__.py
|
adrienyhuel/quart-session
|
264a9ad3693477484a6000e5924116f68383fb42
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
quart_session
~~~~~~~~~~~~~
Adds server session support to your application.
:copyright: (c) 2014 by Shipeng Feng.
:copyright: (c) 2020 by Sander.
:license: BSD, see LICENSE for more details.
"""
__version__ = '1.0.2'
import os
from quart import Quart
from .sessions import RedisSessionInterface, RedisTrioSessionInterface, MemcachedSessionInterface, NullSessionInterface
class Session(object):
    """This class is used to add Server-side Session to one or more Quart
    applications.

    There are two usage modes. One is initialize the instance with a very
    specific Quart application::

        app = Quart(__name__)
        Session(app)

    The second possibility is to create the object once and configure the
    application later::

        sess = Session()

        def create_app():
            app = Quart(__name__)
            sess.init_app(app)
            return app

    By default Quart-Session will use :class:`NullSessionInterface`, you
    really should configure your app to use a different SessionInterface.

    .. note::

        You can not use ``Session`` instance directly, what ``Session`` does
        is just change the :attr:`~quart.Quart.session_interface` attribute on
        your Quart applications.
    """

    def __init__(self, app: Quart = None) -> None:
        # Default to asyncio; switched to "trio" in init_app when the app
        # turns out to be a QuartTrio instance.
        self._current_async_library = "asyncio"
        self.app = app
        if app is not None:
            self.init_app(app)

    def init_app(self, app: Quart) -> None:
        """This is used to set up session for your app object.

        :param app: the Quart app object with proper configuration.
        """
        try:
            import quart_trio
            if isinstance(app, quart_trio.QuartTrio):
                self._current_async_library = "trio"
        except ImportError:
            # quart_trio is optional; stay on asyncio.
            pass

        app.session_interface = self._get_interface(app)

        @app.before_serving
        async def setup():
            # Let the backend establish its connection(s) before serving.
            await app.session_interface.create(app)

    def _get_interface(self, app: Quart):
        """Build the session interface selected by ``SESSION_TYPE``.

        Reads (and defaults) all ``SESSION_*`` keys from the app config and
        returns the matching interface instance.

        :raises NotImplementedError: for an unknown ``SESSION_TYPE`` or
            event loop.
        """
        config = app.config.copy()
        config.setdefault('SESSION_TYPE', 'null')
        config.setdefault('SESSION_PERMANENT', True)
        config.setdefault('SESSION_USE_SIGNER', False)
        config.setdefault('SESSION_KEY_PREFIX', 'session:')
        config.setdefault('SESSION_PROTECTION', False)
        config.setdefault('SESSION_REVERSE_PROXY', False)
        config.setdefault('SESSION_STATIC_FILE', False)
        config.setdefault('SESSION_EXPLICIT', False)
        config.setdefault('SESSION_REDIS', None)
        config.setdefault('SESSION_MEMCACHED', None)
        config.setdefault('SESSION_FILE_DIR',
                          os.path.join(os.getcwd(), 'quart_session'))
        config.setdefault('SESSION_FILE_THRESHOLD', 500)
        config.setdefault('SESSION_FILE_MODE', 384)

        # Keep only the session-related keys; they are forwarded verbatim
        # to the interface constructors via **config below.
        config = {k: v for k, v in config.items() if k.startswith('SESSION_')}

        # Warn about configuration keys that were renamed upstream.
        if isinstance(config.get("SESSION_HIJACK_PROTECTION"), bool):
            app.logger.warning("Deprecation: `SESSION_HIJACK_PROTECTION` "
                               "has been renamed to `SESSION_PROTECTION`")
        if isinstance(config.get("SESSION_HIJACK_REVERSE_PROXY"), str):
            app.logger.warning("Deprecation: `SESSION_HIJACK_REVERSE_PROXY` "
                               "has been renamed to `SESSION_REVERSE_PROXY`")

        backend_warning = f"Please specify a session backend. " \
                          f"Available interfaces: redis, redis+trio, " \
                          f"memcached, null. e.g: app.config['SESSION_TYPE'] = 'redis'"

        if config['SESSION_TYPE'] == 'redis':
            options = {
                "redis": config['SESSION_REDIS'],
                "key_prefix": config['SESSION_KEY_PREFIX'],
                "use_signer": config['SESSION_USE_SIGNER'],
                "permanent": config['SESSION_PERMANENT'],
                **config
            }

            # 'redis' picks the concrete class from the detected event loop.
            if self._current_async_library == "asyncio":
                session_interface = RedisSessionInterface(**options)
            elif self._current_async_library == "trio":
                session_interface = RedisTrioSessionInterface(**options)
            else:
                raise NotImplementedError("Unknown eventloop")
        elif config['SESSION_TYPE'] == 'redis+trio':
            session_interface = RedisTrioSessionInterface(
                redis=config['SESSION_REDIS'],
                key_prefix=config['SESSION_KEY_PREFIX'],
                use_signer=config['SESSION_USE_SIGNER'],
                # fix: was misspelled 'premanent', which passed an unexpected
                # keyword argument to the interface constructor.
                permanent=config['SESSION_PERMANENT'],
                **config
            )
        elif config['SESSION_TYPE'] == 'memcached':
            session_interface = MemcachedSessionInterface(
                memcached=config['SESSION_MEMCACHED'],
                key_prefix=config['SESSION_KEY_PREFIX'],
                use_signer=config['SESSION_USE_SIGNER'],
                permanent=config['SESSION_PERMANENT'],
                **config)
        elif config['SESSION_TYPE'] == 'null':
            app.logger.warning(f"{backend_warning}. Currently using: null")
            session_interface = NullSessionInterface(
                key_prefix=config['SESSION_KEY_PREFIX'],
                use_signer=config['SESSION_USE_SIGNER'],
                permanent=config['SESSION_PERMANENT'],
                **config)
        else:
            raise NotImplementedError(f"No such session interface "
                                      f"\"{config['SESSION_TYPE']}\". {backend_warning}")
        return session_interface
| 38
| 119
| 0.608286
|
4a11b0916e906c7095a2d703924db83060ba8dd1
| 6,639
|
py
|
Python
|
reinforce/reinforce_pixels.py
|
uchendui/reinforcement-learning
|
121e7f7c325dad2b3b2f0437b9ac78e23a841b45
|
[
"MIT"
] | null | null | null |
reinforce/reinforce_pixels.py
|
uchendui/reinforcement-learning
|
121e7f7c325dad2b3b2f0437b9ac78e23a841b45
|
[
"MIT"
] | null | null | null |
reinforce/reinforce_pixels.py
|
uchendui/reinforcement-learning
|
121e7f7c325dad2b3b2f0437b9ac78e23a841b45
|
[
"MIT"
] | null | null | null |
import gym
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from util.network import PolicyNetworkBuilder
from util.misc import to_one_hot
class Trajectory:
    def __init__(self, states, actions, rewards, num_actions):
        """
        Represents a single trajectory sampled from our policy.

        Args:
            states: list of states visited during the trajectory (excluding final state)
            actions: list of actions taken from each state
            rewards: list of rewards received in the trajectory
            num_actions: number of actions
        """
        self.states = np.array(states)
        self.actions = to_one_hot(actions, num_actions)

        # Normalize each step's reward-to-go by the mean/std of the per-step
        # rewards (a simple baseline to reduce gradient variance).
        rewards = np.asarray(rewards, dtype=float)
        mean_reward = rewards.mean()
        std = rewards.std()
        std = 1 if std == 0 else std  # avoid division by zero on constant rewards
        # Suffix sums give every step's reward-to-go in O(n), replacing the
        # original O(n^2) per-step np.sum(rewards[i:]) loop.
        # NOTE(review): assumes len(rewards) == len(states) — confirm in caller.
        rewards_to_go = np.cumsum(rewards[::-1])[::-1]
        self.q_values = (rewards_to_go - mean_reward) / std
class TrainReinforce:
def __init__(self,
env,
sess,
render=False,
max_episodes=1000,
print_freq=20,
load_path=None,
save_path=None,
):
"""Trains an agent via vanilla policy gradient with average reward baseline.
Args:
env: gym.Env where our agent resides.
sess: tensorflow session
render: True to render the environment, else False
max_episodes: maximum number of episodes to train for
print_freq: Displays logging information every 'print_freq' episodes
load_path: (str) Path to load existing model from
save_path: (str) Path to save model during training
"""
self.max_episodes = max_episodes
self.print_freq = print_freq
self.env = env
self.input_dim = env.observation_space.shape
self.output_dim = env.action_space.n
self.sess = sess
self.render = render
self.save_path = save_path
self.rewards = []
self.rnb = PolicyNetworkBuilder(self.input_dim,
self.output_dim,
learning_rate=0.0005,
conv=True)
if load_path is not None:
self.rnb.saver.restore(sess, load_path)
print(f'Successfully loaded model from {load_path}')
def act(self, observation):
pred = self.sess.run(self.rnb.output_pred,
feed_dict={self.rnb.input_ph: np.expand_dims(observation, axis=0)})
return np.random.choice(range(self.output_dim), p=pred.flatten())
def reinforce(self):
"""Trains an agent via vanilla policy gradient"""
total_reward = 0
mean_reward = None
for e in range(self.max_episodes):
# Sample trajectories from our policy (run it on the robot)
traj, reward = self.sample()
total_reward += reward
self.rewards.append(reward)
# Compute and apply the gradient of the log policy multiplied by our baseline
self.update(traj.states, traj.actions, traj.q_values)
if e % self.print_freq == 0 and e > 0:
new_mean_reward = total_reward / self.print_freq
total_reward = 0
print(f"-------------------------------------------------------")
print(f"Mean {self.print_freq} Episode Reward: {new_mean_reward}")
# print(f"Exploration fraction: {eps}")
print(f"Total Episodes: {e}")
# print(f"Total timesteps: {t}")
print(f"-------------------------------------------------------")
# Model saving inspired by Open AI Baseline implementation
if (mean_reward is None or new_mean_reward >= mean_reward) and self.save_path is not None:
print(f"Saving model due to mean reward increase:{mean_reward} -> {new_mean_reward}")
print(f'Location: {self.save_path}')
self.save()
mean_reward = new_mean_reward
def update(self, states, actions, q_values):
"""Takes a single gradient step using a trajectory.
Args:
states: array of visited states
actions: array
q_values: array of q values corresponding to each state
"""
self.sess.run([self.rnb.opt, self.rnb.loss], feed_dict={self.rnb.input_ph: states,
self.rnb.actions_ph: actions,
self.rnb.baseline_ph: q_values})
def save(self):
"""Saves the network."""
self.rnb.saver.save(self.sess, self.save_path)
def load(self):
"""Loads the network."""
self.rnb.saver.restore(self.sess, self.save_path)
def sample(self):
    """Roll out one episode under the current policy.

    Returns:
        (Trajectory, total_reward) for the completed episode.
    """
    observations = []
    chosen_actions = []
    step_rewards = []
    obs = self.env.reset()
    episode_reward = 0
    steps_taken = 0
    finished = False
    while not finished:
        observations.append(obs)
        if self.render:
            self.env.render()
        action = self.act(obs)
        obs, reward, finished, _ = self.env.step(action)
        episode_reward += reward
        steps_taken += 1
        step_rewards.append(reward)
        chosen_actions.append(action)
    trajectory = Trajectory(observations, chosen_actions, step_rewards, self.output_dim)
    return trajectory, episode_reward
def plot_rewards(self, path=None):
    """Plot the total reward received per training episode.

    :param path: if given, save the figure to this path; otherwise show it.
    :return: None
    """
    plt.plot(self.rewards)
    plt.xlabel('Episode')
    plt.ylabel('Reward')
    if path is not None:
        plt.savefig(path)
        plt.close('all')
    else:
        plt.show()
def main():
    """Train REINFORCE on CubeCrash-v0, checkpointing and plotting rewards."""
    with tf.Session() as sess:
        env_name = 'CubeCrash-v0'
        env = gym.make(env_name)
        reinforce = TrainReinforce(env,
                                   sess,
                                   render=False,
                                   max_episodes=10000,
                                   print_freq=100,
                                   save_path=f'checkpoints/{env_name}.ckpt')
        # tf.initialize_all_variables() was deprecated (and later removed) in
        # TF 1.x; global_variables_initializer is the supported equivalent.
        sess.run(tf.global_variables_initializer())
        reinforce.reinforce()
        reinforce.plot_rewards()


if __name__ == '__main__':
    main()
| 36.478022
| 106
| 0.539388
|
4a11b0aac9664f8374608eee731ad52204f7efd3
| 1,650
|
py
|
Python
|
cms/management/commands/generate_dev_fixtures.py
|
digimatronics/py
|
8ee737e5d2dbb226c18a7dd46912c7301b37cc0e
|
[
"Apache-2.0"
] | 1
|
2017-03-12T06:29:04.000Z
|
2017-03-12T06:29:04.000Z
|
cms/management/commands/generate_dev_fixtures.py
|
digimatronics/py
|
8ee737e5d2dbb226c18a7dd46912c7301b37cc0e
|
[
"Apache-2.0"
] | 4
|
2021-03-31T20:10:38.000Z
|
2021-09-08T02:47:18.000Z
|
cms/management/commands/generate_dev_fixtures.py
|
digimatronics/py
|
8ee737e5d2dbb226c18a7dd46912c7301b37cc0e
|
[
"Apache-2.0"
] | null | null | null |
import gzip
import json
import io
from optparse import make_option
from django.core.management import BaseCommand, call_command
from django.contrib.auth.hashers import make_password
class Command(BaseCommand):
    """
    Generate fixtures necessary for local development of production database.
    NOTE: This should be run as a cron job on the production www.python.org
    infrastructure, it is not useful to run this in a local environment except
    for testing/debugging purposes of this command itself.
    """
    help = "Generate development fixtures for local development"

    option_list = BaseCommand.option_list + (
        make_option(
            '--file',
            default='/tmp/dev-fixtures.json.gz',
            dest='outputfile',
            help='Specifies the output file location of the fixtures.',
        ),
    )

    def handle(self, **options):
        """Dump the database to a gzipped JSON file with passwords scrubbed."""
        stream = io.StringIO()
        call_command(
            "dumpdata",
            format='json',
            indent=4,
            exclude=[
                "tastypie",
                "sessions",
                "account.emailconfirmation",
            ],
            stdout=stream,
        )
        stream.seek(0)
        records = json.loads(stream.getvalue())
        # Scrub User passwords for security: replace each hash with an
        # unusable password so real credentials never leave production.
        for record in records:
            if record['model'] == "users.user":
                record['fields']['password'] = make_password(None)
        with gzip.open(options.get('outputfile'), 'wb') as out:
            out.write(bytes(json.dumps(records, indent=4), 'UTF-8'))
| 27.04918
| 78
| 0.594545
|
4a11b0cd1b4c33e61ed8b8fd7704d521317cb164
| 11,606
|
py
|
Python
|
mayan/apps/documents/south_migrations/0010_auto__chg_field_document_date_added.py
|
camerondphillips/MAYAN
|
b8cd44af50f0b2f2b59286d9c88e2f7aa573a93f
|
[
"Apache-2.0"
] | null | null | null |
mayan/apps/documents/south_migrations/0010_auto__chg_field_document_date_added.py
|
camerondphillips/MAYAN
|
b8cd44af50f0b2f2b59286d9c88e2f7aa573a93f
|
[
"Apache-2.0"
] | null | null | null |
mayan/apps/documents/south_migrations/0010_auto__chg_field_document_date_added.py
|
camerondphillips/MAYAN
|
b8cd44af50f0b2f2b59286d9c88e2f7aa573a93f
|
[
"Apache-2.0"
] | null | null | null |
# encoding: utf-8
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
    # South schema migration: relaxes Document.date_added from an
    # auto_now_add DateTimeField to a plain DateTimeField.

    def forwards(self, orm):
        # Changing field 'Document.date_added'
        db.alter_column('documents_document', 'date_added', self.gf('django.db.models.fields.DateTimeField')())

    def backwards(self, orm):
        # Changing field 'Document.date_added'
        db.alter_column('documents_document', 'date_added', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True))

    # Frozen ORM snapshot used by South to build the `orm` object passed to
    # forwards()/backwards(). Auto-generated — do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'comments.comment': {
            'Meta': {'ordering': "('submit_date',)", 'object_name': 'Comment', 'db_table': "'django_comments'"},
            'comment': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
            'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_removed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'object_pk': ('django.db.models.fields.TextField', [], {}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
            'submit_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comment_comments'", 'null': 'True', 'to': "orm['auth.User']"}),
            'user_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'user_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'user_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'documents.document': {
            'Meta': {'ordering': "['-date_added']", 'object_name': 'Document'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'document_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['documents.DocumentType']", 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'uuid': ('django.db.models.fields.CharField', [], {'max_length': '48', 'blank': 'True'})
        },
        'documents.documentpage': {
            'Meta': {'ordering': "['page_number']", 'object_name': 'DocumentPage'},
            'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'document_version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['documents.DocumentVersion']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'page_label': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'page_number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
        },
        'documents.documentpagetransformation': {
            'Meta': {'ordering': "('order',)", 'object_name': 'DocumentPageTransformation'},
            'arguments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'document_page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['documents.DocumentPage']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'transformation': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        'documents.documenttype': {
            'Meta': {'ordering': "['name']", 'object_name': 'DocumentType'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '32'})
        },
        'documents.documenttypefilename': {
            'Meta': {'ordering': "['filename']", 'object_name': 'DocumentTypeFilename'},
            'document_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['documents.DocumentType']"}),
            'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'filename': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'documents.documentversion': {
            'Meta': {'unique_together': "(('document', 'major', 'minor', 'micro', 'release_level', 'serial'),)", 'object_name': 'DocumentVersion'},
            'checksum': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['documents.Document']"}),
            'encoding': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64'}),
            'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'filename': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'major': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'micro': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'mimetype': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64'}),
            'minor': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'release_level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'serial': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {})
        },
        'documents.recentdocument': {
            'Meta': {'ordering': "('-datetime_accessed',)", 'object_name': 'RecentDocument'},
            'datetime_accessed': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
            'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['documents.Document']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'sites.site': {
            'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'taggit.tag': {
            'Meta': {'object_name': 'Tag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
        },
        'taggit.taggeditem': {
            'Meta': {'object_name': 'TaggedItem'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
        }
    }

    complete_apps = ['documents']
| 76.860927
| 182
| 0.564708
|
4a11b0d2d8667c62ac2b2a95786c30f5200aa245
| 5,115
|
py
|
Python
|
setup.py
|
margudo/marvin
|
6f5a11b5b7ef80dbdb43a4538e27ccda126bab6e
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
margudo/marvin
|
6f5a11b5b7ef80dbdb43a4538e27ccda126bab6e
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
margudo/marvin
|
6f5a11b5b7ef80dbdb43a4538e27ccda126bab6e
|
[
"BSD-3-Clause"
] | null | null | null |
# !usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2016-10-19 17:36:00
# @Last modified by: Brian Cherinka
# @Last Modified time: 2018-12-03 10:56:33
#
# This is the Marvin setup
#
from setuptools import setup, find_packages
import os
from astropy.utils.data import download_file
import argparse
import shutil
import sys
def read(fname):
    """Return the contents of *fname*, resolved relative to this file's directory."""
    # Use a context manager so the handle is closed deterministically instead
    # of leaking until garbage collection.
    with open(os.path.join(os.path.dirname(__file__), fname)) as fh:
        return fh.read()
def add_data_file(directory, data_files):
    """Append every file under *directory* to *data_files* as a '../'-relative path.

    :param directory: path (relative to this setup.py) to walk.
    :param data_files: list mutated in place.
    """
    extern_path = os.path.join(os.path.dirname(__file__), directory)
    for root, __, filenames in os.walk(extern_path):
        # BUG FIX: the original used root.lstrip('python/'), which strips any
        # leading characters from the set {p,y,t,h,o,n,/} rather than the
        # literal 'python/' prefix (and mangles absolute paths). Remove the
        # exact prefix instead.
        if root.startswith('python/'):
            rel_root = root[len('python/'):]
        else:
            rel_root = root
        for filename in filenames:
            data_files.append(os.path.join('..', rel_root, filename))
def get_data_files(with_web=True):
    """Collect package data files; web assets are included unless disabled.

    :param with_web: when True, also gather the Flask web app's assets.
    :return: list of data-file paths.
    """
    data_files = []
    if with_web:
        web_subdirs = ('configuration', 'lib', 'static', 'templates', 'uwsgi_conf_files')
        for subdir in web_subdirs:
            add_data_file('python/marvin/web/%s/' % subdir, data_files)
    return data_files
def remove_args(parser):
    ''' Remove custom arguments from the parser '''
    # Collect every option string except --help, then drop each occurrence
    # from sys.argv so distutils never sees our custom flags.
    flags = [opt
             for action in parser._get_optional_actions()
             if '--help' not in action.option_strings
             for opt in action.option_strings]
    for flag in flags:
        if flag in sys.argv:
            sys.argv.remove(flag)
# requirements
# Parse requirements.txt into install_requires: relax exact pins (== -> >=)
# and skip comment/blank lines. Use a context manager so the file handle is
# closed promptly (the original left it to the garbage collector).
requirements_file = os.path.join(os.path.dirname(__file__), 'requirements.txt')
with open(requirements_file) as _req_fh:
    install_requires = [line.strip().replace('==', '>=') for line in _req_fh
                        if not line.strip().startswith('#') and line.strip() != '']

NAME = 'sdss-marvin'
# do not use x.x.x-dev. things complain. instead use x.x.xdev
VERSION = '2.3.3dev'
RELEASE = 'dev' not in VERSION
def run(data_files, packages):
    """Invoke setuptools.setup() with the marvin package metadata."""
    metadata = dict(
        name=NAME,
        version=VERSION,
        license='BSD3',
        description='Toolsuite for dealing with the MaNGA dataset',
        long_description=open('README.rst').read(),
        author='The Marvin Developers',
        author_email='havok2063@hotmail.com',
        keywords='marvin manga astronomy MaNGA',
        url='https://github.com/sdss/marvin',
        packages=packages,
        package_dir={'': 'python'},
        package_data={'': data_files},
        include_package_data=True,
        install_requires=install_requires,
        scripts=['bin/run_marvin', 'bin/check_marvin'],
        classifiers=[
            'Development Status :: 4 - Beta',
            'Environment :: Web Environment',
            'Framework :: Flask',
            'Intended Audience :: Science/Research',
            'License :: OSI Approved :: BSD License',
            'Natural Language :: English',
            'Operating System :: OS Independent',
            'Programming Language :: Python',
            'Programming Language :: Python :: 2.6',
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python :: 3.3',
            'Programming Language :: Python :: 3.4',
            'Topic :: Database :: Front-Ends',
            'Topic :: Documentation :: Sphinx',
            'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
            'Topic :: Scientific/Engineering :: Astronomy',
            'Topic :: Software Development :: Libraries :: Python Modules',
            'Topic :: Software Development :: User Interfaces'
        ],
    )
    setup(**metadata)
if __name__ == '__main__':

    # Custom parser to decide whether we include or not the web. By default we do.
    parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]))
    parser.add_argument('-w', '--noweb', dest='noweb', default=False, action='store_true',
                        help='Does not build the web.')

    # We use parse_known_args because we want to leave the remaining args for distutils
    args = parser.parse_known_args()[0]

    if args.noweb:
        # Exclude the Flask web app packages entirely.
        packages = find_packages(where='python', exclude=['marvin.web*'])
    else:
        packages = find_packages(where='python')

    data_files = get_data_files(with_web=not args.noweb)

    # Fetch the SDSS maskbits definitions and bundle them with the package data.
    maskbits_path = download_file('https://svn.sdss.org/public/repo/sdss/idlutils/'
                                  'trunk/data/sdss/sdssMaskbits.par')
    shutil.copy(maskbits_path, os.path.join(os.path.dirname(__file__),
                                            'python/marvin/data/',
                                            'sdssMaskbits.par'))

    # Now we remove all our custom arguments to make sure they don't interfere with distutils
    remove_args(parser)

    # Runs distutils
    run(data_files, packages)
| 33.651316
| 93
| 0.618768
|
4a11b1cd1ff503c915dfb780a532c1659b1ba207
| 1,999
|
py
|
Python
|
swig/python/samples/make_fuzzer_friendly_archive.py
|
gajgeospatial/gdal-3.2.2
|
f03032b8b734f611d5b3039c0e5cdbf81adc306e
|
[
"Apache-2.0"
] | null | null | null |
swig/python/samples/make_fuzzer_friendly_archive.py
|
gajgeospatial/gdal-3.2.2
|
f03032b8b734f611d5b3039c0e5cdbf81adc306e
|
[
"Apache-2.0"
] | null | null | null |
swig/python/samples/make_fuzzer_friendly_archive.py
|
gajgeospatial/gdal-3.2.2
|
f03032b8b734f611d5b3039c0e5cdbf81adc306e
|
[
"Apache-2.0"
] | 1
|
2022-02-21T06:31:07.000Z
|
2022-02-21T06:31:07.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ******************************************************************************
# $Id: make_fuzzer_friendly_archive.py 428d6fbc987332afb0ba6c7b6913390f7386e864 2020-01-17 22:19:28 +0100 Even Rouault $
#
# Project: GDAL
# Purpose: Make fuzzer friendly archive (only works in DEBUG mode)
# Author: Even Rouault, <even dot rouault at spatialys dot com>
#
# ******************************************************************************
# Copyright (c) 2016 Even Rouault, <even dot rouault at spatialys dot com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ******************************************************************************
import os
import sys

# Concatenate the input files (sys.argv[2:]) into a single "fuzzer friendly"
# archive at sys.argv[1]: a magic header line, then for each file a
# ***NEWFILE***:<basename> marker followed by its raw bytes.
# FIX: the original never closed the output file nor any input file; context
# managers guarantee deterministic closure.
with open(sys.argv[1], "wb") as fout:
    fout.write('FUZZER_FRIENDLY_ARCHIVE\n'.encode('ascii'))
    for filename in sys.argv[2:]:
        # Only the basename is recorded so archives are location-independent.
        fout.write(('***NEWFILE***:%s\n' % os.path.basename(filename)).encode('ascii'))
        with open(filename, 'rb') as fin:
            fout.write(fin.read())
| 48.756098
| 121
| 0.653827
|
4a11b2c672553de35fb777bcd84e78191a8595a1
| 1,308
|
py
|
Python
|
[tampered]addBlock.py
|
isidharthrai/Blockchain-Simulation-using-Python
|
a822011d85ef50696d95f6e897daa7899ca977f5
|
[
"MIT"
] | 1
|
2019-08-17T10:01:10.000Z
|
2019-08-17T10:01:10.000Z
|
[tampered]addBlock.py
|
SidharthRai/Blockchain-Simulation-using-Python
|
a822011d85ef50696d95f6e897daa7899ca977f5
|
[
"MIT"
] | null | null | null |
[tampered]addBlock.py
|
SidharthRai/Blockchain-Simulation-using-Python
|
a822011d85ef50696d95f6e897daa7899ca977f5
|
[
"MIT"
] | 5
|
2019-08-10T12:31:09.000Z
|
2019-12-09T02:14:50.000Z
|
import hashlib as hasher
import datetime as date
#importing the block
from block import Block
#importing the chain
import jupiter_block as jb
import pluto_block as pb
import neptune_block as nb
# Generate all later blocks in the blockchain
def next_block(last_block):
    """Build the block that follows *last_block* in the chain.

    NOTE(review): the successor copies the previous block's ``data`` and
    carries its ``hash`` forward unchanged — consistent with this file being
    the deliberately "[tampered]" variant of the simulation; confirm intent.
    """
    successor_index = last_block.index + 1
    successor_timestamp = date.datetime.now()
    return Block(successor_index, successor_timestamp,
                 last_block.data, last_block.hash)
# Create the blockchain seeded with its three genesis blocks.
blockchain = [jb.create_jupiter_block(), pb.create_pluto_block(), nb.create_neptune_block()]

# How many blocks to append after each existing (genesis) block.
num_of_blocks_to_add = 1

# Walk the initial chain; the inner counter is deliberately distinct from the
# outer index (the original shadowed `i`).
for genesis_idx in range(len(blockchain)):
    previous_block = blockchain[genesis_idx]
    for _ in range(num_of_blocks_to_add):
        block_to_add = next_block(previous_block)
        blockchain.append(block_to_add)
        previous_block = block_to_add
        # Tell everyone about it!
        print("Block #{} added ".format(block_to_add.data))
        print("Hash: {}".format(block_to_add.hash))
        print("TimeStamp: {}".format(block_to_add.timestamp))
        print("Previous hash: {} \n".format(block_to_add.previous_hash))
| 30.418605
| 93
| 0.712538
|
4a11b2e555c9081f76856ea4ab6bdaa55e944d7a
| 107
|
py
|
Python
|
testzld/test1.py
|
zoulida/sdufeQuant
|
dc3715a62f620c0a437daacfe9a113d5a6ecb62d
|
[
"Apache-2.0"
] | null | null | null |
testzld/test1.py
|
zoulida/sdufeQuant
|
dc3715a62f620c0a437daacfe9a113d5a6ecb62d
|
[
"Apache-2.0"
] | null | null | null |
testzld/test1.py
|
zoulida/sdufeQuant
|
dc3715a62f620c0a437daacfe9a113d5a6ecb62d
|
[
"Apache-2.0"
] | 1
|
2019-09-19T07:37:36.000Z
|
2019-09-19T07:37:36.000Z
|
__author__ = 'zoulida'


def func1():
    """Print this function's name."""
    print('func1')


def func2():
    """Print this function's name, then delegate to func1()."""
    print('func2')
    func1()


func1()
| 9.727273
| 22
| 0.542056
|
4a11b34c2cd56882854189146c30bb9c7b7f7ac3
| 6,674
|
py
|
Python
|
src/pyeff_system.py
|
pyflosic/pyeff
|
4b76fcc4a0bfb25f9f4106567d01b5ea02db6737
|
[
"Apache-2.0"
] | 3
|
2019-06-24T08:04:25.000Z
|
2020-05-26T03:45:45.000Z
|
src/pyeff_system.py
|
pyflosic/pyeff
|
4b76fcc4a0bfb25f9f4106567d01b5ea02db6737
|
[
"Apache-2.0"
] | null | null | null |
src/pyeff_system.py
|
pyflosic/pyeff
|
4b76fcc4a0bfb25f9f4106567d01b5ea02db6737
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 PyEFF developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'Author: S. Schwalbe'
'part of pyeff'
import numpy as np
class particle:
    'mother class'
    'particle (nuclei or electron)'
    'with properties: typ, label, position (px,py,pz), spin, energy, force (fx,fy,fz,fr)'

    def __init__(self, typ, label, px, py, pz, pr, spin, energy, fx, fy, fz, fr):
        """Store type, label, position/size, spin, energy and force components."""
        self.typ = typ
        self.label = label
        # position (and pr: radial size)
        self.px = px
        self.py = py
        self.pz = pz
        self.pr = pr
        # spin
        self.spin = spin
        # energy
        self.energy = energy
        # force components (fr: radial force)
        self.fx = fx
        self.fy = fy
        self.fz = fz
        self.fr = fr

    def update(self, px, py, pz, pr, spin, energy, fx, fy, fz, fr):
        """Add the given increments to every stored property (in place)."""
        self.px = self.px + px
        self.py = self.py + py
        self.pz = self.pz + pz
        self.pr = self.pr + pr
        self.spin = self.spin + spin
        self.energy = self.energy + energy
        self.fx = self.fx + fx
        self.fy = self.fy + fy
        self.fz = self.fz + fz
        self.fr = self.fr + fr

    def show(self):
        """Print a one-line table row for this particle.

        FIXES vs. original: (1) py2-only `print` statement converted to the
        parenthesized single-argument form, valid on both Python 2 and 3;
        (2) the original formatted float(self.label) with %10.0f, which
        raises ValueError for the chemical-symbol labels ('C', 'He', ...)
        that system() assigns — the label is now printed as a string.
        """
        print('%10s\t%10s\t%10.5f\t%10.5f\t%10.5f\t%10.5f\t%10.5f' % (self.typ, self.label, float(self.energy), float(self.fx), float(self.fy), float(self.fz), float(self.fr)))
class system(particle):
    'contains all nuclei and electrons of the system'
    # FIX vs. original: all py2-only `print` statements converted to the
    # parenthesized single-argument form, which behaves identically on
    # Python 2 and is valid on Python 3. All other logic is unchanged.

    def __init__(self, nuc_chemical_symbols, elec_chemical_symbols):
        """Create zero-initialised particle objects for every symbol given."""
        self.nuc_chemical_symbols = nuc_chemical_symbols
        self.elec_chemical_symbols = elec_chemical_symbols
        self.nuc = []
        self.elec = []
        for n in range(len(nuc_chemical_symbols)):
            n_tmp = particle(typ='nuclei',
                             label=nuc_chemical_symbols[n],
                             px=0,
                             py=0,
                             pz=0,
                             pr=0,
                             spin=0,
                             energy=0,
                             fx=0,
                             fy=0,
                             fz=0,
                             fr=0)
            self.nuc.append(n_tmp)
        for e in range(len(elec_chemical_symbols)):
            e_tmp = particle(typ='electron',
                             label=elec_chemical_symbols[e],
                             energy=0,
                             px=0,
                             py=0,
                             pz=0,
                             pr=0,
                             spin=0,
                             fx=0,
                             fy=0,
                             fz=0,
                             fr=0)
            self.elec.append(e_tmp)

    def total_energy(self):
        """Sum of the energies of all nuclei and electrons."""
        etot = 0
        for n in self.nuc:
            etot = etot + float(n.energy)
        for e in self.elec:
            etot = etot + float(e.energy)
        return etot

    def types(self):
        """List of particle type strings, nuclei first."""
        types = []
        for n in self.nuc:
            types.append(n.typ)
        for e in self.elec:
            types.append(e.typ)
        return types

    def chemical_symbols(self):
        """List of particle labels, nuclei first."""
        chemical_symbols = []
        for n in self.nuc:
            chemical_symbols.append(n.label)
        for e in self.elec:
            chemical_symbols.append(e.label)
        return chemical_symbols

    def positions(self):
        # positions as triple for all particles (nuclei, electrons)
        positions = []
        for n in self.nuc:
            positions.append([n.px, n.py, n.pz])
        for e in self.elec:
            positions.append([e.px, e.py, e.pz])
        return positions

    def forces(self):
        # forces as triple for all particles (nuclei, electrons)
        forces = []
        for n in self.nuc:
            forces.append([n.fx, n.fy, n.fz])
        for e in self.elec:
            forces.append([e.fx, e.fy, e.fz])
        return forces

    def rforces(self):
        # radial forces as 1d vector
        rforces = []
        for n in self.nuc:
            rforces.append(n.fr)
        for e in self.elec:
            rforces.append(e.fr)
        return rforces

    def forces1d(self, fix_nuc=None):
        # forces as 1d vector for optimization: 3 components per nucleus,
        # 4 per electron (the radial force is scaled by the electron size).
        # Components are sign-flipped — presumably for a minimiser; confirm.
        forces1d = []
        for n in self.nuc:
            if fix_nuc is not None:
                # Freeze nuclear positions by zeroing their forces.
                n.fx = 0
                n.fy = 0
                n.fz = 0
            forces1d.extend([-1 * n.fx, -1 * n.fy, -1 * n.fz])
        for e in self.elec:
            forces1d.extend([-1 * e.fx, -1 * e.fy, -1 * e.fz, -1 * (e.pr) * e.fr])
        return forces1d

    def positions1d(self):
        # positions as 1d vector for optimization; the electron size enters
        # as log(pr), matching the pr-scaled radial force in forces1d.
        positions1d = []
        for n in self.nuc:
            positions1d.extend([n.px, n.py, n.pz])
        for e in self.elec:
            positions1d.extend([e.px, e.py, e.pz, np.log(e.pr)])
        return positions1d

    def sizes(self):
        """List of particle sizes (pr), nuclei first."""
        sizes = []
        for n in self.nuc:
            sizes.append(n.pr)
        for e in self.elec:
            sizes.append(e.pr)
        return sizes

    def spins(self):
        """List of particle spins, nuclei first."""
        spins = []
        for n in self.nuc:
            spins.append(n.spin)
        for e in self.elec:
            spins.append(e.spin)
        return spins

    def show_all(self):
        """Print a table of all particles followed by the total energy."""
        separator = '%10s\t%10s\t%10s\t%10s\t%10s\t%10s\t%10s' % (('----------',) * 7)
        print(separator)
        print('%10s\t%10s\t%10s\t%10s\t%10s\t%10s\t%10s' % ('typ', 'label', 'energy', 'fx', 'fy', 'fz', 'fr'))
        print(separator)
        for n in self.nuc:
            n.show()
        for e in self.elec:
            e.show()
        print('\n')
        print('---------------')
        print(' Total Energy ')
        print('---------------')
        print('\n')
        print('Etot =\t %10s' % (self.total_energy()))
        print('\n')
if __name__ == "__main__":
    def main():
        # functionality test: build a CH3-like system and poke one electron.
        # FIX: py2-only print statements converted to the parenthesized form,
        # which is valid on both Python 2 and 3.
        nuc = ['C', 'H', 'H', 'H']
        elec = ['He', 'He', 'X', 'X']
        sys = system(nuc, elec)
        print(sys.nuc[0].label)
        print(len(sys.elec))
        (sys.elec[0]).update(px=1, py=0, pz=0, pr=0, spin=0, energy=0, fx=0, fy=0, fz=0, fr=0)
    main()
| 31.630332
| 171
| 0.493257
|
4a11b350eb44f724f04610e4da0b36d388f02b41
| 519
|
py
|
Python
|
strings/reverse_string.py
|
ProgrammerSnigdho/Python
|
0b3cc70dae31b99580dc0fbba75dde563c026e51
|
[
"MIT"
] | null | null | null |
strings/reverse_string.py
|
ProgrammerSnigdho/Python
|
0b3cc70dae31b99580dc0fbba75dde563c026e51
|
[
"MIT"
] | null | null | null |
strings/reverse_string.py
|
ProgrammerSnigdho/Python
|
0b3cc70dae31b99580dc0fbba75dde563c026e51
|
[
"MIT"
] | null | null | null |
# A string can be reversed with extended slicing.
txt = 'This is a sample text'
reversed_text = txt[::-1]  # step -1 walks the string back-to-front
print(reversed_text)


# The same idea wrapped in a reusable function.
def text_reversing(text):
    """Return *text* reversed."""
    return text[::-1]


result = text_reversing('Hi, How are you?')
print(result)
| 43.25
| 87
| 0.755299
|
4a11b360b47284848b3f36bb4e03ff8a8e919809
| 1,000
|
py
|
Python
|
py/atanet/sentiment/datasets/english_twitter.py
|
deletescape/atanet
|
0c88c769be22f6ee81ed833b7e82f7decb062a06
|
[
"MIT"
] | null | null | null |
py/atanet/sentiment/datasets/english_twitter.py
|
deletescape/atanet
|
0c88c769be22f6ee81ed833b7e82f7decb062a06
|
[
"MIT"
] | null | null | null |
py/atanet/sentiment/datasets/english_twitter.py
|
deletescape/atanet
|
0c88c769be22f6ee81ed833b7e82f7decb062a06
|
[
"MIT"
] | null | null | null |
from atanet.sentiment.datasets.set import SentimentDataset
from atanet.sentiment.language.language import Language
import pandas as pd
import zipfile
import os
# Split ratio passed to SentimentDataset — presumably the training fraction
# (remainder is the test split); confirm against the base class.
TEST_TRAIN_SPLIT = 0.85
# Maximum sample length passed to SentimentDataset — whether this counts
# tokens or characters depends on the base class; confirm.
MAX_LENGTH = 100
class EnglishTwitterDataset(SentimentDataset):
    """English Twitter sentiment dataset loaded from a bundled CSV archive."""

    def __init__(self):
        csv_path = './atanet/sentiment/datasets/twitter_english.csv'
        zip_path = './atanet/sentiment/datasets/twitter_english.zip'
        # On first use, unpack the shipped archive and normalise its filename.
        if not os.path.isfile(csv_path):
            with zipfile.ZipFile(zip_path, 'r') as archive:
                archive.extractall('./atanet/sentiment/datasets/')
            os.rename('./atanet/sentiment/datasets/training.1600000.processed.noemoticon.csv', csv_path)
        column_names = ['Sentiment', 'Id', 'Date', 'Flag', 'User', 'Text']
        frame = pd.read_csv(csv_path, encoding='ISO-8859-1', names=column_names)
        # Scale the raw sentiment codes down by 4 before handing off.
        frame.Sentiment /= 4
        super().__init__(MAX_LENGTH, TEST_TRAIN_SPLIT, frame)

    def get_language(self) -> Language:
        return Language.English
| 34.482759
| 100
| 0.692
|
4a11b5890e17cd60162c0a6c7d55ae396a89e881
| 1,254
|
py
|
Python
|
src/papyrus_scripts/utils/UniprotMatch.py
|
OlivierBeq/Papyrus-scripts
|
54fbe74116f36c810ba08b7ab9f7c4baec264506
|
[
"MIT"
] | 7
|
2021-11-09T10:05:42.000Z
|
2022-03-26T14:15:02.000Z
|
src/papyrus_scripts/utils/UniprotMatch.py
|
OlivierBeq/Papyrus-scripts
|
54fbe74116f36c810ba08b7ab9f7c4baec264506
|
[
"MIT"
] | null | null | null |
src/papyrus_scripts/utils/UniprotMatch.py
|
OlivierBeq/Papyrus-scripts
|
54fbe74116f36c810ba08b7ab9f7c4baec264506
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Functions to interact with UniProt."""
from io import StringIO
from typing import List, Union
import pandas as pd
import requests
def uniprot_mappings(query: Union[str, List[str]],
                     map_from: str = 'ID',
                     map_to: str = 'PDB_ID',
                     ) -> pd.DataFrame:
    """Map identifiers using the UniProt identifier mapping tool.

    :param query: list or space delimited string of identifiers
    :param map_from: type of input identifiers (default: accession)
    :param map_to: type of desired output identifiers
                   (default: PDB identifiers)
    :return: a DataFrame with one column per identifier namespace,
             named after *map_from* and *map_to*
    :raises ValueError: when the server does not answer with HTTP OK
    See: https://www.uniprot.org/help/api_idmapping
    """
    endpoint = 'https://www.uniprot.org/uploadlists/'
    # The service expects a single space-delimited identifier string.
    if isinstance(query, list):
        query = ' '.join(query)
    payload = {
        'from': map_from,
        'to': map_to,
        'format': 'tab',
        'query': query,
    }
    response = requests.post(endpoint, payload)
    if not response.ok:
        raise ValueError("query is wrongly formatted and resulted in a server failure")
    # The reply is a two-column tab-separated table headed 'From'/'To'.
    frame = pd.read_csv(StringIO(response.text), sep='\t')
    return frame.rename(columns={'To': map_to, 'From': map_from})
| 29.857143
| 87
| 0.600478
|
4a11b61e64ecef75d994efe9b0b6c8049ce31a2d
| 8,992
|
py
|
Python
|
odin/annotator/annotator_classification.py
|
rnt-pmi/odin
|
8cfddf04f964393ef30217aa5f4aa61229d7e811
|
[
"Apache-2.0"
] | 4
|
2021-01-09T10:46:31.000Z
|
2021-12-16T14:38:06.000Z
|
odin/annotator/annotator_classification.py
|
rnt-pmi/odin
|
8cfddf04f964393ef30217aa5f4aa61229d7e811
|
[
"Apache-2.0"
] | null | null | null |
odin/annotator/annotator_classification.py
|
rnt-pmi/odin
|
8cfddf04f964393ef30217aa5f4aa61229d7e811
|
[
"Apache-2.0"
] | 3
|
2021-01-09T10:46:15.000Z
|
2021-05-11T01:33:30.000Z
|
import os
import json
from PIL import Image
from matplotlib import pyplot as plt
from IPython.display import display
from tabulate import tabulate
from odin.classes.safe_writer import SafeWriter
from odin.classes import strings as labels_str
from odin.annotator import AnnotatorInterface
from odin.annotator import MetaPropertiesType
from odin.classes import TaskType
from odin.utils.leaflet_zoom_utils import get_image_container_zoom, show_new_image
class AnnotatorClassification(AnnotatorInterface):
    """Interactive annotator for image-classification datasets.

    Specialises :class:`AnnotatorInterface` for binary, single-label and
    multi-label classification (see the ``TaskType`` branches below):
    renders the current image, reacts to widget changes, keeps per-category
    counters and persists the annotated dataset as JSON.
    """

    def set_display_function(self, custom_display_function):
        """Select the image renderer: the built-ins "zoom_leaflet" or
        "default", or any user-supplied callable. Raises NotImplementedError
        for unrecognised string values (including None)."""
        self.image_container = None
        if custom_display_function is None or type(custom_display_function) is str:
            # if custom_display_function == "zoom_js":
            #     self.custom_display_function = self.__show_image_js_zoom
            if custom_display_function == "zoom_leaflet":
                self.image_container = get_image_container_zoom()
                with self.out:
                    display(self.image_container)
                self.custom_display_function = self.show_image_leaflet
            elif custom_display_function == "default":
                self.custom_display_function = self.show_image
            else:
                raise NotImplementedError(f"Function {custom_display_function} not implemented!")
        else:
            self.custom_display_function = custom_display_function

    def set_objects(self):
        """Cache the list of observations to annotate and derived counters."""
        self.objects = self.dataset_annotated["observations"]
        self.max_pos = len(self.objects) - 1
        self.count_images = len(self.dataset_annotated["observations"])

    def add_annotation_to_mapping(self, ann):
        """Record *ann* in the category counters.

        Skips observations that fail the validation function (or, without
        one, that are missing any required meta-property). Counters are only
        incremented the first time an observation id is seen.
        """
        if self.validate:
            if not self.validate_function(ann):
                return
        else:
            for k_name, v in self.properties_and_values.items():
                if k_name not in ann:
                    return
        classification_type = self.dataset_orig.classification_type
        if classification_type == TaskType.CLASSIFICATION_MULTI_LABEL:
            cat_names = [self.dataset_orig.get_category_name_from_id(c_id) for c_id in ann['categories']]
        elif classification_type == TaskType.CLASSIFICATION_SINGLE_LABEL or classification_type == TaskType.CLASSIFICATION_BINARY:
            cat_names = [self.dataset_orig.get_category_name_from_id(ann['category'])]
        if ann['id'] not in self.mapping['annotated_ids']:  # only count categories the first time this id is seen
            for cat_name in cat_names:
                self.mapping['categories_counter'][cat_name] += 1
        self.mapping['annotated_ids'].add(ann['id'])

    def update_mapping_from_whole_dataset(self):
        """Rebuild the category counters from every observation."""
        for ann in self.objects:
            self.add_annotation_to_mapping(ann)
        self.updated = True

    def update_annotation_counter_and_current_pos(self, dataset_annotated):
        """Position the annotator on the observation whose id matches the
        first observation's id.

        NOTE(review): the lookup key is always observation 0's id, so
        ``current_pos`` resolves to 0 — confirm this is the intended
        resume-from-last behaviour.
        """
        # prop_names = set(self.properties_and_values.keys())
        last_ann_id = dataset_annotated["observations"][0]['id']
        self.current_pos = next(i for i, a in enumerate(dataset_annotated["observations"]) if a['id'] == last_ann_id)

    def checkbox_changed(self, b):
        """Widget callback: write a checkbox/radio change into the current
        annotation, re-validate it, and save when on the last element."""
        if b['owner'].value is None or b['name'] != 'value':
            return
        class_name = b['owner'].description
        value = b['owner'].value
        # The widget's first DOM class carries the meta-property name.
        annotation_name = b['owner']._dom_classes[0]
        ann = self.get_image_record()
        if self.properties_and_values[annotation_name][0].value in [MetaPropertiesType.COMPOUND.value]:
            # Compound properties hold one boolean flag per possible value.
            if annotation_name not in ann.keys():
                ann[annotation_name] = {p: False for p in self.properties_and_values[annotation_name][1]}
            ann[annotation_name][class_name] = value
        else:  # UNIQUE VALUE
            ann[annotation_name] = value
        self.execute_validation(ann)
        if self.current_pos == self.max_pos:
            self.save_state()

    def show_name_func(self, image_record, path_img):
        """Print the file name plus its class label(s) when show_name is on."""
        if self.show_name:
            classification_type = self.dataset_orig.classification_type
            if classification_type == TaskType.CLASSIFICATION_MULTI_LABEL:
                categories = image_record['categories']
                str_output = os.path.basename(path_img) + ' - {}: '.format(labels_str.info_class)
                str_output += ','.join(['{} [id={}]'.format(self.dataset_orig.get_category_name_from_id(category),
                                                            category) for category in categories])
                print(str_output)
            elif classification_type == TaskType.CLASSIFICATION_SINGLE_LABEL or classification_type == TaskType.CLASSIFICATION_BINARY:
                category = image_record['category']
                print(os.path.basename(path_img) + ' - {}: {} [id={}]'.format(labels_str.info_class,
                                                                              self.dataset_orig.get_category_name_from_id(category),
                                                                              category))

    def show_image_leaflet(self, image_record):
        """Render the current image inside the leaflet zoom container."""
        path_img = os.path.join(self.dataset_orig.images_abs_path, image_record['file_name'])
        self.show_name_func(image_record, path_img)
        if not os.path.exists(path_img):
            print(f"{labels_str.info_missing} {path_img}")
            return
        show_new_image(self.image_container, path_img)

    def show_image(self, image_record):
        """Render the current image with matplotlib."""
        path_img = os.path.join(self.dataset_orig.images_abs_path, image_record['file_name'])
        self.show_name_func(image_record, path_img)
        if not os.path.exists(path_img):
            print(f"{labels_str.info_missing} {path_img}")
            return
        # read img from path and show it
        img = Image.open(path_img)
        plt.figure(figsize=self.fig_size)
        if not self.show_axis:
            plt.axis('off')
        plt.imshow(img)
        plt.show()

    def save_state(self):
        """Write the annotated dataset to disk as indented JSON and update
        the counters for the current observation."""
        w = SafeWriter(os.path.join(self.file_path_for_json), "w")
        w.write(json.dumps(self.dataset_annotated, indent=4))
        w.close()
        self.add_annotation_to_mapping(self.dataset_annotated['observations'][self.current_pos])

    def perform_action(self):
        """Refresh the UI for the current position: navigation buttons,
        property widgets, validation state and the displayed image."""
        self.next_button.disabled = (self.current_pos == self.max_pos)
        self.previous_button.disabled = (self.current_pos == 0)
        current_ann = self.objects[self.current_pos]
        self.change_check_radio_boxes_value(current_ann)
        image_record = self.get_image_record()
        self.execute_validation(image_record)
        with self.out:
            # No leaflet container in use: clear the previous output before
            # the display function draws into self.out again.
            if self.image_container is None:
                self.out.clear_output()
            self.custom_display_function(image_record)
        # Detach the observer so the programmatic index update does not
        # re-trigger selected_index.
        self.text_index.unobserve(self.selected_index)
        self.text_index.value = self.current_pos + 1
        self.text_index.observe(self.selected_index)

    def get_image_record(self):
        """Return the observation currently being annotated."""
        img_record = self.objects[self.current_pos]
        return img_record

    def on_save_clicked(self, b):
        """'Save' button callback: persist state, then run the save hook
        with the output image path."""
        self.save_state()
        image_record = self.get_image_record()
        path_img = os.path.join(self.output_directory, 'JPEGImages', image_record['file_name'])
        self.save_function(path_img)

    def on_reset_clicked(self, b):
        """'Reset' button callback: drop every meta-property from the
        current annotation and refresh widgets/validation."""
        current_ann = self.objects[self.current_pos]
        for m_k, m_v in self.properties_and_values.items():
            if m_k in current_ann:
                del current_ann[m_k]
        self.change_check_radio_boxes_value(current_ann)
        self.execute_validation(current_ann)

    def print_statistics(self):
        """Print a table of annotated-object counts per category."""
        if not self.updated:
            self.update_mapping_from_whole_dataset()
        table = []
        total = 0
        for c_k, c_number in self.mapping["categories_counter"].items():
            table.append([c_k, c_number])
            total += c_number
        table = sorted(table, key=lambda x: x[0])
        classification_type = self.dataset_orig.classification_type
        # show total images only in binary/single-label
        if classification_type == TaskType.CLASSIFICATION_MULTI_LABEL:
            table.append([labels_str.info_total, '{}'.format(total)])
        elif classification_type == TaskType.CLASSIFICATION_SINGLE_LABEL or classification_type == TaskType.CLASSIFICATION_BINARY:
            table.append([labels_str.info_total, '{}/{}'.format(total, self.count_images)])
        print(tabulate(table, headers=[labels_str.info_class_name, labels_str.info_ann_objects]))

    def print_results(self):
        """Print how many observations pass validation (complete) versus
        fail it, listing the 1-based positions of incomplete ones. Does
        nothing unless a validation function is configured."""
        complete, incomplete = 0, 0
        incomplete_srt = ""
        if self.validate:
            for index, record in enumerate(self.objects):
                if self.validate_function(record):
                    complete += 1
                else:
                    incomplete += 1
                    incomplete_srt += f" {index+1}"
            print(f"{labels_str.info_completed} {complete}")
            print(f"{labels_str.info_incomplete} {incomplete}")
            if incomplete > 0:
                print(f"{labels_str.info_positions} {incomplete_srt}")
| 43.230769
| 134
| 0.659364
|
4a11b652ac33c507f002904ea94b06a78730819d
| 392
|
py
|
Python
|
example/config.py
|
xremap/xremap-python
|
0784c7fbd30b17362de110829e66a051dc52557c
|
[
"MIT"
] | 2
|
2021-12-22T13:34:43.000Z
|
2022-03-30T14:25:37.000Z
|
example/config.py
|
xremap/xremap-python
|
0784c7fbd30b17362de110829e66a051dc52557c
|
[
"MIT"
] | null | null | null |
example/config.py
|
xremap/xremap-python
|
0784c7fbd30b17362de110829e66a051dc52557c
|
[
"MIT"
] | null | null | null |
from xremap.dsl import *
# Remap CapsLock to the left Control key in every application.
define_modmap({
    'CapsLock': 'Control_L',
})

# Only inside Gnome-terminal: right Control acts as Escape.
define_conditional_modmap({ 'only': ['Gnome-terminal'] }, {
    'Control_R': 'Esc',
})

# Chrome: Ctrl+Alt+j / Ctrl+Alt+k cycle tabs forward / backward.
define_keymap({ 'only': ['Google-chrome'] }, {
    'C-M-j': 'C-Tab',
    'C-M-k': 'C-Shift-Tab',
}, 'Chrome')

# Global Emacs-style cursor movement bindings.
define_keymap({}, {
    'C-b': 'left',
    'C-f': 'right',
    'C-p': 'up',
    'C-n': 'down',
}, 'Emacs-like keys')
| 17.043478
| 59
| 0.52551
|
4a11b85b2fdb856cad4603c3cbde022c1c5b8c95
| 624
|
py
|
Python
|
datasources/migrations/0017_count_external_requests.py
|
tiferrei/PEDASI
|
b819aee93de99c00a1aa3eb9d32102b89f72459e
|
[
"MIT"
] | null | null | null |
datasources/migrations/0017_count_external_requests.py
|
tiferrei/PEDASI
|
b819aee93de99c00a1aa3eb9d32102b89f72459e
|
[
"MIT"
] | 18
|
2019-02-27T12:39:27.000Z
|
2021-03-24T16:32:47.000Z
|
datasources/migrations/0017_count_external_requests.py
|
Southampton-RSG/PEDASI-IoT
|
25a111ac7cf4b23fee50ad8eac6ea21564954859
|
[
"MIT"
] | 1
|
2021-02-16T17:47:15.000Z
|
2021-02-16T17:47:15.000Z
|
# Generated by Django 2.0.8 on 2018-12-13 09:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add two non-editable counters to the DataSource model.

    Both ``external_requests`` and ``external_requests_total`` are
    PositiveIntegerFields defaulting to 0. (Presumably a rolling counter
    plus an all-time total — confirm semantics in the model code.)
    """

    dependencies = [
        ('datasources', '0016_prov_exempt_help_text'),
    ]

    operations = [
        migrations.AddField(
            model_name='datasource',
            name='external_requests',
            field=models.PositiveIntegerField(default=0, editable=False),
        ),
        migrations.AddField(
            model_name='datasource',
            name='external_requests_total',
            field=models.PositiveIntegerField(default=0, editable=False),
        ),
    ]
| 26
| 73
| 0.621795
|
4a11b890dc7c205164ea543831f552cad1c8cc1a
| 4,330
|
py
|
Python
|
third_party/ctc_decoders/setup.py
|
phecda-xu/PaddleSpeech
|
6bf0d3bf57229091a74912633e837dabc6215c86
|
[
"Apache-2.0"
] | 1
|
2022-02-26T01:48:00.000Z
|
2022-02-26T01:48:00.000Z
|
third_party/ctc_decoders/setup.py
|
ziwenag/PaddleSpeech
|
89e69ee10ee02b875af663146bc46fcf095e812a
|
[
"Apache-2.0"
] | null | null | null |
third_party/ctc_decoders/setup.py
|
ziwenag/PaddleSpeech
|
89e69ee10ee02b875af663146bc46fcf095e812a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to build and install decoder package."""
import argparse
import glob
import multiprocessing.pool
import os
import platform
import sys
from setuptools import distutils
from setuptools import Extension
from setuptools import setup
# Parse our custom flag only; parse_known_args() leaves unrecognised
# arguments in args[1] so they can be handed to setuptools' setup() below.
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
    "--num_processes",
    default=1,
    type=int,
    help="Number of cpu processes to build package. (default: %(default)d)")
args = parser.parse_known_args()
# reconstruct sys.argv to pass to setup below
sys.argv = [sys.argv[0]] + args[1]
# monkey-patch for parallel compilation
# See: https://stackoverflow.com/a/13176803
def parallelCCompile(self,
                     sources,
                     output_dir=None,
                     macros=None,
                     include_dirs=None,
                     debug=0,
                     extra_preargs=None,
                     extra_postargs=None,
                     depends=None):
    """Drop-in replacement for distutils' CCompiler.compile that builds the
    object files concurrently on a thread pool sized by --num_processes.

    Signature mirrors CCompiler.compile; returns the list of object files.
    """
    # those lines are copied from distutils.ccompiler.CCompiler directly
    macros, objects, extra_postargs, pp_opts, build = self._setup_compile(
        output_dir, macros, include_dirs, sources, depends, extra_postargs)
    cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)

    # parallel code
    def _single_compile(obj):
        # Compile one object; objects with no build entry are skipped.
        try:
            src, ext = build[obj]
        except KeyError:
            return
        self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)

    # convert to list, imap is evaluated on-demand
    thread_pool = multiprocessing.pool.ThreadPool(args[0].num_processes)
    list(thread_pool.imap(_single_compile, objects))
    return objects
def compile_test(header, library):
    """Probe the build environment: return True when a trivial C++ program
    that includes *header* and links *library* compiles with g++.

    Works by compiling an empty main() fed via a bash here-string; the
    scratch binary is removed on success. Any failure (missing header,
    missing library, or no bash/g++ at all) yields False.
    """
    scratch = os.path.join(os.path.dirname(__file__), "dummy")
    shell_cmd = (
        f"bash -c \"g++ -include {header} -l{library} -x c++ - "
        f"<<<'int main() {{}}' -o {scratch} >/dev/null 2>/dev/null "
        f"&& rm {scratch} 2>/dev/null\""
    )
    return os.system(shell_cmd) == 0
# hack compile to support parallel compiling
distutils.ccompiler.CCompiler.compile = parallelCCompile

# Bundle KenLM and OpenFst sources into the extension, excluding their
# executables and test files.
FILES = glob.glob('kenlm/util/*.cc') \
    + glob.glob('kenlm/lm/*.cc') \
    + glob.glob('kenlm/util/double-conversion/*.cc')
FILES += glob.glob('openfst-1.6.3/src/lib/*.cc')
# yapf: disable
FILES = [
    fn for fn in FILES if not (fn.endswith('main.cc') or fn.endswith('test.cc')
                               or fn.endswith('unittest.cc'))
]
# yapf: enable
LIBS = ['stdc++']
if platform.system() != 'Darwin':
    # librt is not available on macOS; link it everywhere else.
    LIBS.append('rt')
ARGS = ['-O3', '-DNDEBUG', '-DKENLM_MAX_ORDER=6', '-std=c++11']
# Enable the optional compression backends only when their dev headers
# and libraries are actually present on this machine.
if compile_test('zlib.h', 'z'):
    ARGS.append('-DHAVE_ZLIB')
    LIBS.append('z')
if compile_test('bzlib.h', 'bz2'):
    ARGS.append('-DHAVE_BZLIB')
    LIBS.append('bz2')
if compile_test('lzma.h', 'lzma'):
    ARGS.append('-DHAVE_XZLIB')
    LIBS.append('lzma')
# Generate the SWIG python wrapper (*.cxx) before building the extension.
os.system('swig -python -c++ ./decoders.i')
decoders_module = [
    Extension(
        name='_paddlespeech_ctcdecoders',
        sources=FILES + glob.glob('*.cxx') + glob.glob('*.cpp'),
        language='c++',
        include_dirs=[
            '.',
            'kenlm',
            'openfst-1.6.3/src/include',
            'ThreadPool',
        ],
        libraries=LIBS,
        extra_compile_args=ARGS)
]
setup(
    name='paddlespeech_ctcdecoders',
    version='0.1.1',
    description="CTC decoders in paddlespeech",
    author="PaddlePaddle Speech and Language Team",
    author_email="paddlesl@baidu.com",
    url="https://github.com/PaddlePaddle/PaddleSpeech",
    license='Apache 2.0, GNU Lesser General Public License v3 (LGPLv3) (LGPL-3)',
    ext_modules=decoders_module,
    py_modules=['paddlespeech_ctcdecoders'])
| 31.376812
| 81
| 0.646882
|
4a11b961e1009381a99796b7f29d314c70f29dc0
| 5,938
|
py
|
Python
|
ElectroWeakAnalysis/Skimming/test/EWK_DiElectronSkim_Drop.py
|
SWuchterl/cmssw
|
769b4a7ef81796579af7d626da6039dfa0347b8e
|
[
"Apache-2.0"
] | 6
|
2017-09-08T14:12:56.000Z
|
2022-03-09T23:57:01.000Z
|
ElectroWeakAnalysis/Skimming/test/EWK_DiElectronSkim_Drop.py
|
SWuchterl/cmssw
|
769b4a7ef81796579af7d626da6039dfa0347b8e
|
[
"Apache-2.0"
] | 545
|
2017-09-19T17:10:19.000Z
|
2022-03-07T16:55:27.000Z
|
ElectroWeakAnalysis/Skimming/test/EWK_DiElectronSkim_Drop.py
|
SWuchterl/cmssw
|
769b4a7ef81796579af7d626da6039dfa0347b8e
|
[
"Apache-2.0"
] | 14
|
2017-10-04T09:47:21.000Z
|
2019-10-23T18:04:45.000Z
|
import FWCore.ParameterSet.Config as cms
# CMSSW configuration: skim di-electron events (one good GsfElectron plus
# one good SuperCluster above threshold) after an HLT single-electron filter.
process = cms.Process("EWKDiElectronSkim")

process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("Configuration.StandardSequences.Geometry_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")

process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )

# source
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring(
        'rfio:/tmp/ikesisog/10A96AFC-E17C-DE11-A90E-001D0967D9CC.root'
    )
)

# -1 conventionally means "process all events" in CMSSW.
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )

process.GlobalTag.globaltag = cms.string('MC_31X_V3::All')

# HLT filter
import HLTrigger.HLTfilters.hltHighLevel_cfi
process.EWK_DiElectronHLTFilter = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone()
# Uncomment this to access 8E29 menu and filter on it
process.EWK_DiElectronHLTFilter.TriggerResultsTag = cms.InputTag("TriggerResults","","HLT8E29")
process.EWK_DiElectronHLTFilter.HLTPaths = ["HLT_Ele15_LW_L1R"]

# Make a collection of good SuperClusters.
#
# Before selection is made, merge the Barrel and EndCap SC's.
process.superClusterMerger = cms.EDProducer("EgammaSuperClusterMerger",
    src = cms.VInputTag(cms.InputTag('correctedHybridSuperClusters'), cms.InputTag('correctedMulti5x5SuperClustersWithPreshower'))
)

# Get the above merged SC's and select the particle (gamma) to greate SC's Candidates.
process.superClusterCands = cms.EDProducer("ConcreteEcalCandidateProducer",
    src = cms.InputTag("superClusterMerger"), particleType = cms.string('gamma')
)

# Get the above SC's Candidates and place a cut on their Et.
process.goodSuperClusters = cms.EDFilter("CandViewRefSelector",
    src = cms.InputTag("superClusterCands"),
    cut = cms.string('et > 20.0'),
    filter = cms.bool(True)
)

process.superClusterFilter = cms.Sequence(process.superClusterMerger + process.superClusterCands + process.goodSuperClusters)

# Make a collections on good Electrons.
#
process.goodElectrons = cms.EDFilter("CandViewSelector",
    src = cms.InputTag("gsfElectrons"),
    cut = cms.string('pt > 20.0'),
    filter = cms.bool(True)
)

# Filter the above two collections (good SuperClusters and good Electrons):
# require at least one electron+supercluster pair with invariant mass > 3 GeV.
process.electronSuperClusterCombiner = cms.EDFilter("CandViewShallowCloneCombiner",
    filter = cms.bool(True),
    checkCharge = cms.bool(False),
    cut = cms.string('mass > 3.0'),
    decay = cms.string('goodElectrons goodSuperClusters')
)

process.electronSuperClusterCounter = cms.EDFilter("CandViewCountFilter",
    src = cms.InputTag("electronSuperClusterCombiner"),
    minNumber = cms.uint32(1)
)

process.electronSuperClusterFilter = cms.Sequence(process.electronSuperClusterCombiner + process.electronSuperClusterCounter)

# Skim path
process.EWK_DiElectronSkimPath = cms.Path(process.EWK_DiElectronHLTFilter +
                                          process.goodElectrons +
                                          process.superClusterFilter +
                                          process.electronSuperClusterFilter
                                          )

# Output module configuration
from Configuration.EventContent.EventContent_cff import *
EWK_DiElectronSkimEventContent = cms.PSet(
    outputCommands = cms.untracked.vstring()
)

# Start from the AOD content list, then drop everything and re-keep only
# the products listed below.
EWK_DiElectronSkimEventContent.outputCommands.extend(AODEventContent.outputCommands)
EWK_DiElectronSkimEventContent.outputCommands.extend(
    cms.untracked.vstring('drop *',
                          "keep recoSuperClusters_*_*_*",
                          "keep *_gsfElectrons_*_*",
                          "keep recoGsfTracks_electronGsfTracks_*_*",
                          "keep *_gsfElectronCores_*_*",
                          "keep *_correctedHybridSuperClusters_*_*",
                          "keep *_correctedMulti5x5SuperClustersWithPreshower_*_*",
                          "keep edmTriggerResults_*_*_*",
                          "keep recoCaloMETs_*_*_*",
                          "keep recoMETs_*_*_*",
                          "keep *_particleFlow_electrons_*",
                          "keep *_pfMet_*_*",
                          "keep *_multi5x5SuperClusterWithPreshower_*_*",
                          "keep recoVertexs_*_*_*",
                          "keep *_hltTriggerSummaryAOD_*_*",
                          "keep floatedmValueMap_*_*_*",
                          "keep recoBeamSpot_*_*_*" )
)

# Write out only events that passed the skim path.
EWK_DiElectronSkimEventSelection = cms.PSet(
    SelectEvents = cms.untracked.PSet(
        SelectEvents = cms.vstring(
            'EWK_DiElectronSkimPath')
    )
)

process.EWK_DiElectronSkimOutputModule = cms.OutputModule("PoolOutputModule",
    EWK_DiElectronSkimEventContent,
    EWK_DiElectronSkimEventSelection,
    dataset = cms.untracked.PSet(
        filterName = cms.untracked.string('EWKSKIMEMET'),
        dataTier = cms.untracked.string('USER')
    ),
    fileName = cms.untracked.string('EWKDiElectronSkim.root')
)

process.outpath = cms.EndPath(process.EWK_DiElectronSkimOutputModule)
| 43.343066
| 162
| 0.60037
|
4a11b97c12636028c928e8d2db31b9b87eaa217b
| 1,725
|
py
|
Python
|
pay-api/src/pay_api/resources/code.py
|
thorwolpert/sbc-pay
|
ea355dfb13e783ed1e86ed92efaa45293463c348
|
[
"Apache-2.0"
] | 4
|
2020-03-23T21:37:02.000Z
|
2021-06-15T11:25:22.000Z
|
pay-api/src/pay_api/resources/code.py
|
thorwolpert/sbc-pay
|
ea355dfb13e783ed1e86ed92efaa45293463c348
|
[
"Apache-2.0"
] | 757
|
2019-05-02T17:53:52.000Z
|
2022-03-31T22:42:01.000Z
|
pay-api/src/pay_api/resources/code.py
|
thorwolpert/sbc-pay
|
ea355dfb13e783ed1e86ed92efaa45293463c348
|
[
"Apache-2.0"
] | 39
|
2019-01-30T20:05:36.000Z
|
2022-03-24T15:07:54.000Z
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource for code endpoints."""
from http import HTTPStatus
from flask_restx import Namespace, Resource, cors
from pay_api.services.code import Code as CodeService
from pay_api.utils.trace import tracing as _tracing
from pay_api.utils.util import cors_preflight
API = Namespace('codes', description='Payment System - Codes')
@cors_preflight('GET')
@API.route('/<string:code_type>', methods=['GET', 'OPTIONS'])
class Codes(Resource):
    """Endpoint resource returning every code value of a given type."""

    @staticmethod
    @cors.crossdomain(origin='*')
    @_tracing.trace()
    def get(code_type):
        """Return all code values for *code_type* with HTTP 200."""
        values = CodeService.find_code_values_by_type(code_type)
        return values, HTTPStatus.OK
@cors_preflight('GET')
@API.route('/<string:code_type>/<string:code>', methods=['GET', 'OPTIONS'])
class Code(Resource):
    """Endpoint resource returning a single code value."""

    @staticmethod
    @cors.crossdomain(origin='*')
    @_tracing.trace()
    def get(code_type, code):
        """Return the code value matching *code_type* and *code* with HTTP 200."""
        value = CodeService.find_code_value_by_type_and_code(code_type, code)
        return value, HTTPStatus.OK
| 33.823529
| 91
| 0.729855
|
4a11ba0b889637770433cbf2513b112594b5a2d9
| 981
|
py
|
Python
|
snacks/views.py
|
okayjones/django-x
|
5e5e8056e9181fef5bca473e0351242a1b272aea
|
[
"MIT"
] | null | null | null |
snacks/views.py
|
okayjones/django-x
|
5e5e8056e9181fef5bca473e0351242a1b272aea
|
[
"MIT"
] | null | null | null |
snacks/views.py
|
okayjones/django-x
|
5e5e8056e9181fef5bca473e0351242a1b272aea
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
from .models import Snack
from django.urls import reverse_lazy
from django.contrib.auth.mixins import LoginRequiredMixin
class SnackListView(LoginRequiredMixin, ListView):
    """List all snacks; login required."""
    template_name = 'snacks/snack-list.html'
    model = Snack
class SnackDetailView(LoginRequiredMixin, DetailView):
    """Show a single snack; login required."""
    template_name = 'snacks/snack-detail.html'
    model = Snack
class SnackCreateView(LoginRequiredMixin, CreateView):
    """Create a snack from name/description/user; login required."""
    template_name = 'snacks/snack-create.html'
    model = Snack
    fields = ['name', 'description', 'user']
class SnackUpdateView(LoginRequiredMixin, UpdateView):
    """Edit a snack's name/description/user; login required."""
    template_name = 'snacks/snack-update.html'
    model = Snack
    fields = ['name', 'description', 'user']
class SnackDeleteView(LoginRequiredMixin, DeleteView):
    """Delete a snack, then redirect to the snack list; login required."""
    template_name = 'snacks/snack-delete.html'
    model = Snack
    # After a successful delete, go back to the list page.
    success_url = reverse_lazy('snack_list')
| 33.827586
| 89
| 0.762487
|
4a11ba1769a78e5721ce273064204891690e9902
| 390
|
py
|
Python
|
architecture/main.py
|
elisesweetnam/ES595project
|
7c76a0d325b9ca4f31ab0723a217e621902a8100
|
[
"MIT"
] | null | null | null |
architecture/main.py
|
elisesweetnam/ES595project
|
7c76a0d325b9ca4f31ab0723a217e621902a8100
|
[
"MIT"
] | null | null | null |
architecture/main.py
|
elisesweetnam/ES595project
|
7c76a0d325b9ca4f31ab0723a217e621902a8100
|
[
"MIT"
] | null | null | null |
# import our modules
# this module coordinates the other modules by importing them
import data_util
import db_util
import server
def main():
    """Entry point: start the server, then run the data-handling step."""
    server.handleServer()
    data_util.handleData()


if __name__ == "__main__":
    main()
| 22.941176
| 55
| 0.674359
|
4a11bbf785be2df1dd3dd438ab1a9cddf4c9ba8a
| 1,170
|
py
|
Python
|
megengine_release/configs/freeanchor_res18_coco_3x_800size.py
|
megvii-research/ICD
|
a97e0ecd9b69dbc0e3c2b8168c1d72ea79c6641b
|
[
"Apache-2.0"
] | 32
|
2021-11-09T11:19:21.000Z
|
2022-03-21T17:37:32.000Z
|
configs/freeanchor_res18_coco_3x_800size.py
|
Senwang98/ICD
|
fdda393088fa31ac6dc9ddbd7ec3e7008ea32ff4
|
[
"Apache-2.0"
] | 3
|
2022-02-28T08:51:13.000Z
|
2022-03-30T09:16:41.000Z
|
configs/freeanchor_res18_coco_3x_800size.py
|
Senwang98/ICD
|
fdda393088fa31ac6dc9ddbd7ec3e7008ea32ff4
|
[
"Apache-2.0"
] | 4
|
2021-11-11T11:59:05.000Z
|
2022-03-30T03:26:41.000Z
|
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from megengine import hub
import models
class CustomFreeAnchorConfig(models.FreeAnchorConfig):
    """FreeAnchor configuration variant using a ResNet-18 backbone.

    Overrides the backbone name and the FPN input channel widths
    ([128, 256, 512]) relative to the base FreeAnchorConfig.
    """

    def __init__(self):
        super().__init__()
        self.backbone = "resnet18"
        self.fpn_in_channels = [128, 256, 512]
@hub.pretrained(
    "https://data.megengine.org.cn/models/weights/"
    "freeanchor_res18_coco_3x_800size_38dot1_3d0559a8.pkl"
)
def freeanchor_res18_coco_3x_800size(**kwargs):
    r"""
    FreeAnchor trained from COCO dataset.
    `"FreeAnchor" <https://arxiv.org/abs/1909.02466>`_
    `"FPN" <https://arxiv.org/abs/1612.03144>`_
    `"COCO" <https://arxiv.org/abs/1405.0312>`_

    Builds the ResNet-18 FreeAnchor detector; the ``hub.pretrained``
    decorator attaches the COCO-trained res18 weights.
    """
    # Fix: use the ResNet-18 config defined above. The base
    # models.FreeAnchorConfig would build a different backbone, which
    # cannot accept the res18 checkpoint this function is bound to.
    cfg = CustomFreeAnchorConfig()
    # Full detector weights are loaded, so ImageNet backbone
    # pretraining is unnecessary.
    cfg.backbone_pretrained = False
    return models.FreeAnchor(cfg, **kwargs)
# Module-level entry points consumed by the training/eval tooling:
# the model class and the config class to instantiate it with.
Net = models.FreeAnchor
Cfg = CustomFreeAnchorConfig
| 29.25
| 88
| 0.711966
|
4a11bc084f0a427f8208c05df22afeb4c87d178a
| 3,875
|
py
|
Python
|
test/gen_events.py
|
wwt/csna
|
cfef9187ec18b9654fe7a3632afd38a3ecc1f2a2
|
[
"Apache-2.0"
] | 1
|
2022-03-10T18:07:20.000Z
|
2022-03-10T18:07:20.000Z
|
test/gen_events.py
|
wwt/csna
|
cfef9187ec18b9654fe7a3632afd38a3ecc1f2a2
|
[
"Apache-2.0"
] | null | null | null |
test/gen_events.py
|
wwt/csna
|
cfef9187ec18b9654fe7a3632afd38a3ecc1f2a2
|
[
"Apache-2.0"
] | null | null | null |
#!/opt/soar/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 - 2022 World Wide Technology
# All rights reserved.
#
# author: Joel W. King @joelwking - World Wide Technology
#
# references:
# https://kc.mcafee.com/resources/sites/MCAFEE/content/live/CORP_KNOWLEDGEBASE/78000/KB78712/en_US/CEF_White_Paper_20100722.pdf
#
# export PH_AUTH_TOKEN=2adnymvMJMredactedHOM+xBGUNs1wEk=
# export PH_SERVER=54.237.22.123
#
import time
from datetime import datetime
from datetime import timedelta
import os
import sys
import yaml
import argparse
#
# Download from https://raw.githubusercontent.com/joelwking/Phantom-Cyber/master/REST_ingest/PhantomIngest.py
#
import PhantomIngest as ingest
# Default look-back window, in minutes, for generated event timestamps.
TIME_SPAN = 60

# Shared ingest client; server address and auth token come from the environment.
p = ingest.PhantomIngest(os.getenv('PH_SERVER'), os.getenv('PH_AUTH_TOKEN'))
def get_fileobj(filepath=None):
    """Open *filepath* for reading.

    Returns the open file object, or False when the file cannot be opened
    (missing, unreadable, or filepath is not a path at all).
    """
    try:
        return open(filepath, 'r')
    except (IOError, OSError, TypeError):
        return False
def add_event(document, custom=False):
    """ Add a container and artifact(s) based on the YAML definition file.

    document: one parsed YAML document; must contain a 'container' mapping.
    custom: when True, fill computed defaults into custom CEF time fields.
    Returns False when the container cannot be created; otherwise None.
    Failures on individual artifacts are printed but do not abort the rest.
    """
    try:
        document['container']
    except KeyError:
        print("missing required field 'container'")
        return False
    # Container kwargs default to 'NONE' when absent from the document.
    container = {}
    for key in ('name', 'description', 'label'):
        container[key] = document['container'].get(key, 'NONE')
    try:
        container_id = p.add_container(**container)
        print(f'Added container: {container_id}')
    except AssertionError as e:
        print("Any HTTP return code other than OK %s" % e)
        return False
    except Exception as e:
        print("Typically the phantom host did not respond, a connection error %s" % e)
        return False
    _artifact = {}
    for artifact in document['container'].get('artifacts'):
        for key in ('name', 'source_data_identifier'):
            _artifact[key] = artifact.get(key, 'NONE')
        cef = artifact.get('cef', dict())
        if custom:
            cef = add_defaults_for_custom_fields(cef)
        meta_data = artifact.get('meta_data', dict())
        try:
            artifact_id = p.add_artifact(container_id, cef, meta_data, **_artifact)
            print(f' |--- artifact: {artifact_id}')
        except (AssertionError, Exception) as e:
            print(f'Failure adding artifact: {e}')
        # Reset so the next artifact starts from a clean kwargs dict.
        _artifact = {}
    return
def add_defaults_for_custom_fields(cef, time_span=None):
    """ If the string 'default' is the value for startTime or
    deviceCustomDate1, calculate concrete values for them.

    cef: CEF field mapping; mutated in place and also returned.
    time_span: look-back window in minutes; defaults to module TIME_SPAN.

    startTime becomes milliseconds-since-epoch, time_span minutes ago;
    deviceCustomDate1 becomes the same instant formatted as an ISO-8601
    UTC string.
    """
    span = TIME_SPAN if time_span is None else time_span
    # Use .get so documents that omit these fields no longer raise KeyError.
    if cef.get('startTime') == 'default':
        cef['startTime'] = int((time.time() - (60 * span)) * 1000)  # milliseconds since epoch
    if cef.get('deviceCustomDate1') == 'default':
        # Format directly; the old strftime->strptime->strftime round-trip
        # produced the identical string (the format has second precision).
        start_time = datetime.utcnow() - timedelta(minutes=span)
        cef['deviceCustomDate1'] = start_time.strftime('%Y-%m-%dT%H:%M:%SZ')
    return cef
def main():
    """Parse the CLI, load the YAML definition file and create one SOAR
    event per YAML document it contains. Exits with status 1 when the
    file cannot be opened."""
    parser = argparse.ArgumentParser(prog='gen_events', description='Create events in Splunk> SOAR')
    parser.add_argument('-c', '--custom', dest='custom', help='update custom fields if not set', action='store_true', required=False)
    parser.add_argument('-f', '--file', dest='document', help='a YAML formatted input file', default=None, required=True)
    args = parser.parse_args()
    stream = get_fileobj(args.document)
    if stream:
        # A multi-document YAML stream: each document becomes one event.
        documents = yaml.safe_load_all(stream)
    else:
        print(f'...nothing to do!')
        sys.exit(1)
    for document in documents:
        add_event(document, custom=args.custom)
    stream.close()


if __name__ == '__main__':
    main()
| 31.762295
| 136
| 0.651613
|
4a11bc9f1aaf2a37f8952de30b87a292bf0b1b5c
| 3,932
|
py
|
Python
|
collagen/data/_dataprovider.py
|
MIPT-Oulu/Collagen
|
0cbc4285d60e5c9fcc89f629fcf4321e80b7452c
|
[
"MIT"
] | 4
|
2019-05-14T14:44:51.000Z
|
2020-03-13T08:37:48.000Z
|
collagen/data/_dataprovider.py
|
MIPT-Oulu/Collagen
|
0cbc4285d60e5c9fcc89f629fcf4321e80b7452c
|
[
"MIT"
] | 26
|
2019-04-21T20:35:22.000Z
|
2022-03-12T00:32:57.000Z
|
collagen/data/_dataprovider.py
|
MIPT-Oulu/Collagen
|
0cbc4285d60e5c9fcc89f629fcf4321e80b7452c
|
[
"MIT"
] | 1
|
2019-05-14T14:53:28.000Z
|
2019-05-14T14:53:28.000Z
|
import gc
class DataProvider(object):
    """Provides data from single or multiple ``ItemLoader``s.

    Parameters
    ----------
    item_loaders : dict
        Dictionary that maps names to ``ItemLoader`` objects.
    """
    def __init__(self, item_loaders: dict):
        self.__loaders = item_loaders
        # Per-loader bookkeeping: samples drawn so far, samples left in the
        # current pass, and the number of completed passes ("loops").
        self.__state_dict = {}
        for itemloader_name, loader in self.__loaders.items():
            # Auto-assign the dict key as the loader's name when the loader
            # has none yet. `not loader.name` already covers both None and
            # the empty string, so the original's extra `is None` test is
            # redundant.
            if not loader.name:
                loader.name = itemloader_name
            itemloader_len = len(loader)
            self.__state_dict[itemloader_name] = {"total": itemloader_len,
                                                  "samples": None,
                                                  "num_sampled": 0,
                                                  "num_left": itemloader_len,
                                                  "num_loops": 0}

    def sample(self, **kwargs):
        """Samples :attr:`__loaders` with the specified number of data.

        Parameters
        ----------
        kwargs : dict
            Maps itemloader names, corresponding to the itemloaders stored in
            this ``DataProvider``, to the number of batches to draw from each.

        Returns
        -------
        list_samples : list
            List of samples, one entry per requested itemloader.

        Raises
        ------
        ValueError
            If any requested name does not match a stored itemloader.
        """
        # Validate every requested name before sampling anything, so a bad
        # name never leaves the state dict partially updated (this preserves
        # the original two-pass behavior).
        for itemloader_name in kwargs:
            if itemloader_name not in self.__loaders:
                raise ValueError("Not found argument `{}` in itemloader list".format(itemloader_name))
        return [self.__sample(il_name, k) for il_name, k in kwargs.items()]

    def __sample(self, itemloader_name: str, k: int):
        """Gets `k` samples from the itemloader specified by `itemloader_name`.

        Parameters
        ----------
        itemloader_name : str
            ``ItemLoader`` name
        k : int
            The number of samples

        Returns
        -------
        samples : list
            List of sampled data
        """
        samples = self.__loaders[itemloader_name].sample(k)
        num_samples = len(samples)
        state = self.__state_dict[itemloader_name]
        state["samples"] = samples
        # A draw that would exceed `total` means the loader wrapped around:
        # count one completed loop and restart the counters for the new pass.
        if state["num_sampled"] + num_samples > state["total"]:
            state["num_loops"] += 1
            state["num_sampled"] = num_samples
            state["num_left"] = state["total"] - num_samples
        else:
            state["num_sampled"] += num_samples
            state["num_left"] -= num_samples
        return samples

    def state_dict(self):
        """Returns :attr:`__state_dict` (per-loader sampling bookkeeping)."""
        return self.__state_dict

    def empty_state(self):
        """Cleans :attr:`__state_dict` and forces a garbage collection."""
        del self.__state_dict
        gc.collect()
        self.__state_dict = {}

    def get_loader_names(self):
        """Returns the list of stored itemloader names."""
        return list(self.__loaders)

    def get_loader_by_name(self, name):
        """Returns the loader(s) registered under `name`.

        `name` may be a single key, or a tuple/list of keys — in which case a
        tuple of loaders is returned.

        Raises
        ------
        ValueError
            If `name` does not identify any stored itemloader.
        """
        # Bug fix: a `list` is unhashable, so the original's leading
        # `name in self.__loaders` membership test raised TypeError before
        # the tuple/list branch could run. Handle lists first; tuples are
        # hashable, so they keep the original check order (a tuple that is
        # itself a dict key still returns that single loader).
        if isinstance(name, list):
            return tuple(self.__loaders[s] for s in name)
        if name in self.__loaders:
            return self.__loaders[name]
        if isinstance(name, tuple):
            return tuple(self.__loaders[s] for s in name)
        raise ValueError("`{}` not found in list of loader names".format(name))

    def set_epoch(self, epoch):
        """Propagates the epoch number to every stored itemloader."""
        for loader in self.__loaders.values():
            loader.set_epoch(epoch)
| 34.191304
| 124
| 0.579095
|
4a11bd475b8690f577757bfbd8e3031b8d990aca
| 36,344
|
py
|
Python
|
main/global_EoR_figures_producer.py
|
dannyjacobs/PRISim
|
89e544d771cf5c4113a4d5787a57c9586fa98eac
|
[
"MIT"
] | null | null | null |
main/global_EoR_figures_producer.py
|
dannyjacobs/PRISim
|
89e544d771cf5c4113a4d5787a57c9586fa98eac
|
[
"MIT"
] | null | null | null |
main/global_EoR_figures_producer.py
|
dannyjacobs/PRISim
|
89e544d771cf5c4113a4d5787a57c9586fa98eac
|
[
"MIT"
] | null | null | null |
import numpy as NP
import astropy.cosmology as CP
import scipy.constants as FCNST
import argparse
import yaml
import astropy
from astropy.io import fits, ascii
import progressbar as PGB
import matplotlib.pyplot as PLT
import matplotlib.colors as PLTC
import matplotlib.cm as CM
from matplotlib.ticker import FuncFormatter
import geometry as GEOM
import interferometry as RI
import catalog as SM
import constants as CNST
import my_DSP_modules as DSP
import my_operations as OPS
import primary_beams as PB
import baseline_delay_horizon as DLY
import ipdb as PDB
parser = argparse.ArgumentParser(description='Program to analyze and plot global EoR data')
input_group = parser.add_argument_group('Input parameters', 'Input specifications')
input_group.add_argument('-i', '--infile', dest='infile', default='/home/t_nithyanandan/codes/mine/python/interferometry/main/simparameters.yaml', type=file, required=False, help='File specifying input parameters')
args = vars(parser.parse_args())
rootdir = '/data3/t_nithyanandan/'
with args['infile'] as parms_file:
parms = yaml.safe_load(parms_file)
project = parms['project']
telescope_id = parms['telescope']['id']
Tsys = parms['telescope']['Tsys']
latitude = parms['telescope']['latitude']
pfb_method = parms['telescope']['pfb_method']
element_shape = parms['antenna']['shape']
element_size = parms['antenna']['size']
element_ocoords = parms['antenna']['ocoords']
element_orientation = parms['antenna']['orientation']
ground_plane = parms['antenna']['ground_plane']
phased_array = parms['antenna']['phased_array']
phased_elements_file = parms['phasedarray']['file']
delayerr = parms['phasedarray']['delayerr']
gainerr = parms['phasedarray']['gainerr']
nrand = parms['phasedarray']['nrand']
antenna_file = parms['array']['file']
array_layout = parms['array']['layout']
minR = parms['array']['minR']
maxR = parms['array']['maxR']
minbl = parms['baseline']['min']
maxbl = parms['baseline']['max']
bldirection = parms['baseline']['direction']
obs_mode = parms['obsparm']['obs_mode']
n_snaps = parms['obsparm']['n_snaps']
t_snap = parms['obsparm']['t_snap']
t_obs = parms['obsparm']['t_obs']
freq = parms['obsparm']['freq']
freq_resolution = parms['obsparm']['freq_resolution']
nchan = parms['obsparm']['nchan']
avg_drifts = parms['snapshot']['avg_drifts']
beam_switch = parms['snapshot']['beam_switch']
pick_snapshots = parms['snapshot']['pick']
all_snapshots = parms['snapshot']['all']
snapshots_range = parms['snapshot']['range']
pointing_file = parms['pointing']['file']
pointing_info = parms['pointing']['initial']
n_bins_baseline_orientation = parms['processing']['n_bins_blo']
baseline_chunk_size = parms['processing']['bl_chunk_size']
bl_chunk = parms['processing']['bl_chunk']
n_bl_chunks = parms['processing']['n_bl_chunks']
n_sky_sectors = parms['processing']['n_sky_sectors']
bpass_shape = parms['processing']['bpass_shape']
max_abs_delay = parms['processing']['max_abs_delay']
fg_str = parms['fgparm']['model']
nside = parms['fgparm']['nside']
spindex_rms = parms['fgparm']['spindex_rms']
spindex_seed = parms['fgparm']['spindex_seed']
pc = parms['phasing']['center']
pc_coords = parms['phasing']['coords']
if project not in ['project_MWA', 'project_global_EoR', 'project_HERA', 'project_drift_scan', 'project_beams', 'project_LSTbin']:
raise ValueError('Invalid project specified')
else:
project_dir = project + '/'
if telescope_id not in ['mwa', 'vla', 'gmrt', 'hera', 'mwa_dipole', 'custom', 'paper_dipole', 'mwa_tools']:
raise ValueError('Invalid telescope specified')
if element_shape is None:
element_shape = 'delta'
elif element_shape not in ['dish', 'delta', 'dipole']:
raise ValueError('Invalid antenna element shape specified')
if element_shape != 'delta':
if element_size is None:
raise ValueError('No antenna element size specified')
elif element_size <= 0.0:
raise ValueError('Antenna element size must be positive')
if element_ocoords not in ['altaz', 'dircos']:
if element_ocoords is not None:
raise ValueError('Antenna element orientation must be "altaz" or "dircos"')
if element_orientation is None:
if element_ocoords == 'altaz':
element_orientation = NP.asarray([0.0, 90.0])
elif element_ocoords == 'dircos':
element_orientation = NP.asarray([1.0, 0.0, 0.0])
else:
element_orientation = NP.asarray(element_orientation)
if ground_plane is None:
ground_plane_str = 'no_ground_'
else:
if ground_plane > 0.0:
ground_plane_str = '{0:.1f}m_ground_'.format(ground_plane)
else:
raise ValueError('Height of antenna element above ground plane must be positive.')
if not isinstance(phased_array, bool):
raise TypeError('phased_array specification must be boolean')
if delayerr is None:
delayerr_str = ''
delayerr = 0.0
elif delayerr < 0.0:
raise ValueError('delayerr must be non-negative.')
else:
delayerr_str = 'derr_{0:.3f}ns'.format(delayerr)
delayerr *= 1e-9
if gainerr is None:
gainerr_str = ''
gainerr = 0.0
elif gainerr < 0.0:
raise ValueError('gainerr must be non-negative.')
else:
gainerr_str = '_gerr_{0:.2f}dB'.format(gainerr)
if nrand is None:
nrandom_str = ''
nrand = 1
elif nrand < 1:
raise ValueError('nrandom must be positive')
else:
nrandom_str = '_nrand_{0:0d}_'.format(nrand)
if (delayerr_str == '') and (gainerr_str == ''):
nrand = 1
nrandom_str = ''
delaygain_err_str = delayerr_str + gainerr_str + nrandom_str
if (antenna_file is None) and (array_layout is None):
raise ValueError('One of antenna array file or layout must be specified')
if (antenna_file is not None) and (array_layout is not None):
raise ValueError('Only one of antenna array file or layout must be specified')
if antenna_file is not None:
try:
ant_info = NP.loadtxt(antenna_file, skiprows=6, comments='#', usecols=(0,1,2,3))
ant_id = ant_info[:,0].astype(int).astype(str)
ant_locs = ant_info[:,1:]
except IOError:
raise IOError('Could not open file containing antenna locations.')
else:
if array_layout not in ['MWA-128T', 'HERA-7', 'HERA-19', 'HERA-37', 'HERA-61', 'HERA-91', 'HERA-127', 'HERA-169', 'HERA-217', 'HERA-271', 'HERA-331', 'CIRC']:
raise ValueError('Invalid array layout specified')
if array_layout == 'MWA-128T':
ant_info = NP.loadtxt('/data3/t_nithyanandan/project_MWA/MWA_128T_antenna_locations_MNRAS_2012_Beardsley_et_al.txt', skiprows=6, comments='#', usecols=(0,1,2,3))
ant_id = ant_info[:,0].astype(int).astype(str)
ant_locs = ant_info[:,1:]
elif array_layout == 'HERA-7':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=7)
elif array_layout == 'HERA-19':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=19)
elif array_layout == 'HERA-37':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=37)
elif array_layout == 'HERA-61':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=61)
elif array_layout == 'HERA-91':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=91)
elif array_layout == 'HERA-127':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=127)
elif array_layout == 'HERA-169':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=169)
elif array_layout == 'HERA-217':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=217)
elif array_layout == 'HERA-271':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=271)
elif array_layout == 'HERA-331':
ant_locs, ant_id = RI.hexagon_generator(14.6, n_total=331)
elif array_layout == 'CIRC':
ant_locs, ant_id = RI.circular_antenna_array(element_size, minR, maxR=maxR)
telescope = {}
if telescope_id in ['mwa', 'vla', 'gmrt', 'hera', 'mwa_dipole', 'mwa_tools']:
telescope['id'] = telescope_id
telescope['shape'] = element_shape
telescope['size'] = element_size
telescope['orientation'] = element_orientation
telescope['ocoords'] = element_ocoords
telescope['groundplane'] = ground_plane
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole'):
element_size = 0.74
element_shape = 'dipole'
if telescope_id == 'mwa': phased_array = True
elif telescope_id == 'vla':
element_size = 25.0
element_shape = 'dish'
elif telescope_id == 'gmrt':
element_size = 45.0
element_shape = 'dish'
elif telescope_id == 'hera':
element_size = 14.0
element_shape = 'dish'
elif telescope_id == 'custom':
if element_shape != 'delta':
if (element_shape is None) or (element_size is None):
raise ValueError('Both antenna element shape and size must be specified for the custom telescope type.')
elif element_size <= 0.0:
raise ValueError('Antenna element size must be positive.')
elif telescope_id == 'mwa_tools':
pass
else:
raise ValueError('telescope ID must be specified.')
if telescope_id == 'custom':
if element_shape == 'delta':
telescope_id = 'delta'
else:
telescope_id = '{0:.1f}m_{1:}'.format(element_size, element_shape)
if phased_array:
telescope_id = telescope_id + '_array'
telescope_str = telescope_id+'_'
if element_orientation is None:
if element_ocoords is not None:
if element_ocoords == 'altaz':
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole') or (element_shape == 'dipole'):
element_orientation = NP.asarray([0.0, 90.0]).reshape(1,-1)
else:
element_orientation = NP.asarray([90.0, 270.0]).reshape(1,-1)
elif element_ocoords == 'dircos':
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole') or (element_shape == 'dipole'):
element_orientation = NP.asarray([1.0, 0.0, 0.0]).reshape(1,-1)
else:
element_orientation = NP.asarray([0.0, 0.0, 1.0]).reshape(1,-1)
else:
raise ValueError('Invalid value specified antenna element orientation coordinate system.')
else:
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole') or (element_shape == 'dipole'):
element_orientation = NP.asarray([0.0, 90.0]).reshape(1,-1)
else:
element_orientation = NP.asarray([90.0, 270.0]).reshape(1,-1)
element_ocoords = 'altaz'
else:
if element_ocoords is None:
raise ValueError('Antenna element orientation coordinate system must be specified to describe the specified antenna orientation.')
element_orientation = NP.asarray(element_orientation).reshape(1,-1)
if (element_orientation.size < 2) or (element_orientation.size > 3):
raise ValueError('Antenna element orientation must be a two- or three-element vector.')
elif (element_ocoords == 'altaz') and (element_orientation.size != 2):
raise ValueError('Antenna element orientation must be a two-element vector if using Alt-Az coordinates.')
if ground_plane is None:
ground_plane_str = 'no_ground_'
else:
if ground_plane > 0.0:
ground_plane_str = '{0:.1f}m_ground_'.format(ground_plane)
else:
raise ValueError('Height of antenna element above ground plane must be positive.')
if obs_mode is None:
obs_mode = 'custom'
elif obs_mode not in ['drift', 'track', 'dns']:
raise ValueError('Invalid observing mode specified')
if avg_drifts + beam_switch + (pick_snapshots is not None) + (snapshots_range is not None) + all_snapshots != 1:
raise ValueError('One and only one of avg_drifts, beam_switch, pick_snapshots, snapshots_range, all_snapshots must be set')
snapshot_type_str = ''
if avg_drifts and (obs_mode == 'dns'):
snapshot_type_str = 'drift_averaged_'
if beam_switch and (obs_mode == 'dns'):
snapshot_type_str = 'beam_switches_'
if (snapshots_range is not None) and ((obs_mode == 'dns') or (obs_mode == 'lstbin')):
snapshot_type_str = 'snaps_{0[0]:0d}-{0[1]:0d}_'.format(snapshots_range)
if (pointing_file is None) and (pointing_info is None):
raise ValueError('One and only one of pointing file and initial pointing must be specified')
elif (pointing_file is not None) and (pointing_info is not None):
raise ValueError('One and only one of pointing file and initial pointing must be specified')
duration_str = ''
if obs_mode in ['track', 'drift']:
if (t_snap is not None) and (n_snaps is not None):
duration_str = '_{0:0d}x{1:.1f}s'.format(n_snaps, t_snap)
geor_duration_str = '_{0:0d}x{1:.1f}s'.format(1, t_snap)
if pointing_file is not None:
pointing_init = None
pointing_info_from_file = NP.loadtxt(pointing_file, comments='#', usecols=(1,2,3), delimiter=',')
obs_id = NP.loadtxt(pointing_file, comments='#', usecols=(0,), delimiter=',', dtype=str)
if (telescope_id == 'mwa') or (telescope_id == 'mwa_tools') or (phased_array):
delays_str = NP.loadtxt(pointing_file, comments='#', usecols=(4,), delimiter=',', dtype=str)
delays_list = [NP.fromstring(delaystr, dtype=float, sep=';', count=-1) for delaystr in delays_str]
delay_settings = NP.asarray(delays_list)
delay_settings *= 435e-12
delays = NP.copy(delay_settings)
if n_snaps is None:
n_snaps = pointing_info_from_file.shape[0]
pointing_info_from_file = pointing_info_from_file[:min(n_snaps, pointing_info_from_file.shape[0]),:]
obs_id = obs_id[:min(n_snaps, pointing_info_from_file.shape[0])]
if (telescope_id == 'mwa') or (telescope_id == 'mwa_tools') or (phased_array):
delays = delay_settings[:min(n_snaps, pointing_info_from_file.shape[0]),:]
n_snaps = min(n_snaps, pointing_info_from_file.shape[0])
pointings_altaz = pointing_info_from_file[:,:2].reshape(-1,2)
pointings_altaz_orig = pointing_info_from_file[:,:2].reshape(-1,2)
lst = 15.0 * pointing_info_from_file[:,2]
lst_wrapped = lst + 0.0
lst_wrapped[lst_wrapped > 180.0] = lst_wrapped[lst_wrapped > 180.0] - 360.0
lst_edges = NP.concatenate((lst_wrapped, [lst_wrapped[-1]+lst_wrapped[-1]-lst_wrapped[-2]]))
if obs_mode is None:
obs_mode = 'custom'
if (obs_mode == 'dns') and (avg_drifts or beam_switch):
angle_diff = GEOM.sphdist(pointings_altaz[1:,1], pointings_altaz[1:,0], pointings_altaz[:-1,1], pointings_altaz[:-1,0])
angle_diff = NP.concatenate(([0.0], angle_diff))
shift_threshold = 1.0 # in degrees
# lst_edges = NP.concatenate(([lst_edges[0]], lst_edges[angle_diff > shift_threshold], [lst_edges[-1]]))
lst_wrapped = NP.concatenate(([lst_wrapped[0]], lst_wrapped[angle_diff > shift_threshold], [lst_wrapped[-1]]))
n_snaps = lst_wrapped.size - 1
pointings_altaz = NP.vstack((pointings_altaz[0,:].reshape(-1,2), pointings_altaz[angle_diff>shift_threshold,:].reshape(-1,2)))
obs_id = NP.concatenate(([obs_id[0]], obs_id[angle_diff>shift_threshold]))
if (telescope_id == 'mwa') or (telescope_id == 'mwa_tools') or (phased_array):
delays = NP.vstack((delay_settings[0,:], delay_settings[angle_diff>shift_threshold,:]))
obs_mode = 'custom'
if avg_drifts:
lst_edges = NP.concatenate(([lst_edges[0]], lst_edges[angle_diff > shift_threshold], [lst_edges[-1]]))
else:
lst_edges_left = lst_wrapped[:-1] + 0.0
lst_edges_right = NP.concatenate(([lst_edges[1]], lst_edges[NP.asarray(NP.where(angle_diff > shift_threshold)).ravel()+1]))
elif snapshots_range is not None:
snapshots_range[1] = snapshots_range[1] % n_snaps
if snapshots_range[0] > snapshots_range[1]:
raise IndexError('min snaphost # must be <= max snapshot #')
lst_wrapped = lst_wrapped[snapshots_range[0]:snapshots_range[1]+2]
lst_edges = NP.copy(lst_wrapped)
pointings_altaz = pointings_altaz[snapshots_range[0]:snapshots_range[1]+1,:]
obs_id = obs_id[snapshots_range[0]:snapshots_range[1]+1]
if (telescope_id == 'mwa') or (telescope_id == 'mwa_tools') or (phased_array):
delays = delay_settings[snapshots_range[0]:snapshots_range[1]+1,:]
n_snaps = snapshots_range[1]-snapshots_range[0]+1
elif pick_snapshots is not None:
pick_snapshots = NP.asarray(pick_snapshots)
n_snaps = pick_snapshots.size
lst_begin = NP.asarray(lst_wrapped[pick_snapshots])
pointings_altaz = pointings_altaz[pick_snapshots,:]
obs_id = obs_id[pick_snapshots]
if (telescope_id == 'mwa') or (phased_array) or (telescope_id == 'mwa_tools'):
delays = delay_settings[pick_snapshots,:]
if obs_mode != 'lstbin':
lst_end = NP.asarray(lst_wrapped[pick_snapshots+1])
t_snap = (lst_end - lst_begin) / 15.0 * 3.6e3
# n_snaps = t_snap.size
lst = 0.5 * (lst_begin + lst_end)
obs_mode = 'custom'
else:
t_snap = 112.0 + NP.zeros(n_snaps) # in seconds (needs to be generalized)
lst = lst_wrapped + 0.5 * t_snap/3.6e3 * 15.0
if pick_snapshots is None:
if obs_mode != 'lstbin':
if not beam_switch:
lst = 0.5*(lst_edges[1:]+lst_edges[:-1])
t_snap = (lst_edges[1:]-lst_edges[:-1]) / 15.0 * 3.6e3
else:
lst = 0.5*(lst_edges_left + lst_edges_right)
t_snap = (lst_edges_right - lst_edges_left) / 15.0 * 3.6e3
else:
t_snap = 112.0 + NP.zeros(n_snaps) # in seconds (needs to be generalized)
lst = lst_wrapped + 0.5 * t_snap/3.6e3 * 15.0
# pointings_dircos_orig = GEOM.altaz2dircos(pointings_altaz_orig, units='degrees')
# pointings_hadec_orig = GEOM.altaz2hadec(pointings_altaz_orig, latitude, units='degrees')
# pointings_radec_orig = NP.hstack(((lst-pointings_hadec_orig[:,0]).reshape(-1,1), pointings_hadec_orig[:,1].reshape(-1,1)))
# pointings_radec_orig[:,0] = pointings_radec_orig[:,0] % 360.0
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
pointings_hadec = GEOM.altaz2hadec(pointings_altaz, latitude, units='degrees')
pointings_radec = NP.hstack(((lst-pointings_hadec[:,0]).reshape(-1,1), pointings_hadec[:,1].reshape(-1,1)))
pointings_radec[:,0] = pointings_radec[:,0] % 360.0
t_obs = NP.sum(t_snap)
elif pointing_info is not None:
pointing_init = NP.asarray(pointing_info[1:])
lst_init = pointing_info[0]
pointing_file = None
if t_snap is None:
raise NameError('t_snap must be provided for an automated observing run')
if (n_snaps is None) and (t_obs is None):
raise NameError('n_snaps or t_obs must be provided for an automated observing run')
elif (n_snaps is not None) and (t_obs is not None):
raise ValueError('Only one of n_snaps or t_obs must be provided for an automated observing run')
elif n_snaps is None:
n_snaps = int(t_obs/t_snap)
else:
t_obs = n_snaps * t_snap
t_snap = t_snap + NP.zeros(n_snaps)
lst = (lst_init + (t_snap/3.6e3) * NP.arange(n_snaps)) * 15.0 # in degrees
if obs_mode is None:
obs_mode = 'track'
if obs_mode == 'track':
pointings_radec = NP.repeat(NP.asarray(pointing_init).reshape(-1,2), n_snaps, axis=0)
else:
ha_init = lst_init * 15.0 - pointing_init[0]
pointings_radec = NP.hstack((NP.asarray(lst-ha_init).reshape(-1,1), pointing_init[1]+NP.zeros(n_snaps).reshape(-1,1)))
pointings_hadec = NP.hstack(((lst-pointings_radec[:,0]).reshape(-1,1), pointings_radec[:,1].reshape(-1,1)))
pointings_altaz = GEOM.hadec2altaz(pointings_hadec, latitude, units='degrees')
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
pointings_radec_orig = NP.copy(pointings_radec)
pointings_hadec_orig = NP.copy(pointings_hadec)
pointings_altaz_orig = NP.copy(pointings_altaz)
pointings_dircos_orig = NP.copy(pointings_dircos)
lst_wrapped = lst + 0.0
lst_wrapped[lst_wrapped > 180.0] = lst_wrapped[lst_wrapped > 180.0] - 360.0
if lst_wrapped.size > 1:
lst_edges = NP.concatenate((lst_wrapped, [lst_wrapped[-1]+lst_wrapped[-1]-lst_wrapped[-2]]))
else:
lst_edges = NP.concatenate((lst_wrapped, lst_wrapped+t_snap/3.6e3*15))
duration_str = '_{0:0d}x{1:.1f}s'.format(n_snaps, t_snap[0])
geor_duration_str = '_{0:0d}x{1:.1f}s'.format(1, t_snap[0])
bl, bl_id = RI.baseline_generator(ant_locs, ant_id=ant_id, auto=False, conjugate=False)
bl, select_bl_ind, bl_count = RI.uniq_baselines(bl)
bl_id = bl_id[select_bl_ind]
bl_length = NP.sqrt(NP.sum(bl**2, axis=1))
bl_orientation = NP.angle(bl[:,0] + 1j * bl[:,1], deg=True)
sortind = NP.argsort(bl_length, kind='mergesort')
bl = bl[sortind,:]
bl_id = bl_id[sortind]
bl_length = bl_length[sortind]
bl_orientation = bl_orientation[sortind]
bl_count = bl_count[sortind]
neg_bl_orientation_ind = (bl_orientation < -67.5) | (bl_orientation > 112.5)
# neg_bl_orientation_ind = NP.logical_or(bl_orientation < -0.5*180.0/n_bins_baseline_orientation, bl_orientation > 180.0 - 0.5*180.0/n_bins_baseline_orientation)
bl[neg_bl_orientation_ind,:] = -1.0 * bl[neg_bl_orientation_ind,:]
bl_orientation = NP.angle(bl[:,0] + 1j * bl[:,1], deg=True)
if minbl is None:
minbl = 0.0
elif not isinstance(minbl, (int,float)):
raise TypeError('Minimum baseline length must be a scalar')
elif minbl < 0.0:
minbl = 0.0
if maxbl is None:
maxbl = bl_length.max()
elif not isinstance(maxbl, (int,float)):
raise TypeError('Maximum baseline length must be a scalar')
elif maxbl < minbl:
maxbl = bl_length.max()
min_blo = -67.5
max_blo = 112.5
select_bl_ind = NP.zeros(bl_length.size, dtype=NP.bool)
if bldirection is not None:
if isinstance(bldirection, str):
if bldirection not in ['SE', 'E', 'NE', 'N']:
raise ValueError('Invalid baseline direction criterion specified')
else:
bldirection = [bldirection]
if isinstance(bldirection, list):
for direction in bldirection:
if direction in ['SE', 'E', 'NE', 'N']:
if direction == 'SE':
oind = (bl_orientation >= -67.5) & (bl_orientation < -22.5)
select_bl_ind[oind] = True
elif direction == 'E':
oind = (bl_orientation >= -22.5) & (bl_orientation < 22.5)
select_bl_ind[oind] = True
elif direction == 'NE':
oind = (bl_orientation >= 22.5) & (bl_orientation < 67.5)
select_bl_ind[oind] = True
else:
oind = (bl_orientation >= 67.5) & (bl_orientation < 112.5)
select_bl_ind[oind] = True
else:
raise TypeError('Baseline direction criterion must specified as string or list of strings')
else:
select_bl_ind = NP.ones(bl_length.size, dtype=NP.bool)
select_bl_ind = select_bl_ind & (bl_length >= minbl) & (bl_length <= maxbl)
bl_id = bl_id[select_bl_ind]
bl = bl[select_bl_ind,:]
bl_length = bl_length[select_bl_ind]
bl_orientation = bl_orientation[select_bl_ind]
total_baselines = bl_length.size
baseline_bin_indices = range(0,total_baselines,baseline_chunk_size)
bllstr = map(str, bl_length)
uniq_bllstr, ind_uniq_bll = NP.unique(bllstr, return_index=True)
count_uniq_bll = [bllstr.count(ubll) for ubll in uniq_bllstr]
count_uniq_bll = NP.asarray(count_uniq_bll)
geor_bl = bl[ind_uniq_bll,:]
geor_bl_id = bl_id[ind_uniq_bll]
geor_bl_orientation = bl_orientation[ind_uniq_bll]
geor_bl_length = bl_length[ind_uniq_bll]
sortind = NP.argsort(geor_bl_length, kind='mergesort')
geor_bl = geor_bl[sortind,:]
geor_bl_id = geor_bl_id[sortind]
geor_bl_length = geor_bl_length[sortind]
geor_bl_orientation = geor_bl_orientation[sortind]
count_uniq_bll = count_uniq_bll[sortind]
use_GSM = False
use_DSM = False
use_CSM = False
use_SUMSS = False
use_GLEAM = False
use_USM = False
use_NVSS = False
use_HI_monopole = False
use_HI_cube = False
use_HI_fluctuations = False
if fg_str not in ['asm', 'dsm', 'csm', 'nvss', 'sumss', 'gleam', 'mwacs', 'ps', 'usm', 'mss', 'HI_cube', 'HI_monopole', 'HI_fluctuations']:
raise ValueError('Invalid foreground model string specified.')
if fg_str == 'asm':
use_GSM = True
elif fg_str == 'dsm':
use_DSM = True
elif fg_str == 'csm':
use_CSM = True
elif fg_str == 'sumss':
use_SUMSS = True
elif fg_str == 'gleam':
use_GLEAM = True
elif fg_str == 'point':
use_PS = True
elif fg_str == 'nvss':
use_NVSS = True
elif fg_str == 'usm':
use_USM = True
elif fg_str == 'HI_monopole':
use_HI_monopole = True
elif fg_str == 'HI_fluctuations':
use_HI_fluctuations = True
elif fg_str == 'HI_cube':
use_HI_cube = True
spindex_seed_str = ''
if not isinstance(spindex_rms, (int,float)):
raise TypeError('Spectral Index rms must be a scalar')
if spindex_rms > 0.0:
spindex_rms_str = '{0:.1f}'.format(spindex_rms)
else:
spindex_rms = 0.0
if spindex_seed is not None:
if not isinstance(spindex_seed, (int, float)):
raise TypeError('Spectral index random seed must be a scalar')
spindex_seed_str = '{0:0d}_'.format(spindex_seed)
if n_sky_sectors == 1:
sky_sector_str = '_all_sky_'
freq = NP.float(freq)
freq_resolution = NP.float(freq_resolution)
wavelength = FCNST.c / freq # in meters
redshift = CNST.rest_freq_HI / freq - 1
bw = nchan * freq_resolution
bandpass_str = '{0:0d}x{1:.1f}_kHz'.format(nchan, freq_resolution/1e3)
if bpass_shape not in ['bnw', 'bhw', 'rect']:
raise ValueError('Invalid bandpass shape specified')
if pc_coords not in ['altaz', 'radec', 'hadec', 'dircos']:
raise ValueError('Invalid coordinate system specified for phase center')
else:
pc = NP.asarray(pc).ravel()
if pc_coords == 'radec':
if pc.size != 2:
raise ValueError('Phase center must be a 2-element vector')
pc_hadec = NP.hstack((lst.reshape(-1,1)-pc[0], pc[1]+NP.zeros((lst.size,1))))
pc_altaz = GEOM.hadec2altaz(pc_hadec, latitude, units='degrees')
pc_dircos = GEOM.altaz2dircos(pc_altaz, units='degrees')
elif pc_coords == 'hadec':
if pc.size != 2:
raise ValueError('Phase center must be a 2-element vector')
pc_altaz = GEOM.hadec2altaz(pc.reshape(1,-1), latitude, units='degrees')
pc_dircos = GEOM.altaz2dircos(pc_altaz, units='degrees')
elif pc_coords == 'altaz':
if pc.size != 2:
raise ValueError('Phase center must be a 2-element vector')
pc_dircos = GEOM.altaz2dircos(pc.reshape(1,-1), units='degrees')
else:
if pc.size != 3:
raise ValueError('Phase center must be a 3-element vector in dircos coordinates')
pc_coords = NP.asarray(pc).reshape(1,-1)
if pfb_method is not None:
use_pfb = True
else:
use_pfb = False
h = 0.7 # Hubble constant coefficient
cosmodel100 = CP.FlatLambdaCDM(H0=100.0, Om0=0.27) # Using H0 = 100 km/s/Mpc
cosmodel = CP.FlatLambdaCDM(H0=h*100.0, Om0=0.27) # Using H0 = h * 100 km/s/Mpc
def kprll(eta, z):
    """Convert a delay `eta` to a line-of-sight wavenumber at redshift `z`.

    Uses the H0=100 km/s/Mpc cosmology defined at module level
    (``cosmodel100``); presumably the trailing 1e3 is a unit conversion —
    TODO confirm against the module's k-space unit convention.
    """
    numerator = 2 * NP.pi * eta * cosmodel100.H0.value * CNST.rest_freq_HI * cosmodel100.efunc(z)
    denominator = FCNST.c * (1 + z)**2
    return numerator / denominator * 1e3
def kperp(u, z):
    """Convert a baseline length ``u`` (in wavelengths) to a transverse wavenumber.

    Divides by the comoving transverse distance from the H0=100 cosmology
    (``cosmodel100``) defined above.
    """
    transverse_distance = cosmodel100.comoving_transverse_distance(z).value
    return 2 * NP.pi * u / transverse_distance
geor_infile = rootdir+project_dir+telescope_str+'multi_baseline_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+geor_duration_str+'_baseline_range_{0:.1f}-{1:.1f}_'.format(bl_length[baseline_bin_indices[0]],bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'HI_monopole'+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+delaygain_err_str+'Tsys_{0:.1f}K_{1}_{2:.1f}_MHz_'.format(Tsys, bandpass_str, freq/1e6)+'no_pfb.fits'
fg_infile = rootdir+project_dir+telescope_str+'multi_baseline_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+duration_str+'_baseline_range_{0:.1f}-{1:.1f}_'.format(bl_length[baseline_bin_indices[0]],bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+fg_str+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+delaygain_err_str+'Tsys_{0:.1f}K_{1}_{2:.1f}_MHz_'.format(Tsys, bandpass_str, freq/1e6)+'no_pfb.fits'
geor_clean_infile = rootdir+project_dir+telescope_str+'multi_baseline_CLEAN_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+geor_duration_str+'_baseline_range_{0:.1f}-{1:.1f}_'.format(bl_length[baseline_bin_indices[0]],bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+'HI_monopole'+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+delaygain_err_str+'Tsys_{0:.1f}K_{1}_{2:.1f}_MHz_'.format(Tsys, bandpass_str, freq/1e6)+'no_pfb_'+bpass_shape+'.fits'
fg_clean_infile = rootdir+project_dir+telescope_str+'multi_baseline_CLEAN_visibilities_'+ground_plane_str+snapshot_type_str+obs_mode+duration_str+'_baseline_range_{0:.1f}-{1:.1f}_'.format(bl_length[baseline_bin_indices[0]],bl_length[min(baseline_bin_indices[n_bl_chunks-1]+baseline_chunk_size-1,total_baselines-1)])+fg_str+sky_sector_str+'sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+delaygain_err_str+'Tsys_{0:.1f}K_{1}_{2:.1f}_MHz_'.format(Tsys, bandpass_str, freq/1e6)+'no_pfb_'+bpass_shape+'.fits'
PDB.set_trace()
ia = RI.InterferometerArray(None, None, None, init_file=fg_infile)
hdulist = fits.open(geor_clean_infile)
clean_lags = hdulist['SPECTRAL INFO'].data['lag']
geor_cc_skyvis_lag = hdulist['CLEAN NOISELESS DELAY SPECTRA REAL'].data + 1j * hdulist['CLEAN NOISELESS DELAY SPECTRA IMAG'].data
geor_cc_skyvis_lag_res = hdulist['CLEAN NOISELESS DELAY SPECTRA RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISELESS DELAY SPECTRA RESIDUALS IMAG'].data
hdulist.close()
hdulist = fits.open(fg_clean_infile)
fg_cc_skyvis_lag = hdulist['CLEAN NOISELESS DELAY SPECTRA REAL'].data + 1j * hdulist['CLEAN NOISELESS DELAY SPECTRA IMAG'].data
fg_cc_skyvis_lag_res = hdulist['CLEAN NOISELESS DELAY SPECTRA RESIDUALS REAL'].data + 1j * hdulist['CLEAN NOISELESS DELAY SPECTRA RESIDUALS IMAG'].data
hdulist.close()
geor_cc_skyvis_lag += geor_cc_skyvis_lag_res
fg_cc_skyvis_lag += fg_cc_skyvis_lag_res
geor_cc_skyvis_lag = DSP.downsampler(geor_cc_skyvis_lag, 1.0*clean_lags.size/ia.lags.size, axis=1)
fg_cc_skyvis_lag = DSP.downsampler(fg_cc_skyvis_lag, 1.0*clean_lags.size/ia.lags.size, axis=1)
fg_cc_skyvis_lag_res = DSP.downsampler(fg_cc_skyvis_lag_res, 1.0*clean_lags.size/ia.lags.size, axis=1)
clean_lags_orig = NP.copy(clean_lags)
clean_lags = DSP.downsampler(clean_lags, 1.0*clean_lags.size/ia.lags.size, axis=-1)
clean_lags = clean_lags.ravel()
delaymat = DLY.delay_envelope(bl, pc_dircos, units='mks')
min_delay = -delaymat[0,:,1]-delaymat[0,:,0]
max_delay = delaymat[0,:,0]-delaymat[0,:,1]
clags = clean_lags.reshape(1,-1)
min_delay = min_delay.reshape(-1,1)
max_delay = max_delay.reshape(-1,1)
thermal_noise_window = NP.abs(clags) >= max_abs_delay*1e-6
thermal_noise_window = NP.repeat(thermal_noise_window, bl.shape[0], axis=0)
EoR_window = NP.logical_or(clags > max_delay+1/bw, clags < min_delay-1/bw)
wedge_window = NP.logical_and(clags <= max_delay, clags >= min_delay)
non_wedge_window = NP.logical_not(wedge_window)
bll_bin_count, bll_edges, bll_binnum, bll_ri = OPS.binned_statistic(bl_length, values=None, statistic='count', bins=NP.hstack((geor_bl_length-1e-10, geor_bl_length.max()+1e-10)))
snap_min = 0
snap_max = 39
fg_cc_skyvis_lag_tavg = NP.mean(fg_cc_skyvis_lag[:,:,snap_min:snap_max+1], axis=2)
fg_cc_skyvis_lag_res_tavg = NP.mean(fg_cc_skyvis_lag_res[:,:,snap_min:snap_max+1], axis=2)
fg_cc_skyvis_lag_blavg = NP.zeros((geor_bl_length.size, clags.size, snap_max-snap_min+1), dtype=NP.complex64)
fg_cc_skyvis_lag_res_blavg = NP.zeros((geor_bl_length.size, clags.size, snap_max-snap_min+1), dtype=NP.complex64)
for i in xrange(geor_bl_length.size):
blind = bll_ri[bll_ri[i]:bll_ri[i+1]]
if blind.size != bll_bin_count[i]: PDB.set_trace()
fg_cc_skyvis_lag_blavg[i,:,:] = NP.mean(fg_cc_skyvis_lag[blind,:,snap_min:snap_max+1], axis=0)
fg_cc_skyvis_lag_res_blavg[i,:,:] = NP.mean(fg_cc_skyvis_lag_res[blind,:,snap_min:snap_max+1], axis=0)
fg_cc_skyvis_lag_avg = NP.mean(fg_cc_skyvis_lag_blavg, axis=2)
fg_cc_skyvis_lag_res_avg = NP.mean(fg_cc_skyvis_lag_res_blavg, axis=2)
for i in xrange(int(NP.ceil(geor_bl_length.size/4.0))):
fig, axs = PLT.subplots(min(4,geor_bl_length.size-4*i), sharex=True, figsize=(6,9))
for j in range(4*i, min(4*(i+1),geor_bl_length.size)):
blind = bll_ri[bll_ri[j]:bll_ri[j+1]]
axs[j%len(axs)].plot(1e6*clags.ravel(), NP.abs(fg_cc_skyvis_lag[blind[0],:,0]), ls='--', lw=2, color='black')
axs[j%len(axs)].plot(1e6*clags.ravel(), NP.abs(fg_cc_skyvis_lag_blavg[j,:,0]), ls='-.', lw=2, color='black')
axs[j%len(axs)].plot(1e6*clags.ravel(), NP.abs(fg_cc_skyvis_lag_tavg[blind[0],:]), ls=':', lw=2, color='black')
axs[j%len(axs)].plot(1e6*clags.ravel(), NP.abs(fg_cc_skyvis_lag_avg[j,:]), ls='-', lw=2, color='black')
axs[j%len(axs)].plot(1e6*clags.ravel(), NP.abs(geor_cc_skyvis_lag[j,:,0]), ls='-', lw=2, color='gray')
axs[j%len(axs)].plot(1e6*clags.ravel(), NP.abs(fg_cc_skyvis_lag_res_avg[j,:]), ls=':', lw=2, color='red')
axs[j%len(axs)].axvline(x=1e6*min_delay[blind[0],0], ls=':', lw=2, color='gray')
axs[j%len(axs)].axvline(x=1e6*max_delay[blind[0],0], ls=':', lw=2, color='gray')
axs[j%len(axs)].text(0.05, 0.8, r'$|\mathbf{b}|$'+' = {0:.1f} m'.format(geor_bl_length[j]), fontsize=12, weight='medium', transform=axs[j%len(axs)].transAxes)
axs[j%len(axs)].set_ylim(NP.abs(geor_cc_skyvis_lag).min(), NP.abs(fg_cc_skyvis_lag[:,:,snap_min:snap_max+1]).max())
axs[j%len(axs)].set_xlim(1e6*clags.min(), 1e6*clags.max())
axs[j%len(axs)].set_yscale('log')
axs[j%len(axs)].set_yticks(NP.logspace(4,12,5,endpoint=True).tolist())
if j%len(axs) == len(axs)-1:
axs[j%len(axs)].set_xlabel(r'$\tau$ [$\mu$s]', fontsize=16, weight='medium')
if j%len(axs) == 0:
axs_kprll = axs[j%len(axs)].twiny()
axs_kprll.set_xticks(kprll(axs[j%len(axs)].get_xticks()*1e-6, redshift))
axs_kprll.set_xlim(kprll(NP.asarray(axs[j%len(axs)].get_xlim())*1e-6, redshift))
xformatter = FuncFormatter(lambda x, pos: '{0:.2f}'.format(x))
axs_kprll.xaxis.set_major_formatter(xformatter)
axs_kprll.xaxis.tick_top()
axs_kprll.set_xlabel(r'$k_\parallel$ [$h$ Mpc$^{-1}$]', fontsize=16, weight='medium')
axs_kprll.xaxis.set_label_position('top')
fig.subplots_adjust(hspace=0)
big_ax = fig.add_subplot(111)
big_ax.set_axis_bgcolor('none')
big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
big_ax.set_xticks([])
big_ax.set_yticks([])
big_ax.set_ylabel(r"$|V_b|$ [Jy Hz]", fontsize=16, weight='medium', labelpad=30)
PLT.savefig(rootdir+project_dir+'figures/'+telescope_str+'delay_spectra_'+ground_plane_str+snapshot_type_str+obs_mode+duration_str+'_baseline_range_{0:.1f}-{1:.1f}_'.format(geor_bl_length[4*i],geor_bl_length[j])+fg_str+'_nside_{0:0d}_'.format(nside)+delaygain_err_str+'Tsys_{0:.1f}K_{1}_{2:.1f}_MHz_'.format(Tsys, bandpass_str, freq/1e6)+'no_pfb_'+bpass_shape+'.png', bbox_inches=0)
# fig = PLT.figure(figsize=(6,6))
# ax = fig.add_subplot(111)
# ax.plot(1e6*clags.ravel(), NP.abs(fg_cc_skyvis_lag[-1,:,0]), ls='--', lw=2, color='black')
# ax.plot(1e6*clags.ravel(), NP.abs(fg_cc_skyvis_lag_blavg[-1,:,0]), ls='-.', lw=2, color='black')
# ax.plot(1e6*clags.ravel(), NP.abs(fg_cc_skyvis_lag_tavg[-1,:]), ls=':', lw=2, color='black')
# ax.plot(1e6*clags.ravel(), NP.abs(fg_cc_skyvis_lag_avg[-1,:]), ls='-', lw=2, color='black')
# ax.plot(1e6*clags.ravel(), NP.abs(geor_cc_skyvis_lag[-1,:,0]), ls='-', lw=2, color='gray')
# ax.plot(1e6*clags.ravel(), NP.abs(fg_cc_skyvis_lag_res_avg[-1,:]), ls='-', lw=2, color='red')
# ax.set_ylim(NP.abs(geor_cc_skyvis_lag).min(), NP.abs(fg_cc_skyvis_lag).max())
# ax.set_xlim(1e6*clags.min(), 1e6*clags.max())
# ax.set_yscale('log')
# ax.set_xlabel(r'$\tau$ [$\mu$s]', fontsize=16, weight='medium')
# ax.set_ylabel(r'$|V_b|$'+' [Jy Hz]', fontsize=16, weight='medium')
# PLT.show()
PDB.set_trace()
| 47.884058
| 555
| 0.691641
|
4a11bd581432d5fa5ab64b205ced705eb086fc93
| 468
|
py
|
Python
|
tests/conftest.py
|
Ivo-B/CC-DL-template
|
9374c968a62a6604f8939b62e775af027c75a76b
|
[
"MIT"
] | 1
|
2022-01-11T09:55:23.000Z
|
2022-01-11T09:55:23.000Z
|
tests/conftest.py
|
Ivo-B/CC-DL-template
|
9374c968a62a6604f8939b62e775af027c75a76b
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
Ivo-B/CC-DL-template
|
9374c968a62a6604f8939b62e775af027c75a76b
|
[
"MIT"
] | null | null | null |
import pytest
# Default cookiecutter prompt answers shared by the test fixtures in this file.
# NOTE: this dict is module-level mutable state — fixtures should hand out
# copies rather than the dict itself to keep tests isolated.
default_args: dict = {
    "project_name": "My test project",
    "repo_name": "my-test-repo",
    "module_name": "mtr",
    "author_name": "Ivo Baltruschat",
    "author_mail": "im.baltruschat@icloud.com",
    "description": "A nice test project.",
    "dl_framework": "Tensorflow",
    "add_example_code": "yes",
    "license": "MIT",
}
@pytest.fixture()
def context(tmpdir) -> dict:
    """Provide default prompt values for a test.

    Returns a fresh shallow copy of ``default_args`` (all values are strings,
    so a shallow copy is sufficient) instead of the shared module-level dict.
    Previously the fixture returned ``default_args`` itself, so any test that
    mutated its context leaked that change into every later test.
    """
    return dict(default_args)
| 23.4
| 47
| 0.638889
|
4a11be74add726183da27c78d4a565e93955e42d
| 140
|
py
|
Python
|
Learning/Test50_map.py
|
liang1024/Python
|
a80127500f7a171567e32699f42128f3ddc44b3f
|
[
"Apache-2.0"
] | 1
|
2017-03-07T13:49:27.000Z
|
2017-03-07T13:49:27.000Z
|
Learning/Test50_map.py
|
liang1024/Python
|
a80127500f7a171567e32699f42128f3ddc44b3f
|
[
"Apache-2.0"
] | null | null | null |
Learning/Test50_map.py
|
liang1024/Python
|
a80127500f7a171567e32699f42128f3ddc44b3f
|
[
"Apache-2.0"
] | null | null | null |
"""Small demonstration of applying a function to every element of a list."""

income = [10, 30, 75]


def double_money(dollars):
    """Return twice the given amount."""
    return dollars * 2


# Equivalent to list(map(double_money, income)).
new_income = [double_money(amount) for amount in income]
print(new_income)
| 14
| 41
| 0.721429
|
4a11bf9093295cd7192778ef71496efbf6a87f83
| 1,252
|
py
|
Python
|
okta/komand_okta/actions/assign_user_to_app_sso/schema.py
|
xhennessy-r7/insightconnect-plugins
|
59268051313d67735b5dd3a30222eccb92aca8e9
|
[
"MIT"
] | null | null | null |
okta/komand_okta/actions/assign_user_to_app_sso/schema.py
|
xhennessy-r7/insightconnect-plugins
|
59268051313d67735b5dd3a30222eccb92aca8e9
|
[
"MIT"
] | null | null | null |
okta/komand_okta/actions/assign_user_to_app_sso/schema.py
|
xhennessy-r7/insightconnect-plugins
|
59268051313d67735b5dd3a30222eccb92aca8e9
|
[
"MIT"
] | null | null | null |
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Input:
    """Names of the action's input fields, as used in the JSON schema below."""

    # Okta application to assign the user to.
    APPLICATIONID = "applicationId"
    # Application user model payload.
    APPUSER = "appuser"
class Output:
    """Names of the action's output fields, as used in the JSON schema below."""

    # Raw result object returned by the action.
    RESULT = "result"
class AssignUserToAppSsoInput(komand.Input):
    # Generated input-schema holder — regenerated by the Komand SDK; per the
    # file header, do not hand-edit the schema text.
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "applicationId": {
      "type": "string",
      "title": "Application ID",
      "description": "Application ID",
      "order": 1
    },
    "appuser": {
      "type": "object",
      "title": "Application User Model",
      "description": "Application user model as JSON object, see https://developer.okta.com/docs/api/resources/apps#application-user-model",
      "order": 2
    }
  },
  "required": [
    "applicationId"
  ]
}
    """)

    def __init__(self):
        # NOTE(review): super(self.__class__, ...) recurses infinitely if this
        # class is ever subclassed; harmless for a generated leaf class.
        super(self.__class__, self).__init__(self.schema)
class AssignUserToAppSsoOutput(komand.Output):
    # Generated output-schema holder — regenerated by the Komand SDK; per the
    # file header, do not hand-edit the schema text.
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "result": {
      "type": "object",
      "title": "Result",
      "description": "Result",
      "order": 1
    }
  },
  "required": [
    "result"
  ]
}
    """)

    def __init__(self):
        # NOTE(review): super(self.__class__, ...) recurses infinitely if this
        # class is ever subclassed; harmless for a generated leaf class.
        super(self.__class__, self).__init__(self.schema)
| 19.261538
| 140
| 0.577476
|
4a11c167970b1074ae81df53df3f7ef204489a35
| 983
|
py
|
Python
|
boots/envs/batched_env.py
|
roosephu/boots
|
2f4f500f54feb95cf36abd863f3de4510d6f4950
|
[
"MIT"
] | 13
|
2019-10-15T10:43:39.000Z
|
2021-03-20T06:27:15.000Z
|
boots/envs/batched_env.py
|
roosephu/boots
|
2f4f500f54feb95cf36abd863f3de4510d6f4950
|
[
"MIT"
] | null | null | null |
boots/envs/batched_env.py
|
roosephu/boots
|
2f4f500f54feb95cf36abd863f3de4510d6f4950
|
[
"MIT"
] | 6
|
2020-01-21T06:51:18.000Z
|
2021-05-27T20:25:35.000Z
|
import numpy as np
from gym import Wrapper
from . import BaseBatchedEnv
class BatchedEnv(BaseBatchedEnv, Wrapper):
    """Steps several gym-style environments in lockstep, one per factory."""

    def __init__(self, make_envs):
        # `make_envs` is a sequence of zero-argument factories; one env each.
        created = [factory() for factory in make_envs]
        super().__init__(created[0])
        self.envs = created
        self.n_envs = len(created)

    def step(self, actions):
        """Step every env with its action; returns [states, rewards, dones, infos]."""
        transitions = []
        infos = []
        for env, action in zip(self.envs, actions):
            observation, reward, done, info = env.step(action)
            transitions.append((observation, reward, done))
            infos.append(info)
        # Stack each field across envs into an array; infos stay a plain list.
        stacked = [np.array(field) for field in zip(*transitions)]
        return stacked + [infos]

    def reset(self):
        """Reset every env; returns the stacked initial states."""
        return self.partial_reset(range(self.n_envs))

    def partial_reset(self, indices):
        """Reset only the envs at `indices`; returns their stacked states."""
        return np.array([self.envs[index].reset() for index in indices])

    def __repr__(self):
        return f'Batch<{self.n_envs}x {self.env}>'

    def set_state(self, state):
        # Intentionally a no-op for this wrapper.
        pass
| 25.868421
| 61
| 0.599186
|
4a11c169e5267f7c09900d6652a1df96d5776f7b
| 72,844
|
py
|
Python
|
jax/lax/lax_control_flow.py
|
megioliver/jax
|
620bf4300b74c298a5e0133e08f60f76700cf37f
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-03-12T04:34:46.000Z
|
2020-03-12T04:34:46.000Z
|
jax/lax/lax_control_flow.py
|
megioliver/jax
|
620bf4300b74c298a5e0133e08f60f76700cf37f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
jax/lax/lax_control_flow.py
|
megioliver/jax
|
620bf4300b74c298a5e0133e08f60f76700cf37f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Control flow primitives.
"""
import collections
import functools
import itertools
import operator
import threading
from typing import Callable
import numpy as onp
from jax import api
from jax import core
from jax import dtypes
from jax.lax import lax
from jax import linear_util as lu
from jax.abstract_arrays import ShapedArray, raise_to_shaped
from jax.api_util import flatten_fun_nokwargs, apply_flat_fun_nokwargs
from jax.core import get_aval
from jax.interpreters import ad
from jax.interpreters import partial_eval as pe
from jax.interpreters import xla
from jax.interpreters import batching
from jax.interpreters import masking
from jax.lib import xla_bridge as xb
from jax.lib import xla_client
from jax.util import (partial, unzip2, safe_map, safe_zip, split_list,
split_dict, cache, extend_name_stack)
from jax.tree_util import (tree_flatten, tree_unflatten, treedef_is_leaf,
treedef_children, treedef_tuple)
from jax import ad_util
# Length-checked variants of the builtins (safe_map/safe_zip raise on
# mismatched lengths). Note `zip` deliberately shadows the builtin for the
# rest of this module.
_map = safe_map
zip = safe_zip
_reduce = functools.reduce
@cache()
def _initial_style_jaxpr(fun: Callable, in_tree, in_avals):
  """Trace ``fun`` to a TypedJaxpr with constants hoisted into inputs.

  Returns (typed_jaxpr, consts, out_tree). Cached on (fun, in_tree, in_avals).
  """
  # Fully-known avals with unit constants: trace with no known values.
  in_pvals = [pe.PartialVal((aval, core.unit)) for aval in in_avals]
  fun, out_tree = flatten_fun_nokwargs(lu.wrap_init(fun), in_tree)
  jaxpr, out_pvals, consts = pe.trace_to_jaxpr(fun, in_pvals, instantiate=True,
                                               stage_out_calls=True)
  out_avals = _map(raise_to_shaped, unzip2(out_pvals)[0])
  const_avals = tuple(raise_to_shaped(core.get_aval(c)) for c in consts)
  # convert_constvars_jaxpr turns constvars into leading invars, so consts
  # are passed positionally ahead of the real inputs.
  typed_jaxpr = core.TypedJaxpr(pe.convert_constvars_jaxpr(jaxpr),
                                (), const_avals + in_avals, out_avals)
  return typed_jaxpr, consts, out_tree()
def _abstractify(x):
  """Return the shaped abstract value corresponding to ``x``."""
  aval = core.get_aval(x)
  return raise_to_shaped(aval)
def typecheck(aval, x):
  """Return True if ``x``'s abstract value is compatible with ``aval``."""
  expected = raise_to_shaped(aval).strip_weak_type()
  try:
    # Joining with x's aval leaves `expected` unchanged iff x fits in it;
    # lattice_join raises TypeError for incomparable avals.
    return expected == core.lattice_join(expected, core.get_aval(x)).strip_weak_type()
  except TypeError:
    return False
def typematch(aval1, aval2):
  """Return True if two abstract values are equal, ignoring weak types."""
  lhs = raise_to_shaped(aval1).strip_weak_type()
  rhs = raise_to_shaped(aval2).strip_weak_type()
  return lhs == rhs
class FixedPointError(Exception): pass
### fori_loop and while_loop
def _fori_cond_fun(loop_carry):
  """while_loop condition for fori_loop: keep going while index < upper."""
  index, upper, _ = loop_carry
  return lax.lt(index, upper)
@cache()
def _fori_body_fun(body_fun):
  """Adapt a fori_loop body ``(i, x) -> x`` into a while_loop body over
  the carry ``(i, upper, x)``. Cached so repeated fori_loop calls with the
  same body reuse one wrapper."""
  def while_body_fun(loop_carry):
    index, upper, carry = loop_carry
    next_index = lax.add(index, lax._const(index, 1))
    return next_index, upper, body_fun(index, carry)
  return while_body_fun
def fori_loop(lower, upper, body_fun, init_val):
  """Loop from ``lower`` to ``upper`` by reduction to ``while_loop``.

  The type signature in brief is

  .. code-block:: haskell

    fori_loop :: Int -> Int -> ((int, a) -> a) -> a -> a

  The semantics of ``fori_loop`` are given by this Python implementation::

    def fori_loop(lower, upper, body_fun, init_val):
      val = init_val
      for i in range(lower, upper):
        val = body_fun(i, val)
      return val

  Unlike that Python version, ``fori_loop`` is implemented in terms of a call to
  ``while_loop``. See the docstring for ``while_loop`` for more information.

  Also unlike the Python analogue, the loop-carried value ``val`` must hold a
  fixed shape and dtype across all iterations (and not just be consistent up to
  NumPy rank/shape broadcasting and dtype promotion rules, for example). In
  other words, the type ``a`` in the type signature above represents an array
  with a fixed shape and dtype (or a nested tuple/list/dict container data
  structure with a fixed structure and arrays with fixed shape and dtype at the
  leaves).

  Args:
    lower: an integer representing the loop index lower bound (inclusive)
    upper: an integer representing the loop index upper bound (exclusive)
    body_fun: function of type ``(int, a) -> a``.
    body_fun: function of type ``(int, a) -> a``.
    init_val: initial loop carry value of type ``a``.

  Returns:
    Loop value from the final iteration, of type ``a``.
  """
  # TODO: perhaps do more type checking here, for better error messages.
  lower_dtype = dtypes.canonicalize_dtype(lax.dtype(lower))
  upper_dtype = dtypes.canonicalize_dtype(lax.dtype(upper))
  if lower_dtype != upper_dtype:
    msg = ("lower and upper arguments to fori_loop must have equal types, "
           "got {} and {}")
    raise TypeError(msg.format(lower_dtype.name, upper_dtype.name))
  # Carry is (index, upper, val); only the val component is returned.
  _, _, result = while_loop(_fori_cond_fun, _fori_body_fun(body_fun),
                            (lower, upper, init_val))
  return result
def while_loop(cond_fun, body_fun, init_val):
  """Call ``body_fun`` repeatedly in a loop while ``cond_fun`` is True.

  The type signature in brief is

  .. code-block:: haskell

    while_loop :: (a -> Bool) -> (a -> a) -> a -> a

  The semantics of ``while_loop`` are given by this Python implementation::

    def while_loop(cond_fun, body_fun, init_val):
      val = init_val
      while cond_fun(val):
        val = body_fun(val)
      return val

  Unlike that Python version, ``while_loop`` is a JAX primitive and is lowered
  to a single XLA While HLO. That makes it useful for reducing compilation times
  for jit-compiled functions, since native Python loop constructs in an ``@jit``
  function are unrolled, leading to large XLA computations.

  Also unlike the Python analogue, the loop-carried value ``val`` must hold a
  fixed shape and dtype across all iterations (and not just be consistent up to
  NumPy rank/shape broadcasting and dtype promotion rules, for example). In
  other words, the type ``a`` in the type signature above represents an array
  with a fixed shape and dtype (or a nested tuple/list/dict container data
  structure with a fixed structure and arrays with fixed shape and dtype at the
  leaves).

  Another difference from using Python-native loop constructs is that
  ``while_loop`` is not reverse-mode differentiable because XLA computations
  require static bounds on memory requirements.

  Args:
    cond_fun: function of type ``a -> Bool``.
    body_fun: function of type ``a -> a``.
    init_val: value of type ``a``, a type that can be a scalar, array, or any
      pytree (nested Python tuple/list/dict) thereof, representing the initial
      loop carry value.

  Returns:
    The output from the final iteration of body_fun, of type ``a``.
  """
  init_vals, in_tree = tree_flatten((init_val,))
  init_avals = tuple(_map(_abstractify, init_vals))
  # Stage both functions out to jaxprs over the flattened carry.
  cond_jaxpr, cond_consts, cond_tree = _initial_style_jaxpr(cond_fun, in_tree, init_avals)
  body_jaxpr, body_consts, body_tree = _initial_style_jaxpr(body_fun, in_tree, init_avals)
  if not treedef_is_leaf(cond_tree) or len(cond_jaxpr.out_avals) != 1:
    msg = "cond_fun must return a boolean scalar, but got pytree {}."
    raise TypeError(msg.format(cond_tree))
  if cond_jaxpr.out_avals[0].strip_weak_type() != ShapedArray((), onp.bool_):
    msg = "cond_fun must return a boolean scalar, but got output type(s) {}."
    raise TypeError(msg.format(cond_jaxpr.out_avals))

  in_tree_children = in_tree.children()
  assert len(in_tree_children) == 1
  # The body must map the carry type to itself.
  _check_tree_and_avals("body_fun output and input",
                        # Extract the subtree and avals for the first element of the return tuple
                        body_tree, body_jaxpr.out_avals,
                        in_tree_children[0], init_avals)
  # Bind consts positionally ahead of the carry; nconsts say where each ends.
  outs = while_p.bind(*itertools.chain(cond_consts, body_consts, init_vals),
                      cond_nconsts=len(cond_consts), cond_jaxpr=cond_jaxpr,
                      body_nconsts=len(body_consts), body_jaxpr=body_jaxpr)
  return tree_unflatten(body_tree, outs)
def _while_loop_abstract_eval(*args, **kwargs):
  """A while loop's output avals are its body jaxpr's output avals."""
  body_jaxpr = kwargs["body_jaxpr"]
  return _map(raise_to_shaped, body_jaxpr.out_avals)
def _while_loop_translation_rule(c, axis_env, name_stack, *args, **kwargs):
  """Lower the while primitive to a single XLA While HLO."""
  backend = kwargs.pop('backend')
  cond_jaxpr, body_jaxpr, cond_nconsts, body_nconsts = split_dict(
      kwargs, ["cond_jaxpr", "body_jaxpr", "cond_nconsts", "body_nconsts"])
  cond_consts, body_consts, init_vals = split_list(args, [cond_nconsts, body_nconsts])
  # Non-scalar cond output means this loop was batched (vmapped).
  batched = bool(cond_jaxpr.out_avals[0].shape)

  # Since jaxprs don't have tuples and have multiple return values, but we need
  # the HLO While loop to take a single tuple input and output a single boolean
  # (for the cond computation) or a single tuple output (for the body
  # computation), we build XLA computations that handle the tuple munging before
  # generating a Call into the computations formed from the jaxprs.
  init_carry = c.Tuple(*(cond_consts + body_consts + init_vals))

  # Condition computation: unpack the carry (cond consts x, body consts,
  # loop values z), run the cond jaxpr on (x, z).
  cond_c = xb.make_computation_builder("cond_computation")
  cond_carry = cond_c.ParameterWithShape(c.GetShape(init_carry))
  cond_carry_elts = [cond_c.GetTupleElement(cond_carry, i) for i in range(len(args))]
  x, _, z = split_list(cond_carry_elts, [cond_nconsts, body_nconsts])
  pred, = xla.jaxpr_subcomp(cond_c, cond_jaxpr.jaxpr, backend, axis_env,
                            _map(cond_c.Constant, cond_jaxpr.literals),
                            extend_name_stack(name_stack, 'cond'), *(x + z))
  if batched:
    # Batched predicate: loop while ANY batch element's predicate holds.
    scalar = ShapedArray((), onp.bool_)
    or_ = xla.primitive_subcomputation(lax.or_p, scalar, scalar)
    pred = cond_c.Reduce(pred, cond_c.Constant(onp.array(False)), or_,
                         list(range(cond_jaxpr.out_avals[0].ndim)))

  # Body computation: run the body jaxpr on (y, z) to get a new z.
  body_c = xb.make_computation_builder("body_computation")
  body_carry = body_c.ParameterWithShape(c.GetShape(init_carry))
  body_carry_elts = [body_c.GetTupleElement(body_carry, i) for i in range(len(args))]
  x, y, z = split_list(body_carry_elts, [cond_nconsts, body_nconsts])
  new_z = xla.jaxpr_subcomp(body_c, body_jaxpr.jaxpr, backend, axis_env,
                            _map(body_c.Constant, body_jaxpr.literals),
                            extend_name_stack(name_stack, 'body'), *(y + z))
  if batched:
    # Batch elements whose own predicate is already False keep their old
    # carry value, so finished elements stop updating.
    body_pred, = xla.jaxpr_subcomp(body_c, cond_jaxpr.jaxpr, backend, axis_env,
                                   _map(body_c.Constant, cond_jaxpr.literals),
                                   extend_name_stack(name_stack, 'body_pred'), *(x + z))
    new_z = _map(partial(_pred_bcast_select, body_c, body_pred), new_z, z)
    assert _map(body_c.GetShape, new_z) == _map(body_c.GetShape, z)  # no broadcast
  new_carry = body_c.Tuple(*itertools.chain(x, y, new_z))

  ans = c.While(cond_c.Build(pred), body_c.Build(new_carry), init_carry)
  ans_elts = [c.GetTupleElement(ans, i) for i in range(len(args))]
  # Only the loop values (not the consts) are the primitive's outputs.
  _, _, z = split_list(ans_elts, [cond_nconsts, body_nconsts])
  return c.Tuple(*z)
def _pred_bcast_select(c, pred, x, y):
  """Build an XLA Select(pred, x, y), broadcasting pred up to x/y's shape."""
  pred_shape = c.GetShape(pred).dimensions()
  x_shape = c.GetShape(x).dimensions()
  y_shape = c.GetShape(y).dimensions()
  assert x_shape == y_shape
  # pred's shape must be a leading prefix of the operands' shapes.
  assert pred_shape == x_shape[:len(pred_shape)] == y_shape[:len(pred_shape)]
  bcast_pred = c.BroadcastInDim(pred, x_shape, list(range(len(pred_shape))))
  return c.Select(bcast_pred, x, y)
def _while_loop_batching_rule(args, dims, cond_nconsts, cond_jaxpr,
                              body_nconsts, body_jaxpr):
  """vmap rule for the while primitive: batch cond/body jaxprs and re-bind."""
  # All mapped args must share one batch size.
  size, = {x.shape[d] for x, d in zip(args, dims) if d is not batching.not_mapped}
  orig_batched = [d is not batching.not_mapped for d in dims]
  cconst_bat, bconst_bat, init_bat = split_list(orig_batched, [cond_nconsts, body_nconsts])

  # Fixpoint computation of which carry are batched: either
  # batched from init, or the carry out is batched. Each iteration promotes
  # at least one carry to batched. We need at most len(carry) iterations,
  # but we need one last iteration to prepare the jaxpr based on the final
  # carry_bat.
  carry_bat = init_bat
  for _ in range(1 + len(carry_bat)):
    batched = bconst_bat + carry_bat
    body_jaxpr_batched, carry_bat_out = batching.batch_jaxpr(
        body_jaxpr, size, batched, instantiate=carry_bat)
    cond_jaxpr_batched, (pred_bat,) = batching.batch_jaxpr(
        cond_jaxpr, size, cconst_bat + carry_bat, instantiate=False)
    # A batched predicate forces every carry output to be treated as batched.
    carry_bat_out = _map(partial(operator.or_, pred_bat), carry_bat_out)
    if carry_bat_out == carry_bat:
      break
    else:
      carry_bat = _map(operator.or_, carry_bat, carry_bat_out)
  else:
    assert False, "Fixpoint not reached"

  consts, init = split_list(args, [cond_nconsts + body_nconsts])
  const_dims, init_dims = split_list(dims, [cond_nconsts + body_nconsts])
  # Move every batched dim to axis 0; broadcast carries promoted to batched.
  new_consts = [batching.moveaxis(x, d, 0) if d is not batching.not_mapped and d != 0
                else x for x, d in zip(consts, const_dims)]
  new_init = [batching.broadcast(x, size, 0) if now_bat and not was_bat
              else batching.moveaxis(x, d, 0) if now_bat else x
              for x, d, was_bat, now_bat in zip(init, init_dims, init_bat, carry_bat)]

  outs = while_p.bind(*(new_consts + new_init),
                      cond_nconsts=cond_nconsts, cond_jaxpr=cond_jaxpr_batched,
                      body_nconsts=body_nconsts, body_jaxpr=body_jaxpr_batched)
  out_bdims = [0 if b else batching.not_mapped for b in carry_bat]
  return outs, out_bdims
def _while_loop_jvp(primals, tangents, cond_nconsts, cond_jaxpr, body_nconsts,
                    body_jaxpr):
  """JVP rule for the while primitive: run one loop carrying (primal, tangent)."""
  nonzeros = [t is not ad_util.zero for t in tangents]
  cconst_nz, bconst_nz, init_nz = split_list(nonzeros, [cond_nconsts, body_nconsts])

  # Fixpoint on which carry tangents are nonzero: a tangent that becomes
  # nonzero after one body application must be carried as nonzero from the
  # start.
  carry_nz = init_nz
  for _ in range(1 + len(carry_nz)):
    body_nonzeros = bconst_nz + carry_nz
    body_jvp, nonzeros_out = ad.jvp_jaxpr(
        body_jaxpr, body_nonzeros, instantiate=carry_nz)
    if nonzeros_out == carry_nz:
      break
    carry_nz = _map(operator.or_, carry_nz, nonzeros_out)
  else:
    assert False, "Fixpoint not reached"

  nonzeros = cconst_nz + body_nonzeros
  # Materialize symbolic zeros that the fixpoint promoted to nonzero.
  tangents = [ad.instantiate_zeros(x, t) if t is ad_util.zero and nz else t
              for x, t, nz in zip(primals, tangents, nonzeros)]

  cconst, bconst, init = split_list(primals, [cond_nconsts, body_nconsts])
  _, bconst_dot, init_dot = split_list(tangents, [cond_nconsts, body_nconsts])
  bconst_dot = _prune_zeros(bconst_dot)
  init_dot = _prune_zeros(init_dot)

  num_carry = len(primals) - cond_nconsts - body_nconsts

  # Interleave binders so the loop carry is (consts, const_dots, carry,
  # carry_dots) in the order while_p expects.
  body_jvp_rearranged = ad.rearrange_binders(
      body_jvp,
      [body_nconsts, num_carry], [len(bconst_dot), len(init_dot)],
      [num_carry], [len(init_dot)])

  # The cond jaxpr must accept the widened carry; the extra tangent inputs
  # are simply ignored by its equations.
  newvar = core.gensym('')
  invars_aug = (
      cond_jaxpr.jaxpr.invars + [newvar(get_aval(x)) for x in init_dot])
  cond_jaxpr_augmented = core.Jaxpr(cond_jaxpr.jaxpr.constvars,
                                    invars_aug,
                                    cond_jaxpr.jaxpr.outvars,
                                    cond_jaxpr.jaxpr.eqns)
  in_avals_aug = (cond_jaxpr.in_avals[:cond_nconsts] +
                  body_jvp_rearranged.in_avals[body_nconsts + len(bconst_dot):])
  cond_jaxpr_augmented = core.TypedJaxpr(cond_jaxpr_augmented,
                                         cond_jaxpr.literals,
                                         in_avals_aug,
                                         cond_jaxpr.out_avals)

  out = while_p.bind(
      *(cconst + bconst + bconst_dot + init + init_dot),
      cond_nconsts=cond_nconsts,
      cond_jaxpr=cond_jaxpr_augmented,
      body_nconsts=len(bconst) + len(bconst_dot),
      body_jaxpr=body_jvp_rearranged)
  out_carry, out_carry_dot = split_list(out, [num_carry])
  # Re-insert symbolic zeros for tangents the fixpoint left at zero.
  out_tangents_iter = iter(out_carry_dot)
  out_tangents = [next(out_tangents_iter) if nz else ad_util.zero
                  for nz in nonzeros_out]
  return out_carry, out_tangents
# Register the 'while' primitive and wire up its evaluation, abstract-eval,
# JVP, XLA lowering, and vmap rules.
while_p = lax.Primitive('while')
while_p.multiple_results = True
while_p.def_impl(partial(xla.apply_primitive, while_p))
while_p.def_abstract_eval(_while_loop_abstract_eval)
ad.primitive_jvps[while_p] = _while_loop_jvp
xla.initial_style_translations[while_p] = _while_loop_translation_rule
batching.primitive_batchers[while_p] = _while_loop_batching_rule
### cond
def cond(pred, true_operand, true_fun, false_operand, false_fun):
  """Conditionally apply ``true_fun`` or ``false_fun``.

  Has equivalent semantics to this Python implementation::

    def cond(pred, true_operand, true_fun, false_operand, false_fun):
      if pred:
        return true_fun(true_operand)
      else:
        return false_fun(false_operand)

  Pred has to be a scalar type, collection types (list, tuple) are not supported
  """
  if len(onp.shape(pred)) != 0:
    raise TypeError("Pred must be a scalar, got {} of shape {}.".format(pred, onp.shape(pred)))

  try:
    pred_dtype = dtypes.result_type(pred)
  except TypeError as err:
    msg = ("Pred type must be either boolean or number, got {}.")
    raise TypeError(msg.format(pred)) from err
  if pred_dtype.kind != 'b':
    if pred_dtype.kind in 'iuf':
      # Numeric predicates are interpreted as "nonzero".
      pred = pred != 0
    else:
      msg = ("Pred type must be either boolean or number, got {}.")
      raise TypeError(msg.format(pred_dtype))

  # Stage both branches out to jaxprs and require matching output types.
  true_ops, true_tree = tree_flatten((true_operand,))
  true_avals = tuple(_map(_abstractify, true_ops))
  true_jaxpr, true_consts, true_out_tree = _initial_style_jaxpr(true_fun, true_tree, true_avals)
  false_ops, false_tree = tree_flatten((false_operand,))
  false_avals = tuple(_map(_abstractify, false_ops))
  false_jaxpr, false_consts, false_out_tree = _initial_style_jaxpr(false_fun, false_tree, false_avals)
  _check_tree_and_avals("true_fun and false_fun output",
                        true_out_tree, true_jaxpr.out_avals,
                        false_out_tree, false_jaxpr.out_avals)
  # No operands are known to be linear at this point.
  linear = (False,) * (len(true_consts) + len(true_ops) + len(false_consts) +
                       len(false_ops))
  out = cond_p.bind(
      *itertools.chain([pred], true_consts, true_ops, false_consts, false_ops),
      true_jaxpr=true_jaxpr, false_jaxpr=false_jaxpr, linear=linear)
  return tree_unflatten(true_out_tree, out)
def _cond_abstract_eval(*args, **kwargs):
  """A cond's output avals are its true-branch jaxpr's output avals (both
  branches have been checked to match)."""
  true_jaxpr = kwargs["true_jaxpr"]
  return _map(raise_to_shaped, true_jaxpr.out_avals)
def _cond_translation_rule(c, axis_env, name_stack, pred, *args,
                           true_jaxpr, false_jaxpr, linear, backend=None):
  """Lower the cond primitive to an XLA Conditional HLO."""
  del linear  # Unused.

  true_ops, false_ops = split_list(args, [len(true_jaxpr.in_avals)])

  def make_computation(name, jaxpr, op_shape):
    # Wrap a branch jaxpr as an XLA computation taking one tuple parameter.
    c = xb.make_computation_builder(name + '_comp')
    op = c.ParameterWithShape(op_shape)
    ops = [c.GetTupleElement(op, i) for i in range(len(jaxpr.in_avals))]
    outs = xla.jaxpr_subcomp(c, jaxpr.jaxpr, backend, axis_env,
                             _map(c.Constant, jaxpr.literals),
                             extend_name_stack(name_stack, name + '_fun'), *ops)
    return c.Build(c.Tuple(*outs))

  true_op = c.Tuple(*true_ops)
  true_c = make_computation('true', true_jaxpr, c.GetShape(true_op))

  false_op = c.Tuple(*false_ops)
  false_c = make_computation('false', false_jaxpr, c.GetShape(false_op))

  return c.Conditional(pred, true_op, true_c, false_op, false_c)
def _cond_pred_bcast_select(pred, x, y):
  """Elementwise select between x and y, broadcasting pred to x's shape."""
  if core.get_aval(x) is core.get_aval(y) is core.abstract_unit:
    # Unit carries have no data to select between.
    return x
  pred_b = lax.broadcast_in_dim(pred, onp.shape(x), list(range(onp.ndim(pred))))
  return lax.select(pred_b, x, y)
def _cond_batching_rule(args, dims, true_jaxpr, false_jaxpr, linear):
  """Vmap rule for cond.

  If the predicate itself is batched, a single branch cannot be chosen, so
  both branches are executed and their outputs are combined elementwise with
  a select. Otherwise a cond over batched branch jaxprs is emitted.
  """
  # TODO: maybe avoid moving arg axes to front if we're promoting to select?
  size, = {x.shape[d] for x, d in zip(args, dims) if d is not batching.not_mapped}
  # Canonicalize every batched argument to have its batch axis at position 0.
  args = [batching.moveaxis(x, d, 0) if d is not batching.not_mapped and d != 0
          else x for x, d in zip(args, dims)]
  orig_bat = [d is not batching.not_mapped for d in dims]
  del dims
  (pred,), true_ops, false_ops = split_list(args, [1, len(true_jaxpr.in_avals)])
  (pred_bat,), t_bat, f_bat = split_list(orig_bat, [1, len(true_jaxpr.in_avals)])
  # Both branches must agree on which outputs are batched; take the union of
  # the two branches' batched outputs and re-batch each branch against it.
  _, true_out_bat = batching.batch_jaxpr(true_jaxpr, size, t_bat, False)
  _, false_out_bat = batching.batch_jaxpr(false_jaxpr, size, f_bat, False)
  out_bat = [a or b for a, b in zip(true_out_bat, false_out_bat)]
  true_jaxpr_batched, _ = batching.batch_jaxpr(true_jaxpr, size, t_bat, out_bat)
  false_jaxpr_batched, _ = batching.batch_jaxpr(false_jaxpr, size, f_bat, out_bat)
  if pred_bat:
    # Batched predicate: run both branches eagerly and select per element.
    true_out = core.jaxpr_as_fun(true_jaxpr_batched)(*true_ops)
    false_out = core.jaxpr_as_fun(false_jaxpr_batched)(*false_ops)
    # Broadcast any unbatched outputs so the select operands line up.
    true_out = [batching.broadcast(x, size, 0) if not b else x
                for x, b in zip(true_out, out_bat)]
    false_out = [batching.broadcast(x, size, 0) if not b else x
                 for x, b in zip(false_out, out_bat)]
    return [_cond_pred_bcast_select(pred, t, f)
            for t, f in zip(true_out, false_out)], [0] * len(true_out)
  else:
    # Unbatched predicate: stay as a cond over the batched branch jaxprs.
    out_dims = [0 if b else batching.not_mapped for b in out_bat]
    out = cond_p.bind(
        *itertools.chain([pred], true_ops, false_ops),
        true_jaxpr=true_jaxpr_batched, false_jaxpr=false_jaxpr_batched, linear=linear)
    return out, out_dims
def _cond_jvp(primals, tangents, true_jaxpr, false_jaxpr, linear):
  """JVP rule for cond: differentiate both branches jointly.

  The predicate is not differentiable, so its tangent must be symbolically
  zero; each branch's jvp jaxpr is instantiated with the union of the two
  branches' nonzero output tangents so their output types agree.
  """
  nonzeros = [t is not ad_util.zero for t in tangents]
  (pred_nz,), t_nz, f_nz = split_list(nonzeros, [1, len(true_jaxpr.in_avals)])
  assert pred_nz is False
  # First pass: discover which outputs have nonzero tangents in each branch.
  _, true_out_nz = ad.jvp_jaxpr(true_jaxpr, t_nz, instantiate=False)
  _, false_out_nz = ad.jvp_jaxpr(false_jaxpr, f_nz, instantiate=False)
  out_nz = [a or b for a, b in zip(true_out_nz, false_out_nz)]
  # Second pass: force both branches to instantiate the union of nonzeros.
  true_jvp, _ = ad.jvp_jaxpr(true_jaxpr, t_nz, instantiate=out_nz)
  false_jvp, _ = ad.jvp_jaxpr(false_jaxpr, f_nz, instantiate=out_nz)
  (pred,), tops, fops = split_list(primals, [1, len(true_jaxpr.in_avals)])
  _, tops_dot, fops_dot = split_list(tangents, [1, len(true_jaxpr.in_avals)])
  # Drop symbolic zeros; the jvp jaxprs only take the nonzero tangents.
  tops_dot = _prune_zeros(tops_dot)
  fops_dot = _prune_zeros(fops_dot)
  tops_lin, fops_lin = _map(tuple, split_list(linear, [len(tops)]))
  # Tangent inputs are linear by construction.
  linear_jvp = (tops_lin + (True,) * len(tops_dot) +
                fops_lin + (True,) * len(fops_dot))
  out = cond_p.bind(
      *itertools.chain([pred], tops, tops_dot, fops, fops_dot),
      true_jaxpr=true_jvp, false_jaxpr=false_jvp, linear=linear_jvp)
  out_primals, out_tangents = split_list(out, [len(out_nz)])
  # Re-insert symbolic zeros where neither branch produced a tangent.
  out_tangents_iter = iter(out_tangents)
  out_tangents = [
      next(out_tangents_iter) if nz else ad_util.zero for nz in out_nz]
  return out_primals, out_tangents
def _cond_partial_eval(trace, *tracers, true_jaxpr, false_jaxpr, linear):
  """Partial-evaluation rule for cond.

  Splits each branch into a known part (jaxpr_1, evaluated now) and an
  unknown part (jaxpr_2, staged out), padding each branch's residual outputs
  with symbolic zeros for the other branch's residuals so both known jaxprs
  have identical output signatures.
  """
  unknowns = [t.pval[0] is not None for t in tracers]
  (pred_uk,), t_uk, f_uk = split_list(unknowns, [1, len(true_jaxpr.in_avals)])
  if pred_uk:
    # When the predicate is unknown, we stage out the whole cond.
    params = dict(true_jaxpr=true_jaxpr, false_jaxpr=false_jaxpr, linear=linear)
    return trace.default_process_primitive(cond_p, tracers, params)
  # First pass: find which outputs are unknown in either branch, then split
  # both branches with the union so their staged parts agree on outputs.
  _, _, t_out_uks = pe.partial_eval_jaxpr(true_jaxpr, t_uk, instantiate=False)
  _, _, f_out_uks = pe.partial_eval_jaxpr(false_jaxpr, f_uk, instantiate=False)
  out_uks = [a or b for a, b in zip(t_out_uks, f_out_uks)]
  true_jaxpr_1, true_jaxpr_2, _ = pe.partial_eval_jaxpr(true_jaxpr, t_uk,
                                                        instantiate=out_uks)
  false_jaxpr_1, false_jaxpr_2, _ = pe.partial_eval_jaxpr(false_jaxpr, f_uk,
                                                          instantiate=out_uks)
  # Residuals are the extra outputs of each known jaxpr beyond the real ones.
  num_t_res = len(true_jaxpr_1.out_avals) - len(out_uks)
  num_f_res = len(false_jaxpr_1.out_avals) - len(out_uks)
  # Move each staged jaxpr's residual binders to the front so the staged
  # cond's operand order is [pred, true_res, true_ops, false_res, false_ops].
  move = [False] * len(true_jaxpr.in_avals) + [True] * num_t_res
  true_jaxpr_2 = pe.move_binders_to_front(true_jaxpr_2, move)
  move = [False] * len(false_jaxpr.in_avals) + [True] * num_f_res
  false_jaxpr_2 = pe.move_binders_to_front(false_jaxpr_2, move)
  # TODO(frostig,mattjj): pe.partial_eval_jaxpr should raise to shaped avals
  t_res_avals = _map(raise_to_shaped, true_jaxpr_2.in_avals[:num_t_res])
  f_res_avals = _map(raise_to_shaped, false_jaxpr_2.in_avals[:num_f_res])
  assert len(true_jaxpr_2.out_avals) == len(false_jaxpr_2.out_avals)
  num_outs = len(true_jaxpr_2.out_avals)
  # Pad each known branch with zeros standing in for the other branch's
  # residuals so both produce [outs, true_res, false_res].
  true_jaxpr_1 = _join_cond_outputs(
      true_jaxpr_1, num_outs, f_res_avals, zeros_on_left=False)
  false_jaxpr_1 = _join_cond_outputs(
      false_jaxpr_1, num_outs, t_res_avals, zeros_on_left=True)
  # TODO(frostig,mattjj): reinstate this assertion once pe.partial_eval_jaxpr
  # raises to shaped avals
  # assert true_jaxpr_1.out_avals == false_jaxpr_1.out_avals
  num_res = num_t_res + num_f_res
  # Evaluate the known parts now; the tail of the outputs are the residuals.
  _, in_consts = unzip2([t.pval for t in tracers])
  out_consts_res = cond_p.bind(
      *in_consts, true_jaxpr=true_jaxpr_1, false_jaxpr=false_jaxpr_1,
      linear=linear)
  out_consts, res = split_list(out_consts_res, [len(out_consts_res) - num_res])
  # TODO(frostig,mattjj): remove raised_to_shaped of avals once
  # pe.partial_eval_jaxpr handles it
  out_avals = _map(raise_to_shaped, true_jaxpr_2.out_avals)
  out_pvs = [aval if uk else None for aval, uk in zip(out_avals, out_uks)]
  # Build the tracers for the staged-out cond equation; known operands are
  # replaced by unit literals since jaxpr_2 does not consume them.
  pred_tracer = trace.instantiate_const(tracers[0])
  ops_tracers = [trace.instantiate_const(t) if uk
                 else trace.new_instantiated_literal(core.unit)
                 for uk, t in zip(unknowns[1:], tracers[1:])]
  true_ops_tracers, false_ops_tracers = split_list(
      ops_tracers, [len(true_jaxpr.in_avals)])
  res_tracers = _map(trace.new_instantiated_const, res)
  true_res_tracers, false_res_tracers = split_list(res_tracers, [num_t_res])
  out_tracers = [pe.JaxprTracer(trace, pe.PartialVal((pv, const)), None)
                 for pv, const in zip(out_pvs, out_consts)]
  tops_lin, fops_lin = _map(tuple, split_list(linear, [len(true_jaxpr.in_avals)]))
  # Residual inputs to the staged cond are nonlinear.
  linear_2 = ((False,) * num_t_res + tops_lin + (False,) * num_f_res + fops_lin)
  params = dict(true_jaxpr=true_jaxpr_2, false_jaxpr=false_jaxpr_2,
                linear=linear_2)
  eqn = pe.new_eqn_recipe([pred_tracer] +
                          true_res_tracers + true_ops_tracers +
                          false_res_tracers + false_ops_tracers,
                          out_tracers,
                          cond_p, params)
  for t in out_tracers: t.recipe = eqn
  return out_tracers
def _join_cond_outputs(jaxpr, num_prefix, zeros_avals, zeros_on_left):
  """Return a jaxpr like `jaxpr` with extra symbolic-zero outputs of the given
  avals inserted, either just after the first `num_prefix` outputs
  (zeros_on_left=True) or appended at the end (zeros_on_left=False)."""
  @lu.wrap_init
  def augmented(*args):
    outs = core.jaxpr_as_fun(jaxpr)(*args)
    prefix, suffix = split_list(outs, [num_prefix])
    zeros = [ad_util.zeros_like_aval(aval) for aval in zeros_avals]
    return prefix + zeros + suffix if zeros_on_left else prefix + suffix + zeros
  return _make_typed_jaxpr(augmented, jaxpr.in_avals)
def _transpose_cond_jaxpr(jaxpr, num_res):
  """Transpose a cond branch jaxpr.

  The input jaxpr maps [res, linear_primals] -> outs; the result maps
  [res, output_cotangents] -> cotangents for the linear primal inputs.
  """
  num_non_res = len(jaxpr.in_avals) - num_res
  res_avals, primal_avals = split_list(jaxpr.in_avals, [num_res])
  primal_avals = _map(raise_to_shaped, primal_avals)
  @lu.wrap_init
  def transposed(*args):
    res, cts_out = split_list(args, [num_res])
    # Residuals are known values; the linear inputs are the transpose targets.
    primals = res + [ad.undefined_primal] * num_non_res
    cts_in = ad.backward_pass(
        jaxpr.jaxpr, jaxpr.literals, primals, cts_out)
    # Drop the (undefined) cotangents for the residual positions.
    _, cts_in = split_list(cts_in, [num_res])
    # Materialize symbolic zeros so every output has a concrete aval.
    return _map(ad.instantiate_zeros_aval, primal_avals, cts_in)
  return _make_typed_jaxpr(transposed, res_avals + jaxpr.out_avals)
def _cond_transpose(cts, *args, true_jaxpr, false_jaxpr, linear):
  """Transpose rule for cond: transpose each branch and run them under a new
  cond, padding each transposed branch with zeros for the other branch's
  linear inputs so both produce cotangents for all linear operands."""
  (pred,), tops, fops = split_list(args, [1, len(true_jaxpr.in_avals)])
  tops_lin, fops_lin = split_list(linear, [len(true_jaxpr.in_avals)])
  in_avals = _map(raise_to_shaped, true_jaxpr.in_avals + false_jaxpr.in_avals)
  # Residuals are the nonlinear (leading) operands of each branch.
  num_t_res = len(tops) - sum(tops_lin)
  num_f_res = len(fops) - sum(fops_lin)
  t_jaxpr_trans = _transpose_cond_jaxpr(true_jaxpr, num_t_res)
  f_jaxpr_trans = _transpose_cond_jaxpr(false_jaxpr, num_f_res)
  lin_in_avals = _map(raise_to_shaped, [a for a, l in zip(in_avals, linear) if l])
  assert t_jaxpr_trans.out_avals + f_jaxpr_trans.out_avals == lin_in_avals
  # Pad so both transposed branches emit cotangents for every linear input.
  t_jaxpr_trans_ = _join_cond_outputs(
      t_jaxpr_trans, 0, f_jaxpr_trans.out_avals, zeros_on_left=False)
  f_jaxpr_trans_ = _join_cond_outputs(
      f_jaxpr_trans, 0, t_jaxpr_trans.out_avals, zeros_on_left=True)
  assert t_jaxpr_trans_.out_avals == f_jaxpr_trans_.out_avals == lin_in_avals
  t_res, _ = split_list(tops, [num_t_res])
  f_res, _ = split_list(fops, [num_f_res])
  # Residuals stay nonlinear; the incoming cotangents are the linear inputs.
  linear_trans = ((False,) * num_t_res + (True,) * len(cts) +
                  (False,) * num_f_res + (True,) * len(cts))
  cts = _map(ad.instantiate_zeros_aval, true_jaxpr.out_avals, cts)
  out = cond_p.bind(
      pred, *itertools.chain(t_res, cts, f_res, cts),
      true_jaxpr=t_jaxpr_trans_, false_jaxpr=f_jaxpr_trans_,
      linear=linear_trans)
  assert all(_map(typecheck, lin_in_avals, out))
  # Scatter the cotangents back to the linear operand positions; nonlinear
  # positions (including the predicate) get None.
  out_iter = iter(out)
  out = [next(out_iter) if l else None for l in linear]
  assert next(out_iter, None) is None
  return [None] + out
def cond_bind(*args, true_jaxpr, false_jaxpr, linear):
  """Custom bind for cond_p that type-checks operands against the branch
  jaxprs (and the jaxprs themselves) when checks are enabled."""
  if not core.skip_checks:
    # `linear` has one flag per operand except the predicate.
    assert len(linear) + 1 == len(args)
    assert len(args) == 1 + len(true_jaxpr.in_avals) + len(false_jaxpr.in_avals)
    (pred,), tops, fops = split_list(args, [1, len(true_jaxpr.in_avals)])
    assert all(_map(typecheck, true_jaxpr.in_avals, tops))
    assert all(_map(typecheck, false_jaxpr.in_avals, fops))
    core.check_jaxpr(true_jaxpr.jaxpr)
    core.check_jaxpr(false_jaxpr.jaxpr)
  return core.Primitive.bind(cond_p, *args, true_jaxpr=true_jaxpr,
                             false_jaxpr=false_jaxpr, linear=linear)
# Set up the cond primitive and register its transformation rules.
# NOTE(review): scan_p below is created via core.Primitive while cond_p uses
# lax.Primitive — confirm these refer to the same class.
cond_p = lax.Primitive('cond')
cond_p.multiple_results = True
cond_p.def_impl(partial(xla.apply_primitive, cond_p))
cond_p.def_abstract_eval(_cond_abstract_eval)
cond_p.def_custom_bind(cond_bind)
ad.primitive_jvps[cond_p] = _cond_jvp
ad.primitive_transposes[cond_p] = _cond_transpose
pe.custom_partial_eval_rules[cond_p] = _cond_partial_eval
batching.primitive_batchers[cond_p] = _cond_batching_rule
xla.initial_style_translations[cond_p] = _cond_translation_rule
### scan
def scan(f, init, xs, length=None):
  """Scan a function over leading array axes while carrying along state.
  The type signature in brief is
  .. code-block:: haskell
    scan :: (c -> a -> (c, b)) -> c -> [a] -> (c, [b])
  where we use [t] here to denote the type t with an additional leading axis.
  That is, if t is an array type then [t] represents the type with an additional
  leading axis, and if t is a pytree (container) type with array leaves then [t]
  represents the type with the same pytree structure and corresponding leaves
  each with an additional leading axis.
  When ``a`` is an array type or None, and ``b`` is an array type, the semantics
  of ``scan`` are given roughly by this Python implementation::
    def scan(f, init, xs, length=None):
      if xs is None:
        xs = [None] * length
      carry = init
      ys = []
      for x in xs:
        carry, y = f(carry, x)
        ys.append(y)
      return carry, np.stack(ys)
  Unlike that Python version, both ``a`` and ``b`` may be arbitrary pytree
  types, and so multiple arrays can be scanned over at once and produce multiple
  output arrays. (None is actually an empty pytree.)
  Also unlike that Python version, ``scan`` is a JAX primitive and is lowered to
  a single XLA While HLO. That makes it useful for reducing compilation times
  for jit-compiled functions, since native Python loop constructs in an ``@jit``
  function are unrolled, leading to large XLA computations.
  Finally, the loop-carried value ``carry`` must hold a fixed shape and dtype
  across all iterations (and not just be consistent up to NumPy rank/shape
  broadcasting and dtype promotion rules, for example). In other words, the type
  ``c`` in the type signature above represents an array with a fixed shape and
  dtype (or a nested tuple/list/dict container data structure with a fixed
  structure and arrays with fixed shape and dtype at the leaves).
  Args:
    f: a Python function to be scanned of type ``c -> a -> (c, b)``, meaning
      that ``f`` accepts two arguments where the first is a value of the loop
      carry and the second is a slice of ``xs`` along its leading axis, and that
      ``f`` returns a pair where the first element represents a new value for
      the loop carry and the second represents a slice of the output.
    init: an initial loop carry value of type ``c``, which can be a scalar,
      array, or any pytree (nested Python tuple/list/dict) thereof, representing
      the initial loop carry value. This value must have the same structure as
      the first element of the pair returned by ``f``.
    xs: the value of type ``[a]`` over which to scan along the leading axis,
      where ``[a]`` can be an array or any pytree (nested Python
      tuple/list/dict) thereof with consistent leading axis sizes.
    length: optional integer specifying the number of loop iterations, which
      must agree with the sizes of leading axes of the arrays in ``xs`` (but can
      be used to perform scans where no input ``xs`` are needed).
  Returns:
    A pair of type ``(c, [b])`` where the first element represents the final
    loop carry value and the second element represents the stacked outputs of
    the second output of ``f`` when scanned over the leading axis of the inputs.
  """
  # Flatten the carry and inputs to lists of leaves for the flat primitive.
  init_flat, init_tree = tree_flatten(init)
  xs_flat, _ = tree_flatten(xs)
  in_flat, in_tree = tree_flatten((init, xs))
  # Validate that every leaf of xs has a leading axis to scan over.
  try:
    lengths = [x.shape[0] for x in xs_flat]
  except AttributeError as err:
    msg = "scan got value with no leading axis to scan over: {}."
    raise ValueError(
      msg.format(', '.join(str(x) for x in xs_flat
                           if not hasattr(x, 'shape')))) from err
  # Determine the loop length from `length` and/or the leading axis sizes,
  # requiring all of them to agree.
  if length is not None:
    length = int(length)
    if not all(length == l for l in lengths):
      msg = ("scan got `length` argument of {} which disagrees with "
             "leading axis sizes {}.")
      raise ValueError(msg.format(length, [x.shape[0] for x in xs_flat]))
  else:
    unique_lengths = set(lengths)
    if len(unique_lengths) > 1:
      msg = "scan got values with different leading axis sizes: {}."
      raise ValueError(msg.format(', '.join(str(x.shape[0]) for x in xs_flat)))
    elif len(unique_lengths) == 0:
      msg = "scan got no values to scan over and `length` not provided."
      raise ValueError(msg)
    else:
      length, = unique_lengths
  # Trace the body at the carry avals plus per-iteration slices of xs
  # (leading axis removed).
  carry_avals = tuple(_map(_abstractify, init_flat))
  x_shapes = [masking.padded_shape_as_value(x.shape[1:]) for x in xs_flat]
  x_dtypes = [x.dtype for x in xs_flat]
  x_avals = tuple(_map(ShapedArray, x_shapes, x_dtypes))
  jaxpr, consts, out_tree = _initial_style_jaxpr(f, in_tree, carry_avals + x_avals)
  out_tree_children = out_tree.children()
  if len(out_tree_children) != 2:
    msg = "scan body output must be a pair, got {}."
    raise TypeError(msg.format(tree_unflatten(out_tree, jaxpr.out_avals)))
  # The carry must be type-stable: output carry must match the input carry.
  _check_tree_and_avals("scan carry output and input",
                        # Extract the subtree and avals for the first element of the return tuple
                        out_tree_children[0], jaxpr.out_avals[:out_tree_children[0].num_leaves],
                        init_tree, carry_avals)
  out = scan_p.bind(*itertools.chain(consts, in_flat),
                    forward=True, length=length, jaxpr=jaxpr,
                    num_consts=len(consts), num_carry=len(init_flat),
                    linear=(False,) * (len(consts) + len(in_flat)))
  return tree_unflatten(out_tree, out)
def _scan_impl(*args, forward, length, num_consts, num_carry, jaxpr, linear):
  """Implementation of scan as a fori_loop with per-iteration indexed reads
  of xs and indexed writes into preallocated output buffers."""
  consts, init, xs = split_list(args, [num_consts, num_carry])
  _, _, x_avals = split_list(jaxpr.in_avals, [num_consts, num_carry])
  _, y_avals = split_list(jaxpr.out_avals, [num_carry])
  def body_fun(i, vals):
    # Reverse the iteration order when scanning backward.
    i = i if forward else length - i - 1
    carry, ys = split_list(vals, [num_carry])
    # Slice out this iteration's inputs along the leading axis.
    x = _map(partial(_index_array, i), x_avals, xs)
    out_flat = core.jaxpr_as_fun(jaxpr)(*(consts + carry + x))
    carry_out, y_updates = split_list(out_flat, [num_carry])
    # Write this iteration's outputs into the ys buffers at position i.
    ys_out = _map(partial(_update_array, i), y_avals, ys, y_updates)
    return carry_out + ys_out
  ys_init = _map(partial(_empty_array, length), y_avals)
  return fori_loop(lax._const(length, 0), length, body_fun, init + ys_init)
def _index_array(i, aval, x):
  """Slice out x[i] along the leading axis; units pass through unchanged."""
  if aval is core.abstract_unit:
    return core.unit
  return lax.dynamic_index_in_dim(x, i, keepdims=False)
def _empty_array(sz, aval):
  """Allocate a zero-filled buffer of shape (sz, *aval.shape); units pass
  through unchanged."""
  if aval is core.abstract_unit:
    return core.unit
  return lax.full((sz,) + aval.shape, 0, aval.dtype)
def _update_array(i, aval, xs, x):
  """Write x into xs at index i along the leading axis; units pass through
  unchanged."""
  if aval is core.abstract_unit:
    return core.unit
  return lax.dynamic_update_index_in_dim(xs, x, i, 0)
# TODO(mattjj): make scan a primitive
# def _scan_abstract_eval(*args, forward, length, num_consts, num_carry, jaxpr, linear):
# carry_avals, y_avals = split_list(jaxpr.out_avals, [num_carry])
# ys_avals = [ShapedArray((length,) + aval.shape, aval.dtype)
# if aval is not core.abstract_unit else aval for aval in y_avals]
# return carry_avals + y_avals
def _scan_jvp(primals, tangents, forward, length, jaxpr, num_consts, num_carry,
              linear):
  """JVP rule for scan: run primal and tangent computations in one scan,
  with tangent operands appended after their corresponding primal groups."""
  num_xs = len(jaxpr.in_avals) - num_carry - num_consts
  num_ys = len(jaxpr.out_avals) - num_carry
  nonzeros = [t is not ad_util.zero for t in tangents]
  const_nz, init_nz, xs_nz = split_list(nonzeros, [num_consts, num_carry])
  # Fixpoint computation of which carry are not ad.zero: either
  # non-zero from init, or the carry out is non-zero. Each iteration promotes
  # at least one carry to non-zero. We need at most len(carry) iterations,
  # but we need one last iteration to prepare the jaxpr based on the final
  # carry_nz.
  carry_nz = init_nz
  for _ in range(1 + len(carry_nz)):
    nonzeros = const_nz + carry_nz + xs_nz
    jaxpr_jvp, nonzeros_out = ad.jvp_jaxpr(
        jaxpr, nonzeros, instantiate=carry_nz + [False] * num_ys)
    carry_nz_out, ys_nz = nonzeros_out[:num_carry], nonzeros_out[num_carry:]
    if carry_nz_out == carry_nz:
      break
    else:
      carry_nz = _map(operator.or_, carry_nz, carry_nz_out)
  else:
    assert False, "Fixpoint not reached"
  # Instantiate tangents that became nonzero through the carry fixpoint.
  tangents = [ad.instantiate_zeros(x, t) if t is ad_util.zero and nz else t
              for x, t, nz in zip(primals, tangents, nonzeros)]
  consts, init, xs = split_list(primals, [num_consts, num_carry])
  all_tangents = split_list(tangents, [num_consts, num_carry])
  consts_dot, init_dot, xs_dot = _map(_prune_zeros, all_tangents)
  # Interleave binders so the operand order is
  # [consts, consts_dot, init, init_dot, xs, xs_dot].
  jaxpr_jvp_rearranged = ad.rearrange_binders(
      jaxpr_jvp,
      [num_consts, num_carry, num_xs], [len(consts_dot), len(init_dot), len(xs_dot)],
      [num_carry, num_ys], [len(init_dot), sum(nonzeros_out) - len(init_dot)])
  consts_linear, init_linear, xs_linear = split_list(linear, [num_consts, num_carry])
  # All tangent operands are linear by construction.
  jaxpr_jvp_linear = tuple(consts_linear + [True] * len(consts_dot)
                           + init_linear + [True] * len(init_dot)
                           + xs_linear + [True] * len(xs_dot))
  out_flat = scan_p.bind(
      *(consts + consts_dot + init + init_dot + xs + xs_dot),
      forward=forward, length=length, jaxpr=jaxpr_jvp_rearranged,
      num_consts=num_consts+len(consts_dot), num_carry=num_carry+len(init_dot),
      linear=jaxpr_jvp_linear)
  carry, carry_dot, ys, ys_dot = split_list(out_flat, [num_carry, len(init_dot), num_ys])
  primals_out = carry + ys
  # Re-insert symbolic zeros for the outputs with zero tangents.
  tangents_out_iter = iter(carry_dot + ys_dot)
  tangents_out = [next(tangents_out_iter) if nz else ad_util.zero
                  for nz in nonzeros_out]
  return primals_out, tangents_out
def _prune_zeros(ts):
  """Drop symbolic-zero tangents, keeping only the nonzero entries."""
  pruned = []
  for t in ts:
    if t is not ad_util.zero:
      pruned.append(t)
  return pruned
def _scan_partial_eval(trace, *tracers, forward, length, num_consts, num_carry,
                       jaxpr, linear):
  """Partial-evaluation rule for scan.

  Splits the body into a known part evaluated now (jaxpr_1) and an unknown
  part staged out (jaxpr_2). Residuals that turn out to be loop-invariant
  ("intensive") become constants of the staged scan; per-iteration
  ("extensive") residuals are threaded through as stacked outputs/inputs.
  """
  num_xs = len(jaxpr.in_avals) - num_carry - num_consts
  num_ys = len(jaxpr.out_avals) - num_carry
  unknowns = [t.pval[0] is not None for t in tracers]
  const_uk, init_uk, xs_uk = split_list(unknowns, [num_consts, num_carry])
  # Fixpoint computation of which carry are unknown (not a constant): either
  # unknown from init, or the carry out is unknown. Each iteration promotes
  # at least one carry to unknown. We need at most len(carry) iterations,
  # but we need one last iteration to prepare the jaxpr based on the final
  # carry_uk.
  carry_uk = init_uk
  for _ in range(1 + len(carry_uk)):
    unknowns = const_uk + carry_uk + xs_uk
    jaxpr_1, jaxpr_2, out_uk = pe.partial_eval_jaxpr(
        jaxpr, unknowns, instantiate=carry_uk + [False] * num_ys)
    carry_uk_out, ys_uk = out_uk[:num_carry], out_uk[num_carry:]
    if carry_uk_out == carry_uk:
      break
    else:
      carry_uk = _map(operator.or_, carry_uk, carry_uk_out)
  else:
    assert False, "Fixpoint not reached"
  num_res = len(jaxpr_1.out_avals) - len(jaxpr_2.out_avals)
  # The residuals are treated as extensive outputs of jaxpr_1 (and extensive
  # inputs to jaxpr_2), but residuals that are loop-invariant can be hoisted.
  # TODO(mattjj): hoist other loop-invariant values here too (instantiate=False)
  invariant_pvals = [pe.PartialVal((None, core.unit if uk else t.pval[1]))
                     for uk, t in zip(unknowns[:num_consts], tracers[:num_consts])]
  other_pvals = [pe.PartialVal((a, core.unit)) for a in jaxpr_1.in_avals[num_consts:]]
  in_pvals_1 = invariant_pvals + other_pvals
  # Re-trace jaxpr_1 with known consts so loop-invariant residuals fall out
  # as constants (instantiate=False on the residual outputs).
  untyped_jaxpr_1, out_pvals_1, consts_1 = pe.trace_to_jaxpr(
      lu.wrap_init(core.jaxpr_as_fun(jaxpr_1)), in_pvals_1,
      instantiate=[True] * (num_carry + num_ys) + [False] * num_res)
  const_avals_1 = [raise_to_shaped(core.get_aval(c)) for c in consts_1]
  in_avals_1 = [core.abstract_unit] * num_consts + jaxpr_1.in_avals[num_consts:]
  out_avals_1 = [core.abstract_unit if pv is None else pv for pv, c in out_pvals_1]
  # TODO(cjfj): Explain the need for the code below.
  for var in untyped_jaxpr_1.invars[:num_consts]:
    var.aval = core.abstract_unit
  jaxpr_1_opt = pe.TypedJaxpr(pe.convert_constvars_jaxpr(untyped_jaxpr_1),
                              (), const_avals_1 + in_avals_1, out_avals_1)
  num_consts_1 = num_consts + len(consts_1)
  # any now-known residuals are intensive, so we want to revise jaxpr_2 to take
  # those inputs as constants rather than as extensive inputs
  _, _, res_pvals = split_list(out_pvals_1, [num_carry, num_ys])
  intensive_residuals = [const for pv, const in res_pvals if pv is None]
  move = [False] * len(jaxpr_1.in_avals) + [pv is None for pv, _ in res_pvals]
  jaxpr_2_opt = pe.move_binders_to_front(jaxpr_2, move)
  num_consts_2 = num_consts + len(intensive_residuals)
  # Evaluate the known scan now, replacing unknown operands with units.
  in_consts = (list(consts_1) + [core.unit] * num_consts +
               [core.unit if uk else t.pval[1]
                for uk, t in zip(unknowns[num_consts:], tracers[num_consts:])])
  linear_1 = ([False] * len(consts_1) + [True] * num_consts +
              [lin or uk for uk, lin
               in zip(unknowns[num_consts:], linear[num_consts:])])
  out_flat = scan_p.bind(
      *in_consts, forward=forward, length=length, jaxpr=jaxpr_1_opt,
      num_consts=num_consts_1, num_carry=num_carry, linear=tuple(linear_1))
  out_carry, ys, res_and_units = split_list(out_flat, [num_carry, num_ys])
  extensive_residuals = [r for r, (pv, _) in zip(res_and_units, res_pvals) if pv is not None]
  # Build tracers for the staged scan; known operands become unit literals.
  new_tracers = [trace.instantiate_const(t) if uk else trace.new_instantiated_literal(core.unit)
                 for uk, t in zip(unknowns, tracers)]
  carry_avals, y_avals = split_list(jaxpr.out_avals, [num_carry])
  ys_avals = _map(partial(_promote_aval_rank, length), y_avals)
  out_avals = carry_avals + ys_avals
  out_pvs = [aval if uk else None for aval, uk in zip(out_avals, out_uk)]
  out_consts = out_carry + ys
  int_res_tracers = _map(trace.new_instantiated_const, intensive_residuals)
  ext_res_tracers = _map(trace.new_instantiated_const, extensive_residuals)
  out_tracers = [pe.JaxprTracer(trace, pe.PartialVal((pv, const)), None)
                 for pv, const in zip(out_pvs, out_consts)]
  # Residual inputs to the staged scan are nonlinear.
  linear_2 = ([False] * len(int_res_tracers) +
              [lin or not uk for uk, lin in zip(unknowns, linear)] +
              [False] * len(ext_res_tracers))
  eqn = pe.new_eqn_recipe(int_res_tracers + new_tracers + ext_res_tracers,
                          out_tracers, scan_p,
                          dict(forward=forward, length=length, jaxpr=jaxpr_2_opt,
                               num_consts=num_consts_2,
                               num_carry=num_carry, linear=tuple(linear_2)))
  for t in out_tracers: t.recipe = eqn
  return out_tracers
def _promote_aval_rank(sz, aval):
  """Add a leading axis of size sz to aval; units stay units."""
  if aval is core.abstract_unit:
    return core.abstract_unit
  return ShapedArray((sz,) + aval.shape, aval.dtype)
def _scan_transpose(cts, *args, forward, length, num_consts, num_carry, jaxpr, linear):
  """Transpose rule for scan: run the transposed body as a reversed scan,
  accumulating consts cotangents in the carry."""
  # we've only implemented transposing scans with specific lin/nonlin patterns
  consts_lin, init_lin, xs_lin = split_list(linear, [num_consts, num_carry])
  # "ires"/"eres" are the nonlinear (residual) consts and xs, required to sit
  # at the front of the consts and the back of the xs respectively.
  num_ires = len(consts_lin) - sum(consts_lin)
  num_eres = len(xs_lin) - sum(xs_lin)
  if consts_lin != [False] * num_ires + [True] * (len(consts_lin) - num_ires):
    raise NotImplementedError
  if xs_lin != [True] * (len(xs_lin) - num_eres) + [False] * num_eres:
    raise NotImplementedError
  if not all(init_lin):
    pass  # TODO(mattjj): error check https://github.com/google/jax/issues/1963
  consts, _, xs = split_list(args, [num_consts, num_carry])
  ires, _ = split_list(consts, [num_ires])
  _, eres = split_list(xs, [sum(xs_lin)])
  assert not any(r is ad.undefined_primal for r in ires)
  assert not any(r is ad.undefined_primal for r in eres)
  carry_avals, y_avals = split_list(jaxpr.out_avals, [num_carry])
  ys_avals = _map(partial(_promote_aval_rank, length), y_avals)
  # Materialize symbolic-zero cotangents for the carry and stacked outputs.
  ct_carry, ct_ys = split_list(cts, [num_carry])
  ct_carry = _map(ad.instantiate_zeros_aval, carry_avals, ct_carry)
  ct_ys = _map(ad.instantiate_zeros_aval, ys_avals, ct_ys)
  # The consts cotangents start at zero and are summed across iterations.
  ct_consts = _map(ad_util.zeros_like_aval, jaxpr.in_avals[num_ires:num_consts])
  #  jaxpr :: [ires, T d] -> [T c] -> [T a, eres] -> ([T c], [T b])
  #  jaxpr_trans :: [ires] -> [CT d, CT c] -> [CT b, eres] -> ([CT d, CT c], [CT a])
  jaxpr_trans = _transpose_scan_jaxpr(
      num_ires, num_consts - num_ires, num_eres, jaxpr)
  linear_trans = ([False] * num_ires +
                  [True] * (len(ct_consts) + len(ct_carry) + len(ct_ys)) +
                  [False] * num_eres)
  # Run the transposed body with the scan direction reversed.
  outs = scan_p.bind(
      *(ires + ct_consts + ct_carry + ct_ys + eres), forward=not forward,
      length=length, jaxpr=jaxpr_trans, num_consts=num_ires,
      num_carry=num_consts-num_ires+num_carry, linear=tuple(linear_trans))
  ct_consts, ct_init, ct_xs = split_list(outs, [num_consts - num_ires, num_carry])
  # Residual positions get None cotangents.
  return [None] * num_ires + ct_consts + ct_init + ct_xs + [None] * num_eres
# transpose_scan_jaxpr :: ([res1, c, a, res2] -> b)
#                      -> ([res1, CT c, CT b, res2] -> [CT c, CT a])
def _transpose_scan_jaxpr(num_res1, num_c, num_res2, jaxpr):
  """Transpose a scan body jaxpr per the signature comment above: given
  residuals and output/carry cotangents, produce cotangents for the carry
  and the extensive inputs."""
  num_a = len(jaxpr.in_avals) - num_res1 - num_c - num_res2
  res1_avals, c_avals, a_avals, res2_avals = split_list(
      jaxpr.in_avals, [num_res1, num_c, num_a])
  num_b = len(jaxpr.out_avals)
  b_avals = list(jaxpr.out_avals)
  @lu.wrap_init
  def transposed(*res1_cbar_bbar_res2):
    res1, c_bar, b_bar, res2 = split_list(
        res1_cbar_bbar_res2, [num_res1, num_c, num_b])
    # Residuals are known; carry and extensive inputs are transpose targets.
    primals = res1 + [ad.undefined_primal] * (num_c + num_a) + res2
    cbar_abar = ad.backward_pass(jaxpr.jaxpr, jaxpr.literals, primals,
                                 b_bar)
    _, new_c_bar, a_bar, _ = split_list(cbar_abar, [num_res1, num_c, num_a])
    a_bar = _map(ad.instantiate_zeros_aval, a_avals, a_bar)
    # Accumulate the incoming carry cotangent with this step's contribution.
    c_bar = _map(ad.instantiate_zeros_aval, c_avals,
                 _map(ad.add_tangents, c_bar, new_c_bar))
    return c_bar + a_bar
  return _make_typed_jaxpr(transposed, res1_avals + c_avals + b_avals + res2_avals)
def _make_typed_jaxpr(traceable: lu.WrappedFun, in_avals):
  """Trace `traceable` at the given input avals into a TypedJaxpr."""
  in_pvals = [pe.PartialVal((aval, core.unit)) for aval in in_avals]
  jaxpr, out_pvals, consts = pe.trace_to_jaxpr(traceable, in_pvals,
                                               instantiate=True)
  out_avals, _ = unzip2(out_pvals)
  shaped_out_avals = [raise_to_shaped(aval) for aval in out_avals]
  return core.TypedJaxpr(jaxpr, consts, in_avals, shaped_out_avals)
def _scan_batching_rule(args, dims, forward, length, jaxpr, num_consts,
                        num_carry, linear):
  """Vmap rule for scan: batch the body jaxpr and run a single batched scan,
  with batch axes at position 0 for consts/carry and position 1 for xs
  (position 0 of xs is the scan axis)."""
  num_ys = len(jaxpr.out_avals) - num_carry
  size, = {x.shape[d] for x, d in zip(args, dims) if d is not batching.not_mapped}
  orig_batched = [d is not batching.not_mapped for d in dims]
  const_batched, init_batched, xs_batched = split_list(orig_batched, [num_consts, num_carry])
  # Fixpoint computation of which carry are batched: either
  # batched from init, or the carry out is batched. Each iteration promotes
  # at least one carry to batched. We need at most len(carry) iterations,
  # but we need one last iteration to prepare the jaxpr based on the final
  # carry_batched.
  carry_batched = init_batched
  for _ in range(1 + len(carry_batched)):
    batched = const_batched + carry_batched + xs_batched
    jaxpr_batched, batched_out = batching.batch_jaxpr(
        jaxpr, size, batched, instantiate=carry_batched + [False] * num_ys)
    carry_batched_out, ys_batched = batched_out[:num_carry], batched_out[num_carry:]
    if carry_batched_out == carry_batched:
      break
    else:
      carry_batched = _map(operator.or_, carry_batched, carry_batched_out)
  else:
    assert False, "Fixpoint not reached"
  consts, init, xs = split_list(args, [num_consts, num_carry])
  consts_bdims, init_bdims, xs_bdims = split_list(dims, [num_consts, num_carry])
  # Move batch axes of consts to the front.
  new_consts = [batching.moveaxis(x, d, 0) if d is not batching.not_mapped and d != 0
                else x for x, d in zip(consts, consts_bdims)]
  # Carry entries that became batched via the fixpoint must be broadcast.
  new_init = [batching.broadcast(x, size, 0) if now_batched and not was_batched
              else batching.moveaxis(x, d, 0) if now_batched else x
              for x, d, was_batched, now_batched in
              zip(init, init_bdims, init_batched, carry_batched)]
  # For xs, axis 0 is the scan axis, so batch axes go to position 1.
  new_xs = [batching.moveaxis(x, d, 1) if d is not batching.not_mapped and d != 1
            else x for x, d in zip(xs, xs_bdims)]
  new_args = new_consts + new_init + new_xs
  outs = scan_p.bind(*new_args, forward=forward, length=length, jaxpr=jaxpr_batched,
                     num_consts=num_consts, num_carry=num_carry, linear=linear)
  carry_bdims = [0 if b else batching.not_mapped for b in carry_batched]
  ys_bdims = [1 if b else batching.not_mapped for b in ys_batched]
  return outs, carry_bdims + ys_bdims
def _scan_shape_rule(shapes, forward, length, jaxpr,
                     num_consts, num_carry, linear):
  """Shape rule for scan under masking.

  The carry outputs keep the carry input shapes; each stacked output gets a
  leading axis of size `length` prepended to the body's per-iteration shape.
  """
  # Only the carry shape expressions are used; consts and xs shapes are not.
  _, init_shexprs, _ = split_list(shapes, [num_consts, num_carry])
  _, y_avals = split_list(jaxpr.out_avals, [num_carry])
  ys_shapes = [(length,) + tuple(y_aval.shape) for y_aval in y_avals]
  return init_shexprs + ys_shapes
def _scan_masking_rule(shape_envs, padded_vals, shape_exprs, forward, length,
                       jaxpr, num_consts, num_carry, linear):
  """Masking rule for scan: scan over the full padded length while only
  applying carry updates for iterations below the dynamic (logical) length,
  via the wrapped body produced by _masked_scan_jaxpr."""
  out_shape = _scan_shape_rule(shape_exprs, forward, length, jaxpr,
                               num_consts, num_carry, linear)
  dynamic_length = masking.eval_dim_expr(shape_envs.logical, length)
  masked_jaxpr = _masked_scan_jaxpr(jaxpr, num_consts, num_carry)
  consts, init, xs = split_list(padded_vals, [num_consts, num_carry])
  max_length, = {x.shape[0] for x in xs}
  const_linear, init_linear, xs_linear = split_list(linear, [num_consts, num_carry])
  # The dynamic length is prepended to the consts and an iteration counter
  # (starting at 0) is prepended to the carry; both are nonlinear.
  out_vals = scan_p.bind(
      *itertools.chain([dynamic_length] + consts, [0], init, xs),
      forward=forward, length=max_length, jaxpr=masked_jaxpr,
      num_consts=1 + num_consts, num_carry=1 + num_carry,
      # Pass `linear` as a tuple, consistent with every other scan_p.bind
      # call site in this file.
      linear=tuple([False] + const_linear + [False] + init_linear + xs_linear))
  # Drop the iteration counter from the outputs.
  return out_vals[1:], out_shape
def _masked_scan_jaxpr(jaxpr, num_consts, num_carry):
  """Wrap a scan body jaxpr so iterations past the dynamic length are no-ops.

  Adds a dynamic-length scalar to the consts and an iteration counter to the
  carry; carry updates are kept only while the counter is below the dynamic
  length, freezing the carry for the padded tail of the scan.
  """
  fun = core.jaxpr_as_fun(jaxpr)
  @lu.wrap_init
  def masked(*args):
    [dynamic_length], consts, [i], carry, xs = split_list(
        args, [1, num_consts, 1, num_carry])
    out = fun(*(consts + carry + xs))
    new_carry, ys = split_list(out, [num_carry])
    # Keep the old carry once the logical length has been exceeded.
    new_carry = [lax.select(i < dynamic_length, new_c, c)
                 for new_c, c in zip(new_carry, carry)]
    return [i + 1] + new_carry + ys
  aval = ShapedArray((), dtypes.int_)
  const_avals, carry_avals, x_avals = split_list(jaxpr.in_avals, [num_consts, num_carry])
  return _make_typed_jaxpr(masked, [aval] + const_avals + [aval] + carry_avals + x_avals)
def scan_bind(*args, forward, length, num_consts, num_carry, jaxpr, linear):
  """Custom bind for scan_p that type-checks operands against the body jaxpr
  (and the jaxpr itself) when checks are enabled."""
  if not core.skip_checks:
    assert len(linear) == len(args)
    consts, init, xs = split_list(args, [num_consts, num_carry])
    consts_avals, init_avals, x_avals = split_list(jaxpr.in_avals, [num_consts, num_carry])
    xs_avals = _map(partial(_promote_aval_rank, length), x_avals)
    assert all(_map(typecheck, consts_avals, consts)), (consts, consts_avals)
    assert all(_map(typecheck, init_avals, init))
    # assert all(_map(typecheck, xs_avals, xs))
    # The carry must be type-stable: output carry avals match the init avals.
    carry_avals, _ = split_list(jaxpr.out_avals, [num_carry])
    assert all(_map(typematch, init_avals, carry_avals))
    core.check_jaxpr(jaxpr.jaxpr)
  return core.Primitive.bind(scan_p, *args, forward=forward, length=length,
                             jaxpr=jaxpr, num_consts=num_consts,
                             num_carry=num_carry, linear=linear)
# Set up the scan primitive and register its transformation rules.
scan_p = core.Primitive("scan")
scan_p.multiple_results = True
scan_p.def_custom_bind(scan_bind)
scan_p.def_impl(_scan_impl)
ad.primitive_jvps[scan_p] = _scan_jvp
ad.primitive_transposes[scan_p] = _scan_transpose
pe.custom_partial_eval_rules[scan_p] = _scan_partial_eval
# scan is lowered to XLA by tracing its eager implementation (a fori_loop).
xla.initial_style_translations[scan_p] = xla.lower_fun(_scan_impl, initial_style=True)
batching.primitive_batchers[scan_p] = _scan_batching_rule
masking.shape_parameterized_primitive_rules[scan_p] = _scan_masking_rule
def map(f, xs):
  """Map a function over leading array axes.

  Like Python's builtin map, except inputs and outputs are in the form of
  stacked arrays. Consider using the ``jax.vmap`` transform instead, unless
  you need to apply a function element by element for reduced memory usage or
  heterogeneous computation with other control flow primitives.

  When ``xs`` is an array type, the semantics of ``map`` are given by this
  Python implementation::

    def map(f, xs):
      return np.stack([f(x) for x in xs])

  Like ``scan``, ``map`` is implemented in terms of JAX primitives so many of
  the same advantages over a Python loop apply: ``xs`` may be an arbitrary
  nested pytree type, and the mapped computation is compiled only once.

  Args:
    f: a Python function to apply element-wise over the first axis or axes of
      ``xs``.
    xs: values over which to map along the leading axis.

  Returns:
    Mapped values.
  """
  def scanned_fun(carry, x):
    # The (empty) carry is passed through untouched; scan is used purely for
    # its stacked outputs.
    return carry, f(x)
  _, ys = scan(scanned_fun, (), xs)
  return ys
def _concat_masking_rule(padded_vals, logical_shapes, dimension, operand_shapes):
  """Masking rule for ``lax.concatenate``.

  Concatenates the padded operands, then copies each operand's logical
  prefix (its un-padded data) into the result at the correct running offset
  along ``dimension``.
  """
  del operand_shapes  # Unused.
  out = lax.concatenate(padded_vals, dimension)
  start = 0
  for padded, shape in zip(padded_vals, logical_shapes):
    out = _memcpy(dimension, shape[dimension], padded, out, start)
    start += shape[dimension]
  return out
def _memcpy(axis, num, src, dst, offset):
  """Copy ``src[0:num]`` along ``axis`` into ``dst`` starting at ``offset``."""
  def copy_one(i, acc):
    elt = lax.dynamic_index_in_dim(src, i, axis)
    return lax.dynamic_update_index_in_dim(acc, elt, i + offset, axis)
  return fori_loop(0, num, copy_one, dst)
# Register the masking rule so lax.concatenate works under masking.
masking.masking_rules[lax.concatenate_p] = _concat_masking_rule
def _check_tree(func_name, expected_name, actual_tree, expected_tree):
if actual_tree != expected_tree:
raise TypeError(
"{}() output pytree structure must match {}, got {} and {}."
.format(func_name, expected_name, actual_tree, expected_tree))
def _check_tree_and_avals(what, tree1, avals1, tree2, avals2):
  """Raise TypeError if (tree1, avals1) does not match (tree2, avals2).

  Each ``tree`` must have exactly as many leaves as the paired ``avals``
  list. ``what`` prefixes the error message.
  """
  if tree1 != tree2:
    raise TypeError("{} must have same type structure, got {} and {}."
                    .format(what, tree1, tree2))
  types_match = all(safe_map(typematch, avals1, avals2))
  if not types_match:
    raise TypeError("{} must have identical types, got\n{}\nand\n{}.".format(
        what, tree_unflatten(tree1, avals1), tree_unflatten(tree2, avals2)))
def _stop_gradient_fun(f):
  """Return a version of ``f`` with gradients stopped on all inputs.

  Gradients are stopped both for the explicit arguments and for the
  constants closed over by ``f``'s traced jaxpr.
  """
  def wrapper(*args, **kwargs):
    flat_args, in_tree = tree_flatten((args, kwargs))
    avals = tuple(_map(_abstractify, flat_args))
    call = lambda a, kw: f(*a, **kw)
    jaxpr, consts, out_tree = _initial_style_jaxpr(call, in_tree, avals)
    stopped = lax.stop_gradient(consts + tuple(flat_args))
    out_flat = core.jaxpr_as_fun(jaxpr)(*stopped)
    return tree_unflatten(out_tree, out_flat)
  return wrapper
# Carrier for custom_root's three sub-jaxpr constant groups: f (objective),
# solve, and l_and_s (linearize-and-solve).
_RootTuple = collections.namedtuple('_RootTuple', 'f, solve, l_and_s')

def _split_root_args(args, const_lengths):
  # Split the flat operand list into per-jaxpr constants plus the trailing
  # guess leaves.
  params_list = split_list(args, list(const_lengths))
  return _RootTuple(*params_list[:-1]), params_list[-1]
def custom_root(f, initial_guess, solve, tangent_solve):
  """Differentiably solve for a root of a function.

  This is a low-level routine, mostly intended for internal use in JAX.
  Gradients of custom_root() are defined with respect to closed-over variables
  from the provided function ``f`` via the implicit function theorem:
  https://en.wikipedia.org/wiki/Implicit_function_theorem

  Args:
    f: function for which to find a root. Should accept a single argument,
      return a tree of arrays with the same structure as its input.
    initial_guess: initial guess for a zero of f.
    solve: function to solve for the roots of f. Should take two positional
      arguments, f and initial_guess, and return a solution with the same
      structure as initial_guess such that func(solution) = 0. In other words,
      the following is assumed to be true (but not checked)::

        solution = solve(f, initial_guess)
        error = f(solution)
        assert all(error == 0)

    tangent_solve: function to solve the tangent system. Should take two
      positional arguments, a linear function ``g`` (the function ``f``
      linearized at its root) and a tree of array(s) ``y`` with the same
      structure as initial_guess, and return a solution ``x`` such that
      ``g(x)=y``:

      - For scalar ``y``, use ``lambda g, y: y / g(1.0)``.
      - For vector ``y``, you could use a linear solve with the Jacobian, if
        dimensionality of ``y`` is not too large:
        ``lambda g, y: np.linalg.solve(jacobian(g)(y), y)``.

  Returns:
    The result of calling solve(f, initial_guess) with gradients defined via
    implicit differentiation assuming ``f(solve(f, initial_guess)) == 0``.
  """
  guess_flat, in_args_tree = tree_flatten((initial_guess,))
  guess_avals = tuple(_map(_abstractify, guess_flat))
  # Trace f at the abstract guess; its output tree must mirror its input tree.
  f_jaxpr, f_consts, out_tree = _initial_style_jaxpr(
      f, in_args_tree, guess_avals)
  in_tree, = treedef_children(in_args_tree)
  _check_tree("f", "initial_guess", out_tree, in_tree)
  # Trace solve with gradients stopped through f, so derivatives come only
  # from the implicit-function rule, never from inside the solver.
  solve_jaxpr, solve_consts, solution_tree = _initial_style_jaxpr(
      partial(solve, _stop_gradient_fun(f)), in_args_tree, guess_avals)
  _check_tree("solve", "initial_guess", solution_tree, in_tree)
  def linearize_and_solve(x, b):
    # Linearize f at x and invert that linearization against b.
    unchecked_zeros, f_jvp = api.linearize(f, x)
    return tangent_solve(f_jvp, b)
  l_and_s_jaxpr, l_and_s_consts, out_tree = _initial_style_jaxpr(
      linearize_and_solve, treedef_tuple((in_tree,) * 2), guess_avals * 2)
  _check_tree("tangent_solve", "x", out_tree, in_tree)
  # Pack all closed-over constants flat in front of the guess leaves; the
  # primitive's rules recover them via const_lengths.
  all_consts = [f_consts, solve_consts, l_and_s_consts]
  const_lengths = _RootTuple(*_map(len, all_consts))
  jaxprs = _RootTuple(f_jaxpr, solve_jaxpr, l_and_s_jaxpr)
  out_flat = root_p.bind(
      *(_flatten(all_consts) + guess_flat),
      const_lengths=const_lengths, jaxprs=jaxprs)
  return tree_unflatten(out_tree, out_flat)
def _root_abstract_eval(*args, **kwargs):
  # The outputs have the same (shaped) avals as the guess operands, i.e.
  # whatever follows the closed-over constants in the flat argument list.
  num_consts = sum(kwargs['const_lengths'])
  return _map(raise_to_shaped, args[num_consts:])
def _root_impl(*args, **kwargs):
  """Eager evaluation of the root primitive: just run the solve jaxpr."""
  const_lengths, jaxprs = split_dict(kwargs, ['const_lengths', 'jaxprs'])
  params, guess = _split_root_args(args, const_lengths)
  return core.jaxpr_as_fun(jaxprs.solve)(*(params.solve + guess))
def _root_jvp(primals, tangents, const_lengths, jaxprs):
  """JVP rule for the root primitive via implicit differentiation."""
  params, _ = _split_root_args(primals, const_lengths)
  # Primal output: the solution from the (non-differentiated) solve.
  solution = tuple(root_p.bind(
      *primals, const_lengths=const_lengths, jaxprs=jaxprs))
  params_dot, _ = _split_root_args(tangents, const_lengths)
  # F(m, u) = 0      # system of equations in u, parameterized by m
  #                  # solution is u*(m) defined in a neighborhood
  # F(m, u*(m)) = 0  # satisfied in a neighborhood
  #
  # ∂_0 F(m, u*(m)) + ∂_1 F(m, u*(m)) ∂ u*(m) = 0       # implied by line above
  # ∂ u*(m) = - (∂_1 F(m, u*(m)))^{-1} ∂_0 F(m, u*(m))  # rearrange
  #
  # ∂ u*(m)[v] = - (∂_1 F(m, u*(m)))^{-1} [∂_0 F(m, u*(m))[v]]  # jvp
  f = core.jaxpr_as_fun(jaxprs.f)
  linearize_and_solve = partial(
      core.jaxpr_as_fun(jaxprs.l_and_s), *params.l_and_s)
  # rhs = ∂_0 F(m, u*(m))[v]: jvp of f w.r.t. params only, at the solution.
  f_at_solution = lambda *params: f(*itertools.chain(params, solution))
  _, rhs = ad.jvp(lu.wrap_init(f_at_solution)).call_wrapped(
      params.f, params_dot.f)
  # solution_dot = - (∂_1 F)^{-1} rhs, via the user's tangent_solve.
  solution_dot = _map(
      operator.neg, linearize_and_solve(*itertools.chain(solution, rhs)))
  return solution, solution_dot
# Primitive registration for root: eager impl, abstract eval, implicit-diff
# JVP, and XLA lowering.
root_p = core.Primitive('root')
root_p.multiple_results = True
root_p.def_impl(_root_impl)
root_p.def_abstract_eval(_root_abstract_eval)
ad.primitive_jvps[root_p] = _root_jvp
xla.initial_style_translations[root_p] = xla.lower_fun(
    _root_impl, initial_style=True)
# TODO(shoyer): write batching rule
class _LinearSolveTuple(collections.namedtuple(
'_LinearSolveTuple', 'matvec, vecmat, solve, transpose_solve')):
def transpose(self):
return type(self)(self.vecmat, self.matvec, self.transpose_solve, self.solve)
def _split_linear_solve_args(args, const_lengths):
  """Split flat operands into per-jaxpr constants and the trailing b leaves."""
  *const_groups, b = split_list(args, list(const_lengths))
  return _LinearSolveTuple(*const_groups), b
def _transpose_function(linear_fun, primals):
  """Transpose a linear function using the vjp machinery.

  NOTE: ``primals`` only fixes the shapes/dtypes at which to linearize; its
  actual values are irrelevant because ``linear_fun`` is assumed linear.
  TODO(shoyer): can we use something more direct than the vjp machinery?
  """
  _, vjp_fun = api.vjp(linear_fun, primals)
  return lambda x: vjp_fun(x)[0]
def _flatten(args):
return [x for arg in args for x in arg]
def _check_shapes(func_name, expected_name, actual, expected, tree):
  """Raise ValueError if corresponding leaves of ``actual`` and ``expected``
  differ in (numpy) shape.

  Args:
    func_name: name of the user function being checked (for the message).
    expected_name: name of the reference value (for the message).
    actual: flat list of arrays produced by the user function.
    expected: flat list of reference arrays.
    tree: pytree structure used to unflatten shapes in the error message.

  Raises:
    ValueError: if any pair of leaf shapes differs.
  """
  actual_shapes = _map(onp.shape, actual)
  expected_shapes = _map(onp.shape, expected)
  if actual_shapes != expected_shapes:
    # Unflatten so the message mirrors the user's tree structure. (The
    # original version also computed two unused *_shape_tree locals here —
    # dead code, removed.)
    raise ValueError('{}() output shapes must match {}, got {} and {}'
                     .format(func_name, expected_name,
                             tree_unflatten(tree, actual_shapes),
                             tree_unflatten(tree, expected_shapes)))
def custom_linear_solve(
    matvec, b, solve, transpose_solve=None, symmetric=False):
  """Perform a matrix-free linear solve with implicitly defined gradients.

  This function allows for overriding or defining gradients for a linear
  solve directly via implicit differentiation at the solution, rather than by
  differentiating *through* the solve operation. This can sometimes be much faster
  or more numerically stable, or differentiating through the solve operation
  may not even be implemented (e.g., if ``solve`` uses ``lax.while_loop``).

  Required invariant::

      x = solve(matvec, b)  # solve the linear equation
      assert matvec(x) == b  # not checked

  Args:
    matvec: linear function to invert. Must be differentiable.
    b: constant right handle side of the equation. May be any nested structure
      of arrays.
    solve: higher level function that solves for solution to the linear
      equation, i.e., ``solve(matvec, x) == x`` for all ``x`` of the same form
      as ``b``. This function need not be differentiable.
    transpose_solve: higher level function for solving the transpose linear
      equation, i.e., ``transpose_solve(vecmat, x) == x``, where ``vecmat`` is
      the transpose of the linear map ``matvec`` (computed automatically with
      autodiff). Required for backwards mode automatic differentiation, unless
      ``symmetric=True``, in which case ``solve`` provides the default value.
    symmetric: bool indicating if it is safe to assume the linear map
      corresponds to a symmetric matrix, i.e., ``matvec == vecmat``.

  Returns:
    Result of ``solve(matvec, b)``, with gradients defined assuming that the
    solution ``x`` satisfies the linear equation ``matvec(x) == b``.
  """
  if transpose_solve is None and symmetric:
    transpose_solve = solve
  b_flat, in_args_tree = tree_flatten((b,))
  b_avals = tuple(_map(_abstractify, b_flat))
  # Trace matvec at b's avals; its output tree must match b's tree.
  matvec_jaxpr, matvec_consts, out_tree = _initial_style_jaxpr(
      matvec, in_args_tree, b_avals)
  tree, = treedef_children(in_args_tree)
  _check_tree("matvec", "b", out_tree, tree)
  solve_jaxpr, solve_consts, out_tree = _initial_style_jaxpr(
      partial(solve, matvec), in_args_tree, b_avals)
  _check_tree("solve", "b", out_tree, tree)
  if transpose_solve is None:
    # No transpose support: leave those slots empty in the parameter tuple.
    vecmat_jaxpr = tr_solve_jaxpr = None
    vecmat_consts = tr_solve_consts = []
  else:
    if symmetric:
      # Symmetric map: the transpose is matvec itself; reuse its jaxpr.
      vecmat = matvec
      vecmat_jaxpr = matvec_jaxpr
      vecmat_consts = matvec_consts
    else:
      # Derive the transposed map from matvec via autodiff.
      vecmat = _transpose_function(matvec, b)
      vecmat_jaxpr, vecmat_consts, out_tree = _initial_style_jaxpr(
          vecmat, in_args_tree, b_avals)
      assert out_tree == tree
    tr_solve_jaxpr, tr_solve_consts, out_tree = _initial_style_jaxpr(
        partial(transpose_solve, vecmat), in_args_tree, b_avals)
    _check_tree("transpose_solve", "b", out_tree, tree)
  # Pack all closed-over constants flat ahead of the b leaves; the rules
  # recover them via const_lengths.
  all_consts = [matvec_consts, vecmat_consts, solve_consts, tr_solve_consts]
  const_lengths = _LinearSolveTuple(*_map(len, all_consts))
  jaxprs = _LinearSolveTuple(
      matvec_jaxpr, vecmat_jaxpr, solve_jaxpr, tr_solve_jaxpr)
  out_flat = linear_solve_p.bind(
      *(_flatten(all_consts) + b_flat),
      const_lengths=const_lengths, jaxprs=jaxprs, tree=tree)
  return tree_unflatten(tree, out_flat)
def _linear_solve_abstract_eval(*args, **kwargs):
  # The solution has the same (shaped) avals as the b operands, i.e. the
  # args following the closed-over constants.
  n_consts = sum(kwargs['const_lengths'])
  b_args = args[n_consts:]
  return _map(raise_to_shaped, b_args)
def _custom_linear_solve_impl(*args, **kwargs):
  """Eager evaluation: run the solve jaxpr and validate the output shapes."""
  const_lengths, jaxprs, tree = split_dict(
      kwargs, ['const_lengths', 'jaxprs', 'tree'])
  params, b = _split_linear_solve_args(args, const_lengths)
  solution = core.jaxpr_as_fun(jaxprs.solve)(*(params.solve + b))
  _check_shapes('solve', 'b', solution, b, tree)
  return solution
def _tangent_linear_map(func, params, params_dot, *x):
  """Compute the tangent of a linear map.

  Assuming ``func(*params, *x)`` is linear in ``x`` and computes ``A @ x``,
  this returns ``∂A @ x``.
  """
  assert any(p is not ad_util.zero for p in params_dot)
  # Hold x fixed (zero tangents) so only the params perturbation contributes.
  x_dot = [ad_util.zero] * len(x)
  _, tangent_out = ad.jvp(lu.wrap_init(func)).call_wrapped(
      params + list(x), params_dot + x_dot)
  return tangent_out
def _custom_linear_solve_jvp(primals, tangents, const_lengths, jaxprs, tree):
  """JVP rule for custom_linear_solve via implicit differentiation."""
  # A x - b = 0
  # ∂A x + A ∂x - ∂b = 0
  # ∂x = A^{-1} (∂b - ∂A x)
  kwargs = dict(const_lengths=const_lengths, jaxprs=jaxprs, tree=tree)
  # Primal output: solve the original system.
  x = linear_solve_p.bind(*primals, **kwargs)
  params, _ = _split_linear_solve_args(primals, const_lengths)
  params_dot, b_dot = _split_linear_solve_args(tangents, const_lengths)
  if all(p is ad_util.zero for p in params_dot.matvec):
    # no need to evaluate matvec_tangents: ∂A is symbolically zero.
    rhs = b_dot
  else:
    # matvec_tangents = ∂A x, so rhs = ∂b - ∂A x.
    matvec_tangents = _tangent_linear_map(
        core.jaxpr_as_fun(jaxprs.matvec), params.matvec, params_dot.matvec, *x)
    _check_shapes("matvec", "b", matvec_tangents, x, tree)
    rhs = _map(ad.add_tangents, b_dot, _map(operator.neg, matvec_tangents))
  # Tangent output: solve the same system against the tangent rhs.
  x_dot = linear_solve_p.bind(*(_flatten(params) + rhs), **kwargs)
  return x, x_dot
def _linear_solve_transpose_rule(cotangent, *primals, **kwargs):
  """Transpose rule: run the transposed linear solve against the cotangent."""
  const_lengths, jaxprs, tree = split_dict(
      kwargs, ['const_lengths', 'jaxprs', 'tree'])
  if jaxprs.transpose_solve is None:
    raise TypeError('transpose_solve required for backwards mode automatic '
                    'differentiation of custom_linear_solve')
  params, b = _split_linear_solve_args(primals, const_lengths)
  # The b operands are what we are transposing with respect to.
  assert b == [ad.undefined_primal] * len(b)
  b_bar = linear_solve_p.bind(
      *(_flatten(params.transpose()) + cotangent),
      const_lengths=const_lengths.transpose(),
      jaxprs=jaxprs.transpose(),
      tree=tree)
  # Constants receive no cotangent; only the b operands do.
  return [None] * sum(const_lengths) + b_bar
def _linear_solve_batching_rule(args, dims, **kwargs):
  """Batching (vmap) rule for custom_linear_solve.

  Runs a fixpoint over which leaves of x and b are batched so batched-ness
  is consistent across all four sub-jaxprs, then batches each jaxpr and
  re-binds the primitive with axes moved to the front.
  """
  const_lengths, jaxprs, tree = split_dict(kwargs,
                                           ["const_lengths", "jaxprs", "tree"])
  orig_bat = [d is not batching.not_mapped for d in dims]
  # All mapped operands must share one batch size.
  size, = {
      a.shape[d] for a, d in zip(args, dims) if d is not batching.not_mapped
  }
  params, b = _split_linear_solve_args(args, const_lengths)
  params_dims, b_dims = _split_linear_solve_args(dims, const_lengths)
  params_bat, orig_b_bat = _split_linear_solve_args(orig_bat, const_lengths)
  (matvec, vecmat, solve, solve_t) = jaxprs
  (matvec_bat, vecmat_bat, solve_bat, solve_t_bat) = params_bat
  # Fixpoint computation of which parts of x and b are batched; we need to
  # ensure this is consistent between all four jaxprs
  b_bat = orig_b_bat
  x_bat = [False] * len(solve.out_avals)
  # The loop bound is an upper limit on iterations to reach the fixpoint.
  for i in range(1 + len(orig_b_bat) + len(solve.out_avals)):
    # Apply vecmat and solve -> new batched parts of x
    solve_jaxpr_batched, solve_x_bat = batching.batch_jaxpr(
        solve, size, solve_bat + b_bat, instantiate=x_bat)
    if vecmat is None:
      vecmat_jaxpr_batched = None
      x_bat_out = solve_x_bat
    else:
      vecmat_jaxpr_batched, vecmat_x_bat = batching.batch_jaxpr(
          vecmat, size, vecmat_bat + b_bat, instantiate=x_bat)
      x_bat_out = _map(operator.or_, vecmat_x_bat, solve_x_bat)
    # Apply matvec and solve_t -> new batched parts of b
    matvec_jaxpr_batched, matvec_b_bat = batching.batch_jaxpr(
        matvec, size, matvec_bat + x_bat_out, instantiate=b_bat)
    if solve_t is None:
      solve_t_jaxpr_batched = None
      b_bat_out = _map(operator.or_, matvec_b_bat, orig_b_bat)
    else:
      solve_t_jaxpr_batched, solve_t_b_bat = batching.batch_jaxpr(
          solve_t, size, solve_t_bat + x_bat_out, instantiate=b_bat)
      b_bat_out = _map(lambda m, s, o: m or s or o, matvec_b_bat, solve_t_b_bat,
                       orig_b_bat)
    if x_bat_out == x_bat and b_bat_out == b_bat:
      break
    else:
      x_bat = x_bat_out
      b_bat = b_bat_out
  else:
    assert False, "Fixedpoint not reached"
  batched_jaxprs = _LinearSolveTuple(matvec_jaxpr_batched, vecmat_jaxpr_batched,
                                     solve_jaxpr_batched, solve_t_jaxpr_batched)
  # Move batched axes to the front
  new_params = [
      batching.moveaxis(x, d, 0)
      if d is not batching.not_mapped and d != 0 else x
      for x, d in zip(_flatten(params), _flatten(params_dims))
  ]
  # Broadcast out b if necessary
  new_b = [
      batching.broadcast(x, size, 0) if now_bat and not was_bat else
      batching.moveaxis(x, d, 0) if now_bat and d != 0 else x
      for x, d, was_bat, now_bat in zip(b, b_dims, orig_b_bat, b_bat)
  ]
  outs = linear_solve_p.bind(
      *(new_params + new_b),
      const_lengths=const_lengths,
      jaxprs=batched_jaxprs,
      tree=tree)
  out_dims = [0 if batched else batching.not_mapped for batched in b_bat]
  return outs, out_dims
# Primitive registration for custom_linear_solve: eager impl, abstract eval,
# JVP/transpose for autodiff, XLA lowering, and vmap batching.
linear_solve_p = core.Primitive('custom_linear_solve')
linear_solve_p.multiple_results = True
linear_solve_p.def_impl(_custom_linear_solve_impl)
linear_solve_p.def_abstract_eval(_linear_solve_abstract_eval)
ad.primitive_jvps[linear_solve_p] = _custom_linear_solve_jvp
xla.initial_style_translations[linear_solve_p] = xla.lower_fun(
    _custom_linear_solve_impl, initial_style=True)
ad.primitive_transposes[linear_solve_p] = _linear_solve_transpose_rule
batching.primitive_batchers[linear_solve_p] = _linear_solve_batching_rule
| 43.593058
| 102
| 0.702474
|
4a11c1a96bd8d5adf526ac9f8f4aa9650070f07b
| 47,851
|
py
|
Python
|
tests/admin_filters/tests.py
|
beniwohli/django
|
514b2c989a948e3c59bda0da0c9427acf643cf5b
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 12
|
2018-06-30T15:20:10.000Z
|
2020-10-20T02:15:00.000Z
|
tests/admin_filters/tests.py
|
beniwohli/django
|
514b2c989a948e3c59bda0da0c9427acf643cf5b
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 7
|
2018-06-18T17:56:50.000Z
|
2020-06-24T16:51:04.000Z
|
tests/admin_filters/tests.py
|
beniwohli/django
|
514b2c989a948e3c59bda0da0c9427acf643cf5b
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 5
|
2018-07-17T05:41:04.000Z
|
2020-07-31T12:30:46.000Z
|
import datetime
import sys
import unittest
from django.contrib.admin import (
AllValuesFieldListFilter, BooleanFieldListFilter, ModelAdmin,
RelatedOnlyFieldListFilter, SimpleListFilter, site,
)
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured
from django.test import RequestFactory, TestCase, override_settings
from .models import Book, Bookmark, Department, Employee, TaggedItem
def select_by(dictlist, key, value):
    """Return the first dict in ``dictlist`` whose ``key`` maps to ``value``."""
    matches = [entry for entry in dictlist if entry[key] == value]
    return matches[0]
class DecadeListFilter(SimpleListFilter):
    """Filter books by publication decade via year-range lookups.

    Selecting 'other' (or nothing) applies no filtering — queryset()
    returns None in those cases, which Django treats as "unfiltered".
    """

    def lookups(self, request, model_admin):
        return (
            ('the 80s', "the 1980's"),
            ('the 90s', "the 1990's"),
            ('the 00s', "the 2000's"),
            ('other', "other decades"),
        )

    def queryset(self, request, queryset):
        year_ranges = {
            'the 80s': (1980, 1989),
            'the 90s': (1990, 1999),
            'the 00s': (2000, 2009),
        }
        bounds = year_ranges.get(self.value())
        if bounds is not None:
            return queryset.filter(year__gte=bounds[0], year__lte=bounds[1])
class NotNinetiesListFilter(SimpleListFilter):
    """Single-option filter: the nineties when selected, everything else
    otherwise (i.e. it always filters, even with no selection)."""
    title = "Not nineties books"
    parameter_name = "book_year"

    def lookups(self, request, model_admin):
        return (('the 90s', "the 1990's"),)

    def queryset(self, request, queryset):
        nineties = {'year__gte': 1990, 'year__lte': 1999}
        if self.value() == 'the 90s':
            return queryset.filter(**nineties)
        return queryset.exclude(**nineties)
class DecadeListFilterWithTitleAndParameter(DecadeListFilter):
    # Fully-configured variant: both required attributes are present.
    title = 'publication decade'
    parameter_name = 'publication-decade'

class DecadeListFilterWithoutTitle(DecadeListFilter):
    # Missing `title` — presumably used to exercise misconfiguration errors.
    parameter_name = 'publication-decade'

class DecadeListFilterWithoutParameter(DecadeListFilter):
    # Missing `parameter_name` — presumably used to exercise
    # misconfiguration errors.
    title = 'publication decade'

class DecadeListFilterWithNoneReturningLookups(DecadeListFilterWithTitleAndParameter):
    # lookups() returns None (no choices at all).
    def lookups(self, request, model_admin):
        pass

class DecadeListFilterWithFailingQueryset(DecadeListFilterWithTitleAndParameter):
    # queryset() raises ZeroDivisionError unconditionally (deliberate).
    def queryset(self, request, queryset):
        raise 1 / 0

class DecadeListFilterWithQuerysetBasedLookups(DecadeListFilterWithTitleAndParameter):
    # Only advertise decades that actually contain books.
    def lookups(self, request, model_admin):
        qs = model_admin.get_queryset(request)
        if qs.filter(year__gte=1980, year__lte=1989).exists():
            yield ('the 80s', "the 1980's")
        if qs.filter(year__gte=1990, year__lte=1999).exists():
            yield ('the 90s', "the 1990's")
        if qs.filter(year__gte=2000, year__lte=2009).exists():
            yield ('the 00s', "the 2000's")

class DecadeListFilterParameterEndsWith__In(DecadeListFilter):
    # Parameter name deliberately ends with a reserved lookup suffix.
    title = 'publication decade'
    parameter_name = 'decade__in'  # Ends with '__in"

class DecadeListFilterParameterEndsWith__Isnull(DecadeListFilter):
    # Parameter name deliberately ends with a reserved lookup suffix.
    title = 'publication decade'
    parameter_name = 'decade__isnull'  # Ends with '__isnull"
class DepartmentListFilterLookupWithNonStringValue(SimpleListFilter):
    """Filter whose lookup values are integer department PKs, not strings."""
    title = 'department'
    parameter_name = 'department'

    def lookups(self, request, model_admin):
        # Intentionally (int, str) pairs rather than (str, str) (Refs #19318).
        employees = model_admin.get_queryset(request).all()
        pairs = {(emp.department.id, emp.department.code) for emp in employees}
        return sorted(pairs)

    def queryset(self, request, queryset):
        if self.value():
            return queryset.filter(department__id=self.value())
class DepartmentListFilterLookupWithUnderscoredParameter(DepartmentListFilterLookupWithNonStringValue):
    # Same filter, but with a double-underscore in the parameter name.
    parameter_name = 'department__whatever'
class DepartmentListFilterLookupWithDynamicValue(DecadeListFilterWithTitleAndParameter):
    """Lookups that change depending on the currently-selected value:
    the selected decade is removed from the offered choices."""

    def lookups(self, request, model_admin):
        eighties = ('the 80s', "the 1980's")
        nineties = ('the 90s', "the 1990's")
        selected = self.value()
        if selected == 'the 80s':
            return (nineties,)
        if selected == 'the 90s':
            return (eighties,)
        return (eighties, nineties)
class CustomUserAdmin(UserAdmin):
    # Filter users by reverse many-to-many book relations.
    list_filter = ('books_authored', 'books_contributed')

class BookAdmin(ModelAdmin):
    # Baseline admin: plain field-name filters across several field types.
    list_filter = ('year', 'author', 'contributors', 'is_best_seller', 'date_registered', 'no')
    ordering = ('-id',)

class BookAdminWithTupleBooleanFilter(BookAdmin):
    # Same as BookAdmin but with an explicit (field, FilterClass) tuple for
    # the boolean field.
    list_filter = (
        'year',
        'author',
        'contributors',
        ('is_best_seller', BooleanFieldListFilter),
        'date_registered',
        'no',
    )

class BookAdminWithUnderscoreLookupAndTuple(BookAdmin):
    # Uses a related-field traversal ('author__email') as a filter field.
    list_filter = (
        'year',
        ('author__email', AllValuesFieldListFilter),
        'contributors',
        'is_best_seller',
        'date_registered',
        'no',
    )
class BookAdminWithCustomQueryset(ModelAdmin):
    """ModelAdmin whose changelist queryset is restricted to one author."""
    list_filter = ('year',)

    def __init__(self, user, *args, **kwargs):
        # Bind the admin instance to a specific user at construction time.
        self.user = user
        super().__init__(*args, **kwargs)

    def get_queryset(self, request):
        # Only the bound user's books are visible in this admin.
        return super().get_queryset(request).filter(author=self.user)
class BookAdminRelatedOnlyFilter(ModelAdmin):
    # Uses RelatedOnlyFieldListFilter so only related objects actually in
    # use appear as choices.
    list_filter = (
        'year', 'is_best_seller', 'date_registered', 'no',
        ('author', RelatedOnlyFieldListFilter),
        ('contributors', RelatedOnlyFieldListFilter),
        ('employee__department', RelatedOnlyFieldListFilter),
    )
    ordering = ('-id',)

class DecadeFilterBookAdmin(ModelAdmin):
    # Mixes a plain field filter with a custom SimpleListFilter subclass.
    list_filter = ('author', DecadeListFilterWithTitleAndParameter)
    ordering = ('-id',)

class NotNinetiesListFilterAdmin(ModelAdmin):
    list_filter = (NotNinetiesListFilter,)

class DecadeFilterBookAdminWithoutTitle(ModelAdmin):
    list_filter = (DecadeListFilterWithoutTitle,)

class DecadeFilterBookAdminWithoutParameter(ModelAdmin):
    list_filter = (DecadeListFilterWithoutParameter,)

class DecadeFilterBookAdminWithNoneReturningLookups(ModelAdmin):
    list_filter = (DecadeListFilterWithNoneReturningLookups,)

class DecadeFilterBookAdminWithFailingQueryset(ModelAdmin):
    list_filter = (DecadeListFilterWithFailingQueryset,)

class DecadeFilterBookAdminWithQuerysetBasedLookups(ModelAdmin):
    list_filter = (DecadeListFilterWithQuerysetBasedLookups,)

class DecadeFilterBookAdminParameterEndsWith__In(ModelAdmin):
    list_filter = (DecadeListFilterParameterEndsWith__In,)

class DecadeFilterBookAdminParameterEndsWith__Isnull(ModelAdmin):
    list_filter = (DecadeListFilterParameterEndsWith__Isnull,)

class EmployeeAdmin(ModelAdmin):
    list_display = ['name', 'department']
    list_filter = ['department']

class DepartmentFilterEmployeeAdmin(EmployeeAdmin):
    list_filter = [DepartmentListFilterLookupWithNonStringValue, ]

class DepartmentFilterUnderscoredEmployeeAdmin(EmployeeAdmin):
    list_filter = [DepartmentListFilterLookupWithUnderscoredParameter, ]

class DepartmentFilterDynamicValueBookAdmin(EmployeeAdmin):
    list_filter = [DepartmentListFilterLookupWithDynamicValue, ]

class BookmarkAdminGenericRelation(ModelAdmin):
    # Filter through a generic relation ('tags' is a GenericRelation on
    # Bookmark — presumably; verify against .models).
    list_filter = ['tags__tag']
class ListFiltersTests(TestCase):
    def setUp(self):
        """Create the date anchors, users, books, departments and employees
        shared by all list-filter tests."""
        # Date anchors for the Today / This month / This year / Past 7 days
        # windows exercised by the date-filter tests.
        self.today = datetime.date.today()
        self.tomorrow = self.today + datetime.timedelta(days=1)
        self.one_week_ago = self.today - datetime.timedelta(days=7)
        if self.today.month == 12:
            # December: "next month" rolls over into January of next year.
            self.next_month = self.today.replace(year=self.today.year + 1, month=1, day=1)
        else:
            self.next_month = self.today.replace(month=self.today.month + 1, day=1)
        self.next_year = self.today.replace(year=self.today.year + 1, month=1, day=1)
        self.request_factory = RequestFactory()
        # Users
        self.alfred = User.objects.create_user('alfred', 'alfred@example.com')
        self.bob = User.objects.create_user('bob', 'bob@example.com')
        self.lisa = User.objects.create_user('lisa', 'lisa@example.com')
        # Books — deliberately varied: year/author/date_registered may be
        # None, no boolean value for django_book's is_best_seller, etc.
        self.djangonaut_book = Book.objects.create(
            title='Djangonaut: an art of living', year=2009,
            author=self.alfred, is_best_seller=True, date_registered=self.today,
        )
        self.bio_book = Book.objects.create(
            title='Django: a biography', year=1999, author=self.alfred,
            is_best_seller=False, no=207,
        )
        self.django_book = Book.objects.create(
            title='The Django Book', year=None, author=self.bob,
            is_best_seller=None, date_registered=self.today, no=103,
        )
        self.guitar_book = Book.objects.create(
            title='Guitar for dummies', year=2002, is_best_seller=True,
            date_registered=self.one_week_ago,
        )
        self.guitar_book.contributors.set([self.bob, self.lisa])
        # Departments
        self.dev = Department.objects.create(code='DEV', description='Development')
        self.design = Department.objects.create(code='DSN', description='Design')
        # Employees
        self.john = Employee.objects.create(name='John Blue', department=self.dev)
        self.jack = Employee.objects.create(name='Jack Red', department=self.design)
    def test_choicesfieldlistfilter_has_none_choice(self):
        """
        The last choice is for the None value.
        """
        class BookmarkChoicesAdmin(ModelAdmin):
            list_display = ['none_or_null']
            list_filter = ['none_or_null']

        modeladmin = BookmarkChoicesAdmin(Bookmark, site)
        request = self.request_factory.get('/', {})
        changelist = modeladmin.get_changelist_instance(request)
        filterspec = changelist.get_filters(request)[0][0]
        choices = list(filterspec.choices(changelist))
        # The trailing choice targets the NULL value via an __isnull lookup.
        self.assertEqual(choices[-1]['display'], 'None')
        self.assertEqual(choices[-1]['query_string'], '?none_or_null__isnull=True')
    def test_datefieldlistfilter(self):
        """DateFieldListFilter: the Today / This month / This year /
        Past 7 days / No date / Has date choices each restrict the queryset
        and are marked as selected."""
        modeladmin = BookAdmin(Book, site)

        request = self.request_factory.get('/')
        # NOTE(review): this assignment is overwritten below and
        # get_changelist() returns the ChangeList *class*, not an instance —
        # looks like a leftover; confirm before removing.
        changelist = modeladmin.get_changelist(request)

        # "Today" window: [today, tomorrow).
        request = self.request_factory.get('/', {'date_registered__gte': self.today,
                                                 'date_registered__lt': self.tomorrow})
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][4]
        self.assertEqual(filterspec.title, 'date registered')
        choice = select_by(filterspec.choices(changelist), "display", "Today")
        self.assertIs(choice['selected'], True)
        self.assertEqual(
            choice['query_string'],
            '?date_registered__gte=%s&date_registered__lt=%s' % (
                self.today,
                self.tomorrow,
            )
        )

        # "This month" window: [first of month, first of next month).
        request = self.request_factory.get('/', {'date_registered__gte': self.today.replace(day=1),
                                                 'date_registered__lt': self.next_month})
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        if (self.today.year, self.today.month) == (self.one_week_ago.year, self.one_week_ago.month):
            # In case one week ago is in the same month.
            self.assertEqual(list(queryset), [self.guitar_book, self.django_book, self.djangonaut_book])
        else:
            self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][4]
        self.assertEqual(filterspec.title, 'date registered')
        choice = select_by(filterspec.choices(changelist), "display", "This month")
        self.assertIs(choice['selected'], True)
        self.assertEqual(
            choice['query_string'],
            '?date_registered__gte=%s&date_registered__lt=%s' % (
                self.today.replace(day=1),
                self.next_month,
            )
        )

        # "This year" window: [Jan 1, Jan 1 next year).
        request = self.request_factory.get('/', {'date_registered__gte': self.today.replace(month=1, day=1),
                                                 'date_registered__lt': self.next_year})
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        if self.today.year == self.one_week_ago.year:
            # In case one week ago is in the same year.
            self.assertEqual(list(queryset), [self.guitar_book, self.django_book, self.djangonaut_book])
        else:
            self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][4]
        self.assertEqual(filterspec.title, 'date registered')
        choice = select_by(filterspec.choices(changelist), "display", "This year")
        self.assertIs(choice['selected'], True)
        self.assertEqual(
            choice['query_string'],
            '?date_registered__gte=%s&date_registered__lt=%s' % (
                self.today.replace(month=1, day=1),
                self.next_year,
            )
        )

        # "Past 7 days" window: [one week ago, tomorrow).
        request = self.request_factory.get('/', {
            'date_registered__gte': str(self.one_week_ago),
            'date_registered__lt': str(self.tomorrow),
        })
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.guitar_book, self.django_book, self.djangonaut_book])
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][4]
        self.assertEqual(filterspec.title, 'date registered')
        choice = select_by(filterspec.choices(changelist), "display", "Past 7 days")
        self.assertIs(choice['selected'], True)
        self.assertEqual(
            choice['query_string'],
            '?date_registered__gte=%s&date_registered__lt=%s' % (
                str(self.one_week_ago),
                str(self.tomorrow),
            )
        )

        # Null/not null queries
        request = self.request_factory.get('/', {'date_registered__isnull': 'True'})
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(queryset.count(), 1)
        self.assertEqual(queryset[0], self.bio_book)
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][4]
        self.assertEqual(filterspec.title, 'date registered')
        choice = select_by(filterspec.choices(changelist), 'display', 'No date')
        self.assertIs(choice['selected'], True)
        self.assertEqual(choice['query_string'], '?date_registered__isnull=True')

        request = self.request_factory.get('/', {'date_registered__isnull': 'False'})
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(queryset.count(), 3)
        self.assertEqual(list(queryset), [self.guitar_book, self.django_book, self.djangonaut_book])
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][4]
        self.assertEqual(filterspec.title, 'date registered')
        choice = select_by(filterspec.choices(changelist), 'display', 'Has date')
        self.assertIs(choice['selected'], True)
        self.assertEqual(choice['query_string'], '?date_registered__isnull=False')
    @unittest.skipIf(
        sys.platform.startswith('win'),
        "Windows doesn't support setting a timezone that differs from the "
        "system timezone."
    )
    @override_settings(USE_TZ=True)
    def test_datefieldlistfilter_with_time_zone_support(self):
        # Regression for #17830: the date filter must behave identically
        # when USE_TZ is enabled, so rerun the whole date-filter test.
        self.test_datefieldlistfilter()
    def test_allvaluesfieldlistfilter(self):
        """AllValuesFieldListFilter: NULL maps to the trailing __isnull
        choice, and a concrete value selects the matching choice."""
        modeladmin = BookAdmin(Book, site)

        request = self.request_factory.get('/', {'year__isnull': 'True'})
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct queryset is returned
        queryset = changelist.get_queryset(request)
        self.assertEqual(list(queryset), [self.django_book])
        # Make sure the last choice is None and is selected
        filterspec = changelist.get_filters(request)[0][0]
        self.assertEqual(filterspec.title, 'year')
        choices = list(filterspec.choices(changelist))
        self.assertIs(choices[-1]['selected'], True)
        self.assertEqual(choices[-1]['query_string'], '?year__isnull=True')

        request = self.request_factory.get('/', {'year': '2002'})
        changelist = modeladmin.get_changelist_instance(request)
        # Make sure the correct choice is selected
        filterspec = changelist.get_filters(request)[0][0]
        self.assertEqual(filterspec.title, 'year')
        choices = list(filterspec.choices(changelist))
        # choices[2] is presumably the 2002 entry ("All", 1999, 2002, 2009
        # ordering) — follows from the fixture years.
        self.assertIs(choices[2]['selected'], True)
        self.assertEqual(choices[2]['query_string'], '?year=2002')
def test_allvaluesfieldlistfilter_custom_qs(self):
    """AllValuesFieldListFilter choices reflect a ModelAdmin's custom queryset."""
    # Make sure that correct filters are returned with custom querysets
    modeladmin = BookAdminWithCustomQueryset(self.alfred, Book, site)
    request = self.request_factory.get('/')
    changelist = modeladmin.get_changelist_instance(request)

    filterspec = changelist.get_filters(request)[0][0]
    choices = list(filterspec.choices(changelist))
    # Should have 'All', 1999 and 2009 options i.e. the subset of years of
    # books written by alfred (which is the filtering criteria set by
    # BookAdminWithCustomQueryset.get_queryset())
    self.assertEqual(3, len(choices))
    self.assertEqual(choices[0]['query_string'], '?')
    self.assertEqual(choices[1]['query_string'], '?year=1999')
    self.assertEqual(choices[2]['query_string'], '?year=2009')
def test_relatedfieldlistfilter_foreignkey(self):
    """RelatedFieldListFilter on a FK lists all users, supports isnull and exact pk."""
    modeladmin = BookAdmin(Book, site)

    request = self.request_factory.get('/')
    changelist = modeladmin.get_changelist_instance(request)
    # Make sure that all users are present in the author's list filter
    filterspec = changelist.get_filters(request)[0][1]
    expected = [(self.alfred.pk, 'alfred'), (self.bob.pk, 'bob'), (self.lisa.pk, 'lisa')]
    self.assertEqual(sorted(filterspec.lookup_choices), sorted(expected))

    request = self.request_factory.get('/', {'author__isnull': 'True'})
    changelist = modeladmin.get_changelist_instance(request)
    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.guitar_book])
    # Make sure the last choice is None and is selected
    filterspec = changelist.get_filters(request)[0][1]
    self.assertEqual(filterspec.title, 'Verbose Author')
    choices = list(filterspec.choices(changelist))
    self.assertIs(choices[-1]['selected'], True)
    self.assertEqual(choices[-1]['query_string'], '?author__isnull=True')

    request = self.request_factory.get('/', {'author__id__exact': self.alfred.pk})
    changelist = modeladmin.get_changelist_instance(request)
    # Make sure the correct choice is selected
    filterspec = changelist.get_filters(request)[0][1]
    self.assertEqual(filterspec.title, 'Verbose Author')
    # order of choices depends on User model, which has no order
    choice = select_by(filterspec.choices(changelist), "display", "alfred")
    self.assertIs(choice['selected'], True)
    self.assertEqual(choice['query_string'], '?author__id__exact=%d' % self.alfred.pk)
def test_relatedfieldlistfilter_manytomany(self):
    """RelatedFieldListFilter on an M2M lists all users, supports isnull and exact pk."""
    modeladmin = BookAdmin(Book, site)

    request = self.request_factory.get('/')
    changelist = modeladmin.get_changelist_instance(request)
    # Make sure that all users are present in the contrib's list filter
    filterspec = changelist.get_filters(request)[0][2]
    expected = [(self.alfred.pk, 'alfred'), (self.bob.pk, 'bob'), (self.lisa.pk, 'lisa')]
    self.assertEqual(sorted(filterspec.lookup_choices), sorted(expected))

    request = self.request_factory.get('/', {'contributors__isnull': 'True'})
    changelist = modeladmin.get_changelist_instance(request)
    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.django_book, self.bio_book, self.djangonaut_book])
    # Make sure the last choice is None and is selected
    filterspec = changelist.get_filters(request)[0][2]
    self.assertEqual(filterspec.title, 'Verbose Contributors')
    choices = list(filterspec.choices(changelist))
    self.assertIs(choices[-1]['selected'], True)
    self.assertEqual(choices[-1]['query_string'], '?contributors__isnull=True')

    request = self.request_factory.get('/', {'contributors__id__exact': self.bob.pk})
    changelist = modeladmin.get_changelist_instance(request)
    # Make sure the correct choice is selected
    filterspec = changelist.get_filters(request)[0][2]
    self.assertEqual(filterspec.title, 'Verbose Contributors')
    choice = select_by(filterspec.choices(changelist), "display", "bob")
    self.assertIs(choice['selected'], True)
    self.assertEqual(choice['query_string'], '?contributors__id__exact=%d' % self.bob.pk)
def test_relatedfieldlistfilter_reverse_relationships(self):
    """RelatedFieldListFilter works on reverse FK and reverse M2M relations,
    and disappears entirely once no related objects remain."""
    modeladmin = CustomUserAdmin(User, site)

    # FK relationship -----
    request = self.request_factory.get('/', {'books_authored__isnull': 'True'})
    changelist = modeladmin.get_changelist_instance(request)
    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.lisa])
    # Make sure the last choice is None and is selected
    filterspec = changelist.get_filters(request)[0][0]
    self.assertEqual(filterspec.title, 'book')
    choices = list(filterspec.choices(changelist))
    self.assertIs(choices[-1]['selected'], True)
    self.assertEqual(choices[-1]['query_string'], '?books_authored__isnull=True')

    request = self.request_factory.get('/', {'books_authored__id__exact': self.bio_book.pk})
    changelist = modeladmin.get_changelist_instance(request)
    # Make sure the correct choice is selected
    filterspec = changelist.get_filters(request)[0][0]
    self.assertEqual(filterspec.title, 'book')
    choice = select_by(filterspec.choices(changelist), "display", self.bio_book.title)
    self.assertIs(choice['selected'], True)
    self.assertEqual(choice['query_string'], '?books_authored__id__exact=%d' % self.bio_book.pk)

    # M2M relationship -----
    request = self.request_factory.get('/', {'books_contributed__isnull': 'True'})
    changelist = modeladmin.get_changelist_instance(request)
    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.alfred])
    # Make sure the last choice is None and is selected
    filterspec = changelist.get_filters(request)[0][1]
    self.assertEqual(filterspec.title, 'book')
    choices = list(filterspec.choices(changelist))
    self.assertIs(choices[-1]['selected'], True)
    self.assertEqual(choices[-1]['query_string'], '?books_contributed__isnull=True')

    request = self.request_factory.get('/', {'books_contributed__id__exact': self.django_book.pk})
    changelist = modeladmin.get_changelist_instance(request)
    # Make sure the correct choice is selected
    filterspec = changelist.get_filters(request)[0][1]
    self.assertEqual(filterspec.title, 'book')
    choice = select_by(filterspec.choices(changelist), "display", self.django_book.title)
    self.assertIs(choice['selected'], True)
    self.assertEqual(choice['query_string'], '?books_contributed__id__exact=%d' % self.django_book.pk)

    # With one book, the list filter should appear because there is also a
    # (None) option.
    Book.objects.exclude(pk=self.djangonaut_book.pk).delete()
    filterspec = changelist.get_filters(request)[0]
    self.assertEqual(len(filterspec), 2)
    # With no books remaining, no list filters should appear.
    Book.objects.all().delete()
    filterspec = changelist.get_filters(request)[0]
    self.assertEqual(len(filterspec), 0)
def test_relatedonlyfieldlistfilter_foreignkey(self):
    """RelatedOnlyFieldListFilter limits FK choices to users who authored a book."""
    modeladmin = BookAdminRelatedOnlyFilter(Book, site)
    request = self.request_factory.get('/')
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure that only actual authors are present in author's list filter
    filterspec = changelist.get_filters(request)[0][4]
    expected = [(self.alfred.pk, 'alfred'), (self.bob.pk, 'bob')]
    self.assertEqual(sorted(filterspec.lookup_choices), sorted(expected))
def test_relatedonlyfieldlistfilter_underscorelookup_foreignkey(self):
    """RelatedOnlyFieldListFilter on a spanned (double-underscore) FK path
    only offers departments actually referenced by a book's employee."""
    # An unreferenced department that must NOT appear in the choices.
    Department.objects.create(code='TEST', description='Testing')
    self.djangonaut_book.employee = self.john
    self.djangonaut_book.save()
    self.bio_book.employee = self.jack
    self.bio_book.save()

    modeladmin = BookAdminRelatedOnlyFilter(Book, site)
    request = self.request_factory.get('/')
    changelist = modeladmin.get_changelist_instance(request)

    # Only actual departments should be present in employee__department's
    # list filter.
    filterspec = changelist.get_filters(request)[0][6]
    expected = [
        (self.dev.code, str(self.dev)),
        (self.design.code, str(self.design)),
    ]
    self.assertEqual(sorted(filterspec.lookup_choices), sorted(expected))
def test_relatedonlyfieldlistfilter_manytomany(self):
    """RelatedOnlyFieldListFilter limits M2M choices to actual contributors."""
    modeladmin = BookAdminRelatedOnlyFilter(Book, site)
    request = self.request_factory.get('/')
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure that only actual contributors are present in contrib's list filter
    filterspec = changelist.get_filters(request)[0][5]
    expected = [(self.bob.pk, 'bob'), (self.lisa.pk, 'lisa')]
    self.assertEqual(sorted(filterspec.lookup_choices), sorted(expected))
def test_listfilter_genericrelation(self):
    """list_filter can traverse a GenericRelation (tags__tag)."""
    django_bookmark = Bookmark.objects.create(url='https://www.djangoproject.com/')
    python_bookmark = Bookmark.objects.create(url='https://www.python.org/')
    kernel_bookmark = Bookmark.objects.create(url='https://www.kernel.org/')

    TaggedItem.objects.create(content_object=django_bookmark, tag='python')
    TaggedItem.objects.create(content_object=python_bookmark, tag='python')
    TaggedItem.objects.create(content_object=kernel_bookmark, tag='linux')

    modeladmin = BookmarkAdminGenericRelation(Bookmark, site)

    request = self.request_factory.get('/', {'tags__tag': 'python'})
    changelist = modeladmin.get_changelist_instance(request)
    queryset = changelist.get_queryset(request)

    # Only the two 'python'-tagged bookmarks should match.
    expected = [python_bookmark, django_bookmark]
    self.assertEqual(list(queryset), expected)
def test_booleanfieldlistfilter(self):
    """BooleanFieldListFilter with the plain field-name list_filter syntax."""
    modeladmin = BookAdmin(Book, site)
    self.verify_booleanfieldlistfilter(modeladmin)
def test_booleanfieldlistfilter_tuple(self):
    """BooleanFieldListFilter with the (field, FilterClass) tuple syntax."""
    modeladmin = BookAdminWithTupleBooleanFilter(Book, site)
    self.verify_booleanfieldlistfilter(modeladmin)
def verify_booleanfieldlistfilter(self, modeladmin):
    """Shared assertions for BooleanFieldListFilter: exact=0, exact=1 and
    isnull=True each yield the right queryset and selected choice."""
    request = self.request_factory.get('/')
    changelist = modeladmin.get_changelist_instance(request)

    request = self.request_factory.get('/', {'is_best_seller__exact': 0})
    changelist = modeladmin.get_changelist_instance(request)
    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.bio_book])
    # Make sure the correct choice is selected
    filterspec = changelist.get_filters(request)[0][3]
    self.assertEqual(filterspec.title, 'is best seller')
    choice = select_by(filterspec.choices(changelist), "display", "No")
    self.assertIs(choice['selected'], True)
    self.assertEqual(choice['query_string'], '?is_best_seller__exact=0')

    request = self.request_factory.get('/', {'is_best_seller__exact': 1})
    changelist = modeladmin.get_changelist_instance(request)
    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.guitar_book, self.djangonaut_book])
    # Make sure the correct choice is selected
    filterspec = changelist.get_filters(request)[0][3]
    self.assertEqual(filterspec.title, 'is best seller')
    choice = select_by(filterspec.choices(changelist), "display", "Yes")
    self.assertIs(choice['selected'], True)
    self.assertEqual(choice['query_string'], '?is_best_seller__exact=1')

    request = self.request_factory.get('/', {'is_best_seller__isnull': 'True'})
    changelist = modeladmin.get_changelist_instance(request)
    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.django_book])
    # Make sure the correct choice is selected
    filterspec = changelist.get_filters(request)[0][3]
    self.assertEqual(filterspec.title, 'is best seller')
    choice = select_by(filterspec.choices(changelist), "display", "Unknown")
    self.assertIs(choice['selected'], True)
    self.assertEqual(choice['query_string'], '?is_best_seller__isnull=True')
def test_fieldlistfilter_underscorelookup_tuple(self):
    """
    Ensure ('fieldpath', ClassName ) lookups pass lookup_allowed checks
    when fieldpath contains double underscore in value (#19182).
    """
    modeladmin = BookAdminWithUnderscoreLookupAndTuple(Book, site)
    request = self.request_factory.get('/')
    changelist = modeladmin.get_changelist_instance(request)

    request = self.request_factory.get('/', {'author__email': 'alfred@example.com'})
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.bio_book, self.djangonaut_book])
def test_fieldlistfilter_invalid_lookup_parameters(self):
    """Filtering by an invalid value."""
    modeladmin = BookAdmin(Book, site)
    # A non-integer value for an integer pk lookup must raise, not 500.
    request = self.request_factory.get('/', {'author__id__exact': 'StringNotInteger!'})
    with self.assertRaises(IncorrectLookupParameters):
        modeladmin.get_changelist_instance(request)
def test_simplelistfilter(self):
    """SimpleListFilter: default 'All' choice, each decade lookup, and
    combination with a second (FK) filter preserve both query strings."""
    modeladmin = DecadeFilterBookAdmin(Book, site)

    # Make sure that the first option is 'All' ---------------------------
    request = self.request_factory.get('/', {})
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), list(Book.objects.all().order_by('-id')))

    # Make sure the correct choice is selected
    filterspec = changelist.get_filters(request)[0][1]
    self.assertEqual(filterspec.title, 'publication decade')
    choices = list(filterspec.choices(changelist))
    self.assertEqual(choices[0]['display'], 'All')
    self.assertIs(choices[0]['selected'], True)
    self.assertEqual(choices[0]['query_string'], '?')

    # Look for books in the 1980s ----------------------------------------
    request = self.request_factory.get('/', {'publication-decade': 'the 80s'})
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [])

    # Make sure the correct choice is selected
    filterspec = changelist.get_filters(request)[0][1]
    self.assertEqual(filterspec.title, 'publication decade')
    choices = list(filterspec.choices(changelist))
    self.assertEqual(choices[1]['display'], 'the 1980\'s')
    self.assertIs(choices[1]['selected'], True)
    self.assertEqual(choices[1]['query_string'], '?publication-decade=the+80s')

    # Look for books in the 1990s ----------------------------------------
    request = self.request_factory.get('/', {'publication-decade': 'the 90s'})
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.bio_book])

    # Make sure the correct choice is selected
    filterspec = changelist.get_filters(request)[0][1]
    self.assertEqual(filterspec.title, 'publication decade')
    choices = list(filterspec.choices(changelist))
    self.assertEqual(choices[2]['display'], 'the 1990\'s')
    self.assertIs(choices[2]['selected'], True)
    self.assertEqual(choices[2]['query_string'], '?publication-decade=the+90s')

    # Look for books in the 2000s ----------------------------------------
    request = self.request_factory.get('/', {'publication-decade': 'the 00s'})
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.guitar_book, self.djangonaut_book])

    # Make sure the correct choice is selected
    filterspec = changelist.get_filters(request)[0][1]
    self.assertEqual(filterspec.title, 'publication decade')
    choices = list(filterspec.choices(changelist))
    self.assertEqual(choices[3]['display'], 'the 2000\'s')
    self.assertIs(choices[3]['selected'], True)
    self.assertEqual(choices[3]['query_string'], '?publication-decade=the+00s')

    # Combine multiple filters -------------------------------------------
    request = self.request_factory.get('/', {'publication-decade': 'the 00s', 'author__id__exact': self.alfred.pk})
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.djangonaut_book])

    # Make sure the correct choices are selected
    filterspec = changelist.get_filters(request)[0][1]
    self.assertEqual(filterspec.title, 'publication decade')
    choices = list(filterspec.choices(changelist))
    self.assertEqual(choices[3]['display'], 'the 2000\'s')
    self.assertIs(choices[3]['selected'], True)
    self.assertEqual(
        choices[3]['query_string'],
        '?author__id__exact=%s&publication-decade=the+00s' % self.alfred.pk
    )

    filterspec = changelist.get_filters(request)[0][0]
    self.assertEqual(filterspec.title, 'Verbose Author')
    choice = select_by(filterspec.choices(changelist), "display", "alfred")
    self.assertIs(choice['selected'], True)
    self.assertEqual(choice['query_string'], '?author__id__exact=%s&publication-decade=the+00s' % self.alfred.pk)
def test_listfilter_without_title(self):
    """
    Any filter must define a title.
    """
    modeladmin = DecadeFilterBookAdminWithoutTitle(Book, site)
    request = self.request_factory.get('/', {})
    # Missing 'title' must raise ImproperlyConfigured with a helpful message.
    msg = "The list filter 'DecadeListFilterWithoutTitle' does not specify a 'title'."
    with self.assertRaisesMessage(ImproperlyConfigured, msg):
        modeladmin.get_changelist_instance(request)
def test_simplelistfilter_without_parameter(self):
    """
    Any SimpleListFilter must define a parameter_name.
    """
    modeladmin = DecadeFilterBookAdminWithoutParameter(Book, site)
    request = self.request_factory.get('/', {})
    # Missing 'parameter_name' must raise ImproperlyConfigured.
    msg = "The list filter 'DecadeListFilterWithoutParameter' does not specify a 'parameter_name'."
    with self.assertRaisesMessage(ImproperlyConfigured, msg):
        modeladmin.get_changelist_instance(request)
def test_simplelistfilter_with_none_returning_lookups(self):
    """
    A SimpleListFilter lookups method can return None but disables the
    filter completely.
    """
    modeladmin = DecadeFilterBookAdminWithNoneReturningLookups(Book, site)
    request = self.request_factory.get('/', {})
    changelist = modeladmin.get_changelist_instance(request)
    # No filter specs at all when lookups() returns None.
    filterspec = changelist.get_filters(request)[0]
    self.assertEqual(len(filterspec), 0)
def test_filter_with_failing_queryset(self):
    """
    When a filter's queryset method fails, it fails loudly and
    the corresponding exception doesn't get swallowed (#17828).
    """
    modeladmin = DecadeFilterBookAdminWithFailingQueryset(Book, site)
    request = self.request_factory.get('/', {})
    # The filter's queryset() raises ZeroDivisionError, which must propagate.
    with self.assertRaises(ZeroDivisionError):
        modeladmin.get_changelist_instance(request)
def test_simplelistfilter_with_queryset_based_lookups(self):
    """SimpleListFilter lookups may be derived from a queryset; only the
    decades actually present in the data are offered."""
    modeladmin = DecadeFilterBookAdminWithQuerysetBasedLookups(Book, site)
    request = self.request_factory.get('/', {})
    changelist = modeladmin.get_changelist_instance(request)

    filterspec = changelist.get_filters(request)[0][0]
    self.assertEqual(filterspec.title, 'publication decade')
    choices = list(filterspec.choices(changelist))
    self.assertEqual(len(choices), 3)

    self.assertEqual(choices[0]['display'], 'All')
    self.assertIs(choices[0]['selected'], True)
    self.assertEqual(choices[0]['query_string'], '?')

    self.assertEqual(choices[1]['display'], 'the 1990\'s')
    self.assertIs(choices[1]['selected'], False)
    self.assertEqual(choices[1]['query_string'], '?publication-decade=the+90s')

    self.assertEqual(choices[2]['display'], 'the 2000\'s')
    self.assertIs(choices[2]['selected'], False)
    self.assertEqual(choices[2]['query_string'], '?publication-decade=the+00s')
def test_two_characters_long_field(self):
    """
    list_filter works with two-characters long field names (#16080).
    """
    modeladmin = BookAdmin(Book, site)
    request = self.request_factory.get('/', {'no': '207'})
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.bio_book])

    filterspec = changelist.get_filters(request)[0][-1]
    self.assertEqual(filterspec.title, 'number')
    choices = list(filterspec.choices(changelist))
    self.assertIs(choices[2]['selected'], True)
    self.assertEqual(choices[2]['query_string'], '?no=207')
def test_parameter_ends_with__in__or__isnull(self):
    """
    A SimpleListFilter's parameter name is not mistaken for a model field
    if it ends with '__isnull' or '__in' (#17091).
    """
    # When it ends with '__in' -----------------------------------------
    modeladmin = DecadeFilterBookAdminParameterEndsWith__In(Book, site)
    request = self.request_factory.get('/', {'decade__in': 'the 90s'})
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.bio_book])

    # Make sure the correct choice is selected
    filterspec = changelist.get_filters(request)[0][0]
    self.assertEqual(filterspec.title, 'publication decade')
    choices = list(filterspec.choices(changelist))
    self.assertEqual(choices[2]['display'], 'the 1990\'s')
    self.assertIs(choices[2]['selected'], True)
    self.assertEqual(choices[2]['query_string'], '?decade__in=the+90s')

    # When it ends with '__isnull' ---------------------------------------
    modeladmin = DecadeFilterBookAdminParameterEndsWith__Isnull(Book, site)
    request = self.request_factory.get('/', {'decade__isnull': 'the 90s'})
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.bio_book])

    # Make sure the correct choice is selected
    filterspec = changelist.get_filters(request)[0][0]
    self.assertEqual(filterspec.title, 'publication decade')
    choices = list(filterspec.choices(changelist))
    self.assertEqual(choices[2]['display'], 'the 1990\'s')
    self.assertIs(choices[2]['selected'], True)
    self.assertEqual(choices[2]['query_string'], '?decade__isnull=the+90s')
def test_lookup_with_non_string_value(self):
    """
    Ensure choices are set the selected class when using non-string values
    for lookups in SimpleListFilters (#19318).
    """
    modeladmin = DepartmentFilterEmployeeAdmin(Employee, site)
    request = self.request_factory.get('/', {'department': self.john.department.pk})
    changelist = modeladmin.get_changelist_instance(request)

    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.john])

    filterspec = changelist.get_filters(request)[0][-1]
    self.assertEqual(filterspec.title, 'department')
    choices = list(filterspec.choices(changelist))
    self.assertEqual(choices[1]['display'], 'DEV')
    self.assertIs(choices[1]['selected'], True)
    self.assertEqual(choices[1]['query_string'], '?department=%s' % self.john.department.pk)
def test_lookup_with_non_string_value_underscored(self):
    """
    Ensure SimpleListFilter lookups pass lookup_allowed checks when
    parameter_name attribute contains double-underscore value (#19182).
    """
    modeladmin = DepartmentFilterUnderscoredEmployeeAdmin(Employee, site)
    request = self.request_factory.get('/', {'department__whatever': self.john.department.pk})
    changelist = modeladmin.get_changelist_instance(request)

    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.john])

    filterspec = changelist.get_filters(request)[0][-1]
    self.assertEqual(filterspec.title, 'department')
    choices = list(filterspec.choices(changelist))
    self.assertEqual(choices[1]['display'], 'DEV')
    self.assertIs(choices[1]['selected'], True)
    self.assertEqual(choices[1]['query_string'], '?department__whatever=%s' % self.john.department.pk)
def test_fk_with_to_field(self):
    """
    A filter on a FK respects the FK's to_field attribute (#17972).
    """
    modeladmin = EmployeeAdmin(Employee, site)

    request = self.request_factory.get('/', {})
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.jack, self.john])

    filterspec = changelist.get_filters(request)[0][-1]
    self.assertEqual(filterspec.title, 'department')
    choices = list(filterspec.choices(changelist))

    # Query strings use the to_field value ('code'), not the pk.
    self.assertEqual(choices[0]['display'], 'All')
    self.assertIs(choices[0]['selected'], True)
    self.assertEqual(choices[0]['query_string'], '?')

    self.assertEqual(choices[1]['display'], 'Development')
    self.assertIs(choices[1]['selected'], False)
    self.assertEqual(choices[1]['query_string'], '?department__code__exact=DEV')

    self.assertEqual(choices[2]['display'], 'Design')
    self.assertIs(choices[2]['selected'], False)
    self.assertEqual(choices[2]['query_string'], '?department__code__exact=DSN')

    # Filter by Department=='Development' --------------------------------
    request = self.request_factory.get('/', {'department__code__exact': 'DEV'})
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.john])

    filterspec = changelist.get_filters(request)[0][-1]
    self.assertEqual(filterspec.title, 'department')
    choices = list(filterspec.choices(changelist))

    self.assertEqual(choices[0]['display'], 'All')
    self.assertIs(choices[0]['selected'], False)
    self.assertEqual(choices[0]['query_string'], '?')

    self.assertEqual(choices[1]['display'], 'Development')
    self.assertIs(choices[1]['selected'], True)
    self.assertEqual(choices[1]['query_string'], '?department__code__exact=DEV')

    self.assertEqual(choices[2]['display'], 'Design')
    self.assertIs(choices[2]['selected'], False)
    self.assertEqual(choices[2]['query_string'], '?department__code__exact=DSN')
def test_lookup_with_dynamic_value(self):
    """
    Ensure SimpleListFilter can access self.value() inside the lookup.
    """
    modeladmin = DepartmentFilterDynamicValueBookAdmin(Book, site)

    def _test_choices(request, expected_displays):
        # Helper: the filter's offered choices depend on the current value.
        changelist = modeladmin.get_changelist_instance(request)
        filterspec = changelist.get_filters(request)[0][0]
        self.assertEqual(filterspec.title, 'publication decade')
        choices = tuple(c['display'] for c in filterspec.choices(changelist))
        self.assertEqual(choices, expected_displays)

    _test_choices(self.request_factory.get('/', {}),
                  ("All", "the 1980's", "the 1990's"))

    _test_choices(self.request_factory.get('/', {'publication-decade': 'the 80s'}),
                  ("All", "the 1990's"))

    _test_choices(self.request_factory.get('/', {'publication-decade': 'the 90s'}),
                  ("All", "the 1980's"))
def test_list_filter_queryset_filtered_by_default(self):
    """
    A list filter that filters the queryset by default gives the correct
    full_result_count.
    """
    modeladmin = NotNinetiesListFilterAdmin(Book, site)
    request = self.request_factory.get('/', {})
    changelist = modeladmin.get_changelist_instance(request)
    changelist.get_results(request)
    # full_result_count counts all books, not just the default-filtered set.
    self.assertEqual(changelist.full_result_count, 4)
| 43.42196
| 119
| 0.671731
|
4a11c1d6ea97e9b3875786c29f85e406fa447c49
| 1,532
|
py
|
Python
|
edge_promoting.py
|
williamlus/pytorch-CartoonGAN
|
0de1105386529ffa1cd8a0d8f6fdc9faee325785
|
[
"MIT"
] | 356
|
2018-07-24T17:53:12.000Z
|
2022-03-30T15:26:04.000Z
|
edge_promoting.py
|
williamlus/pytorch-CartoonGAN
|
0de1105386529ffa1cd8a0d8f6fdc9faee325785
|
[
"MIT"
] | 17
|
2018-09-24T16:51:28.000Z
|
2021-01-23T03:00:20.000Z
|
edge_promoting.py
|
williamlus/pytorch-CartoonGAN
|
0de1105386529ffa1cd8a0d8f6fdc9faee325785
|
[
"MIT"
] | 84
|
2018-07-30T12:06:13.000Z
|
2021-11-06T08:18:16.000Z
|
import cv2, os
import numpy as np
from tqdm import tqdm
def edge_promoting(root, save):
    """Build edge-smoothed image pairs for CartoonGAN-style training.

    For every file in *root*: resize to 256x256, detect edges with Canny,
    dilate them, then Gaussian-blur only the dilated edge pixels. Each
    output written to *save* (named 1.png, 2.png, ...) is the original and
    the edge-smoothed image concatenated side by side (512x256).

    Parameters
    ----------
    root : str
        Directory containing the input images.
    save : str
        Output directory; created if it does not exist.
    """
    file_list = os.listdir(root)
    if not os.path.isdir(save):
        os.makedirs(save)
    kernel_size = 5
    kernel = np.ones((kernel_size, kernel_size), np.uint8)
    # 2-D Gaussian weights from the separable 1-D kernel (outer product).
    gauss = cv2.getGaussianKernel(kernel_size, 0)
    gauss = gauss * gauss.transpose(1, 0)
    n = 1
    for f in tqdm(file_list):
        rgb_img = cv2.imread(os.path.join(root, f))
        gray_img = cv2.imread(os.path.join(root, f), 0)
        rgb_img = cv2.resize(rgb_img, (256, 256))
        # Reflect-pad by kernel_size // 2 so the 5x5 window stays in bounds.
        pad_img = np.pad(rgb_img, ((2, 2), (2, 2), (0, 0)), mode='reflect')
        gray_img = cv2.resize(gray_img, (256, 256))
        edges = cv2.Canny(gray_img, 100, 200)
        dilation = cv2.dilate(edges, kernel)
        gauss_img = np.copy(rgb_img)
        idx = np.where(dilation != 0)
        # Blur only the dilated edge pixels. A single loop over the three
        # channels replaces the original's three duplicated statements.
        for i in range(np.sum(dilation != 0)):
            for c in range(3):
                gauss_img[idx[0][i], idx[1][i], c] = np.sum(np.multiply(
                    pad_img[idx[0][i]:idx[0][i] + kernel_size,
                            idx[1][i]:idx[1][i] + kernel_size, c],
                    gauss))
        result = np.concatenate((rgb_img, gauss_img), 1)
        cv2.imwrite(os.path.join(save, str(n) + '.png'), result)
        n += 1
| 46.424242
| 158
| 0.579634
|
4a11c2604077da636ea32864573cdf92120fbb73
| 1,294
|
py
|
Python
|
rss.py
|
dnuka/banda
|
26d9aa203794f2ea4d0d2327a0486e7b0b425fda
|
[
"MIT"
] | 8
|
2018-05-26T04:34:01.000Z
|
2021-11-08T11:41:31.000Z
|
rss.py
|
dnuka/banda
|
26d9aa203794f2ea4d0d2327a0486e7b0b425fda
|
[
"MIT"
] | 13
|
2018-10-11T04:50:25.000Z
|
2021-10-12T15:32:18.000Z
|
rss.py
|
dnuka/banda
|
26d9aa203794f2ea4d0d2327a0486e7b0b425fda
|
[
"MIT"
] | 11
|
2018-10-11T05:41:49.000Z
|
2020-06-20T03:18:29.000Z
|
#! /usr/bin/env python3
from urllib import request
from urllib.parse import urlparse
from bs4 import BeautifulSoup
# RSS feed URLs polled by pickup(); articles are collected in this order.
feeds = [
    "https://news.ycombinator.com/rss",
    "https://www.wired.com/feed/category/security/latest/rss",
    "https://arstechnica.com/feed/",
]
def identify(url):
    """Return a short source name derived from *url*'s hostname.

    The second-to-last dot-separated label of the hostname is used, so
    'news.ycombinator.com' -> 'ycombinator' and 'arstechnica.com' ->
    'arstechnica'. This matches the original 2-/3-label behavior and
    generalizes it to hostnames with more labels; a single-label host
    (e.g. 'localhost') is returned unchanged.
    """
    labels = urlparse(url).hostname.split(".")
    if len(labels) >= 2:
        return labels[-2]
    return labels[0]
def pickup():
    """Fetch every feed in ``feeds`` and return a list of article dicts.

    Each dict has keys 'title', 'link', 'comments' and 'source'. An
    article that carries a <comments> element is treated as a Hacker News
    item; otherwise 'comments' is None and 'source' is derived from the
    link's hostname via identify().

    Bug fixed: the original latched its ``state``/``hackernews`` flags to
    False permanently after the first comment-less article, so every
    subsequent article lost its comments URL and was mislabelled. The
    check is now made per article.
    """
    # Download and parse each feed.
    rss_data = []
    for feed in feeds:
        with request.urlopen(feed) as rss:
            data = BeautifulSoup(rss, "xml")
            rss_data.append(data)

    # Flatten all <item> elements across feeds, preserving feed order.
    raw_articles = []
    for source in rss_data:
        raw_articles.extend(source.find_all("item"))

    articles = []
    for article in raw_articles:
        # Evaluated per article — not latched across the whole loop.
        has_comments = article.comments is not None
        item = {
            "title": article.title.string,
            "link": article.link.string,
            "comments": article.comments.string if has_comments else None,
            "source":
                "hackernews" if has_comments else identify(article.link.string)
        }
        articles.append(item)
    return articles
| 24.415094
| 73
| 0.603555
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.